Dataset schema (per-column statistics from the viewer):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string (categorical) | 2 values |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string (categorical) | 684 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string (categorical) | 22 values |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string (categorical) | 147 values |
| src_encoding | string (categorical) | 25 values |
| language | string (categorical) | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string (categorical) | 142 values |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
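Each row below pairs this repository metadata with the file itself in the `content` column. As a minimal sketch of how the split could be iterated, assuming the rows are stored as Parquet shards readable with the Hugging Face `datasets` library (the `data/*.parquet` path and the `train` split name are placeholders):

```python
from datasets import load_dataset

# Placeholder path: point data_files at wherever this split's Parquet shards live.
ds = load_dataset("parquet", data_files="data/*.parquet", split="train")

for row in ds:
    # Repository- and file-level metadata accompany the raw source text.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    source = row["content"]  # the Python file contents as a single string
```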
3e488c1e6d8440ad53c140620d92ef2e370ce8d9
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/crnn_seq2seq_ocr/export.py
|
5e3a5b228456dd05aac02549e97f25cb124b1625
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,428
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
export.
"""
import os
import numpy as np
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
from src.attention_ocr import AttentionOCRInfer
from src.model_utils.config import config
from src.model_utils.device_adapter import get_device_id
def get_model():
'''generate model'''
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=get_device_id())
# Network
network = AttentionOCRInfer(config.eval_batch_size,
int(config.img_width / 4),
config.encoder_hidden_size,
config.decoder_hidden_size,
config.decoder_output_size,
config.max_length,
config.dropout_p)
checkpoint_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), config.checkpoint_path)
ckpt = load_checkpoint(checkpoint_path)
load_param_into_net(network, ckpt)
network.set_train(False)
print("Checkpoint loading Done!")
sos_id = config.characters_dictionary.go_id
images = Tensor(np.zeros((config.eval_batch_size, 3, config.img_height, config.img_width),
dtype=np.float32))
decoder_hidden = Tensor(np.zeros((1, config.eval_batch_size, config.decoder_hidden_size),
dtype=np.float16))
decoder_input = Tensor((np.ones((config.eval_batch_size, 1)) * sos_id).astype(np.int32))
inputs = (images, decoder_input, decoder_hidden)
export(network, *inputs, file_name=config.file_name, file_format=config.file_format)
if __name__ == '__main__':
get_model()
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
38eaa72b941d96798a70a6dda2b4584e8d01c6e4
|
031dbb2a3ea47a0483db310db9f98796cc83c500
|
/787_Cheapest Flights Within K Stops.py
|
6023db6bc0143b13eabbfbbe62dfaa8852018733
|
[] |
no_license
|
Shwan-Yu/Data_Structures_and_Algorithms
|
429fb127983e32931f2168f44ef1484c1cc4c87f
|
9126c2089e41d4d7fd3a204115eba2b5074076ad
|
refs/heads/master
| 2020-03-27T11:46:59.947303
| 2019-08-23T15:15:21
| 2019-08-23T15:15:21
| 146,507,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
class Solution(object):
def findCheapestPrice(self, n, flights, src, dst, K):
"""
:type n: int
:type flights: List[List[int]]
:type src: int
:type dst: int
:type K: int
:rtype: int
"""
if not flights: return 0
dp = [float("inf")] * n
dp[src] = 0
for k in range(K+1):
dp_cur = dp[:]
for (a, i, price) in flights:
dp_cur[i] = min(dp_cur[i], dp[a] + price)
dp = dp_cur
return dp[dst] if dp[dst] != float("inf") else -1
|
[
"noreply@github.com"
] |
Shwan-Yu.noreply@github.com
|
17f42d961d07ae670ad5a9895a53fe22b9e5e27b
|
3def27e101ca346af6b30247769719f5cd5a27c0
|
/indiaos/config/docs.py
|
95939bb230467499359e5175e7a089a84d88ecaa
|
[
"MIT"
] |
permissive
|
anto-christo/indiaos
|
9fb94527092570981288b42a05001cf33b61b522
|
4b029cf86e49dcabad852312293e6fa5116d4155
|
refs/heads/master
| 2020-09-13T19:50:15.187112
| 2019-11-20T08:30:13
| 2019-11-20T08:30:13
| 222,887,034
| 1
| 0
|
NOASSERTION
| 2019-11-20T08:24:27
| 2019-11-20T08:24:26
| null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/indiaos"
# docs_base_url = "https://[org_name].github.io/indiaos"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "IndiaOS"
|
[
"scm.mymail@gmail.com"
] |
scm.mymail@gmail.com
|
bfcf8014c00faabd8828474c77a4d96497ba9a38
|
da54cb56e69ca730156156ca70a720cfbd7723ea
|
/others/coffee_plackett/mindsdb_acc.py
|
78dfe064c969ce8ad89c969d29d4efe3ffbc3f23
|
[
"MIT"
] |
permissive
|
setohe0909/mindsdb-examples
|
b4f7908aa0c96fc0ea0721931f95bc4960bc2867
|
04fc9b4ad9bb8e960a996e1c4eab1e6054bca8ff
|
refs/heads/master
| 2022-11-27T00:21:16.114913
| 2020-08-06T15:33:17
| 2020-08-06T15:33:17
| 296,442,864
| 1
| 0
|
MIT
| 2020-09-17T21:08:52
| 2020-09-17T21:08:51
| null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
from mindsdb_native import Predictor
mdb = Predictor(name='coffee_predictor')
mdb.learn(from_data='data.tsv', to_predict=['Coffe_Malt', 'Chocolat', 'Gold', 'Medium_Barley', 'Dark_Barley', 'Dandelion', 'Beets', 'Chicory_Roots', 'Figs'])
|
[
"george@cerebralab.com"
] |
george@cerebralab.com
|
484d7007aa18126e562a439f5ddb39f19a4e0ea8
|
908655251066427f654ee33ebdf804f9f302fcc3
|
/Tests/CartPoleAST/CartPoleNdRewardt/MultiCartPoleNd_RLNonInter.py
|
7bde5b51f00c93349bfd677128345b1493e7a0c2
|
[] |
no_license
|
maxiaoba/MCTSPO
|
be567f80f1dcf5c35ac857a1e6690e1ac599a59d
|
eedfccb5a94e089bd925b58f3d65eef505378bbc
|
refs/heads/main
| 2023-07-05T02:20:16.752650
| 2021-07-06T06:04:40
| 2021-07-06T06:04:40
| 381,811,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,181
|
py
|
import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1" #just use CPU
# from garage.tf.algos.trpo import TRPO
from garage.baselines.zero_baseline import ZeroBaseline
from mylab.envs.tfenv import TfEnv
from garage.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from garage.tf.policies.gaussian_lstm_policy import GaussianLSTMPolicy
from garage.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer, FiniteDifferenceHvp
from garage.misc import logger
from mylab.rewards.ast_reward import ASTReward
from mylab.envs.ast_env import ASTEnv
from mylab.simulators.policy_simulator import PolicySimulator
from CartPoleNd.cartpole_nd import CartPoleNdEnv
from mylab.algos.trpo import TRPO
import os.path as osp
import argparse
# from example_save_trials import *
import tensorflow as tf
import joblib
import math
import numpy as np
import mcts.BoundedPriorityQueues as BPQ
import csv
# Logger Params
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default="cartpole")
parser.add_argument('--n_trial', type=int, default=5)
parser.add_argument('--trial_start', type=int, default=0)
parser.add_argument('--n_itr', type=int, default=2500)
parser.add_argument('--batch_size', type=int, default=4000)
parser.add_argument('--snapshot_mode', type=str, default="gap")
parser.add_argument('--snapshot_gap', type=int, default=500)
parser.add_argument('--log_dir', type=str, default='./Data/AST/RLNonInter')
parser.add_argument('--args_data', type=str, default=None)
args = parser.parse_args()
top_k = 10
max_path_length = 100
interactive = False
tf.set_random_seed(0)
sess = tf.Session()
sess.__enter__()
# Instantiate the env
env_inner = CartPoleNdEnv(nd=10,use_seed=False)
data = joblib.load("../CartPole/Data/Train/itr_50.pkl")
policy_inner = data['policy']
reward_function = ASTReward()
simulator = PolicySimulator(env=env_inner,policy=policy_inner,max_path_length=max_path_length)
env = TfEnv(ASTEnv(interactive=interactive,
simulator=simulator,
sample_init_state=False,
s_0=[0.0, 0.0, 0.0 * math.pi / 180, 0.0],
reward_function=reward_function,
))
# Create policy
policy = GaussianLSTMPolicy(name='lstm_policy',
env_spec=env.spec,
hidden_dim=128,
use_peepholes=True)
with open(osp.join(args.log_dir, 'total_result.csv'), mode='w') as csv_file:
fieldnames = ['step_count']
for i in range(top_k):
fieldnames.append('reward '+str(i))
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for trial in range(args.trial_start,args.trial_start+args.n_trial):
# Create the logger
log_dir = args.log_dir+'/'+str(trial)
tabular_log_file = osp.join(log_dir, 'process.csv')
text_log_file = osp.join(log_dir, 'text.txt')
params_log_file = osp.join(log_dir, 'args.txt')
logger.set_snapshot_dir(log_dir)
logger.set_snapshot_mode(args.snapshot_mode)
logger.set_snapshot_gap(args.snapshot_gap)
logger.log_parameters_lite(params_log_file, args)
if trial > args.trial_start:
old_log_dir = args.log_dir+'/'+str(trial-1)
logger.pop_prefix()
logger.remove_text_output(osp.join(old_log_dir, 'text.txt'))
logger.remove_tabular_output(osp.join(old_log_dir, 'process.csv'))
logger.add_text_output(text_log_file)
logger.add_tabular_output(tabular_log_file)
logger.push_prefix("["+args.exp_name+'_trial '+str(trial)+"]")
np.random.seed(trial)
params = policy.get_params()
sess.run(tf.variables_initializer(params))
baseline = ZeroBaseline(env_spec=env.spec)
optimizer = ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))
top_paths = BPQ.BoundedPriorityQueue(top_k)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=args.batch_size,
step_size=0.1,
n_itr=args.n_itr,
store_paths=True,
optimizer= optimizer,
max_path_length=max_path_length,
top_paths = top_paths,
plot=False,
)
algo.train(sess=sess, init_var=False)
row_content = dict()
row_content['step_count'] = args.n_itr*args.batch_size
i = 0
for (r,action_seq) in algo.top_paths:
row_content['reward '+str(i)] = r
i += 1
writer.writerow(row_content)
|
[
"xiaobaima@DNab421bb2.stanford.edu"
] |
xiaobaima@DNab421bb2.stanford.edu
|
5e58b6483a21d1dcda87883dadabb128dcf9cdbe
|
4ed038a638725ac77731b0b97ddd61aa37dd8d89
|
/cairis/gui/SecurityPatternDialog.py
|
08a80cb3e41d28e481cae171536b5d583ce0b767
|
[
"Apache-2.0"
] |
permissive
|
RachelLar/cairis_update
|
0b784101c4aff81ff0390328eb615e335301daa2
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
refs/heads/master
| 2021-01-19T06:25:47.644993
| 2016-07-11T20:48:11
| 2016-07-11T20:48:11
| 63,103,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,874
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from SecurityPatternPanel import SecurityPatternPanel
from cairis.core.SecurityPatternParameters import SecurityPatternParameters
import DialogClassParameters
class SecurityPatternDialog(wx.Dialog):
def __init__(self,parent,parameters):
wx.Dialog.__init__(self,parent,parameters.id(),parameters.label(),style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(400,500))
self.thePatternName = ''
self.thePatternContext = ''
self.thePatternProblem = ''
self.thePatternSolution = ''
self.theConcernAssociations = []
self.theRequirements = []
self.thePatternId = -1
self.panel = 0
self.buildControls(parameters)
self.commitVerb = 'Add'
def buildControls(self,parameters):
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.panel = SecurityPatternPanel(self)
self.panel.buildControls(parameters.createFlag())
mainSizer.Add(self.panel,1,wx.EXPAND)
self.SetSizer(mainSizer)
wx.EVT_BUTTON(self,SECURITYPATTERN_BUTTONCOMMIT_ID,self.onCommit)
def load(self,pattern):
self.thePatternId = pattern.id()
self.panel.loadControls(pattern)
self.commitVerb = 'Edit'
def onCommit(self,evt):
commitLabel = self.commitVerb + ' security pattern'
nameCtrl = self.FindWindowById(SECURITYPATTERN_TEXTNAME_ID)
contextCtrl = self.FindWindowById(SECURITYPATTERN_TEXTCONTEXT_ID)
problemCtrl = self.FindWindowById(SECURITYPATTERN_TEXTPROBLEM_ID)
solutionCtrl = self.FindWindowById(SECURITYPATTERN_TEXTSOLUTION_ID)
concernsCtrl = self.FindWindowById(SECURITYPATTERN_LISTPATTERNSTRUCTURE_ID)
reqsCtrl = self.FindWindowById(SECURITYPATTERN_LISTREQUIREMENTS_ID)
self.thePatternName = nameCtrl.GetValue()
self.thePatternContext = contextCtrl.GetValue()
self.thePatternProblem = problemCtrl.GetValue()
self.thePatternSolution = solutionCtrl.GetValue()
self.theConcernAssociations = concernsCtrl.associations()
self.theRequirements = reqsCtrl.requirements()
if len(self.thePatternName) == 0:
dlg = wx.MessageDialog(self,'Pattern name cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
if len(self.thePatternContext) == 0:
dlg = wx.MessageDialog(self,'Context cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
if len(self.thePatternProblem) == 0:
dlg = wx.MessageDialog(self,'Problem cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
elif (len(self.thePatternSolution) == 0):
dlg = wx.MessageDialog(self,'Solution cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
else:
self.EndModal(SECURITYPATTERN_BUTTONCOMMIT_ID)
def parameters(self):
parameters = SecurityPatternParameters(self.thePatternName,self.thePatternContext,self.thePatternProblem,self.thePatternSolution,self.theRequirements,self.theConcernAssociations)
parameters.setId(self.thePatternId)
return parameters
|
[
"shamal.faily@googlemail.com"
] |
shamal.faily@googlemail.com
|
a683c1f4c81d2952675346854e2f80efb8473601
|
37ba3d21dcb6edd21e48dbb7f12591ac3590ab64
|
/python_problems_competitive/ten_kinds_of_people.py
|
4ef27f518e9cd247adcaa9041da10f96bc2643ec
|
[] |
no_license
|
Hygens/hackerearth_hackerrank_solutions
|
2feaedec255a85792d305bb8ff35675254a03f2a
|
86cc4c9ca4d5246f24db8cda93400f1d7ee00882
|
refs/heads/master
| 2021-07-06T15:56:28.906533
| 2020-04-28T22:45:56
| 2020-04-28T22:45:56
| 55,160,498
| 1
| 2
| null | 2020-10-01T06:48:09
| 2016-03-31T15:15:36
|
Python
|
UTF-8
|
Python
| false
| false
| 331
|
py
|
r,c = map(int,input().split(' '))
l = []
for i in range(r):
l.append(list(input().strip()))
n = int(input().strip())
for _ in range(n):
r1,c1,r2,c2 = map(int,input().split(' '))
if l[r1-1][c1-1]==l[r2-1][c2-1]=='0': print('binary')
elif l[r1-1][c1-1]==l[r2-1][c2-1]=='1': print('decimal')
else: print('neither')
|
[
"noreply@github.com"
] |
Hygens.noreply@github.com
|
2e6d525f0693ba26ecf20429238d8ba878370522
|
bc441bb06b8948288f110af63feda4e798f30225
|
/resource_package_tools_sdk/model/container/ingress_rule_pb2.py
|
e7533ea4ef040c1f29394bd3dd0d9f6cdf9fbc34
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 4,319
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ingress_rule.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from resource_package_tools_sdk.model.container import http_ingress_path_pb2 as resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='ingress_rule.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x12ingress_rule.proto\x12\tcontainer\x1a\x42resource_package_tools_sdk/model/container/http_ingress_path.proto\"y\n\x0bIngressRule\x12\x0c\n\x04host\x18\x01 \x01(\t\x12)\n\x04http\x18\x02 \x01(\x0b\x32\x1b.container.IngressRule.Http\x1a\x31\n\x04Http\x12)\n\x05paths\x18\x01 \x03(\x0b\x32\x1a.container.HTTPIngressPathBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2.DESCRIPTOR,])
_INGRESSRULE_HTTP = _descriptor.Descriptor(
name='Http',
full_name='container.IngressRule.Http',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='paths', full_name='container.IngressRule.Http.paths', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=173,
serialized_end=222,
)
_INGRESSRULE = _descriptor.Descriptor(
name='IngressRule',
full_name='container.IngressRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='container.IngressRule.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='http', full_name='container.IngressRule.http', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_INGRESSRULE_HTTP, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=101,
serialized_end=222,
)
_INGRESSRULE_HTTP.fields_by_name['paths'].message_type = resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2._HTTPINGRESSPATH
_INGRESSRULE_HTTP.containing_type = _INGRESSRULE
_INGRESSRULE.fields_by_name['http'].message_type = _INGRESSRULE_HTTP
DESCRIPTOR.message_types_by_name['IngressRule'] = _INGRESSRULE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IngressRule = _reflection.GeneratedProtocolMessageType('IngressRule', (_message.Message,), {
'Http' : _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), {
'DESCRIPTOR' : _INGRESSRULE_HTTP,
'__module__' : 'ingress_rule_pb2'
# @@protoc_insertion_point(class_scope:container.IngressRule.Http)
})
,
'DESCRIPTOR' : _INGRESSRULE,
'__module__' : 'ingress_rule_pb2'
# @@protoc_insertion_point(class_scope:container.IngressRule)
})
_sym_db.RegisterMessage(IngressRule)
_sym_db.RegisterMessage(IngressRule.Http)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
3fa376f3ef087cee256d7492675fdc21898a7b95
|
92c724afcc40c9e4d86af24b1b493e10fc8a994d
|
/src/figures/exploratory/exploratory_plots.py
|
f99cbafd230e2935a17d634a4cf0fd989b289b41
|
[
"MIT"
] |
permissive
|
willgdjones/GTEx
|
48d7551c765700d0db34bb8f6e01f7f2a55bec6c
|
c56a5d548978545ab8a98e74236d52343113e9e6
|
refs/heads/master
| 2021-09-13T13:21:12.928226
| 2018-02-06T16:42:41
| 2018-02-06T16:42:41
| 90,028,785
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,241
|
py
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import h5py
GTEx_directory = '/hps/nobackup/research/stegle/users/willj/GTEx'
retrained_mean_features = {}
with h5py.File(GTEx_directory + '/small_data/new_retrained_inceptionet_aggregations.hdf5','r') as f:
expression = f['lung']['256']['expression'].value
for s in ['128','256','512','1024','2048','4096']:
size_retrained_mean_features = f['lung'][s]['mean'].value
retrained_mean_features[s] = size_retrained_mean_features
expression_IDs = f['lung']['256']['expression_IDs'].value
raw_mean_features = {}
with h5py.File(GTEx_directory + '/small_data/new_raw_inceptionet_aggregations.hdf5','r') as f:
for s in ['128','256','512','1024','2048','4096']:
size_raw_mean_features = f['lung'][s]['mean'].value
size_raw_mean_features[size_raw_mean_features < 0] = 0
raw_mean_features[s] = size_raw_mean_features
# Comparing variation for each patch size
# f, a = plt.subplots(1,6, figsize=(35,5))
# f.suptitle("Image feature variation. Lung, patch-size 256",size=30)
# for (i,s) in enumerate(['128','256','512','1024','2048','4096']):
# a[i].hist(np.std(retrained_mean_features[s],axis=0),bins=100)
# a[i].set_title("Patch-size {}".format(s),size=20)
# plt.tight_layout()
# plt.subplots_adjust(top=0.80)
# plt.savefig('figures/exploratory/plots/feature_variation.eps',format='eps', dpi=600)
# Comparing variation when concatenating all features together
# plt.figure()
# concatenated_features = np.vstack([retrained_mean_features['128'], retrained_mean_features['256'], retrained_mean_features['512'], retrained_mean_features['1024'], retrained_mean_features['2048'], retrained_mean_features['4096']])
# plt.hist(np.std(concatenated_features,axis=0),bins=100)
# cutoff = min(np.std(concatenated_features[:,np.argsort(np.std(concatenated_features,axis=0))[-500:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 300],c='red')
# plt.title("Histogram of variance from concatenated features across patch-sizes",size=11)
# plt.xlabel("Variance")
# plt.ylabel("Counts")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/concatenated_feature_variation.eps',format='eps', dpi=600)
# Histogram of expression means.
# Include cutoff for top 500
# plt.figure()
# plt.hist(np.mean(expression,axis=0),bins=100)
# cutoff = min(np.mean(expression[:,np.argsort(np.mean(expression,axis=0))[-1000:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 4500],c='red')
# plt.title("Histogram of mean gene expression")
# plt.xlabel("Mean expression")
# plt.ylabel("Count")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/mean_expression_histogram.eps',format='eps', dpi=600)
#
# # Histogram of expression standard deviation.
# # Include cutoff for top 1000
# plt.figure()
# plt.hist(np.std(expression,axis=0),bins=100)
# cutoff = min(np.std(expression[:,np.argsort(np.std(expression,axis=0))[-1000:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 2500],c='red')
# plt.title("Histogram of gene expression standard deviation")
# plt.xlabel("Expression standard devation")
# plt.ylabel("Count")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/std_expression_histogram.eps',format='eps', dpi=600)
|
[
"williamgdjones@gmail.com"
] |
williamgdjones@gmail.com
|
7c36ac1c024cf960649d2e0a49ddbbd0087fdc2f
|
a849caca4cc7b66bb3ca93552da873c1415f435d
|
/Lab Exercise 1.6.2020/fermi.py
|
0fb3f6a5b5b8d5291e9c7c3a08e24662cec98290
|
[] |
no_license
|
nmessa/Python
|
5215b957dc73ece422a0f4cc65752c387a437d34
|
1a32ca1f59aa5a3f89453b6e42d4336e6e8fb961
|
refs/heads/master
| 2021-07-11T04:45:08.222102
| 2020-09-17T17:32:07
| 2020-09-17T17:32:07
| 199,273,131
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,182
|
py
|
## Game of Fermi Version 0.5
## Author:
## Date: 1/6/2020
## The goal of the game is for the player to guess the digits in
## the three positions in the least number of tries. For each guess,
## the player provides three digits for position 1, 2, and 3.
## The program replies with a hint consisting of Fermi, Pico, and Nano.
## If the digit guess for a given position is correct, then the reply is Fermi.
## If the digit guessed for a given position is in a different position, then
## the reply is Pico. If the digit guessed for a given position does not match
## any of the three digits, then the reply is Nano.
from random import *
#Create variables
numbers = [1,2,3,4,5,6,7,8,9]
again = True
while again:
win = False
#Build the secret number of 3 unique numbers from 1 to 9
secret = []
while len(secret) < 3:
temp = choice(numbers)
if temp not in secret:
secret.append(temp)
numGuesses = 0 #keep track of numbers guessed
#Play a round
while not win:
#initialize counter and phrases list
count = 0
phrases = []
#Get number guess from user
temp = input("Enter 3 numbers (1 - 9)seperated by spaces: ").split()
#Build a list that represents the number guessed
#Add code here
#update number of guesses
#Add code here
#Algorithm to test number and generate 3 phrases
#Add code here
#Print the result of algorithm execution
for p in phrases:
print(p, end = ' ')
print()
#Check to see if you won
if phrases.count('Fermi') == 3: #this means you won
print('You won in', numGuesses, 'guesses')
win = True
answer = input("Play again (y/n)? ")
if answer == 'n':
again = False
## Sample Output
## Enter 3 numbers (1 - 9): 6 3 5
## Nano Pico Nano
## Enter 3 numbers (1 - 9): 3 4 2
## Pico Pico Nano
## Enter 3 numbers (1 - 9): 4 3 7
## Fermi Pico Nano
## Enter 3 numbers (1 - 9): 4 8 3
## Fermi Fermi Fermi
## You won in 4 guesses
|
[
"noreply@github.com"
] |
nmessa.noreply@github.com
|
ef31a019c6a45e981d10734a870eb4e44043c0d3
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/command/subcommands/tests/test_info.py
|
42f6e6e775b0cc0b11df05470c21ff00bfa6d4cd
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
from __future__ import absolute_import
import bokeh.command.subcommands.info as scinfo
from bokeh.command.bootstrap import main
def test_create():
import argparse
from bokeh.command.subcommand import Subcommand
obj = scinfo.Info(parser=argparse.ArgumentParser())
assert isinstance(obj, Subcommand)
def test_name():
assert scinfo.Info.name == "info"
def test_help():
assert scinfo.Info.help == "print information about Bokeh and Bokeh server configuration"
def test_args():
assert scinfo.Info.args == (
('--static', dict(
action='store_true',
help="Print the locations of BokehJS static files",
)),
)
def test_run(capsys):
main(["bokeh", "info"])
out, err = capsys.readouterr()
lines = out.split("\n")
assert len(lines) == 5
assert lines[0].startswith("Python version")
assert lines[1].startswith("IPython version")
assert lines[2].startswith("Bokeh version")
assert lines[3].startswith("BokehJS static")
assert lines[4] == ""
assert err == ""
def test_run_static(capsys):
main(["bokeh", "info", "--static"])
out, err = capsys.readouterr()
assert err == ""
assert out.endswith('/bokeh/server/static\n')
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
38a80c15f2ce13d2c78e5913a3b1aadf4fc2e70a
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/benchmarks/sieve-457.py
|
99bc8c55e3be4471951f624dbaca0a87b6c3a62a
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,586
|
py
|
# A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
# Data
v:Vector = None
i:int = 0
# Crunch
v = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
de584c4eb19bb366f817c87559e42f7c262ffe1d
|
64a2e19c11929e9077a8c99e8d388de279e512e9
|
/testRunner/runner.py
|
0278655d701a52dba205cd68ffe58b706039b381
|
[] |
no_license
|
wallaceok/appiumn_auto
|
60f8a2b152a27c39cabf12529345909979527115
|
e543a662897c0eedfafdea64297947aa6de45539
|
refs/heads/master
| 2020-12-24T06:00:26.338592
| 2016-11-08T10:04:04
| 2016-11-08T10:04:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,471
|
py
|
__author__ = 'Administrator'
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
import datetime
import xlsxwriter
import time
import unittest
from common import reportPhone
from testRunner.runnerBase import TestInterfaceCase, ga
from testCase.Home import testHome
from testCase.work import testContact
from testCase.web.comment import testComment
from testBLL import email as b_email
from testBLL import server
from testBLL import adbCommon
from testMode import email as memail
from testBLL import report as b_report
from testBLL import appBase
from testBLL import apkBase
from testMode import report as m_report
from common.variable import GetVariable as common
from common import dataToString
import os
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
def get_email():
m_email = memail.GetEmail()
m_email.file = PATH( '../email.ini' )
email = b_email.read_email(m_email)
return email
def get_app_msg(f=r"D:\app\appium_study\img\t.apk"):
return apkBase.apkInfo(f).get_app_msg()
def get_common_report(start_test_time, endtime, starttime):
mreport = m_report.GetReport()
b_get_hp_info = appBase.get_phone_info()
raw = appBase.get_men_total(r"d:\men.log")
app_msg = get_app_msg(PATH( '../img/t.apk'))
mreport.test_sum = common.test_sum
mreport.test_failed = common.test_failed
mreport.test_success = common.test_success
mreport.test_sum_date = str((endtime - starttime).seconds-6) +"秒"
mreport.app_name = app_msg[0]
mreport.app_size = app_msg[1]
mreport.phone_name = b_get_hp_info["phone_name"] +" " + b_get_hp_info["phone_model"]
mreport.phone_rel =b_get_hp_info["release"]
mreport.phone_pix = appBase.get_app_pix()
mreport.phone_raw = reportPhone.phone_raw(raw/1024)
print(common.MEN)
    avg_men = appBase.get_avg_raw(common.MEN)  # get how much memory is used on each sample
mreport.phone_avg_use_raw = avg_men
mreport.phone_max_use_raw = reportPhone.phone_max_use_raw(common.MEN)
mreport.phone_cpu = appBase.get_cpu_kel()
mreport.phone_avg_use_cpu = reportPhone.phone_avg_use_cpu(common.CPU)
mreport.phone_avg_max_use_cpu = reportPhone.phone_avg_max_use_cpu(common.CPU)
mreport.app_version = app_msg[2]
mreport.test_date = start_test_time
mreport.fps_max = reportPhone.fps_max(common.FPS)
mreport.fps_avg = reportPhone.fps_avg(common.FPS)
b_report.OperateReport().set_report(mreport)
def get_common_web_report(start_test_time, endtime, starttime):
pass
def runnerCaseWeb():
suite = unittest.TestSuite()
starttime = datetime.datetime.now()
suite.addTest(TestInterfaceCase.parametrize(testComment))
unittest.TextTestRunner(verbosity=2).run(suite)
def runnerCaseApp():
start_test_time = dataToString.getStrTime(time.localtime(), "%Y-%m-%d %H:%M %p")
suite = unittest.TestSuite()
starttime = datetime.datetime.now()
suite.addTest(TestInterfaceCase.parametrize(testHome))
# suite.addTest(TestInterfaceCase.parametrize(testContact))
unittest.TextTestRunner(verbosity=2).run(suite)
endtime = datetime.datetime.now()
get_common_report(start_test_time, endtime, starttime)
report()
def report():
workbook = xlsxwriter.Workbook('GetReport.xlsx')
worksheet = workbook.add_worksheet("测试总况")
worksheet2 = workbook.add_worksheet("测试详情")
print(common.RRPORT)
b_OperateReport = b_report.OperateReport(wd=workbook, data=common.RRPORT)
b_OperateReport.init(worksheet)
b_OperateReport.detail(worksheet2)
b_OperateReport.close()
b_email.send_mail(get_email())
if __name__ == '__main__':
if ga.selenium_appium == common.APPIUM and ga.platformName == common.ANDROID :
if adbCommon.attached_devices():
appium_server = server.AppiumServer(ga.appiumJs, ga.Remote,ga.selenium_appium)
appium_server.start_server()
while not appium_server.is_runnnig():
time.sleep(2)
runnerCaseApp()
appium_server.stop_server()
else:
print(u"设备不存在")
if ga.selenium_appium == common.SELENIUM:
appium_server = server.AppiumServer(ga.selenium_jar, ga.sel_remote, ga.selenium_appium)
appium_server.start_server()
while not appium_server.is_runnnig():
time.sleep(2)
runnerCaseWeb()
appium_server.stop_server()
|
[
"284772894@qq.com"
] |
284772894@qq.com
|
ca4b7e3b02e3f9d8bd800d4002d8a1a7aaa44271
|
0b7add5d8583ba3bb02faf4fd5c356fd578f2fcc
|
/compileProtobuf/dstPb/RightInputProto_pb2.py
|
6c7f725c3da07982fafe4f3b3735e4d2df9ca053
|
[] |
no_license
|
cappuccino213/IMCIS2Performance
|
281f052f1a5dddb4956b3e7127781d2395c07e04
|
74528e0606f78459f6f3bfcf38d4fdf176a36f90
|
refs/heads/master
| 2023-03-27T20:44:57.266345
| 2021-03-29T07:56:56
| 2021-03-29T07:56:56
| 352,560,398
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 3,095
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: RightInputProto.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='RightInputProto.proto',
package='',
syntax='proto3',
serialized_options=b'\252\002\037TomTaw.eWordIMCIS.WebAPI.Models',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x15RightInputProto.proto\"K\n\x0fRightInputProto\x12\x0f\n\x07roleUID\x18\x01 \x01(\t\x12\x0f\n\x07userUID\x18\x02 \x01(\t\x12\x16\n\x0eisSuperManager\x18\x03 \x01(\tB\"\xaa\x02\x1fTomTaw.eWordIMCIS.WebAPI.Modelsb\x06proto3'
)
_RIGHTINPUTPROTO = _descriptor.Descriptor(
name='RightInputProto',
full_name='RightInputProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='roleUID', full_name='RightInputProto.roleUID', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userUID', full_name='RightInputProto.userUID', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isSuperManager', full_name='RightInputProto.isSuperManager', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=100,
)
DESCRIPTOR.message_types_by_name['RightInputProto'] = _RIGHTINPUTPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RightInputProto = _reflection.GeneratedProtocolMessageType('RightInputProto', (_message.Message,), {
'DESCRIPTOR' : _RIGHTINPUTPROTO,
'__module__' : 'RightInputProto_pb2'
# @@protoc_insertion_point(class_scope:RightInputProto)
})
_sym_db.RegisterMessage(RightInputProto)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"yeahcheung213@163.com"
] |
yeahcheung213@163.com
|
a1ca652bbcbc6fe3ceebec0c3c56a8205ba2449f
|
3597ecf8a014dbd6f7d998ab59919a94aff8011d
|
/front-web/src/www/application/modules/treatment/block/actions.py
|
18cc3477c9e71c71e2a949ed2b6fbd5799dbce77
|
[] |
no_license
|
duytran92-cse/nas-genomebrowser
|
f42b8ccbb7c5245bde4e52a0feed393f4b5f6bf1
|
d0240ad5edc9cfa8e7f89db52090d7d733d2bb8a
|
refs/heads/master
| 2022-10-24T05:26:01.760241
| 2020-06-14T19:01:35
| 2020-06-14T19:01:35
| 272,264,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,214
|
py
|
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.conf import settings
from notasquare.urad_web import actions, page_contexts, widgets
from notasquare.urad_web_material import renderers
from application.modules.common import page_contexts, actions as common_actions, components as common_components
from application.themes.genopedia import renderers as genopedia_renderers
from application.themes.genopedia import widgets as genopedia_widgets
from application import constants
from . import components
class Update(actions.crud.UpdateAction, common_actions.BaseAction):
def create_form(self):
treatment_block = components.TreatmentBlockStore(self.get_container()).get(self.params['block_id'])
kind = treatment_block['data']['record']['kind']
form = widgets.form.Form()
form.renderer = renderers.widgets.form.HorizontalFormRenderer()
if kind == 'general_text':
form.add_field(widgets.field.Textbox('title'))
form.add_field(widgets.field.Textarea('text'))
form.renderer.add_section('General - Text')
form.renderer.add_field('title', 'Title')
form.renderer.add_field('text', 'Text', rows=15)
if kind == 'general_publications':
form.add_field(widgets.field.List('publications', {
'pmid': widgets.field.Textbox('pmid'),
'doi': widgets.field.Textbox('doi'),
'pmc': widgets.field.Textbox('pmc'),
'title': widgets.field.Textarea('title'),
'authors': widgets.field.Textarea('authors'),
'journal': widgets.field.Textarea('journal')
}))
form.renderer.add_section('General - Publications')
form.renderer.add_field('publications', 'Publications', columns=[
{'id': 'pmid', 'label': 'PMID', 'width': '10%'},
{'id': 'doi', 'label': 'DOI', 'width': '10%'},
{'id': 'pmc', 'label': 'PMC', 'width': '10%'},
{'id': 'title', 'label': 'Title', 'width': '30%'},
{'id': 'authors', 'label': 'Authors', 'width': '15%'},
{'id': 'journal', 'label': 'Journal', 'width': '15%'},
])
if kind == 'general_alias':
# Show effect & risk
form.add_field(widgets.field.List('alias', {
'id': widgets.field.Textbox('id'),
'alias': widgets.field.Textbox('alias')
}))
form.renderer.add_section('Variation - Alias')
form.renderer.add_field('alias', 'Alias', columns=[
{'id': 'alias', 'label': 'Alias', 'width': '50%'}
])
form.renderer.set_field_renderer('textbox', renderers.widgets.field.TextboxRenderer())
form.renderer.set_field_renderer('textarea', renderers.widgets.field.TextareaRenderer())
form.renderer.set_field_renderer('combobox', renderers.widgets.field.ComboboxRenderer())
form.renderer.set_field_renderer('list', renderers.widgets.field.ListRenderer())
return form
def load_form(self, form):
result = components.TreatmentBlockStore(self.get_container()).get(self.params['block_id'])
if result['status'] == 'ok':
record = result['data']['record']
form.set_things({
'page': 'treatment',
'page_title': record['treatment_title']
})
form.set_form_data(record)
else:
form.add_message('danger', "Can't load form")
def process_form_data(self, data):
# print "POST-Params-Update:", self.params
data['new_version'] = True
res = components.TreatmentBlockStore(self.get_container()).update(data, self.params['block_id'])
rs = components.TreatmentBlockStore(self.get_container()).helper(res['data']['pk'])
self.params['page_title'] = rs['data']['record']['title']
return res
def handle_on_success(self, messages):
return HttpResponseRedirect('/treatment/%s' % (self.params["page_title"]))
|
[
"thanh.tran@etudiant.univ-lr.fr"
] |
thanh.tran@etudiant.univ-lr.fr
|
2acbb3b79b0a4861189cb1c43f2d7fd5049f0132
|
fc2447b91cbee82e74e939092ec1903678f3217a
|
/PythonPractice/hm_py/hm_oop/oop_single.py
|
4b09ef3d411106af86bc146dc8c60d1ee2a315ee
|
[] |
no_license
|
yglj/learngit
|
0eac654e7c49f2ede064b720e6ee621a702193b4
|
74fb4b93d5726c735b64829cafc99878d8082121
|
refs/heads/master
| 2022-12-24T10:01:56.705046
| 2019-05-27T21:04:08
| 2019-05-27T21:04:08
| 146,157,116
| 0
| 1
| null | 2022-12-12T07:01:25
| 2018-08-26T06:28:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 924
|
py
|
# Singleton design pattern
# The class creates only one single object instance
# Typical use cases: printer, recycle bin, music player object
# __new__() is a built-in static method provided by object; it allocates space for the object and returns the object reference
class MusicPlayer:
__init_flag = False
instance = None
def __new__(cls, *args):
        if cls.instance is None:  # allocate the object's space only once via __new__, implementing the singleton
print('创建对象时,自动分配空间')
cls.instance = super().__new__(cls)
# print(instance)
            return cls.instance  # return the object reference
return cls.instance
    def __init__(self):  # make the initialization run only once, controlled by a flag
if MusicPlayer.__init_flag:
return
print('初始化对象,分配实例对象属性')
MusicPlayer.__init_flag = True
m = MusicPlayer()
print('-' * 30)
m2 = MusicPlayer()
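# Illustrative check added here (not in the original snippet): both variables
# refer to the same instance, so this prints True.
print(m is m2)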
|
[
"2365952530@qq.com"
] |
2365952530@qq.com
|
e6cc8b2f9f4f193759e2a16a4b7d84f28a162423
|
d87483a2c0b50ed97c1515d49d62c6e9feaddbe0
|
/.history/get_positions_20210205021452.py
|
db0322e005440ad0d993a22856f8587be75cdf25
|
[
"MIT"
] |
permissive
|
HopperKremer/hoptrader
|
0d36b6e33922414003cf689fb81f924da076a54b
|
406793c10bc888648290fd15c7c2af62cf8c6c67
|
refs/heads/main
| 2023-06-12T15:51:00.910310
| 2021-07-06T16:15:41
| 2021-07-06T16:15:41
| 334,754,936
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,840
|
py
|
# Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import os, sys
import time
from selenium import webdriver
import json
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
token_path = "token"
c = auth.client_from_token_file(token_path, config.api_key)
# positions = c.get_account(config.tda_acct_num, c.Account.Fields.POSITIONS)
# account_info = c.get_account(config.tda_acct_num, fields=[c.Account.Fields.POSITIONS]).json()
# print(account_info)
# positions = c.Account.Fields.POSITIONS
# r = c.get_account(config.tda_acct_num, fields=positions)
# stocks = r.json()['securitiesAccount']['positions']
# # stocks = json.dumps(r.json(), indent=4)
# for stock in stocks:
# print('--------------------------------')
# print(stock['instrument']['symbol'])
# orders = c.Order.Status.FILLED
# r = c.get_orders_by_path(config.tda_acct_num, status = client.Client.Order.Status.WORKING)
# res = c.get_orders_by_path(config.tda_acct_num, status = orders)
# res = s = c.get_account(config.tda_acct_num, fields=c.Account.Fields.POSITIONS)
# data = r.json()
# print(r.json())
orders = client.Client.Account.Fields.ORDERS
r = c.get_account(config.tda_acct_num, fields=orders)
print(json.dumps(r.json(), indent=4))#queued orders would appear here, if not blank list
l = r.json()['securitiesAccount']['orderStrategies']
canceled_orders = [i['orderId'] for i in l if i['status'] == 'CANCELED']
print('canceled', canceled_orders)
for order_id in canceled_orders:
g = c.get_order(order_id, config.tda_acct_num)
print(json.dumps(g.json(), indent=4))
|
[
"hopperkremer@gmail.com"
] |
hopperkremer@gmail.com
|
dc816389c06442347a202791e2f3ecfc4e43a317
|
2cd06e44dd79b45708ddf010c31289458d850b94
|
/test/functional/feature_maxuploadtarget.py
|
b5a44cbc6b5cb6b89aca3c4c47d2ce7ef4634a00
|
[
"MIT"
] |
permissive
|
adymoloca/flocoin
|
bc66233e5b3b1af294ca6719b4a26f8829d682e4
|
d9244577577dede975c852f6fcfe1afba4d71a57
|
refs/heads/master
| 2023-08-21T23:51:28.266695
| 2021-10-06T01:40:10
| 2021-10-06T01:40:10
| 408,609,250
| 0
| 0
|
MIT
| 2021-09-30T10:11:53
| 2021-09-20T21:45:28
|
C++
|
UTF-8
|
Python
| false
| false
| 6,653
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
from test_framework.p2p import P2PInterface
from test_framework.test_framework import FlocoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
def on_inv(self, message):
pass
def on_block(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(FlocoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-maxuploadtarget=800",
"-acceptnonstdtxn=1",
"-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
]]
self.supports_cli = False
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# p2p_conns[0] will only request old blocks
# p2p_conns[1] will only request new blocks
# p2p_conns[2] will test resetting the counters
p2p_conns = []
for _ in range(3):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
        # p2p_conns[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
for i in range(success_count):
p2p_conns[0].send_and_ping(getdata_request)
assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for _ in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(800):
p2p_conns[1].send_and_ping(getdata_request)
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
p2p_conns[2].sync_with_ping()
p2p_conns[2].send_and_ping(getdata_request)
assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
self.nodes[0].disconnect_p2ps()
self.log.info("Restarting node 0 with download permission and 1MB maxuploadtarget")
self.restart_node(0, ["-whitelist=download@127.0.0.1", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
peer = self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(20):
peer.send_and_ping(getdata_request)
assert_equal(peer.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
peer.send_and_ping(getdata_request)
self.log.info("Peer still connected after trying to download old block (download permission)")
peer_info = self.nodes[0].getpeerinfo()
assert_equal(len(peer_info), 1) # node is still connected
assert_equal(peer_info[0]['permissions'], ['download'])
if __name__ == '__main__':
MaxUploadTest().main()
|
[
"adymoloca91@gmail.com"
] |
adymoloca91@gmail.com
|
ea8bcdc0b183def68c8745950edbbf13533c588d
|
65b708f0646ea090a4e9bc615cd37fd799bd9bce
|
/venv/Scripts/pip3-script.py
|
307f938a7427296d42bf18912a97aeee71dc9f96
|
[] |
no_license
|
chrisna2/python-web-scrapping
|
af803079586c7b798365d23f5667a24d0c6633e8
|
92e74b4985006246f543de87ff26673b94e8c0a8
|
refs/heads/master
| 2020-07-08T14:40:32.959560
| 2019-08-23T03:19:47
| 2019-08-23T03:19:47
| 203,703,270
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
#!D:\tyn_dev\workspace_pycham\web-scrapping\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"chrisna2@hanmail.net"
] |
chrisna2@hanmail.net
|
fb7d5bf4a453cf483c36820ec233a56926f63930
|
c7e765a9bed33d3bfb21774e3995bf4a09e04add
|
/adminmgr/media/code/A2/python/task/BD_1117_1375_1419_1525.py
|
ea86b27f17cbd91f3957294e879438d4f68c005f
|
[
"Apache-2.0"
] |
permissive
|
IamMayankThakur/test-bigdata
|
13dd2ac7fb76c9baed6c3a0aa943057a22e2d237
|
7f507918c7bec31c92eedcd94491a83486623049
|
refs/heads/master
| 2022-05-03T00:59:44.127494
| 2022-02-10T19:50:16
| 2022-02-10T19:50:16
| 201,585,028
| 10
| 4
|
Apache-2.0
| 2022-04-22T23:39:45
| 2019-08-10T05:34:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,562
|
py
|
from __future__ import print_function
import re
import sys
from operator import add
from pyspark.sql import *
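# Descriptive overview added for readability (an inference from the code, not
# from the original authors): this job computes a PageRank-style rank over
# comma-separated input lines of four fields. batbowlKeyValue links field 1 to
# field 2, batbowlRank seeds field 2's rank with field3/field4, and each
# iteration redistributes a node's rank evenly over its links, then applies a
# damping percentage taken from sys.argv[3] (falling back to 0.8/0.2 when that
# argument is 0). With sys.argv[2] <= 0 it loops until every rank changes by
# less than 0.0001; otherwise it runs exactly sys.argv[2] iterations, then
# prints the nodes sorted by descending rank.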
def calcRank(BatBowl, rank):
n = len(BatBowl)
for i in BatBowl:
yield (i, float(rank)/float(n))
checking = 1
def batbowlKeyValue(x):
lol = x.split(',')
return lol[0],lol[1]
def batbowlRank(x):
lol = x.split(',')
return lol[1],float(lol[2])/float(lol[3])
if __name__ == "__main__" :
if len(sys.argv) != 4:
sys.exit(-1)
spark = SparkSession.builder.appName("Bowlerrank").getOrCreate()
lol = spark.read.text(sys.argv[1]).rdd.map(lambda x : x[0])
lol2 = lol.map(lambda x: batbowlKeyValue(x)).distinct().groupByKey().cache()
lol_temp = lol.map(lambda x: batbowlRank(x)).distinct().groupByKey()
bowr = lol_temp.map(lambda x : (x[0], max(sum(x[1]),1.00)))
itcount = 0
bowr_temp = bowr
noi = int(sys.argv[2])
if (noi <= 0) :
while True:
lol3 = lol2.join(bowr).flatMap(lambda x : calcRank(x[1][0], x[1][1]))
perc = int(sys.argv[3])
if(perc!=0):
bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*(float(perc/100)) + 1-(float(perc/100)))
else:
bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*0.8 + 0.2)
#for wolverine, iron_man in bowr.collect():
# print("%s has rank: %s." % (wolverine, iron_man))
temp = bowr.join(bowr_temp)
temp2 = temp.collect()
flag = 0
for i in temp2:
if(abs(i[1][0]-i[1][1])<0.0001):
flag = flag + 1
else:
break
itcount = itcount + 1
bowr_temp = bowr
if flag==len(temp2):
break
else:
t = int(sys.argv[2])
for _ in range(t):
lol3 = lol2.join(bowr).flatMap(lambda x : calcRank(x[1][0], x[1][1]))
perc = int(sys.argv[3])
if(perc!=0):
bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*(float(perc)/100.00) + 1-(float(perc)/100.00))
else:
bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*0.8 + 0.2)
bowr = bowr.sortBy(lambda x : (-x[1],x[0]))
for wolverine, iron_man in bowr.collect():
print("%s,%.12f" % (wolverine, iron_man))
#print("...................................",itcount,"...............................................")
spark.stop()
|
[
"ubuntu@ip-172-31-18-251.ap-south-1.compute.internal"
] |
ubuntu@ip-172-31-18-251.ap-south-1.compute.internal
|
7a3f9d1a7437cf258fd93efcfdfa3f3a3316d099
|
45ca434bdb9e48fdbb2cda0e7fdd9a76474117b0
|
/aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetFileCacheExpiredConfigRequest.py
|
55bd0c22970cd64217840720fb797559c0c97d7f
|
[
"Apache-2.0"
] |
permissive
|
wanyanzhenjiang/aliyun-openapi-python-sdk
|
e41e9937ad3f851e5a58f6bea95663e88f7fee13
|
4a5bf1b35f2395d047ead4444ea46721976bdd24
|
refs/heads/master
| 2020-12-30T10:37:55.789911
| 2017-07-27T06:55:15
| 2017-07-27T06:55:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetFileCacheExpiredConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetFileCacheExpiredConfig')
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_CacheContent(self):
return self.get_query_params().get('CacheContent')
def set_CacheContent(self,CacheContent):
self.add_query_param('CacheContent',CacheContent)
def get_TTL(self):
return self.get_query_params().get('TTL')
def set_TTL(self,TTL):
self.add_query_param('TTL',TTL)
def get_Weight(self):
return self.get_query_params().get('Weight')
def set_Weight(self,Weight):
self.add_query_param('Weight',Weight)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
dc4e498d5e94244fea4ccc62a2671836d7858c62
|
438f8490be1fa3818daad38254a77bb11ba367b3
|
/project/settings.py
|
6b500d837b395a5d1f67ad16fa8a0d8088cd6b65
|
[] |
no_license
|
n7ey233/maximanat
|
2ea0b3c80729dd10e6023b053523ebe7e6ba22d8
|
812d7396fe64af85f86e2dd5e257935bde8719e2
|
refs/heads/master
| 2020-04-13T10:13:01.382912
| 2018-12-28T06:31:19
| 2018-12-28T06:31:19
| 163,133,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,284
|
py
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-@xf882li3g_x28_oqt5(=fj8b$*2*9*$hm3(17g^#(klc7pgg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'maximanat.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"you@example.com"
] |
you@example.com
|
5b161e0a9d07b0bddab72ace77e7c27caff8d41a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/89/usersdata/202/62093/submittedfiles/matriz1.py
|
f2273f8e67d6acee35c26497ab06b264203a4c29
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
def cortel1(a):
    # first row that contains a 1
    for i in range(0,a.shape[0],1):
        for j in range(0,a.shape[1],1):
            if a[i,j]==1:
                return i
def cortel2(a):
    # last row that contains a 1
    for i in range(a.shape[0]-1,-1,-1):
        for j in range(0,a.shape[1],1):
            if a[i,j]==1:
                return i
def cortec1(a):
    # first column that contains a 1
    for j in range(0,a.shape[1],1):
        for i in range(0,a.shape[0],1):
            if a[i,j]==1:
                return j
def cortec2(a):
    # last column that contains a 1
    for j in range(a.shape[1]-1,-1,-1):
        for i in range(0,a.shape[0],1):
            if a[i,j]==1:
                return j
linhas=int(input('linhas:'))
colunas=int(input('colunas:'))
a=np.zeros((linhas,colunas))
for i in range(0,a.shape[0],1):
for j in range (0,a.shape[1],1):
a[i,j]=int(input('valor:'))
l1=cortel1(a)
l2=cortel2(a)
c1=cortec1(a)
c2=cortec2(a)
print(a[l1:l2+1,c1:c2+1])
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
dcb9a544cce84c43cf9b3d7b349db60e8139ccde
|
9ce822c07edef943dc519a7ab3916f5a667e114a
|
/location_and_settings/location_and_settings/doctype/location_list/test_location_list.py
|
28da20eda77448d48672ee27fa1509ed7b30bbe6
|
[
"MIT"
] |
permissive
|
hrgadeha/location_and_setting
|
250fec37c2e63ce7c31b41ac52e50bea1e333392
|
770a7a719ce66bfe699dc805839a972063ff8ab6
|
refs/heads/master
| 2020-04-25T03:43:09.423485
| 2019-02-28T12:09:31
| 2019-02-28T12:09:31
| 172,486,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Hardik Gadesha and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestLocationList(unittest.TestCase):
pass
|
[
"you@example.com"
] |
you@example.com
|
943046ca83cc744a00369e1e7ddfec263a7dc795
|
ad23b164febd12d5c6d97cfbcd91cf70e2914ab3
|
/webtestdata/wsgi.py
|
882a4bbee8f33c55053afc3819608ab439306db9
|
[] |
no_license
|
wawj901124/webtestdata
|
9eedf9a01dec2c157725299bda9a42e8d357ef0b
|
54f6412566fce07ece912760c5caea73ede819cb
|
refs/heads/master
| 2022-12-09T14:18:38.125191
| 2021-04-25T07:54:07
| 2021-04-25T07:54:07
| 175,773,318
| 1
| 1
| null | 2022-12-08T02:39:15
| 2019-03-15T07:49:16
|
Python
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for webtestdata project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webtestdata.settings")
application = get_wsgi_application()
|
[
"410287958@qq.com"
] |
410287958@qq.com
|
345e69a557ad41d9aae7895d883495769eee2017
|
41b4702e359e3352116eeecf2bdf59cb13c71cf2
|
/full_model_walker_param/utils/env_utils.py
|
110ef30017c069549d041f0bfb487b464dfec838
|
[] |
no_license
|
CaralHsi/Multi-Task-Batch-RL
|
b0aad53291c1713fd2d89fa4fff4a85c98427d4d
|
69d29164ab7d82ec5e06a929ed3b96462db21853
|
refs/heads/master
| 2022-12-22T19:23:45.341092
| 2020-10-01T00:05:36
| 2020-10-01T00:05:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,899
|
py
|
import os
from gym import Env
from gym.spaces import Box, Discrete, Tuple
import numpy as np
from env.hopper import HopperVelEnv
from env.half_cheetah import HalfCheetahVelEnv
from env.ant_goal import AntGoalEnv
from env.ant_dir import AntDirEnv
from env.humanoid_dir import HumanoidDirEnv
from env.humanoid_dir_openai import HumanoidDirEnvOpenAI
from env.humanoid_goal_ndone import HumanoidGoalEnvNDone
from env.walker_param import Walker2DRandParamsEnv
def get_dim(space):
if isinstance(space, Box):
return space.low.size
elif isinstance(space, Discrete):
return space.n
elif isinstance(space, Tuple):
return sum(get_dim(subspace) for subspace in space.spaces)
elif hasattr(space, 'flat_dim'):
return space.flat_dim
else:
raise TypeError("Unknown space: {}".format(space))
class ProxyEnv(Env):
def __init__(self, wrapped_env):
self._wrapped_env = wrapped_env
self.action_space = self._wrapped_env.action_space
self.observation_space = self._wrapped_env.observation_space
@property
def wrapped_env(self):
return self._wrapped_env
def reset(self, **kwargs):
return self._wrapped_env.reset(**kwargs)
def step(self, action):
return self._wrapped_env.step(action)
def render(self, *args, **kwargs):
return self._wrapped_env.render(*args, **kwargs)
@property
def horizon(self):
return self._wrapped_env.horizon
def terminate(self):
if hasattr(self.wrapped_env, "terminate"):
self.wrapped_env.terminate()
def __getattr__(self, attr):
if attr == '_wrapped_env':
raise AttributeError()
return getattr(self._wrapped_env, attr)
def __getstate__(self):
"""
This is useful to override in case the wrapped env has some funky
__getstate__ that doesn't play well with overriding __getattr__.
The main problematic case is/was gym's EzPickle serialization scheme.
:return:
"""
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def __str__(self):
return '{}({})'.format(type(self).__name__, self.wrapped_env)
class NormalizedBoxEnv(ProxyEnv):
"""
Normalize action to in [-1, 1].
Optionally normalize observations and scale reward.
"""
def __init__(
self,
env,
reward_scale=1.,
obs_mean=None,
obs_std=None,
):
ProxyEnv.__init__(self, env)
self._should_normalize = not (obs_mean is None and obs_std is None)
if self._should_normalize:
if obs_mean is None:
obs_mean = np.zeros_like(env.observation_space.low)
else:
obs_mean = np.array(obs_mean)
if obs_std is None:
obs_std = np.ones_like(env.observation_space.low)
else:
obs_std = np.array(obs_std)
self._reward_scale = reward_scale
self._obs_mean = obs_mean
self._obs_std = obs_std
ub = np.ones(self._wrapped_env.action_space.shape)
self.action_space = Box(-1 * ub, ub)
def estimate_obs_stats(self, obs_batch, override_values=False):
if self._obs_mean is not None and not override_values:
raise Exception("Observation mean and std already set. To "
"override, set override_values to True.")
self._obs_mean = np.mean(obs_batch, axis=0)
self._obs_std = np.std(obs_batch, axis=0)
def _apply_normalize_obs(self, obs):
return (obs - self._obs_mean) / (self._obs_std + 1e-8)
def step(self, action):
lb = self._wrapped_env.action_space.low
ub = self._wrapped_env.action_space.high
scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
wrapped_step = self._wrapped_env.step(scaled_action)
next_obs, reward, done, info = wrapped_step
if self._should_normalize:
next_obs = self._apply_normalize_obs(next_obs)
return next_obs, reward * self._reward_scale, done, info
def __str__(self):
return "Normalized: %s" % self._wrapped_env
def domain_to_env(name):
from gym.envs.mujoco import HalfCheetahEnv, \
InvertedPendulumEnv, HumanoidEnv, \
HopperEnv, AntEnv, Walker2dEnv
return {
'invertedpendulum': InvertedPendulumEnv,
'humanoid': HumanoidEnv,
'halfcheetah': HalfCheetahEnv,
'halfcheetah-vel': HalfCheetahVelEnv,
'hopper': HopperEnv,
'hopper-vel': HopperVelEnv,
'ant': AntEnv,
'ant-goal': AntGoalEnv,
'ant-dir': AntDirEnv,
'humanoid-dir':HumanoidDirEnv,
'humanoid-openai-dir': HumanoidDirEnvOpenAI,
'humanoid-ndone-goal': HumanoidGoalEnvNDone,
'walker2d': Walker2dEnv,
'walker-param': Walker2DRandParamsEnv,
}[name]
def domain_to_epoch(name):
return {
'invertedpendulum': 300,
'humanoid': 9000,
'halfcheetah': 5000,
'halfcheetah-vel': 50,
'hopper': 50,
'hopper-vel': 50,
'ant-goal': 590,
'ant-dir': 590,
'ant': 5000,
'humanoid-dir':590,
'humanoid-openai-dir':590,
'humanoid-ndone-goal': 590,
'walker2d': 5000,
'walker-param': 390,
}[name]
def domain_to_num_goals(name):
return {
'halfcheetah-vel': 32,
'hopper-vel': 16,
'ant-goal': 32,
'ant-dir': 32,
'humanoid-dir': 32,
'humanoid-openai-dir': 10,
'humanoid-ndone-goal': 10,
'walker-param': 32,
}[name]
def env_producer(domain, seed, goal=None):
env = domain_to_env(domain)(goal=goal)
env.seed(seed)
env = NormalizedBoxEnv(env)
return env
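# Hypothetical usage sketch (not part of the original module; the goal value is an
# assumption about what the custom HalfCheetahVelEnv accepts):
# env = env_producer('halfcheetah-vel', seed=0, goal=1.5)
# obs = env.reset()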
|
[
"jil021@eng.ucsd.edu"
] |
jil021@eng.ucsd.edu
|
3973203794a335401a2e5cfa6e3206483a4d7116
|
d26b3bbf0192cc334e5ac431c753ebcbf2baeb1a
|
/l10n_cn_hr_payroll/__init__.py
|
6adc439b170cc365b31453ea0481a8ba0709b7a9
|
[] |
no_license
|
davgit/Xero-2
|
1d566357174d15d4f3b15cc849ce9f32f0c9ef3a
|
6477d844fde3f3b8f91d21b15ee7f8986a505de5
|
refs/heads/master
| 2021-01-21T20:49:47.585328
| 2013-02-16T08:13:22
| 2013-02-16T08:13:22
| 22,778,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import l10n_cn_hr_payroll
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"wangsong1233276@sina.com"
] |
wangsong1233276@sina.com
|
07ccbaa13946f30e8d2d81bdcc3c948f8adb3036
|
5eff9df4d276e83c68ce843d58868499858f701a
|
/Interview/Trees/binary_tree_traversal.py
|
e5a7ce276633e535f5c96cfc7a75b9b0cfffea65
|
[] |
no_license
|
arunraman/Code-Katas
|
b6723deb00caed58f0c9a1cafdbe807e39e96961
|
7fe3582fa6acf59a2620fe73e1e14bd8635bbee8
|
refs/heads/master
| 2023-03-04T17:27:44.037145
| 2023-03-02T21:09:53
| 2023-03-02T21:09:53
| 25,232,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
from binarytree import Node as Treenode
class Solution():
def preOrder(self, root):
if root == None:
return
print root.value,
self.preOrder(root.left)
self.preOrder(root.right)
def inOrder(self, root):
if root == None:
return
self.inOrder(root.left)
print root.value,
self.inOrder(root.right)
def postOrder(self, root):
if root == None:
return
self.postOrder(root.left)
self.postOrder(root.right)
print root.value,
S = Solution()
root = Treenode(1)
root.left = Treenode(2)
root.right = Treenode(3)
root.left.left = Treenode(8)
root.left.right = Treenode(12)
root.right.left = Treenode(3)
root.right.right = Treenode(25)
print root
S.preOrder(root)
print "\n"
S.inOrder(root)
print "\n"
S.postOrder(root)
|
[
"arunraman.19@gmail.com"
] |
arunraman.19@gmail.com
|
7c522e09e37bfa9cd52933f4b3a202340868c5d4
|
8c95e2185100db97f74d948407f9f6ac563905e5
|
/metronotation/routemap.py
|
8a6691a352602ddc2fcb031cd4e836d9009a1748
|
[
"MIT"
] |
permissive
|
kitao/metro-notation
|
c5fec21fccba4ef2a21c3294575fd29498ff8ebc
|
34a9d2ca9fe17452c8eb5426636484f7cc29c605
|
refs/heads/main
| 2023-08-20T15:02:04.631092
| 2021-10-30T04:28:17
| 2021-10-30T04:28:17
| 321,700,124
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,037
|
py
|
LAYER_TP = 0
LAYER_MD = 1
LAYER_BT = 2
LAYER_TM = 3
LAYER_BM = 4
LAYER_AL = 5
DIR_UP = (0, -1)
DIR_DN = (0, 1)
DIR_LT = (-1, 0)
DIR_RT = (1, 0)
DIR_LU = (-1, -1)
DIR_RD = (1, 1)
LETTER_TABLE = [
("R", (LAYER_TP, DIR_UP, 1)),
("M", (LAYER_MD, DIR_DN, 1)),
("L", (LAYER_BT, DIR_DN, 1)),
("U", (LAYER_TP, DIR_RT, 1)),
("E", (LAYER_MD, DIR_LT, 1)),
("D", (LAYER_BT, DIR_LT, 1)),
("F", (LAYER_TP, DIR_RD, 1)),
("S", (LAYER_MD, DIR_RD, 1)),
("B", (LAYER_BT, DIR_LU, 1)),
#
("R2", (LAYER_TP, DIR_UP, 2)),
("M2", (LAYER_MD, DIR_DN, 2)),
("L2", (LAYER_BT, DIR_DN, 2)),
("U2", (LAYER_TP, DIR_RT, 2)),
("E2", (LAYER_MD, DIR_LT, 2)),
("D2", (LAYER_BT, DIR_LT, 2)),
("F2", (LAYER_TP, DIR_RD, 2)),
("S2", (LAYER_MD, DIR_RD, 2)),
("B2", (LAYER_BT, DIR_LU, 2)),
#
("R'", (LAYER_TP, DIR_DN, 1)),
("M'", (LAYER_MD, DIR_UP, 1)),
("L'", (LAYER_BT, DIR_UP, 1)),
("U'", (LAYER_TP, DIR_LT, 1)),
("E'", (LAYER_MD, DIR_RT, 1)),
("D'", (LAYER_BT, DIR_RT, 1)),
("F'", (LAYER_TP, DIR_LU, 1)),
("S'", (LAYER_MD, DIR_LU, 1)),
("B'", (LAYER_BT, DIR_RD, 1)),
#
("R2'", (LAYER_TP, DIR_DN, 2)),
("M2'", (LAYER_MD, DIR_UP, 2)),
("L2'", (LAYER_BT, DIR_UP, 2)),
("U2'", (LAYER_TP, DIR_LT, 2)),
("E2'", (LAYER_MD, DIR_RT, 2)),
("D2'", (LAYER_BT, DIR_RT, 2)),
("F2'", (LAYER_TP, DIR_LU, 2)),
("S2'", (LAYER_MD, DIR_LU, 2)),
("B2'", (LAYER_BT, DIR_RD, 2)),
#
("Rw", (LAYER_TM, DIR_UP, 1)),
("Lw", (LAYER_BM, DIR_DN, 1)),
("Uw", (LAYER_TM, DIR_RT, 1)),
("Dw", (LAYER_BM, DIR_LT, 1)),
("Fw", (LAYER_TM, DIR_RD, 1)),
("Bw", (LAYER_BM, DIR_LU, 1)),
#
("Rw2", (LAYER_TM, DIR_UP, 2)),
("Lw2", (LAYER_BM, DIR_DN, 2)),
("Uw2", (LAYER_TM, DIR_RT, 2)),
("Dw2", (LAYER_BM, DIR_LT, 2)),
("Fw2", (LAYER_TM, DIR_RD, 2)),
("Bw2", (LAYER_BM, DIR_LU, 2)),
#
("Rw'", (LAYER_TM, DIR_DN, 1)),
("Lw'", (LAYER_BM, DIR_UP, 1)),
("Uw'", (LAYER_TM, DIR_LT, 1)),
("Dw'", (LAYER_BM, DIR_RT, 1)),
("Fw'", (LAYER_TM, DIR_LU, 1)),
("Bw'", (LAYER_BM, DIR_RD, 1)),
#
("Rw2'", (LAYER_TM, DIR_DN, 2)),
("Lw2'", (LAYER_BM, DIR_UP, 2)),
("Uw2'", (LAYER_TM, DIR_LT, 2)),
("Dw2'", (LAYER_BM, DIR_RT, 2)),
("Fw2'", (LAYER_TM, DIR_LU, 2)),
("Bw2'", (LAYER_BM, DIR_RD, 2)),
#
("x", (LAYER_AL, DIR_UP, 1)),
("x'", (LAYER_AL, DIR_DN, 1)),
("y", (LAYER_AL, DIR_RT, 1)),
("y'", (LAYER_AL, DIR_LT, 1)),
("z", (LAYER_AL, DIR_RD, 1)),
("z'", (LAYER_AL, DIR_LU, 1)),
]
LETTER_TABLE.sort(key=lambda x: len(x[0]), reverse=True)
CUBE_RF = 0
CUBE_OF = 1
CUBE_BF = 2
CUBE_GF = 3
CUBE_WF = 4
CUBE_YF = 5
CUBE_RB = 6
CUBE_OB = 7
CUBE_BB = 8
CUBE_GB = 9
CUBE_WB = 10
CUBE_YB = 11
CUBE_TABLE = {
"R": CUBE_RF,
"O": CUBE_OF,
"B": CUBE_BF,
"G": CUBE_GF,
"W": CUBE_WF,
"Y": CUBE_YF,
"r": CUBE_RB,
"o": CUBE_OB,
"b": CUBE_BB,
"g": CUBE_GB,
"w": CUBE_WB,
"y": CUBE_YB,
}
class Node:
def __init__(self, letters, layer, direction, distance):
self.letters = letters
self.layer = layer
self.direction = direction
self.distance = distance
self.is_start_hit = False
self.is_end_hit = False
def from_letters(letters):
for l, n in LETTER_TABLE:
if letters.startswith(l):
return Node(l, *n), letters[len(l) :]
raise ValueError
class Route:
def __init__(self, nodes):
x = y = 0
min_x = min_y = 0
max_x = max_y = 0
route_count = {(0, 0): 1}
last_direction = (0, 0)
last_layer = -1
for node in nodes:
if (
node.direction == last_direction
and node.layer == last_layer
or node.direction[0] + last_direction[0] == 0
and node.direction[1] + last_direction[1] == 0
):
raise ValueError
last_direction = node.direction
last_layer = node.layer
for i in range(node.distance):
x += node.direction[0]
y += node.direction[1]
min_x = min(x, min_x)
min_y = min(y, min_y)
max_x = max(x, max_x)
max_y = max(y, max_y)
if (x, y) in route_count:
route_count[(x, y)] += 1
else:
route_count[(x, y)] = 1
for pos, count in route_count.items():
if count >= 3 or count >= 2 and pos != (0, 0) and pos != (x, y):
raise ValueError
self.nodes = nodes
self.width = max_x - min_x
self.height = max_y - min_y
self.start_x = -min_x
self.start_y = -min_y
nodes[0].is_start_hit = route_count[(0, 0)] > 1
nodes[-1].is_end_hit = route_count[(x, y)] > 1
def from_letters(letters):
try:
nodes = []
rest = letters
while rest:
node, rest = Node.from_letters(rest)
nodes.append(node)
route = Route(nodes)
except ValueError:
raise ValueError(letters)
return route
class RouteMap:
def __init__(self, name, cube, routes):
self.name = name
self.cube = cube
self.routes = routes
self.width = sum([route.width for route in routes])
self.height = max([route.height for route in routes])
for route in routes:
route.start_y += (self.height - route.height) / 2
def from_letters(name, cube, letters):
if not cube:
cube = "w" * 21
elif len(cube) != 21:
raise ValueError(cube)
try:
cube = [CUBE_TABLE[c] for c in cube]
except KeyError:
raise ValueError(cube)
name = name or "no name"
routes = [Route.from_letters(l) for l in letters.split()]
return RouteMap(name, cube, routes)
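# Hypothetical usage sketch (not part of the original module; the move string and
# name are made up for illustration, cube defaults to all-white when empty):
# route_map = RouteMap.from_letters("sexy move", "", "RUR'U'")
# print(route_map.name, route_map.width, route_map.height)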
|
[
"takashi.kitao@gmail.com"
] |
takashi.kitao@gmail.com
|
4f5f6cf6b975bc75e55183392098c5035bdaf30d
|
a742bd051641865d2e5b5d299c6bc14ddad47f22
|
/algorithm/牛客网/55-链表中环的入口节点.py
|
cb9f7c566cc7b629c3e7d7a7aef88c03f3a1a921
|
[] |
no_license
|
lxconfig/UbuntuCode_bak
|
fb8f9fae7c42cf6d984bf8231604ccec309fb604
|
3508e1ce089131b19603c3206aab4cf43023bb19
|
refs/heads/master
| 2023-02-03T19:10:32.001740
| 2020-12-19T07:27:57
| 2020-12-19T07:27:57
| 321,351,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,604
|
py
|
"""
给一个链表,若其中包含环,请找出该链表的环的入口结点,否则,输出null。
思路:
双指针法
快指针先走两步,慢指针走一步
当两个指针又相遇了,此时指向的节点可能是环的入口节点
再次让慢指针回到链表头,然后和快指针一起走,再次相遇时,就是环的入口节点
否则,快指针不存在时,表示没有环
或:
先让快指针走n步,n=链表的长度
之后再让快指针和慢指针一起走,直到相遇,此时就是环的入口节点
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def EntryNodeOfLoop(self, pHead):
        # Runtime: 22 ms, memory: 5864 KB
if not pHead:
return None
fast = slow = pHead
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
break
if not fast or not fast.next:
return None
slow = pHead
while fast != slow:
fast = fast.next
slow = slow.next
return fast.val
if __name__ == "__main__":
solution = Solution()
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
e = ListNode(5)
f = ListNode(6)
a.next= b
b.next = c
c.next = d
d.next = e
e.next = c
# f.next = d
print(solution.EntryNodeOfLoop(a))
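    # The list built above is 1 -> 2 -> 3 -> 4 -> 5 -> back to 3, so the cycle's
    # entry node holds the value 3 and the script prints 3.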
|
[
"525868229@qq.com"
] |
525868229@qq.com
|
0c602c4d5aba8185e74b266e1050df2cd0ec026c
|
111082d7fd02a5f64cd1784b923a109cc95dc557
|
/dj_rulitool/wsgi.py
|
7ac1fe5349618d888f4dc721c11e7dfd58b406e3
|
[] |
no_license
|
270466585/dj_rulitool
|
ba65a6ef1bc44b599f19ac1172d86e8d4b2a12af
|
0d2e97454c66d30537780d81b2a0b4b2f953b2ed
|
refs/heads/master
| 2020-04-14T20:25:36.592762
| 2019-01-04T10:28:18
| 2019-01-04T10:28:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
"""
WSGI config for dj_rulitool project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_rulitool.settings")
application = get_wsgi_application()
|
[
"27066585@qq.com"
] |
27066585@qq.com
|
5224d8389d28f53149bb9a84556ad05b34511670
|
32711a21edff968fdbf9fa9baf0e0f8373d0e131
|
/authapp/forms.py
|
fa7e7e88283332593f25f77dbbd8f2f33b5d24c6
|
[] |
no_license
|
acid-n/GeekShop
|
ca836a4daeb97754fafd44d36e705f0e160c8d4d
|
9749debe92e6ded46ed01082fbdb497a5f8485fa
|
refs/heads/master
| 2023-01-15T15:29:18.172547
| 2020-11-25T18:34:39
| 2020-11-25T18:34:39
| 296,569,582
| 0
| 0
| null | 2020-10-04T18:39:29
| 2020-09-18T09:02:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,620
|
py
|
import hashlib
import random
from django import forms
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, UserChangeForm
from authapp.models import ShopUser, ShopUserProfile
class ShopUserLoginForm(AuthenticationForm):
class Meta:
model = ShopUser
fields = ('username', 'password')
def __init__(self, *args, **kwargs):
super(ShopUserLoginForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = "form-control"
field.help_text = ''
class ShopUserRegisterForm(UserCreationForm):
class Meta:
model = ShopUser
fields = ('username', 'first_name', 'password1', 'password2', 'email', 'avatar', 'age')
def __init__(self, *args, **kwargs):
super(ShopUserRegisterForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = "form-control"
field.help_text = ''
def clean_age(self):
data = self.cleaned_data['age']
if data < 18:
            raise forms.ValidationError("You are too young")
return data
def save(self, **kwargs):
user = super(ShopUserRegisterForm, self).save()
user.is_active = False
salt = hashlib.sha1(str(random.random()).encode('utf8')).hexdigest()[:6]
user.activation_key = hashlib.sha1((user.email + salt).encode('utf8')).hexdigest()
user.save()
return user
class ShopUserEditForm(UserChangeForm):
class Meta:
model = ShopUser
fields = ('username', 'first_name', 'email', 'avatar', 'age')
def __init__(self, *args, **kwargs):
super(ShopUserEditForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = "form-control"
field.help_text = ''
if field_name == 'password':
field.widget = forms.HiddenInput()
def clean_age(self):
data = self.cleaned_data['age']
if data < 18:
            raise forms.ValidationError("You are too young")
return data
class ShopUserProfileEditForm(forms.ModelForm):
class Meta:
model = ShopUserProfile
fields = ('tagline', 'about_me', 'gender')
def __init__(self, *args, **kwargs):
super(ShopUserProfileEditForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = "form-control"
field.help_text = ''
|
[
"acid_n@mail.ru"
] |
acid_n@mail.ru
|
6ac6c0894bfa4d2f46e20bd466eb57471523bfb5
|
ed78041a12c60e46bb0c4d347c47536e84307a96
|
/app/__init__.py
|
ea59f07f74501ccfe1fe861e921ef187326337da
|
[] |
no_license
|
Garfield247/news_nlp
|
4875842af4249def6ffdc65a6e5896b02610dd8d
|
e18d178824ea9bf11d3895c58037a211f4b21cb6
|
refs/heads/master
| 2022-12-11T21:00:36.967826
| 2019-03-15T02:32:46
| 2019-03-15T02:32:46
| 161,159,779
| 0
| 0
| null | 2022-12-08T04:52:15
| 2018-12-10T10:48:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 945
|
py
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template
from app.config import config
from app.extensions import config_extensions
from app.views import config_blueprint
# Helper that encapsulates creation of the Flask application instance
def create_app(config_name):  # e.g. 'development'
    # create the application instance
    app = Flask(__name__)
    # initialise the configuration
    app.config.from_object(config.get(config_name) or config['default'])
    # call the config-specific initialisation hook
    config[config_name].init_app(app)
    # set up extensions
    config_extensions(app)
    # register blueprints
    config_blueprint(app)
    # custom error pages
    config_errorhandler(app)
    # return the application instance
    return app
def config_errorhandler(app):
    # An error handler registered on a blueprint only applies to that blueprint;
    # use app_errorhandler to define an error page that is effective globally.
@app.errorhandler(404)
def page_not_found(e):
return render_template('errors/404.html')
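# Hypothetical usage sketch (not part of the original module; the config name is an
# assumption based on the comment on create_app above):
# app = create_app('development')
# app.run(debug=True)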
|
[
"Garfield_lv@163.com"
] |
Garfield_lv@163.com
|
09997d079fdba85719df5fe4ccf2d3f6d5988d74
|
0e9789668dcfeeedacf78aa9917bb95ec9a5f763
|
/preprocessing/load_data.py
|
5ff6f999bcc4fb4aae3d0baad46dc27ccc9be878
|
[] |
no_license
|
mma1979/Simple-Sentence-Similarity
|
76151619bcdfd39054f8b6cbe1e26af99d0f6a37
|
dfacb34c325df771056f34f85c7927148d69691c
|
refs/heads/master
| 2022-04-11T00:15:07.415752
| 2020-01-28T13:06:42
| 2020-01-28T13:06:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,926
|
py
|
import os
import pandas as pd
import requests
import tensorflow as tf
def load_sts_dataset(filename):
"""
Loads a subset of the STS dataset into a DataFrame.
In particular both sentences and their human rated similarity score.
:param filename:
:return:
"""
sent_pairs = []
with tf.gfile.GFile(filename, "r") as f:
for line in f:
ts = line.strip().split("\t")
sent_pairs.append((ts[5], ts[6], float(ts[4])))
return pd.DataFrame(sent_pairs, columns=["sent_1", "sent_2", "sim"])
def download_and_load_sts_data():
sts_dataset = tf.keras.utils.get_file(
fname="Stsbenchmark.tar.gz",
origin="http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz",
extract=True)
sts_dev = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-dev.csv"))
sts_test = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-test.csv"))
return sts_dev, sts_test
def download_sick_dataset(url):
response = requests.get(url).text
lines = response.split("\n")[1:]
lines = [l.split("\t") for l in lines if len(l) > 0]
lines = [l for l in lines if len(l) == 5]
df = pd.DataFrame(lines, columns=["idx", "sent_1", "sent_2", "sim", "label"])
df['sim'] = pd.to_numeric(df['sim'])
return df
def download_and_load_sick_dataset():
sick_train = download_sick_dataset(
"https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_train.txt")
sick_dev = download_sick_dataset(
"https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_trial.txt")
sick_test = download_sick_dataset(
"https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_test_annotated.txt")
sick_all = sick_train.append(sick_test).append(sick_dev)
return sick_all, sick_train, sick_test, sick_dev
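# Hypothetical usage sketch (not part of the original module):
# sts_dev, sts_test = download_and_load_sts_data()
# sick_all, sick_train, sick_test, sick_dev = download_and_load_sick_dataset()
# print(sts_dev[["sent_1", "sent_2", "sim"]].head())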
|
[
"rhtdranasinghe@gmail.com"
] |
rhtdranasinghe@gmail.com
|
cfe30dfb145e5c7610d9b424ad9cb71f37e95724
|
09e63e204cf3f70b0f878fe237f231af0786611e
|
/LifeQA/LSTM_QA.py
|
2e7249b817077a5418b4be8df812dcb9c1c1f866
|
[] |
no_license
|
shubham14/Machine_learning_research
|
8f00788366abf2d330afe8914e48d4279fcd8aea
|
b134e4e6b1e6c110fad8cb38b033c92c34d3c8ce
|
refs/heads/master
| 2022-11-08T13:24:58.722027
| 2019-11-10T09:21:28
| 2019-11-10T09:21:28
| 132,386,307
| 3
| 2
| null | 2022-10-17T15:36:25
| 2018-05-07T00:16:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 16 16:55:06 2018
@author: Shubham
"""
import numpy as np
from keras import backend as K
from keras.layers import Embedding
from keras.layers import LSTM, Input, merge, Lambda
from keras.layers.wrappers import Bidirectional
from keras.layers.convolutional import Convolution1D
from keras.models import Model
class QAModel:  # renamed from `Model` so it does not shadow keras.models.Model used below
    def __init__(self, margin, enc_timesteps, dec_timesteps,
                 hidden_dim, embedding_file, vocab_size):
        self.margin = margin
        self.enc_timesteps = enc_timesteps
        self.dec_timesteps = dec_timesteps
        self.hidden_dim = hidden_dim
        self.embedding_file = embedding_file
        self.vocab_size = vocab_size
    def get_cosine_similarity(self):
        dot = lambda a, b: K.batch_dot(a, b, axes=1)
        return lambda x: dot(x[0], x[1]) / K.maximum(K.sqrt(dot(x[0], x[0]) * dot(x[1], x[1])), K.epsilon())
def build_model(self):
# initialize the question and answer shapes and datatype
question = Input(shape=(self.enc_timesteps,), dtype='int32', name='question_base')
answer = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer')
answer_good = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer_good_base')
answer_bad = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer_bad_base')
weights = np.load(self.embedding_file)
qa_embedding = Embedding(input_dim=self.vocab_size,
output_dim=weights.shape[1],mask_zero=True,weights=[weights])
bi_lstm = Bidirectional(LSTM(activation='tanh', dropout=0.2, units=self.hidden_dim,
return_sequences=False))
# embed the question and pass it through bilstm
question_embedding = qa_embedding(question)
question_enc_1 = bi_lstm(question_embedding)
# embed the answer and pass it through bilstm
answer_embedding = qa_embedding(answer)
answer_enc_1 = bi_lstm(answer_embedding)
# get the cosine similarity
similarity = self.get_cosine_similarity()
question_answer_merged = merge(inputs=[question_enc_1, answer_enc_1], mode=similarity, output_shape=lambda _: (None, 1))
lstm_model = Model(name="bi_lstm", inputs=[question, answer], outputs=question_answer_merged)
good_similarity = lstm_model([question, answer_good])
bad_similarity = lstm_model([question, answer_bad])
loss = merge(
[good_similarity, bad_similarity],
            mode=lambda x: K.relu(self.margin - x[0] + x[1]),
output_shape=lambda x: x[0])
training_model = Model(inputs=[question, answer_good, answer_bad], outputs=loss, name='training_model')
training_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer="rmsprop")
prediction_model = Model(inputs=[question, answer_good], outputs=good_similarity, name='prediction_model')
prediction_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer="rmsprop")
return training_model, prediction_model
|
[
"shubham.ddash@gmail.com"
] |
shubham.ddash@gmail.com
|
a5c8acc3f261fc484e471a9c6729ba0a2951f7ea
|
6cc37dfc44880f57823bb9523ea5f8206d5e3f22
|
/python_OOP/labs_and_homeworks/09_decorators_exercise/07_execution_time.py
|
672ebddef602603926ee47bec252adbc7b08d114
|
[] |
no_license
|
dimitar-daskalov/SoftUni-Courses
|
70d265936fd86712a7bfe0586ec6ebd1c7384f77
|
2054bc58ffb5f41ed86f5d7c98729b101c3b1368
|
refs/heads/main
| 2023-05-31T06:44:35.498399
| 2021-07-11T10:16:08
| 2021-07-11T10:16:08
| 322,896,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
import time
def exec_time(func):
def wrapper(*args):
start = time.time()
func(*args)
end = time.time()
time_spend = end - start
return time_spend
return wrapper
@exec_time
def loop(start, end):
total = 0
for x in range(start, end):
total += x
return total
print(loop(1, 10000000))
@exec_time
def concatenate(strings):
result = ""
for string in strings:
result += string
return result
print(concatenate(["a" for i in range(1000000)]))
|
[
"dimitar.daskalov22@gmail.com"
] |
dimitar.daskalov22@gmail.com
|
488718466f0f0e87ffa34be480e9e92c0c8df57a
|
9a701c23ef6e70dc3704f012ffbb1e2689f7a8cb
|
/Lib/zDogPy/box.py
|
5006b0c8963abce56336c69e361803f02212a395
|
[
"MIT"
] |
permissive
|
gferreira/zdogpy
|
a832db713524d1343b85de1c8215511f438a2e41
|
41304e5db7cc2e145d43b6b2f7d77d25ec3c8b08
|
refs/heads/master
| 2020-05-30T07:50:24.621323
| 2019-09-11T09:30:59
| 2019-09-11T09:30:59
| 189,606,401
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,196
|
py
|
'''Box composite shape'''
from importlib import reload
import zDogPy.anchor
reload(zDogPy.anchor)
import zDogPy.shape
reload(zDogPy.shape)
import zDogPy.rect
reload(zDogPy.rect)
from zDogPy.boilerplate import hexToRGB, TAU
from zDogPy.anchor import Anchor
from zDogPy.shape import Shape
from zDogPy.rect import Rect
# -------
# BoxRect
# -------
class BoxRect(Rect):
def copyGraph(self):
pass
# ---
# Box
# ---
class Box(Anchor):
frontFace = None
rearFace = None
leftFace = None
rightFace = None
topFace = None
bottomFace = None
def __init__(self, width=1, height=1, depth=1, stroke=1, fill=True, color=True, frontFace=True, rearFace=True, leftFace=True, rightFace=True, topFace=True, bottomFace=True, **kwargs):
self.width = width
self.height = height
self.depth = depth
self.stroke = stroke
self.fill = fill
self.color = color
self.frontFace = frontFace
self.rearFace = rearFace
self.leftFace = leftFace
self.rightFace = rightFace
self.topFace = topFace
self.bottomFace = bottomFace
Anchor.__init__(self, **kwargs)
self.updatePath()
def updatePath(self):
self.setFace('frontFace', {
'width' : self.width,
'height' : self.height,
'translate' : { 'z': self.depth / 2 },
})
self.setFace('rearFace', {
'width' : self.width,
'height' : self.height,
'translate' : { 'z': -self.depth / 2 },
})
self.setFace('leftFace', {
'width' : self.depth,
'height' : self.height,
'translate' : { 'x': -self.width / 2 },
'rotate' : { 'y': -TAU / 4 },
})
self.setFace('rightFace', {
'width' : self.depth,
'height' : self.height,
'translate' : { 'x': self.width / 2 },
'rotate' : { 'y': TAU / 4 },
})
self.setFace('topFace', {
'width' : self.width,
'height' : self.depth,
'translate' : { 'y': -self.height / 2 },
'rotate' : { 'x': -TAU / 4 },
})
self.setFace('bottomFace', {
'width' : self.width,
'height' : self.depth,
'translate' : { 'y': self.height / 2 },
'rotate' : { 'x': -TAU / 4 },
})
def setFace(self, faceName, options):
attr = getattr(self, faceName)
rectProperty = faceName + 'Rect'
# remove if False (??)
if not attr:
# self.removeChild(rectProperty)
return
if isinstance(attr, tuple):
color = attr
elif type(attr) is str:
color = hexToRGB(attr)
else:
color = self.color
rect = BoxRect(**options)
rect.stroke = self.stroke
rect.fill = self.fill
rect.color = color
# rect.backface = self.backface
# rect.front = self.front
# rect.visible = self.visible
rect.updatePath()
self.addChild(rect)
|
[
"gustavo@hipertipo.com"
] |
gustavo@hipertipo.com
|
a4536f0fe2f8a612a01725277078ce3b79778683
|
5707a6e95d6388a320416d7c06c275daf61e3406
|
/Unidad2/ej1.py
|
54e6a486f22b1ad338b6cbaf6b579ebbc1bebc68
|
[] |
no_license
|
hectorrdz98/lenguajes-y-automatas-1
|
ed3406e8a7b7eaad489c530146cddac5a972bc81
|
0004c4696a92cdd33a86a24f82d0f9b7e01e455c
|
refs/heads/master
| 2022-01-21T21:24:21.819330
| 2019-05-29T12:42:42
| 2019-05-29T12:42:42
| 167,203,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
"""
Autor: Hector Rodriguez
"""
"""
Este codigo lee el archivo doc.txt (debe estar al mismo nivel de carpeta que este archivo)
y muestra en consola para cada linea del documento que tipo de elemento o a que categoría pertenece.
Estas son mis condiciones:
1.- Entero: Números de 0-9
2.- Flotante: Números de 0-9 seguido de un . y más números de 0-9
3.- Variable: Conjunto de letras de la A-Z mayúsculas y minúsculas, _ y dígitos de 0-9 que no debe iniciar con 0-9
4.- String: Cadena de carateres que inicia y cierra con "
5.- Aritmética: Expresión con entero, flotante o variable seguida de un * + - / ^ y luego otro entero, flotante o variable
no pueden haber dos * + - / ^ juntos o terminar la expresión con * + - / ^
6.- Relacional: Expresión con entero, flotante o variable seguida de un < > y un posible = o un != o == y luego otro entero, flotante o variable
no pueden haber dos < > y un posible = o un != o == juntos o terminar la expresión con < > y un posible = o un != o ==
"""
import re
# Regex necesarias
RegexPatterns = {
'entero': r'^[\-|\+]?\d+$',
'flotante': r'^[\-|\+]?\d+\.\d+$',
'variable': r'^[a-zA-Z_]\w{0,29}$',
'string': r'^\"[^\"]*\"$',
'aritmetica': r'^(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29})([\*\/\+\-\^](\d+|\d+\.\d+|[a-zA-Z_]\w{0,29}))+$',
'relacional': r'^(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29})(([\<\>]\=?|[\!\=]=)(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29}))+$'
}
try:
with open('doc.txt', encoding='utf-8') as file:
for line in file:
flag = False
for regexName, regex in RegexPatterns.items():
foundRegex = re.findall(regex, line)
if line != '\n':
if foundRegex != []:
flag = True
print('{}: es {}'.format(line[0:len(line)-1], regexName))
break
if not flag and line != '\n':
print('{}: no lo conozco'.format(line[0:len(line)-1]))
except Exception as e:
print('Error al abrir el archivo: {}'.format(e))
|
[
"="
] |
=
|
33b9a0b28178626117cfa52bbee000bdf746fae2
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/QcswPnY2cAbrfwuWE_1.py
|
3dc657a83394c0074459ebb833c7727b69c41094
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
def filter_factorials(n):
    def is_fact(x):
        # x is a factorial iff repeatedly dividing it by 1, 2, 3, ... reduces it to 1
        i = 1
        while True:
            if x % i < 1:
                x //= i
            else:
                break
            i += 1
        return x == 1
    return [i for i in n if is_fact(i)]
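# Hypothetical usage example (not part of the original file), obtained by tracing is_fact:
# filter_factorials([1, 2, 3, 4, 5, 6, 7, 8, 24, 120]) -> [1, 2, 6, 24, 120]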
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
4f8410227745a3f05d1ded00c637145222b001f5
|
acf03baf55e36dc2a9a5ecd697ec5cb15446ae32
|
/vng_api_common/decorators.py
|
eeb5d658ccfe42cff1208a7c375b34363017ad85
|
[] |
no_license
|
GemeenteUtrecht/vng-api-common
|
b6eb55121dc44c72179cbcd63896bbc020dc3975
|
97786cac041d867468007f9b9a9703d21644391a
|
refs/heads/master
| 2021-06-29T01:53:11.647065
| 2019-07-18T12:04:39
| 2019-07-18T12:05:27
| 198,412,309
| 0
| 0
| null | 2019-07-23T10:57:53
| 2019-07-23T10:57:52
| null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from typing import Any
from django.db.models.base import ModelBase
def field_default(field: str, default: Any):
def decorator(cls: ModelBase):
model_field = cls._meta.get_field(field)
model_field.default = default
return cls
return decorator
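# Hypothetical usage sketch (not part of the original module; the model and field
# names below are made up for illustration): applying the decorator overrides the
# named field's default on the decorated model class.
#
# @field_default('status', 'draft')
# class Document(models.Model):
#     status = models.CharField(max_length=20, default='concept')
#
# Document._meta.get_field('status').default == 'draft'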
|
[
"sergei@maykinmedia.nl"
] |
sergei@maykinmedia.nl
|
cb9254defb1dc4344f4dc02075844f7a20a3bc07
|
69729ce2a0d2147b7b52e14008d8fc9960e3c099
|
/fast_rl/core/metrics.py
|
3233e2fc611672b7ffd218e9a04ec813f65eb1a7
|
[
"Apache-2.0"
] |
permissive
|
swelchm/fast-reinforcement-learning
|
2f5d5aa51830f774ca0e6814833a736029e88f4d
|
9649b6d1bb931c4e4b7200a73b172325a1d8346f
|
refs/heads/master
| 2020-07-29T16:04:10.926035
| 2019-09-18T02:43:50
| 2019-09-18T02:43:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
import torch
from fastai.basic_train import LearnerCallback
from fastai.callback import Callback, is_listy, add_metrics
class EpsilonMetric(LearnerCallback):
_order = -20 # Needs to run before the recorder
def __init__(self, learn):
super().__init__(learn)
self.epsilon = 0
if not hasattr(self.learn.model, 'exploration_strategy'):
raise ValueError('Your model is not using an exploration strategy! Please use epsilon based exploration')
if not hasattr(self.learn.model.exploration_strategy, 'epsilon'):
raise ValueError('Please use epsilon based exploration (should have an epsilon field)')
def on_train_begin(self, **kwargs):
self.learn.recorder.add_metric_names(['epsilon'])
def on_epoch_end(self, last_metrics, **kwargs):
self.epsilon = self.learn.model.exploration_strategy.epsilon
return add_metrics(last_metrics, [self.epsilon])
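# Hypothetical usage sketch (not part of the original module; assumes the model
# exposes an epsilon-based exploration_strategy as checked above):
# learn = Learner(data, model, callback_fns=[EpsilonMetric])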
|
[
"jokellum@northstate.net"
] |
jokellum@northstate.net
|
fb4eb2d3fc2b6b557ef5f486e64a77a51611a0bc
|
87a6d7e83a25cb3b1696fb6094fda88858754c19
|
/src/review/views.py
|
a2a5efdfd814e12518e46fac60f1fd21ab2a9492
|
[
"BSD-3-Clause"
] |
permissive
|
tegarty/socialrating
|
20b45f8eb233fed0b69ae0fd8110cf8a73f1f782
|
b80888ee8e637bd0a5517614c78235d563fead2e
|
refs/heads/master
| 2020-04-20T08:42:52.231718
| 2018-12-06T17:57:43
| 2018-12-06T17:57:43
| 168,747,496
| 1
| 0
|
BSD-3-Clause
| 2019-02-01T19:11:19
| 2019-02-01T19:11:19
| null |
UTF-8
|
Python
| false
| false
| 4,163
|
py
|
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django import forms
from django.contrib import messages
from django.shortcuts import redirect, reverse
from item.mixins import ItemViewMixin
from rating.models import Vote
from team.mixins import TeamViewMixin
from context.models import Context
from .models import Review
class ReviewListView(ItemViewMixin, ListView):
model = Review
paginate_by = 100
template_name = 'review_list.html'
def get_queryset(self):
return super().get_queryset().filter(item=self.item)
class ReviewCreateView(TeamViewMixin, ItemViewMixin, CreateView):
model = Review
template_name = 'review_form.html'
fields = ['headline', 'body', 'context']
def get_context_data(self):
"""
Add Item to the context
"""
context = super().get_context_data()
context['item'] = self.item
return context
def get_form(self, form_class=None):
"""
Add ratings to the form and set initial Context
QuerySet
"""
form = super().get_form(form_class)
for rating in self.item.category.ratings.all():
choices = []
for choice in range(1, rating.max_rating+1):
choices.append((choice, choice))
form.fields["%s_vote" % rating.slug] = forms.TypedChoiceField(
choices=choices,
coerce=int,
widget=forms.widgets.RadioSelect,
required=False,
label='%s: Please vote between 1-%s' % (rating.name, rating.max_rating),
)
form.fields["%s_comment" % rating.slug] = forms.CharField(
label='%s: A short comment for the Vote above' % rating.name,
required=False,
)
form.fields['context'].queryset = Context.objects.filter(team=self.team)
return form
def form_valid(self, form):
"""
First save the new Review,
then save any Votes, Attachments and Tags.
"""
review = form.save(commit=False)
review.item = self.item
review.actor = self.request.user.actor
review.save()
# loop over ratings available for this item,
# saving a new Vote for each as needed
for rating in self.item.category.ratings.all():
votefield = "%s_vote" % rating.slug
commentfield = "%s_comment" % rating.slug
if votefield in form.fields and form.cleaned_data[votefield]:
Vote.objects.create(
review=review,
rating=rating,
vote=form.cleaned_data[votefield],
comment=form.cleaned_data[commentfield] if commentfield in form.cleaned_data else '',
)
return redirect(reverse(
'team:category:item:review:detail',
kwargs={
'team_slug': self.team.slug,
'category_slug': self.item.category.slug,
'item_slug': self.item.slug,
'review_uuid': review.pk
}
))
class ReviewDetailView(ItemViewMixin, DetailView):
model = Review
template_name = 'review_detail.html'
pk_url_kwarg = 'review_uuid'
class ReviewUpdateView(ItemViewMixin, UpdateView):
model = Review
template_name = 'review_form.html'
pk_url_kwarg = 'review_uuid'
fields = ['headline', 'body', 'context']
class ReviewDeleteView(ItemViewMixin, DeleteView):
model = Review
template_name = 'review_delete.html'
pk_url_kwarg = 'review_uuid'
def delete(self, request, *args, **kwargs):
messages.success(self.request, "Review %s has been deleted, along with all Votes that related to it." % self.get_object())
return super().delete(request, *args, **kwargs)
    def get_success_url(self):
        return(reverse('team:category:item:detail', kwargs={
            'team_slug': self.team.slug,
            'category_slug': self.item.category.slug,
            'item_slug': self.item.slug,
        }))
|
[
"thomas@gibfest.dk"
] |
thomas@gibfest.dk
|
1d01b17589e954f3dd2578ee3bc07e5bbed380dc
|
ff99c677aba11e27c252f773b52cd54f5de79279
|
/ctt-server/openapi_server/models/project.py
|
0220631032783c2d8b7da9e44e5e0a94cbfdbdab
|
[
"Apache-2.0"
] |
permissive
|
radon-h2020/radon-ctt
|
b7eeb82f59e36e2a258d0a2ba9cd9483eb3dd247
|
97fcf5e800a0129d24e119b430d94f07ca248ba9
|
refs/heads/master
| 2023-01-04T23:44:49.611599
| 2021-09-15T15:34:41
| 2021-09-15T15:34:41
| 235,379,642
| 0
| 7
|
Apache-2.0
| 2022-12-27T15:56:38
| 2020-01-21T15:48:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,758
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class Project(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, uuid=None, name=None, repository_url=None): # noqa: E501
"""Project - a model defined in OpenAPI
:param uuid: The uuid of this Project. # noqa: E501
:type uuid: str
:param name: The name of this Project. # noqa: E501
:type name: str
:param repository_url: The repository_url of this Project. # noqa: E501
:type repository_url: str
"""
self.openapi_types = {
'uuid': str,
'name': str,
'repository_url': str
}
self.attribute_map = {
'uuid': 'uuid',
'name': 'name',
'repository_url': 'repository_url'
}
self._uuid = uuid
self._name = name
self._repository_url = repository_url
@classmethod
def from_dict(cls, dikt) -> 'Project':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Project of this Project. # noqa: E501
:rtype: Project
"""
return util.deserialize_model(dikt, cls)
@property
def uuid(self):
"""Gets the uuid of this Project.
:return: The uuid of this Project.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this Project.
:param uuid: The uuid of this Project.
:type uuid: str
"""
self._uuid = uuid
@property
def name(self):
"""Gets the name of this Project.
:return: The name of this Project.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Project.
:param name: The name of this Project.
:type name: str
"""
self._name = name
@property
def repository_url(self):
"""Gets the repository_url of this Project.
:return: The repository_url of this Project.
:rtype: str
"""
return self._repository_url
@repository_url.setter
def repository_url(self, repository_url):
"""Sets the repository_url of this Project.
:param repository_url: The repository_url of this Project.
:type repository_url: str
"""
self._repository_url = repository_url
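# Hypothetical usage sketch (not part of the generated code; field values are made up):
# project = Project.from_dict({'uuid': 'abc-123', 'name': 'demo', 'repository_url': 'https://example.org/repo.git'})
# assert project.name == 'demo'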
|
[
"duellmann@iste.uni-stuttgart.de"
] |
duellmann@iste.uni-stuttgart.de
|
5dc3e5eb54602009e6f8a02450af13bf34566f0c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_workers.py
|
925498db6457cf5ab2857092165fbc8709111a52
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from xai.brain.wordbase.nouns._worker import _WORKER
# class header
class _WORKERS(_WORKER, ):
def __init__(self,):
_WORKER.__init__(self)
self.name = "WORKERS"
self.specie = 'nouns'
self.basic = "worker"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ebf9b4a30f7ce8099e5020d7dc4df985c9055dc2
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2138/60585/257214.py
|
6a0f930bf2e12114cdb52468e38c6cf2a97dc12e
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
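# Reads a comma-separated integer list and an integer k, then prints True if some
# contiguous subarray of length >= 2 has a sum divisible by k, else False.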
arr=list(map(int,input().strip().split(',')))
k=eval(input())
n=len(arr)
isM=False
for i in range(0,n-1):
j=i+1
temp=arr[i]
while j<n:
temp+=arr[j]
j+=1
if temp%k==0:
isM=True
break
if isM:
break
print(isM)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
f5a0e695f1a50cbc20de0463e14d5f362bb054ee
|
6499d0b71b19fd4416bfd74fa9fd88e3d0b0618a
|
/king_phisher/client/dialogs/exception.py
|
70b21f7817f9aa453b632b5a60c493afdd5eccd9
|
[
"BSD-3-Clause"
] |
permissive
|
Meatballs1/king-phisher
|
dfb0a539a2d0455113b40698f7151521774addb1
|
a16b1de055260f6f33d8c1fd0765bd06ffb733c2
|
refs/heads/master
| 2020-05-20T17:55:30.441239
| 2015-10-15T19:21:22
| 2015-10-15T19:21:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,018
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/exception.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import platform
import sys
import traceback
from king_phisher import its
from king_phisher import utilities
from king_phisher import version
from king_phisher.client import gui_utilities
from king_phisher.third_party import AdvancedHTTPServer
from gi.repository import Gtk
__all__ = ['ExceptionDialog']
EXCEPTION_DETAILS_TEMPLATE = """
Error Type: {error_type}
Error Details: {error_details}
Error UID: {error_uid}
RPC Error: {rpc_error_details}
King Phisher Version: {king_phisher_version}
Platform Version: {platform_version}
Python Version: {python_version}
Gtk Version: {gtk_version}
{stack_trace}
"""
class ExceptionDialog(gui_utilities.GladeGObject):
"""
Display a dialog which shows an error message for a python exception.
The dialog includes useful details for reporting and debugging the exception
which occurred.
"""
gobject_ids = ('linkbutton_github_issues',)
top_gobject = 'dialog'
def __init__(self, application, exc_info=None, error_uid=None):
"""
:param application: The parent application for this object.
:type application: :py:class:`Gtk.Application`
:param tuple exc_info: The exception information as provided by :py:func:`sys.exc_info`.
:param str error_uid: An optional unique identifier for the exception that can be provided for tracking purposes.
"""
super(ExceptionDialog, self).__init__(application)
self.error_description = self.gtk_builder_get('label_error_description')
self.error_details = self.gtk_builder_get('textview_error_details')
self.exc_info = exc_info or sys.exc_info()
self.error_uid = error_uid
linkbutton = self.gobjects['linkbutton_github_issues']
linkbutton.set_label('Project Issue Tracker')
linkbutton.connect('activate-link', lambda _: utilities.open_uri(linkbutton.get_property('uri')))
def interact(self):
exc_type, exc_value, exc_traceback = self.exc_info
pversion = 'UNKNOWN'
if its.on_linux:
pversion = 'Linux: ' + ' '.join(platform.linux_distribution())
elif its.on_windows:
pversion = 'Windows: ' + ' '.join(platform.win32_ver())
if its.frozen:
pversion += ' (Frozen=True)'
else:
pversion += ' (Frozen=False)'
exc_name = "{0}.{1}".format(exc_type.__module__, exc_type.__name__)
rpc_error_details = 'N/A (Not a remote RPC error)'
if isinstance(exc_value, AdvancedHTTPServer.AdvancedHTTPServerRPCError) and exc_value.is_remote_exception:
rpc_error_details = "Name: {0}".format(exc_value.remote_exception['name'])
if exc_value.remote_exception.get('message'):
rpc_error_details += " Message: '{0}'".format(exc_value.remote_exception['message'])
details = EXCEPTION_DETAILS_TEMPLATE.format(
error_details=repr(exc_value),
error_type=exc_name,
error_uid=(self.error_uid or 'N/A'),
rpc_error_details=rpc_error_details,
king_phisher_version=version.version,
platform_version=pversion,
python_version="{0}.{1}.{2}".format(*sys.version_info),
gtk_version="{0}.{1}.{2}".format(Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version()),
stack_trace=''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
)
details = details.strip() + '\n'
if exc_name.startswith('king_phisher.third_party.'):
exc_name = exc_name[25:]
self.error_description.set_text("Error type: {0}".format(exc_name))
self.error_details.get_buffer().set_text(details)
self.dialog.show_all()
self.dialog.run()
self.dialog.destroy()
return
|
[
"zeroSteiner@gmail.com"
] |
zeroSteiner@gmail.com
|
7d4ad877ef0674f0248f5b402f5ca2ec0fbca0b5
|
83932f1d956a6b7818c6e58a31205e6e26f2fb5c
|
/0x11-python-network_1/2-post_email.py
|
ae506265afc23c32cdffd2a0428200f828ddb688
|
[] |
no_license
|
Nzparra/holbertonschool-higher_level_programming
|
a17834b8239e477a7284119acac69da0e7d7261e
|
6cf7a44a10db7a10be3c3c02cbacfea9a7b897f2
|
refs/heads/master
| 2020-09-29T02:45:04.458850
| 2020-05-14T21:12:45
| 2020-05-14T21:12:45
| 226,930,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
#!/usr/bin/python3
""" sends a POST request to the passed URL with the email as a parameter """
from urllib import request, parse
import sys
if __name__ == "__main__":
req = parse.urlencode({'email': sys.argv[2]})
req = req.encode('ascii')
reqst = request.Request(sys.argv[1], req)
with request.urlopen(reqst) as response:
html = response.read()
print(html.decode('utf-8'))
|
[
"nzparra@gmail.com"
] |
nzparra@gmail.com
|
99b9f127259fa1b88da83c73c1b13ae51336a33c
|
20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7
|
/old/s3c/explosion.py
|
d214948ae8a509ca9fcf7d2f5cbf3d133373c71a
|
[] |
no_license
|
sarahboufelja54/galatea
|
f5664f0b3117629b2c5bbe078a1bd52bb5e359e6
|
002a9f2905868be25b71770190fb2d5eda11c861
|
refs/heads/master
| 2020-12-04T13:45:07.697189
| 2018-12-12T16:27:09
| 2018-12-12T16:27:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
#attempting to create an excitation-based explosion (I don't think it's possible)
hat_h = [1., 1.]
alpha = [.01 , .01]
W = [ -1., 1. ]
beta = 1
w = [ beta * (weight ** 2) for weight in W ]
init_hat_s = [ 1., 1.5 ]
hat_s = [ val for val in init_hat_s ]
#like mu in our current model, except that it isn't gated by h
always_on_mu = [ 0., 0. ]
v = 1
def update():
rval = []
for i in xrange(2):
scaleback = alpha[i] + w[i]
mean_term = always_on_mu[i]
data_term = beta * v * W[i]
j = 1 - i
interaction_term = - W[i] * W[j] * beta * hat_h[j] * hat_s[j]
hat_s_i = (mean_term + data_term + interaction_term) / scaleback
rval.append(hat_s_i)
return rval
for iter in xrange(100):
print hat_s
hat_s = update()
|
[
"goodfellow.ian@gmail.com"
] |
goodfellow.ian@gmail.com
|
42de88eb553c0e4e996822b8763fa6c13507faa7
|
e5eeb6d9e7c2d7a53f864f8b9df7ca0cb79932ef
|
/sa/profiles/Alstec/MSPU/__init__.py
|
b7d0c1ced7d78897c77243798ea9274c7900e37e
|
[
"BSD-3-Clause"
] |
permissive
|
0pt1on/noc
|
aa583a6684f8299467c665e303f7ffa47ad6b88a
|
4eb26dd44002a0a4a562973815567237d979cab5
|
refs/heads/master
| 2020-06-20T08:51:11.653330
| 2019-07-12T07:13:45
| 2019-07-12T07:13:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vendor: Alstec
# OS: MSPU
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "Alstec.MSPU"
pattern_prompt = r"^\S+\$> "
pattern_more = r"^--More-- or \(q\)uit$"
pattern_syntax_error = r"\^ error"
command_exit = "exit"
|
[
"dmitryluhtionov@gmail.com"
] |
dmitryluhtionov@gmail.com
|
5360f0f0d9b911bb3033292064920cc4edcb718e
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Flask/Book_evaluator/venv/Lib/site-packages/urllib3/util/__init__.py
|
130a48f4f4e13e706d68fa3f49aa7081eb6997c7
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:e916a7e28015232e340aefe810f5a7355f5bc05e6b5f1e86d43519ee87a18cf6
size 1044
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
a8fa9c19001403543360c111212013e80ce6d390
|
45799ccc3a16c785ab3c65f3296d66f8463590dc
|
/docs/_downloads/7dfb273e58ce9eea02e428696e9a9672/q108.py
|
de3417bd434085db9999cb7596c5a4dfcfe82b2f
|
[
"MIT"
] |
permissive
|
odys-z/hello
|
9d29b7af68ea8c490b43994cf16d75c0e8ace08e
|
fedd0aec7273f3170aa77316d0d5f317cc18a979
|
refs/heads/master
| 2023-08-19T03:25:58.684050
| 2023-08-18T08:07:27
| 2023-08-18T08:07:27
| 154,006,292
| 0
| 0
|
MIT
| 2023-04-18T22:50:56
| 2018-10-21T12:34:12
|
C++
|
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
'''
108. Convert Sorted Array to Binary Search Tree
https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
Given an integer array nums where the elements are sorted in ascending
order, convert it to a height-balanced binary search tree.
A height-balanced binary tree is a binary tree in which the depth of
the two subtrees of every node never differs by more than one.
Created on 17 Apr 2021
@author: Odys Zhou
'''
from unittest import TestCase
from typing import List
from utils.treehelper2 import TreeNode, list2tree
# Definition for a binary tree node.
class Solution:
'''
64.58%
'''
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
'''
nums: sorted
'''
l, r = 0, len(nums) - 1
def buildTree(lix, rix) -> 'root':
'''
0, 0 0, 1 0, 2 0, 3 0, 4
0 0 1 1 2
'''
if lix > rix: return None
m = (lix + rix) // 2
root = TreeNode(nums[m])
root.left = buildTree(lix, m-1)
root.right = buildTree(m+1, rix)
return root
return buildTree(l, r)
if __name__ == "__main__":
t = TestCase()
s = Solution()
res = s.sortedArrayToBST([-10, -3, 0, 5, 9]).print()
t.assertTrue( list2tree([0, -3, 9, -10, None, 5]).print() == res or
list2tree([0, -10, 5, None, -3, None, 9]).print() == res)
print('q108 OK!')
|
[
"odysseusj@163.com"
] |
odysseusj@163.com
|
72228121b096510616532a4edb9408df229e04ab
|
5b9485c4ad9db15ff3e535085092fb45057f7364
|
/src/nuxeo/javascript/cpsskins/tests/functional/treeview/browser.py
|
6162bef9cd7e0d77dbb40da6b1080e30ac097356
|
[
"ZPL-2.1"
] |
permissive
|
nuxeo-cps/zope3--nuxeo.javascript
|
06109541949c1e612b232efeddec3aa04ecb7d84
|
3ac03c8c46daf75ae7b3ff2fba308cba8caff245
|
refs/heads/main
| 2023-01-24T06:54:13.659442
| 2009-12-22T09:24:26
| 2009-12-22T09:24:26
| 317,995,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,947
|
py
|
from urllib import unquote
from zope.app.publisher.browser import BrowserView
from cpsskins import minjson as json
tree_data = {
'items': [
{'id': '1', 'title': 'item 1', 'depth': 1, 'type': 'inner'},
{'id': '2', 'title': 'item 2', 'depth': 2, 'type': 'inner',
'empty': True},
{'id': '3', 'title': 'item 3', 'depth': 2, 'type': 'leaf'},
{'id': '4', 'title': 'item 4', 'depth': 2, 'type': 'inner'},
{'id': '5', 'title': 'item 5', 'depth': 3, 'type': 'leaf',
'position': 'last'},
{'id': '6', 'title': 'item 6', 'depth': 1, 'type': 'inner'},
{'id': '7', 'title': 'item 7', 'depth': 2, 'type': 'inner',
'empty': True},
{'id': '8', 'title': 'item 8', 'depth': 2, 'type': 'leaf',
'position': 'last'},
]
}
MAX_DEPTH = 10
class Views(BrowserView):
def getTreeData(self):
local_data = self._getLocalStorageData(1)
if local_data is None:
local_data = {}
tree_state = local_data.get('state', {})
filtered_items = []
filter_depth = MAX_DEPTH
for item in tree_data['items']:
depth = item['depth']
if depth > filter_depth:
continue
else:
filter_depth = MAX_DEPTH
if item['type'] == 'inner':
state = tree_state.get(item['id'])
if state != 'open':
filter_depth = depth
filtered_items.append(item)
self.request.response.setHeader('content-type', 'text/x-json')
return json.write({'items': filtered_items})
def setTreeData(self, data):
return self.getTreeData()
# TODO: moves this to an API
def _getLocalStorageData(self, id):
value = self.request.cookies.get('cpsskins_local_storage_%s' % id)
if value is not None:
return json.read(unquote(value))
return None
|
[
"devnull@localhost"
] |
devnull@localhost
|
9b80d82c0f685c41a834444780cd8207ebb71348
|
9f9b19a26ed931207878364d395e47a3d986751b
|
/dmam/migrations/0006_auto_20181022_2230.py
|
6b6b882350caefc90a3b9690311255482d54076e
|
[] |
no_license
|
lishulincug/waterwork
|
6697f5264dc880a92d9b91e91b703eda3818d7a3
|
690fb344e7f271a3ded66f0cdf4c9161811ed1f4
|
refs/heads/master
| 2020-09-09T13:19:21.301200
| 2019-07-25T09:37:04
| 2019-07-25T09:37:04
| 221,456,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 2.0 on 2018-10-22 22:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dmam', '0005_auto_20181022_1555'),
]
operations = [
migrations.AlterField(
model_name='station',
name='dmaid',
field=models.ManyToManyField(to='dmam.DMABaseinfo'),
),
]
|
[
"apengok@163.com"
] |
apengok@163.com
|
a9dbd0cdcd940053789e278ea1754c00d7bcc81d
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/plugin/core/diff/DiffProgramManager.pyi
|
a0a10fe1025a5187fd858ff298fe56649ee228e8
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,233
|
pyi
|
from typing import List
import ghidra.app.services
import ghidra.framework.model
import ghidra.program.model.address
import ghidra.program.model.listing
import java.awt
import java.lang
import java.net
class DiffProgramManager(object, ghidra.app.services.ProgramManager):
OPEN_CURRENT: int = 1
OPEN_HIDDEN: int = 0
OPEN_VISIBLE: int = 2
def __init__(self, __a0: ghidra.app.plugin.core.diff.ProgramDiffPlugin): ...
def closeAllPrograms(self, __a0: bool) -> bool: ...
def closeOtherPrograms(self, __a0: bool) -> bool: ...
@overload
def closeProgram(self) -> bool: ...
@overload
def closeProgram(self, __a0: ghidra.program.model.listing.Program, __a1: bool) -> bool: ...
def equals(self, __a0: object) -> bool: ...
def getAllOpenPrograms(self) -> List[ghidra.program.model.listing.Program]: ...
def getClass(self) -> java.lang.Class: ...
def getCurrentProgram(self) -> ghidra.program.model.listing.Program: ...
def getProgram(self, __a0: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Program: ...
def hashCode(self) -> int: ...
def isLocked(self) -> bool: ...
def isVisible(self, __a0: ghidra.program.model.listing.Program) -> bool: ...
def lockDown(self, __a0: bool) -> None: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
@overload
def openProgram(self, __a0: ghidra.framework.model.DomainFile) -> ghidra.program.model.listing.Program: ...
@overload
def openProgram(self, __a0: ghidra.program.model.listing.Program) -> None: ...
@overload
def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: int) -> ghidra.program.model.listing.Program: ...
@overload
def openProgram(self, __a0: ghidra.program.model.listing.Program, __a1: int) -> None: ...
@overload
def openProgram(self, __a0: java.net.URL, __a1: int) -> ghidra.program.model.listing.Program: ...
@overload
def openProgram(self, __a0: ghidra.program.model.listing.Program, __a1: bool) -> None: ...
@overload
def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: java.awt.Component) -> ghidra.program.model.listing.Program: ...
@overload
def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: int, __a2: int) -> ghidra.program.model.listing.Program: ...
def releaseProgram(self, __a0: ghidra.program.model.listing.Program, __a1: object) -> None: ...
def setCurrentProgram(self, __a0: ghidra.program.model.listing.Program) -> None: ...
def setPersistentOwner(self, __a0: ghidra.program.model.listing.Program, __a1: object) -> bool: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def allOpenPrograms(self) -> List[ghidra.program.model.listing.Program]: ...
@property
def currentProgram(self) -> ghidra.program.model.listing.Program: ...
@currentProgram.setter
def currentProgram(self, value: ghidra.program.model.listing.Program) -> None: ...
@property
def locked(self) -> bool: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
d5fc867cf915437ad5f65f07e94dd1e3c0cf089d
|
101ffbee515a5b8f23d77361558dea1e42794dbd
|
/pip_save/toml/tests/test_writer/test_statement_nodes.py
|
87c6eb7bd5d88df7645289d2751f38fa6795af0e
|
[] |
no_license
|
mkurnikov/pip-save
|
0a841710c28983c1c769d87e18f2e584a554e1a1
|
e1e2fb9b0404a25790edcb5fd134267b92675470
|
refs/heads/master
| 2021-01-12T16:49:50.163661
| 2016-10-21T11:13:37
| 2016-10-21T11:13:37
| 71,442,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
from collections import OrderedDict
from unittest import TestCase
from pip_save.toml.model import TomlStatementNodes, Table
class TestStatementNodes(TestCase):
def test_append(self):
toml_nodes = TomlStatementNodes()
toml_nodes[('keyword',)] = '1'
self.assertEqual(len(toml_nodes), 1)
self.assertTrue(('keyword',) in toml_nodes)
def test_insert_after(self):
od = OrderedDict()
od[('deps',)] = Table()
od[('django',)] = '1.10.2'
toml_nodes = TomlStatementNodes(od)
toml_nodes.insert_after(('deps',), ('flask',), '1.3')
self.assertEqual(toml_nodes.keys(), [('deps',), ('flask',), ('django',)])
def test_insert_before(self):
od = OrderedDict()
od[('deps',)] = Table()
od[('django',)] = '1.10.2'
toml_nodes = TomlStatementNodes(od)
toml_nodes.insert_before(('django',), ('flask',), '1.3')
self.assertEqual(toml_nodes.keys(), [('deps',), ('flask',), ('django',)])
|
[
"maxim.kurnikov@gmail.com"
] |
maxim.kurnikov@gmail.com
|
8ce29595818ea2d4b7f8186cbb954cbdb7739d39
|
a3ff13ecac60f891a3ebdcb4c72bf6a4b581a2d8
|
/YCD/10.16公开课红心代码heart_3d.py
|
307b46ff179ed773578ac66438fc7032e575e55a
|
[] |
no_license
|
kekirk/pycode
|
75533afc3018cba30d0abd3c29ab1c945b85504b
|
06dab1a61d7b445cc19b41e4d281f62251e2583b
|
refs/heads/master
| 2020-04-01T17:13:53.828118
| 2019-01-04T09:02:00
| 2019-01-04T09:02:00
| 153,419,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
from pyecharts import Scatter3D
import numpy as np
# # Heart-shape implicit equation
# # (x^2+9/4*y^2+z^2-1)^3-x^2*z^3-9/80*y^2*z^3=0
# In[5]:
scatter3D = Scatter3D("I Love You", width=1700, height=1000)
data = list()
x = list(np.linspace(-1.5, 1.5,150))
y = list(np.linspace(-1,1,100))
z = list(np.linspace(-1.5,1.5,100))
for a in x:
for b in y:
for c in z:
if -0.05<=(a**2+9.0/4.0*b**2+c**2-1)**3-a**2*c**3-9.0/80.0*b**2*c**3 <=0:
data.append([a,b,c])
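# The triple loop keeps only points lying in a thin shell (-0.05 <= F <= 0) around the
# implicit surface F(x, y, z) = (x^2 + 9/4*y^2 + z^2 - 1)^3 - x^2*z^3 - 9/80*y^2*z^3,
# which is enough to trace the heart shape in the scatter plot.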
scatter3D.add("", data, is_visualmap=True, visual_range_color="red")
scatter3D.render()
scatter3D
|
[
"kekirk@163.com"
] |
kekirk@163.com
|
63d35cddd89c965242e94321cf091a8e71be87ec
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_swivels.py
|
ee26ae2f31371bd1bbcf01ad3ec765b20b2961cb
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from xai.brain.wordbase.verbs._swivel import _SWIVEL
# class header
class _SWIVELS(_SWIVEL, ):
def __init__(self,):
_SWIVEL.__init__(self)
self.name = "SWIVELS"
self.specie = 'verbs'
self.basic = "swivel"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
917caa8803de6237510c15044bfbe71ebee37d83
|
4564fd0cfb9009f0b85d15c3b9164b865c4c86e7
|
/tests/test_model.py
|
6640508bf3a1581cded6a9fe52d2d2d572937326
|
[
"Apache-2.0"
] |
permissive
|
rajaramcomputers/client
|
0188a1cf8e989dcd180c280a4db4d00c44bac390
|
65badf61fb9a5430596d6d2c0b9b7833cf30ec06
|
refs/heads/master
| 2021-01-16T21:59:35.657394
| 2016-02-01T22:08:18
| 2016-02-01T22:08:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
""" run with
nosetests -v --nocapture tests/test_model.py
or
nosetests -v tests/test_model.py
"""
from __future__ import print_function
from pprint import pprint
from cloudmesh_base.util import HEADING
import cloudmesh_client.db
import cloudmesh_client.db.model
class Test_model:
def setup(self):
pass
def tearDown(self):
pass
def test_001(self):
HEADING()
pprint(cloudmesh_client.db.tables())
assert True
def test_002(self):
HEADING()
print(cloudmesh_client.db.tablenames())
assert True
def test_003(self):
HEADING()
for name in cloudmesh_client.db.tablenames():
print(cloudmesh_client.db.table(name))
assert True
|
[
"laszewski@gmail.com"
] |
laszewski@gmail.com
|
b113b1db3bfe5f8e92de554cc4f803a2b126bac7
|
902e8b6f2c39c0a7baa8abd9637aa43f4be27e27
|
/Code/Chapter 1/src/blueblog/urls.py
|
9e80e946967b3d343885c7d48be82d6ec68c8c7b
|
[] |
no_license
|
PacktPublishing/Django-Projects-Blueprints
|
8151e611ae5cf95dc985ac7d08ce503bd41e0c4a
|
7d2409ea1b43b057d1e4c337e348cb6e102f75d6
|
refs/heads/master
| 2023-02-08T13:34:22.658965
| 2023-01-30T10:17:40
| 2023-01-30T10:17:40
| 59,006,898
| 32
| 30
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import login
from django.contrib.auth.views import logout
from accounts.views import UserRegistrationView
from blog.views import NewBlogView
from blog.views import HomeView
from blog.views import UpdateBlogView
from blog.views import NewBlogPostView
from blog.views import UpdateBlogPostView
from blog.views import BlogPostDetailsView
from blog.views import SharePostWithBlog
from blog.views import StopSharingPostWithBlog
from blog.views import ShareBlogPostView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', HomeView.as_view(), name='home'),
url(r'^new-user/$', UserRegistrationView.as_view(), name='user_registration'),
url(r'^login/$', login, {'template_name': 'login.html'}, name='login'),
url(r'^logout/$', logout, {'next_page': '/login/'}, name='logout'),
url(r'^blog/new/$', NewBlogView.as_view(), name='new-blog'),
url(r'^blog/(?P<pk>\d+)/update/$', UpdateBlogView.as_view(), name='update-blog'),
url(r'blog/post/new/$', NewBlogPostView.as_view(), name='new-blog-post'),
url(r'blog/post/(?P<pk>\d+)/update/$', UpdateBlogPostView.as_view(), name='update-blog-post'),
url(r'blog/post/(?P<pk>\d+)/$', BlogPostDetailsView.as_view(), name='blog-post-details'),
url(r'blog/post/(?P<pk>\d+)/share/$', ShareBlogPostView.as_view(), name='share-blog-post-with-blog'),
url(r'blog/post/(?P<post_pk>\d+)/share/to/(?P<blog_pk>\d+)/$', SharePostWithBlog.as_view(), name='share-post-with-blog'),
url(r'blog/post/(?P<post_pk>\d+)/stop/share/to/(?P<blog_pk>\d+)/$', StopSharingPostWithBlog.as_view(), name='stop-sharing-post-with-blog'),
]
|
[
"packt.danishs@gmail.com"
] |
packt.danishs@gmail.com
|
a9e2cbb4176684f4ffa52c1888fae3102c5fa7b6
|
9b59f76f3b312951519a15651290476c34a54174
|
/QUANTAXIS_Test/QABacktest_Test/QABacktestSimple_Test.py
|
37e9feda4909d833898186c4d41be55ad36d35fd
|
[
"MIT"
] |
permissive
|
sjtututu/QUANTAXIS
|
b8d9ba35d20159680f25cd3e583ebcfc7ff34c75
|
e9e20cdeda8b8d132433037b639a7e60f286a190
|
refs/heads/master
| 2020-08-16T11:19:19.689925
| 2020-02-22T01:21:57
| 2020-02-22T01:21:57
| 215,495,655
| 1
| 0
|
MIT
| 2019-12-28T08:13:57
| 2019-10-16T08:22:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,471
|
py
|
import unittest
import numpy as np
import pandas as pd
import QUANTAXIS as QA
class QABacktestSimple_Test(unittest.TestCase):
# define the MACD strategy
def MACD_JCSC(self, dataframe, SHORT=12, LONG=26, M=9):
"""
1. DIF crossing above DEA ("golden cross", CROSS_JC): a reference buy signal.
2. DIF crossing below DEA ("death cross", CROSS_SC): a reference sell signal.
"""
CLOSE = dataframe.close
DIFF = QA.EMA(CLOSE, SHORT) - QA.EMA(CLOSE, LONG)
DEA = QA.EMA(DIFF, M)
MACD = 2 * (DIFF - DEA)
CROSS_JC = QA.CROSS(DIFF, DEA)
CROSS_SC = QA.CROSS(DEA, DIFF)
ZERO = 0
return pd.DataFrame(
{'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC, 'CROSS_SC': CROSS_SC, 'ZERO': ZERO})
def setUp(self):
# prepare the test data
# create account
self.Account = QA.QA_Account()
self.Broker = QA.QA_BacktestBroker()
self.Account.reset_assets(1000000)
self.Account.account_cookie = 'user_admin_macd'
# get data from mongodb
self.data = QA.QA_fetch_stock_day_adv(
['000001', '000002', '000004', '600000'], '2017-09-01', '2018-05-20')
self.data = self.data.to_qfq()
# add indicator
self.ind = self.data.add_func(self.MACD_JCSC)
# ind.xs('000001',level=1)['2018-01'].plot()
self.data_forbacktest = self.data.select_time(
'2018-01-01', '2018-05-20')
def tearDown(self):
print(self.Account.history)
print(self.Account.history_table)
print(self.Account.daily_hold)
# create Risk analysis
Risk = QA.QA_Risk(self.Account)
print(Risk.message)
print(Risk.assets)
Risk.plot_assets_curve()
Risk.plot_dailyhold()
Risk.plot_signal()
# Risk.assets.plot()
# Risk.benchmark_assets.plot()
# save result
self.Account.save()
Risk.save()
account_info = QA.QA_fetch_account(
{'account_cookie': 'user_admin_macd'})
account = QA.QA_Account().from_message(account_info[0])
print(account)
def test_simpleQABacktest(self):
for items in self.data_forbacktest.panel_gen:
for item in items.security_gen:
daily_ind = self.ind.loc[item.index]
if daily_ind.CROSS_JC.iloc[0] > 0:
order = self.Account.send_order(
code=item.code[0],
time=item.date[0],
amount=1000,
towards=QA.ORDER_DIRECTION.BUY,
price=0,
order_model=QA.ORDER_MODEL.CLOSE,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
if order:
self.Broker.receive_order(QA.QA_Event(order=order, market_data=item))
trade_mes = self.Broker.query_orders(self.Account.account_cookie, 'filled')
res = trade_mes.loc[order.account_cookie, order.realorder_id]
order.trade(res.trade_id, res.trade_price,
res.trade_amount, res.trade_time)
elif daily_ind.CROSS_SC.iloc[0] > 0:
if self.Account.sell_available.get(item.code[0], 0) > 0:
order = self.Account.send_order(
code=item.code[0],
time=item.date[0],
amount=self.Account.sell_available.get(
item.code[0], 0),
towards=QA.ORDER_DIRECTION.SELL,
price=0,
order_model=QA.ORDER_MODEL.MARKET,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
if order:
self.Broker.receive_order(QA.QA_Event(order=order, market_data=item))
trade_mes = self.Broker.query_orders(self.Account.account_cookie, 'filled')
res = trade_mes.loc[order.account_cookie, order.realorder_id]
order.trade(res.trade_id, res.trade_price,
res.trade_amount, res.trade_time)
self.Account.settle()
|
[
"415496929@qq.com"
] |
415496929@qq.com
|
922632bd7fd107d2f4b5713afca0a914316f2f55
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/195/48791/submittedfiles/testes.py
|
697187f7d52f0e988ed1e5a2cdacc5e64b225503
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# -*- coding: utf-8 -*-
a=int(input('digite a:'))
b=int(input('digite b:'))
c=int(input('digite c:'))
d=int(input('digite d:'))
if a>b and a>c and a>d:
print(a)
if b>a and b>c and b>d:
print(b)
if c>a and c>b and c>d:
print(c)
if d>a and d>b and d>c:
print(d)
if a<b and a<c and a<d:
print(a)
if b<a and b<c and b<d:
print(b)
if c<a and c<b and c<d:
print(c)
if d<a and d<b and d<c:
print(d)
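# Prints the largest of a, b, c, d followed by the smallest; because the comparisons
# are strict, nothing is printed for a maximum or minimum that is tied.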
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
d80dd720858dc042a2f195293139c38d8a080e38
|
50de54517ef5e157b43598e412c477fd66890a3e
|
/Assignment 05/Problem 04.py
|
b0883029d93e9bfb1ca023132749cea0e5ea3943
|
[] |
no_license
|
Shihabsarker93/BRACU-CSE111
|
f530be247bebaaee9cc5e85948dc070adae0c6ae
|
17c95c76f84abffe9d9bdcb5861fbacbc510b5a6
|
refs/heads/main
| 2023-08-13T15:33:57.331850
| 2021-10-07T10:56:09
| 2021-10-07T10:56:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
class Color:
def __init__(self, color):
self.clr = color
def __add__(self, other):
self.clr = self.clr + other.clr
if self.clr == "redyellow" or self.clr == "yellowred":
self.clr = "Orange"
elif self.clr == "redblue" or self.clr == "bluered":
self.clr = "Violet"
elif self.clr == "yellowblue" or self.clr == "blueyellow":
self.clr = "Green"
return Color(self.clr)
C1 = Color(input("First Color: ").lower())
C2 = Color(input("Second Color: ").lower())
C3 = C1 + C2
print("Color formed:", C3.clr)
|
[
"mirzamahrabhossain@gmail.com"
] |
mirzamahrabhossain@gmail.com
|
51856a03ef40020ac8c9e0586c08bcf06f66111d
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part003471.py
|
2cabb575ba5e4c06ee0f115ab7c8fc7bab070e46
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher16359(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.4.1.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher16359._instance is None:
CommutativeMatcher16359._instance = CommutativeMatcher16359()
return CommutativeMatcher16359._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 16358
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.4.1.1.0', S(1))
except ValueError:
pass
else:
pass
# State 16360
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 16361
if len(subjects) == 0:
pass
# 0: f*x
yield 0, subst2
subjects.appendleft(tmp2)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp4 = subjects.popleft()
associative1 = tmp4
associative_type1 = type(tmp4)
subjects5 = deque(tmp4._args)
matcher = CommutativeMatcher16363.get()
tmp6 = subjects5
subjects5 = []
for s in tmp6:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp6, subst0):
pass
if pattern_index == 0:
pass
# State 16364
if len(subjects) == 0:
pass
# 0: f*x
yield 0, subst1
subjects.appendleft(tmp4)
return
yield
from .generated_part003472 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
c3e9e1cc8cff9dfb3fae569dd6c04fa4f03eb0c9
|
b1fbe7460427dbb891d4b1951e43e551e86b1e3b
|
/arcnlp/torch/nn/encoders/rnn_encoder.py
|
650f222a3a14fcc7307c3081e46773c751216295
|
[] |
no_license
|
linhx13/arc-nlp
|
88a45601e09deb7883ddf4583f6f2f4607fb85d0
|
760cca0d44958fb4011eaa039263575388a858ae
|
refs/heads/master
| 2023-05-04T12:59:21.232168
| 2021-05-18T17:38:28
| 2021-05-18T17:38:28
| 230,442,944
| 1
| 0
| null | 2021-04-17T03:41:42
| 2019-12-27T12:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,606
|
py
|
import torch
import torch.nn as nn
from ...nn.utils import get_sequence_lengths
__all__ = ["RNNEncoder", "LSTMEncoder", "GRUEncoder"]
class _RNNBaseEncoder(nn.Module):
def __init__(self, module, return_sequences):
super(_RNNBaseEncoder, self).__init__()
self.module = module
self.return_sequences = return_sequences
@property
def input_dim(self) -> int:
return self.module.input_size
@property
def output_dim(self) -> int:
return self.module.hidden_size * (
2 if self.module.bidirectional else 1
)
def forward(
self,
inputs: torch.Tensor,
mask: torch.BoolTensor = None,
hidden_state: torch.Tensor = None,
) -> torch.Tensor:
if mask is None:
outputs, _ = self.module(inputs, hidden_state)
if self.return_sequences:
return outputs
else:
return outputs[:, -1, :]
total_length = inputs.size(1)
lengths = get_sequence_lengths(mask)
packed_inputs = nn.utils.rnn.pack_padded_sequence(
inputs, lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed_outputs, state = self.module(packed_inputs, hidden_state)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
packed_outputs, batch_first=True, total_length=total_length
)
if self.return_sequences:
return outputs
else:
if isinstance(state, tuple):
state = state[0]
state = state.transpose(0, 1)
num_directions = 2 if self.module.bidirectional else 1
last_state = state[:, -num_directions:, :]
return last_state.contiguous().view([-1, self.output_dim])
class RNNEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
module = nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
class LSTMEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
module = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
class GRUEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
module = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
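# Usage sketch (illustrative only, assuming the arcnlp package and its nn.utils helper
# are importable; the sizes below are arbitrary): encode a padded batch of three
# sequences and keep only the final state of a bidirectional LSTM.
if __name__ == "__main__":
    batch = torch.randn(3, 5, 8)                      # (batch, time, features)
    mask = torch.tensor([[1, 1, 1, 1, 0],
                         [1, 1, 1, 0, 0],
                         [1, 1, 1, 1, 1]], dtype=torch.bool)
    encoder = LSTMEncoder(input_size=8, hidden_size=16,
                          bidirectional=True, return_sequences=False)
    encoded = encoder(batch, mask)
    print(encoded.shape)                              # expected: torch.Size([3, 32])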
|
[
"mylhx288@gmail.com"
] |
mylhx288@gmail.com
|
c5b7c0831380a9b4fd9effc5cea7908430770144
|
92f6ffb240a1fbaa52ae23f614663b2b915e4187
|
/backend/home/migrations/0002_load_initial_data.py
|
79e2423fce37abb55ff579976a85fe906d1c2f41
|
[] |
no_license
|
crowdbotics-apps/msgs-sghsg56-dev-12782
|
3b196351f5ff932916802912c7740c7455a78459
|
10f95c9e897dcad50e21950879adc97b9fe689f4
|
refs/heads/master
| 2022-12-24T00:35:35.056481
| 2020-10-06T09:11:48
| 2020-10-06T09:11:48
| 301,672,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "MSGS-sghsg56"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">MSGS-sghsg56</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "msgs-sghsg56-dev-12782.botics.co"
site_params = {
"name": "MSGS-sghsg56",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
47c023614d7d2ba1c4b4f921d42350aec154cb40
|
ace30d0a4b1452171123c46eb0f917e106a70225
|
/filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/openstackclient/common/clientmanager.py
|
3e1a50e3e6423cbe6c7010004e2266d04e7627b8
|
[
"Python-2.0"
] |
permissive
|
juancarlosdiaztorres/Ansible-OpenStack
|
e98aa8c1c59b0c0040c05df292964520dd796f71
|
c01951b33e278de9e769c2d0609c0be61d2cb26b
|
refs/heads/master
| 2022-11-21T18:08:21.948330
| 2018-10-15T11:39:20
| 2018-10-15T11:39:20
| 152,568,204
| 0
| 3
| null | 2022-11-19T17:38:49
| 2018-10-11T09:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,435
|
py
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Manage access to the clients, including authenticating when needed."""
import logging
import pkg_resources
import sys
from osc_lib import clientmanager
from osc_lib import shell
LOG = logging.getLogger(__name__)
PLUGIN_MODULES = []
USER_AGENT = 'python-openstackclient'
class ClientManager(clientmanager.ClientManager):
"""Manages access to API clients, including authentication
Wrap osc_lib's ClientManager to maintain compatibility for the existing
plugin V2 interface. Some currently private attributes become public
in osc-lib so we need to maintain a transition period.
"""
# A simple incrementing version for the plugin to know what is available
PLUGIN_INTERFACE_VERSION = "2"
def __init__(
self,
cli_options=None,
api_version=None,
):
super(ClientManager, self).__init__(
cli_options=cli_options,
api_version=api_version,
# TODO(dtroyer): Remove this when osc-lib 1.2 is released
pw_func=shell.prompt_for_password,
)
# TODO(dtroyer): For compatibility; mark this for removal when plugin
# interface v2 is removed
self._region_name = self.region_name
self._interface = self.interface
self._cacert = self.cacert
self._insecure = not self.verify
# store original auth_type
self._original_auth_type = cli_options.auth_type
def setup_auth(self):
"""Set up authentication"""
if self._auth_setup_completed:
return
# NOTE(dtroyer): Validate the auth args; this is protected with 'if'
# because openstack_config is an optional argument to
# CloudConfig.__init__() and we'll die if it was not
# passed.
if self._cli_options._openstack_config is not None:
self._cli_options._openstack_config._pw_callback = \
shell.prompt_for_password
try:
self._cli_options._auth = \
self._cli_options._openstack_config.load_auth_plugin(
self._cli_options.config,
)
except TypeError as e:
self._fallback_load_auth_plugin(e)
return super(ClientManager, self).setup_auth()
def _fallback_load_auth_plugin(self, e):
# NOTES(RuiChen): Hack to avoid auth plugins choking on data they don't
# expect, delete fake token and endpoint, then try to
# load auth plugin again with user specified options.
# We know it looks ugly, but it's necessary.
if self._cli_options.config['auth']['token'] == 'x':
# restore original auth_type
self._cli_options.config['auth_type'] = \
self._original_auth_type
del self._cli_options.config['auth']['token']
del self._cli_options.config['auth']['endpoint']
self._cli_options._auth = \
self._cli_options._openstack_config.load_auth_plugin(
self._cli_options.config,
)
else:
raise e
def is_network_endpoint_enabled(self):
"""Check if the network endpoint is enabled"""
# NOTE(dtroyer): is_service_available() can also return None if
# there is no Service Catalog, callers here are
# not expecting that so fold None into True to
# use Network API by default
return self.is_service_available('network') is not False
# Plugin Support
def get_plugin_modules(group):
"""Find plugin entry points"""
mod_list = []
for ep in pkg_resources.iter_entry_points(group):
LOG.debug('Found plugin %r', ep.name)
__import__(ep.module_name)
module = sys.modules[ep.module_name]
mod_list.append(module)
init_func = getattr(module, 'Initialize', None)
if init_func:
init_func('x')
# Add the plugin to the ClientManager
setattr(
clientmanager.ClientManager,
module.API_NAME,
clientmanager.ClientCache(
getattr(sys.modules[ep.module_name], 'make_client', None)
),
)
return mod_list
def build_plugin_option_parser(parser):
"""Add plugin options to the parser"""
# Loop through extensions to get parser additions
for mod in PLUGIN_MODULES:
parser = mod.build_option_parser(parser)
return parser
# Get list of base plugin modules
PLUGIN_MODULES = get_plugin_modules(
'openstack.cli.base',
)
# Append list of external plugin modules
PLUGIN_MODULES.extend(get_plugin_modules(
'openstack.cli.extension',
))
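# Illustrative sketch (not part of this module): the minimal surface a plugin module
# is expected to expose so that get_plugin_modules() can attach it to the
# ClientManager. Everything below except the attribute names read above
# (API_NAME, Initialize, make_client, build_option_parser) is hypothetical.
#
#     # myplugin/client.py
#     API_NAME = 'myplugin'
#
#     def Initialize(arg):            # optional; called with 'x' at load time
#         pass
#
#     def make_client(instance):      # wrapped in clientmanager.ClientCache
#         return MyPluginClient(instance)
#
#     def build_option_parser(parser):
#         return parser
#
# The module is advertised through a setuptools entry point in the
# 'openstack.cli.base' or 'openstack.cli.extension' group so that
# pkg_resources.iter_entry_points() can discover it.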
|
[
"jcdiaztorres96@gmail.com"
] |
jcdiaztorres96@gmail.com
|
24a54a7565b8d38155fddd08742ae1389e50ac05
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5695413893988352_0/Python/algomaus/b.py
|
2d3892eda43ade0e73e3d0638dc64a9dc402c531
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,205
|
py
|
#! /usr/bin/env python
def parse(lines):
n = int(lines[0])
words = []
for i in range(n):
words.append(lines[i+1])
return words
def asInt(string, lastPosition):
if lastPosition == -1:
return 0
#if lastPosition == 0 and string[0] == '?':
#return 0
lis = []
for i in range(lastPosition+1):
lis.append(string[i])
return int(''.join(lis).replace(' ','').replace('[','').replace(']','').replace(',',''))
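# e.g. asInt(['1', '2', '?'], 1) == 12, and asInt(s, -1) == 0 for any s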
def solve(word):
splitted = word.split(' ')
coder = []
jammer = []
for i in splitted[0]:
coder.append(i)
for i in splitted[1]:
jammer.append(i)
coder_add = []
jammer_add = []
for i in range(len(coder)):
if coder[i] == '?' and jammer[i] == '?':
if i == 0 or (asInt(coder, i-1) == asInt(jammer, i-1)):
if i+1 < len(coder) and coder[i+1] != '?' and jammer[i+1] != '?':
if coder[i+1] > jammer[i+1]:
coder[i] = '0'
coder_add.append('0')
jammer[i] = '1'
jammer_add.append('1')
elif coder[i+1] < jammer[i+1]:
coder[i] = '1'
coder_add.append('1')
jammer[i] = '0'
jammer_add.append('0')
else:
coder[i] = '0'
coder_add.append(0)
jammer[i] = '0'
jammer_add.append(0)
else:
coder[i] = '0'
coder_add.append(0)
jammer[i] = '0'
jammer_add.append(0)
elif asInt(coder, i-1) > asInt(jammer, i-1):
coder[i] = '0'
coder_add.append(0)
jammer[i] = '9'
jammer_add.append(9)
else:
coder[i] = '9'
coder_add.append(9)
jammer[i] = '0'
jammer_add.append(0)
elif coder[i] == '?':
if asInt(coder, i-1) == asInt(jammer, i-1):
coder[i] = jammer[i]
coder_add.append(jammer[i])
#if int(jammer[i]) <= 5:
#coder[i] = '0'
#coder_add.append(0)
#else:
#coder[i] = '9'
#coder_add.append(9)
elif asInt(coder, i-1) > asInt(jammer, i-1):
coder[i] = '0'
coder_add.append(0)
else:
coder[i] = '9'
coder_add.append(9)
elif jammer[i] == '?':
if asInt(coder, i-1) == asInt(jammer, i-1):
jammer[i] = coder[i]
jammer_add.append(coder[i])
#if int(coder[i]) <= 5:
# jammer[i] = '0'
# jammer_add.append(0)
#else:
# jammer[i] = '9'
# jammer_add.append(9)
elif asInt(coder, i-1) < asInt(jammer, i-1):
jammer[i] = '0'
jammer_add.append(0)
else:
jammer[i] = '9'
jammer_add.append(9)
coder_add_str = str(coder).replace(' ','').replace('[','').replace(']','').replace(',','').replace('\'','')
jammer_add_str = str(jammer).replace(' ','').replace('[','').replace(']','').replace(',','').replace('\'','')
return coder_add_str + " " + jammer_add_str
#with open('A-large.in', 'r') as f:
with open('B-small-attempt1.in', 'r') as f:
words = parse(f.read().splitlines())
for i in range(len(words)):
wordSorted = solve(words[i])
print "Case #" + str(i+1) + ": " + wordSorted
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
1ebf50f2fe945bd4d55d54c13e76a24165a05cf2
|
a0f0efaaaf69d6ccdc2a91596db29f04025f122c
|
/build/botcmd_msgs/devel/lib/python2.7/dist-packages/botcmd_msgs/srv/_bot_getenabledi_cmd.py
|
56881c75882d7bfcd72f305eeff5b2ca7dffd6bc
|
[] |
no_license
|
chiuhandsome/ros_ws_test-git
|
75da2723154c0dadbcec8d7b3b1f3f8b49aa5cd6
|
619909130c23927ccc902faa3ff6d04ae0f0fba9
|
refs/heads/master
| 2022-12-24T05:45:43.845717
| 2020-09-22T10:12:54
| 2020-09-22T10:12:54
| 297,582,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,723
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from botcmd_msgs/bot_getenabledi_cmdRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class bot_getenabledi_cmdRequest(genpy.Message):
_md5sum = "481ac5a494c3140a2539020bd74c82c7"
_type = "botcmd_msgs/bot_getenabledi_cmdRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """int8 command
"""
__slots__ = ['command']
_slot_types = ['int8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
command
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(bot_getenabledi_cmdRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.command is None:
self.command = 0
else:
self.command = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.command
buff.write(_get_struct_b().pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.command,) = _get_struct_b().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.command
buff.write(_get_struct_b().pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.command,) = _get_struct_b().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_b = None
def _get_struct_b():
global _struct_b
if _struct_b is None:
_struct_b = struct.Struct("<b")
return _struct_b
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from botcmd_msgs/bot_getenabledi_cmdResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class bot_getenabledi_cmdResponse(genpy.Message):
_md5sum = "01a64608314d5f77b6df20caba78d455"
_type = "botcmd_msgs/bot_getenabledi_cmdResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """bool result
int32 status
"""
__slots__ = ['result','status']
_slot_types = ['bool','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
result,status
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(bot_getenabledi_cmdResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.result is None:
self.result = False
if self.status is None:
self.status = 0
else:
self.result = False
self.status = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_Bi().pack(_x.result, _x.status))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 5
(_x.result, _x.status,) = _get_struct_Bi().unpack(str[start:end])
self.result = bool(self.result)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_Bi().pack(_x.result, _x.status))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 5
(_x.result, _x.status,) = _get_struct_Bi().unpack(str[start:end])
self.result = bool(self.result)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_Bi = None
def _get_struct_Bi():
global _struct_Bi
if _struct_Bi is None:
_struct_Bi = struct.Struct("<Bi")
return _struct_Bi
class bot_getenabledi_cmd(object):
_type = 'botcmd_msgs/bot_getenabledi_cmd'
_md5sum = 'c310784b062f6ef0f7752130ef306c28'
_request_class = bot_getenabledi_cmdRequest
_response_class = bot_getenabledi_cmdResponse
|
[
"chiuhandsome1966@gmail.com"
] |
chiuhandsome1966@gmail.com
|
4570702ee558fd5356cbb6e61347d548044dc91f
|
98efe1aee73bd9fbec640132e6fb2e54ff444904
|
/loldib/getratings/models/NA/na_velkoz/na_velkoz_jng.py
|
03ed1057207505472af839e30740f1e89491e018
|
[
"Apache-2.0"
] |
permissive
|
koliupy/loldib
|
be4a1702c26546d6ae1b4a14943a416f73171718
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
refs/heads/master
| 2021-07-04T03:34:43.615423
| 2017-09-21T15:44:10
| 2017-09-21T15:44:10
| 104,359,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,545
|
py
|
from getratings.models.ratings import Ratings
class NA_Velkoz_Jng_Aatrox(Ratings):
pass
class NA_Velkoz_Jng_Ahri(Ratings):
pass
class NA_Velkoz_Jng_Akali(Ratings):
pass
class NA_Velkoz_Jng_Alistar(Ratings):
pass
class NA_Velkoz_Jng_Amumu(Ratings):
pass
class NA_Velkoz_Jng_Anivia(Ratings):
pass
class NA_Velkoz_Jng_Annie(Ratings):
pass
class NA_Velkoz_Jng_Ashe(Ratings):
pass
class NA_Velkoz_Jng_AurelionSol(Ratings):
pass
class NA_Velkoz_Jng_Azir(Ratings):
pass
class NA_Velkoz_Jng_Bard(Ratings):
pass
class NA_Velkoz_Jng_Blitzcrank(Ratings):
pass
class NA_Velkoz_Jng_Brand(Ratings):
pass
class NA_Velkoz_Jng_Braum(Ratings):
pass
class NA_Velkoz_Jng_Caitlyn(Ratings):
pass
class NA_Velkoz_Jng_Camille(Ratings):
pass
class NA_Velkoz_Jng_Cassiopeia(Ratings):
pass
class NA_Velkoz_Jng_Chogath(Ratings):
pass
class NA_Velkoz_Jng_Corki(Ratings):
pass
class NA_Velkoz_Jng_Darius(Ratings):
pass
class NA_Velkoz_Jng_Diana(Ratings):
pass
class NA_Velkoz_Jng_Draven(Ratings):
pass
class NA_Velkoz_Jng_DrMundo(Ratings):
pass
class NA_Velkoz_Jng_Ekko(Ratings):
pass
class NA_Velkoz_Jng_Elise(Ratings):
pass
class NA_Velkoz_Jng_Evelynn(Ratings):
pass
class NA_Velkoz_Jng_Ezreal(Ratings):
pass
class NA_Velkoz_Jng_Fiddlesticks(Ratings):
pass
class NA_Velkoz_Jng_Fiora(Ratings):
pass
class NA_Velkoz_Jng_Fizz(Ratings):
pass
class NA_Velkoz_Jng_Galio(Ratings):
pass
class NA_Velkoz_Jng_Gangplank(Ratings):
pass
class NA_Velkoz_Jng_Garen(Ratings):
pass
class NA_Velkoz_Jng_Gnar(Ratings):
pass
class NA_Velkoz_Jng_Gragas(Ratings):
pass
class NA_Velkoz_Jng_Graves(Ratings):
pass
class NA_Velkoz_Jng_Hecarim(Ratings):
pass
class NA_Velkoz_Jng_Heimerdinger(Ratings):
pass
class NA_Velkoz_Jng_Illaoi(Ratings):
pass
class NA_Velkoz_Jng_Irelia(Ratings):
pass
class NA_Velkoz_Jng_Ivern(Ratings):
pass
class NA_Velkoz_Jng_Janna(Ratings):
pass
class NA_Velkoz_Jng_JarvanIV(Ratings):
pass
class NA_Velkoz_Jng_Jax(Ratings):
pass
class NA_Velkoz_Jng_Jayce(Ratings):
pass
class NA_Velkoz_Jng_Jhin(Ratings):
pass
class NA_Velkoz_Jng_Jinx(Ratings):
pass
class NA_Velkoz_Jng_Kalista(Ratings):
pass
class NA_Velkoz_Jng_Karma(Ratings):
pass
class NA_Velkoz_Jng_Karthus(Ratings):
pass
class NA_Velkoz_Jng_Kassadin(Ratings):
pass
class NA_Velkoz_Jng_Katarina(Ratings):
pass
class NA_Velkoz_Jng_Kayle(Ratings):
pass
class NA_Velkoz_Jng_Kayn(Ratings):
pass
class NA_Velkoz_Jng_Kennen(Ratings):
pass
class NA_Velkoz_Jng_Khazix(Ratings):
pass
class NA_Velkoz_Jng_Kindred(Ratings):
pass
class NA_Velkoz_Jng_Kled(Ratings):
pass
class NA_Velkoz_Jng_KogMaw(Ratings):
pass
class NA_Velkoz_Jng_Leblanc(Ratings):
pass
class NA_Velkoz_Jng_LeeSin(Ratings):
pass
class NA_Velkoz_Jng_Leona(Ratings):
pass
class NA_Velkoz_Jng_Lissandra(Ratings):
pass
class NA_Velkoz_Jng_Lucian(Ratings):
pass
class NA_Velkoz_Jng_Lulu(Ratings):
pass
class NA_Velkoz_Jng_Lux(Ratings):
pass
class NA_Velkoz_Jng_Malphite(Ratings):
pass
class NA_Velkoz_Jng_Malzahar(Ratings):
pass
class NA_Velkoz_Jng_Maokai(Ratings):
pass
class NA_Velkoz_Jng_MasterYi(Ratings):
pass
class NA_Velkoz_Jng_MissFortune(Ratings):
pass
class NA_Velkoz_Jng_MonkeyKing(Ratings):
pass
class NA_Velkoz_Jng_Mordekaiser(Ratings):
pass
class NA_Velkoz_Jng_Morgana(Ratings):
pass
class NA_Velkoz_Jng_Nami(Ratings):
pass
class NA_Velkoz_Jng_Nasus(Ratings):
pass
class NA_Velkoz_Jng_Nautilus(Ratings):
pass
class NA_Velkoz_Jng_Nidalee(Ratings):
pass
class NA_Velkoz_Jng_Nocturne(Ratings):
pass
class NA_Velkoz_Jng_Nunu(Ratings):
pass
class NA_Velkoz_Jng_Olaf(Ratings):
pass
class NA_Velkoz_Jng_Orianna(Ratings):
pass
class NA_Velkoz_Jng_Ornn(Ratings):
pass
class NA_Velkoz_Jng_Pantheon(Ratings):
pass
class NA_Velkoz_Jng_Poppy(Ratings):
pass
class NA_Velkoz_Jng_Quinn(Ratings):
pass
class NA_Velkoz_Jng_Rakan(Ratings):
pass
class NA_Velkoz_Jng_Rammus(Ratings):
pass
class NA_Velkoz_Jng_RekSai(Ratings):
pass
class NA_Velkoz_Jng_Renekton(Ratings):
pass
class NA_Velkoz_Jng_Rengar(Ratings):
pass
class NA_Velkoz_Jng_Riven(Ratings):
pass
class NA_Velkoz_Jng_Rumble(Ratings):
pass
class NA_Velkoz_Jng_Ryze(Ratings):
pass
class NA_Velkoz_Jng_Sejuani(Ratings):
pass
class NA_Velkoz_Jng_Shaco(Ratings):
pass
class NA_Velkoz_Jng_Shen(Ratings):
pass
class NA_Velkoz_Jng_Shyvana(Ratings):
pass
class NA_Velkoz_Jng_Singed(Ratings):
pass
class NA_Velkoz_Jng_Sion(Ratings):
pass
class NA_Velkoz_Jng_Sivir(Ratings):
pass
class NA_Velkoz_Jng_Skarner(Ratings):
pass
class NA_Velkoz_Jng_Sona(Ratings):
pass
class NA_Velkoz_Jng_Soraka(Ratings):
pass
class NA_Velkoz_Jng_Swain(Ratings):
pass
class NA_Velkoz_Jng_Syndra(Ratings):
pass
class NA_Velkoz_Jng_TahmKench(Ratings):
pass
class NA_Velkoz_Jng_Taliyah(Ratings):
pass
class NA_Velkoz_Jng_Talon(Ratings):
pass
class NA_Velkoz_Jng_Taric(Ratings):
pass
class NA_Velkoz_Jng_Teemo(Ratings):
pass
class NA_Velkoz_Jng_Thresh(Ratings):
pass
class NA_Velkoz_Jng_Tristana(Ratings):
pass
class NA_Velkoz_Jng_Trundle(Ratings):
pass
class NA_Velkoz_Jng_Tryndamere(Ratings):
pass
class NA_Velkoz_Jng_TwistedFate(Ratings):
pass
class NA_Velkoz_Jng_Twitch(Ratings):
pass
class NA_Velkoz_Jng_Udyr(Ratings):
pass
class NA_Velkoz_Jng_Urgot(Ratings):
pass
class NA_Velkoz_Jng_Varus(Ratings):
pass
class NA_Velkoz_Jng_Vayne(Ratings):
pass
class NA_Velkoz_Jng_Veigar(Ratings):
pass
class NA_Velkoz_Jng_Velkoz(Ratings):
pass
class NA_Velkoz_Jng_Vi(Ratings):
pass
class NA_Velkoz_Jng_Viktor(Ratings):
pass
class NA_Velkoz_Jng_Vladimir(Ratings):
pass
class NA_Velkoz_Jng_Volibear(Ratings):
pass
class NA_Velkoz_Jng_Warwick(Ratings):
pass
class NA_Velkoz_Jng_Xayah(Ratings):
pass
class NA_Velkoz_Jng_Xerath(Ratings):
pass
class NA_Velkoz_Jng_XinZhao(Ratings):
pass
class NA_Velkoz_Jng_Yasuo(Ratings):
pass
class NA_Velkoz_Jng_Yorick(Ratings):
pass
class NA_Velkoz_Jng_Zac(Ratings):
pass
class NA_Velkoz_Jng_Zed(Ratings):
pass
class NA_Velkoz_Jng_Ziggs(Ratings):
pass
class NA_Velkoz_Jng_Zilean(Ratings):
pass
class NA_Velkoz_Jng_Zyra(Ratings):
pass
|
[
"noreply@github.com"
] |
koliupy.noreply@github.com
|
e878483efb96ff6a75498766da8723c34864fa39
|
694d3929b23a8434cab14ddab623030a0fe4ac38
|
/apps/reports/views.py
|
f3a408b99c91eae03444b5863ff332d455c98ab2
|
[] |
no_license
|
gehongming/django_api
|
03fec87a25c2ad3cb603aad2f1b5d9b680debf12
|
fb8e0623e9171deb8706ed258cc5d5bd0d9fe6aa
|
refs/heads/main
| 2023-09-01T06:57:22.677374
| 2021-10-12T05:39:07
| 2021-10-12T05:39:07
| 415,173,097
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,648
|
py
|
import json
import re
import os
from datetime import datetime
from django.http import StreamingHttpResponse
from django.utils.encoding import escape_uri_path
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework import permissions, status
from rest_framework.decorators import action
from rest_framework.settings import settings
from .models import Reports
from .serializer import ReportsSerializer
from .utils import format_output, get_file_contents
class ReportsViewSet(ModelViewSet):
"""
list:
返回测试报告(多个)列表数据
create:
创建测试报告
update:
更新测试报告
partial_update:
更新(部分)测试报告
destroy:
逻辑删除测试报告
retrieve:
获取测试报告详情
"""
queryset = Reports.objects.filter(is_delete=0)
serializer_class = ReportsSerializer
ordering_fields = ['name']
# permission settings: require an authenticated user
permission_classes = [permissions.IsAuthenticated]
def list(self, request, *args, **kwargs):
# call the parent class list() first
response = super().list(request, *args, **kwargs)
response.data['results'] = format_output(response.data['results'])
return response
# Soft delete: override this hook; the stock destroy() removes the row physically
def perform_destroy(self, instance):
# flip the is_delete flag instead of deleting the row
instance.is_delete = 1
instance.save()
@action(detail=True)
def download(self, request, pk=None):
# 1. fetch the stored HTML source
instance = self.get_object()
html = instance.html
name = instance.name
# derive the stored file name with a regex
mtch = re.match(r'(.*_)\d+', name)
if mtch:
mtch = mtch.group(1)
report_filename = mtch + datetime.strftime(datetime.now(), '%Y%m%d%H%M%S' + '.html')
# build the file path; the project root is available via settings.BASE_DIR
else:
report_filename = name+'.html'
# settings.REPORTS_DIR points at the reports directory
report_path = os.path.join(settings.REPORTS_DIR, report_filename)
# write the HTML data into the saved report file
with open(report_path, 'w+', encoding='utf-8') as one_file:
one_file.write(html)
# streaming response format used for file downloads
response = StreamingHttpResponse(get_file_contents(report_path))
report_path_final = escape_uri_path(report_filename)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = f"attachment; filename*=UTF-8''{report_path_final}"
return response
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
data = serializer.data
try:
data['summary'] = json.loads(data['summary'], encoding='utf-8')
return Response(data)
except Exception as e:
return Response({
'err': '测试报告summary格式有误'
}, status=status.HTTP_400_BAD_REQUEST)
# def retrieve(self, request, *args, **kwargs):
# instance = self.get_object()
# try:
# summary = json.loads(instance.summary, encoding='utf-8')
# return Response({
# 'id': instance.id,
# 'summary': summary
# }, status=status.HTTP_200_OK)
# except Exception:
# return Response({
# 'err': '测试报告summary格式有误'
# }, status=status.HTTP_400_BAD_REQUEST)
|
[
"1010562639@qq.com"
] |
1010562639@qq.com
|
397da806a95f70217bf79901c8e1ad9ffe4fcefe
|
e0ed932fc2e4edb953cc4e423362dabc19083008
|
/python/002_note/learn_with/002_有异常的例子.py
|
8704a23fa87700b015cb24d95bd2053e1d7f4bde
|
[] |
no_license
|
glfAdd/note
|
90baee45003ac3998d898dcfbc618caa28f33b74
|
19a9aff61450be25904bff0fe672f660d49d90ff
|
refs/heads/main
| 2023-05-27T13:28:36.092352
| 2023-05-24T03:35:58
| 2023-05-24T03:35:58
| 240,066,208
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
class Test:
def __enter__(self):
print('__enter__() is call!')
return self
@staticmethod
def start():
print('------------------------------ test')
return 1 / 0
def __exit__(self, exc_type, exc_value, traceback):
"""
@param exc_type:
@param exc_value:
@param traceback:
@return:
        True: the exception is suppressed (not re-raised)
        False: the exception is re-raised
"""
print('__exit__() is call!')
print(f'exc_type:{exc_type}')
print(f'exc_value:{exc_value}')
print(f'traceback:{traceback}')
print('__exit()__ is call!')
return True
# return False
with Test() as t:
print('------------ 1')
t.start()
print('------------ 2')
raise TypeError
print('------------ 3')
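# Added note (not part of the original script): start() raises ZeroDivisionError,
# so __exit__() is invoked with that exception; because it returns True the error
# is suppressed, the lines from '------------ 2' onwards never execute, and the
# script ends normally.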
|
[
"2239660080@qq.com"
] |
2239660080@qq.com
|
2e56820469786281eea6a55179cfaa0fae7337b3
|
5635a3b02f7695a50471c8c08970520858d2277c
|
/venv/bin/pyrsa-sign
|
12ff831eef1cc1f5b697b68f04379992425ffe5c
|
[] |
no_license
|
BethMwangi/Flask-social
|
358325ea09b143c2aaa059594607d0a872fcabd1
|
4d0d902ee959054a95f0d7ab0dbfee3692521f91
|
refs/heads/master
| 2020-04-02T06:13:40.307975
| 2016-06-13T17:16:11
| 2016-06-13T17:16:11
| 60,806,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
#!/home/beth/Documents/Github/Flask-social/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import sign
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(sign())
|
[
"wanjikumwangib@gmail.com"
] |
wanjikumwangib@gmail.com
|
|
e299c60be9d53012b8b77da119af0d359f1e54d0
|
c4ffab6cc6b5470a212d1b6a0d241de9427266ee
|
/test/functional/rpc_bind.py
|
ee454df4bb87a910eb3f5749321d750a3a4c467f
|
[
"MIT"
] |
permissive
|
Upsidedoge/upsidedoge
|
1b8d49787eedb84cb7c5aff77549d7d1239ab807
|
32dd022d43b8b90ae1aa1ad7d81c0dfeb89611a2
|
refs/heads/main
| 2023-04-26T16:56:17.024158
| 2021-05-21T21:12:57
| 2021-05-21T21:12:57
| 369,643,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,428
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running upsidedoged with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main()
|
[
"36169687+blockinator@users.noreply.github.com"
] |
36169687+blockinator@users.noreply.github.com
|
e496f6a4b65e3fb3ed5cffda376a44cc1e6829cb
|
7357d367b0af4650ccc5b783b7a59090fdde47bb
|
/py-appscript/tags/py-appscript-0.18.0/Lib/aem/types/objectspecifiers/testclause.py
|
cd023de05f848f30e64af15fc725899e0d6eb54c
|
[
"MIT"
] |
permissive
|
BarracudaPff/code-golf-data-python
|
fb0cfc74d1777c4246d56a5db8525432bf37ab1a
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
refs/heads/main
| 2023-05-29T05:52:22.856551
| 2020-05-23T22:12:48
| 2020-05-23T22:12:48
| 378,832,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,297
|
py
|
"""testclause -- Used to construct test expressions for use in by-filter references.
(C) 2005 HAS
"""
from CarbonX import kAE
import base
class Test(base.BASE):
"""Base class for all comparison and logic test classes (Equals, NotEquals, AND, OR, etc.)."""
def AND(self, operand2, *operands):
"""AND(test,...) --> logical AND test"""
return AND((self, operand2) + operands)
def OR(self, operand2, *operands):
"""OR(test,...) --> logical OR test"""
return OR((self, operand2) + operands)
NOT = property(lambda self: NOT((self,)), doc="NOT --> logical NOT test")
class _ComparisonTest(Test):
"""Subclassed by comparison test classes."""
def __init__(self, operand1, operand2):
self._operand1 = operand1
self._operand2 = operand2
def __repr__(self):
return "%r.%s(%r)" % (self._operand1, self._name, self._operand2)
def AEM_resolve(self, obj):
return getattr(self._operand1.AEM_resolve(obj), self._name)(self._operand2)
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeCompDescriptor, [(kAE.keyAEObject1, codecs.pack(self._operand1)), (kAE.keyAECompOperator, self._operator), (kAE.keyAEObject2, codecs.pack(self._operand2))])
class GreaterThan(_ComparisonTest):
_name = "gt"
_operator = base.packEnum(kAE.kAEGreaterThan)
class GreaterOrEquals(_ComparisonTest):
_name = "ge"
_operator = base.packEnum(kAE.kAEGreaterThanEquals)
class Equals(_ComparisonTest):
_name = "eq"
_operator = base.packEnum(kAE.kAEEquals)
class NotEquals(Equals):
_name = "ne"
_operatorNOT = base.packEnum(kAE.kAENOT)
def AEM_packSelf(self, codecs):
return self._operand1.eq(self._operand2).NOT.AEM_packSelf(codecs)
class LessThan(_ComparisonTest):
_name = "lt"
_operator = base.packEnum(kAE.kAELessThan)
class LessOrEquals(_ComparisonTest):
_name = "le"
_operator = base.packEnum(kAE.kAELessThanEquals)
class BeginsWith(_ComparisonTest):
_name = "beginswith"
_operator = base.packEnum(kAE.kAEBeginsWith)
class EndsWith(_ComparisonTest):
_name = "endswith"
_operator = base.packEnum(kAE.kAEEndsWith)
class Contains(_ComparisonTest):
_name = "contains"
_operator = base.packEnum(kAE.kAEContains)
class IsIn(Contains):
_name = "isin"
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeCompDescriptor, [(kAE.keyAEObject1, codecs.pack(self._operand2)), (kAE.keyAECompOperator, self._operator), (kAE.keyAEObject2, codecs.pack(self._operand1))])
class _LogicalTest(Test):
"""Subclassed by logical test classes."""
def __init__(self, operands):
self._operands = operands
def __repr__(self):
return "%r.%s(%s)" % (self._operands[0], self._name, repr(list(self._operands[1:]))[1:-1])
def AEM_resolve(self, obj):
return getattr(self._operands[0].AEM_resolve(obj), self._name)(*self._operands[1:])
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeLogicalDescriptor, [(kAE.keyAELogicalOperator, self._operator), (kAE.keyAELogicalTerms, codecs.pack(self._operands))])
class AND(_LogicalTest):
_operator = base.packEnum(kAE.kAEAND)
_name = "AND"
class OR(_LogicalTest):
_operator = base.packEnum(kAE.kAEOR)
_name = "OR"
class NOT(_LogicalTest):
_operator = base.packEnum(kAE.kAENOT)
_name = "NOT"
def __repr__(self):
return "%r.NOT" % self._operands[0]
def AEM_resolve(self, obj):
return self._operands[0].AEM_resolve(obj).NOT
|
[
"sokolov.yas@gmail.com"
] |
sokolov.yas@gmail.com
|
e60acfc6dfaaa850aa14c36de95d0f2dd9dbd345
|
baefee5fbbc015cdc0b71ffc8956fad2d7d93683
|
/openstack_dashboard/dashboards/admin/routers/ports/forms.py
|
6010f5c792917a435eb64386f99e60d176fda8e1
|
[
"Apache-2.0"
] |
permissive
|
dsullivanwr/stx-horizon
|
8312fa01bf28a6bfad175e66f4172add6cabf60c
|
ee6c9b17e34d1dc310790b9d5e0252361c86b8fb
|
refs/heads/master
| 2020-03-29T06:51:49.902050
| 2018-10-11T19:37:40
| 2018-10-11T19:37:40
| 149,643,878
| 0
| 0
|
Apache-2.0
| 2018-10-10T16:02:36
| 2018-09-20T17:11:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,970
|
py
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers.ports \
import forms as project_forms
LOG = logging.getLogger(__name__)
class SetGatewayForm(project_forms.SetGatewayForm):
network_id = forms.ChoiceField(label=_("External Network"))
ip_address = forms.IPField(
label=_("IP Address (optional)"),
required=False,
initial="",
help_text=_("IP address of gateway interface (e.g. 192.168.0.254). "
"Specify an explicit address to use when creating the "
"gateway interface. If one is not specified an address "
"will be allocated from the external subnet."),
version=forms.IPv4 | forms.IPv6,
mask=False)
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
enable_snat = forms.BooleanField(label=_("Enable SNAT"),
initial=True, required=False)
failure_url = 'horizon:admin:routers:index'
def handle(self, request, data):
try:
ip_address = data.get('ip_address') or None
enable_snat = data.get('enable_snat', True)
api.neutron.router_add_gateway(request,
data['router_id'],
data['network_id'],
ip_address=ip_address,
enable_snat=enable_snat)
msg = _('Gateway interface is added')
LOG.debug(msg)
messages.success(request, msg)
return True
except Exception as e:
msg = _('Failed to set gateway %s') % e
LOG.info(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
|
[
"dtroyer@gmail.com"
] |
dtroyer@gmail.com
|
cbb5b5e0a29153cfef89be24a515e1b90dbd5ce0
|
2a1e2c298773148983805f1e0fba62bc2bf79267
|
/lib/network/vgg_base.py
|
7057d7ce930283300e3f9abeacd0c7ce46869275
|
[] |
no_license
|
copperdong/CTPN
|
42fde81010ba5c0bff193b4132d4c397c251dedd
|
3d559406c7ad2a02ac54b07ff1cc3603b3c5b6c9
|
refs/heads/master
| 2020-11-25T10:51:23.753733
| 2019-07-22T12:29:15
| 2019-07-22T12:29:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
import tensorflow.contrib.slim as slim
from lib.utils.config import cfg
def vgg_base(inputs, scope=None):
featuremap_scale = 1
net = slim.conv2d(inputs, 64, [3, 3], scope='conv1_1')
net = slim.conv2d(net, 64, [3, 3], scope='conv1_2')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool1')
featuremap_scale *= 2
net = slim.conv2d(net, 128, [3, 3], scope='conv2_1')
net = slim.conv2d(net, 128, [3, 3], scope='conv2_2')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool2')
featuremap_scale *= 2
net = slim.conv2d(net, 256, [3, 3], scope='conv3_1')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_2')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_3')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool3')
featuremap_scale *= 2
net = slim.conv2d(net, 512, [3, 3], scope='conv4_1')
net = slim.conv2d(net, 512, [3, 3], scope='conv4_2')
net = slim.conv2d(net, 512, [3, 3], scope='conv4_3')
if featuremap_scale != cfg["ANCHOR_WIDTH"]:
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool4')
featuremap_scale *= 2
net = slim.conv2d(net, 512, [3, 3], scope='conv5_1')
net = slim.conv2d(net, 512, [3, 3], scope='conv5_2')
net = slim.conv2d(net, 512, [3, 3], scope='conv5_3')
return net, featuremap_scale
|
[
"chizhanyuefeng@gmail.com"
] |
chizhanyuefeng@gmail.com
|
39f9f6cb12e59735ebe32a3c579294e54cc3f58e
|
9039f309649d0b7c6dd974706fc507938ed0e47a
|
/03. Logistics.py
|
51054e067ae37313a5cfc1e9833e3de6735c07c5
|
[] |
no_license
|
antondelchev/For-Loop---More-Exercises
|
2b5dadb31c273611c15e6523b536f994a0353a52
|
891266ff8b931e19d179b22dd33647887814555e
|
refs/heads/main
| 2023-03-03T11:59:16.990004
| 2021-02-16T15:01:02
| 2021-02-16T15:01:02
| 335,062,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
number_of_loads = int(input())
tonnes_total = 0
bus_tonnes_total = 0
truck_tonnes_total = 0
train_tonnes_total = 0
bus_price_total = 0
truck_price_total = 0
train_price_total = 0
for i in range(1, number_of_loads + 1):
tonnes = int(input())
tonnes_total += tonnes
if tonnes <= 3:
bus_tonnes_total += tonnes
bus_price_total += tonnes * 200
elif 4 <= tonnes <= 11:
truck_tonnes_total += tonnes
truck_price_total += tonnes * 175
elif tonnes >= 12:
train_tonnes_total += tonnes
train_price_total += tonnes * 120
average_ton_price = (bus_price_total + truck_price_total + train_price_total) / tonnes_total
percent_tonnes_bus = bus_tonnes_total / tonnes_total * 100
percent_tonnes_truck = truck_tonnes_total / tonnes_total * 100
percent_tonnes_train = train_tonnes_total / tonnes_total * 100
print(f"{average_ton_price:.2f}")
print(f"{percent_tonnes_bus:.2f}%")
print(f"{percent_tonnes_truck:.2f}%")
print(f"{percent_tonnes_train:.2f}%")
|
[
"noreply@github.com"
] |
antondelchev.noreply@github.com
|
964b812d02375eb43441088299f997192ca9d36b
|
894b290b4f4f47b5eb523c23efd7bd6110d91b2f
|
/116_fang_shop/fang_shop/fang_shop/spiders/fang_shop_spider.py
|
c3f9547cad1f9b1ec5db7c8618dd0e8ddbf53a24
|
[] |
no_license
|
wliustc/SpiderS
|
6650c00616d11239de8c045828bafdc5a299b1ce
|
441f309c50d28c1a3917bed19321cd5cbe7c2861
|
refs/heads/master
| 2020-03-27T06:15:39.495785
| 2018-06-14T07:55:44
| 2018-06-14T07:55:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,684
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import re
from fang_shop.items import FangShopItem
import web
import urlparse
import hashlib
import json
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/59.0.3071.115 Safari/537.36'
}
dbo2o = web.database(dbn='mysql', db='o2o', user='writer', pw='hh$writer', port=3306, host='10.15.1.24')
db = web.database(dbn='mysql', db='hillinsight', user='writer', pw='hh$writer', port=3306, host='10.15.1.24')
class Fang_Shop_Spider(scrapy.Spider):
name = 'fang_shop_spider'
def start_requests(self):
sql = '''select city,city_link,province from t_hh_fang_city_list'''
results = db.query(sql)
for result in results:
if result['city'] == '北京':
url = 'http://shop.fang.com/loupan/house/'
yield scrapy.Request(url, headers=headers, callback=self.list_parse, meta={
'city': result['city'], 'province': result['province']
}, dont_filter=True)
else:
pattern = re.search('(.*?)\.fang', result['city_link'])
city_code = pattern.group(1)
url = 'http://shop.%s.fang.com/loupan/house/' % city_code
yield scrapy.Request(url, headers=headers, callback=self.list_parse, meta={
'city': result['city'], 'province': result['province']
}, dont_filter=True)
def list_parse(self, response):
content = str(response.body).decode('gb18030').encode('utf-8')
pattern = re.compile('class="title"><a target="_blank" href="(.*?)"')
city = response.meta['city']
province = response.meta['province']
url_list = re.findall(pattern, content)
for url in url_list:
url = re.sub('/esf/', '/', url)
url_new = url + 'xiangqing/'
yield scrapy.Request(url_new, headers=headers, callback=self.detail_parse, meta={
'city': city, 'province': province
}, dont_filter=True)
pattern_next = re.search('id="PageControl1_hlk_next" href="(.*?)"', content)
url_domain = urlparse.urlparse(response.url).netloc
if pattern_next:
url_next = 'http://' + url_domain + pattern_next.group(1)
yield scrapy.Request(url_next, headers=headers, callback=self.list_parse, meta={
'city': city, 'province': province
}, dont_filter=True)
def detail_parse(self, response):
content = str(response.body).decode('gb18030').encode('utf-8')
city = response.meta['city']
province = response.meta['province']
items = FangShopItem()
base_info = {}
pattern1 = re.search('所属区域:([\s\S]*?)<', content)
base_info['所属区域'] = pattern1.group(1)
pattern2 = re.search('楼盘地址:<span title="([\s\S]*?)"', content)
base_info['楼盘地址'] = pattern2.group(1)
pattern3 = re.search('环线位置:([\s\S]*?)<', content)
base_info['环线位置'] = pattern3.group(1)
pattern4 = re.search('物业类别:([\s\S]*?)<', content)
base_info['物业类别'] = pattern4.group(1)
pattern5 = re.search('建筑类别:([\s\S]*?)<', content)
base_info['建筑类别'] = pattern5.group(1)
pattern6 = re.search('总 层 数:([\s\S]*?)<', content)
base_info['总层数'] = pattern6.group(1)
pattern7 = re.search('开 发 商:([\s\S]*?)<', content)
base_info['开发商'] = pattern7.group(1)
pattern8 = re.search('竣工时间:([\s\S]*?)<', content)
base_info['竣工时间'] = pattern8.group(1)
pattern9 = re.search('物 业 费:([\s\S]*?)<', content)
base_info['物业费'] = pattern9.group(1)
pattern10 = re.search('物业公司:([\s\S]*?)<', content)
base_info['物业公司'] = pattern10.group(1)
pattern11 = re.search('占地面积:([\s\S]*?)<', content)
base_info['占地面积'] = pattern11.group(1)
pattern12 = re.search('建筑面积:([\s\S]*?)<', content)
base_info['建筑面积'] = pattern12.group(1)
pattern13 = re.search('开间面积:([\s\S]*?)<', content)
base_info['开间面积'] = pattern13.group(1)
pattern14 = re.search('是否可分割:([\s\S]*?)<', content)
base_info['是否可分割'] = pattern14.group(1)
pattern15 = re.search('电梯数量:([\s\S]*?)<', content)
base_info['电梯数量'] = pattern15.group(1)
pattern16 = re.search('空 调:([\s\S]*?)<', content)
base_info['空调'] = pattern16.group(1)
pattern17 = re.search('装修状况:([\s\S]*?)<', content)
base_info['装修状况'] = pattern17.group(1)
pattern18 = re.search('停 车 位:([\s\S]*?)<', content)
base_info['停车位'] = pattern18.group(1)
base_info = json.dumps(base_info, ensure_ascii=False, encoding='utf-8')
items['base_info'] = base_info
pattern19 = re.search('交通状况</dt>[\s\S]*?<dl class="xiangqing">([\s\S]*?)</div>', content)
traffic_con = pattern19.group(1)
if '暂无资料' in traffic_con:
items['traffic_info'] = '暂无资料'
# print traffic_con
# raw_input('enter')
else:
traffic_info = {}
pattern19_1 = re.search('公交:([\s\S]*?)<', traffic_con)
if pattern19_1:
traffic_info['公交'] = pattern19_1.group(1)
pattern19_2 = re.search('地铁:([\s\S]*?)<', traffic_con)
if pattern19_2:
traffic_info['地铁'] = pattern19_2.group(1)
traffic_info = json.dumps(traffic_info, ensure_ascii=False, encoding='utf-8')
items['traffic_info'] = traffic_info
pattern20 = re.search('周边信息</dt>[\s\S]*?<dl class="xiangqing">([\s\S]*?)</div>', content)
around_con = pattern20.group(1)
if '暂无资料' in around_con:
items['around_info'] = '暂无资料'
else:
around_info = {}
pattern20_1 = re.search('商场:([\s\S]*?)<', around_con)
if pattern20_1:
around_info['商场'] = pattern20_1.group(1)
pattern20_2 = re.search('医院:([\s\S]*?)<', around_con)
if pattern20_2:
around_info['医院'] = pattern20_2.group(1)
pattern20_3 = re.search('邮局:([\s\S]*?)<', around_con)
if pattern20_3:
around_info['邮局'] = pattern20_3.group(1)
pattern20_4 = re.search('银行:([\s\S]*?)<', around_con)
if pattern20_4:
around_info['银行'] = pattern20_4.group(1)
pattern20_5 = re.search('餐饮:([\s\S]*?)<', around_con)
if pattern20_5:
around_info['餐饮'] = pattern20_5.group(1)
around_info = json.dumps(around_info, ensure_ascii=False, encoding='utf-8')
items['around_info'] = around_info
pattern21 = re.search('class="biaoti">([\s\S]*?)<', content)
pattern22 = re.search('newcode=(\d+)"', content)
items['shop_name'] = pattern21.group(1)
if pattern22:
items['src_uid'] = pattern22.group(1)
else:
md5 = hashlib.md5()
md5.update(response.url)
items['src_uid'] = md5.hexdigest()
items['city'] = city
items['province'] = province
items['url'] = response.url
yield items
|
[
"luoshao23@gmail.com"
] |
luoshao23@gmail.com
|
0a61d3455c62c56d19a40625fbc67c86684cf673
|
de64b143a346585f51590bd674e8d13bbc672386
|
/algorithm/Intermediate_Class/뉴스 클러스터링/Wooseong.py
|
a3809d890839206d63713c42df2c288ccf43d48e
|
[] |
no_license
|
ai-kmu/etc
|
304ec20f59e4026025abdcbcae21863c80630dcb
|
9c29941e19b7dd2a2037b110dd6e16690e9a0cc2
|
refs/heads/master
| 2023-08-21T16:30:31.149956
| 2023-08-21T16:26:19
| 2023-08-21T16:26:19
| 199,843,899
| 3
| 24
| null | 2023-05-31T09:56:59
| 2019-07-31T11:36:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
import copy
def solution(str1, str2):
    # Ignore case - lowercase everything
str1 = str1.lower()
str2 = str2.lower()
    # Turn each str into a multiset - only alphabetic pairs are allowed
    # -> .isalpha() is True only when the whole str is alphabetic
set1 = []
for i in range(len(str1) - 1):
temp = str1[i:i+2]
if temp.isalpha():
set1.append(temp)
set2 = []
for i in range(len(str2) - 1):
temp = str2[i:i+2]
if temp.isalpha():
set2.append(temp)
    # If both multisets are empty, the similarity is defined as 1
if (not set1) and (not set2):
return 65536
    # Intersection and union
    # When an element appears in both multisets,
    # add it to the intersection as many times as the smaller count and to the union as many times as the larger count
    # If it does not overlap, add it to the union only
set1_copy = copy.deepcopy(set1)
set2_copy = copy.deepcopy(set2)
inter = []
union = []
    # When either one runs out, no further overlap is possible
while set1_copy and set2_copy:
elem = set1_copy.pop()
if elem in set2_copy:
            # set1 already had one element popped, so compensate with +1
in_set1 = set1_copy.count(elem) + 1
in_set2 = set2_copy.count(elem)
            # the smaller count goes into the intersection
            inter += [elem] * min(in_set1, in_set2)
            # the larger count goes into the union
            union += [elem] * max(in_set1, in_set2)
            # remove the elements that were just added
set1_copy = [i for i in set1_copy if i != elem]
set2_copy = [i for i in set2_copy if i != elem]
        # non-overlapping elements go into the union only
else:
union.append(elem)
    # Add whatever is left to the union (one of the two is an empty list)
union += set1_copy + set2_copy
    # print("intersection", inter)
    # print("union", union)
return ((len(inter) / len(union)) * 655360) // 10
|
[
"noreply@github.com"
] |
ai-kmu.noreply@github.com
|
77ea35da65f61abce7c44b9a46ee137770cc95ec
|
fc5becca3e2e48a444b512e059df1cd21601829b
|
/Aulas/Aula23A.py
|
3baac0f53fa2d741ffa7e4838bd99fbeb5af6205
|
[
"MIT"
] |
permissive
|
Felix-xilef/Curso-de-Python
|
c44bf8c22b393aefaed3a2bb3127ef7999e27fb8
|
cdff7c7f3850e6326e274c8c1987b9e1a18ce910
|
refs/heads/master
| 2021-05-19T11:09:22.644638
| 2020-04-01T22:09:02
| 2020-04-01T22:09:02
| 251,665,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
try:
a = int(input('\n\tNumerador: '))
b = int(input('\tDenominador: '))
r = a / b
# except: - a bare except just redirects whenever any error occurs (GENERIC)
# print('\n\tProblem found')  # erro - shows the error | erro.__class__ - shows the error's class
except (ValueError, TypeError):  # except SomeClass: - redirects only when an error of the given class occurs; *use parentheses and commas when there is more than one error type
print('\n\tTivemos um problema com os tipos de dados que você digitou')
except ZeroDivisionError:
print('\n\tNão é possível dividir um número por zero!')
except KeyboardInterrupt:
print('\n\tO usuário preferiu não informar os dados!')
except Exception as erro:  # stores the exception in the variable erro (GENERIC)
    print(f'\n\tProblema encontrao:\n\t{erro.__class__}')  # erro - shows the error | erro.__class__ - shows the error's class
else:  # optional (what runs when no error occurs)
print(f'\n\t{a}/{b} = {r}')
finally:  # optional (always runs, whether or not an error occurred)
print('\n\tVolte sempre! Muito obrigado!')
input('\n\nPressione <enter> para continuar')
|
[
"felixpb@yahoo.com.br"
] |
felixpb@yahoo.com.br
|
37f9ffe43f45931ee39051d3b509924093639327
|
33af6185b48bd76f97f0a74390a3a812ee216c78
|
/angr/angr/procedures/libc/fseek.py
|
12804e949829a38007056038d366ae0bb5839ae7
|
[
"BSD-2-Clause"
] |
permissive
|
Ruide/angr-dev
|
dab0cabd907fce47ac698f890c3f3a8b80ab7e2a
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
refs/heads/master
| 2022-11-10T11:27:13.355024
| 2017-10-07T14:29:09
| 2017-10-07T14:29:09
| 104,417,044
| 0
| 1
|
BSD-2-Clause
| 2022-10-16T04:48:10
| 2017-09-22T01:35:12
|
C
|
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
import angr
from . import io_file_data_for_arch
######################################
# fseek
######################################
class fseek(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, file_ptr, offset, whence):
# TODO: Support symbolic file_ptr, offset, and whence
# Make sure whence can only be one of the three values: SEEK_SET(0), SEEK_CUR(1), and SEEK_END(2)
if self.state.se.symbolic(whence) and len(self.state.se.eval_upto(whence, 2)) > 1:
raise angr.SimProcedureError('multi-valued "whence" is not supported in fseek.')
else:
# Get all possible values
all_whence = self.state.se.eval_upto(whence, 2)
if not all_whence:
raise angr.SimProcedureError('"whence" has no satisfiable value.')
# There is only one value left
whence_int = all_whence[0]
if whence_int not in (0, 1, 2):
return 22 # EINVAL
fd_offset = io_file_data_for_arch(self.state.arch)['fd']
fd = self.state.mem[file_ptr + fd_offset : ].int.resolved
r = self.state.posix.seek(fd, offset, whence_int)
return r
|
[
"rd.cheung.bupt.sms@gmail.com"
] |
rd.cheung.bupt.sms@gmail.com
|
fc1594f425c1a54f1e64a6aef2c262b5c450c273
|
736730d72c24470a0c9ba58309ee3a95fe09d5e4
|
/projeto/feriados/feriados/urls.py
|
d23c9df344b2022619b6e27648331c943d93a479
|
[] |
no_license
|
orlandosaraivajr/FATEC_2021_1SEM_Topicos3
|
3f9c6b983c8b012330527848862f9f22649f0f5a
|
83610f798510e1bad69eedaed6b3b4ed08e2014e
|
refs/heads/master
| 2023-05-02T10:24:05.865947
| 2021-05-19T00:20:38
| 2021-05-19T00:20:38
| 339,551,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('feriado.urls')),
]
|
[
"orlandosaraivajr@gmail.com"
] |
orlandosaraivajr@gmail.com
|
6e4abc00113d6b561e5acc7b39de395f44ae02c3
|
388ff52dec8f4780a2d1cfd3f07f9228373a6b03
|
/0x0A-python-inheritance/6-base_geometry.py
|
1f8561cd5c07e6e9c0738b8ac8295dfb5d3a7038
|
[] |
no_license
|
dairof7/holbertonschool-higher_level_programming
|
6bbbb0eb2f2c13553e63056e0cee0ade7e028afe
|
6de0ea30c02a69f9721b4304eb0d48fca626e2df
|
refs/heads/master
| 2023-01-14T09:58:13.327692
| 2020-11-10T16:51:50
| 2020-11-10T16:51:50
| 259,339,091
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
#!/usr/bin/python3
""" this module creates a class BaseGeometry"""
class BaseGeometry():
    """BaseGeometry class"""
    def area(self):
        """method area
        raises an Exception"""
raise Exception("area() is not implemented")
|
[
"dairof7@gmail.com"
] |
dairof7@gmail.com
|
a46a09b36dea4eddb1483fcdee6e292962b2ab51
|
f47d17b53977cf745d453b654529e8cd6be7890f
|
/3level_N20_ainbin1.py
|
120aacee0e37205a96e1666348518b2b537c19d0
|
[] |
no_license
|
rareearthquantum/model_upconversion_peter
|
b4cce7556a167ba0e9813625dc924d3542d33cd1
|
dcf08000ec21770659318409a686bb2b88a7a1be
|
refs/heads/master
| 2020-04-28T19:54:34.795590
| 2019-06-14T09:43:28
| 2019-06-14T09:43:28
| 175,526,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
from Frequency_response_3level import *
p = {}
p['deltamu'] = 0.
p['deltao'] = 0.
p['d13'] = 2e-32*math.sqrt(1/3)
p['d23'] = 2e-32*math.sqrt(2/3)
p['gamma13'] = p['d13']**2/(p['d13']**2+p['d23']**2)*1/11e-3
p['gamma23'] = p['d23']**2/(p['d13']**2+p['d23']**2)*1/11e-3
p['gamma2d'] = 1e6
p['gamma3d'] = 1e6
p['nbath'] = 20
p['gammamu'] = 1/(p['nbath']+1) * 1e3
p['go'] = 51.9 #optical coupling
p['No'] = 1.28e15 # number of atoms in the optical mode
p['deltac']=0 #detuning for
p['kappaoi']=2*pi*7.95e6 # intrinsic loss for optical resonator
p['kappaoc']=2*pi*1.7e6 # coupling loss for optical resonator
#p['df']=0.1e6 # how small descretisation step to take when integrating over the
# inhomogeneous lines
p['mean_delam']=0
p['sd_delam']=2*pi*25e6/2.355 #microwave inhomogeneous broadening
#2.355is to turn FWHM into standard deviation
p['mean_delao']=0
p['sd_delao']=2*pi*170e6/2.355 #optical inhomogeneous broadening
p['kappami'] = 650e3*2*pi # intrinsic loss for microwave cavity
p['kappamc'] = 70e3*2*pi # coupling loss for optical cavity
# this is for one of the two output ports
p['Nm'] = 2.22e16 #toal number of atoms
p['gm'] = 1.04 #coupling between atoms and microwave field
p['gammaoc']=2*pi*1.7e6
p['gammaoi']=2*pi*7.95e6
p['gammamc']=2*pi*70e3
p['gammami']=2*pi*650e3
muBohr=927.4009994e-26; # Bohr magneton in J/T in J* T^-1
p['mu12'] = 4.3803*muBohr # transition dipole moment for microwave cavity (J T^-1)
p['Lsample']=12e-3 # the length of the sample, in m
p['dsample']=5e-3 # the diameter of the sample, in m
p['fillfactor']=0.8 #microwave filling factor
p['freqmu'] = 5.186e9
p['freq_pump'] = 195113.36e9 #pump frequency
p['freqo']=p['freqmu']+p['freq_pump']
p['Lcavity_vac'] = 49.5e-3 # length of the vacuum part of the optical
# Fabry Perot (m)
p['Wcavity'] = 0.6e-3# width of optical resonator beam in sample (m)
p['nYSO'] = 1.76
p['Omega']=-492090.88755145477
delovals=np.linspace(-20e5,20e5,31)
delmvals=np.linspace(-1e6,1e6,31)
binvals=[600000]
ainvals=[600000]
aoutvals,boutvals,effic_a,effic_b=find_outputs(ainvals,binvals,delovals,delmvals,p)
np.savez('output_N20_ainbin1',aoutvals=aoutvals,boutvals=boutvals,effic_a=effic_a,effic_b=effic_b,ainvals=ainvals,binvals=binvals,delovals=delovals,delmvals=delmvals,p=p)
|
[
"peterbarnettnz@gmail.com"
] |
peterbarnettnz@gmail.com
|
6e94b8ef6dd9af3e5218e7cac10b5f3da2521727
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_270/ch54_2020_03_27_00_44_11_634478.py
|
4a4969942833af9e67d2166564562fd7a64f393d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
def calcula_fibonnacci(n):
    k = 0
    lista = []
    anterior = 0
    soma = 1
    while k < n:
        lista.append(soma)
        anterior, soma = soma, anterior + soma
        k += 1
    return lista
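# Added check (assumes the corrected update rule above): calcula_fibonnacci(5)
# returns [1, 1, 2, 3, 5].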
|
[
"you@example.com"
] |
you@example.com
|
0e6b139dec6db4c8aa222b7937adfc0f12e6045a
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/heatmap/_y0.py
|
8bae24f08506a20ba2d7ca6fb4ba46ef4651f570
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
import _plotly_utils.basevalidators
class Y0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name='y0', parent_name='heatmap', **kwargs):
super(Y0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc+clearAxisTypes',
implied_edits={'ytype': 'scaled'},
role='info',
**kwargs
)
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
44c1925930e893f90665e267105f0de38e06806c
|
885a722e3e5814ae4942ac5e8cf8d0091e734b4c
|
/게임 개발_Python/CodingTest.py
|
44a46c74629a33f66008719905f685de00396184
|
[] |
no_license
|
ledpear/algorithm
|
52f3ea25842eee20b3bbd48e51825b9df4942e03
|
4922c6fe5ca0b98a90dee218b756006e7ba05d82
|
refs/heads/master
| 2023-06-09T17:47:45.674244
| 2023-06-03T13:47:11
| 2023-06-03T13:47:11
| 133,370,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,493
|
py
|
n = 4
m = 4
pos_x = 1
pos_y = 1
dir = 0  # 0: north, 1: east, 2: south, 3: west
game_map = [[1,1,1,1], [1,0,0,1], [1,1,0,1], [1,1,1,1]]
bool_map = [[0,0,0,0], [0,0,0,0], [0,0,0,0], [0,0,0,0]]
bool_map[pos_y][pos_x] = 1
count = 0
score = 1
while True:
dir -= 1
if dir < 0 : dir = 3
bResult = False
if dir == 0 :
if pos_y - 1 >= 0 :
if game_map[pos_y - 1][pos_x] == 0 and bool_map[pos_y - 1][pos_x] == 0 :
pos_y -= 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 1 :
if pos_x + 1 < m :
if game_map[pos_y][pos_x + 1] == 0 and bool_map[pos_y][pos_x + 1] == 0 :
pos_x += 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 2 :
if pos_y + 1 < n :
if game_map[pos_y + 1][pos_x] == 0 and bool_map[pos_y + 1][pos_x] == 0 :
pos_y += 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 3 :
if pos_x - 1 >= 0 :
if game_map[pos_y][pos_x - 1] == 0 and bool_map[pos_y][pos_x - 1] == 0 :
pos_x -= 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
if bResult :
count = 0
else :
count += 1
if count == 4 :
if dir == 0 :
if pos_y + 1 < n :
if game_map[pos_y + 1][pos_x] == 0 :
pos_y += 1
count = 0
else :
break
else :
break
elif dir == 1 :
if pos_x - 1 >= 0 :
if game_map[pos_y][pos_x - 1] == 0 :
pos_x -= 1
count = 0
else :
break
else :
break
elif dir == 2 :
if pos_y - 1 >= 0 :
if game_map[pos_y - 1][pos_x] == 0 :
pos_y -= 1
count = 0
else :
break
else :
break
elif dir == 3 :
if pos_x + 1 < m :
if game_map[pos_y][pos_x + 1] == 0 :
pos_x += 1
count = 0
else :
break
else :
break
print(score)
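# Added note: with the hard-coded 4x4 map above, the simulation visits three land
# cells before every direction is blocked, so this prints 3.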
|
[
"tjsrb75@gmail.com"
] |
tjsrb75@gmail.com
|
d8a5803e900c1a81f57eb6e8232a6067e465a51c
|
3c300c79359f1c989df4403835abbc5513364fee
|
/bitshares_tradehistory_analyzer/parser.py
|
56c9520cd316cf14dfea532af60b1ebf20c94920
|
[
"MIT"
] |
permissive
|
ds-voting/bitshares-tradehistory-analyzer
|
73ef81a1748fabef055f512b46366dc848c09a15
|
1dfd293dd6b4d692a078c403b79355fef0165799
|
refs/heads/master
| 2020-07-23T15:06:04.733405
| 2019-07-19T13:51:33
| 2019-07-19T13:51:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,029
|
py
|
import copy
import logging
from decimal import Decimal
from bitshares.account import Account
from bitshares.amount import Amount
from bitshares.asset import Asset
from .consts import LINE_DICT_TEMPLATE
log = logging.getLogger(__name__)
class Parser:
""" Entries parser
:param BitShares bitshares_instance:
:param Account account:
"""
def __init__(self, bitshares_instance, account):
self.bitshares = bitshares_instance
self.account = Account(account, bitshares_instance=self.bitshares)
def parse_transfer_entry(self, entry):
""" Parse single transfer entry into a dict object suitable for writing line
:param dict entry: elastic wrapper entry
:return: dict object suitable for writing line
"""
op_id = entry['account_history']['operation_id']
op_date = entry['block_data']['block_time']
op = entry['operation_history']['op_object']
data = copy.deepcopy(LINE_DICT_TEMPLATE)
amount = Amount(op['amount_'], bitshares_instance=self.bitshares)
from_account = Account(op['from'], bitshares_instance=self.bitshares)
to_account = Account(op['to'], bitshares_instance=self.bitshares)
fee = Amount(op['fee'], bitshares_instance=self.bitshares)
log.info('Transfer: {} -> {}, {}'.format(from_account.name, to_account.name, amount))
if from_account.name == self.account.name:
data['kind'] = 'Withdrawal'
data['sell_cur'] = amount.symbol
data['sell_amount'] = amount.amount
data['fee_cur'] = fee.symbol
data['fee_amount'] = fee.amount
else:
data['kind'] = 'Deposit'
data['buy_cur'] = amount.symbol
data['buy_amount'] = amount.amount
data['comment'] = op_id
data['date'] = op_date
return data
def parse_trade_entry(self, entry):
""" Parse single trade entry (fill order) into a dict object suitable for writing line
:param dict entry: elastic wrapper entry
:return: dict object suitable for writing line
"""
op_id = entry['account_history']['operation_id']
op_date = entry['block_data']['block_time']
op = entry['operation_history']['op_object']
data = copy.deepcopy(LINE_DICT_TEMPLATE)
sell_asset = Asset(op['pays']['asset_id'], bitshares_instance=self.bitshares)
sell_amount = Decimal(op['pays']['amount']).scaleb(-sell_asset['precision'])
buy_asset = Asset(op['receives']['asset_id'], bitshares_instance=self.bitshares)
buy_amount = Decimal(op['receives']['amount']).scaleb(-buy_asset['precision'])
fee_asset = Asset(op['fee']['asset_id'], bitshares_instance=self.bitshares)
fee_amount = Decimal(op['fee']['amount']).scaleb(-fee_asset['precision'])
# Subtract fee from buy_amount
# For ccgains, any fees for the transaction should already have been substracted from *amount*, but included
# in *cost*.
if fee_asset.symbol == buy_asset.symbol:
buy_amount -= fee_amount
data['kind'] = 'Trade'
data['sell_cur'] = sell_asset.symbol
data['sell_amount'] = sell_amount
data['buy_cur'] = buy_asset.symbol
data['buy_amount'] = buy_amount
data['fee_cur'] = fee_asset.symbol
data['fee_amount'] = fee_amount
data['comment'] = op_id
data['order_id'] = op['order_id']
data['prec'] = max(sell_asset['precision'], buy_asset['precision'])
# Prevent division by zero
price = Decimal('0')
price_inverted = Decimal('0')
if sell_amount and buy_amount:
price = buy_amount / sell_amount
price_inverted = sell_amount / buy_amount
data['price'] = price
data['price_inverted'] = price_inverted
data['date'] = entry['block_data']['block_time']
return data
|
[
"vvk@vvk.pp.ru"
] |
vvk@vvk.pp.ru
|
6db846cc3de7d7f5c3535eafad242cb11e1da445
|
9dee94907e6456a4af9855d358693923c17b4e0d
|
/0111_Minimum_Depth_of_Binary_Tree.py
|
711a407cde57b1a9863453b7f34b3ebbcf63c43b
|
[] |
no_license
|
chien-wei/LeetCode
|
e215915a8103e56f182040dacc9fb0d6996c86ec
|
0d6f414e7610fedb2ec4818ecf88d51aa69e1355
|
refs/heads/master
| 2021-05-13T14:48:22.891100
| 2019-08-20T05:52:59
| 2019-08-20T05:52:59
| 116,749,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def minDepth(self, root: TreeNode) -> int:
# BFS find the first leaf
if not root:
return 0
queue = [root]
depth = 1
while len(queue) > 0:
new_queue = []
for i in range(len(queue)):
q = queue[i]
if not q.left and not q.right:
return depth
if q.left:
new_queue.append(q.left)
if q.right:
new_queue.append(q.right)
queue = new_queue
depth += 1
return 0
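        # Added note: each node is enqueued at most once, so this BFS runs in O(n)
        # time with O(w) extra space, where w is the maximum width of the tree.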
|
[
"chien-wei@outlook.com"
] |
chien-wei@outlook.com
|
1b304b18d44960ff768c90217ce7ba455dec8c93
|
3378d73f5e7c67ddcf0179e3574357e3354c7c11
|
/stripe/db/api.py
|
11d21c1e2826ff99a1d951f04beb5a8753b50b8e
|
[
"Apache-2.0"
] |
permissive
|
babarnazmi/stripe
|
e8cece6f4697d05c4262b25f40e7056bb61349e5
|
f98454e7260b5140aaec35d932a78b3ada73e7a4
|
refs/heads/master
| 2021-01-15T12:41:17.140601
| 2013-10-30T04:25:50
| 2013-10-30T04:25:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,694
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQLAlchemy storage backend."""
from sqlalchemy.orm import exc
from stripe.common import exception
from stripe.db import models
from stripe.openstack.common.db import api
from stripe.openstack.common.db.sqlalchemy import session as db_session
from stripe.openstack.common import log as logging
LOG = logging.getLogger(__name__)
get_session = db_session.get_session
def get_instance():
"""Return a DB API instance."""
backend_mapping = {'sqlalchemy': 'stripe.db.api'}
return api.DBAPI(backend_mapping=backend_mapping)
def get_backend():
"""The backend is this module itself."""
return Connection()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
class Connection(object):
"""SqlAlchemy connection."""
def __init__(self):
pass
def create_agent(self, values):
"""Create a new agent."""
res = self._create_model(model=models.Agent(), values=values)
return res
def create_queue(self, values):
"""Create a new queue."""
res = self._create_model(model=models.Queue(), values=values)
return res
def create_queue_member(self, agent_id, queue_id):
"""Create a new queue member."""
values = {
'agent_id': agent_id,
'queue_id': queue_id,
}
res = self._create_model(model=models.QueueMember(), values=values)
return res
def delete_agent(self, agent_id):
"""Delete an agent."""
res = self._delete_model(model=models.Agent, id=agent_id)
if res != 1:
raise exception.AgentNotFound(agent_id=agent_id)
def delete_queue(self, queue_id):
"""Delete a queue."""
res = self._delete_model(model=models.Queue, id=queue_id)
if res != 1:
raise exception.QueueNotFound(queue_id=queue_id)
def delete_queue_member(self, agent_id, queue_id):
"""Delete a queue member."""
res = self._delete_model(
model=models.QueueMember, agent_id=agent_id, queue_id=queue_id
)
if res != 1:
raise exception.QueueMemberNotFound(
agent_id=agent_id
)
def get_agent(self, agent_id):
"""Retrieve information about the given agent."""
try:
res = self._get_model(model=models.Agent, id=agent_id)
except exc.NoResultFound:
raise exception.AgentNotFound(agent_id=agent_id)
return res
def get_queue(self, queue_id):
"""Retrieve information about the given queue."""
try:
res = self._get_model(model=models.Queue, id=queue_id)
except exc.NoResultFound:
raise exception.QueueNotFound(queue_id=queue_id)
return res
def get_queue_member(self, agent_id, queue_id):
"""Retrieve information about the given queue member."""
try:
res = self._get_model(
model=models.QueueMember, agent_id=agent_id, queue_id=queue_id
)
except exc.NoResultFound:
raise exception.QueueMemberNotFound(
agent_id=agent_id
)
return res
def get_user(self, user_id):
"""Retrieve information about the given user."""
try:
res = self._get_model(model=models.User, id=user_id)
except exc.NoResultFound:
raise exception.UserNotFound(user_id=user_id)
return res
def list_agents(self):
"""Retrieve a list of agents."""
res = self._list_model(model=models.Agent)
return res
def list_queues(self):
"""Retrieve a list of queues."""
res = self._list_model(model=models.Queue)
return res
def list_queue_members(self):
"""Retrieve a list of queue members."""
res = self._list_model(model=models.QueueMember)
return res
def list_users(self):
"""Retrieve a list of users."""
res = self._list_model(model=models.User)
return res
def _create_model(self, model, values):
"""Create a new model."""
model.update(values)
model.save()
return model
def _delete_model(self, model, **kwargs):
session = get_session()
with session.begin():
query = model_query(
model, session=session
).filter_by(**kwargs)
count = query.delete()
return count
def _get_model(self, model, **kwargs):
"""Retrieve information about the given model."""
query = model_query(model).filter_by(**kwargs)
res = query.one()
return res
def _list_model(self, model):
"""Retrieve a list of the given model."""
query = model_query(model)
return [m for m in query.all()]
|
[
"paul.belanger@polybeacon.com"
] |
paul.belanger@polybeacon.com
|
c0d8734c640e57bc7339310e1f014f3f748709bb
|
8b95a7225a67b6e8ad30b8ab0ef66076858a29e5
|
/app/db.py
|
87ae41110e3fe4c87ec667bc808b744a168090c4
|
[] |
no_license
|
tehhuu/auto_key
|
e413669b61b7f3f5832b66e753b86c68d16daa1a
|
95866259de5781cdde1f010d286c7e42ba99d5ff
|
refs/heads/master
| 2021-04-16T02:05:03.564332
| 2020-06-12T02:42:14
| 2020-06-12T02:42:14
| 252,633,541
| 0
| 0
| null | 2020-04-03T04:38:09
| 2020-04-03T04:38:09
| null |
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
from sqlalchemy import create_engine, Column, String, Integer, DATETIME
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
import hashlib
engine = create_engine('sqlite:///shimerundesu.db', connect_args={"check_same_thread": False})  # allow access from threads other than the one that created the engine
Base = declarative_base()
# Hash the given string
def hash(password): return str(hashlib.sha256(password.strip().encode("utf-8")).digest())
# Data structure (table definition)
class User(Base):
__tablename__ = 'users'
name = Column(String, primary_key=True, unique=True)
password = Column(String)
email = Column(String)
def __repr__(self):
        return "User<{}, {}, {}>".format(self.name, self.password, self.email)
Base.metadata.create_all(engine)
SessionMaker = sessionmaker(bind=engine)
session = SessionMaker()
if __name__ == "__main__":
    # Database creation: run this file directly to create the database
user1 = User(name="AAA", password=hash("AAA"), email="AAA@gmail.com")
user2 = User(name="BBB", password=hash("BBB"), email="BBB@gmail.com")
user3 = User(name="CCC", password=hash("CCC"), email="CCC@gmail.com")
user4 = User(name="DDD", password=hash("DDD"), email="DDD@gmail.com")
session.add(user1)
session.add(user2)
session.add(user3)
session.add(user4)
session.commit()
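    # Illustrative login check (added sketch, not part of the original file):
    #   user = session.query(User).filter_by(name="AAA").first()
    #   valid = user is not None and user.password == hash("AAA")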
|
[
"volley_neverlose_exile@yahoo.co.jp"
] |
volley_neverlose_exile@yahoo.co.jp
|
a0abbc1ed0bab74222442b06db0a1214f2cf0b8a
|
a44d853d6a7354129d7fdfcf0f43e4f9a9106015
|
/tests/mesh_utils_test.py
|
8e2b29f7f37cc3baabd584c9ba35ddee05fc4abe
|
[
"Apache-2.0"
] |
permissive
|
matthewfeickert/jax
|
4f6b9ba2a96e1521f776886a08be38dd229f1402
|
b0d96bd42440231cc7e98c61f52106f46578fca4
|
refs/heads/main
| 2021-12-10T06:03:36.919415
| 2021-12-09T06:04:13
| 2021-12-09T06:04:46
| 436,520,694
| 0
| 0
|
Apache-2.0
| 2021-12-09T07:23:30
| 2021-12-09T07:23:29
| null |
UTF-8
|
Python
| false
| false
| 6,407
|
py
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mesh utils."""
import dataclasses
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
from jax import test_util
from jax.experimental import mesh_utils
@dataclasses.dataclass
class MockTpuDevice:
"""Mock TPU device for testing."""
platform: str
device_kind: str
process_index: int
coords: Sequence[int]
core_on_chip: int
def mock_devices(x, y, z, dev_kind, two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 8x8, 4x4x4."""
devices = []
process_index = 0
device_id = 0
for k in range(z):
for j in range(0, y, 2):
for i in range(0, x, 2):
# Local 2x2 subgrid of chips, with 2 cores per chip.
host_devices = [
MockTpuDevice('tpu', dev_kind, process_index, (i, j, k), 0),
MockTpuDevice('tpu', dev_kind, process_index, (i, j, k), 1),
MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j, k), 0),
MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j, k), 1),
MockTpuDevice('tpu', dev_kind, process_index, (i, j + 1, k), 0),
MockTpuDevice('tpu', dev_kind, process_index, (i, j + 1, k), 1),
MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j + 1, k), 0),
MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j + 1, k), 1),
]
if two_cores_per_chip:
# Only include core_on_chip = 0.
host_devices = host_devices[::2]
devices.extend(host_devices)
device_id += len(host_devices)
process_index += 1
return devices
def mock_8x8_devices():
"""Hard-coded reproduction of jax.devices() output on 8x8."""
return mock_devices(8, 8, 1, 'TPU v3', False)
def mock_2x2x1_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 2x2x1."""
return mock_devices(2, 2, 1, 'TPU v4', two_cores_per_chip)
def mock_2x2x4_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 2x2x4."""
return mock_devices(2, 2, 4, 'TPU v4', two_cores_per_chip)
def mock_4x4x4_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 4x4x4."""
return mock_devices(4, 4, 4, 'TPU v4', two_cores_per_chip)
def mock_4x4x8_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 4x4x4."""
return mock_devices(4, 4, 8, 'TPU v4', two_cores_per_chip)
def mock_8x8x8_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 8x8x8."""
return mock_devices(8, 8, 8, 'TPU v4', two_cores_per_chip)
def mock_4x8x8_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 4x8x8."""
return mock_devices(4, 8, 8, 'TPU v4', two_cores_per_chip)
def mock_4x8x16_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 4x8x16."""
return mock_devices(4, 8, 16, 'TPU v4', two_cores_per_chip)
def mock_8x8x16_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 8x8x16."""
return mock_devices(8, 8, 16, 'TPU v4', two_cores_per_chip)
class PartitioningTest(test_util.JaxTestCase):
@parameterized.named_parameters(
('2x2x1_t', mock_2x2x1_devices, True, (2, 2, 1, 1)),
('2x2x1_f', mock_2x2x1_devices, False, (2, 2, 1, 2)),
('8x8x16_t', mock_8x8x16_devices, True, (8, 8, 16, 1)),
('8x8x16_f', mock_8x8x16_devices, False, (8, 8, 16, 2)),
)
def test_bounds_from_last_device(self, devices, two_cores_per_chip,
expected_bounds):
self.assertEqual(
mesh_utils._bounds_from_last_device(devices(two_cores_per_chip)[-1]),
expected_bounds)
@parameterized.named_parameters(
('4x4x4', mock_4x4x4_devices, (4, 4, 4)),
('4x4x8', mock_4x4x8_devices, (4, 4, 8)),
('8x8x8', mock_8x8x8_devices, (8, 8, 8)),
('8x8x16', mock_8x8x16_devices, (8, 8, 16)),
)
def test_jax_devices_order_normalized(self, devices, expected_shape):
jax_local_devices_from_process_0 = mock_2x2x1_devices(True)
jax_devices = devices(True)
normalized = mesh_utils._jax_devices_order_normalized(
jax_local_devices_from_process_0, jax_devices)
self.assertEqual(normalized.shape, expected_shape)
x, y, z = expected_shape
# major_to_minor: x, y, z
for i in range(x):
for j in range(y):
for k in range(z):
self.assertEqual(normalized[i, j, k].coords, (i, j, k))
@parameterized.named_parameters(
('2x2x1', mock_2x2x1_devices, [1, 1, 4], ((), (2,), (0, 1))),
('2x2x4', mock_2x2x4_devices, [1, 4, 4], ((), (2,), (0, 1))),
('4x4x4', mock_4x4x4_devices, [1, 16, 4], ((), (1, 2), (0,))),
('4x4x8a', mock_4x4x8_devices, [1, 16, 8], ((), (0, 1), (2,))),
('4x4x8b', mock_4x4x8_devices, [1, 8, 16], ((), (2,), (0, 1))),
('4x4x8c', mock_4x4x8_devices, [16, 8, 1], ((0, 1), (2,), ())),
('4x8x8', mock_4x8x8_devices, [1, 32, 8], ((), (0, 2), (1,))),
('8x8x8', mock_8x8x8_devices, [1, 64, 8], ((), (1, 2), (0,))),
('8x8x16', mock_8x8x16_devices, [1, 64, 16], ((), (0, 1), (2,))),
)
def test_create_device_mesh_for_tpu_v4(self, devices, mesh_shape,
expected_assignment):
jax_local_devices_from_process_0 = mock_2x2x1_devices(True)
jax_devices = devices(True)
physical_mesh = mesh_utils._jax_devices_order_normalized(
jax_local_devices_from_process_0, jax_devices)
_, assignment = mesh_utils._create_device_mesh_for_tpu_v4(
physical_mesh, mesh_shape)
self.assertEqual(assignment, expected_assignment)
if __name__ == '__main__':
absltest.main()
|
[
"no-reply@google.com"
] |
no-reply@google.com
|
ad6bf91b33b968d54e7db7520ad4160735b51f89
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/cloud/rackspace/rax_mon_notification.py
|
6aee351b964b059b494cccdaa5d0ebe4607d31ee
|
[
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 5,165
|
py
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_notification
short_description: Create or delete a Rackspace Cloud Monitoring notification.
description:
- Create or delete a Rackspace Cloud Monitoring notification that specifies a
channel that can be used to communicate alarms, such as email, webhooks, or
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
*rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification with this C(label) exists or does not exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification. String between 1 and 255
characters long.
required: true
notification_type:
description:
- A supported notification type.
choices: ["webhook", "email", "pagerduty"]
required: true
details:
description:
- Dictionary of key-value pairs used to initialize the notification.
Required keys and meanings vary with notification type. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
service-notification-types-crud.html for details.
required: true
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Monitoring notification example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Email me when something goes wrong.
    rax_mon_notification:
      credentials: ~/.rax_pub
      label: omg
      notification_type: email
details:
address: me@mailhost.com
register: the_notification
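  # Additional illustrative example: a webhook notification and its removal. The label
  # and URL are placeholders, and the `url` details key is assumed to match the
  # Rackspace webhook notification type referenced in the documentation above.
  - name: Post to a webhook when something goes wrong.
    rax_mon_notification:
      credentials: ~/.rax_pub
      label: ops-webhook
      notification_type: webhook
      details:
        url: https://example.com/monitoring/alerts
    register: the_webhook_notification
  - name: Remove the webhook notification when it is no longer needed.
    rax_mon_notification:
      credentials: ~/.rax_pub
      label: ops-webhook
      notification_type: webhook
      details:
        url: https://example.com/monitoring/alerts
      state: absent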
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def notification(module, state, label, notification_type, details):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
notification = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for n in cm.list_notifications():
if n.label == label:
existing.append(n)
if existing:
notification = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing notifications are labelled %s.' %
(len(existing), label))
if notification:
should_delete = (notification_type != notification.type)
should_update = (details != notification.details)
if should_update and not should_delete:
                notification.update(details=details)
changed = True
            if should_delete:
                notification.delete()
                # Recreate with the requested notification_type so state=present converges in one run.
                should_create = True
else:
should_create = True
if should_create:
notification = cm.create_notification(notification_type,
label=label, details=details)
changed = True
else:
for n in existing:
n.delete()
changed = True
if notification:
notification_dict = {
"id": notification.id,
"type": notification.type,
"label": notification.label,
"details": notification.details
}
module.exit_json(changed=changed, notification=notification_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
details=dict(required=True, type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
notification_type = module.params.get('notification_type')
details = module.params.get('details')
setup_rax_module(module, pyrax)
notification(module, state, label, notification_type, details)
if __name__ == '__main__':
main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
3623809ed7baff2ef3553ae5ea56de4d7103565c
|
930309163b930559929323647b8d82238724f392
|
/abc104_b.py
|
71226076067ba3980f151a868e680909d3029fb5
|
[] |
no_license
|
GINK03/atcoder-solvers
|
874251dffc9f23b187faa77c439b445e53f8dfe1
|
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
|
refs/heads/master
| 2021-11-07T14:16:52.138894
| 2021-09-12T13:32:29
| 2021-09-12T13:32:29
| 11,724,396
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
from collections import Counter
s = input()
head = s[0]
mid = Counter(s[2:-1])
# Apart from the leading 'A' and the single 'C' in s[2:-1], every character must be lowercase.
c_pos = s.find('C', 2, len(s) - 1)
others = s[1:c_pos] + s[c_pos + 1:] if c_pos != -1 else s[1:]
if head == 'A' and mid['C'] == 1 and others == others.lower():
    print('AC')
else:
    print('WA')
|
[
"gim.kobayashi@gmail.com"
] |
gim.kobayashi@gmail.com
|
772a6b05963c1796c9a2f54b96ab884eee44995f
|
067020d4bd39b6a2df300492c09b6cc65915ab71
|
/engineerx/posts/modules/initialize.py
|
a4d80863fe566411bd4139a90152dae2e145ce37
|
[] |
no_license
|
HsnVahedi/engineerx-backend
|
2e6d43079d94311f60089d052c278e2cbbfec76b
|
018257fc53e2588aec2dd159922275d544147e18
|
refs/heads/main
| 2023-04-30T22:21:25.873313
| 2021-05-15T22:00:37
| 2021-05-15T22:00:37
| 336,623,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
from posts.models import PostsPage
from home.models import HomePage
def create_posts_page(owner):
if PostsPage.objects.exists():
return
posts_page = PostsPage(title='Posts', owner=owner)
home_page = HomePage.objects.first()
home_page.add_child(instance=posts_page)
posts_page = PostsPage.objects.get(slug=posts_page.slug)
posts_page.save()
posts_page.save_revision().publish()
|
[
"mcs.hsn.vahedi@gmail.com"
] |
mcs.hsn.vahedi@gmail.com
|
d3366b8875c54405497810ad860a6ad92779b450
|
2265c393b8396292b79fdbcdd08727be24c2337a
|
/tbonlineproject/relatedcontent/models.py
|
2795965bfa7595e7eab4cec3e5338a95be54a301
|
[
"MIT"
] |
permissive
|
nathangeffen/tbonline-2
|
4275b2f970170f01f62e01ade008ab5cd1aee0d5
|
0d5869197e66a0057fa07cb99f21dde7f5b47c30
|
refs/heads/master
| 2023-01-07T08:43:35.261568
| 2019-03-31T15:54:16
| 2019-03-31T15:54:16
| 30,840,752
| 0
| 0
|
MIT
| 2022-12-26T20:18:09
| 2015-02-15T20:24:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,347
|
py
|
from django.db import models
# Create your models here.
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from enhancedtext.fields import EnhancedTextField
TYPES_OF_RELATED_CONTENT = (
('00', _('Related articles')),
('05', _('Further Reading')),
('10', _('See also')),
('15', _('Source')),
('20', _('Reference'))
)
class Webpage(models.Model):
"""Represents manually maintained links to external web pages for display,
say, on the front page of a website.
"""
title = models.CharField(max_length=200)
url = models.CharField(max_length=200,
verbose_name=_('URL'))
byline = models.CharField(blank=True, max_length=200,
help_text=_('The institution or organisation '
'that produces this website. There is no '
'problem with leaving this blank.'))
date = models.DateField(blank=True, null=True,
help_text=_('Sometimes it is useful to include the '
'date a blog was written. But mostly this '
'field will be left blank.'))
html_A_tag_options = models.CharField(max_length=200, blank=True,
help_text=_('You can put link, title and other '
'HTML A tag attributes here. '
'Leave blank if you are unsure.'))
description = EnhancedTextField(blank=True, default="\W")
date_last_edited = models.DateTimeField(auto_now=True, editable=False)
def __unicode__(self):
return self.title
class Meta:
ordering = ['date_last_edited',]
verbose_name = _('webpage')
verbose_name_plural = _('webpages')
class RelatedContent(models.Model):
'''Model for representing additional reading links that can be attached
to articles.
'''
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
webpage = models.ForeignKey(Webpage,
verbose_name=_('link'))
category = models.CharField(max_length=2,
choices=TYPES_OF_RELATED_CONTENT,
default='05')
position = models.PositiveIntegerField(default=0, blank=True, null=True)
@staticmethod
def get_related_content(model_instance=None, content_type=None, object_id=None):
'''Returns all instances on this model which point to either the given model_instance
or the model instance specified by content_type and object_id.
Either pass model_instance or content_type and object_id. Don't pass both.
'''
if model_instance:
content_type = ContentType.objects.get_for_model(model_instance)
object_id = model_instance.pk
return RelatedContent.objects.filter(content_type=content_type, object_id=object_id)
def __unicode__(self):
return unicode(self.content_type) + u' - ' + unicode(self.webpage)
class Meta:
verbose_name = _("related content")
verbose_name_plural = _("related content")
ordering = ('content_type', 'object_id', 'category', 'position',)
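# Usage sketch (illustrative only; `article` stands in for any model instance that has
# related content attached):
#
#     links = RelatedContent.get_related_content(model_instance=article)
#     # ...or, equivalently, by content type and primary key (but not both):
#     ct = ContentType.objects.get_for_model(article)
#     links = RelatedContent.get_related_content(content_type=ct, object_id=article.pk)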
|
[
"nathangeffen@gmail.com"
] |
nathangeffen@gmail.com
|
404d772e9f913c90fd54e1ed82b4691f76b47fc4
|
66213c48da0b752dc6c350789935fe2b2b9ef5ca
|
/abc/115/d_.py
|
cb8036d381327e5ffd4f0470a37d3047600699e7
|
[] |
no_license
|
taketakeyyy/atcoder
|
28c58ae52606ba85852687f9e726581ab2539b91
|
a57067be27b27db3fee008cbcfe639f5309103cc
|
refs/heads/master
| 2023-09-04T16:53:55.172945
| 2023-09-04T07:25:59
| 2023-09-04T07:25:59
| 123,848,306
| 0
| 0
| null | 2019-04-21T07:39:45
| 2018-03-05T01:37:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
# -*- coding:utf-8 -*-
import sys
def solve():
    N, X = list(map(int, sys.stdin.readline().split()))
    As = [1]  # total number of layers in a level-i burger (always odd)
    Ps = [1]  # total number of patties in a level-i burger
    for i in range(N):
        As.append(As[i]*2 + 3)  # going up one level: layer count becomes 2x + 3
        Ps.append(Ps[i]*2 + 1)  # going up one level: patty count becomes 2x + 1
    # dp[i & 1][x] := number of patties in the bottom x layers of a level-i burger
    # (rolling array over the level dimension; memory is O(X))
    dp = [[0]*(X+1) for _ in range(2)]
    for x in range(1, X+1):
        dp[0][x] = 1  # a level-0 burger is a single patty
    # solve the recurrence level by level
    for i in range(1, N+1):
        median = (As[i]+1)//2  # index of the central patty
        for x in range(X+1):
            if x < median:
                dp[i & 1][x] = dp[(i-1) & 1][x-1] if x >= 1 else 0
            elif x == median:
                dp[i & 1][x] = Ps[i-1] + 1
            else:
                dp[i & 1][x] = Ps[i-1] + 1 + dp[(i-1) & 1][x-median]
    print(dp[N & 1][X])
if __name__ == "__main__":
    solve()
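# Sanity check of the recurrences: level 0 is a single patty, so As[0] = Ps[0] = 1;
# level 1 is B P P P B, giving As[1] = 2*1 + 3 = 5 and Ps[1] = 2*1 + 1 = 3; level 2
# wraps two level-1 burgers around a patty, giving As[2] = 2*5 + 3 = 13 and
# Ps[2] = 2*3 + 1 = 7.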
|
[
"taketakeyyy@gmail.com"
] |
taketakeyyy@gmail.com
|
0ea487eefddf2b691bbd4615be6c28583189c22e
|
02c394db353d996038c9bedbeaf91bb080c12ca2
|
/dsm/epaxos/replica/config.py
|
fbfd36f11f0765a18dcf07d8a0b82a49e91101b1
|
[
"MIT"
] |
permissive
|
Limber0117/python-epaxos
|
0633752cffaca65c0d8b9c3aecf9c8bc6ca70f3e
|
e68bab50e7df32770103196c91d8708863691579
|
refs/heads/master
| 2021-08-23T22:31:47.283682
| 2017-12-06T22:16:21
| 2017-12-06T22:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
from collections import defaultdict
from typing import Any, List
class ReplicaState:
def __init__(
self,
# channel: Any,
epoch: int,
replica_id: int,
quorum_fast: List[int],
quorum_full: List[int],
live: bool = True,
timeout: int = 3,
jiffies: int = 33,
timeout_range: int = 3,
checkpoint_each: int = 10,
):
# self.channel = channel
self.epoch = epoch
self.replica_id = replica_id
self.quorum_fast = quorum_fast
self.quorum_full = quorum_full
self.live = live
self.timeout = timeout
self.ticks = 0
self.jiffies = jiffies
self.seconds_per_tick = 1. / self.jiffies
self.packet_counts = defaultdict(int)
self.timeout_range = timeout_range
self.total_sleep = 0
self.total_exec = 0
self.total_timeouts = 0
self.total_recv = 0
self.checkpoint_each = checkpoint_each
def tick(self):
self.ticks += 1
|
[
"acizov@gmail.com"
] |
acizov@gmail.com
|
ee99160a507f18d502ef1b8e1695b0e8369b54d8
|
8049ba531ea34f07b065a11dd1c9a5d68a00580f
|
/app/models.py
|
bac68d9dcf6103505c200b55cdaa3262065c452d
|
[] |
no_license
|
aoisoratoumi/django-booking
|
94b29020c2390bd51d0d1a8451e3be08a9062793
|
a178c5f2d05bffe629fc828e7dc307f517718f37
|
refs/heads/master
| 2022-09-13T22:43:26.308133
| 2020-05-29T23:57:22
| 2020-05-29T23:57:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,674
|
py
|
from django.db import models
from django.utils import timezone
from accounts.models import CustomUser
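# The verbose_name strings below are Japanese UI labels, kept as-is: 店舗 = store,
# 住所 = address, 電話番号 = phone number, 説明 = description, イメージ画像 = image,
# スタッフ = staff, 姓 = family name, 名 = given name, 備考 = remarks,
# 開始時間 = start time, 終了時間 = end time.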
class Store(models.Model):
name = models.CharField('店舗', max_length=100)
address = models.CharField('住所', max_length=100, null=True, blank=True)
tel = models.CharField('電話番号', max_length=100, null=True, blank=True)
description = models.TextField('説明', default="", blank=True)
image = models.ImageField(upload_to='images', verbose_name='イメージ画像', null=True, blank=True)
def __str__(self):
return self.name
class Staff(models.Model):
user = models.OneToOneField(CustomUser, verbose_name='スタッフ', on_delete=models.CASCADE)
store = models.ForeignKey(Store, verbose_name='店舗', on_delete=models.CASCADE)
def __str__(self):
return f'{self.store}:{self.user}'
class Booking(models.Model):
staff = models.ForeignKey(Staff, verbose_name='スタッフ', on_delete=models.CASCADE)
first_name = models.CharField('姓', max_length=100, null=True, blank=True)
last_name = models.CharField('名', max_length=100, null=True, blank=True)
tel = models.CharField('電話番号', max_length=100, null=True, blank=True)
remarks = models.TextField('備考', default="", blank=True)
start = models.DateTimeField('開始時間', default=timezone.now)
end = models.DateTimeField('終了時間', default=timezone.now)
def __str__(self):
start = timezone.localtime(self.start).strftime('%Y/%m/%d %H:%M')
end = timezone.localtime(self.end).strftime('%Y/%m/%d %H:%M')
return f'{self.first_name}{self.last_name} {start} ~ {end} {self.staff}'
|
[
"harukun2002@gmail.com"
] |
harukun2002@gmail.com
|
67625ed8122fc11c906ad83907a8303cc83d77b9
|
fb28906c1f0347ffe50193f6c2bad2d4b490fa9c
|
/budger/directory/migrations/0018_ksoemployee_is_developer.py
|
035b40d574b286ea6259fb47f17f3cee1ebb2261
|
[] |
no_license
|
pavkozlov/budger-server
|
20c695309c34a0451d25b83ab8583b14f0d21c0c
|
7a98c1789414c83625bda1e5b29cbe5587c3cd6a
|
refs/heads/master
| 2020-12-17T06:35:10.550905
| 2020-01-13T13:27:42
| 2020-01-13T13:27:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
# Generated by Django 2.2.6 on 2019-12-04 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('directory', '0017_auto_20191203_1640'),
]
operations = [
migrations.AddField(
model_name='ksoemployee',
name='is_developer',
field=models.BooleanField(db_index=True, default=False),
),
]
|
[
"it.pavelkozlov@gmail.com"
] |
it.pavelkozlov@gmail.com
|