blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4496d5f07d39a193ef3fbfd8710da46756d19ecc
|
c62dbc5715fe80e106a666a8f7a6aeb051d0b40e
|
/analytical_solution.py
|
016425a97e584c740f05ad933c74f8b757d5a4e2
|
[] |
no_license
|
mishaukr7/MM_LAB_5
|
14ebb2c8553cfb1f1b13293e6160294fb2684a9c
|
610a623d1a63ddf0c231575c2b78c4fc1bb4a454
|
refs/heads/master
| 2021-08-23T15:16:34.096484
| 2017-12-05T09:03:46
| 2017-12-05T09:03:46
| 113,076,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
import math
def analytical_method_find_solution_free(t0, N0, r, T):
    """Closed-form free (exponential) growth N(t) = N0 * exp(r * (t - 20)).

    Returns two parallel lists: the time points t0..T inclusive and the
    population value at each point.
    # NOTE(review): the offset 20 looks like a hard-coded t0 — confirm.
    """
    times = list(range(t0, T + 1))
    values = [N0 * math.exp(r * (t - 20)) for t in times]
    return times, values
def analytical_method_find_solution_limited(t0, N0, r, k, T):
    """Closed-form logistic (limited) growth with carrying capacity k.

    Returns two parallel lists: the time points t0..T-1 and the population
    value at each point.
    # NOTE(review): the range excludes T here but includes it in the free
    # variant, and the offset 20 again looks like a hard-coded t0 — confirm.
    """
    times = list(range(t0, T))
    values = []
    for t in times:
        growth = math.exp(r * (t - 20))
        values.append((k * N0 * growth) / (k + N0 * (growth - 1)))
    return times, values
|
[
"mishaukr22@gmail.com"
] |
mishaukr22@gmail.com
|
e92090672df6dbc77947cca8dd3f20b98894a501
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/rffada002/question2.py
|
5ad1e0412877bdf376192722edcf2c9130f0adb5
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,664
|
py
|
print ("Welcome to the 30 Second Rule Expert")
print ("------------------------------------")
print ("Answer the following questions by selecting from among the options.")
seen=input("Did anyone see you? (yes/no)\n")
if (seen == 'no'):
sticky=input("Was it sticky? (yes/no)\n")
if (sticky == 'no'):
emausaurus=input("Is it an Emausaurus? (yes/no)\n")
if (emausaurus == 'no'):
cat=input("Did the cat lick it? (yes/no)\n")
if (cat == 'no'):
print ("Decision: Eat it.")
elif (cat == 'yes'):
healthy=input("Is your cat healthy? (yes/no)\n")
if (healthy == 'yes'):
print ("Decision: Eat it.")
elif (healthy == 'no'):
print ("Decision: Your call.")
elif (emausaurus == 'yes'):
megalosaurus=input("Are you a Megalosaurus? (yes/no)\n")
if (megalosaurus == 'yes'):
print ("Decision: Eat it.")
elif (megalosaurus == 'no'):
print ("Decision: Don't eat it.")
elif (sticky == 'yes'):
steak=input("Is it a raw steak? (yes/no)\n")
if (steak == 'no'):
cat=input("Did the cat lick it? (yes/no)\n")
if (cat == 'no'):
print ("Decision: Eat it.")
elif (cat == 'yes'):
healthy=input("Is your cat healthy? (yes/no)\n")
if (healthy == 'yes'):
print ("Decision: Eat it.")
elif (healthy == 'no'):
print ("Decision: Your call.")
elif (steak == 'yes'):
puma=input("Are you a puma? (yes/no)\n")
if (puma == 'yes'):
print ("Decision: Eat it.")
elif (puma == 'no'):
print ("Decision: Don't eat it.")
elif (seen == 'yes'):
friend=input("Was it a boss/lover/parent? (yes/no)\n")
if (friend == 'no'):
print ("Decision: Eat it.")
elif (friend == 'yes'):
price=input("Was it expensive? (yes/no)\n")
if (price == 'no'):
chocolate=input("Is it chocolate? (yes/no)\n")
if (chocolate == 'no'):
print ("Decision: Don't eat it.")
elif (chocolate == 'yes'):
print ("Decision: Eat it.")
elif (price == 'yes'):
cut=input("Can you cut off the part that touched the floor? (yes/no)\n")
if (cut == 'yes'):
print ("Decision: Eat it.")
elif (cut == 'no'):
print ("Decision: Your call.")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
6d164cfc391db5ee4400cf4280c951a39b8e146a
|
443585e4fc146308b18bc2f9234d0947da38d3e5
|
/practice/yj/csv/Quiz2.py
|
cc4f15f0435d1e5ad3b650c79dc1a5fe19b07be9
|
[] |
no_license
|
ggyudongggyu/20201208commit
|
b524c4a7fb241cacaacffa5882c55d1d0ccba11f
|
fbb58a8ed06f454a2a79a9b8c75deabaec62b317
|
refs/heads/master
| 2023-02-02T21:59:51.518218
| 2020-12-24T14:32:21
| 2020-12-24T14:32:21
| 319,578,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
# Quiz2: draw two line series on one figure.
# Fix: replaced 'from matplotlib.pyplot import *', which pollutes the module
# namespace with hundreds of names, with the conventional aliased import.
import matplotlib.pyplot as plt

plt.title('plot graph')
plt.plot([1, 2, 3, 4], [10, 20, 30, 40], marker='.', color= 'green', label = '1st')
plt.plot([1, 2, 3, 4], [30, 15, 25, 10], marker= '^' ,color = 'pink', label = '2nd')
# plt.plot([1, 2, 3, 4], [15, 25, 15, 25], linestyle= '-.' ,color = 'red', label = '3rd')
# plt.plot([1, 2, 3, 4], [20, 10, 30, 5], linestyle= '-' ,color = 'blue', label = '4th')
plt.legend()
plt.show()
|
[
"donggyu0219@gmail.com"
] |
donggyu0219@gmail.com
|
daaf7110d0464d08291fb7f7a191cb8182631fa6
|
27040f0d537c1898c9f1fce4db68b24588840987
|
/7. Reverse Integer.py
|
834d39db9bf66caba7c2392e1009abf6fb37a850
|
[] |
no_license
|
xtanmaygarg/LeetCodeSolutions
|
0197474e92d4ef14676342d00933e764f8b29581
|
5fd06d2f0da222977c1ae6e4d219a682b3596341
|
refs/heads/master
| 2021-06-14T09:39:37.795785
| 2020-12-04T10:44:07
| 2020-12-04T10:44:07
| 254,488,075
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x, keeping the sign.

        Returns 0 when the reversed value does not fit in a signed
        32-bit integer (LeetCode #7 constraint).
        """
        flipped = int(str(abs(x))[::-1])
        if x < 0:
            flipped = -flipped
        if -(2 ** 31) <= flipped <= 2 ** 31 - 1:
            return flipped
        return 0
|
[
"xtanmaygarg@gmail.com"
] |
xtanmaygarg@gmail.com
|
7cd5bf667dfd5853848da023118f67243641925b
|
e1adcd0173cf849867144a511c029b8f5529b711
|
/ros_ws/Archive/ProductFiles20180213/positionControlPackage.py
|
c6e35e7f5eaadfa197321d29d10af5ea39366fea
|
[] |
no_license
|
adubredu/cartbot_arm_subsystem
|
20a6e0c7bacc28dc0486160c6e25fede49f013f2
|
3e451272ddaf720bc7bd24da2ad5201b27248f1c
|
refs/heads/master
| 2022-01-04T23:01:25.061143
| 2019-05-14T16:45:02
| 2019-05-14T16:45:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,466
|
py
|
import argparse
import sys
import struct
import time
import json
import rospy
from math import *
from std_msgs.msg import (
UInt16,
)
from StringIO import StringIO
import baxter_interface as baxter
import speech_recognition as SR
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
def xyzToAngles(limbs, x, y, z, xr, yr, zr, wr):
    """Solve inverse kinematics for one Baxter arm via the IK service.

    limbs -- 'left' or 'right'; selects the service namespace.
    x, y, z -- target end-effector position in the 'base' frame.
    xr, yr, zr, wr -- target orientation quaternion components.
    Returns a {joint_name: angle} dict on success, or the string "invalid"
    when the solver finds no valid solution.  Exits the process if the
    service call itself fails.
    """
    ns = "ExternalTools/" + limbs + "/PositionKinematicsNode/IKService"
    iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
    ikreq = SolvePositionIKRequest()
    hdr = Header(stamp=rospy.Time.now(), frame_id='base')
    pose = PoseStamped(
        header=hdr,
        pose=Pose(
            position=Point(
                x=x,
                y=y,
                z=z,
            ),
            orientation=Quaternion(
                x=xr,
                y=yr,
                z=zr,
                w=wr,
            ),
        ),
    )
    ikreq.pose_stamp.append(pose)
    try:
        # Wait up to 5 s for the service before calling it.
        rospy.wait_for_service(ns, 5.0)
        resp = iksvc(ikreq)
    except (rospy.ServiceException, rospy.ROSException), e:  # Python 2 except syntax
        rospy.logerr("Service call failed: %s" % (e,))
        exit()
    resp_seeds = struct.unpack('<%dB' % len(resp.result_type),
                               resp.result_type)
    if (resp_seeds[0] != resp.RESULT_INVALID):
        # seed_str is computed but never used — debug leftover.
        seed_str = {
            ikreq.SEED_USER: 'User Provided Seed',
            ikreq.SEED_CURRENT: 'Current Joint Angles',
            ikreq.SEED_NS_MAP: 'Nullspace Setpoints',
        }.get(resp_seeds[0], 'None')
        # Format solution into Limb API-compatible dictionary
        limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))
        return limb_joints
    else:
        print("INVALID POSE - No Valid Joint Solution Found.")
        return "invalid"
def euler2Quat(xr, yr, zr):
    """Convert Euler angles given in degrees to a quaternion.

    xr, yr, zr -- rotations about x (bank), y (heading) and z (attitude).
    Returns a dict with keys 'qw', 'qx', 'qy', 'qz'.
    """
    bank = radians(xr)
    heading = radians(yr)
    attitude = radians(zr)
    ch, sh = cos(heading / 2), sin(heading / 2)
    ca, sa = cos(attitude / 2), sin(attitude / 2)
    cb, sb = cos(bank / 2), sin(bank / 2)
    return {
        'qw': ch * ca * cb - sh * sa * sb,
        'qx': sh * sa * cb + ch * ca * sb,
        'qy': sh * ca * cb + ch * sa * sb,
        'qz': ch * sa * cb - sh * ca * sb,
    }
def moveOnAxis(limb, axis, dist, speed):
    """Translate the end effector along one axis ('x', 'y' or 'z'),
    keeping orientation constant; returns the final endpoint pose."""
    ## Moves arm on x, y, or z axis keeping orientation constant
    # speed is in m/s
    # dist in m
    # limb is a handle to a limb object
    if 'left' in limb.joint_names()[0]: limbName = 'left'
    else: limbName = 'right'
    print(limbName)
    position = {'x':0, 'y':1, 'z':2}
    pose = limb.endpoint_pose()
    position['x'] = pose['position'][0]
    position['y'] = pose['position'][1]
    position['z'] = pose['position'][2]
    orient = pose['orientation']
    secPframe = .05  # seconds per interpolation frame (20 Hz)
    frames = int(abs(dist)*(1/float(speed))*(1/secPframe))
    if frames == 0: return limb.endpoint_pose()
    distPframe = float(dist)/float(frames)  # signed step per frame
    limb.set_joint_position_speed(1)
    rate = rospy.Rate(1/secPframe)
    for i in range(0, frames):
        position[axis] += distPframe
        jointPos = xyzToAngles(limbName, position['x'], position['y'], position['z'], orient[0], orient[1], orient[2], orient[3])
        if jointPos != "invalid":
            # Check if it is minor move. if it is not, use smoother movement function
            minorMove = True
            actualJointPos = limb.joint_angles()
            for joint, angle in jointPos.iteritems():  # Python 2 dict API
                if abs(angle-actualJointPos[joint]) > .8: minorMove = False
            if minorMove:
                limb.set_joint_positions(jointPos)
            else:
                print('bigmove')
                limb.move_to_joint_positions(jointPos, timeout=3, threshold=.02)
        else:
            # IK failed for this intermediate pose: abort mid-motion.
            print("Can't Move Here")
            return limb.endpoint_pose()
        rate.sleep()
    return limb.endpoint_pose()
def playPositionFile(fPath, lLimb, rLimb):
    """Replay a recorded sequence of joint positions on both arms.

    fPath -- path to a file containing a (single-quoted) JSON array of
             waypoints, each a dict with 'left' and 'right' joint-position
             dicts; an empty string means "skip this arm".
    lLimb -- handle to the left limb 'Limb' object.
    rLimb -- handle to the right limb 'Limb' object.
    Returns a tuple of both arms' final endpoint poses.
    """
    with open(fPath, 'r') as f:
        fText = f.read()
    # Recorded files use single quotes; JSON requires double quotes.
    fText = fText.replace("'", '"')
    wpArray = json.loads(fText)
    lLimb.set_joint_position_speed(.5)
    rLimb.set_joint_position_speed(.5)
    rate = rospy.Rate(1000)  # NOTE(review): created but never used — confirm before removing
    for wp in wpArray:
        lPos = wp['left']
        rPos = wp['right']
        # Move each arm only when a waypoint was recorded for it.
        if lPos != '':
            lLimb.move_to_joint_positions(lPos)
        if rPos != '':
            rLimb.move_to_joint_positions(rPos)
    # Bug fix: rLimb.endpoint_pose was previously returned without the call
    # parentheses, yielding a bound method instead of the pose dict.
    return (lLimb.endpoint_pose(), rLimb.endpoint_pose())
|
[
"alphonsusbq436@gmail.com"
] |
alphonsusbq436@gmail.com
|
efd021c0316156776876ce0abeeb3c3283a39a3d
|
eea3f04dc73d4536083c74cac4478835a31c4a94
|
/chinese_song_generation/data_utils.py
|
75424c8a5cc4c880bf635faf9ab900953138832f
|
[] |
no_license
|
yscoder-github/news-generate
|
15d5f9acecc92add201fb3c53aa211c0aa474e1f
|
6b8a98375db984dea9edb4abff72191477bdb406
|
refs/heads/master
| 2023-05-26T19:58:00.797573
| 2019-07-18T01:30:36
| 2019-07-18T01:30:36
| 187,489,859
| 4
| 4
| null | 2023-05-22T22:14:54
| 2019-05-19T14:50:32
|
Python
|
UTF-8
|
Python
| false
| false
| 5,906
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import jieba
from six.moves import urllib
from tensorflow.python.platform import gfile
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
def basic_tokenizer(sentence):
  """Segment *sentence* with jieba and return every token as bytes."""
  tokens = []
  for fragment in jieba.cut(sentence.strip()):
    # jieba may yield str; normalise everything to bytes (UTF-8).
    tokens.append(fragment.encode() if isinstance(fragment, str) else fragment)
  return tokens
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
                      tokenizer=None, normalize_digits=False):
  """Create a vocabulary file (if missing) from tokenised data.

  Token frequencies are counted across *data_path*, the special symbols in
  _START_VOCAB are prepended, and at most *max_vocabulary_size* entries are
  written, one per line, to *vocabulary_path*.
  """
  if gfile.Exists(vocabulary_path):
    return  # already built; nothing to do
  print("Creating vocabulary %s from %s" % (vocabulary_path, data_path))
  vocab = {}
  with gfile.GFile(data_path, mode="rb") as f:
    for counter, line in enumerate(f, 1):
      if counter % 100 == 0:
        print(" processing line %d" % counter)
      for token in (tokenizer or basic_tokenizer)(line):
        key = re.sub(_DIGIT_RE, b"0", token) if normalize_digits else token
        vocab[key] = vocab.get(key, 0) + 1
  # Stable sort: equal counts keep first-seen order, as before.
  vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
  print('>> Full Vocabulary Size :', len(vocab_list))
  vocab_list = vocab_list[:max_vocabulary_size]
  with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
    for w in vocab_list:
      vocab_file.write(w + b"\n")
def initialize_vocabulary(vocabulary_path):
  """Load a vocabulary file.

  Returns (token -> id dict, id -> token list); raises ValueError when the
  file does not exist.
  """
  if not gfile.Exists(vocabulary_path):
    raise ValueError("Vocabulary file %s not found.", vocabulary_path)
  with gfile.GFile(vocabulary_path, mode="rb") as f:
    rev_vocab = [line.strip() for line in f.readlines()]
  vocab = {token: idx for idx, token in enumerate(rev_vocab)}
  return vocab, rev_vocab
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=False):
  """Map a sentence to a list of token ids, using UNK_ID for unknown tokens."""
  words = tokenizer(sentence) if tokenizer else basic_tokenizer(sentence)
  if normalize_digits:
    # Replace digits with 0 before looking words up (bytes keys, no decode).
    return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words]
  # NOTE(review): this branch decodes tokens to str before lookup while the
  # normalized branch looks up raw bytes — the key types differ; confirm.
  return [vocabulary.get(w.decode("utf8"), UNK_ID) for w in words]
def data_to_token_ids(data_path, target_path, vocabulary_path,
                      tokenizer=None, normalize_digits=False):
  """Tokenize *data_path* line by line, writing id lists to *target_path*."""
  if gfile.Exists(target_path):
    return  # already tokenized
  print("Tokenizing data in %s" % data_path)
  vocab, _ = initialize_vocabulary(vocabulary_path)
  with gfile.GFile(data_path, mode="rb") as data_file:
    with gfile.GFile(target_path, mode="w") as tokens_file:
      for counter, line in enumerate(data_file, 1):
        if counter % 100000 == 0:
          print(" tokenizing line %d" % counter)
        token_ids = sentence_to_token_ids(line, vocab, tokenizer,
                                          normalize_digits)
        tokens_file.write(" ".join(str(tok) for tok in token_ids) + "\n")
def prepare_custom_data(working_directory, train_enc, train_dec, test_enc, test_dec, enc_vocabulary_size, dec_vocabulary_size, tokenizer=None):
  """Build vocabularies and token-id files for the encoder/decoder corpora.

  Returns a 6-tuple: train id paths (enc, dec), dev id paths (enc, dec) and
  the two vocabulary file paths.
  """
  # Vocabularies of the requested sizes.
  enc_vocab_path = os.path.join(working_directory, "vocab%d.enc" % enc_vocabulary_size)
  dec_vocab_path = os.path.join(working_directory, "vocab%d.dec" % dec_vocabulary_size)
  create_vocabulary(enc_vocab_path, train_enc, enc_vocabulary_size, tokenizer)
  create_vocabulary(dec_vocab_path, train_dec, dec_vocabulary_size, tokenizer)
  # Token ids for the training data.
  enc_train_ids_path = "%s.ids%d" % (train_enc, enc_vocabulary_size)
  dec_train_ids_path = "%s.ids%d" % (train_dec, dec_vocabulary_size)
  data_to_token_ids(train_enc, enc_train_ids_path, enc_vocab_path, tokenizer)
  data_to_token_ids(train_dec, dec_train_ids_path, dec_vocab_path, tokenizer)
  # Token ids for the development data.
  enc_dev_ids_path = "%s.ids%d" % (test_enc, enc_vocabulary_size)
  dec_dev_ids_path = "%s.ids%d" % (test_dec, dec_vocabulary_size)
  data_to_token_ids(test_enc, enc_dev_ids_path, enc_vocab_path, tokenizer)
  data_to_token_ids(test_dec, dec_dev_ids_path, dec_vocab_path, tokenizer)
  return (enc_train_ids_path, dec_train_ids_path, enc_dev_ids_path, dec_dev_ids_path, enc_vocab_path, dec_vocab_path)
|
[
"yscoder@foxmail.com"
] |
yscoder@foxmail.com
|
2d7098cb8174e3779d78a54cffcff3d299651034
|
5174346f6bd374cc8873a41ed336b7545756d753
|
/examples/prompts/toolbar-prompt.py
|
ff31c5f2951a01c99352e655915d09e1f94ff7bc
|
[
"BSD-3-Clause"
] |
permissive
|
calebstewart/python-prompt-toolkit
|
f06dd911399b75e9d4985b485a3e9897c04bf1d6
|
3f9f9a927b2d1a208e59af73e574825df2901e69
|
refs/heads/master
| 2022-07-02T16:23:24.682709
| 2020-05-14T22:45:14
| 2020-05-14T22:45:14
| 263,998,820
| 1
| 0
| null | 2020-05-14T18:51:02
| 2020-05-14T18:51:01
| null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
#!/usr/bin/env python
"""Prompt for user input as a toolbar which disappears after submission."""
from prompt_toolkit import prompt


def main() -> None:
    """Ask one question in the bottom toolbar and echo the reply."""
    answer = prompt(message="prompt$ ", prompt_in_toolbar=True)
    print(f"You said: {answer}")


if __name__ == "__main__":
    main()
|
[
"caleb.stewart94@gmail.com"
] |
caleb.stewart94@gmail.com
|
a44c312b288d21db66156e2ee38ac70257256d20
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02918/s539072224.py
|
2dbe24e4788b05696bc3160ba49b9b37d37af922
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
import sys
import numpy as np
input = lambda: sys.stdin.readline().rstrip()  # fast input(); deliberately shadows the builtin
INF = 10**9 + 1  # sentinel index: larger than any valid position
def solve():
    """Read N, K and an 'L'/'R' string from stdin; greedily overwrite up to K
    runs of the opposite character with the first character, then print the
    number of adjacent equal pairs.
    """
    N, K = map(int, input().split())
    S = np.array(list(input()), dtype='str')
    if N == 1:
        # A single element has no adjacent pair.
        print(0)
        exit()
    ri = INF            # start index of the current opposite-character run (INF = none open)
    kc = 0              # number of run rewrites performed so far
    fs = S[0]           # first character; runs of this character are kept
    if fs == 'R':
        nfs = 'L'
    else:
        nfs = 'R'
    for i in range(N):
        if S[i] == nfs:
            ri = min(ri, i)     # remember where the opposite run began
        elif S[i] == fs and ri != INF:
            S[ri:i] = fs        # rewrite the whole opposite run in place
            ri = INF
            kc += 1
            if kc == K:
                break
    else:
        # Loop ran to completion without using all K ops:
        # flush a trailing opposite run, if one is still open.
        if ri != INF and S[-1] == nfs:
            S[ri:N] = fs
    # print(S)
    happy = 0
    for i in range(N - 1):
        if S[i] == S[i + 1]:
            happy += 1
    print(happy)
if __name__ == '__main__':
solve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fb2dac07af82c220e6e4a2f95942ed4fa435a178
|
6ffa236a008d1cd1dc70f2c8ea0544d20ec350ee
|
/aries_cloudagent/messaging/issue_credential/v1_0/messages/credential_stored.py
|
59973aa3320b3ad20a261d63f724ad3d305ab2b3
|
[
"Apache-2.0"
] |
permissive
|
blockpass-identity-lab/aries-fl-demo
|
99e849f782dd80e729e3fe24c3af2881c5c49dca
|
310b748c1ac2e814ec6f97c46ddbb9985584e0fc
|
refs/heads/master
| 2022-07-06T18:37:16.007582
| 2020-04-23T15:48:33
| 2020-04-23T15:48:33
| 221,698,330
| 5
| 0
|
Apache-2.0
| 2021-02-26T02:40:03
| 2019-11-14T12:58:58
|
Python
|
UTF-8
|
Python
| false
| false
| 911
|
py
|
"""A credential stored message."""
# from marshmallow import fields
from ....agent_message import AgentMessage, AgentMessageSchema
from ..message_types import CREDENTIAL_STORED
HANDLER_CLASS = (
"aries_cloudagent.messaging.issue_credential.v1_0.handlers."
"credential_stored_handler.CredentialStoredHandler"
)
class CredentialStored(AgentMessage):
    """Class representing a credential stored message."""
    class Meta:
        """Credential metadata."""
        # Dotted path; resolved lazily by the agent message machinery.
        handler_class = HANDLER_CLASS
        schema_class = "CredentialStoredSchema"
        message_type = CREDENTIAL_STORED
    def __init__(self, **kwargs):
        """Initialize credential object.

        All keyword arguments are passed straight through to AgentMessage.
        """
        super(CredentialStored, self).__init__(**kwargs)
class CredentialStoredSchema(AgentMessageSchema):
    """Credential stored schema.

    No fields beyond those inherited from AgentMessageSchema.
    """
    class Meta:
        """Schema metadata."""
        # The message class this schema (de)serialises.
        model_class = CredentialStored
|
[
"srklump@hotmail.com"
] |
srklump@hotmail.com
|
8b3a97ebe43ae145f472de830429cf5e306e5269
|
5c902cfea2856b5b591a9e4de4ecf7d66d01c3a0
|
/백준/기초1/수학1/나머지.py
|
36861f45235b2a9988962ca407e259b38e24cc23
|
[] |
no_license
|
VIXXPARK/pythonAlgorithm
|
9cbedf1e9dc387756bed1793081be90e77daf9e8
|
8675fc0e078d90620ecf9dae95c1ccd6bcd36d37
|
refs/heads/main
| 2023-05-29T10:41:51.900075
| 2021-06-17T23:28:51
| 2021-06-17T23:28:51
| 316,072,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
import sys

# Read three integers a, b, c from a single line of stdin, then demonstrate
# the modular identities (a+b) % c == ((a%c)+(b%c)) % c and
# (a*b) % c == ((a%c)*(b%c)) % c by printing all four values.
a, b, c = map(int, sys.stdin.readline().rstrip().split())
for value in ((a + b) % c,
              ((a % c) + (b % c)) % c,
              (a * b) % c,
              ((a % c) * (b % c)) % c):
    print(value)
|
[
"vixx170627@gmail.com"
] |
vixx170627@gmail.com
|
4c2bdb7c3c1f0ffd2ca09b91c2b25d6b3bd6dc4c
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/PORMain/pirates/effects/DarkPortal.py
|
0cf27ecf8deadbcfe7623641ee7db325b3b0db04
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
from panda3d.core import TransparencyAttrib
# File: D (Python 2.4)
from direct.interval.IntervalGlobal import *
from PooledEffect import PooledEffect
from EffectController import EffectController
from otp.otpbase import OTPRender
import random
class DarkPortal(PooledEffect, EffectController):
    """Pooled visual effect: a dark portal that scales up, holds, then collapses."""
    def __init__(self):
        PooledEffect.__init__(self)
        EffectController.__init__(self)
        self.speed = 0.75        # seconds for each scale-up / scale-down phase
        self.holdTime = 2.5      # seconds held at full size
        self.size = 40           # full scale of the portal model
        self.explosionSequence = 0
        self.explosion = loader.loadModel('models/effects/darkPortal')
        self.explosion.setDepthTest(0)
        # NOTE(review): this disables depth-write on the effect node itself,
        # not on self.explosion (which is done again below) — confirm intended.
        self.setDepthWrite(0)
        self.explosion.setFogOff()
        self.explosion.setLightOff()
        self.explosion.setHpr(0, -90, 0)
        self.explosion.reparentTo(self)
        self.hide()
        # Visible only through the environment camera, not the main camera.
        self.explosion.hide(OTPRender.MainCameraBitmask)
        self.explosion.showThrough(OTPRender.EnviroCameraBitmask)
        self.explosion.setBin('shadow', 0)
        self.explosion.setTransparency(TransparencyAttrib.MAlpha)
        self.explosion.setDepthWrite(0)
    def createTrack(self, rate = 1):
        # Build self.track: show -> scale up -> hold -> scale down -> hide -> recycle.
        # NOTE(review): the 'rate' parameter is accepted but unused here.
        self.explosion.setScale(1)
        self.explosion.setColorScale(1, 1, 1, 0.75)
        scaleUp = self.explosion.scaleInterval(self.speed, self.size, startScale = 0.0, blendType = 'easeIn', other = render)
        scaleDown = self.explosion.scaleInterval(self.speed, 0.0, startScale = self.size, blendType = 'easeIn', other = render)
        self.track = Sequence(Func(self.show), scaleUp, Wait(self.holdTime), scaleDown, Func(self.hide), Func(self.cleanUpEffect))
    def cleanUpEffect(self):
        # Clean up, then return this instance to the effect pool.
        EffectController.cleanUpEffect(self)
        self.checkInEffect(self)
    def destroy(self):
        # Tear down both base classes.
        EffectController.destroy(self)
        PooledEffect.destroy(self)
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
93048f8876fc96b4c7fd4bda1e6719756d628118
|
222d4f2dfb63a66b5de274b785cb92393a2e0fe9
|
/after6pm_191113/04.py
|
17381eb6ff56e9032c7c90fbf870c88dae44464e
|
[] |
no_license
|
GyuReeKim/PycharmProjects
|
fd2584c3ff1369510a7f246f2089cefb77035d9d
|
dd4f0e15b4e72c68b054489c54f24fa0ba5b9ed3
|
refs/heads/master
| 2020-07-03T11:44:54.951147
| 2019-11-21T00:07:43
| 2019-11-21T00:07:43
| 201,894,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# Laboratory (연구소)
# Building walls (벽세우기)
import sys
sys.stdin = open('04.txt', 'r')  # redirect stdin so input() reads the local test file
def f(i, j, lab):
    # Placeholder — the wall-placement/search logic is not implemented yet.
    pass
N, M = map(int, input().split())  # grid dimensions: N rows, M columns
lab = [list(map(int, input().split())) for _ in range(N)]  # the N x M grid
print(lab)  # debug: echo the parsed grid
f(0, 0, lab)
|
[
"starkim96@naver.com"
] |
starkim96@naver.com
|
524b26645d22e5350ca96393ae4a8f8c7410257e
|
4c76dbfaa8f2ca33945e303be90b579c79bd4008
|
/renesola/apps/freight/management/commands/build_angular_js.py
|
50d8bdd16a9ceccc64c3c8823bb5058badf95821
|
[] |
no_license
|
fogcitymarathoner/renasola
|
42c32243df4e4c1246d9a85cfb9251aed2264309
|
9089dcc0ffc57a76799f5e99244df644256e08ea
|
refs/heads/master
| 2021-01-11T00:32:40.358446
| 2016-10-10T18:49:50
| 2016-10-10T18:49:50
| 70,517,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
__author__ = 'marc'
from django.core.management.base import BaseCommand
from django.conf import settings
import os
from renesola_lib.angular_helpers import build_js
class Command(BaseCommand):
    """Management command that rebuilds the project's Angular JS assets.

    Thin wrapper: all work is delegated to
    renesola_lib.angular_helpers.build_js; takes no arguments.
    """
    args = ''
    help = ''
    def handle(self, *args, **options):
        # Delegate everything to the shared helper.
        build_js()
|
[
"marc@fogtest.com"
] |
marc@fogtest.com
|
d54db077ad045ae5605a1a04b178f9fac106b3ab
|
30a456e3012c663782d2a07a0ff67c377d63790d
|
/data/ass2json.py
|
08561f2f6f0db1ff52593268932f24b680e40cf8
|
[
"MIT"
] |
permissive
|
akx/my-video
|
41099725fd96f369a1e8e671667e2e7be3256f42
|
b1135809f81a34026536d1a8532390dc5f1c7945
|
refs/heads/master
| 2021-01-24T10:30:53.608241
| 2016-10-01T18:30:34
| 2016-10-01T18:30:34
| 69,733,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
import argparse
import re, json
import sys
K_RE = re.compile(r'(\{\\k([0-9]+)\})')
def parse_time(dt):
    """Convert an ASS timestamp 'H:MM:SS.cc' into seconds as a float."""
    hours, minutes, seconds, centis = (float(int(part, 10))
                                       for part in re.split('[:.,]', dt))
    return hours * 3600 + minutes * 60 + seconds + centis / 100.0
def parse_ass(infp):
    """Yield {'time', 'word'[, 'verse']} dicts for every karaoke word.

    Scans Dialogue lines, splits the text on {\\kNN} karaoke tags, and walks
    the word timings forward from the line's start time.  The first word of
    each line is flagged with 'verse': True.
    """
    for raw in infp:
        if not raw.startswith('Dialogue:'):
            continue
        fields = raw.split(',', 9)
        start = parse_time(fields[1])
        end = parse_time(fields[2])  # parsed for symmetry; currently unused
        parts = K_RE.split(fields[-1])[1:]
        durations = [int(s, 10) / 100.0 for s in parts[1::3]]
        words = [s.strip() for s in parts[2::3]]
        cursor = start
        for index, (duration, word) in enumerate(zip(durations, words)):
            entry = {
                'time': round(cursor, 3),
                'word': word,
            }
            if index == 0:
                entry['verse'] = True
            yield entry
            cursor += duration
def main():
    """CLI entry point: convert an .ass subtitle file to a JSON word list."""
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=argparse.FileType())
    parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=None)
    parser.add_argument('--indent', default=None, type=int)
    opts = parser.parse_args()
    destination = opts.output if opts.output else sys.stdout
    json.dump(list(parse_ass(opts.file)), destination, indent=opts.indent)
if __name__ == '__main__':
main()
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
cb7fb08c690282edfd833933070c697f756dcb10
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/ADD/ADDmonoPhoton_MD_1_d_8_TuneCUETP8M1_13TeV_pythia8_cfi.py
|
302e89726365a986e9049cc298156fb6aa79d2a4
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 ADD mono-photon generator: ffbar -> G + gamma at 13 TeV,
# MD = 1 TeV, n = 8 extra dimensions, photon pTHat > 130 GeV.
generator = cms.EDFilter(
    "Pythia8GeneratorFilter",
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    filterEfficiency = cms.untracked.double(1.0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    comEnergy = cms.double(13000.),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        # see details on http://home.thep.lu.se/~torbjorn/php8135/ExtraDimensionalProcesses.php?filepath=files/
        processParameters = cms.vstring(
            'ExtraDimensionsLED:ffbar2Ggamma = on',
            'ExtraDimensionsLED:CutOffmode = 1',
            'ExtraDimensionsLED:t = 0.5',
            'ExtraDimensionsLED:n = 8',
            'ExtraDimensionsLED:MD = 1000.',
            'ExtraDimensionsLED:LambdaT = 1000.',
            '5000039:m0 = 1200.',
            '5000039:mWidth = 1000.',
            '5000039:mMin = 1.',
            '5000039:mMax = 13990.',
            'PhaseSpace:pTHatMin = 130.',
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CUEP8M1Settings',
            'processParameters',
        ),
    ),
)
|
[
"sheffield@physics.rutgers.edu"
] |
sheffield@physics.rutgers.edu
|
45aad19c79479fd3824ea58eb7d7717279b0b008
|
6858cbebface7beec57e60b19621120da5020a48
|
/ply/modimport.py
|
f82d08c44b979f0b39be6a4dfe34acf53fbfc6e1
|
[] |
no_license
|
ponyatov/PLAI
|
a68b712d9ef85a283e35f9688068b392d3d51cb2
|
6bb25422c68c4c7717b6f0d3ceb026a520e7a0a2
|
refs/heads/master
| 2020-09-17T01:52:52.066085
| 2017-03-28T07:07:30
| 2017-03-28T07:07:30
| 66,084,244
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
>>> import tokrules
>>> lexer = lex.lex(module=tokrules)
>>> lexer.input("3 + 4")
>>> lexer.token()
LexToken(NUMBER,3,1,1,0)
>>> lexer.token()
LexToken(PLUS,'+',1,2)
>>> lexer.token()
LexToken(NUMBER,4,1,4)
>>> lexer.token()
None
>>>
|
[
"dponyatov@gmail.com"
] |
dponyatov@gmail.com
|
4d620495621fd8734bc2f5085f0814fab0602439
|
db6d37fcf5545acd3dd9910674c0f43c90410e0a
|
/iterminal/controllers.py
|
88f3f762f83c05c23a9cf06bbd3546a14f2d520a
|
[] |
no_license
|
capalmer1013/i
|
629bb44b4640fc91be883ca2e47c6a3d81f51a0b
|
4e0bc895ad232cad7dfefefec35a67346da6794b
|
refs/heads/master
| 2023-02-23T02:35:44.270400
| 2022-04-27T03:04:21
| 2022-04-27T03:04:21
| 86,883,795
| 0
| 0
| null | 2023-02-16T00:32:58
| 2017-04-01T04:29:44
|
Python
|
UTF-8
|
Python
| false
| false
| 519
|
py
|
import curses
from iterminal.constants import UP, DOWN, LEFT, RIGHT
def inputController(stdscr, p):
    """Blocking input loop: arrow keys move *p*, w/a/s/d make it shoot.

    stdscr -- a curses window; getch() supplies the key codes.
    p -- an object exposing move(direction) and shoot(direction).
    Never returns.
    """
    # Hoisted out of the loop: these mappings are constant, so there is no
    # need to rebuild both dicts on every keypress.
    dirDict = {curses.KEY_UP: UP, curses.KEY_DOWN: DOWN, curses.KEY_LEFT: LEFT, curses.KEY_RIGHT: RIGHT}
    shootDict = {ord('w'): UP, ord('a'): LEFT, ord('s'): DOWN, ord('d'): RIGHT}
    while True:
        key = stdscr.getch()
        #stdscr.addstr(0, 0, str(key))
        if key in dirDict:
            p.move(dirDict[key])
        elif key in shootDict:
            p.shoot(shootDict[key])
|
[
"capalmer1013@gmail.com"
] |
capalmer1013@gmail.com
|
5588811602468117dcf4c2c815b823cd9c66efd6
|
0bb474290e13814c2498c086780da5096453da05
|
/abc151/C/main.py
|
de4737e84810f2b025becf5752de28655a3a7833
|
[] |
no_license
|
ddtkra/atcoder
|
49b6205bf1bf6a50106b4ae94d2206a324f278e0
|
eb57c144b5c2dbdd4abc432ecd8b1b3386244e30
|
refs/heads/master
| 2022-01-25T15:38:10.415959
| 2020-03-18T09:22:08
| 2020-03-18T09:22:08
| 208,825,724
| 1
| 0
| null | 2022-01-21T20:10:20
| 2019-09-16T14:51:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
#!/usr/bin/env python3
import sys
sys.setrecursionlimit(10000000)
INF = 1<<32
def solve(N: int, M: int, p: "List[int]", S: "List[str]"):
    """Count solved problems and penalties from an AtCoder judge log.

    p[i] is the 1-based problem number of submission i and S[i] its verdict
    ('AC' or not).  Prints two numbers: how many problems got at least one
    AC, and the total non-AC submissions made to those problems before their
    first AC.
    """
    solved = [False] * (N + 1)
    wrong = [0] * (N + 1)
    for prob, verdict in zip(p, S):
        if verdict == 'AC':
            solved[prob] = True
        elif not solved[prob]:
            # Penalties accumulate only until the problem is first solved.
            wrong[prob] += 1
    ac = sum(1 for i in range(1, N + 1) if solved[i])
    wa = sum(wrong[i] for i in range(1, N + 1) if solved[i])
    print(ac, wa)
    return
def main():
    """Read all whitespace-separated tokens from stdin and call solve()."""
    tokens = iter(sys.stdin.read().split())
    N = int(next(tokens))  # type: int
    M = int(next(tokens))  # type: int
    p = []  # type: "List[int]"
    S = []  # type: "List[str]"
    for _ in range(M):
        p.append(int(next(tokens)))
        S.append(next(tokens))
    solve(N, M, p, S)
if __name__ == '__main__':
main()
|
[
"deritefully@gmail.com"
] |
deritefully@gmail.com
|
f17025743fc841a91077662b31a3cb066d361be2
|
a5e5d39f42f468d35f18aab3e78c3c090046b0df
|
/apps/contacts/forms.py
|
72c512374bed6f2e74a37ac9c50a2a1151e9ee6e
|
[] |
no_license
|
WayneLambert/portfolio
|
66198dfc18b3f254e6bc726575903c3e8f570dc4
|
7e02165386e4784f81e15bae0325a77cf45f410d
|
refs/heads/main
| 2023-02-04T18:08:13.559223
| 2023-01-29T14:13:59
| 2023-01-29T14:13:59
| 180,239,669
| 5
| 1
| null | 2023-02-04T07:07:10
| 2019-04-08T22:02:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 568
|
py
|
from django import forms
from captcha.fields import ReCaptchaField
from captcha.widgets import ReCaptchaV3
from apps.contacts.models import Contact
class ContactForm(forms.ModelForm):
    """Contact-page form: name/email/message fields plus a reCAPTCHA v3 check."""
    class Meta:
        model = Contact
        fields = ('first_name', 'last_name', 'email', 'message')
    # Spam protection; ReCaptchaV3 renders invisibly and scores the request.
    captcha = ReCaptchaField(
        widget=ReCaptchaV3(
            attrs={
                'data-theme': 'light',
                'data-size': 'invisible',
            }
        )
    )
    def __init__(self, *args, **kwargs):
        # No extra setup; delegates straight to ModelForm.__init__.
        super().__init__(*args, **kwargs)
|
[
"wayne.a.lambert@gmail.com"
] |
wayne.a.lambert@gmail.com
|
9eeb6493e7ffc4de7c553d77979a09da3caeaa1e
|
8e1668e35a8df9968ab14d16db089b51dbe6dd51
|
/python/algorithms/contests/four_divisors.py
|
77b0e61ed7442f35a879a90753b56c9b384e7f7b
|
[] |
no_license
|
Chalmiller/competitive_programming
|
f1ec0184d1ff247201522ab90ca8e66b3f326afc
|
b437080d1ba977c023baf08b7dc5c3946784e183
|
refs/heads/master
| 2021-03-24T05:11:59.383916
| 2020-08-24T22:07:41
| 2020-08-24T22:07:41
| 247,519,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
from typing import *
class Solution:
    def sumFourDivisors(self, nums: List[int]) -> int:
        """Return the sum of all divisors of those numbers in `nums` that have
        exactly four divisors; numbers with any other divisor count contribute 0.

        Improvement over the original: divisors are found in pairs up to
        sqrt(num) — O(sqrt(v)) per number instead of O(v) trial division.
        """
        total = 0
        for num in nums:
            divisors = set()
            d = 1
            while d * d <= num:
                if num % d == 0:
                    divisors.add(d)
                    divisors.add(num // d)
                    if len(divisors) > 4:
                        # More than four divisors: this number cannot qualify.
                        break
                d += 1
            if len(divisors) == 4:
                total += sum(divisors)
        return total
# Ad-hoc invocation with the LeetCode sample input (the result is discarded).
nums = [21,4,7]
obj = Solution()
obj.sumFourDivisors(nums)
|
[
"chalmiller1@gmail.com"
] |
chalmiller1@gmail.com
|
854a857b9eedc99be8a2332e23c37f43e09f4bc4
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/FjiriGn8gc5RE8Xm2_7.py
|
efeef575fedcd049a250bbc0cfb0345e324e582a
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
"""
Write a function that takes `fuel` (liters), `fuel_usage` (liters/100km),
`passengers`, `air_con` (boolean) and returns maximum distance that car can
travel.
* `fuel` is the number of liters of fuel in the fuel tank.
* `fuel_usage` is basic fuel consumption per 100 km (with the driver inside only).
* Every additional passenger is increasing basic fuel consumption by 5%.
* If the air conditioner is ON `True`, its increasing total (not basic) fuel consumption by 10%.
### Examples
total_distance(70.0, 7.0, 0, False) ➞ 1000.0
total_distance(36.1, 8.6, 3, True) ➞ 331.8
total_distance(55.5, 5.5, 5, False) ➞ 807.3
### Notes
* `fuel` and `fuel_usage` are always greater than 1.
* `passengers` are always greater or equal to 0.
* Round your answer to the nearest tenth.
"""
def total_distance(fuel, fuel_usage, passengers, air_con):
    """Return the maximum distance (km) reachable on `fuel` litres, rounded
    to one decimal.

    Each passenger adds 5% to the basic consumption; the air conditioner
    adds a further 10% to the total.
    """
    # Base factor 10 converts L/100km into L/km*1000; 11 bakes in the +10% A/C.
    multiplier = 11 if air_con else 10
    consumption = fuel_usage * ((1 + 0.05 * passengers) * multiplier)
    return round(1000 * fuel / consumption, 1)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
759453a9942cf164858e0646369370b634ed8630
|
751300a23242cfe393f86ff489339ffc81319efc
|
/speaker_spotting/speaker_spotting_oracle_cluster2-dev.py
|
9ee8cd16ac65ab6ad961b195a92ffb3714d90be2
|
[] |
no_license
|
yinruiqing/speaker_spotting
|
bc349791a59c6caa2a840fb39aa1d4c1221f99e9
|
c2fbdcbf2885d9545abe8bf1e19b2c412b0680ee
|
refs/heads/master
| 2021-05-04T14:39:33.213405
| 2018-05-03T10:28:11
| 2018-05-03T10:28:11
| 120,207,231
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,962
|
py
|
# coding: utf-8

# ```bash
# $ pip install pyannote.metrics==1.4.1
# $ pip install pyannote.db.odessa.ami==0.5.1
# ```

import clustering
import numpy as np

from pyannote.audio.features import Precomputed

# Speaker embeddings precomputed on disk for every file in the corpus.
precomputed = Precomputed('/vol/work1/bredin/speaker_spotting/embeddings')

from pyannote.database import get_protocol, FileFinder

# AMI speaker-spotting protocol, mix-headset condition.
protocol = get_protocol('AMI.SpeakerSpotting.MixHeadset', progress=True)
# enrolment consists in summing all relevant embeddings
def speaker_spotting_enrol(current_enrolment):
    """Build a target model: sum the embeddings over the enrolment regions."""
    regions = current_enrolment['enrol_with']
    file_embeddings = precomputed(current_enrolment)
    return np.sum(file_embeddings.crop(regions), axis=0, keepdims=True)
# One summed-embedding model per enrolment in the development set,
# keyed by its model_id.
models = {}
for current_enrolment in protocol.development_enrolment():
    model_id = current_enrolment.pop('model_id')
    models[model_id] = speaker_spotting_enrol(current_enrolment)
# Trials
from pyannote.core import SlidingWindow, SlidingWindowFeature
from pyannote.audio.embedding.utils import cdist
from pyannote.core import Annotation, Segment, Timeline

# Oracle reference: one Annotation per file, accumulating its ground truth.
# BUG FIX: `Annotation` was previously imported *after* this loop, so the
# module raised NameError on load; the imports are now hoisted above first use.
REFERENCE = {}
for current_file in protocol.development():
    uri = current_file['uri']
    if uri not in REFERENCE:
        REFERENCE[uri] = Annotation(uri=uri)
    REFERENCE[uri].update(current_file['annotation'])
# trial consists in comparing each embedding to the target embedding
def speaker_spotting_try_system2(current_trial):
    """ speaker spotting system based on the oracle
    clustering system

    Scores one trial: for each embedding window inside `try_with`, update an
    online oracle clustering with the ground-truth labels seen so far, and
    emit 2 - min(distance to the target model) as the detection score.
    Returns the scores as a SlidingWindowFeature aligned with the embeddings.
    """
    # target model
    # record the model embedding vector
    # and model id
    model = {}
    model_id = current_trial['model_id']
    model_embedding = models[current_trial['model_id']]
    model['mid'] = model_id
    model['embedding'] = model_embedding
    # where to look for this target
    try_with = current_trial['try_with']
    # precomputed embedding
    embeddings = precomputed(current_trial)
    # annotation of current file (oracle ground truth restricted to try_with)
    oracle_diarization = REFERENCE[current_trial['uri']].crop(current_trial['try_with'])
    # find index of first and last embedding fully included in 'try_with'
    indices = embeddings.sliding_window.crop(try_with, mode='strict')
    first, last = indices[0], indices[-1]
    onlineOracleClustering = clustering.OnlineOracleClustering(current_trial['uri'])
    start = embeddings.sliding_window[0].start
    # one score per embedding window; rows outside [first, last] stay 0
    data = np.zeros((len(embeddings.data), 1))
    for i, (window, _) in enumerate(embeddings):
        # make sure the current segment is in 'try_with'
        if i < first:
            start = window.end
            continue
        if i > last:
            break
        # everything observed from the start of try_with up to this window
        so_far = Segment(start, window.end)
        current_annotation = oracle_diarization.crop(so_far)
        score = 0.
        for segment, _, label in current_annotation.itertracks(label=True):
            example = {}
            example['label'] = label
            example['segment'] = segment
            example['embedding'] = embeddings.crop(segment, mode='center')
            example['indice'] = [i]
            # compute the distance with model
            example['distances'] = {}
            example['distances'][model['mid']] = list(cdist(example['embedding'],
                                                            model['embedding'],
                                                            metric='cosine').flatten())
            # update the online oracle clustering ("upadate" sic — external API name)
            onlineOracleClustering.upadateCluster(example)
        if not onlineOracleClustering.empty():
            # compute the current score (higher = closer to the target model)
            min_dist = min(onlineOracleClustering.modelDistance(model))
            score = max(score, 2-min_dist)
        data[i] = score
        start = window.end
    # transform scores to sliding window features
    data = data[first:last+1]
    sliding_window = SlidingWindow(start=embeddings.sliding_window[first].start,
                                   duration=embeddings.sliding_window.duration,
                                   step=embeddings.sliding_window.step)
    return SlidingWindowFeature(data, sliding_window)
# Depending on the value of the detection threshold, the alarm will be triggered with a different latency.
def process_score(scores):
    """Reduce a score track to the points where the running maximum increases.

    Returns a list of [window_end_time, score] pairs, one per new maximum.
    """
    best_so_far = 0
    trigger_points = []
    for window, score in scores:
        if score > best_so_far:
            trigger_points.append([window.end, score[0]])
            best_so_far = score[0]
    return trigger_points
def process_trial(trial, scores):
    """Bundle a trial's identifiers with its processed detection-score track."""
    return {
        'uri': trial['uri'],
        'model_id': trial['model_id'],
        'scores': process_score(scores),
    }
# Score every development trial and dump all low-latency score tracks to disk.
llss = []
for current_trial in protocol.development_trial():
    reference = current_trial.pop('reference')
    hypothesis = speaker_spotting_try_system2(current_trial)
    llss.append(process_trial(current_trial, hypothesis))

import simplejson as json

with open('llss.txt', 'w') as outfile:
    json.dump(llss, outfile)
|
[
"yinruiqing110@gmail.com"
] |
yinruiqing110@gmail.com
|
537b1e6af4b96fd09dba3bd4344c38fb66b9ca65
|
d4e9a392d7465a5c10417364dd91cd5dd3c5d935
|
/app/preprocess.py
|
d0fbfc80e4a532a5803e9d7632c2c1743c42d9e6
|
[] |
no_license
|
MaayanLab/harmonizome-ml
|
045f866bac4683a23dd8a393e48f9f09bb08c35d
|
5cebd194d771b1d7eabeb65a1c81ce0c78bf7a80
|
refs/heads/master
| 2020-03-21T13:26:26.132737
| 2020-03-05T22:46:38
| 2020-03-05T22:46:38
| 138,605,770
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
#!/usr/bin/env python
import os
import nbformat
from flask import render_template
from . import app
from .model import build_fields
from .runtime import ipynb_import_from_file
from .template.nbtemplate_parse import parse_fields
from .util import app_dir, globalContext
@app.template_filter('filter')
def reverse_filter(arr, attr, val):
    """Jinja filter: keep items whose `attr` (called first if callable) equals `val`."""
    def resolve(value):
        return value() if callable(value) else value

    return [item for item in arr if resolve(getattr(item, attr)) == val]
def main():
    """Pre-render every *.ipynb under templates/ipynb/ into an HTML form.

    For each notebook: parse its cells for form fields, then render either a
    matching custom HTML template or the generic layout into templates/<name>.html.
    """
    with app.test_request_context('/'):
        for _, _, files in os.walk(app_dir + '/templates/ipynb/'):
            for file in files:
                file, ext = os.path.splitext(file)
                if ext != '.ipynb':
                    continue
                print('Building %s...' % (file))
                nb = ipynb_import_from_file(
                    app_dir + '/templates/ipynb/%s.ipynb' % (file)
                )
                context = dict(
                    filename=file,
                    **globalContext,
                    **build_fields(),
                )
                # Collect every field declared across all notebook cells.
                fields = [field
                          for cell in nb.cells
                          for field in parse_fields(
                              cell['source'],
                              context,
                          )]
                form_out = open(app_dir + '/templates/%s.html' % (file), 'w')
                try:
                    if os.path.isfile(app_dir + '/templates/ipynb/%s.html' % (file)):
                        # Custom template
                        print(
                            render_template('ipynb/%s.html' % (file),
                                            **context,
                                            fields=fields,
                                            ),
                            file=form_out,
                        )
                    else:
                        # General template
                        print(
                            render_template('layout/ipynb.j2',
                                            **context,
                                            fields=fields,
                                            ),
                            file=form_out,
                        )
                except Exception as e:
                    # best-effort build: report and continue with the next file
                    print(e)
                finally:
                    form_out.close()
            # only the first os.walk() entry (the top directory) is processed
            break
|
[
"u8sand@gmail.com"
] |
u8sand@gmail.com
|
a99dbdf037c0559627072edbf0cd2f7e24983bb2
|
01f77b70dfb8817a913414fd25d9ed44ba3cd1f4
|
/oscar_invoices/urls.py
|
1bc931c736f24795068621e2e1d47790be762a5e
|
[] |
no_license
|
luiz158/django-oscar-invoices
|
ca2cf8b70347000399c5316532aca7e52d0f77a3
|
9cc3425410641a95832bda93155e4d2bfa95ac7e
|
refs/heads/master
| 2023-07-02T22:21:03.318698
| 2020-10-06T16:01:02
| 2020-10-06T16:01:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from django.urls import re_path

from . import views

# URL namespace, e.g. reverse("oscar_invoices:invoice", args=[pk]).
app_name = "oscar_invoices"

urlpatterns = [
    # Invoice preview page, keyed by the invoice primary key.
    re_path(r"invoice/(?P<pk>\d+)/", views.InvoicePreviewView.as_view(), name="invoice"),
]
|
[
"sasha@sasha0.ru"
] |
sasha@sasha0.ru
|
40c7a96a66c6ce84439222e54679cc51149bc0ba
|
a86293a2033c06410aa8ed19bcbce8ca55ea3c55
|
/src/client_libraries/python/dynamics/customerinsights/api/models/cds_org_info.py
|
e414e4e2f31a4ed4afa9f160f9258d839d0aa435
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
ramotheonly/Dynamics365-CustomerInsights-Client-Libraries
|
a3ca28aa78d2b5509e65d9895ff4a0d42d05f611
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
refs/heads/main
| 2023-08-02T08:09:04.063030
| 2021-09-28T22:42:15
| 2021-09-28T22:42:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,154
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CdsOrgInfo(Model):
    """The information for CDS Organization in BAP.

    :param friendly_name: Gets the Cds Organization Friendly Name
    :type friendly_name: str
    :param url: Gets the Cds Organization Url
    :type url: str
    :param state: Gets the Cds Organization State
    :type state: str
    :param location: Gets region location of Cds Organization
    :type location: str
    :param environment_sku: Gets SKU of Cds Organization
    :type environment_sku: str
    :param expiration_time: Gets the expiration time of CDS Organization if
     the SKU is Trial
    :type expiration_time: datetime
    :param max_allowed_expiration_time: Gets the max allowed expiration time
     of CDS Organization if the SKU is Trial
    :type max_allowed_expiration_time: datetime
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    # 'iso-8601' marks datetimes serialized as ISO 8601 strings.
    _attribute_map = {
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'environment_sku': {'key': 'environmentSku', 'type': 'str'},
        'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
        'max_allowed_expiration_time': {'key': 'maxAllowedExpirationTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        """Accept every field as an optional keyword argument (autogenerated style)."""
        super(CdsOrgInfo, self).__init__(**kwargs)
        self.friendly_name = kwargs.get('friendly_name', None)
        self.url = kwargs.get('url', None)
        self.state = kwargs.get('state', None)
        self.location = kwargs.get('location', None)
        self.environment_sku = kwargs.get('environment_sku', None)
        self.expiration_time = kwargs.get('expiration_time', None)
        self.max_allowed_expiration_time = kwargs.get('max_allowed_expiration_time', None)
|
[
"michaelajohnston@mac.com"
] |
michaelajohnston@mac.com
|
b92defed3b5e8993f941de86c1d080d39aa48810
|
c73beb04d101ca8d98c9126b1c47b4f19cc35066
|
/week1/calculator.py
|
f7f372af8b41c269b4a182934923f6716834ac12
|
[] |
no_license
|
fywest/python
|
a5ecf62e1f8cdf59c936da81b478c371f169aec4
|
cd97438679d8e129b3cb75d76226b16e7e7850ac
|
refs/heads/master
| 2022-12-13T06:15:04.021492
| 2019-05-28T19:21:18
| 2019-05-28T19:21:18
| 130,403,136
| 0
| 0
| null | 2022-12-08T05:08:55
| 2018-04-20T19:02:57
|
Python
|
UTF-8
|
Python
| false
| false
| 929
|
py
|
import sys
if __name__=='__main__':
if len(sys.argv)<2:
print("please input salary amount")
exit(1)
print(sys.argv[1])
try:
amount=int(sys.argv[1])
tax=0.0
amount_fortax=0.0
amount_fortax=amount-0-3500
if amount_fortax<=0:
tax=0;
elif amount_fortax>80000:
tax=amount_fortax*0.45-13505
elif amount_fortax>55000:
tax=amount_fortax*0.35-5505
elif amount_fortax>35000:
tax=amount_fortax*0.30-2755
elif amount_fortax>9000:
tax=amount_fortax*0.25-1005
elif amount_fortax>4500:
tax=amount_fortax*0.20-555
elif amount_fortax>1500:
tax=amount_fortax*0.1-105
else:
tax=amount_fortax*0.03-0
print("{0:.2f}".format((tax)))
exit(0)
except ValueError:
print("Parameter Error")
exit(1)
|
[
"fywest2109@hotmail.com"
] |
fywest2109@hotmail.com
|
ad796b01f49b7944d7c81a65fdb929ca1235c040
|
64ec8731553aa08c33373b212bbe431b1a23b97c
|
/test/util/util_spatial.py
|
74e2b2692deec5adc94efe1ca8e6186db7ba6e48
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
ChetanNathwani/pyrolite
|
98947fde265b25beea839f24495d68bbdb726eed
|
8de9c67855305115517418e127bf26de84ff062d
|
refs/heads/master
| 2023-07-26T18:57:28.024540
| 2021-07-08T09:19:02
| 2021-07-08T09:19:02
| 367,300,779
| 0
| 0
|
NOASSERTION
| 2021-05-14T09:23:47
| 2021-05-14T08:35:50
| null |
UTF-8
|
Python
| false
| false
| 7,785
|
py
|
import unittest

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# cartopy is optional: only the (commented-out) visual checks need it.
try:
    import cartopy.crs as ccrs

    HAVE_CARTOPY = True
except ImportError:
    HAVE_CARTOPY = False

from pyrolite.util.spatial import *
from pyrolite.util.math import isclose  # nan-equalling isclose
class TestGreatCircleDistance(unittest.TestCase):
    """great_circle_distance: symmetry, zero distance, and known arc angles
    across methods, units and absolute/relative modes."""

    def setUp(self):
        # Pairs of (lon, lat) endpoints zipped with the expected separation in
        # degrees of arc (None where only symmetry/zero checks apply).
        self.ps = zip(
            np.array(
                [
                    ([0, 0], [0, 0]),  # should be 0
                    ([-170, 0], [170, 0]),  # should be 20
                    ([0, -90], [0, 90]),  # should be 180
                    ([-45, 0], [45.0, 0.0]),  # should be 90
                    ([-90, -90], [90.0, 90.0]),  # should be 180
                    ([-90, -45], [90.0, 45.0]),  # should be 180, rotation of above
                    ([-90, -0], [90.0, 0.0]),  # should be 180, rotation of above
                    ([-60, 20], [45.0, 15.0]),
                    ([-87.0, 67.0], [34, 14]),
                    ([-45, -45], [45.0, 45.0]),
                    ([-45, -30], [45.0, 30.0]),
                ]
            ),
            [0, 20, 180, 90, 180, 180, 180, None, None, None, None],
        )

    def test_default(self):
        # Distance must be symmetric in its endpoints and zero for equal points.
        for ps, expect in self.ps:
            with self.subTest(ps=ps, expect=expect):
                distance = great_circle_distance(*ps)
                distance_r = great_circle_distance(*ps[::-1])
                self.assertTrue(isclose(distance, distance_r))
                if (ps[0] == ps[1]).all():
                    self.assertTrue(np.isclose(distance, 0.0))
                # Dead plotting snippet, kept for manual visual inspection:
                """
                ax = plt.subplot(111, projection=ccrs.Mollweide())  # ccrs.Orthographic(0, 0))
                ax.figure.set_size_inches(8, 8)
                ax.stock_img()
                ax.plot(
                    *np.array([*ps]).T,
                    color="blue",
                    marker="o",
                    transform=ccrs.Geodetic()
                )
                ax.plot(*np.array([*ps]).T, color="gray", transform=ccrs.PlateCarree())
                plt.text(
                    **np.array([*ps])[0] + [5, 5],
                    "{:2.0f}".format(distance),
                    horizontalalignment="left",
                    fontsize=10,
                    transform=ccrs.Geodetic()
                )
                plt.show()"""

    def test_absolute(self):
        # Symmetry must hold in both absolute and signed modes.
        for ps, expect in self.ps:
            for absolute in [True, False]:
                with self.subTest(ps=ps, expect=expect, absolute=absolute):
                    distance = great_circle_distance(*ps, absolute=absolute)
                    distance_r = great_circle_distance(*ps[::-1], absolute=absolute)
                    self.assertTrue(isclose(distance, distance_r))
                    if (ps[0] == ps[1]).all():
                        self.assertTrue(np.isclose(distance, 0.0))

    def test_degrees(self):
        # Degree and radian inputs must yield the same expected arcs.
        for ps, expect in self.ps:
            for degrees in [True, False]:
                with self.subTest(ps=ps, expect=expect, degrees=degrees):
                    if not degrees:
                        ps = np.deg2rad(
                            ps
                        )  # convert to radians to give sensible output
                    distance = great_circle_distance(*ps, degrees=degrees)
                    distance_r = great_circle_distance(*ps[::-1], degrees=degrees)
                    self.assertTrue(isclose(distance, distance_r))
                    if (ps[0] == ps[1]).all():
                        self.assertTrue(np.isclose(distance, 0.0))
                    if expect is not None:
                        self.assertTrue(isclose(distance, expect))

    def test_Vicenty(self):
        method = "vicenty"
        for ps, expect in self.ps:
            with self.subTest(ps=ps, expect=expect, method=method):
                distance = great_circle_distance(*ps, method=method)
                distance_r = great_circle_distance(*ps[::-1], method=method)
                self.assertTrue(isclose(distance, distance_r))
                if (ps[0] == ps[1]).all():
                    self.assertTrue(np.isclose(distance, 0.0))
                if expect is not None:
                    self.assertTrue(isclose(distance, expect))

    def test_haversine(self):
        method = "haversine"
        for ps, expect in self.ps:
            with self.subTest(ps=ps, expect=expect, method=method):
                distance = great_circle_distance(*ps, method=method)
                distance_r = great_circle_distance(*ps[::-1], method=method)
                self.assertTrue(isclose(distance, distance_r))
                if (ps[0] == ps[1]).all():
                    self.assertTrue(np.isclose(distance, 0.0))
                if expect is not None:
                    self.assertTrue(isclose(distance, expect))

    def test_cosines(self):
        method = "cosines"
        for ps, expect in self.ps:
            with self.subTest(ps=ps, expect=expect, method=method):
                distance = great_circle_distance(*ps, method=method)
                distance_r = great_circle_distance(*ps[::-1], method=method)
                self.assertTrue(isclose(distance, distance_r))
                if (ps[0] == ps[1]).all():
                    self.assertTrue(np.isclose(distance, 0.0))
                if expect is not None:
                    self.assertTrue(isclose(distance, expect))
class TestPieceWise(unittest.TestCase):
    """piecewise: segment counts per range and combinatorics across ranges."""

    def test_pieces(self):
        # One range split into k segments yields exactly k pieces.
        x1, x2 = 0.0, 10.0
        segment_ranges = [(x1, x2)]
        for segments in [1, 2, 3]:
            with self.subTest(segments=segments):
                result = list(piecewise(segment_ranges, segments=segments))
                self.assertTrue(len(result) == segments)

    def test_multiple_ranges(self):
        # Multiple ranges combine multiplicatively: segments ** n_ranges pieces.
        x1, x2 = 0.0, 10.0
        segment_ranges = [(x1, x2), (x2, x1), (x1, x2)]
        segments = 2
        result = list(piecewise(segment_ranges, segments=segments))
        self.assertTrue(len(result) == segments ** len(segment_ranges))
class TestSpatioTemporalSplit(unittest.TestCase):
    """spatiotemporal_split: yields one dict per parameter-space cell."""

    def test_split(self):
        x1, x2 = 0, 10
        segments = 2
        params = dict(age=(0, 10), lat=(-10, 10), lo=(-90, 90))
        result = list(spatiotemporal_split(segments=segments, **params))
        self.assertTrue([isinstance(item, dict) for item in result])
        # segments ** n_params cells in total
        self.assertTrue(len(result) == segments ** len(params))
class TestNSEW2Bounds(unittest.TestCase):
    """NSEW_2_bounds: compass-keyed dict to ordered bounds list."""

    def setUp(self):
        # Random west/south/east/north values keyed by compass direction.
        self.params = {
            k: v
            for (k, v) in zip(
                ["west", "south", "east", "north"], np.random.randint(1, 10, 4)
            )
        }

    def test_conversion(self):
        result = NSEW_2_bounds(self.params)
        self.assertTrue(isinstance(result, list))

    def test_order(self):
        # A custom order must be respected; maxx corresponds to east.
        order = ["minx", "maxx", "miny", "maxy"]
        result = NSEW_2_bounds(self.params, order=order)
        self.assertTrue(result[1] == self.params["east"])
class TestLevenshteinDistance(unittest.TestCase):
    """levenshtein_distance: known edit distances over strings and lists."""

    def test_string(self):
        pairs = [
            ("bar", "car"),
            ("bart", "car"),
            ("Saturday", "Sunday"),
            ("kitten", "sitting"),
        ]
        expect = [1, 2, 3, 3]
        for pair, exp in zip(pairs, expect):
            with self.subTest(pair=pair, exp=exp):
                dist = levenshtein_distance(*pair)
                self.assertTrue(dist == exp)

    def test_list(self):
        # The distance is defined over generic sequences, not just strings.
        pairs = [
            ([1, 2, 3], [1, 2, 2]),
            (["A", "B", "C"], ["A", "B"]),
            (["A", "B", "C", "D"], ["A", "E", "C"]),
        ]
        expect = [1, 1, 2]
        for pair, exp in zip(pairs, expect):
            with self.subTest(pair=pair, exp=exp):
                dist = levenshtein_distance(*pair)
                self.assertTrue(dist == exp)
if __name__ == "__main__":
    unittest.main()  # run the suite when executed directly
|
[
"morgan.j.williams@hotmail.com"
] |
morgan.j.williams@hotmail.com
|
a0cb1eee0ce7279e519465175cbaff109ed4fb60
|
e3365a497b6f3afa7afc36381f7a7d1752f09610
|
/.venv/bin/jupyter-notebook
|
70ee2fde73f1c59914cde9b01c22c06f382ee6ce
|
[] |
no_license
|
MohamadSheikhAlshabab/Chess_Board-
|
4229f7044831b79a8b8b6662a2aea5753d11c7dc
|
ee2e69d4567b69559584d0b074d91a25793db2f7
|
refs/heads/master
| 2022-12-08T05:10:59.482582
| 2020-09-04T16:34:18
| 2020-09-04T16:34:18
| 291,529,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
#!/home/mohamad/401/chess_board/.venv/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated into the virtualenv: launches Jupyter Notebook.
import re
import sys

from notebook.notebookapp import main

if __name__ == '__main__':
    # Strip setuptools script suffixes from argv[0] so usage output is clean.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"alshabab.moh@gmail.com"
] |
alshabab.moh@gmail.com
|
|
9ad30ee9734df856d50edf0d943d9924d00ca67a
|
1c8bcd2d8e129a92e3328f47d2a452814c033327
|
/kaggle/otto-group-product-classification-challenge/script_30.py
|
2250ea4fb9cf07c4c72a3fb83dcb6c31ab8ca81f
|
[
"MIT"
] |
permissive
|
josepablocam/janus-public
|
425334706f9a4519534779b7f089262cf5cf0dee
|
4713092b27d02386bdb408213d8edc0dc5859eec
|
refs/heads/main
| 2023-03-08T15:21:12.461762
| 2021-02-25T20:53:02
| 2021-02-25T20:53:02
| 314,606,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,253
|
py
|
# Otto Group product classification: LightGBM multiclass model over the raw
# count features augmented with their TF-IDF transform.
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfTransformer
import lightgbm as lgb

# Show wide frames in full when inspecting interactively.
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)

# The 93 anonymised count features and id are integers; target is a label string.
dtypes = {f"feat_{i}": "int32" for i in range(1, 94)}
dtypes["id"] = "int32"
dtypes["target"] = "string"
df_train = pd.read_csv(
    "/kaggle/input/otto-group-product-classification-challenge/train.csv",
    dtype=dtypes
).set_index("id")

# Encode class labels as ordinals in order of first appearance (both directions).
class_to_order = dict()
order_to_class = dict()
for idx, col in enumerate(df_train.target.unique()):
    order_to_class[idx] = col
    class_to_order[col] = idx
df_train["target_ord"] = df_train["target"].map(class_to_order).astype("int16")

feature_columns = [
    col for col in df_train.columns if col.startswith("feat_")
]
target_column = ["target_ord"]

# Stratified holdout for early stopping.
X_train, X_valid, y_train, y_valid = train_test_split(
    df_train[feature_columns], df_train[target_column],
    test_size=0.3, random_state=42,
    stratify=df_train[target_column]
)

# TF-IDF over the count features, fit on train only; appended as extra columns.
tfidf = TfidfTransformer()
tfidf_feature_train = tfidf.fit_transform(X_train).toarray().astype("float32")
tfidf_feature_valid = tfidf.transform(X_valid).toarray().astype("float32")
X_train_tfidf = np.hstack((X_train.values, tfidf_feature_train))
X_valid_tfidf = np.hstack((X_valid.values, tfidf_feature_valid))

# LightGBM multiclass parameters (regularisation values look tuned externally,
# e.g. via Optuna — TODO confirm provenance).
params = {
    'objective': "multiclass",
    'metric': {"multi_logloss"},
    'num_class': 9,
    'seed': 42,
    'lambda_l1': 0.0036682603550733813,
    'lambda_l2': 8.924549306063208,
    'num_leaves': 113,
    'feature_fraction': 0.48000000000000004,
    'bagging_fraction': 1.0,
    'bagging_freq': 0,
    'min_child_samples': 20
}
dataset_train = lgb.Dataset(X_train_tfidf, y_train)
dataset_valid = lgb.Dataset(X_valid_tfidf, y_valid)
# First fit: find the best iteration count via early stopping on the holdout.
booster = lgb.train(
    params,
    dataset_train,
    feature_name=(
        [f"feat_{i}" for i in range(1, 94)]
        + [f"tfidf_{i}" for i in range(1, 94)]
    ),
    num_boost_round=500,
    valid_sets=dataset_valid,
    early_stopping_rounds=20,
)
best_iteration = booster.best_iteration
print(best_iteration)

lgb.plot_importance(
    booster,
    max_num_features=30,
    figsize=(12, 10),
    dpi=300,
);

df_test = pd.read_csv(
    "/kaggle/input/otto-group-product-classification-challenge/test.csv",
    dtype=dtypes
).set_index("id")

# Refit TF-IDF and the model on the full training set, using the iteration
# count found above (no validation set this time).
tfidf = TfidfTransformer()
tfidf_feature_train_all = tfidf.fit_transform(df_train[feature_columns]).toarray().astype("float32")
X_train_all_tfidf = np.hstack((df_train[feature_columns].values, tfidf_feature_train_all))
dataset_train_all = lgb.Dataset(X_train_all_tfidf, df_train[target_column])
booster = lgb.train(
    params,
    dataset_train_all,
    feature_name=(
        [f"feat_{i}" for i in range(1, 94)]
        + [f"tfidf_{i}" for i in range(1, 94)]
    ),
    num_boost_round=best_iteration,
)

df_test
# Predict class probabilities for the test set and write the submission.
tfidf_feature_test = tfidf.transform(df_test).toarray()
X_test_tfidf = np.hstack((df_test[feature_columns].values, tfidf_feature_test))
pred = booster.predict(X_test_tfidf)
for idx, col in order_to_class.items():
    df_test[col] = pred[:,idx]
df_test[[f"Class_{i}" for i in range(1, 10)]].to_csv('submission.csv', index=True)
|
[
"jcamsan@mit.edu"
] |
jcamsan@mit.edu
|
ecbea36070dd712629e55b616938b75491ba10b9
|
3a8f8bef453f5eb01cc6f22d8bb140d7791024df
|
/command/tcommand.py
|
add4fd8bf60184b1755ced53d3534642b3e2870a
|
[] |
no_license
|
thomasvs/python-command
|
23a68de2ce596a7eed5a2740a5ee1471f62ed569
|
4c31072e9f5f68e22c92cdc8f0a02d911b7e5fc0
|
refs/heads/master
| 2020-05-02T11:29:24.459355
| 2014-09-07T22:23:58
| 2014-09-07T22:23:58
| 5,668,726
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,146
|
py
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
"""
A helper class for Twisted commands.
"""
from twisted.internet import defer
from twisted.python import failure
import command
class TwistedCommand(command.Command):
    """
    I am a Command that integrates with Twisted and its reactor.

    Instead of implementing the do() method, subclasses should implement a
    doLater() method which returns a deferred.
    """

    def installReactor(self, reactor=None):
        """
        Override me to install your own reactor in the parent
        ReactorCommand.
        """
        self.debug('installing reactor %r in ancestor ReactorCommand',
                   reactor)
        # Walk up the command tree to the nearest ReactorCommand ancestor.
        c = self
        while c.parentCommand and not isinstance(c, ReactorCommand):
            c = c.parentCommand
        if not c:
            raise AssertionError(
                '%r does not have a parent ReactorCommand' % self)
        self.debug('installing reactor %r in ancestor ReactorCommand %r',
                   reactor, c)
        c.installReactor(reactor)

    ### command.Command implementations

    def do(self, args):
        # Ensure a reactor is installed before the deferred work starts.
        self.debug('%r: installing reactor using method %r', self,
                   self.installReactor)
        self.installReactor()
        d = self.doLater(args)
        return d

    ### command.TwistedCommand methods to implement by subclasses

    def doLater(self, args):
        """
        Run the command's asynchronous work.

        @rtype: L{defer.Deferred}
        """
        raise NotImplementedError
class ReactorCommand(command.Command):
    """
    I am a Command that runs a reactor for its subcommands if they
    return a L{defer.Deferred} from their doLater() method.
    """

    reactor = None          # the installed twisted reactor (set lazily)
    returnValue = None      # exit status, or a stored Failure to re-raise
    _reactorRunning = False

    def installReactor(self, reactor=None):
        """
        Override me to install your own reactor.
        """
        self.debug('ReactorCommand: installing reactor %r', reactor)
        if not reactor:
            # default to the global twisted reactor
            from twisted.internet import reactor
        self.reactor = reactor

    ### command.Command overrides

    def parse(self, argv):
        """
        I will run a reactor to get the non-deferred result.
        """
        self.debug('parse: chain up')
        try:
            r = command.Command.parse(self, argv)
        except Exception:
            # get a full traceback to debug here
            f = failure.Failure()
            self.warning('Exception during %r.parse: %r\n%s\n',
                         self, f.getErrorMessage(), f.getTraceback())
            self.stderr.write('Exception: %s\n' % f.value)
            raise
        self.debug('parse: result %r', r)
        # if it's not a deferred, return the result as is
        if not isinstance(r, defer.Deferred):
            return r
        # We have a deferred, so we need to run a reactor
        d = r
        # child commands could have installed a reactor
        if not self.reactor:
            self.installReactor()

        def parseCb(ret):
            # Success path: normalize the result to an exit code and stop
            # the reactor if we started it.
            if ret is None:
                self.debug('parse returned None, defaults to exit code 0')
                ret = 0
            elif ret:
                self.debug('parse returned %r' % ret)
            elif self.parser.help_printed or self.parser.usage_printed:
                ret = 0
            self.debug('parse: cb: done')
            self.returnValue = ret
            if self._reactorRunning:
                self._reactorRunning = False
                self.debug('stopping reactor')
                self.reactor.stop()
            return ret

        def parseEb(failure):
            # Error path: record the failure (or CommandExited status) and
            # schedule a reactor shutdown.
            self.debug('parse: eb: failure: %r\n%s\n',
                       failure.getErrorMessage(), failure.getTraceback())
            # we can get here even before we run the reactor below;
            # so schedule a stop instead of doing it here
            # self.reactor.stop()
            self.reactor.callLater(0, self.reactor.stop)
            if failure.check(command.CommandExited):
                self.stderr.write(failure.value.output + '\n')
                reason = failure.value.status
                self.returnValue = reason
                return reason
            self.warning('errback: %r', failure.getErrorMessage())
            self.stderr.write('Failure: %s\n' % failure.value)
            self.returnValue = failure
            # we handled it by storing it for reraising, so don't
            # return it
            return

        d.addCallback(parseCb)
        d.addErrback(parseEb)

        def raiseIfFailure():
            # Re-raise a stored Failure as its underlying exception.
            if isinstance(self.returnValue, failure.Failure):
                raise self.returnValue.value

        if self.returnValue is not None:
            # The deferred already fired synchronously; no reactor run needed.
            self.debug('got return value before reactor ran, returning %r' %
                       self.returnValue)
            raiseIfFailure()
            return self.returnValue
        self.debug('running reactor %r', self.reactor)
        self._reactorRunning = True
        self.reactor.run()
        self.debug('ran reactor, got %r' % self.returnValue)
        raiseIfFailure()
        self.debug('ran reactor, returning %r' % self.returnValue)
        return self.returnValue
|
[
"thomas (at) apestaart (dot) org"
] |
thomas (at) apestaart (dot) org
|
79f50a378ab45f7801f359d695045b821ff47443
|
b7125b27e564d2cc80a2ce8d0a6f934aa22c8445
|
/.history/sudoku_20201101154742.py
|
c093972aa62bcc31bf99b51feb72a76950605747
|
[] |
no_license
|
JensVL96/Puzzle-solver-for-fun
|
4c15dcd570c3705b7ac555efb56b52913e81083c
|
6d8a4378a480372213a596a336a4deca727a00fc
|
refs/heads/master
| 2021-07-15T05:19:42.185495
| 2020-11-08T13:59:49
| 2020-11-08T13:59:49
| 224,855,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,829
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
from math import floor
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
    """Publish the conflicting row/column/block cells and the input lock as
    module globals, for the display code to highlight.

    BUG FIX: `col_index` was previously assigned `blk` and `blk_index` was
    assigned `col` — swapped relative to the parameter names and to the
    caller in valid(), which passes (row, col, blk) positionally.
    """
    global input_lock
    input_lock = lock
    global row_index
    row_index = row
    global col_index
    col_index = col
    global blk_index
    blk_index = blk
def get_cord(pos):
    """Translate a pixel position into board cell indices (stored as globals)."""
    global box_index_x, box_index_y
    box_index_x = (pos[0] - TOP_LX) // BLOCK_SIZE
    box_index_y = (pos[1] - TOP_LY) // BLOCK_SIZE
def valid(grid, x, y, val, increase):
    """Return True when `val` can be placed at (x, y) without conflicting
    with its row, column, or 3x3 block; otherwise record the conflicting
    cells via set_highlight() and return False.

    (`increase` is accepted for call compatibility but unused.)
    """
    conflict = False
    row = col = blk = (0, 0)

    # Scan the shared line cells for the same value.
    for k in range(9):
        if grid[x][k] == val:
            col = (x, k)
            conflict = True
        if grid[k][y] == val:
            row = (k, y)
            conflict = True

    # Scan the enclosing 3x3 block.
    base_x = (x // 3) * 3
    base_y = (y // 3) * 3
    for i in range(base_x, base_x + 3):
        for j in range(base_y, base_y + 3):
            if grid[i][j] == val:
                blk = (i, j)
                conflict = True

    if conflict:
        set_highlight(row, col, blk, 1)
        return False
    return True
class Main():
    """Top-level pygame application: owns the window and runs the edit loop."""
    def __init__(self):
        # Board is (re)built inside run(); kept as an attribute for inspection.
        self.board = []
        self.run()
    def run(self):
        """Initialise pygame and loop: handle events, apply input, redraw."""
        pg.init()
        self.screen = pg.display.set_mode(SCREEN_RES)
        pg.display.set_caption('Sudoku solver')
        display = Display_board(self.screen)
        flag1 = 0
        val = 0  # pending digit typed by the user (0 = nothing pending)
        pos = (0, 0)
        blink = False
        input_lock = 0  # non-zero while an invalid entry is being highlighted
        get_cord((0, 0))
        set_highlight((0, 0), (0, 0), (0, 0), input_lock)
        board = create_board().board
        while 1:
            for event in pg.event.get():
                # Quit on window close or ESC.
                if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
                    exit()
                if event.type == pg.MOUSEBUTTONDOWN:
                    flag1 = 1
                    pos = pg.mouse.get_pos()
                    get_cord(pos)
                    blink = True
                # Digit keys are only accepted while no conflict is shown.
                if event.type == pg.KEYDOWN and input_lock != 1:
                    if event.key == pg.K_1:
                        val = 1
                    if event.key == pg.K_2:
                        val = 2
                    if event.key == pg.K_3:
                        val = 3
                    if event.key == pg.K_4:
                        val = 4
                    if event.key == pg.K_5:
                        val = 5
                    if event.key == pg.K_6:
                        val = 6
                    if event.key == pg.K_7:
                        val = 7
                    if event.key == pg.K_8:
                        val = 8
                    if event.key == pg.K_9:
                        val = 9
                elif event.type == pg.KEYDOWN and input_lock == 1:
                    # While locked, only backspace clears the highlighted error.
                    if event.key == pg.K_BACKSPACE:
                        val = 0
                        set_highlight((0, 0), (0, 0), (0, 0), 0)
            if val != 0:
                # Try to commit the pending digit to the selected cell.
                display.draw_val(val, box_index_x, box_index_y)
                if valid(board, int(box_index_x), int(box_index_y), val, display):
                    board[int(box_index_x)][int(box_index_y)] = val
                else:
                    board[int(box_index_x)][int(box_index_y)] = 0
                val = 0
            pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
            self.screen.fill(BEIGE)
            display.draw(board)
            if blink:
                # Pulse the selected cell using an alpha-faded overlay surface.
                cell = display.find_cell(box_index_x, box_index_y)
                alpha = display.blink()
                print("start pos x: ", floor(cell[0]), "start pos y: ", floor(cell[1]), "end pos x: ", floor(cell[2]), "end pos y: ", floor(cell[3]))
                cell_width = int(cell[2])
                cell_height = int(cell[3])
                start_pos_X = int(cell[0])
                start_pos_y = int(cell[1])
                rect = pg.Surface((cell_width, cell_height))
                rect.set_alpha(alpha)
                # pg.draw.rect(self.screen, GREEN, cell)
                # NOTE(review): pygame Surfaces have no .x/.y attributes -- this
                # blit looks like it would raise AttributeError; the intended
                # coordinates are presumably (start_pos_X, start_pos_y). Confirm.
                self.screen.blit(rect, (rect.x, rect.y))
            # print(box_index_x, box_index_y)
            if input_lock == 1:
                display.update(board, row_index, col_index, blk_index)
            # display.draw_box()
            pg.display.update()
            # NOTE(review): the solver state appears to be recomputed every
            # frame -- expensive; consider hoisting if confirmed.
            self.solution = solve_board(board)
            self.solution.assign_flags(board)
if __name__ == '__main__':
Main()
|
[
"jle040@uit.no"
] |
jle040@uit.no
|
d4533f4cdf53a8a902ef0e5e52f13d6ae690bf32
|
cfc3fa658f826d02308453e557d82758895399c2
|
/datasets/id_newspapers_2018/id_newspapers_2018.py
|
96a294e8fc22502654396c9ba5f85efe68734ddd
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
meehawk/datasets
|
cac530ec0e17514c01cdff30302521d6303ed93b
|
b70141e3c5149430951773aaa0155555c5fb3e76
|
refs/heads/master
| 2023-03-29T12:51:54.700891
| 2021-04-08T17:22:53
| 2021-04-08T17:22:53
| 355,996,122
| 9
| 0
|
Apache-2.0
| 2021-04-08T17:31:03
| 2021-04-08T17:31:02
| null |
UTF-8
|
Python
| false
| false
| 4,123
|
py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Indonesian Newspapers 2018"""
from __future__ import absolute_import, division, print_function
import glob
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{id_newspapers_2018,
author = {},
title = {Indonesian Newspapers 2018},
year = {2019},
url = {https://github.com/feryandi/Dataset-Artikel},
}
"""
_DESCRIPTION = """\
The dataset contains around 500K articles (136M of words) from 7 Indonesian newspapers: Detik, Kompas, Tempo,
CNN Indonesia, Sindo, Republika and Poskota. The articles are dated between 1st January 2018 and 20th August 2018
(with few exceptions dated earlier). The size of uncompressed 500K json files (newspapers-json.tgz) is around 2.2GB,
and the cleaned uncompressed in a big text file (newspapers.txt.gz) is about 1GB. The original source in Google Drive
contains also a dataset in html format which include raw data (pictures, css, javascript, ...)
from the online news website
"""
_HOMEPAGE = "https://github.com/feryandi/Dataset-Artikel"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International Public License"
_URLs = ["http://cloud.uncool.ai/index.php/s/kF83dQHfGeS2LX2/download"]
class IdNewspapers2018Config(datasets.BuilderConfig):
    """Configuration object for the IdNewspapers2018 dataset builder."""

    def __init__(self, **kwargs):
        """Forward every keyword argument to ``datasets.BuilderConfig``."""
        super().__init__(**kwargs)
class IdNewspapers2018(datasets.GeneratorBasedBuilder):
    """Builder yielding ~500K Indonesian newspaper articles from 2018."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        IdNewspapers2018Config(
            name="id_newspapers_2018",
            version=VERSION,
            description="IdNewspapers2018 dataset",
        ),
    ]

    # Every column in this dataset is a plain string.
    _FIELD_NAMES = ("id", "url", "date", "title", "content")

    def _info(self):
        """Describe the dataset schema and citation metadata."""
        string_features = datasets.Features(
            {name: datasets.Value("string") for name in self._FIELD_NAMES}
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=string_features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and expose a single train split."""
        archive_root = dl_manager.download_and_extract(_URLs[0])
        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "article_dir": os.path.join(archive_root, "newspapers"),
                "split": "train",
            },
        )
        return [train_split]

    def _generate_examples(self, article_dir, split):
        """Yield (index, example) pairs from the extracted json files."""
        logger.info("⏳ Generating %s examples from = %s", split, article_dir)
        pattern = os.path.join(article_dir, "**/*.json")
        for idx, path in enumerate(sorted(glob.glob(pattern, recursive=True))):
            with open(path, encoding="utf-8") as handle:
                article = json.load(handle)
            yield idx, {
                "id": str(idx),
                "url": article["url"],
                "date": article["date"],
                "title": article["title"],
                "content": article["content"],
            }
|
[
"noreply@github.com"
] |
meehawk.noreply@github.com
|
94e2c2a401b125a43cee98d701cd7ec13826b551
|
773dc03117f8b0d51f7a10e2a4577229c8be6ba3
|
/migrations/models/36_20230108160220_update.py
|
825e7924ebf0640cde169d23190cb1cc5555254b
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
tcprescott/sahasrahbot
|
382cdff058d63feb5f42dbbd7729eb4b08c4d1bd
|
64a125d948873d0faa5ea3f2d306075ad9e013be
|
refs/heads/master
| 2023-08-31T15:33:01.533206
| 2023-08-31T01:58:48
| 2023-08-31T01:58:48
| 178,310,225
| 22
| 43
|
MIT
| 2023-09-01T08:45:52
| 2019-03-29T01:34:45
|
Python
|
UTF-8
|
Python
| false
| false
| 380
|
py
|
from tortoise import BaseDBAsyncClient
async def upgrade(db: BaseDBAsyncClient) -> str:
    """Forward migration: add the ``private`` flag to ranked-choice
    elections and drop the obsolete ``twitch_channels`` table.

    The returned SQL is executed by aerich against ``db``.
    """
    return """
        ALTER TABLE `ranked_choice_election` ADD `private` BOOL NOT NULL DEFAULT 0;
        DROP TABLE IF EXISTS `twitch_channels`;"""
async def downgrade(db: BaseDBAsyncClient) -> str:
    """Reverse migration: remove the ``private`` column again.

    Note: the dropped ``twitch_channels`` table is NOT restored.
    """
    return """
        ALTER TABLE `ranked_choice_election` DROP COLUMN `private`;"""
|
[
"tcprescott@gmail.com"
] |
tcprescott@gmail.com
|
c2ee27335ec1db4df52d38e9bcdabfb39e334cc2
|
8239e45b6b031839dcd464bc80a6c8d17ed2f7b7
|
/cloudarmy/contrib/conditions/environment.py
|
53177af7d135ab208dc6fbe1359908ca766b4a45
|
[] |
no_license
|
geeknam/cloudarmy
|
401efaee8c8e5e916ddff757edcc657698d9687f
|
4363d5bdf8719a8f8bab8104c8ea7d2247d15746
|
refs/heads/master
| 2021-07-11T19:44:41.769661
| 2016-03-14T12:43:47
| 2016-03-14T12:43:47
| 52,852,867
| 3
| 1
| null | 2021-03-25T21:40:17
| 2016-03-01T06:11:43
|
Python
|
UTF-8
|
Python
| false
| false
| 278
|
py
|
from troposphere import Ref, Equals
class EnvironmentCondition(object):
    """Reusable CloudFormation conditions keyed on the EnvironmentType parameter.

    Each entry compares the template's ``EnvironmentType`` parameter against
    a literal environment name using troposphere ``Equals``/``Ref``.
    """
    # Mapping of condition name -> troposphere condition expression.
    conditions = {
        "IsProduction": Equals(
            Ref("EnvironmentType"), "production"
        ),
        "IsStaging": Equals(
            Ref("EnvironmentType"), "staging"
        ),
    }
|
[
"emoinrp@gmail.com"
] |
emoinrp@gmail.com
|
04c39588a75c7d1646fb96aeb656bbb9548a976f
|
c1b56d50c68bf32e900349cbab4bfd043a79a237
|
/Pythagorean Triplet.py
|
231f1b5449311249ea7648796d95434b151ff9d6
|
[] |
no_license
|
divanshu79/GeeksForGeeks-solutions
|
c7a5f0be04e8376e72f933c35fb2d09641fe7130
|
caf77aad9c53d5d05c87318806097d750864a6e3
|
refs/heads/master
| 2020-03-25T07:56:14.997786
| 2018-08-05T06:37:22
| 2018-08-05T06:37:22
| 143,589,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
from collections import defaultdict
for _ in range(int(input())):
n = int(input())
arr = list(map(int, input().split()))
def_dict = defaultdict(int)
sq_list = []
for i in arr:
def_dict[i*i] = 1
sq_list.append(i*i)
sum_list = []
flag = 0
for i in range(n-1):
for j in range(i+1, n):
if def_dict[sq_list[i] + sq_list[j]] == 1:
flag = 1
print(arr[i], arr[j])
break
if flag == 1:
break
if flag == 1:
print('Yes')
else:
print('No')
|
[
"noreply@github.com"
] |
divanshu79.noreply@github.com
|
189638b913ac8e4f95628be830208ded60454bf1
|
994e5b7156a8c1429238facc1463ad1846f1a89a
|
/models/official/nlp/xlnet/xlnet_config.py
|
95ab092442ef4f4b96e61d91ed391051469e8441
|
[
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/Felect_M46O
|
f0c2a9a6c48695705e0b68c92c3a414bacfaa599
|
6d8b80e216c40233d2c1b9e51fe6f605a3b5ef4b
|
refs/heads/main
| 2023-04-22T11:33:59.448117
| 2021-05-06T13:01:12
| 2021-05-06T13:01:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,317
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions used in XLNet model."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import json
import os
import tensorflow as tf
def create_run_config(is_training, is_finetune, flags):
    """Build a RunConfig from command-line flags.

    Finetuning uses only the regularisation/init flags; pretraining
    additionally wires the memory and bidirectional-pipeline flags.
    """
    config_kwargs = {
        'is_training': is_training,
        'use_tpu': flags.use_tpu,
        'dropout': flags.dropout,
        'dropout_att': flags.dropout_att,
        'init_method': flags.init_method,
        'init_range': flags.init_range,
        'init_std': flags.init_std,
        'clamp_len': flags.clamp_len,
    }
    if not is_finetune:
        # Pretraining-only knobs (memory caching, reuse, bi-directional data).
        config_kwargs.update(
            mem_len=flags.mem_len,
            reuse_len=flags.reuse_len,
            bi_data=flags.bi_data,
            clamp_len=flags.clamp_len,
            same_length=flags.same_length,
        )
    return RunConfig(**config_kwargs)
# TODO(hongkuny): refactor XLNetConfig and RunConfig.
class XLNetConfig(object):
    """Checkpoint-specific XLNet hyperparameters.

    These values must match between pretraining and finetuning:
    n_layer, d_model, n_head, d_head, d_inner, ff_activation
    ("relu" or "gelu"), untie_r, n_token.
    """

    def __init__(self, FLAGS=None, json_path=None, args_dict=None):
        """Construct from flags, a json file, or a plain dict.

        At least one source must be provided; when several are given the
        later ones (json, then dict) overwrite earlier values.
        """
        assert FLAGS is not None or json_path is not None or args_dict is not None
        self.keys = [
            'n_layer', 'd_model', 'n_head', 'd_head', 'd_inner', 'ff_activation',
            'untie_r', 'n_token'
        ]
        if FLAGS is not None:
            self.init_from_flags(FLAGS)
        if json_path is not None:
            self.init_from_json(json_path)
        if args_dict is not None:
            self.init_from_dict(args_dict)

    def init_from_dict(self, args_dict):
        """Copy every known hyperparameter out of *args_dict* onto self."""
        for name in self.keys:
            setattr(self, name, args_dict[name])

    def init_from_flags(self, flags):
        """Copy every known hyperparameter from an absl-style flags object."""
        self.init_from_dict({name: getattr(flags, name) for name in self.keys})

    def init_from_json(self, json_path):
        """Load hyperparameters from a json file (read through tf GFile)."""
        with tf.io.gfile.GFile(json_path) as fp:
            self.init_from_dict(json.load(fp))

    def to_json(self, json_path):
        """Save this config as pretty-printed json, creating parent dirs."""
        payload = {name: getattr(self, name) for name in self.keys}
        target_dir = os.path.dirname(json_path)
        if not tf.io.gfile.exists(target_dir):
            tf.io.gfile.makedirs(target_dir)
        with tf.io.gfile.GFile(json_path, 'w') as fp:
            json.dump(payload, fp, indent=4, sort_keys=True)
class RunConfig(object):
    """Run-specific XLNet hyperparameters.

    Unlike XLNetConfig these may legitimately differ between pretraining
    and finetuning (and from run to run), so they are stored separately.
    """

    def __init__(self, is_training, use_tpu, dropout, dropout_att,
                 init_method='normal', init_range=0.1, init_std=0.02,
                 mem_len=None, reuse_len=None, bi_data=False, clamp_len=-1,
                 same_length=False, use_cls_mask=True):
        """Initializes RunConfig.

        Args:
          is_training: bool, whether in training mode.
          use_tpu: bool, whether TPUs are used.
          dropout: float, dropout rate.
          dropout_att: float, dropout rate on attention probabilities.
          init_method: str, "normal" or "uniform" parameter initialization.
          init_range: float, uniform-init bound (init="uniform" only).
          init_std: float, normal-init stddev (init="normal" only).
          mem_len: int, number of tokens to cache.
          reuse_len: int, tokens of the current batch cached for reuse.
          bi_data: bool, bidirectional input pipeline (pretraining: True).
          clamp_len: int, clamp relative distances beyond this; -1 = off.
          same_length: bool, same attention length for each token.
          use_cls_mask: bool, whether to introduce cls mask.
        """
        self.is_training = is_training
        self.use_tpu = use_tpu
        self.dropout = dropout
        self.dropout_att = dropout_att
        self.init_method = init_method
        self.init_range = init_range
        self.init_std = init_std
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.use_cls_mask = use_cls_mask
|
[
"noreply@github.com"
] |
TrellixVulnTeam.noreply@github.com
|
bc2ec15906048fc42b645664a4552aa614fffaec
|
4cbe0eef8694a7f5443e6d276577d3ca08d15456
|
/cpt1/noneLenDemo.py
|
a713e854c6074bac6033c4576a506fd818583169
|
[] |
no_license
|
GSIL-Monitor/PythonLearning
|
2bf313e366e395df1d27164fe79e16e948094583
|
3f20f9cdff1cef368baa6a2374e6b2cbe3871aa4
|
refs/heads/master
| 2020-04-19T09:11:45.169704
| 2018-11-28T09:55:01
| 2018-11-28T09:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
s=None
s1 = ''
s2 = ' '
print(len(s1))
print(len(s2))
print(len(s2.strip()))
# print(len(s))
t1 = t2 = t3 = None
print(t1, t2, t3)
|
[
"249398363@qq.com"
] |
249398363@qq.com
|
29eaf7dca764f8db0e109f82e350645c5ee1f812
|
c741f04141784a2571d2d27d95e0d994e4584ab1
|
/learning/py3/连接mysql/PyMySQL/test3.py
|
f72ccb6eb48887eb51cf2b269456a0e175b90e48
|
[] |
no_license
|
haodonghui/python
|
bbdece136620bc6f787b4942d6e1760ed808afd4
|
365062ba54297c81093b7f378742e76d438658b7
|
refs/heads/master
| 2022-02-03T23:52:37.288503
| 2022-01-27T05:23:25
| 2022-01-27T05:23:25
| 191,729,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
from pythonmysql3 import DB
if __name__ == '__main__':
    # Smoke test: open a connection through the project's DB context manager
    # and dump the first ten rows of uc_user.
    # SECURITY NOTE(review): host and credentials are hard-coded in source;
    # move them to environment variables / a config file before sharing.
    with DB(host='59.110.228.110', port=3306, database='test_tea_uc_0', user='test_tea_uc_0',
            passwd='L~+SJ*F^kon[t+10l6') as db:
        db.execute('select * from uc_user limit 0,10')
        print(db)
        # Iterating the DB wrapper yields the fetched rows.
        for i in db:
            print(i)
|
[
"haodonghui@yestae.com"
] |
haodonghui@yestae.com
|
3ccbf8883c86965571f090c36bced556f00efdd1
|
f60ec2c12c6d56be853bec9c222b8ea91b170130
|
/apps/pig/src/pig/models.py
|
a38ff955d4c0be321ef26bdb2d085598b63d858f
|
[
"Apache-2.0"
] |
permissive
|
jackerxff/hue
|
b33911f62129cc949096dd48b3fdcf0584bbba69
|
2418050cafd75aab043900c28a867f5c13bc1c0e
|
refs/heads/master
| 2020-12-29T02:54:39.947205
| 2013-04-05T21:25:07
| 2013-04-05T21:26:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,071
|
py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import json
except ImportError:
import simplejson as json
import posixpath
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop.lib.exceptions_renderable import PopupException
from hadoop.fs.hadoopfs import Hdfs
from oozie.models import Workflow
class Document(models.Model):
    """Base model for user-owned documents (user designs vs. submissions)."""
    # User who may modify the document (besides superusers).
    owner = models.ForeignKey(User, db_index=True, verbose_name=_t('Owner'), help_text=_t('User who can modify the job.'))
    # True for a real user document; False for a submitted-job record.
    is_design = models.BooleanField(default=True, db_index=True, verbose_name=_t('Is a user document, not a document submission.'),
                                    help_text=_t('If the document is not a submitted job but a real query, script, workflow.'))

    def is_editable(self, user):
        """Return True if *user* is a superuser or owns this document."""
        return user.is_superuser or self.owner == user

    def can_edit_or_exception(self, user, exception_class=PopupException):
        """Return True when *user* may edit, otherwise raise *exception_class*."""
        if self.is_editable(user):
            return True
        else:
            raise exception_class(_('Only superusers and %s are allowed to modify this document.') % user)
class PigScript(Document):
    """A Pig script document; all mutable state lives in the json ``data`` blob."""
    # Keys recognised inside the json-encoded ``data`` payload.
    _ATTRIBUTES = ['script', 'name', 'properties', 'job_id', 'parameters', 'resources']
    # Json-encoded payload; defaults to an empty script skeleton.
    data = models.TextField(default=json.dumps({
        'script': '',
        'name': '',
        'properties': [],
        'job_id': None,
        'parameters': [],
        'resources': []
    }))

    def update_from_dict(self, attrs):
        """Merge recognised keys of *attrs* into the payload; None values are skipped."""
        data_dict = self.dict
        for attr in PigScript._ATTRIBUTES:
            if attrs.get(attr) is not None:
                data_dict[attr] = attrs[attr]
        self.data = json.dumps(data_dict)

    @property
    def dict(self):
        """Decoded json payload as a fresh Python dict."""
        return json.loads(self.data)
class Submission(models.Model):
    """Join record linking a PigScript to the Oozie Workflow that ran it."""
    script = models.ForeignKey(PigScript)
    workflow = models.ForeignKey(Workflow)
def create_or_update_script(id, name, script, user, parameters, resources, is_design=True):
    """Fetch the script with *id* for update, or create a fresh one.

    Permission model ("this takes care of security"): an existing script
    may only be updated by its owner or a superuser, enforced through
    ``can_edit_or_exception`` (raises PopupException otherwise).

    Fix: the original wrapped both the lookup and the permission check in a
    bare ``except:``, so the permission exception was silently swallowed and
    a brand-new script was created for the unauthorized user.  Only a failed
    lookup now falls back to creation; permission errors propagate.
    """
    try:
        pig_script = PigScript.objects.get(id=id)
    except (PigScript.DoesNotExist, ValueError):
        # No such script (or unusable id, e.g. None): start a new one.
        pig_script = PigScript.objects.create(owner=user, is_design=is_design)
    else:
        # Existing script: enforce ownership before mutating it.
        pig_script.can_edit_or_exception(user)

    pig_script.update_from_dict({
        'name': name,
        'script': script,
        'parameters': parameters,
        'resources': resources
    })

    return pig_script
def get_scripts(user, max_count=200):
    """Return up to *max_count* of the user's scripts, newest first.

    Each entry is a plain dict with the fields the UI consumes
    (id, name, script, parameters, resources, isDesign).
    """
    recent = PigScript.objects.filter(owner=user).order_by('-id')[:max_count]
    results = []
    for pig_script in recent:
        attrs = pig_script.dict
        results.append({
            'id': pig_script.id,
            'name': attrs['name'],
            'script': attrs['script'],
            'parameters': attrs['parameters'],
            'resources': attrs['resources'],
            'isDesign': pig_script.is_design,
        })
    return results
def get_workflow_output(oozie_workflow, fs):
    """Best-effort guess of the workflow's output path.

    Returns the ``workflowRoot`` config value if present and existing on
    the filesystem, otherwise None.
    """
    # TODO: guess from the STORE or parameters
    candidate = None
    if 'workflowRoot' in oozie_workflow.conf_dict:
        candidate = oozie_workflow.conf_dict.get('workflowRoot')
    if candidate and not fs.exists(candidate):
        candidate = None
    return candidate
def hdfs_link(url):
    """Map an HDFS URL to a filebrowser view path.

    Absolute paths go to ``/filebrowser/view``, relative ones to
    ``/filebrowser/home_relative_view/``.  Falsy URLs and URLs without a
    path component are returned unchanged.
    """
    if not url:
        return url
    path = Hdfs.urlsplit(url)[2]
    if not path:
        return url
    if path.startswith(posixpath.sep):
        prefix = "/filebrowser/view"
    else:
        prefix = "/filebrowser/home_relative_view/"
    return prefix + path
|
[
"romain@cloudera.com"
] |
romain@cloudera.com
|
1d1c6159d39366e7b2130cca2ed83d36fab067c6
|
c96c79bb7ca3e71d609eab20ed8d68cff8ee7fe7
|
/DataStructurePrograms/bankingCashCounter.py
|
0049a83ecb8431b52e5fdb75741a8707cd5863a8
|
[] |
no_license
|
NikhilDusane222/Python
|
25c9eb50bcd5e0e8679ece41d97129b9100e9a91
|
0183c4211a28bbddb6792978cf55da89a682f67a
|
refs/heads/master
| 2021-05-18T13:07:07.059428
| 2020-04-12T17:23:57
| 2020-04-12T17:23:57
| 251,254,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
#Class Queue
class Queue:
    """Interactive bank cash counter backed by a single running balance."""

    def __init__(self):
        """Start with an empty account and greet the user."""
        self.balance = 0
        print("Welcome to the Bank Cash Counter..")
        print("This is a Banking portal")

    def enqueue_deposit(self):
        """Prompt for an amount and add it to the balance."""
        amount = int(input("Enter amount to be Deposited: "))
        self.balance = self.balance + amount
        print("\nAmount Deposited:", amount)

    def dequeue_withdraw(self):
        """Prompt for an amount and subtract it when funds allow."""
        amount = int(input("Enter amount to be Withdrawn: "))
        if self.balance < amount:
            print("\nInsufficient balance ")
            return
        self.balance = self.balance - amount
        print("\nYou Withdrew:", amount)

    def queue_display(self):
        """Show the current balance."""
        print("\nNet Available Balance=", self.balance)

    def queue_exit(self):
        """Terminate the program."""
        exit()
#Main function
if __name__ == '__main__':
    q = Queue()
    try:
        # Menu loop: dispatch each numeric choice to the matching operation.
        while True:
            print("Please Enter the option that you want to make a transaction:")
            #Choice for Deposite and Withdrawn amount
            choiceNo = int(input(
                " 1. Deposite Amount to the account \n 2. Withdraw Amount from the account \n "
                "3. Display the amount \n 4. Cancel Transaction \n"))
            if choiceNo == 1:
                q.enqueue_deposit()
            elif choiceNo == 2:
                q.dequeue_withdraw()
            elif choiceNo == 3:
                q.queue_display()
            elif choiceNo == 4:
                q.queue_exit()
            else:
                print("Invalid Choice...!! Press the Correct choice")
    except ValueError:
        # NOTE(review): a non-numeric entry ends the whole program (the
        # handler sits outside the while loop); re-prompting may be intended.
        print("Invalid Choice...!! Press the Correct choice")
|
[
"you@example.com"
] |
you@example.com
|
fb9d2de4608618a90483dce7880ec25859319581
|
eb4070d3dda38df8b6d4118343db59d559e58df6
|
/week-1/Examples/plot_bostonjuly2012temps.py
|
7106e6e834e9c292ae22013b1fc5392a53e0f201
|
[] |
no_license
|
RaviTezu/MITx-6.00.2x
|
df767115085e4f28cfaac20ec90c18453517ed5a
|
6effafa89e15e1d59c9302c4a3c9f6ce96da0faa
|
refs/heads/master
| 2021-01-10T16:15:03.999778
| 2016-04-20T11:40:46
| 2016-04-20T11:40:46
| 53,061,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
from __future__ import print_function
import os
import pylab
# It is assumed that the 'julyTemps.txt' file is present along the side of this script and this script is
# executed at the root.
# Data file location and parsed-temperature accumulators.
PWD = os.getcwd()
FILE_NAME = 'julyTemps.txt'
FILE = PWD + '/' + FILE_NAME
HIGH = []  # daily high temperatures (as strings, per original behavior)
LOW = []   # daily low temperatures (as strings)


def load_file(inFile=FILE):
    """Open the temperature data file for reading."""
    return open(inFile, 'r')


def read_data(fd=None):
    """Parse high/low temperatures from *fd* into the HIGH and LOW lists.

    Lines are whitespace-split; only lines with at least three fields whose
    first field is a day number are kept (header/blank lines are skipped).

    Fix: the original signature was ``read_data(fd=load_file())``, which
    opened the file at import time (defaults are evaluated at definition) —
    importing this module crashed whenever the file was absent.  The file
    is now opened lazily on the first call without an explicit *fd*.
    """
    if fd is None:
        fd = load_file()
    for line in fd.readlines():
        fields = line.split()
        if len(fields) < 3 or not fields[0].isdigit():
            continue
        HIGH.append(fields[1])
        LOW.append(fields[2])


def calculate_diff(high=HIGH, low=LOW):
    """Return the day-by-day (high - low) temperature differences as ints."""
    return [int(h) - int(l) for h, l in zip(high, low)]
def plotting(diff_temps):
    """Plot the daily temperature ranges and show the figure.

    Also prints the number of days plotted, as the original did.
    """
    num_days = len(diff_temps)
    print(num_days)
    days = range(1, num_days + 1)
    pylab.figure(1)
    pylab.title('Day by Day Ranges in Temperature in Boston in July 2012')
    pylab.xlabel('Days')
    pylab.ylabel('Temperature Ranges')
    pylab.plot(days, diff_temps)
    pylab.show()
if __name__ == "__main__":
read_data()
plotting(calculate_diff())
|
[
"ravi-teja@live.com"
] |
ravi-teja@live.com
|
03cdb1d4773ac7b2357bc6f611f33df1c00e995b
|
d5eb2fe5d49b581562ae2bc660d08ca80a03d331
|
/PythonSandbox/src/leetcode/lc235_lowest_common_ancestor_bst.py
|
8a95af9b55836848b2011fec66cdb18da8f848ba
|
[] |
no_license
|
mcxu/code-sandbox
|
fd5aa2e593057901d281a0e74db8957777b06cf3
|
a785231582bda8578f79982e2dcddd2f2ab559b4
|
refs/heads/master
| 2023-07-10T02:07:24.180947
| 2023-07-08T03:31:48
| 2023-07-08T03:31:48
| 130,493,607
| 4
| 2
| null | 2023-01-15T22:53:29
| 2018-04-21T16:49:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,624
|
py
|
class Solution:
    """LC235 lowest common ancestor via brute-force deepest-valid-node search.

    For every node reached by an iterative DFS, the whole subtree is
    re-scanned to check whether both p and q occur below it; the deepest
    such node is the LCA.  O(n^2) time, O(n) space.  Node *values* (not
    identities) are compared, so values are assumed unique.
    """

    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Return the deepest node whose subtree contains both p and q.

        Fix: the original returned the integer 0 for an empty tree; callers
        expect a node or None, so None is returned instead.
        """
        if root is None:
            return None
        deepest_valid_depth = 0
        valid_node = root  # falls back to root when nothing deeper qualifies
        stack = [(root, 0)]  # iterative DFS over (node, depth) pairs
        while stack:
            node, depth = stack.pop()
            if node is None:
                continue
            seen_values = set()
            self.verifyPandQExistFromRoot(node, p, q, seen_values)
            both_below = (p.val in seen_values) and (q.val in seen_values)
            # Strictly deeper candidates win, matching the original ordering.
            if both_below and depth > deepest_valid_depth:
                deepest_valid_depth = depth
                valid_node = node
            stack.append((node.right, depth + 1))
            stack.append((node.left, depth + 1))
        return valid_node

    def verifyPandQExistFromRoot(self, root, p, q, seenValues):
        """Collect subtree values into *seenValues*, pruning once both
        p.val and q.val have already been seen."""
        if root is None:
            return
        if p.val in seenValues and q.val in seenValues:
            return
        seenValues.add(root.val)
        self.verifyPandQExistFromRoot(root.left, p, q, seenValues)
        self.verifyPandQExistFromRoot(root.right, p, q, seenValues)
|
[
"michaelxu79@gmail.com"
] |
michaelxu79@gmail.com
|
d21a1e0fda886e68b04b7b6fb2aae7d62a280eea
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit1046.py
|
63eee4eecc3a2149468ba16560b7bb2f0123e5f6
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,157
|
py
|
# qubit number=5
# total number=51
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle O_f^± for boolean function *f* on n qubits.

    For every basis state whose bitstring maps to "1" under *f*, the state
    is phase-flipped: X gates select the pattern, a multi-controlled phase
    applies the flip, then the X gates are undone.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Map this basis state onto |11..1> so the controlled phase fires.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            # Uncompute the X gates.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the benchmark circuit: Grover-style iterations of Zf plus
    generated gate mutations, then measure all n qubits.

    The ``# number=NN`` comments are mutation markers from the benchmark
    generator and are kept verbatim.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0])  # number=3
    prog.h(input_qubit[1])  # number=4
    prog.h(input_qubit[2])  # number=5
    prog.h(input_qubit[3])  # number=6
    prog.h(input_qubit[0])  # number=38
    prog.cz(input_qubit[1],input_qubit[0])  # number=39
    prog.h(input_qubit[0])  # number=40
    prog.cx(input_qubit[1],input_qubit[0])  # number=45
    prog.z(input_qubit[1])  # number=46
    prog.h(input_qubit[0])  # number=48
    prog.cz(input_qubit[1],input_qubit[0])  # number=49
    prog.h(input_qubit[0])  # number=50
    prog.h(input_qubit[0])  # number=32
    prog.cz(input_qubit[1],input_qubit[0])  # number=33
    prog.h(input_qubit[0])  # number=34
    prog.h(input_qubit[4])  # number=21
    Zf = build_oracle(n, f)

    # Standard Grover iteration count: floor(pi/4 * sqrt(2^n)).
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0])  # number=1
        prog.h(input_qubit[1])  # number=2
        prog.h(input_qubit[2])  # number=7
        prog.h(input_qubit[3])  # number=8
        prog.cx(input_qubit[3],input_qubit[0])  # number=41
        prog.z(input_qubit[3])  # number=42
        prog.cx(input_qubit[3],input_qubit[0])  # number=43
        prog.cx(input_qubit[1],input_qubit[3])  # number=44
        prog.x(input_qubit[0])  # number=9
        prog.x(input_qubit[1])  # number=10
        prog.x(input_qubit[2])  # number=11
        prog.cx(input_qubit[0],input_qubit[3])  # number=35
        prog.x(input_qubit[3])  # number=36
        prog.cx(input_qubit[0],input_qubit[3])  # number=37
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0])  # number=24
        prog.x(input_qubit[0])  # number=25
        prog.cx(input_qubit[1],input_qubit[0])  # number=26
        prog.x(input_qubit[1])  # number=14
        prog.x(input_qubit[2])  # number=15
        prog.x(input_qubit[3])  # number=16
        prog.h(input_qubit[0])  # number=17
        prog.h(input_qubit[1])  # number=18
        prog.h(input_qubit[2])  # number=19
        prog.h(input_qubit[3])  # number=20
        prog.x(input_qubit[1])  # number=22
        prog.x(input_qubit[1])  # number=23
    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Search for the all-zeros key on 5 qubits: f(rep) == "1" iff rep == key.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Simulate, then transpile against a fake device for depth statistics.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump counts, depth and the transpiled circuit to the benchmark CSV.
    writefile = open("../data/startQiskit1046.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
98b24527a49dde6f431800e65ba9394fb4c3a89e
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/abc288_e.py
|
70c4f614907f574c7e19042d8ed2d2ab4cc3fcdb
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604
| 2023-08-12T14:53:03
| 2023-08-12T14:53:03
| 197,216,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,214
|
py
|
# https://atcoder.jp/contests/abc288/tasks/abc288_d
# from numba import njit
# from functools import lru_cache
import sys
input = sys.stdin.buffer.readline
INF = 1001001001001001

# N items with values A and per-position costs C; M mandatory indices X.
N, M = map(int, input().split())
A = list(map(int, (input().split())))
C = list(map(int, (input().split())))
X = list(map(int, (input().split())))

# dp[i][j]: appears to be the minimum total cost after considering the
# first i items with j of them bought -- NOTE(review): inferred from the
# recurrence below; confirm against the editorial.
dp = [[INF]*(N+1) for _ in range(N+1)]
dp[0][0] = 0
# for i in range(N+1):
#     dp[i][0] = 0

# cost[i][j]: cheapest C among positions i-j..i (prefix minima per row).
cost = [[0]*N for _ in range(N)]
for i in range(N):
    for j in range(i+1):
        if j==0:
            cost[i][j] = C[i]
        else:
            cost[i][j] = min(cost[i][j-1], C[i-j])
# for i in range(N):
#     print(cost[i])

idx = 0
for i in range(N):
    for j in range(i+1):
        # Buy item i (value + best achievable cost at this offset).
        dp[i+1][j+1] = min(dp[i+1][j+1], dp[i][j] + A[i] + cost[i][j])
        # Mandatory items (listed in X) may not be skipped.
        if idx<M and i==X[idx]-1: continue
        dp[i+1][j] = min(dp[i+1][j], dp[i][j])
    if idx<M and i==X[idx]-1:
        idx += 1
# for i in range(N+1):
#     print(dp[i])

# At least the M mandatory items must have been bought.
ans = INF
for j in range(M, N+1):
    ans = min(ans, dp[N][j])
# for i in range(M):
#     ans += A[X[i]-1]
print(ans)
# WA
# import sys
# input = sys.stdin.buffer.readline
# # def input(): return sys.stdin.readline().rstrip()
# # sys.setrecursionlimit(10 ** 7)
# import copy
# N, M = map(int, input().split())
# A = list(map(int, (input().split())))
# C = list(map(int, (input().split())))
# X = list(map(int, (input().split())))
# ans = 0
# for i in range(M):
# ans += A[X[i]-1]
# pre = [[]]
# idx = 0
# for i in range(N):
# jj = 0
# if i==X[idx]-1:
# v = C[X[idx]-1]
# u = X[idx] - 1
# for j in range(idx):
# if C[X[idx]-1-j]<v:
# v = C[X[idx]-1-j]
# u = X[idx] - 1
# for j in range(len(pre[u])):
# # print(u, j, pre[u])
# if j<jj:
# if C[u-j-1]: break
# v = C[u-j-1]
# else:
# if v<pre[u][j]+C[u-j-1]: break
# v = pre[u][j]+C[u-j-1]
# jj = max(jj, j+1)
# ans += v
# print(ans, idx, v, u)
# idx += 1
# pre.append(copy.copy(pre[-1]))
# pre[-1].append(A[i] + C[i])
# pre[-1].sort()
# # print(pre)
# print(ans)
|
[
"hironobukawaguchi3@gmail.com"
] |
hironobukawaguchi3@gmail.com
|
89938fbcb47e0b7757adcf91ed9a35f11cc37eeb
|
a27e43d263375f1ea42d496e18af01f5ad46990e
|
/modules/initialize.py
|
d7767bbf8a118b8f1b6dc24808d627c54abdcc1f
|
[] |
no_license
|
Klim314/Quetzalcoatl
|
74565556a26d548f28118137e81866f7dc7a4e7a
|
0d78183235207bc9c44c7c099722f5a7203e1d9c
|
refs/heads/master
| 2016-08-06T08:57:19.802511
| 2015-06-24T08:29:53
| 2015-06-24T08:29:53
| 36,220,505
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
#!/usr/bin/env python3
"""
initialize.py
loads the pubdip.ini file
returns a dictionary containing all terms
"""


def execute(target):
    """Parse a simple ``key=value`` config file into a dict.

    Lines starting with ``#`` are comments.  Blank lines and lines
    without an ``=`` are skipped (the original crashed on them).  Only
    the FIRST ``=`` splits key from value, so values such as URLs may
    themselves contain ``=`` (the original truncated them).
    """
    res = dict()
    with open(target) as f:
        for line in f:
            stripped = line.strip()
            # Skip comments, blank lines, and malformed lines (no '=').
            if not stripped or stripped.startswith('#') or '=' not in stripped:
                continue
            key, _, value = stripped.partition('=')
            res[key] = value.strip()
    return res


if __name__ == "__main__":
    path = "../pubdip.ini"
    print(execute(path))
|
[
"klim314@gmail.com"
] |
klim314@gmail.com
|
91a5b6e81692b41a2ffffebed1fa5a58a9cc4ca7
|
2097293065bb28452b221a5f635bac63c69a3e80
|
/pizza.py
|
60599550eb351267a25b0b28131179907e104ba8
|
[
"MIT"
] |
permissive
|
kafkoders/hashcode-pizza
|
eb2ca3944f62c9c21853b8d0dc2cd34a984984bf
|
513452f35299885f396a49113264523a0a6cceae
|
refs/heads/master
| 2020-04-23T22:16:53.542636
| 2019-02-24T19:46:38
| 2019-02-24T19:46:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,483
|
py
|
import pandas as pd
import numpy as np
import math
input_files = ['d_big']
def create_pizza_dataset(file_):
    """Load ``<file_>.in`` into a 0/1 DataFrame (T -> 0, M -> 1).

    Returns the grid plus the header fields (as strings, exactly as
    split from the first line) and the name of the scarcer ingredient.
    """
    header = None
    grid_rows = []
    with open(file_ + '.in') as handle:
        for raw_line in handle:
            if header is None:
                # First line carries the four problem parameters.
                header = raw_line.split(' ')
            else:
                grid_rows.append(np.array(list(raw_line.rstrip())))
    rows, cols, min_ingredients, max_cells = header
    pizza_ = pd.DataFrame(grid_rows).replace(['M', 'T'], [1, 0])
    total_tomatoes = len(pizza_[pizza_.values == 0])
    total_mushrooms = len(pizza_[pizza_.values == 1])
    if total_tomatoes < total_mushrooms:
        less_ingredient = 'tomatoes'
    else:
        less_ingredient = 'mushrooms'
    return pizza_, rows, cols, min_ingredients, max_cells, less_ingredient
def maximize_cuts(max_):
    # Enumerate candidate slice shapes [height, width] whose area lies
    # between 2*min_ingredients (smallest slice that can satisfy both
    # ingredient minimums) and max_ cells, largest areas first.
    # NOTE(review): reads the module global `min_ingredients` assigned in
    # __main__ — confirm it is set before calling.
    possible_cuts = list()
    for j in range(max_, (int(min_ingredients) * 2) - 1, -1):
        for i in range(j, 0, -1):
            if (j % i) == 0:
                # Record both orientations of the j-cell rectangle with side i.
                item_x = [int(j / i), i]
                item_y = [i, int(j / i)]
                if item_x not in possible_cuts:
                    possible_cuts.append(item_x)
                if item_y not in possible_cuts:
                    possible_cuts.append(item_y)
    return possible_cuts
class pizzaSlice:
    # A candidate slice: the list of [row, col] cells it covers plus the
    # count of the globally scarcer ingredient it contains (its "value").
    slice_ = None
    value_ = 0

    def __init__(self, slice_):
        self.slice_ = slice_
        self.value_ = self.calc_value()

    def calc_value(self):
        # Count both ingredients inside the slice and return the count of
        # the scarcer one.  NOTE(review): reads the module globals `pizza_`
        # and `less_ingredient` assigned in __main__.
        mushrooms = 0
        tomatoes = 0
        for val in self.slice_:
            if pizza_.at[val[0], val[1]] == 1:
                mushrooms += 1
            elif pizza_.at[val[0], val[1]] == 0:
                tomatoes += 1
        if less_ingredient == 'tomatoes':
            return tomatoes
        else:
            return mushrooms
def matches_condition(pizza_, pizza_slices):
    # From the candidate slices, return the cell list of the slice that
    # satisfies the per-ingredient minimum, preferring a lower scarce-
    # ingredient count combined with more cells; None if no slice fits.
    if not pizza_slices:
        return None
    else:
        min_slice = None
        max_cells = 0  # NOTE(review): local; shadows the module-level `max_cells` string
        for pizza_slice in pizza_slices:
            tomatoes = 0
            mushrooms = 0
            for cell_slice in pizza_slice.slice_:
                if pizza_.at[cell_slice[0], cell_slice[1]] == 1:
                    mushrooms += 1
                elif pizza_.at[cell_slice[0], cell_slice[1]] == 0:
                    tomatoes += 1
            # Valid only with at least `min_ingredients` of EACH ingredient
            # (module global assigned in __main__).
            if mushrooms >= int(min_ingredients) and tomatoes >= int(min_ingredients):
                if min_slice is None:
                    min_slice = pizza_slice
                if min_slice.value_ > pizza_slice.value_ and max_cells < len(pizza_slice.slice_):
                    max_cells = len(pizza_slice.slice_)
                    min_slice = pizza_slice
        if min_slice is not None:
            return min_slice.slice_
        else:
            return None
def check_cuts(x, y, min_, max_, cuts_):
    # Build every candidate slice anchored at cell (x, y), one per cut
    # shape in `cuts_`, discarding shapes that fall off the grid or touch
    # an already-claimed cell (marked 5).  `min_` and `max_` are currently
    # unused.  NOTE(review): reads the module global `pizza_`.
    slices_ = list()
    for cut in cuts_:
        slice_ = list()
        invalid = False
        for i in range(cut[0]):
            for j in range(cut[1]):
                if x + i < pizza_.shape[0] and y + j < pizza_.shape[1] and pizza_.at[x + i, y + j] != 5:
                    slice_.append([x + i, y + j])
                else:
                    invalid = True
        if invalid is False:
            slices_.append(pizzaSlice(slice_))
    return slices_
if __name__ == '__main__':
    for file_ in input_files:
        # Load grid and constraints, then greedily claim slices scanning
        # top-left to bottom-right; claimed cells are overwritten with 5.
        pizza_, rows, cols, min_ingredients, max_cells, less_ingredient = create_pizza_dataset(file_)
        good_slices = list()
        possible_cuts = maximize_cuts(int(max_cells))
        for row_ in range(pizza_.shape[0]):
            for col_ in range(pizza_.shape[1]):
                if pizza_.at[row_, col_] != 5:
                    slices_ = check_cuts(row_, col_, int(min_ingredients), int(max_cells), possible_cuts)
                    slice_ = matches_condition(pizza_, slices_)
                    if slice_ is not None:
                        # Record [row1, row2, col1, col2] from the first and
                        # last cells of the chosen slice, then mark it taken.
                        col_final = len(slice_)
                        good_slices.append([row_, slice_[col_final - 1][0], col_, slice_[col_final - 1][1]])
                        for element in slice_:
                            pizza_.at[element[0], element[1]] = 5
        # Emit the slice count followed by one "r1 c1 r2 c2" line per slice.
        with open(file_ + '.out', 'w') as f_:
            f_.write(str(len(good_slices)) + "\n")
            for value_ in good_slices:
                f_.write(str(value_[0]) + " " + str(value_[2]) + " " + str(value_[1]) + " " + str(value_[3]) + "\n")
|
[
"alvarob96@usal.es"
] |
alvarob96@usal.es
|
34a80c8dab37022c77f53a2aea2077a2f51aa81b
|
a0e33f22ed416429e5ed003896d410ab0e82d3eb
|
/polymodels/managers.py
|
a08e4ba298a2da0d63b9bcbbeaadcc69656423fd
|
[
"MIT"
] |
permissive
|
fusionbox/django-polymodels
|
37982506c6ea58ae85f44da676cd990b4babc6fd
|
0e6caf3932b2d8337d15f9755983c94743317e12
|
refs/heads/master
| 2020-12-25T10:59:02.520899
| 2016-01-22T00:13:22
| 2016-01-22T00:13:22
| 50,145,841
| 0
| 0
| null | 2016-01-22T00:13:14
| 2016-01-22T00:13:14
| null |
UTF-8
|
Python
| false
| false
| 2,968
|
py
|
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
class PolymorphicQuerySet(models.query.QuerySet):
    """QuerySet that can type-cast fetched rows to their concrete subclass."""

    def select_subclasses(self, *models):
        # Enable type-casting on iteration and add the select_related /
        # content-type filters needed to build the requested subclasses.
        # NOTE(review): the `models` parameter shadows the `django.db.models`
        # module import inside this method.
        self.type_cast = True
        relateds = set()
        accessors = self.model.subclass_accessors
        if models:
            subclasses = set()
            for model in models:
                if not issubclass(model, self.model):
                    raise TypeError(
                        "%r is not a subclass of %r" % (model, self.model)
                    )
                subclasses.update(model.subclass_accessors)
            # Collect all `select_related` required lookups
            for subclass in subclasses:
                # Avoid collecting ourself and proxy subclasses
                related = accessors[subclass][2]
                if related:
                    relateds.add(related)
            queryset = self.filter(
                **self.model.content_type_lookup(*tuple(subclasses))
            )
        else:
            # Collect all `select_related` required relateds
            for accessor in accessors.values():
                # Avoid collecting ourself and proxy subclasses
                related = accessor[2]
                if accessor[2]:
                    relateds.add(related)
            queryset = self
        if relateds:
            queryset = queryset.select_related(*relateds)
        return queryset

    def exclude_subclasses(self):
        # Keep only rows whose content type is exactly this model's.
        return self.filter(**self.model.content_type_lookup())

    def _clone(self, *args, **kwargs):
        # Propagate the type_cast flag through Django's queryset cloning.
        kwargs.update(type_cast=getattr(self, 'type_cast', False))
        return super(PolymorphicQuerySet, self)._clone(*args, **kwargs)

    def iterator(self):
        # Lazily type-cast each row iff select_subclasses() was called.
        iterator = super(PolymorphicQuerySet, self).iterator()
        if getattr(self, 'type_cast', False):
            for obj in iterator:
                yield obj.type_cast()
        else:
            # yield from iterator
            for obj in iterator:
                yield obj
class PolymorphicManager(models.Manager.from_queryset(PolymorphicQuerySet)):
    """Default manager for `BasePolymorphicModel` subclasses."""

    use_for_related_fields = True

    def contribute_to_class(self, model, name):
        # Avoid circular reference
        from .models import BasePolymorphicModel
        # Refuse attachment to models that are not polymorphic.
        if not issubclass(model, BasePolymorphicModel):
            raise ImproperlyConfigured(
                '`%s` can only be used on '
                '`BasePolymorphicModel` subclasses.' % self.__class__.__name__
            )
        return super(PolymorphicManager, self).contribute_to_class(model, name)

    def get_queryset(self):
        queryset = super(PolymorphicManager, self).get_queryset()
        model = self.model
        opts = model._meta
        if opts.proxy:
            # Select only associated model and its subclasses.
            queryset = queryset.filter(**self.model.subclasses_lookup())
        return queryset
|
[
"charette.s@gmail.com"
] |
charette.s@gmail.com
|
90b417bedd17743c79571e8607da6f6a022d1f12
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03024/s808391195.py
|
bd7e39cfcdbb36cf608ec3f0dbb696430bf5c305
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
# With 15 bouts total and k already fought, the remaining 15-k could all
# be wins; 8 total wins is still reachable iff wins-so-far + remaining >= 8.
s = input()
k = len(s)
counter = s.count("o")
if counter + (15 - k) >= 8:
    print("YES")
else:
    print("NO")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6e69b5b44498b70dbb7ec604c2bc824d7cd54d73
|
31e10d5f9bbdf768a2b6aae68af0c2105b43120c
|
/web+多线程/miniweb框架/web服务器/application/utils.py
|
fc5a398094a52747dd43aa00a08d209b8d724c5b
|
[] |
no_license
|
664120817/python-test
|
6d0ce82923b3e7974f393fc8590c5e47e4117781
|
418085378ca0db8019e4fa3b5564daebed0e6163
|
refs/heads/master
| 2023-02-16T13:10:55.403774
| 2022-08-02T17:01:52
| 2022-08-02T17:01:52
| 200,843,808
| 8
| 11
| null | 2023-02-15T16:53:44
| 2019-08-06T12:08:19
|
Python
|
UTF-8
|
Python
| false
| false
| 435
|
py
|
def create_http_response(status, response_body):
    """Assemble a raw HTTP/1.1 response as bytes.

    status: status-line content, e.g. "200 OK".
    response_body: pre-encoded body bytes, appended verbatim.
    """
    status_line = "HTTP/1.1 {}\r\n".format(status)
    header_lines = "Server:python80WS/2.1;charset=UTF-8 \r\n"
    header_lines += "Content-Type:text/html\r\n"
    separator = "\r\n"  # blank line terminating the header section
    head = status_line + header_lines + separator
    return head.encode() + response_body
|
[
"51182039+664120817@users.noreply.github.com"
] |
51182039+664120817@users.noreply.github.com
|
ee9ea4d11f545f46aa88dcf699a6500010c37f2d
|
c6d9e353d19e0b92da72602ce274493dbb179525
|
/Setup_custom.py
|
ca095135168082bb68b2205c98650d75d777c9fc
|
[
"BSL-1.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
davidbrownell/Common_cpp_Common
|
a337f0d589316f28950e93acd518d4e82b7cc14a
|
7346273b79628514af1c584c447003a638def15d
|
refs/heads/master
| 2022-03-01T19:31:12.571884
| 2022-01-03T17:56:37
| 2022-01-03T17:56:37
| 187,749,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,579
|
py
|
# ----------------------------------------------------------------------
# |
# | Setup_custom.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-05-03 22:12:13
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Performs repository-specific setup activities."""
# ----------------------------------------------------------------------
# |
# | To setup an environment, run:
# |
# | Setup(.cmd|.ps1|.sh) [/debug] [/verbose] [/configuration=<config_name>]*
# |
# ----------------------------------------------------------------------
import os
import shutil
import sys
from collections import OrderedDict
import CommonEnvironment
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# <Missing function docstring> pylint: disable = C0111
# <Line too long> pylint: disable = C0301
# <Wrong hanging indentation> pylint: disable = C0330
# <Class '<name>' has no '<attr>' member> pylint: disable = E1103
# <Unreachable code> pylint: disable = W0101
# <Wildcard import> pylint: disable = W0401
# <Unused argument> pylint: disable = W0613
fundamental_repo = os.getenv("DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL")
assert os.path.isdir(fundamental_repo), fundamental_repo
sys.path.insert(0, fundamental_repo)
from RepositoryBootstrap import * # <Unused import> pylint: disable = W0614
from RepositoryBootstrap.SetupAndActivate import CurrentShell # <Unused import> pylint: disable = W0614
from RepositoryBootstrap.SetupAndActivate.Configuration import * # <Unused import> pylint: disable = W0614
del sys.path[0]
from _custom_data import _CUSTOM_DATA
# ----------------------------------------------------------------------
# There are two types of repositories: Standard and Mixin. Only one standard
# repository may be activated within an environment at a time while any number
# of mixin repositories can be activated within a standard repository environment.
# Standard repositories may be dependent on other repositories (thereby inheriting
# their functionality), support multiple configurations, and specify version
# information for tools and libraries in themselves or its dependencies.
#
# Mixin repositories are designed to augment other repositories. They cannot
# have configurations or dependencies and may not be activated on their own.
#
# These difference are summarized in this table:
#
# Standard Mixin
# -------- -----
# Can be activated in isolation X
# Supports configurations X
# Supports VersionSpecs X
# Can be dependent upon other repositories X
# Can be activated within any other Standard X
# repository
#
# Consider a script that wraps common Git commands. This functionality is useful
# across a number of different repositories, yet doesn't have functionality that
# is useful on its own; it provides functionality that augments other repositories.
# This functionality should be included within a repository that is classified
# as a mixin repository.
#
# To classify a repository as a Mixin repository, decorate the GetDependencies method
# with the MixinRepository decorator.
#
# @MixinRepository # <-- Uncomment this line to classify this repository as a mixin repository
def GetDependencies():
    """
    Returns information about the dependencies required by this repository.
    The return value should be an OrderedDict if the repository supports multiple configurations
    (aka is configurable) or a single Configuration if not.
    """
    # Windows toolchains can cross-compile, so both architectures are
    # offered there; elsewhere only the current one is supported.
    if CurrentShell.CategoryName == "Windows":
        architectures = ["x64", "x86"]
    else:
        architectures = [CurrentShell.Architecture]

    dependencies = OrderedDict()
    for architecture in architectures:
        common_environment = Dependency(
            "0EAA1DCF22804F90AD9F5A3B85A5D706",
            "Common_Environment",
            "python36",
            "https://github.com/davidbrownell/Common_Environment_v3.git",
        )
        dependencies[architecture] = Configuration(architecture, [common_environment])

    return dependencies
# ----------------------------------------------------------------------
def GetCustomActions(debug, verbose, explicit_configurations):
    """
    Returns an action or list of actions that should be invoked as part of the setup process.
    Actions are generic command line statements defined in
    <Common_Environment>/Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/Shell/Commands/__init__.py
    that are converted into statements appropriate for the current scripting language (in most
    cases, this is Bash on Linux systems and Batch or PowerShell on Windows systems.
    """
    actions = []
    # _CUSTOM_DATA: nested (tool, (version, (operating_system, hash))) tuples
    # describing binary archives to install.
    for tool, version_infos in _CUSTOM_DATA:
        for version, operating_system_infos in version_infos:
            # NOTE(review): `hash` shadows the builtin within this loop.
            for operating_system, hash in operating_system_infos:
                # Only install archives targeting the current OS.
                if CurrentShell.CategoryName != operating_system:
                    continue
                tool_dir = os.path.join(
                    _script_dir,
                    "Tools",
                    tool,
                    version,
                    operating_system,
                )
                assert os.path.isdir(tool_dir), tool_dir
                # Invoke AcquireBinaries.py to unpack Install.7z into tool_dir,
                # using the hash as a unique id to skip already-installed tools.
                # NOTE(review): `CommonEnvironmentImports` is presumably provided
                # by one of the wildcard imports above — confirm.
                actions += [
                    CurrentShell.Commands.Execute(
                        'python "{script}" Install "{tool} - {version}" "{uri}" "{dir}" "/unique_id={hash}" /unique_id_is_hash'.format(
                            script=os.path.join(
                                os.getenv("DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL"),
                                "RepositoryBootstrap",
                                "SetupAndActivate",
                                "AcquireBinaries.py",
                            ),
                            tool=tool,
                            version=version,
                            uri=CommonEnvironmentImports.FileSystem.FilenameToUri(
                                os.path.join(tool_dir, "Install.7z"),
                            ),
                            dir=tool_dir,
                            hash=hash,
                        ),
                    ),
                ]
    # Perform actions that must be completed after all other actions have completed
    actions.append(
        CurrentShell.Commands.Execute(
            'python "{}"'.format(os.path.join(_script_dir, "Setup_epilogue.py")),
        ),
    )
    return actions
|
[
"db@DavidBrownell.com"
] |
db@DavidBrownell.com
|
be89e3bb2bcbb432edbcf5ef7805532ee5823d5d
|
30dc32fd39cf71c76fc24d53b68a8393adcac149
|
/OWDTestToolkit/apps/Marketplace/__main.py
|
a24d1a6405e92cfdd242bbf8fe55cd7389288a89
|
[] |
no_license
|
carlosmartineztoral/OWD_TEST_TOOLKIT
|
448caefdc95bc3e54aad97df0bff7046ffb37be1
|
50768f79488735eba8355824f5aa3686a71d560a
|
refs/heads/master
| 2021-01-15T17:14:03.614981
| 2013-06-11T12:48:18
| 2013-06-11T12:48:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
from OWDTestToolkit.global_imports import *
import installApp ,\
searchForApp ,\
selectSearchResultApp
class Marketplace (
    installApp.main,
    searchForApp.main,
    selectSearchResultApp.main):
    # Facade over the marketplace page objects; each mixin contributes one
    # user action (install app, search, select a search result).

    def __init__(self, p_parent):
        # Borrow the shared helpers from the owning test object.
        self.apps = p_parent.apps
        self.data_layer = p_parent.data_layer
        self.parent = p_parent
        self.marionette = p_parent.marionette
        self.UTILS = p_parent.UTILS

    def launch(self):
        #
        # Launch the app.
        #
        self.apps.kill_all()
        # WARNING: Marketplace is in a weird place - you need to use "Marketplace Dev"!!
        # self.app = self.apps.launch(self.__class__.__name__)
        self.UTILS.logResult("info",
            "About to launch the marketplace app from the dev server. " + \
            "If it's \"not found\" then either try again later, or contact #marketplace mozilla irc channel.")
        self.app = self.apps.launch("Marketplace Dev")
        # Wait (up to 30s) for the loading spinner to disappear.
        self.UTILS.waitForNotElements(DOM.Market.market_loading_icon,
            self.__class__.__name__ + " app - loading icon",
            True,
            30)
|
[
"roy.collings@sogeti.com"
] |
roy.collings@sogeti.com
|
6d2f69de2487fa86a348999f7695b0190ce4b725
|
78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227
|
/3995.py
|
a3eefaf7f66a32547cbdcc5db18db51791b52a02
|
[] |
no_license
|
GenryEden/kpolyakovName
|
97db13ef93061a8c2afc6cc5acd91337f79063f1
|
c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9
|
refs/heads/master
| 2023-05-23T21:22:51.983756
| 2021-06-21T08:56:49
| 2021-06-21T08:56:49
| 350,466,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
def check(x):
    """Return True iff the octal digit sum of x is divisible by 19
    AND the octal digit product is divisible by 5.

    The original converted x to octal twice; convert once and reuse.
    """
    digits = [int(s) for s in oct(x)[2:]]
    if sum(digits) % 19 != 0:
        return False
    product = 1
    for d in digits:
        product *= d
    return product % 5 == 0
# Count the values in [12345, 67890] that satisfy check(), remembering
# the first (smallest) such value.
cnt = 0
minimal = 0
for candidate in range(12345, 67890 + 1):
    if check(candidate):
        if cnt == 0:
            minimal = candidate
        cnt += 1
print(cnt, minimal)
|
[
"a926788@gmail.com"
] |
a926788@gmail.com
|
4220d040287852ff2cb51884d1f88a13f9e80009
|
af9268e1ead8cdb491868c14a2240d9e44fb3b56
|
/last-minute-env/lib/python2.7/site-packages/django/contrib/admin/templatetags/admin_static.py
|
62b8691f9c135756c86c3975ad0fb508ab08de89
|
[] |
no_license
|
frosqh/Cousinade2017
|
d5154c24c93ca8089eeba26b53c594e92cb6bd82
|
c34d5707af02402bf2bb7405eddc91297da399ff
|
refs/heads/master
| 2021-01-20T07:57:34.586476
| 2017-10-22T18:42:45
| 2017-10-22T18:42:45
| 90,074,802
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
from django.template import Library
from django.templatetags.static import static as _static
register = Library()


@register.simple_tag
def static(path):
    """Resolve *path* against the configured static-files prefix."""
    # Backwards compatibility alias for django.templatetags.static.static().
    # Deprecation should start in Django 2.0.
    return _static(path)
|
[
"frosqh@gmail.com"
] |
frosqh@gmail.com
|
558a6dcac84f11a72034f4701f4143645c0414fd
|
63b864deda44120067eff632bbb4969ef56dd573
|
/object_detection/fast rcnn/roi.py
|
f7f8c76fbc257a5e40c8450b8615c8b335e4a852
|
[] |
no_license
|
lizhe960118/Deep-Learning
|
d134592c327decc1db12cbe19d9a1c85a5056086
|
7d2c4f3a0512ce4bd2f86c9f455da9866d16dc3b
|
refs/heads/master
| 2021-10-29T06:15:04.749917
| 2019-07-19T15:27:25
| 2019-07-19T15:27:25
| 152,355,392
| 5
| 2
| null | 2021-10-12T22:19:33
| 2018-10-10T03:06:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
import numpy as np
import torch
import torch.nn as nn
class ROIPool(nn.Module):
    """ROI max-pooling over normalized region proposals (Fast R-CNN style).

    Each proposal is a [x1, y1, x2, y2] box with coordinates normalized to
    [0, 1]; it is mapped back onto its feature map, cropped, and adaptively
    max-pooled to a fixed output size.
    """

    def __init__(self, output_size):
        super().__init__()
        # AdaptiveMaxPool2d emits output_size x output_size regardless of crop size.
        self.maxpool = nn.AdaptiveMaxPool2d(output_size)
        self.size = output_size

    def forward(self, images, rois, roi_idx):
        """Pool each ROI from its feature map.

        images: batch of feature maps, shape (B, C, H, W).
        rois: n x 4 array of normalized [x1, y1, x2, y2] boxes.
        roi_idx: length-n mapping from ROI to feature-map index in `images`.
        Returns a tensor of shape (n, C, *output_size).
        """
        num_rois = rois.shape[0]
        height = images.size(2)
        width = images.size(3)
        # Expand normalized coords back to pixel indices (floor/ceil so the
        # crop fully covers the box).
        left = np.floor(rois[:, 0] * width).astype(int)
        top = np.floor(rois[:, 1] * height).astype(int)
        right = np.ceil(rois[:, 2] * width).astype(int)
        bottom = np.ceil(rois[:, 3] * height).astype(int)
        pooled = []
        for i in range(num_rois):
            feature_map = images[roi_idx[i]].unsqueeze(0)
            crop = feature_map[:, :, top[i]:bottom[i], left[i]:right[i]]
            pooled.append(self.maxpool(crop))
        return torch.cat(pooled, dim=0)  # n x C x output_size x output_size
|
[
"2957308424@qq.com"
] |
2957308424@qq.com
|
bd6d1d5a395d1a59e39358b8164d34d56dbcb1cb
|
82e78f606f8c203cb77b1e3e8fd3b13158f31af8
|
/thenewboston/transactions/validation.py
|
8612d11da9a8ba93fe0d80accb79bbd627413987
|
[
"MIT"
] |
permissive
|
rajat4665/thenewboston-python
|
1f0b8aea02fb8dbfb2eea60cd1ef07ac12fad667
|
df842c793fe7bfd8731fd8746abf25747c9e569e
|
refs/heads/master
| 2022-11-26T00:46:54.848608
| 2020-07-26T00:12:06
| 2020-07-26T00:12:06
| 283,263,021
| 0
| 0
|
MIT
| 2020-07-28T16:07:08
| 2020-07-28T16:07:07
| null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
def validate_transaction_exists(*, amount, error, recipient, txs):
    """
    Verify that `txs` contains a Tx matching both `amount` and `recipient`;
    otherwise raise `error` with a descriptive payload.
    """
    for tx in txs:
        if tx.get('amount') == amount and tx.get('recipient') == recipient:
            return
    raise error({
        'error_message': 'Tx not found',
        'expected_amount': amount,
        'expected_recipient': recipient
    })
|
[
"buckyroberts@gmail.com"
] |
buckyroberts@gmail.com
|
df9384d60dcde3fb318a9b646d98debfab15d79a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03578/s404612965.py
|
ac57b84158a4259a926ce398a0358c3c359d58d5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
# -*- coding: utf-8 -*-
import sys
from collections import deque, defaultdict
from math import sqrt, factorial
# def input(): return sys.stdin.readline()[:-1] # warning not \n
# def input(): return sys.stdin.buffer.readline().strip() # warning bytes
# def input(): return sys.stdin.buffer.readline().decode('utf-8')
def solve():
    """Read a multiset of available values and a list of required values
    from stdin; print YES iff every requirement can be matched one-to-one."""
    n = int(input())  # declared count of available values (consumed, unused)
    counts = defaultdict(int)
    for value in (int(x) for x in input().split()):
        counts[value] += 1
    m = int(input())  # declared count of required values (consumed, unused)
    for need in (int(x) for x in input().split()):
        if counts[need]:
            counts[need] -= 1
        else:
            print("NO")
            return
    print("YES")


t = 1
# t = int(input())
for case in range(1, t + 1):
    ans = solve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
69c5f69164eed21cf0ed953345f5fed4d702daf5
|
1633258aff76252b660534eae6d70a9e95a468ec
|
/cost_management/urls.py
|
4443c8d4c8e5ca1952519e7048671ed5a7cfe38d
|
[] |
no_license
|
kxplorer/banglai-django
|
7077117f66128cb2bbaa8d50c1a28c076b303987
|
0d764f744ef165b078e856eb9374dba93cb614e8
|
refs/heads/master
| 2021-09-24T20:27:14.726832
| 2018-09-16T08:50:42
| 2018-09-16T08:50:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
from django.urls import path
from . import views
# Expense CRUD routes for the cost_management app.
urlpatterns = [
    path('list/', views.my_expense, name='cost-list'),
    path('add/', views.add_expense, name='add-expense'),
    # Edit/delete take the target expense's primary key in the URL.
    path('edit/<int:expense_id>/', views.edit_expense, name='edit-expense'),
    path('delete/<int:expense_id>/', views.delete_expense, name='delete-expense'),
]
|
[
"harun1393@gmail.com"
] |
harun1393@gmail.com
|
ed531ac39f4e836f0ef9223d8913f55327376982
|
8c825730f6fd253e58902b150a9800de8f766943
|
/capture/noworkflow/now/cmd/cmd_history.py
|
2e5d8da0a5ca9eab5b4e964956f6cef37e97c90f
|
[
"MIT"
] |
permissive
|
rmparanhos/noworkflow
|
aeb92695c34e65edf9cc4d4dc31d80467b085773
|
8f703a14503345568e91957659b43654036f8154
|
refs/heads/master
| 2020-05-17T12:39:04.231204
| 2019-06-21T03:42:49
| 2019-06-21T03:42:49
| 183,716,529
| 0
| 0
| null | 2019-04-27T01:58:31
| 2019-04-27T01:58:31
| null |
UTF-8
|
Python
| false
| false
| 1,849
|
py
|
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""'now history' command"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import os
from ..ipython.converter import create_ipynb
from ..persistence.models.history import History as HistoryModel
from ..persistence import persistence_config
from .command import NotebookCommand
class History(NotebookCommand):
    """Show project history"""

    def add_arguments(self):
        # Register the CLI filters for which trials to display.
        add_arg = self.add_argument
        add_arg("-s", "--script", type=str, default="*",
                help="show history of specific script")
        add_arg("-e", "--status", type=str, default="*",
                choices=["*", "finished", "unfinished", "backup"],
                help="show only trials in a specific status")
        add_arg("--dir", type=str,
                help="set demo path. Default to CWD/demo<number>"
                "where <number> is the demo identification")

    def execute(self, args):
        # Connect to an existing noWorkflow database (in --dir or CWD)
        # and print the filtered history.
        persistence_config.connect_existing(args.dir or os.getcwd())
        history = HistoryModel(script=args.script, status=args.status)
        print(history)

    def execute_export(self, args):
        # Emit a History.ipynb notebook pre-wired to explore the history
        # interactively via the noworkflow IPython extension.
        code = ("%load_ext noworkflow\n"
                "import noworkflow.now.ipython as nip\n"
                "# <codecell>\n"
                "history = nip.History()\n"
                "# history.graph.width = 700\n"
                "# history.graph.height = 300\n"
                "# history.script = '*'\n"
                "# history.status = '*'\n"
                "# <codecell>\n"
                "history")
        create_ipynb("History.ipynb", code)
|
[
"joaofelipenp@gmail.com"
] |
joaofelipenp@gmail.com
|
cb0abb7803753d6eb75cdac081833a6020167949
|
821f403a3afc9055d40893eca033c369a4c3831e
|
/Easy/No206.py
|
c2200da24597a13f4e107a7fd6caac6856ee93e2
|
[] |
no_license
|
kikihiter/LeetCode2
|
29f91b6992a01ba23e7da04b2b2c862410cc563b
|
7167f1a7c6cb16cca63675c80037682752ee2a7d
|
refs/heads/master
| 2023-05-01T03:45:44.482932
| 2021-05-19T13:12:16
| 2021-05-19T13:12:16
| 277,283,525
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
temp = None # 空,用来存储上一个节点信息
while head != None:
nextNode = head.next
head.next = temp
temp = head
head = nextNode
return temp
|
[
"noreply@github.com"
] |
kikihiter.noreply@github.com
|
dff0eb2acf4da0a475156ff795a327f9c89bcde3
|
a5ada23f0c9d429cd7afa2351368e46bc23255e4
|
/meta_models/meta_layers/conv3d_meta_layer.py
|
b3542d2d43a0dafcd10873c1e253f60dafba31d8
|
[
"MIT"
] |
permissive
|
AnacletoLAB/meta_models
|
ef6df0205f88832897e7ebdcd8057635b90024a9
|
9c70eb0bf080f0ec4bd24b7764f0f71d92d467d5
|
refs/heads/master
| 2023-04-11T14:01:47.678710
| 2021-04-27T08:25:53
| 2021-04-27T08:25:53
| 286,005,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,515
|
py
|
"""Class implementing meta-model for a Conv3D Layer."""
from typing import Dict
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv3D,
Layer)
from .regularized_meta_layer import RegularizedMetaLayer
from ..utils import distributions
class Conv3DMetaLayer(RegularizedMetaLayer):
    """Class implementing meta-layer for tri-dimensional convolutional layers.

    Private members
    ------------------------
    _min_filters: int,
        Minimum number of filters to use for the layer.
    _max_filters: int,
        Maximum number of filters to use for the layer.
    _min_x_kernel_size: int,
        Minimum size of the kernel on the lenght axis.
    _max_x_kernel_size: int,
        Maximum size of the kernel on the lenght axis.
    _min_y_kernel_size: int,
        Minimum size of the kernel on the depth axis.
    _max_y_kernel_size: int,
        Maximum size of the kernel on the depth axis.
    _min_z_kernel_size: int,
        Minimum size of the kernel on the height axis.
    _max_z_kernel_size: int,
        Maximum size of the kernel on the height axis.
    _activation: str,
        The activation function to use for the layer.
    """

    def __init__(
        self,
        min_filters: int = 0,
        max_filters: int = 256,
        min_x_kernel_size: int = 1,
        max_x_kernel_size: int = 5,
        min_y_kernel_size: int = 1,
        max_y_kernel_size: int = 5,
        min_z_kernel_size: int = 1,
        max_z_kernel_size: int = 5,
        activation: str = "relu",
        **kwargs: Dict
    ):
        """Create new Conv3DResidualLayer meta-model object.

        Parameters
        ----------------------
        min_filters: int = 0,
            Minimum number of filters (neurons) in each layer.
            If the tuning process passes 0, then the layer is skipped.
        max_filters: int = 256,
            Maximum number of filters (neurons) in each layer.
        min_x_kernel_size: int = 1,
            Minimum size of the kernel on the lenght axis.
        max_x_kernel_size: int = 5,
            Maximum size of the kernel on the lenght axis.
        min_y_kernel_size: int = 1,
            Minimum size of the kernel on the depth axis.
        max_y_kernel_size: int = 5,
            Maximum size of the kernel on the depth axis.
        min_z_kernel_size: int = 1,
            Minimum size of the kernel on the height axis.
        max_z_kernel_size: int = 5,
            Maximum size of the kernel on the height axis.
        activation: str = "relu",
            The activation function to use for the layer.
        **kwargs: Dict,
            Dictionary of keyword parameters to be passed to parent class.
        """
        super().__init__(**kwargs)
        self._min_filters = min_filters
        self._max_filters = max_filters
        self._min_x_kernel_size = min_x_kernel_size
        self._max_x_kernel_size = max_x_kernel_size
        self._min_y_kernel_size = min_y_kernel_size
        self._max_y_kernel_size = max_y_kernel_size
        self._min_z_kernel_size = min_z_kernel_size
        self._max_z_kernel_size = max_z_kernel_size
        self._activation = activation

    def _space(self) -> Dict:
        """Return hyper parameters of the layer."""
        # Each entry maps a hyper-parameter name to
        # (sampler, lower bound, upper bound); parent-class parameters are
        # merged in via super()._space().
        return {
            "filters": (distributions.integer, self._min_filters, self._max_filters),
            "x_kernel_size": (distributions.integer, self._min_x_kernel_size, self._max_x_kernel_size),
            "y_kernel_size": (distributions.integer, self._min_y_kernel_size, self._max_y_kernel_size),
            "z_kernel_size": (distributions.integer, self._min_z_kernel_size, self._max_z_kernel_size),
            **super()._space()
        }

    def _build(
        self,
        input_layers: Layer,
        filters: int,
        x_kernel_size: int,
        y_kernel_size: int,
        z_kernel_size: int,
        strides: int = (1, 1, 1),
        **kwargs: Dict
    ) -> Layer:
        """Return built Conv3D layer block.

        If the given filters number is equal to 0, the layer is skipped.

        Parameters
        --------------------------
        input_layers: Layer,
            The input layer of the current layer.
        filters: int,
            The number of neurons of the layer.
        x_kernel_size: int,
            The dimension of the kernel for the layer, on the length axis.
        y_kernel_size: int,
            The dimension of the kernel for the layer, on the depth axis.
        z_kernel_size: int,
            The dimension of the kernel for the layer, on the height axis.
        strides: int = (1, 1),
            Strides for the convolutional layer.
        **kwargs: Dict,
            The kwargs to pass to the kernel regularizers.

        Returns
        --------------------------
        Output layer of the block.
        """
        # Samplers may yield floats; round to valid integer sizes.
        filters = round(filters)
        x_kernel_size = round(x_kernel_size)
        y_kernel_size = round(y_kernel_size)
        z_kernel_size = round(z_kernel_size)
        # A sampled filter count of 0 means the tuner chose to skip this
        # layer entirely: pass the input through unchanged.
        if filters == 0:
            return input_layers
        layer = Conv3D(
            filters=filters,
            kernel_size=(x_kernel_size, y_kernel_size, z_kernel_size),
            strides=strides,
            padding="same",
            **self._build_regularizers(**kwargs)
        )(input_layers)
        if self._batch_normalization:
            layer = BatchNormalization()(layer)
        activation = Activation(self._activation)(layer)
        return activation
|
[
"cappelletti.luca94@gmail.com"
] |
cappelletti.luca94@gmail.com
|
04747c7c8266e99f1a85acf17f1ae88fef5da79d
|
03d68f032ab0e8cf269413d0309fc6d36281504f
|
/src/l2hmc/utils/tensorflow/history.py
|
d66fe35509b67f88da6d0b9dd0b405dac0889a21
|
[
"Apache-2.0"
] |
permissive
|
saforem2/l2hmc-qcd
|
560026cd4d63f786247170a2b8641a7402b7e81e
|
46ada488bc5c8b0a31be0bf23ea11b95b3b06767
|
refs/heads/main
| 2023-09-06T03:20:19.577196
| 2023-08-23T19:26:58
| 2023-08-23T19:26:58
| 176,870,361
| 57
| 8
|
Apache-2.0
| 2023-08-23T18:56:02
| 2019-03-21T04:32:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
"""
tensorflow/history.py
Implements tfHistory, containing minor modifications from base History class.
"""
from __future__ import absolute_import, print_function, division, annotations
from typing import Any
import tensorflow as tf
import numpy as np
from l2hmc.utils.history import BaseHistory
class History(BaseHistory):
    """Metrics history that understands TensorFlow tensors.

    Extends ``BaseHistory`` with an ``update`` that flattens one level of
    nested metric dicts and averages tensor values via ``tf.reduce_mean``.
    """

    def update(self, metrics: dict) -> dict:
        """Record *metrics* and return the per-key averages.

        Nested dicts are flattened one level into ``parent/child`` keys.
        BUG FIX: the original reassigned ``key`` inside the inner loop
        (``key = f'{key}/{k}'``), so successive children were recorded
        under compounding names (``a/x``, ``a/x/y``, ...) and only the
        last child's average survived in ``avgs``.  Each child is now
        tracked under its own ``parent/child`` key.
        """
        avgs = {}
        era = metrics.get('era', 0)
        for key, val in metrics.items():
            if isinstance(val, dict):
                for k, v in val.items():
                    subkey = f'{key}/{k}'
                    try:
                        avg = self._update(key=subkey, val=v)
                    # TODO: Figure out how to deal with exception
                    except tf.errors.InvalidArgumentError:
                        continue
                    avgs[subkey] = avg
                    self._record_era(era, subkey, avg)
                continue
            if isinstance(val, (float, int)):
                avg = val
            else:
                avg = self._update(key=key, val=val)
            avgs[key] = avg
            self._record_era(era, key, avg)
        return avgs

    def _record_era(self, era, key, avg) -> None:
        # Append avg to era_metrics[era][key], creating the list on first use.
        try:
            self.era_metrics[str(era)][key].append(avg)
        except KeyError:
            self.era_metrics[str(era)][key] = [avg]

    def _update(self, key: str, val: Any) -> float:
        """Append *val* to ``self.history[key]`` and return its scalar mean."""
        if val is None:
            raise ValueError(f'None encountered: {key}: {val}')
        if isinstance(val, list):
            val = np.array(val)
        try:
            self.history[key].append(val)
        except KeyError:
            self.history[key] = [val]
        if isinstance(val, (float, int)):
            return val
        try:
            return tf.reduce_mean(val)
        except Exception:
            return val
|
[
"saforem2@gmail.com"
] |
saforem2@gmail.com
|
ef89ebbee0f0db544ff5bf1b817aff77405ecae0
|
7d274ce8dae971228a23157a409b561020c22f66
|
/tools/packages/SCons/Tool/sunc++.py
|
00fb8c85284d59226fd62f3cfb8e577783661690
|
[] |
no_license
|
Eigenlabs/EigenD-Contrib
|
a212884d4fdf9ae0e1aeb73f6311606212e02f94
|
586fe17471571802295c792697f255e6cab51b17
|
refs/heads/master
| 2020-05-17T07:54:48.668925
| 2013-02-05T10:20:56
| 2013-02-05T10:20:56
| 3,239,072
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,744
|
py
|
"""SCons.Tool.sunc++
Tool-specific initialization for C++ on SunOS / Solaris.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunc++.py 4577 2009/12/27 19:43:56 scons"
import SCons
import os
import re
import subprocess
cplusplus = __import__('c++', globals(), locals(), [])
# Cache of (pathname, version) tuples keyed by Solaris package name.
package_info = {}

def _tool_output(argv):
    """Run a packaging tool and return its stdout, or None if it cannot run.

    The child's stderr is discarded; the devnull handle is always closed
    (the original leaked one open file descriptor per invocation).
    """
    errfile = open(os.devnull, 'w')
    try:
        try:
            p = subprocess.Popen(argv,
                                 stdout=subprocess.PIPE,
                                 stderr=errfile)
        except EnvironmentError:
            return None
        return p.communicate()[0]
    finally:
        errfile.close()

def get_package_info(package_name, pkginfo, pkgchk):
    """Return (pathname, version) for a Solaris package, with caching.

    The compiler directory is looked up in /var/sadm/install/contents and
    the version via `pkginfo -l`; `pkgchk -l` is a fallback for the path.
    Either element may be None when it cannot be determined.
    """
    try:
        return package_info[package_name]
    except KeyError:
        pass
    version = None
    pathname = None
    try:
        contents_file = open('/var/sadm/install/contents', 'r')
    except EnvironmentError:
        pass
    else:
        try:
            sadm_contents = contents_file.read()
        finally:
            # Close explicitly; the original left the handle dangling.
            contents_file.close()
        # Raw strings: '\S' in a plain literal is an invalid escape
        # sequence in modern Pythons.
        sadm_re = re.compile(r'^(\S*/bin/CC)(=\S*)? %s$' % package_name, re.M)
        sadm_match = sadm_re.search(sadm_contents)
        if sadm_match:
            pathname = os.path.dirname(sadm_match.group(1))
    pkginfo_contents = _tool_output([pkginfo, '-l', package_name])
    if pkginfo_contents is not None:
        version_re = re.compile(r'^ *VERSION:\s*(.*)$', re.M)
        version_match = version_re.search(pkginfo_contents)
        if version_match:
            version = version_match.group(1)
    if pathname is None:
        pkgchk_contents = _tool_output([pkgchk, '-l', package_name])
        if pkgchk_contents is not None:
            pathname_re = re.compile(r'^Pathname:\s*(.*/bin/CC)$', re.M)
            pathname_match = pathname_re.search(pkgchk_contents)
            if pathname_match:
                pathname = os.path.dirname(pathname_match.group(1))
    package_info[package_name] = (pathname, version)
    return package_info[package_name]
# Use the Solaris packaging tools (pkginfo/pkgchk) to figure out where
# the C++ compiler lives and which version of it is installed.
def get_cppc(env):
    """Locate the Sun C++ compiler for `env`.

    Returns a tuple (directory, cxx_name, shcxx_name, version); directory
    and version may be None when they cannot be determined.
    """
    cxx = env.subst('$CXX')
    compiler_dir = os.path.dirname(cxx) if cxx else None
    compiler_version = None
    pkginfo = env.subst('$PKGINFO')
    pkgchk = env.subst('$PKGCHK')
    for package in ['SPROcpl']:
        pkg_dir, pkg_version = get_package_info(package, pkginfo, pkgchk)
        if pkg_dir and pkg_version:
            compiler_dir, compiler_version = pkg_dir, pkg_version
            break
    return (compiler_dir, 'CC', 'CC', compiler_version)
def generate(env):
    """Add Builders and construction variables for SunPRO C++ to `env`."""
    compiler_dir, cxx_name, shcxx_name, cxx_version = get_cppc(env)
    if compiler_dir:
        # Use absolute paths when the compiler directory is known.
        cxx_name = os.path.join(compiler_dir, cxx_name)
        shcxx_name = os.path.join(compiler_dir, shcxx_name)
    cplusplus.generate(env)
    env['CXX'] = cxx_name
    env['SHCXX'] = shcxx_name
    env['CXXVERSION'] = cxx_version
    # -KPIC: position-independent code for shared objects.
    env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -KPIC')
    env['SHOBJPREFIX'] = 'so_'
    env['SHOBJSUFFIX'] = '.o'
def exists(env):
    """Return the SunPRO C++ compiler path if it is present, else None."""
    path, cxx, _shcxx, _version = get_cppc(env)
    if not (path and cxx):
        return None
    candidate = os.path.join(path, cxx)
    return candidate if os.path.exists(candidate) else None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
[
"jim@eigenlabs.com"
] |
jim@eigenlabs.com
|
74aeddee7276ced1388155ecfd993003fe1085f4
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/NhPYFqfQcFXWvdH8t_5.py
|
f7d2eb52db8c7a1424e591f89c82b393d52cea0d
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
"""
A positive integer multiplied times its inverse is always equal to 1:
`17*(1/17)==1`. Modular arithmetic has a similar inverse function, although,
for modulus `m`, we are confined to integers from 0 to m-1. The modular
multiplicative inverse of 3 modulus 5 is equal to 2 because `(3*2)%5==1`.
Another example: the modular inverse of 17 modulus 1000007 is equal to 58824
because `(17*58824)%1000007==1`. The modular inverse, if it exists, must
always be in the range 0 to m-1.
Create a function that has arguments integer `n` and modulus `m`. The function
will return the modular inverse of `n` mod `m`. If the modular inverse does
not exist, return `False`.
### Examples
mod_inv(2, 3) ➞ 2
mod_inv(12, 47) ➞ 4
mod_inv(11, 33) ➞ False
mod_inv(55, 678) ➞ 37
mod_inv(81, 3455) ➞ 2346
### Notes
* Some of the test cases have rather large integers, so if you attempt to do a brute force search of the entire modular field, you may not be successful due to the 12 second time limit imposed by the server. See **Resources** for a more efficient approach.
* The modular inverse of a number `n` modulus `m` exists only if `n` and `m` are coprime (i.e. they have no common factors other than 1).
* One practical use of modular inverse is in public-key cryptography like RSA where it can be used to determine the value of the private key.
"""
def egcd(j, k):
    """Extended Euclidean algorithm.

    Return (g, x, y) such that x*j + y*k == g == gcd(j, k).

    Implemented iteratively so very large inputs (the problem statement
    mentions rather large integers) cannot exhaust Python's recursion
    limit, which the original recursive version could.
    """
    old_r, r = j, k
    old_x, x = 1, 0
    old_y, y = 0, 1
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
        old_y, y = y, old_y - q * y
    return (old_r, old_x, old_y)
def mod_inv(j, m):
    """Return the modular multiplicative inverse of j modulo m.

    Returns False when j and m are not coprime (no inverse exists);
    otherwise the result lies in the range 0..m-1.
    """
    g, coeff, _ = egcd(j, m)
    return coeff % m if g == 1 else False
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
654671700188a0cf97b551f4f3716dcebb0ade85
|
48832d27da16256ee62c364add45f21b968ee669
|
/res/scripts/client/gui/wgnc/events.py
|
7291b9a2e8cb59d82254603badc1df9740d57f17
|
[] |
no_license
|
webiumsk/WOT-0.9.15.1
|
0752d5bbd7c6fafdd7f714af939ae7bcf654faf7
|
17ca3550fef25e430534d079876a14fbbcccb9b4
|
refs/heads/master
| 2021-01-20T18:24:10.349144
| 2016-08-04T18:08:34
| 2016-08-04T18:08:34
| 64,955,694
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 957
|
py
|
# 2016.08.04 19:53:34 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/wgnc/events.py
import Event
class _WGNCEvents(object):
    """Event hub for WGNC notification-item events.

    All events share one EventManager so every registered listener can be
    detached with a single clear() call.
    """
    __slots__ = ('__eManager', 'onItemShowByDefault', 'onItemShowByAction', 'onItemUpdatedByAction', 'onProxyDataItemShowByDefault')
    def __init__(self):
        super(_WGNCEvents, self).__init__()
        # Single manager owns every event below.
        self.__eManager = Event.EventManager()
        self.onItemShowByDefault = Event.Event(self.__eManager)
        self.onItemShowByAction = Event.Event(self.__eManager)
        self.onItemUpdatedByAction = Event.Event(self.__eManager)
        self.onProxyDataItemShowByDefault = Event.Event(self.__eManager)
    def clear(self):
        # Drops all listeners registered on any of the events above.
        self.__eManager.clear()
# Module-level singleton used by the rest of the WGNC package.
g_wgncEvents = _WGNCEvents()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\wgnc\events.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:53:34 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
66c71b03c28c724553f740d6e72d6d54448e2888
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-bcs/huaweicloudsdkbcs/v2/model/show_blockchain_detail_request.py
|
0799bf411b855abd953b527d517b0231e35885cf
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,836
|
py
|
# coding: utf-8
import re
import six
class ShowBlockchainDetailRequest:
    """Request model for querying the details of one blockchain instance.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'blockchain_id': 'str'
    }

    attribute_map = {
        'blockchain_id': 'blockchain_id'
    }

    def __init__(self, blockchain_id=None):
        """ShowBlockchainDetailRequest - a model defined in huaweicloud sdk

        :param blockchain_id: ID of the blockchain instance to query.
        :type blockchain_id: str
        """
        self._blockchain_id = None
        self.discriminator = None
        self.blockchain_id = blockchain_id

    @property
    def blockchain_id(self):
        """Gets the blockchain_id of this ShowBlockchainDetailRequest.

        blockchainID

        :return: The blockchain_id of this ShowBlockchainDetailRequest.
        :rtype: str
        """
        return self._blockchain_id

    @blockchain_id.setter
    def blockchain_id(self, blockchain_id):
        """Sets the blockchain_id of this ShowBlockchainDetailRequest.

        blockchainID

        :param blockchain_id: The blockchain_id of this ShowBlockchainDetailRequest.
        :type: str
        """
        self._blockchain_id = blockchain_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Iterate the declared attributes directly; dict.items() works on
        # both Python 2 and 3, so the `six.iteritems` indirection is gone.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                # Mask attributes flagged as sensitive.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Returns the JSON string representation of the model."""
        # Prefer simplejson when installed, but fall back to the standard
        # library so this method has no hard third-party dependency
        # (the original raised ImportError without simplejson).
        try:
            import simplejson as json
        except ImportError:
            import json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowBlockchainDetailRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
1345771bc1b47bd1670f09a40a36343b34214e39
|
f3110c8d0d1a232a0511ec559695882c1eb8594e
|
/DJANGO/quiz/views.py
|
884b75847a4fa12c352577e3ce03ff8523cc36d7
|
[] |
no_license
|
SeungWookHan/Flutter-DRF
|
feb1394d52961824eac2a6e88c667a0e03375c47
|
c793ccdacee1a4053a33471c226ff2ce8c5797dc
|
refs/heads/master
| 2023-02-04T23:15:45.945139
| 2020-12-29T15:50:02
| 2020-12-29T15:50:02
| 324,955,539
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Quiz
from .serializers import QuizSerializer
import random
# Create your views here.
@api_view(['GET'])
def helloAPI(request):
    """Trivial GET endpoint returning a fixed greeting (health check)."""
    return Response("hello world!")
@api_view(['GET'])
def randomQuiz(request, id):
    """Return `id` randomly sampled quizzes serialized as JSON.

    Guards against over-large requests: random.sample raises ValueError
    when the sample size exceeds the population, so the count is capped
    at the number of quizzes available.
    """
    pool = list(Quiz.objects.all())
    count = min(id, len(pool))
    sampled = random.sample(pool, count)
    # many=True serializes a list of objects rather than a single instance.
    serializer = QuizSerializer(sampled, many=True)
    return Response(serializer.data)
|
[
"hswook12@me.com"
] |
hswook12@me.com
|
ba1076a3246d6802d9ea52c4729fe3b0503f4722
|
60448d1467b5a2531bab91e8bc721294a397e754
|
/nmrpyschedule/generator.py
|
6ae0f758186b260b88d0c9eb2eb10be36e7e9cae
|
[] |
no_license
|
mattfenwick/NMRPySchedule
|
8c7fda460b32f09138f08f15d302df4096075fb9
|
22e3399e9964137cb3e382b5805d457bb82e751f
|
refs/heads/master
| 2021-01-18T16:32:23.013635
| 2013-06-11T15:36:29
| 2013-06-11T15:36:29
| 10,610,035
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
'''
@author: matt
'''
import itertools
import math
def uniform(ranges):
    """
    Generate a table of n-dimensional points containing all grid points
    within the given ranges. Both boundaries are included.
    """
    axes = [range(low, high + 1) for (low, high) in ranges]
    return itertools.product(*axes)
_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
def _haltonNumber(index, base):
result = 0
f = 1. / base
i = index
while i > 0:
result = result + f * (i % base)
i = int(i / base)
f = f / base
return result
def _scaledHaltonNumber(factor, shift, index, prime):
    """Scale a Halton number by `factor`, truncate to int, offset by `shift`."""
    scaled = factor * _haltonNumber(index, prime)
    return int(scaled) + shift
def halton(ranges):
    """
    Generate subrandom sequence of n-dimensional points according to the Halton sequence.
    Returns a generator of an infinite sequence.
    """
    factors = [max(r) - min(r) for r in ranges]
    shifts = [min(r) for r in ranges]
    if len(ranges) > len(_primes):
        raise ValueError("not enough primes defined: please define more or reduce the dimensionality")
    index = 0
    while True:
        yield [_scaledHaltonNumber(factor, shift, index, prime)
               for (factor, shift, prime) in zip(factors, shifts, _primes)]
        index += 1
def _distance(pt, origin):
zipped = zip(pt, origin)
sumSquares = sum([abs(a - b) ** 2 for (a, b) in zipped])
dist = math.sqrt(sumSquares)
return dist
def _myDist(pt, origin, width, maxDeviation):
    """True when the distance from `origin` to `pt` is within
    `maxDeviation` of some integer multiple of `width`."""
    ratio = _distance(pt, origin) / width
    deviation = abs(ratio - round(ratio)) * width
    return deviation <= maxDeviation
def concentricShell(ranges, shellSpacing, maxDeviation):
    """
    Generate all points whose distance from the origin is close to a multiple
    of an arbitrary number. The origin is defined as the point whose coordinates
    are the low end of each dimension's range.
    """
    origin = [low for (low, _high) in ranges]
    return [pt for pt in uniform(ranges)
            if _myDist(pt, origin, shellSpacing, maxDeviation)]
def _myFilter(pt, origin, offsetAngle, degreeGap, tolerance):
y,x = pt[0] - origin[0], pt[1] - origin[1]
theta = m.atan2(x, y) * 180. / m.pi # angle in degrees
ratio = (theta + offsetAngle) / degreeGap
return abs(ratio - round(ratio)) * degreeGap < tolerance
def radial(ranges, offsetAngle, gapAngle, maximumDeviation):
    """
    Generate coordinates of points, where the points lie along 'spokes'
    radiating out from the origin (the low corner of `ranges`).
    """
    origin = [low for (low, _high) in ranges]
    return [pt for pt in uniform(ranges)
            if _myFilter(pt, origin, offsetAngle, gapAngle, maximumDeviation)]
|
[
"mfenwick100@gmail.com"
] |
mfenwick100@gmail.com
|
3b4f8d5f9825913e31189eddb81b7034aebe454f
|
46669c775591b38f71382f690cb93a4879366595
|
/src/020_create_xml.py
|
2cdeed0319fdbdfe0862a1c99e4fb20e25ad7850
|
[
"CC-BY-4.0"
] |
permissive
|
kouigenjimonogatari/kouigenjimonogatari.github.io
|
e234abe0e4145bbe879756f6af19a546c01a2ff4
|
c0ec798d550bda5670d8af15c4028ff925e6495d
|
refs/heads/master
| 2022-10-12T19:52:05.229525
| 2022-10-04T09:34:51
| 2022-10-04T09:34:51
| 223,747,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,077
|
py
|
import sys
import urllib
import json
import argparse
import urllib.request
import unicodedata
import collections
import os
import xml.etree.ElementTree as ET
import csv
import glob
import urllib.parse
def get_mdata(manifest):
print(manifest)
res = urllib.request.urlopen(manifest)
# json_loads() でPythonオブジェクトに変換
data = json.loads(res.read().decode('utf-8'))
canvases = data["sequences"][0]["canvases"]
map = {}
for i in range(len(canvases)):
canvas = canvases[i]
canvas_id = canvas["@id"]
width = canvas["width"]
height = canvas["height"]
url = canvas["images"][0]["resource"]["@id"]
map[canvas_id] = {
"width": width,
"height": height,
"url": url
}
return map
vols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12 ,13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54]
m_map = {}
for vol in vols:
prefix = ".//{http://www.tei-c.org/ns/1.0}"
xml = ".//{http://www.w3.org/XML/1998/namespace}"
tmp_path = "data/template.xml"
tree = ET.parse(tmp_path)
ET.register_namespace('', "http://www.tei-c.org/ns/1.0")
ET.register_namespace('xml', "http://www.w3.org/XML/1998/namespace")
root = tree.getroot()
para = root.find(prefix + "body").find(prefix + "p")
files = glob.glob("../api/items/*.json")
surfaceGrp = root.find(prefix+"surfaceGrp")
with open("../api/item_sets/"+str(vol).zfill(2)+".json", 'r') as f:
rdf_collection = json.load(f)
manifest = rdf_collection[0]["http://www.w3.org/2000/01/rdf-schema#seeAlso"][0]["@id"]
title = rdf_collection[0]["http://www.w3.org/2000/01/rdf-schema#label"][0]["@value"]
surfaceGrp.set("facs", manifest)
if manifest not in m_map:
m_map[manifest] = get_mdata(manifest)
canvas_data = m_map[manifest]
prev_page = -1
canvas_map = {}
for file in sorted(files):
with open(file, 'r') as f:
data = json.load(f)
# print(file)
value = data[0]["http://www.w3.org/2000/01/rdf-schema#label"][0]["@value"]
# if "http://example.org/冊数名" not in data[0]:
# continue
vol_ = int(data[0]["http://purl.org/dc/terms/isPartOf"][0]["@id"].split("/")[-1].split(".")[0])
if vol != vol_:
continue
root.find(prefix + "title").text = "校異源氏物語・"+ title
id = data[0]["@id"]
page = data[0]["https://w3id.org/kouigenjimonogatari/api/property/page"][0]["@value"]
# 新しい頁
if page != prev_page:
prev_page = page
lb = ET.Element(
"{http://www.tei-c.org/ns/1.0}lb")
para.append(lb)
pb = ET.Element(
"{http://www.tei-c.org/ns/1.0}pb")
pb.set("n", str(page))
pb.set("facs", "#zone_"+str(page).zfill(4))
para.append(pb)
relation = data[0]["http://purl.org/dc/terms/relation"][0]["@id"]
relation = urllib.parse.unquote(relation)
canvas_id = relation.split("canvas=")[1]
obj = canvas_data[canvas_id]
if canvas_id not in canvas_map:
canvas_map[canvas_id] = {
"url": obj["url"],
"zones": []
}
if page % 2 == 0:
lrx = obj["width"]
ulx = int(lrx / 2)
else:
lrx = int(obj["width"] / 2)
ulx = 0
zone = ET.Element(
"{http://www.tei-c.org/ns/1.0}zone")
zone.set("xml:id", "zone_"+str(page).zfill(4))
zone.set("lrx", str(lrx))
zone.set("lry", str(obj["height"]))
zone.set("ulx", str(ulx))
zone.set("uly", str(0))
canvas_map[canvas_id]["zones"].append(zone)
lb = ET.Element(
"{http://www.tei-c.org/ns/1.0}lb")
para.append(lb)
line = ET.Element(
"{http://www.tei-c.org/ns/1.0}seg")
line.set("corresp", id)
line.text = value
# para.append(line)
para.append(line)
for canvas_id in canvas_map:
obj = canvas_map[canvas_id]
surface = ET.Element(
"{http://www.tei-c.org/ns/1.0}surface")
surfaceGrp.append(surface)
graphic = ET.Element(
"{http://www.tei-c.org/ns/1.0}graphic")
graphic.set("n", canvas_id)
graphic.set("url", obj["url"])
surface.append(graphic)
for zone in obj["zones"]:
surface.append(zone)
tree.write("../tei/"+str(vol).zfill(2)+".xml", encoding="utf-8")
|
[
"na.kamura.1263@gmail.com"
] |
na.kamura.1263@gmail.com
|
7920769fb9df2c743760034190be86dff1f1947a
|
65c0ef56c2e2c3e1646a610f49e6dd06f2c6102d
|
/src/libs/cmd/implement/emulator/fastboot.py
|
e8c8437bb490f5e1cb28f6289ccb8449e2873cad
|
[
"MIT"
] |
permissive
|
VirtualVFix/AndroidTestFramework
|
d3411f328a793ee7b007c4736983204aae81b739
|
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
|
refs/heads/master
| 2020-08-11T14:48:12.454415
| 2019-10-12T10:20:43
| 2019-10-12T10:20:43
| 214,582,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "09/22/17 14:27"
from .cmd import Cmd
from libs.cmd.implement.base.fastboot import Fastboot
from libs.cmd.implement.base.cmd import Cmd as CmdBase
#: Replace the :class:`implement.base.cmd.Cmd` base class with the emulator
#: :class:`implement.emulator.cmd.Cmd`: any base of Fastboot deriving from
#: the base Cmd is swapped for the emulator Cmd, all other bases are kept,
#: so the emulator Fastboot retains the same signature as the base Fastboot.
Fastboot.__bases__ = tuple([x if not issubclass(x, CmdBase) else Cmd for x in Fastboot.__bases__])
|
[
"github.com/virtualvfix"
] |
github.com/virtualvfix
|
1926722da71183f936fd15d9c412fe2e5f789af4
|
35fb71dd7b67fcee5e01e090e5f2a04dbbf30a15
|
/network_base/week01/day02/lqueue.py
|
a2b102948e76f64e135371e6dfc924f57c1832a7
|
[] |
no_license
|
zlz2013/zlz
|
3119795848ed9cc43708482a2aa3e764c1312394
|
228d04a30b0782d859323e507ddd0c7459635bfb
|
refs/heads/master
| 2020-06-05T17:44:47.975328
| 2019-09-10T11:57:23
| 2019-09-10T11:57:23
| 192,500,784
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
"""
lqueue.py 链式队列
重点代码
思路分析:
1.基于链表模型完成链式栈
2.链表开端作为队头,尾端作为队尾
"""
class LQueueError(Exception):
    """Raised for invalid queue operations (e.g. dequeue from an empty queue)."""
    pass
class Node:
    """Singly-linked list node holding `data` and a link to the next node."""
    def __init__(self,data,next=None):
        self.data=data
        self.next=next
# Linked-list queue class
class LQueue:
    """FIFO queue built on a singly-linked list.

    `front` references a sentinel node; elements enter at `rear` and
    leave from the front.
    """

    def __init__(self):
        # Front and rear both start at one payload-less sentinel node.
        self.front = self.rear = Node(None)

    def is_empty(self):
        """The queue is empty when front and rear reference the same node."""
        return self.front == self.rear

    # Enqueue: the rear pointer moves.
    def enqueue(self, elem):
        """Append `elem` at the rear of the queue."""
        node = Node(elem)
        self.rear.next = node
        self.rear = node

    # Dequeue: the front pointer moves.
    def dequeue(self):
        """Remove and return the front element.

        Raises LQueueError when the queue is empty.
        """
        if self.front == self.rear:
            raise LQueueError("Queue is empty")
        self.front = self.front.next
        return self.front.data
if __name__=="__main__":
lq=LQueue()
lq.enqueue(10)
lq.enqueue(20)
lq.enqueue(30)
while not lq.is_empty():
print(lq.dequeue())
|
[
"229165631@qq.com"
] |
229165631@qq.com
|
1e738f57abaf2f4bade3d418917aad39cbae070f
|
7649278f4bda14aaf4ec02b7ae58094e16d98618
|
/Project/scripts/cartpole_eval.py
|
61018275f50b4f2d739e06cf8596805d284be6f9
|
[] |
no_license
|
peng00bo00/optlearningcontrol
|
1877381ca749f17caf75ede02a5cb263cbddaa79
|
44eff6d17e4da0b0adc85e5e84cf4b8edb8a1bb8
|
refs/heads/master
| 2021-01-06T18:44:58.981575
| 2020-05-19T17:44:34
| 2020-05-19T17:44:34
| 241,445,231
| 0
| 0
| null | 2020-02-18T19:11:08
| 2020-02-18T19:11:08
| null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
import numpy as np
import tensorflow as tf
import gym
from gym import wrappers
import os
import matplotlib.pyplot as plt
## environment
env = gym.make('CartPole-v0')
# Record episode videos into ../animations/, overwriting earlier recordings.
env = wrappers.Monitor(env, '../animations/', force=True)
env.reset()
## GPU configuration
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    # Grow GPU memory on demand instead of reserving it all up front.
    tf.config.experimental.set_memory_growth(gpu, True)
def play(env, policy):
    """Roll out one episode in `env`, acting greedily w.r.t. `policy`.

    `policy.predict` is expected to return per-action values for a batched
    observation; the argmax action is taken. Returns the total episode reward.
    """
    observation = env.reset()
    rewards = []
    terminal = False
    while not terminal:
        env.render()
        q_values = policy.predict(observation.reshape([1, -1]))
        action = np.argmax(q_values)
        observation, reward, terminal, _ = env.step(action)
        rewards.append(reward)
    return np.sum(rewards)
# Evaluate each trained agent variant for one rendered/recorded episode.
# DQN
policy = tf.keras.models.load_model("../models/DQN_q_network.h5")
play(env, policy)
## Double DQN
policy = tf.keras.models.load_model("../models/DoubleDQN_q_network.h5")
play(env, policy)
## Prioritized Experience Replay
policy = tf.keras.models.load_model("../models/PrioritizedDQN_q_network.h5")
play(env, policy)
## Dueling DQN (the model file name keeps the original "DeulDQN" spelling)
policy = tf.keras.models.load_model("../models/DeulDQN_q_network.h5")
play(env, policy)
|
[
"pengbo_tongji@126.com"
] |
pengbo_tongji@126.com
|
874a69d989a964f5f0210a7eafbf994cd3c38d0c
|
6ddcdda679089b228d55ef098addfe8193287d88
|
/py/lpthw/test.py
|
500c610d2d9010ee315cb403153222d93a1680c9
|
[
"MIT"
] |
permissive
|
danyfang/SourceCode
|
518e4715a062ed1ad071dea023ff4785ce03b068
|
8168f6058648f2a330a7354daf3a73a4d8a4e730
|
refs/heads/master
| 2021-06-06T16:36:50.999324
| 2021-04-23T08:52:20
| 2021-04-23T08:52:20
| 120,310,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
#!/usr/bin/python
# NOTE: Python 2 script (print statement, raw_input).
from __future__ import division
# reduce folds multiplication over 1..x, i.e. computes the factorial of x
x = int(raw_input("Please input an integer\n > "))
print reduce(lambda x,y: x*y, range(1,x+1))
def add(x, y):
    """Return the sum of x and y."""
    return x + y

def sub(x, y):
    """Return x minus y."""
    return x - y

def mul(x, y):
    """Return the product of x and y."""
    return x * y

def div(x, y):
    """Return x divided by y (true division via the __future__ import)."""
    return x / y

# Dispatch table mapping an operator symbol to its implementation.
operator = {"+": add, "-": sub, "*": mul, "/": div}
if __name__ == "__main__":
x = raw_input("Please input a numebr\n > ")
o = raw_input("Please input an operator\n > ")
y = raw_input("Please input a numebr\n > ")
print operator.get(o)(int(x), int(y))
|
[
"danyfang7@gmail.com"
] |
danyfang7@gmail.com
|
57e2ee283d3febe993b10065b968ba9f581b5a55
|
6a52db9b913c3677dfbcd55776e1a14cddde359d
|
/parceiros/migrations/0006_auto_20181117_0309.py
|
3ab094723e58de570f6ab1ca3fb06592a7e4d342
|
[] |
no_license
|
tiagocordeiro/casaconceito-sie
|
47a2922f328fa7c9e13e84dae1b6a9135edd6236
|
892e42a655bb4ef08952c5be167e281720f40b49
|
refs/heads/master
| 2023-08-31T14:48:21.396973
| 2021-05-11T18:18:07
| 2021-05-11T18:18:07
| 140,175,770
| 0
| 0
| null | 2023-09-13T15:14:42
| 2018-07-08T14:38:35
|
HTML
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
# Generated by Django 2.1.3 on 2018-11-17 05:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make IndicacaoPagamentos.indicacao a
    one-to-one link to Indicacao, cascading on delete."""
    dependencies = [
        ('parceiros', '0005_auto_20181117_0251'),
    ]
    operations = [
        migrations.AlterField(
            model_name='indicacaopagamentos',
            name='indicacao',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='parceiros.Indicacao'),
        ),
    ]
|
[
"tiago@mulhergorila.com"
] |
tiago@mulhergorila.com
|
20da8a1571be3297fdc2a8720ab6d9c6f804eede
|
a0801d0e7325b31f0383fc68517e208680bb36d6
|
/Kattis/anagramcounting.py
|
362adbaa6dfaa8948b5b2fb3c59253bb2a0f31b6
|
[] |
no_license
|
conormccauley1999/CompetitiveProgramming
|
bd649bf04438817c7fa4755df2c2c7727273b073
|
a7e188767364be40f625612af3d16182f2d8d4de
|
refs/heads/master
| 2023-05-14T13:19:32.678134
| 2023-05-11T16:07:33
| 2023-05-11T16:07:33
| 179,089,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
from collections import Counter
# Memo table for factorials; 0! = 1 seeds the iteration.
_f = {0: 1}

def f(n):
    """Return n! with memoisation in `_f`.

    Fills the cache iteratively from the largest cached value upward, so
    very long inputs cannot exhaust Python's recursion limit (the original
    recursed once per uncached value).
    """
    if n not in _f:
        start = max(k for k in _f if k <= n)
        acc = _f[start]
        for i in range(start + 1, n + 1):
            acc *= i
            _f[i] = acc
    return _f[n]
def g(s):
    """Number of distinct anagrams of `s`: len(s)! / prod(count_c! for each char)."""
    char_counts = Counter(s)
    result = f(len(s))
    for count in char_counts.values():
        result //= f(count)
    return result
# Read one word per line until EOF, printing its anagram count.
while True:
    try:
        line = input()
    except EOFError:
        # End of input: no more test cases.
        break
    # Errors from g() now propagate instead of being silently swallowed
    # by the original bare `except: break`.
    print(g(line))
|
[
"conormccauley1999@gmail.com"
] |
conormccauley1999@gmail.com
|
aab5320af9b48f92a2e321db7cb26674e6d0a401
|
24f2696aab87f1632705a7c8b2d3b866e26aa3ee
|
/LCA_236.py
|
281941167ef5ab53585044747e11fcdfbd20eb5e
|
[] |
no_license
|
adiggo/leetcode_py
|
44a77a0b029f4d92bd0d8e24cad21ceea52e7794
|
4aa3a3a0da8b911e140446352debb9b567b6d78b
|
refs/heads/master
| 2020-04-06T07:05:21.770518
| 2016-07-01T16:00:40
| 2016-07-01T16:00:40
| 30,397,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Lowest common ancestor of two nodes in a binary tree (LeetCode 236)."""

    def lowestCommonAncestor(self, root, p, q):
        """
        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        """
        # Base cases: empty subtree, or the root itself is one of the targets.
        if not root or root == p or root == q:
            return root
        left = self.lowestCommonAncestor(root.left, p, q)
        right = self.lowestCommonAncestor(root.right, p, q)
        # Both sides found a target -> root is the split point (the LCA);
        # otherwise the LCA lives entirely on the non-empty side.
        if left and right:
            return root
        return left or right
|
[
"adiggo@gmail.com"
] |
adiggo@gmail.com
|
4af9fff107581efba17158a157bc33c7f8d43be6
|
f5390652068c736aea061a0979f27ba32b51784f
|
/Web/Web/views.py
|
dfd74dd32a7e1fe8b04516a9a481ccbc516c7484
|
[] |
no_license
|
kho903/Project_Reflux
|
172c9bd7062f4cc9f84c576412557435f63906b5
|
0f1cdab08bc71d4c219b34839f63cc96f7c90d47
|
refs/heads/master
| 2022-12-14T03:29:21.008229
| 2020-09-02T09:24:16
| 2020-09-02T09:24:16
| 286,716,990
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views.generic.base import TemplateView
from django.views.generic import CreateView
class HomeView(TemplateView):
    """Render the site home page."""
    template_name = 'home.html'
class UserCreateView(CreateView):
    """Sign-up view using Django's built-in UserCreationForm;
    redirects to the 'register_done' URL on success."""
    template_name = 'registration/register.html'
    form_class = UserCreationForm
    success_url = reverse_lazy('register_done')
class UserCreateDoneTV(TemplateView):
    """Confirmation page shown after a successful registration."""
    template_name = 'registration/register_done.html'
|
[
"gmldnr2222@naver.com"
] |
gmldnr2222@naver.com
|
8ad3db0ec4061062900fc2e03cbbae10b8f45f56
|
498d889585187ca56018b15f38880b8a671442b8
|
/utils.py
|
5c6fc73da2244ffe9d611253c389cb6fc386f278
|
[] |
no_license
|
mandasdasdasd/excel-fe
|
b89b06681bd7c91000f491a5f85f0c8577ac0fc3
|
a81eb0085192c0932992745284c24efda9859241
|
refs/heads/master
| 2022-12-24T01:53:03.351947
| 2019-12-04T10:09:14
| 2019-12-04T10:09:14
| 205,658,439
| 0
| 0
| null | 2022-12-11T05:10:56
| 2019-09-01T10:01:07
|
Vue
|
UTF-8
|
Python
| false
| false
| 218
|
py
|
import hmac, random
class Encryption(object):
    """Thin wrapper around HMAC-MD5 with a fixed application key.

    NOTE(review): MD5 is cryptographically weak; acceptable for
    non-security checksums, but consider SHA-256 for anything sensitive.
    """

    def __init__(self):
        # Shared secret used as the HMAC key.
        self.key = "bigdata"

    def hmac_md5(self, s):
        """Return the hex digest of HMAC-MD5(key, s)."""
        key_bytes = self.key.encode('utf-8')
        msg_bytes = s.encode('utf-8')
        return hmac.new(key_bytes, msg_bytes, 'MD5').hexdigest()
|
[
"you@example.com"
] |
you@example.com
|
c9ec417f68e16aaa3a781bc04a7e47b8cffff73c
|
c8c0d3e83dbec83ccb89a751dc3e656bb482a2ce
|
/ZombieGame/modules/coordinates.py
|
2e949bd8c3dcf4cac12328f9fe1025eaec8889dd
|
[] |
no_license
|
Yamase31/python-zombie-game
|
80658bcfcb05b819265dfc75c5563391f19b1861
|
dfd931ecf5caac9348b652862fc0b018979491d9
|
refs/heads/main
| 2023-07-07T08:58:43.314898
| 2021-08-10T00:33:36
| 2021-08-10T00:33:36
| 394,479,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
# Quick coordinate class to contain both x and y
# Overrides == for easy comparison
class Coordinates(object):
    """2-D coordinate pair supporting ==, +, -, len() and unpacking.

    Arithmetic accepts a scalar (applied to both axes), another
    Coordinates, or a 2-tuple. Generalized from the original to also
    accept floats, and uses isinstance() instead of `type(x) == T`.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def _coerce(self, other):
        """Return (dx, dy) from a Coordinates, 2-tuple or scalar operand.

        Returns None for unsupported operand types, matching the original
        behavior of + and - (which silently returned None).
        """
        if isinstance(other, Coordinates):
            return other.x, other.y
        if isinstance(other, tuple):
            return other[0], other[1]
        if isinstance(other, (int, float)):
            return other, other
        return None

    def __sub__(self, other):
        pair = self._coerce(other)
        if pair is None:
            return None
        dx, dy = pair
        return Coordinates(self.x - dx, self.y - dy)

    def __add__(self, other):
        pair = self._coerce(other)
        if pair is None:
            return None
        dx, dy = pair
        return Coordinates(self.x + dx, self.y + dy)

    def __len__(self):
        return 2

    def __iter__(self):
        # Reset the cursor so the instance can be unpacked repeatedly;
        # concurrent iterations still share state (as in the original).
        self.current = 0
        return self

    def __next__(self):
        if self.current >= len(self):
            raise StopIteration
        self.current += 1
        return self.x if self.current == 1 else self.y
if __name__ == '__main__':
    # Smoke test: unpacking relies on the __iter__/__next__ protocol.
    c = Coordinates(5,6)
    print(*c)
|
[
"noreply@github.com"
] |
Yamase31.noreply@github.com
|
a01b71e2dae640d49f54d02cf08acedbab149c70
|
961931333838aebe8bd17c30c19f3994e32d76ce
|
/src/leetcode/bfs/279. Perfect Squares.py
|
128380fcb8630cd5d95ab5e6415f0e7e36e9fcdd
|
[] |
no_license
|
MTGTsunami/LeetPython
|
5161f9e31dc2ab1855123c2a3a151eb6f4d889bc
|
f7f3839f631f08a9e5bf8a02398b940f82e43e67
|
refs/heads/master
| 2023-04-17T16:59:45.621291
| 2021-04-26T07:24:50
| 2021-04-26T07:24:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,624
|
py
|
"""
Given a positive integer n, find the least number of perfect square numbers (for example, 1, 4, 9, 16, ...) which sum to n.
Example 1:
Input: n = 12
Output: 3
Explanation: 12 = 4 + 4 + 4.
Example 2:
Input: n = 13
Output: 2
Explanation: 13 = 4 + 9.
"""
class MySolution(object):  # A little bit larger than O(n) time
    """Bottom-up DP, zero-indexed: square[i-1] holds the least number of
    perfect squares summing to i. Assumes n >= 1."""

    def numSquares(self, n):
        """
        :type n: int
        :rtype: int
        """
        square = [float("inf")] * n
        for i in range(1, n + 1):
            root = i ** 0.5
            floor = int(root)
            if root == floor:
                # i is itself a perfect square.
                square[i - 1] = 1
            else:
                # Try every square floor^2, (floor-1)^2, ... as the last
                # summand. (The original also kept an unused local
                # `nearest`; removed.)
                while floor >= 1:
                    square[i - 1] = min(square[i - floor ** 2 - 1] + 1, square[i - 1])
                    floor -= 1
        return square[-1]
class SolutionDP(object):
    """Classic O(n * sqrt(n)) dynamic programming solution."""

    def numSquares(self, n):
        """
        :type n: int
        :rtype: int
        """
        # best[t] = least number of perfect squares summing to t.
        best = [0] + [float("inf")] * n
        for total in range(1, n + 1):
            j = 1
            while j * j <= total:
                candidate = best[total - j * j] + 1
                if candidate < best[total]:
                    best[total] = candidate
                j += 1
        return best[-1]
class SolutionMath(object):
    """O(sqrt(n)) solution via Lagrange's four-square and Legendre's
    three-square theorems: the answer is always 1, 2, 3 or 4."""

    def numSquares(self, n):
        """
        :type n: int
        :rtype: int
        """
        def isSquare(k):
            # Integer check avoids float-precision pitfalls of
            # comparing k**0.5 to its floor.
            r = int(k ** 0.5)
            return r * r == k

        # If n is a perfect square, return 1.
        if isSquare(n):
            return 1
        # The result is 4 iff n can be written as 4^k * (8*m + 7)
        # (Legendre's three-square theorem).
        # Bug fix: use floor division so n stays an int — the original
        # `n /= 4` produced a float under Python 3.
        while n % 4 == 0:
            n //= 4
        if n % 8 == 7:
            return 4
        for i in range(1, int(n ** 0.5) + 1):
            if isSquare(n - i * i):
                return 2
        return 3
class SolutionBFS(object):  # Important
    """BFS on the graph where each edge subtracts one perfect square."""

    def numSquares(self, n):
        """Return the least number of perfect squares summing to n.

        :type n: int
        :rtype: int
        """
        # Candidate squares in ascending order; BFS depth = answer.
        squares = [k * k for k in range(1, int(n ** 0.5) + 1)]
        frontier = set([n])
        level = 0
        while True:
            level += 1
            successors = set()
            for remainder in frontier:
                for sq in squares:
                    if sq == remainder:
                        return level
                    elif sq < remainder:
                        successors.add(remainder - sq)
                    else:
                        # squares are ascending; nothing further fits
                        break
            frontier = successors
|
[
"mtgtsunami1219@gmail.com"
] |
mtgtsunami1219@gmail.com
|
2c12a85637d4448821f4e08fab01976870d8fdca
|
b3330bd3365767b89afb9c432f4deb722b39ac1c
|
/python/sort/selection_sort/selection_sort_10.py
|
d0142c054e2de0f2b0945ab15e296cef179f94f5
|
[] |
no_license
|
hguochen/algorithms
|
944df332d5b39220bd59cbd62dc74b12e335fb9e
|
703e71a5cd9e002d800340df879ed475a404d092
|
refs/heads/master
| 2022-02-27T12:11:10.607042
| 2022-02-18T21:04:00
| 2022-02-18T21:04:00
| 13,767,503
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
# selection sort
def selection_sort(array):
    """Sort `array` in place (ascending) and return it.

    Divides the list into a sorted left prefix and an unsorted right
    suffix; each pass swaps the smallest remaining element into place.
    O(n^2) comparisons, O(1) extra space.
    """
    # BUG FIX: range() instead of xrange() -- xrange is a NameError on
    # Python 3 (and range() is acceptable on Python 2 as well).
    for i in range(len(array)):
        # Assume the current element is the minimum, then scan the
        # unsorted suffix for anything smaller.
        min_index = i
        for j in range(i + 1, len(array)):
            if array[j] < array[min_index]:
                min_index = j
        # Swap the minimum into the front of the unsorted region.
        array[i], array[min_index] = array[min_index], array[i]
    return array
|
[
"hguochen@gmail.com"
] |
hguochen@gmail.com
|
2db11fc713334d1c4d17ecf444cf9726e26cc5dd
|
055cf8aeec011f67580bf92a83d94ee6919648cd
|
/migrations/versions/ad28a44f93c4_initial_migration.py
|
18999b6182f1570c2b30ca638cbdbed3b8a6a43e
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
RisperAkinyi/BlogPost
|
df82c8fec558425ca1bbce65aa90464176aefb87
|
f8ee4c887fceae8e70410b66a12bc5680cf26044
|
refs/heads/master
| 2022-09-30T19:09:27.969983
| 2019-08-13T07:36:26
| 2019-08-13T07:36:26
| 201,879,164
| 0
| 0
|
MIT
| 2022-09-16T18:07:44
| 2019-08-12T07:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
"""Initial Migration
Revision ID: ad28a44f93c4
Revises:
Create Date: 2019-08-09 11:05:50.912878
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad28a44f93c4'
down_revision = None  # None: this is the first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: users, comments and posts tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('profile_pic_path', sa.String(), nullable=True),
    sa.Column('password_hash', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Email is unique (one account per address); username is indexed but
    # not unique.
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comments', sa.String(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # NOTE(review): comments.post_id carries no ForeignKeyConstraint to
    # posts.id (unlike user_id) -- confirm whether that is intentional.
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
# ### end Alembic commands ###
def downgrade():
    """Drop everything created by upgrade(): children before parents."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('posts')
    op.drop_table('comments')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    # ### end Alembic commands ###
|
[
"email@example.com"
] |
email@example.com
|
08a41f586570d5ba0baa10410a977b1169ac947f
|
4be9a5bdb8e051001b78c8f127ccc1a7f85c14e7
|
/mapping/migrations/0033_auto_20170129_0939.py
|
90fce4536a94b43eded5f95299f301669aa5c874
|
[] |
no_license
|
quentin-david/heimdall
|
f72a85606e7ab53683df2023ef5eaba762198211
|
84a429ee52e1891bc2ee4eb07a084dff209c789c
|
refs/heads/master
| 2021-01-21T10:26:28.895663
| 2017-07-21T19:19:46
| 2017-07-21T19:19:46
| 83,432,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-29 09:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes ServiceReverseProxy.servername
    # optional (blank in forms, NULL in the database).

    dependencies = [
        ('mapping', '0032_servicewebserver_reverse_proxy'),
    ]

    operations = [
        migrations.AlterField(
            model_name='servicereverseproxy',
            name='servername',
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
    ]
|
[
"david@hemdgsa01.local.lan"
] |
david@hemdgsa01.local.lan
|
6be743b4b02d6eb6d7f62aab46ff57260ffa042b
|
f92dfdebb4bf6bc108f51783333520c35afa66da
|
/api-web/src/www/application/management/commands/publish_rabbitmq_genome_gene.py
|
23f7465ee4e41b1adf971b243ae030a6a568b6ea
|
[] |
no_license
|
duytran92-cse/nas-genodata
|
4d8659a135913d226842ff6a013324714ead0458
|
80c88f42145f729c5862a5293012e71548182e1d
|
refs/heads/master
| 2022-11-13T17:24:03.769605
| 2020-06-14T18:59:36
| 2020-06-14T18:59:36
| 272,264,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,305
|
py
|
import json, pika, os
from application.models import *
from urad_api import registry
from urad_api_standard.commands import Command as BaseCommand
from django.conf import settings
import json
from application.modules.gene import components as gene_components
from django.db import connection
class Command(BaseCommand):
    """Management command (Python 2): push gene records, with aggregated
    disease and publication annotations, onto a RabbitMQ queue."""

    ## PUBLISH
    def publish_to_queue(self, iterator, genome_queue, rabbitmq_host, rabbitmq_port):
        # One short-lived connection per batch; each element of `iterator`
        # is published as a single JSON-encoded message.
        credentials = pika.PlainCredentials('guest', 'guest')
        connection = pika.BlockingConnection(pika.ConnectionParameters(rabbitmq_host, rabbitmq_port, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=genome_queue)
        for x in iterator:
            channel.basic_publish(exchange='', routing_key=genome_queue, body=json.dumps(x))
        connection.close()

    def process(self, params = {}):
        """Walk the Gene table in 5000-row chunks and publish each chunk."""
        # DECLARE VARIABLE
        GENOME_QUEUE = settings.GENOME_QUEUE
        RABBITMQ_HOST = settings.RABBITMQ_HOST
        RABBITMQ_PORT = int(settings.RABBITMQ_PORT)

        # Starting
        print "[x] Publish data to rabbitmq"

        ##########################
        ## Gene
        print "[***] Publish GENE data to rabbitmq"
        isDone = False
        start = 0
        gene_manager = gene_components.DataManager()
        while not isDone:
            end = start + 5000
            print 'start: %s, end: %s' % (start, end)
            gene = Gene.objects.all()[start:end]
            # NOTE(review): start jumps to end + 1, so the row at index
            # `end` is never fetched -- confirm whether that is intended.
            start = end + 1
            if gene.count() <= 0:
                isDone = True
            x = []
            for var in gene:
                # Message layout: ['gene', <code>, <optional values dict>].
                y = ['gene', var.code]
                try:
                    data = gene_manager.get(var.code)
                    values = {}
                    arr_disease = []
                    asso_disease = []
                    asso_pub = []
                    for field, value in data.items():
                        # Scalar fields are copied through unchanged.
                        if field in ['synonyms', 'effects','start', 'end','num_exon','chromosome','protein_product','description'] and value['value'] != None:
                            values[field] = value['value']
                        # disease field
                        if field == 'disgenet-diseases' and value['value'] != None:
                            arr_disease.extend(value['value'])
                            rs = [ item['disease'] for item in value['value'] ]
                            asso_disease.extend(rs)
                        if field == 'gwas-diseases' and value['value'] != None:
                            # Normalize GWAS entries to the disgenet shape.
                            try:
                                for k in value['value']:
                                    arr_disease.append({
                                        'disease': k.get('disease',''),
                                        'pubmedid': k.get('pmid',''),
                                        'sentence': k.get('sentence', '')
                                    })
                            except Exception as e:
                                pass
                            rs = [ item['disease'] for item in value['value'] ]
                            asso_disease.extend(rs)
                        if field == 'ctdbase-diseases' and value['value'] != None:
                            # CTDbase uses 'evidence' where the others use
                            # 'sentence'.
                            try:
                                for k in value['value']:
                                    arr_disease.append({
                                        'disease': k.get('disease',''),
                                        'pubmedid': k.get('pmid',''),
                                        'sentence': k.get('evidence', '')
                                    })
                            except Exception as e:
                                pass
                            rs = [ item['disease'] for item in value['value'] ]
                            asso_disease.extend(rs)
                        if len(arr_disease) > 0:
                            values['disgenet-diseases'] = arr_disease
                        if len(asso_disease) > 0:
                            values['associated_diseases'] = asso_disease
                        # publications
                        if field == 'publications' and value['value'] != None:
                            values[field] = value['value']
                            try:
                                for k in value['value']:
                                    asso_pub.append({
                                        'pmid': k['pmid'],
                                        'title': k['title']
                                    })
                            except Exception as e:
                                pass
                        if field == 'gwas-publications' and value['value'] != None:
                            asso_pub.extend(value['value'])
                        if len(asso_pub) > 0:
                            values['associated_publications'] = asso_pub
                    if values:
                        y.append(values)
                    x.append(y)
                except Exception as e:
                    # Best-effort: genes whose annotation lookup fails are
                    # skipped silently.
                    pass

            # Publish rabbitMQ
            self.publish_to_queue(x, GENOME_QUEUE, RABBITMQ_HOST, RABBITMQ_PORT)

        print "[***] DONE gene"
        print "[x] Sent data to RabbitMQ"
|
[
"thanh.tran@etudiant.univ-lr.fr"
] |
thanh.tran@etudiant.univ-lr.fr
|
a43e6873d5770d466c0143a8d8e3abdff3975ac4
|
4bc19f4dd098ebedcb6ee78af0ae12cb633671fe
|
/static/views.py
|
608e8568b487fbee9eb1251fbf226fbe6d45ec5b
|
[] |
no_license
|
StanislavKraev/rekvizitka
|
958ab0e002335613a724fb14a8e4123f49954446
|
ac1f30e7bb2e987b3b0bda4c2a8feda4d3f5497f
|
refs/heads/master
| 2021-01-01T05:44:56.372748
| 2016-04-27T19:20:26
| 2016-04-27T19:20:26
| 57,240,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
from django.http import Http404
from django.shortcuts import render_to_response
from rek.static.models import StaticPage
from django.template.context import RequestContext
def render(request, page_alias=''):
    """Render the enabled static page identified by `page_alias`.

    Raises Http404 when no enabled page with that alias exists.
    """
    # BUG FIX: QuerySet.get() raises DoesNotExist rather than returning a
    # falsy value, so the old `if not page: raise Http404()` guard was
    # unreachable and a missing page produced a 500 instead of a 404.
    try:
        page = StaticPage.objects.get(alias=page_alias, enabled=True)
    except StaticPage.DoesNotExist:
        raise Http404()
    return render_to_response('static_page_with_sidebar.html',
                              {'page': page},
                              context_instance=RequestContext(request))
|
[
"kraevst@yandex.ru"
] |
kraevst@yandex.ru
|
e2a4d4248d4f5b48e5c69c52e0dad41e541340ba
|
33cfcb4561e7320ae0e893fbe774c7eb0a2effe8
|
/eg15.01.py
|
c94d345080db1688fdbb1a237e7fd737f5e8db93
|
[] |
no_license
|
Jueee/aByteOfPython
|
9c8bc01f0707daef29e52467db0c3f5a94747119
|
ae1a4a4b181612463ccdcd0d89c961f22f7ece20
|
refs/heads/master
| 2021-05-31T14:26:00.790823
| 2016-02-17T05:41:20
| 2016-02-17T05:41:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
#!/usr/bin/python
# Filename: list_comprehension.py

# A list comprehension derives a new list from an existing one.
listone = [2, 3, 4]
listtwo = [2*i for i in listone if i > 2]
print(listtwo)

# Receiving tuples and lists in functions:
# A function can accept a variable number of arguments via the special
# * and ** parameter prefixes. This is especially useful when the
# function must take an arbitrary number of arguments.
# Because `args` below is prefixed with *, all surplus positional
# arguments are collected into the tuple `args`.
# With a ** prefix, surplus keyword arguments would instead be collected
# into a dictionary of key/value pairs.
def powersum(power, *args):
    '''Return the sum of each argument raised to specified power.'''
    return sum(pow(value, power) for value in args)
# Demonstrate variadic arguments: extras are gathered into the tuple `args`.
print(powersum(2,3,4,5))
print(powersum(2,10,100,1000))
|
[
"hellojue @foxmail.com"
] |
hellojue @foxmail.com
|
5da2bd8dc2830c9ae5ea68845892e133cd447295
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq2210.py
|
06f183066edd0d13b690b7e34154e944725a31e0
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=28
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Assemble the generated benchmark circuit and append a final
    measurement of every qubit under the key 'result'.

    The trailing `# number=k` tags come from the circuit generator and
    identify individual operations; the gate order is significant.

    Args:
      n: declared qubit count (not referenced in the body; the qubits
         arrive via input_qubit).
      input_qubit: sequence of qubits the gates act on.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0])) # number=9
    c.append(cirq.rx(-1.9069467407290044).on(input_qubit[2])) # number=20
    c.append(cirq.H.on(input_qubit[3]))  # number=21
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    c.append(cirq.Y.on(input_qubit[2])) # number=13
    c.append(cirq.rx(0.13823007675795101).on(input_qubit[2])) # number=24
    c.append(cirq.H.on(input_qubit[0]))  # number=5
    c.append(cirq.H.on(input_qubit[1]))  # number=6
    c.append(cirq.H.on(input_qubit[2]))  # number=7
    c.append(cirq.H.on(input_qubit[3]))  # number=8
    c.append(cirq.X.on(input_qubit[3]))  # number=1
    c.append(cirq.rx(-1.9352210746113125).on(input_qubit[3])) # number=14
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[2])) # number=22
    c.append(cirq.Y.on(input_qubit[2])) # number=10
    c.append(cirq.H.on(input_qubit[1])) # number=17
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[1])) # number=18
    c.append(cirq.H.on(input_qubit[1])) # number=19
    c.append(cirq.Y.on(input_qubit[2])) # number=11
    c.append(cirq.H.on(input_qubit[0])) # number=25
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=26
    c.append(cirq.H.on(input_qubit[0])) # number=27
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=16
    c.append(cirq.Z.on(input_qubit[3])) # number=23
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Collapse an iterable of measurement bits into a '0'/'1' string."""
    return ''.join(str(int(bit)) for bit in bits)
if __name__ == '__main__':
    # Build the generated circuit on a 4-qubit column of the device grid.
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for Sycamore hardware using the sqrt-iswap gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000

    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    # Histogram of measured bitstrings, written alongside circuit stats.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq2210.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
39f29b37f7444cf60b0b9e2cbd3307132c1c48c6
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/pandas/tests/io/parser/test_skiprows.py
|
1df2ca4fad4d87539cdcdee874cb25a6cd3ce18e
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237
| 2020-09-06T03:15:14
| 2020-09-06T03:15:14
| 182,013,100
| 9
| 2
|
Apache-2.0
| 2022-12-09T21:01:00
| 2019-04-18T03:57:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 6,948
|
py
|
# -*- coding: utf-8 -*-
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import StringIO, lrange, range
from pandas.errors import EmptyDataError
from pandas import DataFrame, Index
import pandas.util.testing as tm
@pytest.mark.parametrize("skiprows", [lrange(6), 6])
def test_skip_rows_bug(all_parsers, skiprows):
    # see gh-505
    # skiprows given as an explicit list of indices and as an int count
    # must behave identically.
    parser = all_parsers
    text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
    result = parser.read_csv(StringIO(text), skiprows=skiprows, header=None,
                             index_col=0, parse_dates=True)
    index = Index([datetime(2000, 1, 1), datetime(2000, 1, 2),
                   datetime(2000, 1, 3)], name=0)

    expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
                         columns=[1, 2, 3], index=index)
    tm.assert_frame_equal(result, expected)
def test_deep_skip_rows(all_parsers):
    # see gh-4382
    # Parsing with skiprows=[6, 8] must equal parsing a file with those
    # rows already removed.
    parser = all_parsers
    header = "a,b,c\n"
    all_rows = [",".join([str(i), str(i + 1), str(i + 2)])
                for i in range(10)]
    kept_rows = [",".join([str(i), str(i + 1), str(i + 2)])
                 for i in [0, 1, 2, 3, 4, 6, 8, 9]]

    skipped = parser.read_csv(StringIO(header + "\n".join(all_rows)),
                              skiprows=[6, 8])
    condensed = parser.read_csv(StringIO(header + "\n".join(kept_rows)))
    tm.assert_frame_equal(skipped, condensed)
def test_skip_rows_blank(all_parsers):
    # see gh-9832
    # Blank lines within the skipped header region must still be counted
    # by an integer skiprows (4 comment lines + 2 blanks = 6 skipped).
    parser = all_parsers
    text = """#foo,a,b,c
#foo,a,b,c

#foo,a,b,c
#foo,a,b,c

1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
    data = parser.read_csv(StringIO(text), skiprows=6, header=None,
                           index_col=0, parse_dates=True)
    index = Index([datetime(2000, 1, 1), datetime(2000, 1, 2),
                   datetime(2000, 1, 3)], name=0)

    expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
                         columns=[1, 2, 3],
                         index=index)
    tm.assert_frame_equal(data, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
    ("""id,text,num_lines
1,"line 11
line 12",2
2,"line 21
line 22",2
3,"line 31",1""",
     dict(skiprows=[1]),
     DataFrame([[2, "line 21\nline 22", 2],
                [3, "line 31", 1]], columns=["id", "text", "num_lines"])),
    ("a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~",
     dict(quotechar="~", skiprows=[2]),
     DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"])),
    (("Text,url\n~example\n "
      "sentence\n one~,url1\n~"
      "example\n sentence\n two~,url2\n~"
      "example\n sentence\n three~,url3"),
     dict(quotechar="~", skiprows=[1, 3]),
     DataFrame([['example\n sentence\n two', 'url2']],
               columns=["Text", "url"]))
])
def test_skip_row_with_newline(all_parsers, data, kwargs, expected):
    # see gh-12775 and gh-10911
    # Embedded newlines inside quoted fields must not confuse the
    # skiprows row accounting.
    parser = all_parsers
    result = parser.read_csv(StringIO(data), **kwargs)
    tm.assert_frame_equal(result, expected)
def test_skip_row_with_quote(all_parsers):
    # see gh-12775 and gh-10911
    # Quote characters inside fields should not break skiprows counting.
    parser = all_parsers
    data = """id,text,num_lines
1,"line '11' line 12",2
2,"line '21' line 22",2
3,"line '31' line 32",1"""

    exp_data = [[2, "line '21' line 22", 2],
                [3, "line '31' line 32", 1]]
    expected = DataFrame(exp_data, columns=[
        "id", "text", "num_lines"])

    result = parser.read_csv(StringIO(data), skiprows=[1])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,exp_data", [
    ("""id,text,num_lines
1,"line \n'11' line 12",2
2,"line \n'21' line 22",2
3,"line \n'31' line 32",1""",
     [[2, "line \n'21' line 22", 2],
      [3, "line \n'31' line 32", 1]]),
    ("""id,text,num_lines
1,"line '11\n' line 12",2
2,"line '21\n' line 22",2
3,"line '31\n' line 32",1""",
     [[2, "line '21\n' line 22", 2],
      [3, "line '31\n' line 32", 1]]),
    ("""id,text,num_lines
1,"line '11\n' \r\tline 12",2
2,"line '21\n' \r\tline 22",2
3,"line '31\n' \r\tline 32",1""",
     [[2, "line '21\n' \r\tline 22", 2],
      [3, "line '31\n' \r\tline 32", 1]]),
])
def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data):
    # see gh-12775 and gh-10911
    # Newlines interleaved with quotes (before, after, and alongside
    # carriage returns / tabs) inside quoted fields.
    parser = all_parsers
    result = parser.read_csv(StringIO(data), skiprows=[1])

    expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("line_terminator", [
    "\n",  # "LF"
    "\r\n",  # "CRLF"
    "\r"  # "CR"
])
def test_skiprows_lineterminator(all_parsers, line_terminator):
    # see gh-9079
    # skiprows must honor all three newline conventions.
    parser = all_parsers
    data = "\n".join(["SMOSMANIA ThetaProbe-ML2X ",
                      "2007/01/01 01:00   0.2140 U M ",
                      "2007/01/01 02:00   0.2141 M O ",
                      "2007/01/01 04:00   0.2142 D M "])
    expected = DataFrame([["2007/01/01", "01:00", 0.2140, "U", "M"],
                          ["2007/01/01", "02:00", 0.2141, "M", "O"],
                          ["2007/01/01", "04:00", 0.2142, "D", "M"]],
                         columns=["date", "time", "var", "flag",
                                  "oflag"])

    if parser.engine == "python" and line_terminator == "\r":
        pytest.skip("'CR' not respect with the Python parser yet")

    data = data.replace("\n", line_terminator)
    result = parser.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
                             names=["date", "time", "var", "flag", "oflag"])
    tm.assert_frame_equal(result, expected)
def test_skiprows_infield_quote(all_parsers):
    # see gh-14459
    # A stray quote inside a skipped row must not derail parsing.
    parser = all_parsers
    content = "a\"\nb\"\na\n1"

    frame = parser.read_csv(StringIO(content), skiprows=2)
    tm.assert_frame_equal(frame, DataFrame({"a": [1]}))
@pytest.mark.parametrize("kwargs,expected", [
    (dict(), DataFrame({"1": [3, 5]})),
    (dict(header=0, names=["foo"]), DataFrame({"foo": [3, 5]}))
])
def test_skip_rows_callable(all_parsers, kwargs, expected):
    # A callable skiprows receives the row index and skips when truthy
    # (here: every even-numbered row, including the original header).
    parser = all_parsers
    data = "a\n1\n2\n3\n4\n5"

    result = parser.read_csv(StringIO(data),
                             skiprows=lambda x: x % 2 == 0,
                             **kwargs)
    tm.assert_frame_equal(result, expected)
def test_skip_rows_skip_all(all_parsers):
    # Skipping every row leaves the parser with no columns at all.
    parser = all_parsers
    content = "a\n1\n2\n3\n4\n5"

    with pytest.raises(EmptyDataError,
                       match="No columns to parse from file"):
        parser.read_csv(StringIO(content), skiprows=lambda x: True)
def test_skip_rows_bad_callable(all_parsers):
    # An exception raised inside the skiprows callable must propagate.
    parser = all_parsers
    content = "a\n1\n2\n3\n4\n5"

    with pytest.raises(ZeroDivisionError, match="by zero"):
        parser.read_csv(StringIO(content), skiprows=lambda x: 1 / 0)
|
[
"leibingye@outlook.com"
] |
leibingye@outlook.com
|
c6f9bfe889eb0278f68b7a17049662d5605c5285
|
5af277b5819d74e61374d1d78c303ac93c831cf5
|
/axial/logging_utils.py
|
ef723570c0f02a331ebfc7220811665417690c53
|
[
"Apache-2.0"
] |
permissive
|
Ayoob7/google-research
|
a2d215afb31513bd59bc989e09f54667fe45704e
|
727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7
|
refs/heads/master
| 2022-11-11T03:10:53.216693
| 2020-06-26T17:13:45
| 2020-06-26T17:13:45
| 275,205,856
| 2
| 0
|
Apache-2.0
| 2020-06-26T16:58:19
| 2020-06-26T16:58:18
| null |
UTF-8
|
Python
| false
| false
| 4,459
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import time
from absl import logging
import numpy as np
import PIL.Image
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from tensorflow.compat.v1.core.framework.summary_pb2 import Summary
from tensorflow.compat.v1.core.util.event_pb2 import Event
def pack_images(images, rows, cols):
    """Helper utility to make a tiled field of images from numpy arrays.

    Taken from Jaxboard.

    Args:
      images: Image tensor in shape [N, W, H, C].
      rows: Number of images per row in tiled image.
      cols: Number of images per column in tiled image.

    Returns:
      A tiled image of shape [W * rows, H * cols, C].
      Truncates incomplete rows.
    """
    width, height, depth = np.shape(images)[-3:]
    # Collapse any leading batch dimensions into a single N axis.
    flat = np.reshape(images, (-1, width, height, depth))
    count = np.shape(flat)[0]
    # Clamp the grid to the number of images actually available.
    rows = np.minimum(rows, count)
    cols = np.minimum(count // rows, cols)
    # Arrange images into a (rows, cols) grid, then interleave the grid
    # axes with the pixel axes and merge them into one big canvas.
    grid = np.reshape(flat[:rows * cols], (rows, cols, width, height, depth))
    grid = np.transpose(grid, [0, 2, 1, 3, 4])
    return np.reshape(grid, [rows * width, cols * height, depth])
class SummaryWriter(object):
  """Tensorflow summary writer inspired by Jaxboard.

  This version doesn't try to avoid Tensorflow dependencies, because this
  project uses Tensorflow.
  """

  def __init__(self, dir, write_graph=True):
    # Create the log directory on first use; attach the current default
    # graph only when write_graph is requested.
    if not gfile.IsDirectory(dir):
      gfile.MakeDirs(dir)
    self.writer = tf.summary.FileWriter(
        dir, graph=tf.get_default_graph() if write_graph else None)

  def flush(self):
    # Force buffered events to disk without closing the writer.
    self.writer.flush()

  def close(self):
    self.writer.close()

  def _write_event(self, summary_value, step):
    # Wrap a single Summary.Value in an Event stamped with wall time.
    self.writer.add_event(
        Event(
            wall_time=round(time.time()),
            step=step,
            summary=Summary(value=[summary_value])))

  def scalar(self, tag, value, step):
    """Log one scalar value under `tag` at `step`."""
    self._write_event(Summary.Value(tag=tag, simple_value=float(value)), step)

  def image(self, tag, image, step):
    """Log a single image; grayscale inputs are expanded to 3 channels."""
    image = np.asarray(image)
    if image.ndim == 2:
      image = image[:, :, None]
    if image.shape[-1] == 1:
      image = np.repeat(image, 3, axis=-1)
    # PNG-encode in memory and embed the bytes in the summary proto.
    bytesio = io.BytesIO()
    PIL.Image.fromarray(image).save(bytesio, 'PNG')
    image_summary = Summary.Image(
        encoded_image_string=bytesio.getvalue(),
        colorspace=3,
        height=image.shape[0],
        width=image.shape[1])
    self._write_event(Summary.Value(tag=tag, image=image_summary), step)

  def images(self, tag, images, step, square=True):
    """Saves (rows, cols) tiled images from onp.ndarray.

    This truncates the image batch rather than padding
    if it doesn't fill the final row.
    """
    images = np.asarray(images)
    n_images = len(images)
    if square:
      rows = cols = int(np.sqrt(n_images))
    else:
      rows = 1
      cols = n_images
    tiled_images = pack_images(images, rows, cols)
    self.image(tag, tiled_images, step=step)
class Log(object):
  """Logging to Tensorboard and the Python logger at the same time."""

  def __init__(self, logdir, write_graph=True):
    self.logdir = logdir
    # Tensorboard
    self.summary_writer = SummaryWriter(logdir, write_graph=write_graph)

  def write(self, key_prefix, info_dicts, step):
    """Average each metric across info_dicts and emit to both sinks.

    Keys are taken from the most recent dict; presumably every dict in
    info_dicts carries the same keys -- TODO confirm with callers.
    """
    log_items = []
    for key in info_dicts[-1]:
      # average the log values over time
      key_with_prefix = '{}/{}'.format(key_prefix, key)
      avg_val = np.mean([info[key] for info in info_dicts])
      # absl log
      log_items.append('{}={:.6f}'.format(key_with_prefix, avg_val))
      # tensorboard
      self.summary_writer.scalar(key_with_prefix, avg_val, step=step)
    self.summary_writer.flush()
    logging.info('step={:08d} {}'.format(step, ' '.join(log_items)))
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
54157e46485cfe84e785669c8a896e72e4eba04c
|
22fc34523f4de64a1e1eea707e01da79e425a043
|
/srtmprofile/core/urls.py
|
3eb617af2639a39dc20d463863e4fff390506028
|
[
"MIT"
] |
permissive
|
marcellobenigno/srtmprofile
|
04cdcf4a1f127462dd37d94ec5f368b0f304b932
|
52a2550976ce4ecad2921e53a72ac2ec8a8459b5
|
refs/heads/master
| 2021-04-03T05:25:54.097968
| 2018-03-15T11:05:02
| 2018-03-15T11:05:02
| 124,605,246
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from django.conf.urls import url
from . import views
app_name = 'core'

# URL routes for the core app: home page, GeoJSON roads feed, and a
# per-object detail page keyed by primary key.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^roads.geojson$', views.roads_geojson, name='roads_geojson'),
    url(r'^(?P<pk>\d+)/$', views.detail, name='detail'),
]
|
[
"benigno.marcello@gmail.com"
] |
benigno.marcello@gmail.com
|
6a6d137d3c8dc70d14aa023a752ffba6f170d4fd
|
91af1af67ed219e583b209b40ae5dd34d6f7f355
|
/train_net.py
|
90d770c1765c7f52a585ded8af49a5bf767545db
|
[] |
no_license
|
jack20951948/Deep-Clustering
|
d6f5bfdd97be1f07f114371eafd9f8643ebb6e30
|
4dd8b4d3fef72e597cd142406d343450cf2dd517
|
refs/heads/main
| 2023-06-28T02:18:58.915727
| 2021-07-18T07:18:10
| 2021-07-18T07:18:10
| 387,109,398
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,555
|
py
|
'''
Script to train the model
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
import ipdb
from datagenerator2 import DataGenerator
from model import Model
from GlobalConstont import *
# the .pkl file lists of data set
pkl_list = ['deep-clustering-master/pkl_folder/train.pkl']  # ['../dcdata/' + str(i) + '.pkl' for i in range(1, 12)]
val_list = ['deep-clustering-master/pkl_folder/val.pkl']

# Output locations for TensorBoard summaries and model checkpoints.
sum_dir = 'deep-clustering-master/sum'
train_dir = 'deep-clustering-master/model'

lr = 1e-3            # learning rate
n_hidden = 300       # hidden units per recurrent layer
max_steps = 20000000
batch_size = 128
def train():
    """Build the graph and run the (effectively unbounded) training loop.

    Every 100 steps progress is printed, every 4000 steps a checkpoint is
    written, and at each epoch boundary the model is evaluated over the
    whole validation set; the running validation losses are appended to
    the 'val_loss' file.
    """
    with tf.Graph().as_default():
        # dropout keep probability
        p_keep_ff = tf.placeholder(tf.float32, shape=None)
        p_keep_rc = tf.placeholder(tf.float32, shape=None)

        # generator for training set and validation set
        data_generator = DataGenerator(pkl_list, batch_size)
        val_generator = DataGenerator(val_list, batch_size)
        # placeholder for input log spectrum, VAD info.,
        # and speaker indicator function
        in_data = tf.placeholder(
            tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF])
        VAD_data = tf.placeholder(
            tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF])
        Y_data = tf.placeholder(
            tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF, 2])
        # init the model
        BiModel = Model(n_hidden, batch_size, p_keep_ff, p_keep_rc)
        # build the net structure
        embedding = BiModel.inference(in_data)
        Y_data_reshaped = tf.reshape(Y_data, [-1, NEFF, 2])
        VAD_data_reshaped = tf.reshape(VAD_data, [-1, NEFF])
        # compute the loss
        loss = BiModel.loss(embedding, Y_data_reshaped, VAD_data_reshaped)
        # get the train operation
        train_op = BiModel.train(loss, lr)
        saver = tf.train.Saver(tf.all_variables())
        summary_op = tf.summary.merge_all()
        sess = tf.Session()
        # either train from scratch or a trained model
        # saver.restore(sess, 'train/model.ckpt-492000')
        # val_loss = np.fromfile('val_loss').tolist()
        # init_step = 56001
        init = tf.initialize_all_variables()
        sess.run(init)
        init_step = 0
        summary_writer = tf.summary.FileWriter(
            sum_dir, sess.graph)
        # BUG FIX: val_loss must exist before the first epoch boundary.
        # The original left this initialization commented out, so
        # val_loss.append(...) below raised a NameError after epoch 1.
        val_loss = []
        last_epoch = data_generator.epoch

        for step in range(init_step, init_step + max_steps):
            start_time = time.time()
            data_batch = data_generator.gen_batch()
            # concatenate the samples into batch data
            in_data_np = np.concatenate(
                [np.reshape(item['Sample'], [1, FRAMES_PER_SAMPLE, NEFF])
                 for item in data_batch])
            VAD_data_np = np.concatenate(
                [np.reshape(item['VAD'], [1, FRAMES_PER_SAMPLE, NEFF])
                 for item in data_batch])
            VAD_data_np = VAD_data_np.astype('int')
            Y_data_np = np.concatenate(
                [np.reshape(item['Target'], [1, FRAMES_PER_SAMPLE, NEFF, 2])
                 for item in data_batch])
            Y_data_np = Y_data_np.astype('int')
            # train the model
            loss_value, _, summary_str = sess.run(
                [loss, train_op, summary_op],
                feed_dict={in_data: in_data_np,
                           VAD_data: VAD_data_np,
                           Y_data: Y_data_np,
                           p_keep_ff: 1 - P_DROPOUT_FF,
                           p_keep_rc: 1 - P_DROPOUT_RC})
            summary_writer.add_summary(summary_str, step)
            duration = time.time() - start_time
            # if np.isnan(loss_value):
            #     import ipdb; ipdb.set_trace()
            assert not np.isnan(loss_value)

            if step % 100 == 0:
                # show training progress every 100 steps
                num_examples_per_step = batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch, epoch %d)')
                print (format_str % (datetime.now(), step, loss_value,
                                     examples_per_sec, sec_per_batch,
                                     data_generator.epoch))

            if step % 4000 == 0:
                # save model every 4000 steps
                checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

            if last_epoch != data_generator.epoch:
                # doing validation every training epoch
                print('Doing validation')
                val_epoch = val_generator.epoch
                count = 0
                loss_sum = 0
                # average the validation loss
                while(val_epoch == val_generator.epoch):
                    count += 1
                    data_batch = val_generator.gen_batch()
                    in_data_np = np.concatenate(
                        [np.reshape(item['Sample'],
                                    [1, FRAMES_PER_SAMPLE, NEFF])
                         for item in data_batch])
                    VAD_data_np = np.concatenate(
                        [np.reshape(item['VAD'], [1, FRAMES_PER_SAMPLE, NEFF])
                         for item in data_batch])
                    VAD_data_np = VAD_data_np.astype('int')
                    Y_data_np = np.concatenate(
                        [np.reshape(item['Target'],
                                    [1, FRAMES_PER_SAMPLE, NEFF, 2])
                         for item in data_batch])
                    Y_data_np = Y_data_np.astype('int')
                    # dropout disabled during validation (keep prob = 1)
                    loss_value, = sess.run(
                        [loss],
                        feed_dict={in_data: in_data_np,
                                   VAD_data: VAD_data_np,
                                   Y_data: Y_data_np,
                                   p_keep_ff: 1,
                                   p_keep_rc: 1})
                    loss_sum += loss_value
                val_loss.append(loss_sum / count)
                print ('validation loss: %.3f' % (loss_sum / count))
                np.array(val_loss).tofile('val_loss')

            last_epoch = data_generator.epoch
# Entry point: log the start time, then train (runs at import time; there
# is no __main__ guard in this script).
print('%s start' % datetime.now())
train()
|
[
"j20951948@gmail.com"
] |
j20951948@gmail.com
|
92c3a0d5822904b02ee02cf30204b593268f8d36
|
ded10c2f2f5f91c44ec950237a59225e8486abd8
|
/.history/2/matrix_squaring_20200413235341.py
|
1cded98ea6504881b7ef71c0979704ed33286f9f
|
[] |
no_license
|
jearistiz/Statistical-Physics-Projects
|
276a86407b32ded4e06b32efb2fadbd8eff8daed
|
d9c5b16a50856e148dc8604d92b6de3ea21fc552
|
refs/heads/master
| 2022-11-05T03:41:23.623050
| 2020-06-28T06:36:05
| 2020-06-28T06:36:05
| 254,909,897
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,000
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
def rho_free(x,xp,beta):
    """Free-particle density-matrix element <x|rho|x'> at inverse temperature beta.

    Exact expression for a free particle on an (effectively infinite) line.
    """
    prefactor = (2. * np.pi * beta) ** (-0.5)
    return prefactor * np.exp(-(x - xp) ** 2 / (2 * beta))
def harmonic_potential(x):
    """Harmonic-oscillator potential V(x) = x**2 / 2 at position x."""
    return x ** 2 / 2.
def anharmonic_potential(x):
    """Anharmonic potential V(x) = x**2/2 - x**3 + x**4 at position x."""
    # Terms kept in the original evaluation order to preserve float rounding.
    return x ** 2 / 2. - x ** 3 + x ** 4
def QHO_canonical_ensemble(x,beta):
    """Exact quantum probability density for a harmonic oscillator in a heat bath.

    Args:
        x: position (scalar or numpy array).
        beta: inverse temperature in reduced units, beta = 1/T.

    Returns:
        pi(x; beta), the exact canonical-ensemble position density of the
        quantum harmonic oscillator.
    """
    tanh_half_beta = np.tanh(beta / 2.)
    normalization = (tanh_half_beta / np.pi) ** 0.5
    return normalization * np.exp(-x ** 2 * tanh_half_beta)
def rho_trotter(x_max = 5., nx = 101, beta=1, potential=harmonic_potential):
    """Density matrix in the high-temperature Trotter approximation.

    Args:
        x_max: positions lie in the interval (-x_max, x_max).
        nx: number of grid points.
        beta: inverse temperature in reduced units.
        potential: interaction potential; must accept a scalar x.

    Returns:
        (rho, grid_x, dx): the nx-by-nx Trotter density matrix, the position
        grid it is evaluated on, and the grid spacing.
    """
    dx = 2. * x_max / (nx - 1)
    # Integer indices are symmetric for odd nx; multiplication by dx gives
    # the same floats as the original i*dx loop.
    grid_x = dx * np.arange(-int((nx - 1) / 2), int(nx / 2 + 1))
    rows = []
    for xp in grid_x:
        row = [rho_free(x, xp, beta)
               * np.exp(-0.5 * beta * (potential(x) + potential(xp)))
               for x in grid_x]
        rows.append(row)
    rho = np.array(rows)
    return rho, grid_x, dx
def density_matrix_squaring(rho, grid_x, N_iter = 1, beta_ini = 1, print_steps=True):
    """Apply the matrix-squaring recursion rho(2b) = dx * rho(b) @ rho(b).

    After N_iter squarings the returned matrix corresponds to inverse
    temperature beta_fin = beta_ini * 2**N_iter.

    Args:
        rho: (nx, nx) density matrix at inverse temperature beta_ini.
        grid_x: (nx,) uniform position grid on which rho is evaluated.
        N_iter: number of squaring iterations.
        beta_ini: inverse temperature of the input matrix.
        print_steps: if True, report each doubling of beta.

    Returns:
        (rho, trace_rho, beta_fin): the iterated matrix, its trace times dx
        (equal to the partition function at beta_fin under this
        discretization), and beta_fin itself.
    """
    dx = grid_x[1] - grid_x[0]
    beta_fin = beta_ini * 2 ** N_iter
    print('\nbeta_ini = %.3f'%beta_ini,
          '\n----------------------------------------------------------------')
    step = 0
    while step < N_iter:
        # One squaring doubles the inverse temperature; dx is the
        # quadrature weight of the discretized integral.
        rho = dx * np.dot(rho, rho)
        if print_steps==True:
            print(u'Iteration %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(step, step, step+1))
        step += 1
    trace_rho = dx * np.trace(rho)
    return rho, trace_rho, beta_fin
def save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=True):
    """Persist the probability density pi(x;beta) to a CSV file.

    Args:
        grid_x: (nx,) positions where pi(x;beta) is evaluated.
        x_weights: (nx,) probability-density values at grid_x.
        file_name: destination CSV path.
        relevant_info: comment line written before the CSV data.
        print_data: if True, echo the DataFrame to stdout.

    Returns:
        The pandas DataFrame that was written.
    """
    pi_x_data = pd.DataFrame(data={'Position x': grid_x,
                                   'Prob. density': x_weights})
    # First write the comment header, then append the CSV body.
    with open(file_name,mode='w') as rho_csv:
        rho_csv.write(relevant_info+'\n')
    with open(file_name,mode='a') as rho_csv:
        pi_x_data.to_csv(rho_csv)
    if print_data==True:
        print(pi_x_data)
    return pi_x_data
def run_pi_x_squaring(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
                      potential_string = 'harmonic_potential', print_steps=True,
                      save_data=True, plot=True, save_plot=True, show_plot=True):
    """Compute pi(x;beta_fin) via Trotter + matrix squaring; optionally save/plot.

    Starts from the Trotter density matrix at beta_ini = beta_fin / 2**N_iter,
    squares it N_iter times, normalizes, and takes the diagonal as the
    position probability density.  Results can be written to CSV and/or
    plotted against the exact QHO density.
    """
    beta_ini = beta_fin * 2**(-N_iter)
    # High-temperature rho from the Trotter approximation.
    rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
    # Cool down to beta_fin by iterated matrix squaring.
    rho, trace_rho, beta_fin_2 = density_matrix_squaring(rho, grid_x, N_iter,
                                                         beta_ini, print_steps)
    print('----------------------------------------------------------------\n',
          u'beta_fin = %.3f Z(beta_fin) = Tr(rho(beta_fin)) ≈ %.3E \n'%(beta_fin_2,trace_rho))
    # Normalize rho; its diagonal holds the probability density on grid_x.
    rho_normalized = rho/trace_rho
    x_weights = np.diag(rho_normalized)
    if save_data==True:
        # CSV file name encoding the run parameters.
        file_name = u'pi_x-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.csv'\
                    %(potential_string,x_max,nx,N_iter,beta_fin)
        # Run parameters stored as a comment line at the top of the CSV.
        relevant_info = u'# %s x_max = %.3f nx = %d '%(potential_string,x_max,nx) + \
                        u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,) + \
                        u'beta_fin = %.3f'%beta_fin
        # Save pi(x;beta_fin) to CSV.
        save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=0)
    # Plot and compare against the exact QHO density.
    if plot == True:
        plt.figure(figsize=(8,5))
        plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nfórmula de Trotter.\n$N=%d$ iteraciones\n$dx=%.3E$'%(N_iter,dx))
        plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'Valor teórico QHO')
        plt.xlabel(u'x')
        plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
        plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
        plt.tight_layout()
        if save_plot==True:
            plot_name = u'pi_x-plot-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.eps'\
                        %(potential_string,x_max,nx,N_iter,beta_fin)
            plt.savefig(plot_name)
        if show_plot==True:
            plt.show()
        plt.close()
    return 0
# Set a global font size for matplotlib, then run the matrix-squaring
# computation for the harmonic oscillator with default parameters.
plt.rcParams.update({'font.size':15})
run_pi_x_squaring(potential = harmonic_potential, potential_string = 'harmonic_potential',
                  save_data=True, save_plot=False, show_plot=True)
|
[
"jeaz.git@gmail.com"
] |
jeaz.git@gmail.com
|
52a5fc44063f2e73239719204651a2f2b4b3e5e5
|
767b5482f3c5b9c2c85575c711e37561f5b8f198
|
/engine/engine_lib/encoderlib.py
|
27d186e1e4d625fe001279e1c8110f2ff708818f
|
[] |
no_license
|
zhupite233/scaner
|
8e39c903f295d06195be20067043087ec8baac4f
|
7c29c02bca2247a82bcbb91cc86955cc27998c95
|
refs/heads/master
| 2020-05-18T03:23:03.459222
| 2019-04-15T04:29:10
| 2019-04-15T04:29:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,332
|
py
|
#!/usr/bin/env python
"""
This is the encoding / decoding functions collection for DharmaEncoder. It
allows you to encode and decode various data formats.
(c) 2010 Nathan Hamiel
Email: nathan{at}neohaxor{dot}org
Hexsec Labs: http://hexsec.com/labs
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import hashlib
import cgi
import StringIO
import zlib
import decimal
from xml.sax.saxutils import unescape
from xml.sax.saxutils import escape
###################
# Encoder section #
###################
def url_encode(encvalue):
    """ URL encode the specified value. Example Format: Hello%20World

    NOTE: urllib.quote is Python 2 only (urllib.parse.quote in Python 3);
    this module targets Python 2.
    """
    try:
        encoded_value = urllib.quote(encvalue)
    except:
        # Bare except: any failure (e.g. non-string input) is reported as a
        # generic message string rather than raised.
        encoded_value = "There was a problem with the specified value"
    return(encoded_value)
def full_url_encode(encvalue):
    """ Full URL Hex encode the specified value.
    Example Format: %48%65%6c%6c%6f%20%57%6f%72%6c%64

    Each character becomes a two-digit, zero-padded %XX escape.  The
    previous implementation built escapes with hex(ord(c)).replace("0x","%"),
    which emits single-digit escapes (e.g. "%5") for code points below
    0x10 -- output that standard URL decoders misinterpret.
    """
    hexval = ""
    for item in encvalue:
        # %02x zero-pads so every escape is exactly two hex digits.
        hexval += "%%%02x" % ord(item)
    return(hexval)
def base64_encode(encvalue):
    """ Base64 encode the specified value. Example Format: SGVsbG8gV29ybGQ=

    NOTE: str.encode("Base64") is a Python 2 codec; under Python 3 the
    base64 module would be required instead.
    """
    try:
        basedata = encvalue.encode("Base64")
    except:
        # Bare except keeps the tool from crashing on odd input.
        basedata = "There was an error"
    return(basedata)
# def html_entity_encode(encvalue):
# """ Encode value using HTML entities. Example Format: """
#####
# Follow up on this. It needs to be fixed
#####
# encoded_value = cgi.escape(encvalue)
# return(encoded_value)
def hex_encode(encvalue):
    """ Encode value to Hex. Example Format: 48656c6c6f20576f726c64

    Fixes two defects of the previous hex(ord(c)).strip("0x") approach:
    str.strip removes *any* leading/trailing '0'/'x' characters, so ' '
    (0x20) encoded as "2" instead of "20"; and code points below 0x10
    produced a single digit, yielding an undecodable odd-length string.
    """
    hexval = ""
    for item in encvalue:
        # %02x zero-pads to exactly two lowercase hex digits per character.
        hexval += "%02x" % ord(item)
    return(hexval)
def hex_entity_encode(encvalue):
    """ Encode each character as an HTML hex entity.
    Example Format: &#x48;&#x65;&#x6c;&#x6c;&#x6f; """
    pieces = []
    for character in encvalue:
        pieces.append(hex(ord(character)).replace("0x", "&#x") + ";")
    return("".join(pieces))
def unicode_encode(encvalue):
    """ Unicode encode the specified value in the %uXXXX format. Example:
    %u0048%u0065%u006c%u006c%u006f%u0020%u0057%u006f%u0072%u006c%u0064

    Each character becomes a zero-padded four-digit %uXXXX escape.  The
    previous implementation concatenated "%u00" with the raw hex digits,
    which produced malformed escapes for code points below 0x10 (e.g.
    "%u005") and above 0xff (e.g. "%u00100").
    """
    hexval = ""
    for item in encvalue:
        # %04x zero-pads to four hex digits, the canonical %u form.
        hexval += "%%u%04x" % ord(item)
    return(hexval)
def escape_xml(encvalue):
    """ Escape the specified HTML/XML value.

    Ampersand and angle brackets are escaped by saxutils.escape; single
    and double quotes are additionally mapped to their XML entities.
    """
    quote_entities = {"'": "&apos;", '"': "&quot;"}
    return escape(encvalue, quote_entities)
def md5_hash(encvalue):
    """ md5 hash the specified value.
    Example Format: b10a8db164e0754105b7a99be72e3fe5 """
    digest = hashlib.md5()
    digest.update(encvalue)
    return digest.hexdigest()
def sha1_hash(encvalue):
    """ sha1 hash the specified value.
    Example Format: 0a4d55a8d778e5022fab701977c5d840bbc486d0 """
    digest = hashlib.sha1()
    digest.update(encvalue)
    return digest.hexdigest()
def sqlchar_encode(encvalue):
    """ SQL CHAR() encode the specified value.
    Example Format: CHAR(72)+CHAR(101)+CHAR(108)+CHAR(108)+CHAR(111) """
    pieces = ["CHAR(%d)" % ord(character) for character in encvalue]
    return("+".join(pieces))
####
# oraclechr_encode not tested yet, but should work
####
def oraclechr_encode(encvalue):
    """ Oracle chr() encode the specified value.
    Example Format: chr(72)||chr(105) """
    pieces = ["chr(%d)" % ord(character) for character in encvalue]
    return("||".join(pieces))
def decimal_convert(encvalue):
    """ Convert input to a run of concatenated decimal code points.
    Example Format: 721011081081113287111114108100

    Note the output has no separators, so it is not uniquely decodable.
    """
    return("".join(str(ord(character)) for character in encvalue))
def decimal_entity_encode(encvalue):
    """ Convert input to a run of decimal HTML entities.
    Example Format: &#72;&#101;&#108;&#108;&#111; """
    return("".join("&#%d;" % ord(character) for character in encvalue))
def rot13_encode(encvalue):
    """ Perform ROT13 encoding on the specified value.
    Example Format: Uryyb Jbeyq

    NOTE: the "rot13" text codec on str is Python 2 only
    (codecs.encode(s, "rot13") in Python 3).
    """
    return(encvalue.encode("rot13"))
###################
# Decoder section #
###################
def url_decode(decvalue):
    """ URL Decode the specified value. Example Format: Hello%20World

    NOTE: urllib.unquote is Python 2 only (urllib.parse.unquote in
    Python 3).
    """
    returnval = urllib.unquote(decvalue)
    return(returnval)
def fullurl_decode(decvalue):
    """ Full URL decode the specified value.
    Example Format: %48%65%6c%6c%6f%20%57%6f%72%6c%64

    Every token between '%' separators is interpreted as a hex code point,
    so input is expected to consist entirely of %XX escapes.
    """
    decoded_chars = []
    for token in decvalue.split("%"):
        if token != "":
            decoded_chars.append(chr(int(token, 16)))
    return("".join(decoded_chars))
def base64_decode(decvalue):
    """ Base64 decode the specified value.
    Example Format: SGVsbG8gV29ybGQ=

    NOTE: str.decode("Base64") is a Python 2 codec; under Python 3 the
    base64 module would be required instead.
    """
    msg = """ There was an error. Most likely this isn't a valid Base64 value
    and Python choked on it """
    try:
        base64dec = decvalue.decode("Base64")
        return(base64dec)
    except:
        # Bare except: invalid input yields the explanatory message string.
        return(msg)
def hex_decode(decvalue):
    """ Hex decode the specified value.
    Example Format: 48656c6c6f20576f726c64

    NOTE: str.decode("hex") is a Python 2 codec (bytes.fromhex in
    Python 3); odd-length input raises and triggers the message below.
    """
    msg = """ There was an error, perhaps an invalid length for the hex
    value """
    try:
        decodeval = decvalue.decode("hex")
        return(decodeval)
    except:
        # Bare except: malformed hex yields the explanatory message string.
        return(msg)
def hexentity_decode(decvalue):
    """ Hex entity decode the specified value.
    Example Format: &#x48;&#x65;&#x6c;&#x6c;&#x6f; """
    decoded = ""
    for token in decvalue.split(";"):
        # split() leaves a trailing empty token; skip it rather than
        # feed "" to int().
        if token == "":
            continue
        # "&#x48" -> "0x48"; int(..., 16) accepts the 0x prefix.
        decoded += chr(int(token.replace("&#", "0"), 16))
    return(decoded)
def unescape_xml(decvalue):
    """ Unescape the specified HTML or XML value.

    saxutils.unescape handles &amp;, &lt; and &gt;; the extra mapping also
    restores single and double quotes from their entities.
    """
    quote_entities = {"&apos;": "'", "&quot;": '"'}
    return unescape(decvalue, quote_entities)
def unicode_decode(decvalue):
    """ Unicode decode the specified value in the %u00 format.
    Example Format: %u0048%u0065%u006c%u006c%u006f

    Splits on the "%u00" marker, so only escapes of that exact form
    (code points below 0x100) round-trip correctly.
    """
    decoded = ""
    for token in decvalue.split("%u00"):
        # split() yields a leading empty token; skip empties.
        if token == "":
            continue
        decoded += chr(int(token, 16))
    return(decoded)
def rot13_decode(decvalue):
    """ ROT13 decode the specified value. Example Format: Uryyb Jbeyq

    ROT13 rotates letters by 13 places and is its own inverse: encoding a
    value twice returns the original.
    NOTE: the "rot13" codec on str is Python 2 only.
    """
    return(decvalue.decode("rot13"))
|
[
"lixiang@yundun.com"
] |
lixiang@yundun.com
|
2e3138b7aebe9b0d818303c674da9144988dee2d
|
2b0eab74af8d23244ff11699830f9bb10fbd717a
|
/helpers/mixins/unpack_tags_mixin.py
|
5e6e4c11c733fc5368427ac90ddb23bf2e781302
|
[] |
no_license
|
alexandrenorman/mixeur
|
c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b
|
95d21cd6036a99c5f399b700a5426e9e2e17e878
|
refs/heads/main
| 2023-03-13T23:50:11.800627
| 2021-03-07T15:49:15
| 2021-03-07T15:49:15
| 345,384,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
from .unpack_ids_mixin import UnpackIdsMixin
class UnpackTagsMixin(UnpackIdsMixin):
    """
    Mixin to apply on a ModelViewSet which transform registered fields from string containing ids to list of objects
    "1,2,3" => [<Obj id=1>, <Obj id=2>, <Obj id=3>]
    If a string passed, it will create a new instance of given model with given model name field
    "1,2,truc" => [<Obj id=1 name=...>, <Obj id=2 name=...>, <new Obj id=3 name="truc">]
    Should define unpackable fields like this :
    unpackable_fields = {'data_field_name': (ModelName, 'model_field_name')}
    """

    def get_item_id(self, word, options):
        """
        Resolve one tag token to a {"id": <int or None>} dict.

        Numeric tokens are used as ids directly.  Otherwise, when options
        provides (model, field_name), an existing instance is looked up by
        field value, and a new instance is created for non-empty unknown
        values.
        """
        item_id = None
        if word.isdigit():
            item_id = int(word)
        elif options:
            tag_model, tag_model_field = options
            existing_tag = tag_model.objects.filter(**{tag_model_field: word}).first()
            if existing_tag:
                item_id = existing_tag.id
            elif word != "":
                # Unknown non-empty tag: create it on the fly.
                item_id = tag_model.objects.create(**{tag_model_field: word}).id
        else:
            return {"id": None}
        if item_id is not None:
            return {"id": item_id}
        # NOTE(review): when options are given but word == "", control falls
        # through here and the method implicitly returns None rather than
        # {"id": None} -- confirm callers tolerate that.
|
[
"norman@xael.org"
] |
norman@xael.org
|
05af6eb6e60b4748045485fcbf36d751acf72583
|
0c7ff0ec35ba2bb38f99ef6ecb261ec33466dd52
|
/Day1/day1Project.py
|
2d1e56254a4ef4fd53ab5a15fdd51db183e510ec
|
[] |
no_license
|
TheKinshu/100-Days-Python
|
15cbacc608ee349cc9733a7032e10a359bebb731
|
293ad6b3e5f5208da84efbc5b2d2d395a5a53421
|
refs/heads/master
| 2023-04-18T08:21:30.361800
| 2021-05-02T18:48:39
| 2021-05-02T18:48:39
| 351,582,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# Band Name Generator (Day 1 project): greet the user, collect a hometown
# and a pet name, then combine them into a suggested band name.

# 1. Greeting.
print("Welcome to the Band Name Generator.")

# 2. City the user grew up in (prompt text kept verbatim from the exercise).
hometown = input("What's name of the city you gre up in?\n")

# 3. Pet name.
pet_name = input("What's your pet's name?\n")

# 4. print() inserts a single space between arguments, matching the
#    original concatenated output exactly.
print("Your band name could be", hometown, pet_name)

# 5. The trailing "\n" in each prompt keeps the input cursor on a new line,
#    see: https://band-name-generator-end.appbrewery.repl.run/
|
[
"kc007919@gmail.com"
] |
kc007919@gmail.com
|
d2534e7f9ed2539c6ec7228c87061771a60c4676
|
1d11288ec1a5d98dcf66c4ca45072ffd29901de0
|
/mrp_extend/models/mrp_bom_line.py
|
0731280072097855fc742fa848452a84c7f6fb29
|
[] |
no_license
|
pyrun13/addons
|
14202e273c802cee391a68474a6bdc7cf062b25c
|
b81650d81e0a227dd4fc460846e53ce5e61a8cc1
|
refs/heads/master
| 2020-09-07T21:48:18.673226
| 2019-11-12T16:15:06
| 2019-11-12T16:15:06
| 220,921,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
from odoo import models, fields, api, exceptions
class MrpBomLine(models.Model):
    """Extends mrp.bom.line with an attrition (scrap) percentage."""
    _inherit = 'mrp.bom.line'

    # Attrition/scrap rate in percent; UI label is Chinese for "attrition rate (%)".
    attrition_rate = fields.Float(string='损耗率(%)')

    def write(self, vals):
        """Reject negative attrition rates before delegating to the ORM write."""
        attrition_rate = vals.get('attrition_rate', 0)
        if attrition_rate < 0:
            # Error message (Chinese): "attrition rate cannot be negative!"
            raise exceptions.ValidationError('损耗率不能为负数!')
        return super(MrpBomLine, self).write(vals)
|
[
"xiongjianhong@gmail.com"
] |
xiongjianhong@gmail.com
|
d9431f1fb2020f8d301376bed93ef53f3204cbf1
|
0c110eb32f2eaea5c65d40bda846ddc05757ced6
|
/python_scripts/pimriscripts/mastersort/scripts_dir/p7432_run2M1.py
|
39656c11ebf8cd9db049ce6d7b9a74d8b7e3f30a
|
[] |
no_license
|
nyspisoccog/ks_scripts
|
792148a288d1a9d808e397c1d2e93deda2580ff4
|
744b5a9dfa0f958062fc66e0331613faaaee5419
|
refs/heads/master
| 2021-01-18T14:22:25.291331
| 2018-10-15T13:08:24
| 2018-10-15T13:08:24
| 46,814,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
# Working-directory layout for this exam/run: exam 7432, functional run "run2M1".
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7432', 'run2M1']
# Create dest_root/7432/run2M1, then copy the raw scanner data into it.
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2480/e1331017/s1388354_5610_2M1_s30', '/ifs/scratch/pimri/soccog/test_working/7432/run2M1')
# Unpack the archived MRDC (DICOM) files in place.
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7432/run2M1')
# Give each extracted MRDC file a .dcm extension so DICOM tools recognize it.
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7432/run2M1'):
    if 'MRDC' in f and 'gz' not in f:
        old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', f)
        new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', f + '.dcm')
        os.rename(old, new)
# Submit the DICOM conversion job; the clean-up variant is left disabled.
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', '7432_run2M1', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', '7432_run2M1', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
|
[
"katherine@Katherines-MacBook-Pro.local"
] |
katherine@Katherines-MacBook-Pro.local
|
0203f8b7a170b9c90a9503a129644d67e720066b
|
de121a951947f70f402079d288a78d35c85747b2
|
/exercises/exercises_04.py
|
79cb7651e375b500210a4054a4ae7430a01afd4a
|
[] |
no_license
|
tpurnachander/requests-workshop
|
56899be6c5520fb947d91676c11864d09b4489d6
|
dac134558f141c482e0a52f19fdce37b7e7ba928
|
refs/heads/master
| 2023-03-10T19:00:31.012280
| 2021-02-19T12:08:54
| 2021-02-19T12:08:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
import requests
import xml.etree.ElementTree as et
# Exercise 4.1
# Create a function create_xml_body_from_string()
# that returns a docstring (with triple double quotes)
# containing the following XML document:
# <payee>
# <name>John Smith</name>
# <address>
# <street>My street</street>
# <city>My city</city>
# <state>My state</state>
# <zipCode>90210</zipCode>
# </address>
# <phoneNumber>0123456789</phoneNumber>
# <accountNumber>12345</accountNumber>
# </payee>
# Exercise 4.2
# Write a test that POSTs the object created in 4.1
# to http://parabank.parasoft.com/parabank/services/bank/billpay?accountId=12345&amount=500
# Set the request header 'Content-Type' to 'application/xml'
# Then check that the response status code is 200
# and that the value of the response header 'Content-Type' is also equal to 'application/xml'
# Exercise 4.3
# Write a method create_xml_body_using_elementtree() that returns
# the same request body as in Exercise 4.1, but now uses the
# ElementTree library (I've imported that for you already, it's available as 'et')
# Make your life a little easier by specifying all element values as strings
# Exercise 4.4
# Repeat Exercise 4.2, but now use the XML document created in Exercise 4.3
# Don't forget to convert the XML document to a string before sending it!
|
[
"bas@ontestautomation.com"
] |
bas@ontestautomation.com
|
a5e2debc3b4de63242c2bc5f62e4db0ae3a58645
|
44f07b81df56d7ea44775784a9697648fe481478
|
/day8/faceapp/facedetect.py
|
ab3e244e889618a394e6791b7b7b4edf81d25532
|
[] |
no_license
|
shaadomanthra/cbpython-advanced
|
436510c70deca4e1ef01517f87bba0e392583a88
|
86b613f89ca0b0cd8b243c157af1a2807e6ce605
|
refs/heads/master
| 2022-11-30T23:33:45.938854
| 2020-08-12T11:20:03
| 2020-08-12T11:20:03
| 276,316,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
## detect face and draw rectangles
# import packages (pip install opencv-python)
from cv2 import cv2
import sys
# Paths to the input image and the Haar cascade description file.
imagePath = 'images/f1.jpg'
cascPath = "haarcascade_frontalface_default.xml"
# Create the haar cascade classifier from the XML description.
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the image & convert to gray scale (the detector works on grayscale).
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image; returns (x, y, w, h) bounding boxes.
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.1,    # image-pyramid scale step between detection passes
    minNeighbors=5,     # min neighboring detections required to keep a box
    minSize=(30, 30)    # ignore candidate faces smaller than 30x30 px
)
print(faces)
# Draw a green rectangle (BGR (0,255,0), thickness 2) around each face.
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Open a window with the annotated image and wait for a keypress.
cv2.imshow("Faces found", image)
cv2.waitKey(0)
# Saving the image (disabled)
# cv2.imwrite(saveimagePath, image)
|
[
"packetcode@gmail.com"
] |
packetcode@gmail.com
|
001b8e5d7167d9f7ae30d9510713bbc363cc653b
|
da934e0010380fdc6894063540f61b0ebc2c9ded
|
/nova/crypto.py
|
1f35ffa3915dad74a002a55998c536549c4b8d2d
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/cc-2
|
ed4f1dfe3c98f476ff619058d99855a16272d36b
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
refs/heads/master
| 2022-11-23T03:57:12.255817
| 2014-10-02T06:10:46
| 2014-10-02T06:10:46
| 282,512,589
| 0
| 0
|
Apache-2.0
| 2020-07-25T19:36:05
| 2020-07-25T19:36:05
| null |
UTF-8
|
Python
| false
| false
| 7,863
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrappers around standard crypto, including root and intermediate CAs,
SSH keypairs and x509 certificates.
"""
import hashlib
import logging
import os
import shutil
import tempfile
import time
import utils
from nova import vendor
import M2Crypto
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
# Flags controlling where CA material lives and whether per-project
# intermediate CAs are used.
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
def ca_path(project_id):
    """Return the cacert.pem path for a project's intermediate CA, or the
    root CA's path when project_id is falsy."""
    if not project_id:
        return "%s/cacert.pem" % (FLAGS.ca_path)
    return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
def fetch_ca(project_id=None, chain=True):
    """Return CA certificate text.

    With intermediate CAs enabled and a project_id given, returns that
    project's intermediate CA cert; chain=True appends the root CA cert.
    Otherwise returns the root CA cert alone.
    """
    if not FLAGS.use_intermediate_ca:
        # Intermediate CAs disabled: always serve the root CA.
        project_id = None
    buffer = ""  # NOTE: shadows the Python 2 builtin 'buffer'
    if project_id:
        with open(ca_path(project_id),"r") as cafile:
            buffer += cafile.read()
        if not chain:
            return buffer
    with open(ca_path(None),"r") as cafile:
        buffer += cafile.read()
    return buffer
def generate_key_pair(bits=1024):
    """Generate an SSH keypair by shelling out to ssh-keygen.

    Returns (private_key, public_key, fingerprint) as strings; the
    temporary key directory is removed before returning.
    """
    tmpdir = tempfile.mkdtemp()
    keyfile = os.path.join(tmpdir, 'temp')
    # -N "" -> unencrypted private key; -q -> quiet.
    utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
    # -l prints "<bits> <fingerprint> ..."; take the second field.
    (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
    fingerprint = out.split(' ')[1]
    private_key = open(keyfile).read()
    public_key = open(keyfile + '.pub').read()
    shutil.rmtree(tmpdir)
    # Alternative approach (returns the public key in PEM format),
    # kept for reference:
    # code below returns public key in pem format
    # key = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
    # private_key = key.as_pem(cipher=None)
    # bio = M2Crypto.BIO.MemoryBuffer()
    # key.save_pub_key_bio(bio)
    # public_key = bio.read()
    # public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
    return (private_key, public_key, fingerprint)
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
    """Convert a PEM SSL public key to OpenSSH format. Requires lsh-utils.

    The pipeline strips the PEM armor (sed), converts through
    pkcs1-conv/sexp-conv, and re-exports via lsh-export-key.  Returns a
    single '<key> name@suffix' line.
    """
    convert="sed -e'1d' -e'$d' | pkcs1-conv --public-key-info --base-64 |" \
            + " sexp-conv | sed -e'1s/(rsa-pkcs1/(rsa-pkcs1-sha1/' | sexp-conv -s" \
            + " transport | lsh-export-key --openssh"
    (out, err) = utils.execute(convert, ssl_public_key)
    if err:
        raise exception.Error("Failed to generate key: %s", err)
    return '%s %s@%s\n' %(out.strip(), name, suffix)
def generate_x509_cert(subject="/C=US/ST=California/L=The Mission/O=CloudFed/OU=NOVA/CN=foo", bits=1024):
    """Generate an RSA private key and a CSR for the given subject via openssl.

    Returns (private_key, csr) as text; the temporary work directory is
    removed before returning.
    """
    tmpdir = tempfile.mkdtemp()
    keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
    csrfile = os.path.join(tmpdir, 'temp.csr')
    logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
    utils.runthis("Generating private key: %s", "openssl genrsa -out %s %s" % (keyfile, bits))
    utils.runthis("Generating CSR: %s", "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject))
    private_key = open(keyfile).read()
    csr = open(csrfile).read()
    shutil.rmtree(tmpdir)
    return (private_key, csr)
def sign_csr(csr_text, intermediate=None):
    """Sign a CSR with the root CA, or with a per-project intermediate CA.

    When intermediate CAs are enabled and 'intermediate' names a project,
    that project's CA is created on first use (geninter.sh) and then used
    to sign.  Returns the signed certificate text.
    """
    if not FLAGS.use_intermediate_ca:
        intermediate = None
    if not intermediate:
        return _sign_csr(csr_text, FLAGS.ca_path)
    user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate)
    if not os.path.exists(user_ca):
        # Lazily create the project's intermediate CA.
        start = os.getcwd()
        os.chdir(FLAGS.ca_path)
        utils.runthis("Generating intermediate CA: %s", "sh geninter.sh %s" % (intermediate))
        os.chdir(start)
    return _sign_csr(csr_text, user_ca)
def _sign_csr(csr_text, ca_folder):
    """Sign csr_text with the CA living in ca_folder; return the cert text.

    Writes the CSR into a temp dir and runs 'openssl ca' from *inside*
    ca_folder, because the paths in openssl.cnf are relative to it.
    """
    tmpfolder = tempfile.mkdtemp()
    csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
    csrfile.write(csr_text)
    csrfile.close()
    logging.debug("Flags path: %s" % ca_folder)
    start = os.getcwd()
    # Change working dir to CA
    os.chdir(ca_folder)
    utils.runthis("Signing cert: %s", "openssl ca -batch -out %s/outbound.crt -config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder))
    os.chdir(start)
    with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile:
        return crtfile.read()
def mkreq(bits, subject="foo", ca=0):
    """Create an X509 certificate request and its private key via M2Crypto.

    Returns (request, pkey).  The 'ca' parameter is currently unused.
    NOTE(review): set_subject() is handed a plain string here; M2Crypto's
    Request API normally takes an X509_Name -- confirm this call works.
    """
    pk = M2Crypto.EVP.PKey()
    req = M2Crypto.X509.Request()
    rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
    pk.assign_rsa(rsa)
    rsa = None # should not be freed here; pk owns it after assign_rsa
    req.set_pubkey(pk)
    req.set_subject(subject)
    req.sign(pk,'sha512')
    # Sanity-check the self-consistency of the request signature.
    assert req.verify(pk)
    pk2 = req.get_pubkey()
    assert req.verify(pk2)
    return req, pk
def mkcacert(subject='nova', years=1):
    """Create a self-signed CA certificate valid for 'years' years.

    Returns (cert, private_pkey, public_pkey).  Note: this file uses
    Python 2 syntax (print statements, long()).
    """
    req, pk = mkreq(2048, subject, ca=1)
    pkey = req.get_pubkey()
    sub = req.get_subject()
    cert = M2Crypto.X509.X509()
    cert.set_serial_number(1)
    cert.set_version(2)
    cert.set_subject(sub) # FIXME subject is not set in mkreq yet
    # Validity window: now .. now + years (in UTC).
    t = long(time.time()) + time.timezone
    now = M2Crypto.ASN1.ASN1_UTCTIME()
    now.set_time(t)
    nowPlusYear = M2Crypto.ASN1.ASN1_UTCTIME()
    nowPlusYear.set_time(t + (years * 60 * 60 * 24 * 365))
    cert.set_not_before(now)
    cert.set_not_after(nowPlusYear)
    issuer = M2Crypto.X509.X509_Name()
    issuer.C = "US"
    issuer.CN = subject
    cert.set_issuer(issuer)
    cert.set_pubkey(pkey)
    # basicConstraints CA:TRUE marks this certificate as a CA cert.
    ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:TRUE')
    cert.add_ext(ext)
    cert.sign(pk, 'sha512')
    # print 'cert', dir(cert)
    print cert.as_pem()
    print pk.get_rsa().as_pem()
    return cert, pk, pkey
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
    """
    @type fp: file
    @param fp: File pointer to the file to MD5 hash. The file pointer will be
               reset to the beginning of the file before the method returns.
    @rtype: string
    @return: the hex digest version of the MD5 hash
    """
    checksum = hashlib.md5()
    fp.seek(0)
    # Hash in 8 KiB chunks to bound memory use on large files.
    chunk = fp.read(8192)
    while chunk:
        checksum.update(chunk)
        chunk = fp.read(8192)
    hex_md5 = checksum.hexdigest()
    # Rewind so callers can re-read the file afterwards.
    fp.seek(0)
    return hex_md5
|
[
"anotherjesse@gmail.com"
] |
anotherjesse@gmail.com
|
ad21dddcaff52dd22e77f283ff4e11ab18a76100
|
b8d0b260960e1c43b883049d68c15a7183df200b
|
/5_py_blog/blog_app/tests.py
|
ebafc4198267b4929abd66e68f76098e08839139
|
[] |
no_license
|
JAreina/python-django
|
59ac92d0694522c1d096bed636409d9405c5caba
|
66c7c301dec448217df6516198723e1ce987eab7
|
refs/heads/master
| 2020-03-27T18:34:59.821701
| 2018-09-07T07:49:35
| 2018-09-07T07:49:35
| 146,931,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,739
|
py
|
# Create your tests here.
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from .models import Post
class BlogTests(TestCase):
    """Tests for the blog app: Post model basics plus list/detail views."""

    def setUp(self):
        # One user and one post shared by all tests below.
        self.user = get_user_model().objects.create_user(
            username='testuser',
            email='ja@gmail.com',
            password='xxxxxx'
        )
        self.post = Post.objects.create(
            titulo='A good titulo',
            texto='Nice texto content',
            autor=self.user,
        )

    def test_string_representation(self):
        # str(Post) should be the post's titulo (title).
        post = Post(titulo='A sample titulo')
        self.assertEqual(str(post), post.titulo)

    def test_post_content(self):
        # Field values survive the round-trip through the ORM.
        self.assertEqual(f'{self.post.titulo}', 'A good titulo')
        self.assertEqual(f'{self.post.autor}', 'testuser')
        self.assertEqual(f'{self.post.texto}', 'Nice texto content')
        # NOTE(review): the three assertions below duplicate the ones above.
        self.assertEqual(f'{self.post.titulo}', 'A good titulo')
        self.assertEqual(f'{self.post.autor}', 'testuser')
        self.assertEqual(f'{self.post.texto}', 'Nice texto content')

    def test_post_list_view(self):
        # Home page renders home.html and includes the post body.
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Nice texto content')
        self.assertTemplateUsed(response, 'home.html')

    def test_post_detail_view(self):
        # Detail view exists for the created post and 404s for unknown ids.
        response = self.client.get('/post/1/')
        no_response = self.client.get('/post/100000/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(no_response.status_code, 404)
        self.assertContains(response, 'A good titulo')
        self.assertTemplateUsed(response, 'post_detalle.html')
|
[
"jareinafdez@gmail.com"
] |
jareinafdez@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.