blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
87a9d5fc07b1eeb59551a66e38e121c1bcb52f4b
|
bb0eeade4685dc89ff8a53beb813afdf7394989d
|
/ML2018/commend sys/readers.py
|
7d306676d9c3cffdfe249ecd0402e19a6f313dbb
|
[] |
no_license
|
zhaocheng1996/pyproject
|
72929cd0ba2f0486d7dc87a7defa82656bf75a8e
|
0a1973dda314f844f9898357bc4a5c8ee3f2246d
|
refs/heads/master
| 2021-10-26T08:38:43.675739
| 2019-04-11T13:52:46
| 2019-04-11T13:52:46
| 176,939,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
def read_file(filname, sep="\t"):
    """Load a ratings file into a pandas DataFrame.

    Each row of the file holds four ``sep``-separated fields:
    user id, item id, rating, and a unix timestamp ("st").
    User and item ids are shifted from 1-based to 0-based indices.

    Args:
        filname: path of the ratings file.
        sep: field separator used in the file (default: tab).

    Returns:
        DataFrame with columns ``user`` (int32), ``item`` (int32),
        ``rate`` (float32) and ``st`` (unchanged timestamp column).
    """
    frame = pd.read_csv(
        filname,
        sep=sep,
        header=None,
        names=["user", "item", "rate", "st"],
        engine='python',
    )
    # Shift 1-based ids to 0-based and pin the dtypes expected downstream.
    frame["user"] = (frame["user"] - 1).astype(np.int32)
    frame["item"] = (frame["item"] - 1).astype(np.int32)
    frame["rate"] = frame["rate"].astype(np.float32)
    return frame
#print(df)
# user item rate st
# 0 0 1192 5.0 978300760
# 1 0 660 3.0 978302109
# 2 0 913 3.0 978301968
class ShuffleIterator(object):
    """
    Endlessly yield batches of rows sampled uniformly at random
    (with replacement), typically for training data.

    ``inputs`` is a sequence of equally long columns (e.g. users,
    items, rates); each batch is returned as a list of per-column
    arrays of length ``batch_size``.
    """
    def __init__(self, inputs, batch_size=10):
        self.inputs = inputs
        self.batch_size = batch_size
        self.num_cols = len(self.inputs)
        self.len = len(self.inputs[0])
        # Stack the columns into a single (len, num_cols) matrix so a
        # batch can be gathered with one fancy-indexing operation.
        self.inputs = np.transpose(np.vstack([np.array(self.inputs[i]) for i in range(self.num_cols)]))

    def __len__(self):
        return self.len

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 protocol delegates to the Python 2 style next().
        return self.next()

    def next(self):
        """Return one random batch as a list of column arrays."""
        ids = np.random.randint(0, self.len, (self.batch_size,))
        out = self.inputs[ids, :]
        return [out[:, i] for i in range(self.num_cols)]


class OneEpochIterator(ShuffleIterator):
    """
    Sequentially generate one-epoch batches, typically for test data.

    Every row is visited exactly once per epoch; after the last batch
    StopIteration is raised and the cursor resets, so the iterator can
    be reused for another epoch.
    """
    def __init__(self, inputs, batch_size=10):
        """
        Args:
            inputs: sequence of equally long columns.
            batch_size: rows per batch; a value <= 0 yields the whole
                data set as a single batch.
        """
        super(OneEpochIterator, self).__init__(inputs, batch_size=batch_size)
        if batch_size > 0:
            # BUG FIX: np.array_split requires an integer section count;
            # np.ceil returns a float, which NumPy >= 1.20 rejects with
            # a TypeError.  Cast explicitly.
            n_batches = int(np.ceil(self.len / batch_size))
            self.idx_group = np.array_split(np.arange(self.len), n_batches)
        else:
            self.idx_group = [np.arange(self.len)]
        self.group_id = 0

    def next(self):
        """Return the next sequential batch as a list of column arrays."""
        if self.group_id >= len(self.idx_group):
            self.group_id = 0  # reset so the iterator is reusable
            raise StopIteration
        out = self.inputs[self.idx_group[self.group_id], :]
        self.group_id += 1
        return [out[:, i] for i in range(self.num_cols)]
read_file('./ml-1m/ratings.dat', sep="::")
|
[
"34829837+zhaocheng1996@users.noreply.github.com"
] |
34829837+zhaocheng1996@users.noreply.github.com
|
79dcf66b9517d6c9857138b38aa4bebd074af7e9
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_noisy2781.py
|
63f0d647daa70d02a644d9fe38bd1a0e985c5100
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,238
|
py
|
# qubit number=4
# total number=40
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings character by character.

    Note: the result is returned in *reversed* order relative to the
    inputs (this mirrors the original implementation's contract).
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as "0" or "1"."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f on n control qubits plus one target.

    For every n-bit pattern ``rep`` with f(rep) == "1", a multi-controlled
    Toffoli flips the target qubit; X gates temporarily invert the
    zero-valued controls so the MCT fires exactly on that pattern
    ('noancilla' mode, no helper qubits).
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) != "1":
            continue
        # Positions whose control must be inverted for this pattern.
        zero_positions = [j for j in range(n) if rep[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        # Undo the inversions so the next pattern starts clean.
        for j in zero_positions:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the benchmark circuit on n qubits.

    Applies a fixed, auto-generated gate sequence (the trailing
    "# number=..." comments are the generator's gate ids), inserts the
    oracle for f on qubits 0..n-2 with qubit n-1 as the target, and
    finally measures every qubit into the classical register.

    NOTE(review): the gate order is significant; do not reorder calls.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=13
    prog.cx(input_qubit[0],input_qubit[3]) # number=17
    prog.x(input_qubit[3]) # number=18
    prog.cx(input_qubit[0],input_qubit[3]) # number=19
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[1]) # number=31
    prog.cz(input_qubit[2],input_qubit[1]) # number=32
    prog.h(input_qubit[1]) # number=33
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5
    # Oracle over the first n-1 qubits, targeting the last qubit.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[0]) # number=24
    prog.cz(input_qubit[3],input_qubit[0]) # number=25
    prog.h(input_qubit[0]) # number=26
    prog.h(input_qubit[0]) # number=37
    prog.cz(input_qubit[3],input_qubit[0]) # number=38
    prog.h(input_qubit[0]) # number=39
    prog.z(input_qubit[3]) # number=29
    prog.cx(input_qubit[3],input_qubit[0]) # number=30
    prog.x(input_qubit[2]) # number=23
    prog.cx(input_qubit[3],input_qubit[0]) # number=22
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.x(input_qubit[3]) # number=36
    prog.cx(input_qubit[3],input_qubit[0]) # number=34
    prog.cx(input_qubit[3],input_qubit[0]) # number=35
    # circuit end
    # Measure every qubit into the matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    # f(rep) = (a . rep) XOR b: a Bernstein-Vazirani-style oracle function.
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = FakeVigo()  # noisy mock backend
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # NOTE(review): the backend is re-created and the circuit transpiled only
    # for reporting below; the counts above come from the untranspiled run.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # NOTE(review): the file handle is not closed if a print raises; a
    # `with open(...)` block would be safer.
    writefile = open("../data/startQiskit_noisy2781.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
a8edb0da7d8720a5f48f1695b3f768a2a34ec969
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/010_strings/_exercises/Python 3 Most Nessesary/6.10. Search and replace in line.py
|
c8f366f01071b74a68d0f19128b40eb84fc3a1d0
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,873
|
py
|
# # -*- coding: utf-8 -*-
#
# s = "пример пример Пример"
# print ?.f.. "при" , ?.f.. "При" , ?.f.. "тест"
# # (0, 14, -1)
# print ?.f.. "при", 9 , ?.f.. "при", 0, 6 , ?.f.. "при", 7, 12
# # (-1, 0, 7)
#
#
# s = "пример пример Пример"
# print ?.i..("при" , ?.i..("при", 7, 12 , ?.i..("При", 1
# # (0, 7, 14)
# # print(s.index("тест"))
# # Traceback (most recent call last):
# # File "<pyshell#24>", line 1, in <module>
# # s.index("тест")
# # ValueError: substring not found
#
#
# s = "пример пример Пример Пример"
# print ?.rf.. "при" , ?.rf.. "При" , ?.rf.. "тест"
# # (7, 21, -1)
# print ?.f.. "при", 0, 6 , ?.f.. "При", 10, 20
# # (0, 14)
#
#
# s = "пример пример Пример Пример"
# print ?.ri.. "при" , ?.ri.. "При" , ?.ri.. "при", 0, 6
# # (7, 21, 0)
# # print(s.rindex("тест"))
# # Traceback (most recent call last):
# # File "<pyshell#30>", line 1, in <module>
# # s.rindex("тест")
# # ValueError: substring not found
#
#
# s = "пример пример Пример Пример"
# print ?.c.. "при" , ?.c.. "при", 6 , ?.c.. "При"
# # (2, 1, 2)
# print ?.c.. "тест"
# 0
#
#
# s = "пример пример Пример Пример"
# print ?.st..w.. "при" , ?.st..w.. "При"
# # (True, False)
# print ?.st..w.. "при", 6 , ?.st..w.. "При", 14
# # (False, True)
#
#
# s = "пример пример Пример Пример"
# print ?.st..w.. "при", "При"
# # True
#
#
# s = "подстрока ПОДСТРОКА"
# print ?.e..w.. "ока" , ?.e..w.. "ОКА"
# # (False, True)
# print ?.e..w.. "ока", 0, 9
# # True
#
#
# s = "подстрока ПОДСТРОКА"
# print ?.e..w.. "ока", "ОКА"
# # True
#
# s = "Привет, Петя"
# print ?.re.. "Петя", "Вася"
# # Привет, Вася
# print ?.re.. "петя", "вася" # Зависит от регистра
# # Привет, Петя
# s = "strstrstrstrstr"
# print ?.re.. "str", "" , ?.re.. "str", "", 3
# # ('', 'strstr')
#
#
# s = "Пример"
# d = o.. "П" N.. o.. "р" o.. "Р"
# print ?
# # {1088: 1056, 1055: None}
# print ?.tr.. d
# # 'РимеР'
#
#
# t = st_.m.tr.. "а" "А", "о" "О", "с" N..
# print(t
# # {1072: 'А', 1089: None, 1086: 'О'}
# print "строка".tr.. t
# # 'трОкА'
#
#
# t = st_.m.tr.. "абвгдежзи", "АБВГДЕЖЗИ"
# print(t)
# # {1072: 1040, 1073: 1041, 1074: 1042, 1075: 1043, 1076: 1044,
# # 1077: 1045, 1078: 1046, 1079: 1047, 1080: 1048}
# print "абвгдежзи".tr.. t
# # 'АБВГДЕЖЗИ'
#
#
# t = st_.m.tr.. "123456789", "0" * 9, "str"
# print(t)
# # {116: None, 115: None, 114: None, 49: 48, 50: 48, 51: 48,
# # 52: 48, 53: 48, 54: 48, 55: 48, 56: 48, 57: 48}
# print "str123456789str".tr.. t
# # '000000000'
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
258bb0e2762aefc4fda2a6a064e89faad4e34e96
|
ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86
|
/pychron/hardware/polyinomial_mapper.py
|
aa3f53521645648ca77a9b4089bb88812a44f0bd
|
[
"Apache-2.0"
] |
permissive
|
UManPychron/pychron
|
2fb7e479a9f492423c0f458c70102c499e1062c4
|
b84c9fd70072f9cbda30abe2c471e64fe3dd75d8
|
refs/heads/develop
| 2022-12-03T23:32:45.579326
| 2020-01-29T19:02:20
| 2020-01-29T19:02:20
| 36,100,637
| 0
| 0
| null | 2015-05-23T00:10:06
| 2015-05-23T00:10:05
| null |
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from numpy import poly1d
from scipy import optimize
from traits.api import HasTraits, List, Float
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.strtools import csv_to_floats
class PolynomialMapper(HasTraits):
    """
    Map between measured and output values using a polynomial.

    Coefficients are interpreted exactly as by numpy.poly1d (highest
    power first): ``1,2,3`` means ``1*x^2 + 2*x + 3``.
    """
    _coefficients = List
    output_low = Float(0)
    output_high = Float(100)
    _polynomial = None

    def set_coefficients(self, cs):
        """Install the coefficient list and rebuild the poly1d object."""
        self._coefficients = cs
        self._polynomial = poly1d(cs)

    def parse_coefficient_string(self, s):
        """Set coefficients from a comma-separated string, e.g. "1,2,3"."""
        self.set_coefficients(csv_to_floats(s))

    def map_measured(self, v):
        """
        Convert a measured value to an output value (Voltage -> Temp).

        Falls through unchanged when no (non-trivial) polynomial is set.
        """
        return self._polynomial(v) if self._polynomial else v

    def map_output(self, v):
        """
        Convert an output value back to a measured value (Voltage <- Temp).

        Solves p(x) = v for x by subtracting v from the constant term and
        locating the root with Brent's method in [output_low, output_high].
        """
        shifted = self._coefficients[:]
        shifted[-1] -= v
        return optimize.brentq(poly1d(shifted), self.output_low, self.output_high)
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
04398fb29841e18b9505fe74de19ad29fe08b860
|
7a527060afabd2e0867d5dcf4b75592b43ef5005
|
/Leetcode/二叉树/103. 二叉树的锯齿形层次遍历.py
|
d361d16ca05d5ccb931c8c609b61586d0b68b318
|
[] |
no_license
|
Stevenzzz1996/MLLCV
|
ff01a276cf40142c1b28612cb5b43e563ad3a24a
|
314953b759212db5ad07dcb18854bf6d120ba172
|
refs/heads/master
| 2023-02-10T18:11:30.399042
| 2021-01-05T12:05:21
| 2021-01-05T12:05:21
| 267,804,954
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
#!usr/bin/env python
# -*- coding:utf-8 -*-
# author: sfhong2020 time:2020/5/7 15:01
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
        """Breadth-first traversal that reverses every other level's values."""
        if not root:
            return []
        result = []
        level = [root]
        left_to_right = True  # level 0 is emitted left-to-right
        while level:
            values = [node.val for node in level]
            result.append(values if left_to_right else values[::-1])
            left_to_right = not left_to_right
            # Collect children in natural order for the next level.
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return result
|
[
"2499143041@qq.com"
] |
2499143041@qq.com
|
080728dd79f7bff9d345033a81fe4b83e3180222
|
4e47bb6c804150f8be2c7aee96718c8347765cf8
|
/sample.py
|
bfd316339170a9eddb2844089423ec1a214dfd3a
|
[] |
no_license
|
vvasuki/misc-python
|
89955529b32bf32cf06ab726319a2ccbb6e6accb
|
5d6d53bfec0dc1f85c24bb5e0cf6e2fcec31a389
|
refs/heads/master
| 2022-12-09T14:30:20.149062
| 2022-12-01T04:14:17
| 2022-12-01T04:14:17
| 149,946,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,495
|
py
|
#! /usr/bin/python
#easy to use python documentation.. intended for reference and reuse of source code (sample code) slices.
#for help: install python-docs package.
#see this then: file:///usr/share/doc/python-docs-2.4.1/html/tut/tut.html
#to enter interactive mode, type: python
#to exit python shell: EOF character .. ^d
#you can set an environment variable named PYTHONSTARTUP to the name of a file containing your start-up commands.
#interpreter can act as a calculator
#arithmatic operators as in c.
#>>> width = 20
#>>> height = 5*9
#>>> width * height
#900
#9+_ #note underscore (implicit variable)
#909
#complex numbers too
#>>> 1j * 1J
#(-1+0j)
#>>> 1j * complex(0,1)
#(-1+0j)
#>>> a=1.5+0.5j
#>>> a.real
#1.5
#>>> a.imag #that is how you print in interactive mode.. directly quote the variable.
#0.5
#"python -c command [arg] ..."
#"python -m module [arg] ...", which executes the source file for module
#"python file" and "python <file" are different..
#in that the former gets input from stdin.
#sys.argv, a list of strings has the script name and additional arguments from shell.
#no arguments are given,
#sys.argv[0] is an empty string.
#When the script name is given as '-' (meaning standard input), sys.argv[0] is set to '-'.
#When -c command is used, sys.argv[0] is set to '-c'.
#When -m module is used, sys.argv[0] is set to the full name of the located module.
#There are six sequence types: strings, Unicode strings, lists, tuples, buffers, and xrange objects.
#lists are like: [a, b, c]
#tuples are like: a, b, c or () or (d,)
#Buffer objects are not directly supported by Python syntax, but can be created by calling the builtin function buffer().
#Xrange objects are similar to buffers in that there is no specific syntax to create them,
#but they are created using the xrange() function.
#general sequence operators:
#in, not in, +, *, s[i], s[i:j], s[i:j:k], len, min, max
lstTmp = [[]] * 3
#>>> lists
#[[], [], []]
#>>> lists[0].append(3)
#>>> lists
#[[3], [3], [3]]
lstTmp[0:2] = [] #removed elements.. size of list changable. elemensts replacable too.
#functions on lists:
#append extend insert remove(if the arg is matched) pop(can take args) index count sort reverse
#an inbuilt function to make list of numbers:
rngTmp=range(4)
rngTmp=range(2,8)
iTmp=1
iTmp,iTmp1=1,1
if iTmp:
#indentation is necessary for blocks in python
strTmp="iTmp is 1"
print strTmp, " ", iTmp
strTmp='yeah, both single and double quotes can encapsulate strings.\n\
yeah, note the continuation of the string into the next line.'
print strTmp
#any non-zero integer value is true; zero is false.
#The condition may also be a string or list value, in fact any sequence;
#anything with a non-zero length is true, empty sequences are false.
#comparison operators as in C.
strTmp=r'this is a raw string \
oye. it works thus.'
strTmp="""
another way of writing multiline strings.
"""
strTmp='''
yet another way of writing multiline strings.
'''
strTmp="""
look at this piece of string concatenation!
""" "oye. write them side by side.\n" + "or use the '+' sign\n"+ "muaddib "*5
print strTmp
#slice notation: strTmp[0], strTmp[2,5]
#strTmp[:5] and strTmp[0,5] are the same.
#>>> word[-1] # The last character.. from the right. a negative index is used.
#strTmp[0]='p' is not allowed.
#>>> 'x' + word[1:]
#'xelpA'
#is ok.
#degenerate slices are handled gracefully:
#word='HelpA'
#>>> word[1:100]
#'elpA'
#>>> word[10:]
#''
#>>> word[2:1]
#''
#>>> word[-100:]
#'HelpA'
#>>> word[-10] # error
ustrTmp= u' a unicode \u0020 string !'
#u'a unicode string !'
#the lower 256 characters of Unicode are the same as the 256 characters of Latin-1.
#Codecs can convert are Latin-1, ASCII, UTF-8, and UTF-16.
ustrTmp.encode('utf-8')
print ustrTmp
#string formatting options
strTmp="string formatting or interpolation operator %% is like %(familiarFunction)s" \
%{'familiarFunction':"sprintf()"}
print strTmp;
#the following options may be used in %(varName)[formatting]option:
# d i o u x X e E f F g G c %
# r s (for python objects, using repr and str functions)
#
#the following are string related functions:
#strip() len() capitalize() lower() swapcase() l/rjust() center() l/rstrip() title()
#join(sequenceOfStrings) [r]split(delimiter) splitlines()
#[r]find () count(substr[,start,end]) [r]index() translate(table[, deletechars])
#endswith() startswith()
#isalnum() isalpha() isdigit() islower() isspace() isupper() istitle()
#zfill()
#str(), unicode(), float(), int() and long() convert among datatypes
#decision statements: if, else, elif
#looping:
#while looping: while a<b:
#for statement iterates over the items of any sequence: for x in ['cat', 'window', 'defenestrate']:
#iterate over a sequence of numbers: use for with range.
#looping constructs can have else clauses.
#break and continue are as in C.
def function(iTmp):
#reference to the argument is passed.
#default value may be optionally specified..
#it is the value evaluated at the time of making of the function object.
"this is the function's optional docstring"
print "oye, a function was defined here."
#global variables cannot be directly assigned a value within a function
#(unless named in a global statement), although they may be referenced.
#unless the function explicitly returns something,
#it returns None object.
if iTmp:
return [iTmp]
else:
return
print function.__doc__
#a function is actually an object in the global namespace too.
#function can be referenced only after it is defined... "interpreter".. remember?
print function
print function(0), function(1)
iTmp = 5
def function(arg=iTmp):
print arg
iTmp = 6
#default is evaluated only once. rest of the calls, it is shared...
#to be expected. for the default is filled in when the function object is created.
function() #printeth 5
def function(a, L=[]):
    #NOTE: mutable default argument -- the default list is created once at
    #definition time and shared across calls, which is exactly the pitfall
    #this tutorial snippet demonstrates.
    L.append(a)
    return L #L has scope only within this here block
print function(1)
print function(2)
print function(3)
print function(1,[])
print function(3) #hehe. [1, 2, 3, 3]
#the above function behaved thusly because the default was a mutable object..
#not an immutable one.. like below.
def function(a, L=None):
    #the None-sentinel idiom: a fresh list is created on every call,
    #avoiding the shared-mutable-default behaviour of the earlier version.
    if L is None:
        L = []
    L.append(a)
    return L
#keyword arguments.
def function(arg1,arg2='ole',arg3='jo'):
pass #this is an empty statement.
print arg1
function(arg2=99, arg1=0231)
#all functions accept a tuple of arguments in place of passing a literal unpacked sequence.
#the contents of the literal tuple,
#though they may contain references to objects,
#are themselves passed by value.
tupTmp=(0231,99)
function(*tupTmp)
#the * operator unpacks the tuple
#variable number of arguments may be passed as below.
#they may be passed in the form of a tuple of arguments, and
#also as a dictionary (hashtable) of arguments.
def function(arg, *argTuple, ** argDictionary):
#see how a for loop is used with a tuple
for argentum in argTuple: pass
#see how argDictioary is used, and notice the use of the dictionary method keys:
keynen = argDictionary.keys()
#see that the sequence keynen has a method called sort
keynen.sort()
function("sa","asdfa","sdf","asdff",
god="allah",
prophet="mohammed")
#lambda forms from Lisp.. functions used to make function objects
def function(arg):
return lambda argLm: arg+argLm
#Like nested function definitions, lambda forms can reference variables from the containing scope
fnTmp=function(strTmp)
print "lambda land ", fnTmp("sdf")
|
[
"vishvas.vasuki@gmail.com"
] |
vishvas.vasuki@gmail.com
|
b40bac9713b087f67ca3260d194ce949da4c8dae
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/nn/modules/math_ops.py
|
a16ddf4555f82be980156024d8fa893e24247691
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 7,845
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Optional, Sequence, Union
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.modules.module import Module
from oneflow.nn.modules.utils import _check_axis
from oneflow.ops.transpose_util import (
get_inversed_perm,
get_perm_when_transpose_axis_to_last_dim,
)
def asin_op(input):
"""
Returns a new tensor with the arcsine of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\sin^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32)
>>> output = flow.asin(input)
>>> output.shape
oneflow.Size([4])
>>> output
tensor([-0.5236, 0.9273, 1.5708, -0.9273], dtype=oneflow.float32)
>>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, -1.0]]), dtype=flow.float32)
>>> output1 = input1.asin()
>>> output1.shape
oneflow.Size([2, 2])
>>> output1
tensor([[ 0.9273, 1.5708],
[-0.6435, -1.5708]], dtype=oneflow.float32)
"""
return flow._C.asin(input)
def arcsin_op(input):
"""
Alias for :func:`oneflow.asin`
"""
return flow._C.asin(input)
def asinh_op(input):
"""
Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\sinh^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([2, 3, 4]), dtype=flow.float32)
>>> output = flow.asinh(input)
>>> output.shape
oneflow.Size([3])
>>> output
tensor([1.4436, 1.8184, 2.0947], dtype=oneflow.float32)
>>> input1 = flow.tensor(np.array([[-1, 0, -0.4], [5, 7, 0.8]]), dtype=flow.float32)
>>> output1 = input1.asinh()
>>> output1.shape
oneflow.Size([2, 3])
>>> output1
tensor([[-0.8814, 0.0000, -0.3900],
[ 2.3124, 2.6441, 0.7327]], dtype=oneflow.float32)
"""
return flow._C.asinh(input)
def arcsinh_op(input):
"""
Alias for :func:`oneflow.asinh`
"""
return flow._C.asinh(input)
def asinh_op_tensor(input):
"""
See :func:`oneflow.asinh`
"""
return flow._C.asinh(input)
def inplace_sin_op_tensor(input):
"""
In-place version of :func:`oneflow.sin`
"""
return flow._C.sin_(input)
def atan_op(input):
"""
Returns a new tensor with the arctangent of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\tan^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32)
>>> output = flow.atan(input)
>>> output.shape
oneflow.Size([3])
"""
return flow._C.atan(input)
def arctan_op(input):
"""
Alias for :func:`oneflow.atan`
"""
return flow._C.atan(input)
def fmod_op(input, other):
"""
fmod(input, other, *, out=None) -> Tensor
Computes the element-wise remainder of division.
The dividend and divisor may contain both for integer and floating point
numbers. The remainder has the same sign as the dividend :attr:`input`.
Supports broadcasting to a common shape, integer and float inputs.
Args:
input (Tensor): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> import oneflow as flow
>>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3]), 2.)
tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32)
>>> flow.fmod(flow.tensor([1, 2, 3, 4, 5.]), 1.5)
tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000], dtype=oneflow.float32)
>>> flow.fmod(flow.tensor([1, 2, 3, 4., -5]), flow.tensor([4, 2, 1, 3., 1]))
tensor([1., 0., 0., 1., -0.], dtype=oneflow.float32)
"""
return flow._C.fmod(input, other)
def addmm(x, mat1, mat2, alpha=1, beta=1):
    """Return ``beta * x + alpha * (mat1 @ mat2)``.

    Args:
        x: tensor added to the scaled matrix product; must be
            broadcastable with the (n, p) product.
        mat1: left matrix operand.
        mat2: right matrix operand.
        alpha: scaling factor for the matrix product ``mat1 @ mat2``.
        beta: scaling factor for ``x``.

    Returns:
        The resulting tensor.

    Raises:
        ValueError: if any operand has more than 2 dimensions.
    """
    if len(x.shape) > 2 or len(mat1.shape) > 2 or len(mat2.shape) > 2:
        # Typo fixed in the message ("matrixes" -> "matrices"); no `else`
        # needed after raise.
        raise ValueError("input matrices shape can not be greater than 2")
    return flow.mul(x, beta) + flow.mul(flow._C.matmul(mat1, mat2), alpha)
def addmm_op(input, mat1, mat2, alpha=1, beta=1):
"""addmm(beta=1, input, alpha=1, mat1, mat2, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \\times m)` tensor, :attr:`mat2` is a
:math:`(m \\times p)` tensor, then :attr:`input` must be
broadcastable with a :math:`(n \\times p)` tensor
and :attr:`out` will be a :math:`(n \\times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\\text{out} = \\beta\\ \\text{input} + \\alpha\\ (\\text{mat1}_i \\mathbin{@} \\text{mat2}_i)
For inputs of type `float` or `double`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\\beta`)
input (Tensor): matrix to be added
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\\alpha`)
mat1 (Tensor): the first matrix to be multiplied
mat2 (Tensor): the second matrix to be multiplied
out (Tensor, optional): the output tensor.
For example:
>>> import numpy as np
>>> import oneflow as flow
>>> input = flow.tensor(np.array([[1,2,4],[5,11,9.1]]))
>>> mat1 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5]]))
>>> mat2 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5],[3.7,2.2,8.1]]))
>>> output = flow.addmm(input, mat1, mat2)
>>> output
tensor([[100.6800, 33.8300, 126.8700],
[110.0100, 43.4800, 133.6100]], dtype=oneflow.float64)
>>> output.shape
oneflow.Size([2, 3])
>>> input2 = flow.tensor(np.array([1.7]))
>>> mat1 = flow.tensor(np.array([[1,2],[5,9.1],[7.7,1.4]]))
>>> mat2 = flow.tensor(np.array([[1,2,3.7],[5,9.1,6.8]]))
>>> output2 = flow.addmm(input2, mat1, mat2, alpha=1, beta=2)
>>> output2
tensor([[14.4000, 23.6000, 20.7000],
[53.9000, 96.2100, 83.7800],
[18.1000, 31.5400, 41.4100]], dtype=oneflow.float64)
>>> output2.shape
oneflow.Size([3, 3])
"""
return addmm(input, mat1, mat2, alpha, beta)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
|
[
"noreply@github.com"
] |
Oneflow-Inc.noreply@github.com
|
6697e58f58dc6dc054679c72808f91d06415102d
|
88ea7bf2bbc8ffba551e881df553ae5ceac70dd6
|
/deblock/codes/models/archs/archs_sub/SRResNet_o2m_spectral_arch.py
|
83e92751b2e71f264c06bc251d1ed9cc9b2e4680
|
[
"Apache-2.0"
] |
permissive
|
zhouhuanxiang/repo-zhx
|
2d1135bb2f925e051e1b0bcfc2ed53fb34ea51c5
|
76b577eea13130c60bf7bff8c486f51766128661
|
refs/heads/main
| 2023-06-10T02:56:17.978649
| 2021-06-29T02:35:57
| 2021-06-29T02:35:57
| 381,213,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,389
|
py
|
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.archs.arch_util as arch_util
class ResidualBlock_Spectral_withZ(nn.Module):
    '''Residual block w/o BN, with spectrally normalized convolutions.
    ---Conv-ReLU-Conv-+-
     |________________|

    The input may carry more channels than the output (e.g. an extra
    noise/condition map); the skip connection is cropped to the output's
    channel count before the residual add.
    '''

    def __init__(self, ni=65, no=64):
        super(ResidualBlock_Spectral_withZ, self).__init__()
        self.conv1 = nn.utils.spectral_norm(nn.Conv2d(ni, ni, 3, 1, 1, bias=True))
        self.conv2 = nn.utils.spectral_norm(nn.Conv2d(ni, no, 3, 1, 1, bias=True))
        # Small-scale weight init, as elsewhere in this codebase.
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, x):
        hidden = F.relu(self.conv1(x), inplace=True)
        hidden = self.conv2(hidden)
        # Crop the identity path to the produced channel count, then add.
        skip = x[:, :hidden.shape[1], :, :]
        return skip + hidden
class MSRResNet(nn.Module):
    ''' modified SRResNet'''
    # A single-channel conditioning map z is concatenated (dim=1) to the
    # features before every residual block and before each later conv,
    # hence the `nf + 1` input channel counts.

    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):
        super(MSRResNet, self).__init__()
        self.upscale = upscale
        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
        # basic_block = functools.partial(ResidualBlock_noBN_withZ, nf=nf)
        # self.recon_trunk = arch_util.make_layer(basic_block, nb)
        # nb residual blocks; each consumes nf+1 channels (features + z).
        self.recon_trunk = nn.ModuleList([ResidualBlock_Spectral_withZ(nf + 1, nf) for i in range(nb)])
        # upsampling
        self.upconv1 = nn.Conv2d(nf + 1, nf, 3, 1, 1, bias=True)
        self.HRconv = nn.Conv2d(nf + 1, nf, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # initialization
        arch_util.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last],
                                     0.1)

    def forward(self, x, z):
        # z is concatenated to the feature maps at every stage below.
        out = self.lrelu(self.conv_first(x))
        # out = self.recon_trunk(fea)
        for layer in self.recon_trunk:
            out = layer(torch.cat((out, z), dim=1))
        out = self.lrelu(self.upconv1(torch.cat((out, z), dim=1)))
        out = self.conv_last(self.lrelu(self.HRconv(torch.cat((out, z), dim=1))))
        # Global residual: interpolate the input by `upscale` and add it;
        # crop to 3 channels when the input carries extra channels.
        # NOTE(review): despite the "upconv" naming, the feature path is
        # never spatially upsampled here, so `out` and `base` only match
        # in size when upscale == 1 (deblocking use) -- confirm intended.
        base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)
        if out.shape[1] == base.shape[1]:
            out += base
        else:
            out += base[:, :3, :, :]
        return out
|
[
"zhouhx.cn@gmail.com"
] |
zhouhx.cn@gmail.com
|
9269f28f522d0d9b3083bf0059d3b6ed41848195
|
d67ae1b2f20d96b7e36c82c3a298882042c951c5
|
/src/asyncf.py
|
05a406b445e0dbcbd7eb0341c1360003b928bcfe
|
[
"MIT"
] |
permissive
|
Vistaus/my-weather-indicator
|
8a99e69fd9d2c03ab5cca578a89da38d6676a5ab
|
32aaa77a14cf2f85edbfb72c45d154e1676abe83
|
refs/heads/master
| 2021-01-02T12:00:00.506304
| 2020-02-11T19:42:47
| 2020-02-11T19:42:47
| 239,614,123
| 0
| 0
|
MIT
| 2020-02-10T21:11:07
| 2020-02-10T21:11:06
| null |
UTF-8
|
Python
| false
| false
| 2,549
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# async.py
#
# This file is part of uPodcatcher
#
# Copyright (C) 2014
# Lorenzo Carbonell Cerezo <lorenzo.carbonell.cerezo@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gi
try:
gi.require_version('GLib', '2.0')
except Exception as e:
print(e)
exit(1)
from gi.repository import GLib
import threading
import traceback
__all__ = ['async_function']
def _async_call(f, args, kwargs, on_done):
def run(data):
f, args, kwargs, on_done = data
error = None
result = None
try:
result = f(*args, **kwargs)
except Exception as e:
e.traceback = traceback.format_exc()
error = 'Unhandled exception in asyn call:\n{}'.format(e.traceback)
GLib.idle_add(lambda: on_done(result, error))
data = f, args, kwargs, on_done
thread = threading.Thread(target=run, args=(data,))
thread.daemon = True
thread.start()
def async_function(on_done=None):
'''
A decorator that can be used on free functions so they will always be
called asynchronously. The decorated function should not use any resources
shared by the main thread.
Example:
def do_async_stuff(self, input_string):
def on_async_done(result, error):
# Do stuff with the result and handle errors in the main thread.
if error:
print(error)
elif result:
print(result)
@async_function(on_done=on_async_done)
def do_expensive_stuff_in_thread(input_string):
# Pretend to do expensive stuff...
time.sleep(10)
stuff = input_string + ' Done in a different thread'
return stuff
do_expensive_stuff_in_thread(input_string)
'''
def wrapper(f):
def run(*args, **kwargs):
_async_call(f, args, kwargs, on_done)
return run
return wrapper
|
[
"lorenzo.carbonell.cerezo@gmail.com"
] |
lorenzo.carbonell.cerezo@gmail.com
|
1a43fcbec667b510a0a1ff82df246326a83a70fb
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/andersbll/deeppy/deeppy/model/__init__.py
|
3fc2414a36c1575b1ca19d8106a70e4a76258fb6
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119
| 2019-05-07T18:42:52
| 2019-05-07T18:42:52
| 282,079,884
| 0
| 0
| null | 2020-07-23T23:46:09
| 2020-07-23T23:46:08
| null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from .adversarial import AdversarialNet
from .feedforward import FeedForwardNet, ClassifierNet, RegressorNet
from .variational_autoencoder import VariationalAutoencoder
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
7e8116443903d033a1a47a2ffed807aec258d0c3
|
49e17d736df9889b3a0d91705abd0f3ed579d17c
|
/quests/Temple_Of_Ikov.py
|
b4d3f0e615c687daab5b6c89a084be6e2400e914
|
[] |
no_license
|
TheWhirl/RunescapeQuestWebsite
|
4f258c04a1c1e6bb9f6d9e0fa63fdcab452ccfc2
|
8d5dacbc8251bd1f2dded4ffa04400ed48e0f1fb
|
refs/heads/master
| 2020-05-16T02:54:35.603906
| 2018-12-23T13:03:58
| 2018-12-23T13:03:58
| 182,643,424
| 0
| 0
| null | 2019-04-22T07:22:00
| 2019-04-22T07:21:59
| null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
import os
import sys
sys.path.insert(0,
os.path.dirname(os.path.realpath(__file__))[
0:-len("quests")])
from QuestInfo import Quest
class Temple_Of_Ikov(Quest):
def __init__(self):
super().__init__("Temple of Ikov")
self.age = 5
self.difficulty = "Experienced"
self.length = "Medium"
self.quest_points = 1
self.thieving = 42
self.ranged = 40
|
[
"musomaddy@gmail.com"
] |
musomaddy@gmail.com
|
3ab8865d156fd4539ee009f877d33e4d2f16b8ae
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=13/params.py
|
28bdc367d387d98fbf09079da0322b1eedc608ea
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.628952',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 13,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
ac78f5706a5fa6ab691f744614ebe243eeb0e6e6
|
81407be1385564308db7193634a2bb050b4f822e
|
/the-python-standard-library-by-example/SimpleXMLRPCServer/SimpleXMLRPCServer_dotted_name.py
|
927f913a51fc70c40159c7b5e56b864de61651e3
|
[
"MIT"
] |
permissive
|
gottaegbert/penter
|
6db4f7d82c143af1209b4259ba32145aba7d6bd3
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
refs/heads/master
| 2022-12-30T14:51:45.132819
| 2020-10-09T05:33:23
| 2020-10-09T05:33:23
| 305,266,398
| 0
| 0
|
MIT
| 2020-10-19T04:56:02
| 2020-10-19T04:53:05
| null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
from SimpleXMLRPCServer import SimpleXMLRPCServer
import os
server = SimpleXMLRPCServer(('localhost', 9000), allow_none=True)
server.register_function(os.listdir, 'dir.list')
server.register_function(os.mkdir, 'dir.create')
server.register_function(os.rmdir, 'dir.remove')
try:
print 'Use Control-C to exit'
server.serve_forever()
except KeyboardInterrupt:
print 'Exiting'
|
[
"350840291@qq.com"
] |
350840291@qq.com
|
5d09f348af5df16b53230056d4eb3e6758f688c8
|
9d7a1f61e957c6ba688ba9acbd4810bfc41259bd
|
/crawling/scrapy/section04_03/section04_03/pipelines.py
|
d6934d13d6491f2787f92d85948d74fd762da68b
|
[] |
no_license
|
saanghyuk/data_science_python
|
17f4c35b9f4d197991fd0c03eecd06487ceaa9a0
|
7dde1ed2a3570edbdd716a43a4a340e64f7e2bb0
|
refs/heads/master
| 2023-08-24T10:47:13.478635
| 2021-11-05T15:37:33
| 2021-11-05T15:37:33
| 355,115,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,113
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
import csv
import xlsxwriter
class TestSpiderPipeline:
# 초기화 메서드
def __init__(self):
# 엑셀 처리 선언
self.workbook = xlsxwriter.Workbook("./result_excel.xlsx")
# CSV처리 선언(a, w 옵션 변경)
self.file_opener = open("./result_excel.csv", 'w')
self.csv_writer = csv.DictWriter(self.file_opener, fieldnames = ['rank_num', 'site_name', 'daily_time_site', 'daily_page_view', 'is_pass'])
#워크시트
self.worksheet = self.workbook.add_worksheet()
# 삽입 수
self.rowcount = 1
# 최초 1회 실행
def open_spider(self, spider):
spider.logger.info('TestSpider Pipeline Started ')
def process_item(self, item, spider):
if int(item.get('rank_num')) < 41:
item['is_pass'] = True
# 엑셀 저장
self.worksheet.write('A%s' %self.rowcount, item.get('rank_num'))
self.worksheet.write('B%s' %self.rowcount, item.get('site_name'))
self.worksheet.write('C%s' %self.rowcount, item.get('daily_time_site'))
self.worksheet.write('D%s' %self.rowcount, item.get('daily_page_view'))
self.worksheet.write('E%s' %self.rowcount, item.get('is_pass'))
self.rowcount+=1
# CSV 저장
self.csv_writer.writerow(item)
return item
else:
raise DropItem('Dropped Item. Because This Site Rank is {}'.format(item.get('rank_number')))
# print('Sorry, Dropped')
# 마지막 1회 실행
def close_spider(self, spider ):
# 엑셀 파일 닫기
self.workbook.close()
# csv파일 닫기
self.file_opener.close()
# 종료 선언
spider.logger.info('TestSpider Pipeline Closed')
|
[
"saanghyuk@gmail.com"
] |
saanghyuk@gmail.com
|
f0128317036c9b966541e24a1e1efe172ad2fce5
|
cc5eb8eb50d64ffbca780c42a908053ec549f295
|
/python-in-a-day-scripts/ch12 program/script_002.py
|
43129ebbb2a9f5b3ad633d6fc7d93d8accaedfbb
|
[] |
no_license
|
bemagee/LearnPython
|
328b1f7a9d5046fe1503aece8a5134a7dd2727d2
|
a42565f8fb45f9e2ebbcdcf359ebb9092bf837c2
|
refs/heads/master
| 2020-12-13T02:45:30.308604
| 2016-10-24T03:09:12
| 2016-10-24T03:09:12
| 10,793,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Our epic programmer dict from before
epic_programmer_dict = {
'Tim Berners-Lee' : ['tbl@gmail.com', 111],
'Guido van Rossum' : ['gvr@gmail.com', 222],
'Linus Torvalds': ['lt@gmail.com', 333],
'Larry Page' : ['lp@gmail.com', 444],
'Sergey Brin' : ['sb@gmail.com', 555]
}
print epic_programmer_dict
|
[
"bemagee@gmail.com"
] |
bemagee@gmail.com
|
636fe7f33650c3bd29921d6bf95425a2aeeaef48
|
d09fd96bbc931fbb8522e5c991973f064a4ded50
|
/baxter/devel/.private/baxter_maintenance_msgs/lib/python2.7/dist-packages/baxter_maintenance_msgs/msg/_UpdateStatus.py
|
dcdfcbd9e5d9bc1182afd40950d3c1c371b7df12
|
[] |
no_license
|
rymonyu/EE4-Robotics
|
b3827ba0dff5bdfdd1e47fe07a40e955c5226f38
|
6cf9272abd7fe8a074dc74a032f6e0b35edb8548
|
refs/heads/master
| 2020-08-22T15:09:39.706809
| 2019-12-15T23:35:45
| 2019-12-15T23:35:45
| 216,420,098
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,704
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from baxter_maintenance_msgs/UpdateStatus.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class UpdateStatus(genpy.Message):
_md5sum = "74e246350421569590252c39e8aa7b85"
_type = "baxter_maintenance_msgs/UpdateStatus"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# See the class UpdateRunner()
# status: One-word description of the current action being performed
# long_description: Details pertaining to status if any. Used for verbose error messages.
uint16 status
float32 progress
string long_description
uint16 STS_IDLE = 0
uint16 STS_INVALID = 1
uint16 STS_BUSY = 2
uint16 STS_CANCELLED = 3
uint16 STS_ERR = 4
uint16 STS_MOUNT_UPDATE = 5
uint16 STS_VERIFY_UPDATE = 6
uint16 STS_PREP_STAGING = 7
uint16 STS_MOUNT_STAGING = 8
uint16 STS_EXTRACT_UPDATE = 9
uint16 STS_LOAD_KEXEC = 10
"""
# Pseudo-constants
STS_IDLE = 0
STS_INVALID = 1
STS_BUSY = 2
STS_CANCELLED = 3
STS_ERR = 4
STS_MOUNT_UPDATE = 5
STS_VERIFY_UPDATE = 6
STS_PREP_STAGING = 7
STS_MOUNT_STAGING = 8
STS_EXTRACT_UPDATE = 9
STS_LOAD_KEXEC = 10
__slots__ = ['status','progress','long_description']
_slot_types = ['uint16','float32','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
status,progress,long_description
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(UpdateStatus, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.status is None:
self.status = 0
if self.progress is None:
self.progress = 0.
if self.long_description is None:
self.long_description = ''
else:
self.status = 0
self.progress = 0.
self.long_description = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_Hf().pack(_x.status, _x.progress))
_x = self.long_description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.status, _x.progress,) = _get_struct_Hf().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.long_description = str[start:end].decode('utf-8')
else:
self.long_description = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_Hf().pack(_x.status, _x.progress))
_x = self.long_description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.status, _x.progress,) = _get_struct_Hf().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.long_description = str[start:end].decode('utf-8')
else:
self.long_description = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_Hf = None
def _get_struct_Hf():
global _struct_Hf
if _struct_Hf is None:
_struct_Hf = struct.Struct("<Hf")
return _struct_Hf
|
[
"rymonyu@gmail.com"
] |
rymonyu@gmail.com
|
6005b320155e884dcb8bc9c7994fc6807bcf4c35
|
aa7c6a9276a859f75b3c5181a92f71d7c19122a5
|
/zvt/domain/quotes/stock/stock_1m_kdata.py
|
f1378710d90001bd962e69feaa23a05bf88f493e
|
[
"MIT"
] |
permissive
|
Pengyuyan2/zvt
|
deef9c5e5bd91c65728ad9bac8c79499707519ee
|
9f9c77efcd34c04aaf11b12da0cf483cbe55e297
|
refs/heads/master
| 2023-07-12T16:55:15.040579
| 2021-08-22T09:41:33
| 2021-08-22T09:55:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# -*- coding: utf-8 -*-
# this file is generated by gen_kdata_schema function, dont't change it
from sqlalchemy.orm import declarative_base
from zvt.contract.register import register_schema
from zvt.domain.quotes import StockKdataCommon
KdataBase = declarative_base()
class Stock1mKdata(KdataBase, StockKdataCommon):
__tablename__ = 'stock_1m_kdata'
register_schema(providers=['joinquant'], db_name='stock_1m_kdata', schema_base=KdataBase, entity_type='stock')
# the __all__ is generated
__all__ = ['Stock1mKdata']
|
[
"5533061@qq.com"
] |
5533061@qq.com
|
86118937a3c5da7d22eb06c3ed34e49f7cfa2f11
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2711/47774/305852.py
|
a8c9c3680c535404ce3caf423c50014ec1f95130
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
def isSimilar(s1, s2):
diff, l = 0, len(s1)
for i in range(l):
if (s1[i] != s2[i]):
diff += 1
if (diff > 2):
return False
return True
def find(f, x):
return f[x] if x == f[x] else find(f, f[x])
def merge(f, x, y):
rx = find(f, f[x])
ry = find(f, f[y])
f[ry] = rx
def solve(A):
A = list(set(A))
l,w = len(A), len(A[0])
res = 0
f = [i for i in range(l)]
if l <= w*w:
for i in range(l):
for j in range(i + 1, l):
if (find(f, i) != find(f,j)):
isS = isSimilar(A[i], A[j])
if (isS):
merge(f, i, j)
else:
dict = {}
for i in range(l):
if (A[i] in dict):
dict[A[i]].add(i)
else:
dict[A[i]] = {i}
word = list(A[i])
for i0 in range(w):
for j0 in range(i0+1, w):
if (word[i0] != word[j0]):
word[i0],word[j0] = word[j0],word[i0]
neighbor = ''.join(word)
if (neighbor in dict):
dict[neighbor].add(i)
else:
dict[neighbor] = {i}
word[i0],word[j0] = word[j0],word[i0]
for i in range(l):
for j in dict[A[i]]:
merge(f,i,j)
for i in range(l):
if (i == f[i]):
res += 1
return res
s=eval(input())
print(solve(s))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5245bc11bfacf34d092a6630efd1e6ec7b5948a9
|
32809f6f425bf5665fc19de2bc929bacc3eeb469
|
/src/1096-Brace-Expansion-II/1096.py
|
78067156acba02fd1f032327859403cee51255d5
|
[] |
no_license
|
luliyucoordinate/Leetcode
|
9f6bf01f79aa680e2dff11e73e4d10993467f113
|
bcc04d49969654cb44f79218a7ef2fd5c1e5449a
|
refs/heads/master
| 2023-05-25T04:58:45.046772
| 2023-05-24T11:57:20
| 2023-05-24T11:57:20
| 132,753,892
| 1,575
| 569
| null | 2023-05-24T11:57:22
| 2018-05-09T12:30:59
|
C++
|
UTF-8
|
Python
| false
| false
| 723
|
py
|
import itertools
class Solution:
def braceExpansionII(self, expression):
groups = [[]]
level = 0
for i, c in enumerate(expression):
if c == '{':
if level == 0:
start = i+1
level += 1
elif c == '}':
level -= 1
if level == 0:
groups[-1].append(self.braceExpansionII(expression[start:i]))
elif level == 0:
if c == ",":
groups.append([])
else:
groups[-1].append([c])
return sorted(set().union(*[set(map(''.join, itertools.product(*group))) for group in groups]))
|
[
"luliyucoordinate@outlook.com"
] |
luliyucoordinate@outlook.com
|
6b19da70918b7711aee9f2fda10eb6fbec50ba0d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/126/usersdata/191/29517/submittedfiles/ap2.py
|
c8f2da701341911eecf630c83018954555844586
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# -*- coding: utf-8 -*-
a=float(input('digite a:'))
b=float(input('digite b:'))
c=float(input('digite c:'))
d=float(input('digite d:'))
if a>=b and b>=c and a>=d:
print(a)
elif b>=a and b>=c and b>=d:
print(b)
elif c>=a and c>=b and c>=d:
print(c)
else:
print(d)
if a<=b and a<=c and a<=d:
print(a)
elif b<=a and b<=c and c<=d:
print(b)
elif c<=a and c<=b and c<=d:
print(c)
else:
print(d)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1c3d00acafd76a610342ab1ef712ad250ee8870c
|
b2bdd5997ac84b0e19071c1ddc1c1a4d2f4fab58
|
/catkin_ws/devel/.private/p2/lib/python2.7/dist-packages/p2/msg/_Ackermann.py
|
0dff4e208b8c08e4de290b065cd192a52bee173e
|
[] |
no_license
|
hbtslys01/RosCodingProject
|
860d18531dabe4a969278deff5dbad8a8703ea83
|
226feda08724e92fd94191e123b9442c028283dd
|
refs/heads/master
| 2020-04-11T09:16:17.808626
| 2018-12-13T17:30:08
| 2018-12-13T17:30:08
| 161,671,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,768
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from p2/Ackermann.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Ackermann(genpy.Message):
_md5sum = "61c7e29a36f91d9c196a9722234d7472"
_type = "p2/Ackermann"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float64 steering_angle
float64 vel
"""
__slots__ = ['steering_angle','vel']
_slot_types = ['float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
steering_angle,vel
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Ackermann, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.steering_angle is None:
self.steering_angle = 0.
if self.vel is None:
self.vel = 0.
else:
self.steering_angle = 0.
self.vel = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2d().pack(_x.steering_angle, _x.vel))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.steering_angle, _x.vel,) = _get_struct_2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2d().pack(_x.steering_angle, _x.vel))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.steering_angle, _x.vel,) = _get_struct_2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2d = None
def _get_struct_2d():
global _struct_2d
if _struct_2d is None:
_struct_2d = struct.Struct("<2d")
return _struct_2d
|
[
"907098316@qq.com"
] |
907098316@qq.com
|
b26444ad2d6f2216e041816a9cd9a0238f7491e6
|
6d493d09085d4d398132204925078a179774f138
|
/melgan_vocoder.py
|
2ec8f713892afcce0d01ff4faa4f26ebc87935ea
|
[
"MIT"
] |
permissive
|
zongxiangli/CycleGAN-VC3
|
6a41f843b430fd307d9ea0b43aa5910816fba450
|
431b332fa17638391ca913e6821b526456fd874f
|
refs/heads/main
| 2023-02-21T02:19:39.058010
| 2021-01-25T09:49:00
| 2021-01-25T09:49:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,448
|
py
|
#!python
# -*- coding: utf-8 -*-
import os
import yaml
from pathlib import Path
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from feature_utils import Audio2Mel
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def WNConv1d(*args, **kwargs):
return weight_norm(nn.Conv1d(*args, **kwargs))
def WNConvTranspose1d(*args, **kwargs):
return weight_norm(nn.ConvTranspose1d(*args, **kwargs))
class ResnetBlock(nn.Module):
def __init__(self, dim, dilation=1):
super().__init__()
self.block = nn.Sequential(
nn.LeakyReLU(0.2),
nn.ReflectionPad1d(dilation),
WNConv1d(dim, dim, kernel_size=3, dilation=dilation),
nn.LeakyReLU(0.2),
WNConv1d(dim, dim, kernel_size=1),
)
self.shortcut = WNConv1d(dim, dim, kernel_size=1)
def forward(self, x):
return self.shortcut(x) + self.block(x)
class Generator(nn.Module):
def __init__(self, input_size, ngf, n_residual_layers):
super().__init__()
ratios = [8, 8, 2, 2]
self.hop_length = np.prod(ratios)
mult = int(2 ** len(ratios))
model = [
nn.ReflectionPad1d(3),
WNConv1d(input_size, mult * ngf, kernel_size=7, padding=0),
]
# Upsample to raw audio scale
for i, r in enumerate(ratios):
model += [
nn.LeakyReLU(0.2),
WNConvTranspose1d(
mult * ngf,
mult * ngf // 2,
kernel_size=r * 2,
stride=r,
padding=r // 2 + r % 2,
output_padding=r % 2,
),
]
for j in range(n_residual_layers):
model += [ResnetBlock(mult * ngf // 2, dilation=3 ** j)]
mult //= 2
model += [
nn.LeakyReLU(0.2),
nn.ReflectionPad1d(3),
WNConv1d(ngf, 1, kernel_size=7, padding=0),
nn.Tanh(),
]
self.model = nn.Sequential(*model)
self.apply(weights_init)
def forward(self, x):
return self.model(x)
def get_default_device():
if torch.cuda.is_available():
return "cuda"
else:
return "cpu"
def load_model(mel2wav_path, device=get_default_device()):
"""
Args:
mel2wav_path (str or Path): path to the root folder of dumped text2mel
device (str or torch.device): device to load the model
"""
root = Path(mel2wav_path)
with open(root / "args.yml", "r") as f:
args = yaml.load(f, Loader=yaml.FullLoader)
netG = Generator(args.n_mel_channels, args.ngf, args.n_residual_layers).to(device)
netG.load_state_dict(torch.load(root / "best_netG.pt", map_location=device))
return netG
class MelVocoder:
def __init__(
self,
path,
device=get_default_device(),
github=False,
model_name="multi_speaker",
):
self.fft = Audio2Mel().to(device)
if github:
netG = Generator(80, 32, 3).to(device)
root = Path(os.path.dirname(__file__)).parent
netG.load_state_dict(
torch.load(root / f"models/{model_name}.pt", map_location=device)
)
self.mel2wav = netG
else:
self.mel2wav = load_model(path, device)
self.device = device
def __call__(self, audio):
"""
Performs audio to mel conversion (See Audio2Mel in mel2wav/modules.py)
Args:
audio (torch.tensor): PyTorch tensor containing audio (batch_size, timesteps)
Returns:
torch.tensor: log-mel-spectrogram computed on input audio (batch_size, 80, timesteps)
"""
return self.fft(audio.unsqueeze(1).to(self.device))
def inverse(self, mel):
"""
Performs mel2audio conversion
Args:
mel (torch.tensor): PyTorch tensor containing log-mel spectrograms (batch_size, 80, timesteps)
Returns:
torch.tensor: Inverted raw audio (batch_size, timesteps)
"""
with torch.no_grad():
return self.mel2wav(mel.to(self.device)).squeeze(1)
|
[
"jackaduma@gmail.com"
] |
jackaduma@gmail.com
|
c72d9299bc10665a4db3242dbdca70d84cf13520
|
68ea05d0d276441cb2d1e39c620d5991e0211b94
|
/2714.py
|
c816933a2eed56ec8282d45061a5d42bbd7766f2
|
[] |
no_license
|
mcavalca/uri-python
|
286bc43aa157d3a6880dc222e0136c80cf079565
|
e22875d2609fe7e215f9f3ed3ca73a1bc2cf67be
|
refs/heads/master
| 2021-11-23T08:35:17.614443
| 2021-10-05T13:26:03
| 2021-10-05T13:26:03
| 131,339,175
| 50
| 27
| null | 2021-11-22T12:21:59
| 2018-04-27T19:54:09
|
Python
|
UTF-8
|
Python
| false
| false
| 221
|
py
|
n = int(input())
while n > 0:
n -= 1
ra = input()
saida = 'INVALID DATA'
if len(ra) == 20:
if ra[0:2] == 'RA':
if ra[2:].isdigit():
saida = int(ra[2:])
print(saida)
|
[
"m.cavalca@hotmail.com"
] |
m.cavalca@hotmail.com
|
c22cd593f5f83ae3732d104ca10c62e681b4363f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_159/609.py
|
1d4cb7959af1112bc540d578dcf82f9dfd5fc3ae
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
f = open('A-large.in')
#f = open('test.in')
count = int(f.readline())
output = ''
for x in xrange(1, count + 1):
platesCount = int(f.readline())
arr = f.readline().split()
case1 = 0
case2 = 0
case2MaxGap = 0
for i in xrange(0, platesCount - 1):
curPlate = int(arr[i])
nextPlate = int(arr[i+1])
gap = curPlate - nextPlate
case2MaxGap = max(case2MaxGap, gap)
if gap > 0:
case1 += gap
for j in xrange(0, platesCount - 1):
curPlate = int(arr[j])
if curPlate < case2MaxGap:
case2 += curPlate
else:
case2 += case2MaxGap
output += 'Case #' + str(x) + ': ' + str(case1) + ' ' + str(case2) + '\n'
print(output)
newf = open('output.txt','w')
newf.write(output)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0f3ce92a2ff9742a1df0452ef3c71ce7e361bd2b
|
f8ad6963bfc851657ea50c6a036cfad29cdd7f60
|
/Books/LearningTensorFlow/Chapter5_Text_Sequence_Tensorboard/scan_example.py
|
4cf7e1f4fa42316220ed1621d22dc6ddfdcbd77a
|
[] |
no_license
|
foru120/PythonRepository
|
e1ab0265c0f50ef2e9acdf7447237c913560692b
|
db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98
|
refs/heads/master
| 2021-01-01T06:53:11.728109
| 2019-04-25T13:52:50
| 2019-04-25T13:52:50
| 97,541,222
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
import numpy as np
import tensorflow as tf
elems = np.array(['T', 'e', 'n', 's', 'o', 'r', ' ', 'F', 'l', 'o', 'w'])
scan_sum = tf.scan(lambda a, x: a + x, elems)
sess = tf.InteractiveSession()
print(sess.run(scan_sum))
sess.close()
|
[
"broodsky1122@hanmail.net"
] |
broodsky1122@hanmail.net
|
5184f3b5a7ed92581d910e979f58f31d1a589646
|
cad83699bb72bc1d560815d5b1c34fc9b371f163
|
/cartridge/project_template/manage.py
|
4330dc2e1a9c13927f25c69c19985527dbbe4267
|
[
"BSD-3-Clause"
] |
permissive
|
BeUnique/cartridge
|
a1903ecc78029a576e57c3832b16357c7661b1b8
|
e5b887fed96d01ab93237f345fc420bcbe56d027
|
refs/heads/master
| 2021-01-19T06:00:01.090292
| 2011-08-31T01:04:33
| 2011-08-31T01:04:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
#!/usr/bin/env python
# When project_template is used as the actual project during Mezzanine
# development, insert the development path into sys.path so that the
# development version of Mezzanine is used rather than the installed version.
import os
import sys
project_path = os.path.dirname(os.path.abspath(__file__))
project_dir = project_path.split(os.sep)[-1]
if project_dir == "project_template":
dev_path = os.path.abspath(os.path.join(project_path, "..", ".."))
if dev_path not in sys.path:
sys.path.insert(0, dev_path)
import cartridge
cartridge_path = os.path.dirname(os.path.abspath(cartridge.__file__))
assert os.path.abspath(os.path.join(cartridge_path, "..")) == dev_path
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the "
"directory containing %r. It appears you've customized things.\n"
"You'll have to run django-admin.py, passing it your settings module.\n"
"(If the file settings.py does indeed exist, it's causing an "
"ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
|
[
"steve@jupo.org"
] |
steve@jupo.org
|
5278d0ebc39489eb80a4b0a82ecaf609f72027a7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03813/s431513228.py
|
c4298e11e8a394fafce0121831f9fbfa51e6a6ab
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
def read_int():
    """Read one line from stdin and return it as an int."""
    line = input()
    return int(line.strip())
def read_ints():
    """Read one line of space-separated integers from stdin as a list."""
    tokens = input().strip().split(' ')
    return [int(tok) for tok in tokens]
def solve():
    """Return 'ABC' for ratings below 1200, otherwise 'ARC'."""
    rating = read_int()
    return 'ABC' if rating < 1200 else 'ARC'
# Entry point: read the rating from stdin and print the contest name.
if __name__ == '__main__':
    print(solve())
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
0e10cbdfa5b1cd030ad2bdd01ff695d9fdb60938
|
d88868b88864e4d10009c58b27323034715b0277
|
/projects/barter/deployments/docker/barter/test.py
|
1ff68bc01c1f1c3d50a8992e741a015cb27fa14d
|
[
"Apache-2.0"
] |
permissive
|
shamal112mn/packer-1
|
795ebd9e0fee444f0cbb01897a50e199b73f1307
|
be2720e9cb61bf1110a880e94e32a6767a341588
|
refs/heads/master
| 2023-07-10T09:33:10.516559
| 2021-08-17T02:25:50
| 2021-08-17T02:25:50
| 327,763,823
| 0
| 0
| null | 2021-01-08T01:08:49
| 2021-01-08T01:08:48
| null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# Fetch the effective-spread data for TSLA from the IEX trading API (via
# RapidAPI) and pretty-print the JSON response.
import requests
import json
url = "https://investors-exchange-iex-trading.p.rapidapi.com/stock/tsla/effective-spread"
# NOTE(review): a live API credential is hard-coded in source; it should be
# moved to an environment variable or secrets store and this one revoked.
headers = {
    'x-rapidapi-key': "158cd4f9cdmsh0d92f8b92b1d427p1947b6jsn857aa1252e0b",
    'x-rapidapi-host': "investors-exchange-iex-trading.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers)
print(json.dumps(response.json(), indent=2))
|
[
"you@example.com"
] |
you@example.com
|
6f5a178c8d1ba0fb6bb65c7f38002457ca8ef23a
|
e3565e1ce607f60745f2a045aae8026661a6b99b
|
/resources/Onyx-1.0.511/py/onyx/util/rocutils.py
|
48dd8b474c58afe853ee45475fc9479842d375ed
|
[
"Apache-2.0"
] |
permissive
|
eternity668/speechAD
|
4c08d953b2ed06b3357b1c39d8709dd088a2471c
|
f270a1be86372b7044615e4fd82032029e123bc1
|
refs/heads/master
| 2021-01-12T22:10:33.358500
| 2014-02-03T16:03:28
| 2014-02-03T16:03:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,676
|
py
|
###########################################################################
#
# File: rocutils.py (directory: ./py/onyx/util)
# Date: Mon 10 Mar 2008 18:34
# Author: Ken Basye
# Description: Utility code for generating ROC and DET curves
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2008, 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
"""
Utilities for generating ROC and DET curves
"""
import StringIO
def _uniquify_preserving_first(iterable, eq_pred):
    """Collapse runs of eq_pred-equal items, yielding the FIRST of each run.

    Fixes two defects in the original:
    - ``item`` was rebound to ``next_item`` even when the two compared
      equal, so the function actually kept the *last* element of each run,
      contradicting its name and breaking make_DET_data's documented
      doctest output.
    - a bare ``except`` swallowed every exception (including bugs raised
      by ``eq_pred``), not just iterator exhaustion.
    Also uses the builtin ``next`` instead of the Python-2-only ``.next()``.
    """
    item = next(iterable)
    while 1:
        try:
            next_item = next(iterable)
        except StopIteration:
            yield item
            break
        if not eq_pred(item, next_item):
            yield item
            item = next_item
        # else: keep `item` — it is the first element of the current run
def _uniquify_preserving_last(iterable, eq_pred):
    """Collapse runs of eq_pred-equal items, yielding the LAST of each run.

    Behavior is unchanged; this only replaces the Python-2-only
    ``iterable.next()`` with the builtin ``next`` and narrows the bare
    ``except`` (which also swallowed KeyboardInterrupt and genuine bugs
    in ``eq_pred``) to StopIteration.
    """
    item = next(iterable)
    while 1:
        try:
            next_item = next(iterable)
        except StopIteration:
            yield item
            break
        if not eq_pred(item, next_item):
            yield item
        # whether or not we yielded, the newest item now represents the
        # current run
        item = next_item
def make_ROC_data(reference, ratios):
    """Build ROC-curve points from reference labels and scores.

    reference is a list of 0/1 correct classifications; ratios is a
    parallel list of numeric scores, higher meaning "more like 1".
    Returns a list of (false_pos_pct, true_pos_pct, threshold) triples,
    ordered from lowest threshold (fewest false positives) to highest
    (most true positives).  A typical ROC plot puts false_pos on X and
    true_pos on Y with linear scales.

    >>> ref = [0,0,0,0,0,1,1,1,1,1]
    >>> values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
    >>> res = make_ROC_data(ref, values)
    >>> res
    [(0.0, 0.0, 9), (20.0, 80.0, 4), (80.0, 100.0, 2)]
    """
    # A ROC point is the DET point with the miss rate flipped into a
    # true-positive rate.
    points = []
    for false_pos, miss, thresh in make_DET_data(reference, ratios):
        points.append((false_pos, 100 - miss, thresh))
    return points
def make_DET_data(reference, ratios):
    """
    reference is a list of 0/1 values which are the correct
    classifications values is a parallel list of numeric values, with
    higher values intending to map toward classifications of 1.
    Returns data for a DET curve in the form of a list of triples,
    where each triple contains the fraction of false positives as a
    percent, the fraction of false negatives, and the threshold value
    that generated those rates. The triples are ordered by threshold
    from lowest (fewest false positives) to highest (fewest misses)
    Note that a typical DET curve would plot false_pos on the X axis
    and false_neg on the Y axis, oftentimes with a normal deviate
    scale.
    >>> ref = [0,0,0,0,0,1,1,1,1,1]
    >>> values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
    >>> res = make_DET_data(ref, values)
    >>> res
    [(0.0, 100.0, 9), (20.0, 19.999999999999996, 4), (80.0, 0.0, 2)]
    """
    assert( len(reference) == len(ratios) )
    num_pos = reference.count(1)
    num_neg = reference.count(0)
    assert( num_pos + num_neg == len(reference))
    full_result = []
    # Find the list of interesting threshholds, which is any value in
    # the list of ratios
    # Seems like there should be an easier way to uniquify a list
    all_threshes = set(ratios)
    all_threshes = list(all_threshes)
    all_threshes.sort()
    def count_values_over_thresh(value, ref, ratios, t):
        # Count reference entries equal to `value` whose score exceeds t.
        result = 0
        for (i, r) in enumerate(ratios):
            if ref[i] == value and r > t:
                result += 1
        return result
    # Now find precision and recall at each threshold
    for thresh in all_threshes:
        num_neg_accepted = count_values_over_thresh(0, reference, ratios, thresh)
        num_pos_accepted = count_values_over_thresh(1, reference, ratios, thresh)
        full_result.append((100 * float(num_neg_accepted) / num_neg, # false positives
                            100 * (1 - float(num_pos_accepted) / num_pos), # misses
                            thresh))
    def eq0(x,y): return x[0] == y[0]
    def eq1(x,y): return x[1] == y[1]
    # Collapse runs with equal false-positive rate, then runs with equal
    # miss rate, and present the curve from highest threshold downward.
    iter1 = _uniquify_preserving_first(iter(full_result), eq0)
    ret = list(_uniquify_preserving_last(iter1, eq1))
    ret.reverse()
    return ret
def write_data_as_csv(data, stream, header_type="DET"):
    """Write ROC or DET triples to *stream* as comma-separated text.

    data is a sequence of 3-tuples as produced by make_DET_data /
    make_ROC_data.  header_type selects the header row: "DET" (default),
    "ROC", or None for no header.  Output is suitable for import into a
    spreadsheet or other tool.
    """
    if header_type == "DET":
        stream.write("False Alarm Rate, Miss Rate, Threshold")
    elif header_type == "ROC":
        stream.write("False Pos Rate, True Pos Rate, Threshold")
    # Each triple goes on its own line; the leading newline terminates the
    # previous line (or the header), and a final newline closes the output.
    for triple in data:
        stream.write("\n%s, %s, %s" % triple)
    stream.write("\n")
def _test0():
    # Ad-hoc manual check: dump DET data for the doctest example to a file.
    ref = [0,0,0,0,0,1,1,1,1,1]
    values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
    res = make_DET_data(ref, values)
    s = open("foo_csv.txt", "w")
    write_data_as_csv(res, s)
    s.close()
# Entry point: defer to the Onyx project's main startup hook; _test0 is
# kept (commented out) for manual runs.
if __name__ == '__main__':
    from onyx import onyx_mainstartup
    onyx_mainstartup()
    # _test0()
|
[
"nassos@n12mavra.cs.ntua.gr"
] |
nassos@n12mavra.cs.ntua.gr
|
4438a410880898850073b4bc83f77e73ca792121
|
eadd15064aa74811e7a3718b617636627ef4fd47
|
/web/migrations/0020_rename_index_indexpage.py
|
8efb4605a936d00a29a46a2c95ef6c4263e63c65
|
[] |
no_license
|
topsai/plasrefine_backstage
|
262f7bb032daa4d018aac1519e1139cb060c3f91
|
1eb34dd0b13ebdc2a42dd6ed1aaa2d08c18ab5fb
|
refs/heads/master
| 2023-04-12T13:24:22.710108
| 2021-05-08T14:16:41
| 2021-05-08T14:16:41
| 361,993,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
# Generated by Django 3.2 on 2021-05-02 08:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('web', '0019_auto_20210502_1558'),
]
operations = [
migrations.RenameModel(
old_name='Index',
new_name='IndexPage',
),
]
|
[
"hurte@foxmail.com"
] |
hurte@foxmail.com
|
241ac7b70c2142fba7ff196677ed61d5e0910d2f
|
587290dbd33c5fb60a154eacd2155e681a3f9ecf
|
/js/gatsby/index.cgi
|
db257841537410c023fb93f34bb0b3e0a10dcd00
|
[] |
no_license
|
jaredly/prog
|
b6408db52c16e9d3c322933f0624c23663d33ce0
|
e8fe82ccd1abe42371adbb3f317576facac546ca
|
refs/heads/master
| 2021-01-20T09:12:48.931999
| 2013-08-29T04:24:36
| 2013-08-29T04:24:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,523
|
cgi
|
#!/usr/bin/python
print 'Content-type:text/html\n'
import cgi,cgitb,os,sys,re
cgitb.enable()
def load_chapter(chap):
    """Return the full text of chapter ``chap`` from chapter<N>.txt.

    Fix: the original left the file handle open; ``with`` guarantees it
    is closed even if read() raises.
    """
    with open('chapter%s.txt' % chap) as fh:
        return fh.read()
def load_chapters():
    """Load chapters 1-9 as [chapter_number, text] pairs."""
    return [[num, load_chapter(num)] for num in range(1, 10)]
def print_entry(at, chap, pageat, item):
    """Format one search hit as an HTML snippet.

    at: paragraph index; chap: chapter number; pageat: approximate page;
    item: the (already marked-up) paragraph body.
    """
    ordinal_suffixes = {1: "<sup>st</sup>", 2: "<sup>nd</sup>", 3: "<sup>rd</sup>"}
    nd = ordinal_suffixes.get(at, "<sup>th</sup>")
    header = "<br><br><br><b>%s%s</b> paragraph in chapter <b>%s</b> (around page %s)<br><br>\n" % (at, nd, chap, pageat)
    return header + item
form = cgi.FieldStorage()
print """
<html><head><title>Great Gatsby Search</title></head><body>
<style>
span {
font-weight: bold;
font-size: 1.1em;
color: black;
background-color: #ccc;
}
h2 {
text-align:center;
}
div.searchform {
background-color:#BBFFAA;
border:2px solid green;
padding:15px;
position:absolute;
right:0px;
top:0px;
}
form {
margin: 0px;
}
</style>
<h1>Search the Great Gatsby</h1>
<div class="searchform">
<form method="GET">
Search For: <input name="s" value="%s"> <input type="checkbox" name="whole" value="1"> Whole word
<input type="submit" value="Search">
</form>
</div>
<br>"""%(form.has_key("s") and form["s"].value or "")
pages = [1, 23, 39, 61, 81, 97, 113, 147, 163, 180 ] ## None ## [3, 16, 26, 39, 52, 62, 93, 103]
retr = ""
num = 0
if form.has_key('s'):
term = form['s'].value.strip()
iterm=term
if form.has_key('whole'):
term='(?<=\W)'+term+'(?=\W)'
for chapter,text in load_chapters():
for i,body in enumerate(text.split('\n')):
all = re.search(term,body,re.I|re.S)
if pages:
pchap = pages[chapter-1]
#print text.find(body),len(text)
pat = int(round(float(pages[chapter]-pchap)* (text.find(body)/float(len(text)))+pchap))
else:
pat = ""
rgx = re.compile(term,re.I)
bdy = rgx.sub(lambda x:'<span>'+x.group()+'</span>', body)+'<br><br>'
## bdy = re.sub(term, lambda x:'<span>'+x.group()+'</span>', body)+'<br><br>'
if all:
## print (text.find(body)/float(len(text))),float(pages[chapter]-pchap)
## print float(pages[chapter]-pchap)*(text.find(body)/float(len(text)))+pchap
retr += print_entry(i,chapter,pat,bdy)
num+=1
print "<h3>Found %d results for %s</h3>"%(num,iterm)
print retr
|
[
"jared@jaredforsyth.com"
] |
jared@jaredforsyth.com
|
21bc2d0fbd981fbefdd919c846357da41182c5ac
|
e48eac671ea8335f696ec5fd0511b12800b0fca0
|
/accounts/models.py
|
6625a130b79945d6d4613bcc9a0b047c32339541
|
[] |
no_license
|
linusidom/django-basic-reservation-system
|
8fba10708cebd42d5ad308c0ef838a0fe1ac8778
|
5dd5db4832fe83e862424af18aae3aad6cf3f1ed
|
refs/heads/master
| 2020-03-26T08:37:15.052362
| 2018-08-14T11:34:19
| 2018-08-14T11:34:19
| 144,711,607
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
from django.db import models
# Create your models here.
from django.contrib.auth.models import AbstractUser
from django.shortcuts import reverse
class Profile(AbstractUser):
    """Custom user model with a target weight for the reservation app."""

    # Target weight; 185 is the project default (units unspecified —
    # presumably pounds, confirm with the product owner).
    ideal_weight = models.IntegerField(default=185)

    def __str__(self):
        return self.email

    def get_absolute_url(self):
        # Bug fix: the original referenced a bare `pk`, which is undefined
        # and raised NameError at runtime; the primary key lives on the
        # instance.
        return reverse('accounts:profile_detail', kwargs={'pk': self.pk})
|
[
"linusidom@gmail.com"
] |
linusidom@gmail.com
|
f8fdac6b1a2846a9f74f7db1f038fed9022ab0a4
|
5dd03f9bd8886f02315c254eb2569e4b6d368849
|
/3rdparty/python/GitPython-0.3.1-py2.6.egg/git/__init__.py
|
500d053f7729d7172f300870e30b00ae7a523f09
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
adamsxu/commons
|
9e1bff8be131f5b802d3aadc9916d5f3a760166c
|
9fd5a4ab142295692994b012a2a2ef3935d35c0b
|
refs/heads/master
| 2021-01-17T23:13:51.478337
| 2012-03-11T17:30:24
| 2012-03-11T17:30:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
# __init__.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
import sys
import inspect
__version__ = '0.3.1'
#{ Initialization
def _init_externals():
"""Initialize external projects by putting them into the path"""
sys.path.append(os.path.join(os.path.dirname(__file__), 'ext', 'gitdb'))
try:
import gitdb
except ImportError:
raise ImportError("'gitdb' could not be found in your PYTHONPATH")
#END verify import
#} END initialization
#################
_init_externals()
#################
#{ Imports
from git.config import GitConfigParser
from git.objects import *
from git.refs import *
from git.diff import *
from git.exc import *
from git.db import *
from git.cmd import Git
from git.repo import Repo
from git.remote import *
from git.index import *
from git.util import (
LockFile,
BlockingLockFile,
Stats,
Actor
)
#} END imports
__all__ = [ name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj)) ]
|
[
"jsirois@twitter.com"
] |
jsirois@twitter.com
|
996aac45cc3fff5b7b5a9eb0567f864fdb8f7981
|
8c4af05e0257661195c95b0b9e0873eeb6391dab
|
/packages/python-packages/apiview-gpt/src/_models.py
|
85d435a1aa1dd9ca713a5cccc403df55f16ebc0d
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/azure-sdk-tools
|
6d171054800807fcbe7b8b878c5903a202d31faa
|
2dce521dedc3f5169007d4c481ae8ec077be4450
|
refs/heads/main
| 2023-09-01T00:00:32.662190
| 2023-08-31T22:21:44
| 2023-08-31T22:21:44
| 170,592,186
| 113
| 174
|
MIT
| 2023-09-14T21:53:41
| 2019-02-13T22:55:46
|
C#
|
UTF-8
|
Python
| false
| false
| 744
|
py
|
from pydantic import BaseModel, Field
from typing import List, Optional
# One guideline violation reported by the review pass.  Field semantics are
# carried in the Field(description=...) metadata; comments are used here
# instead of a class docstring so the model's generated schema is unchanged.
class Violation(BaseModel):
    rule_ids: List[str] = Field(description="unique rule ID or IDs that were violated.")
    line_no: Optional[int] = Field(description="the line number of the violation.")
    bad_code: str = Field(description="the original code that was bad, cited verbatim.")
    suggestion: str = Field(description="the suggested fix for the bad code.")
    comment: str = Field(description="a comment about the violation.")
# Aggregate outcome of a guidelines check: overall status plus the list of
# violations (empty on success).
class GuidelinesResult(BaseModel):
    status: str = Field(description="Succeeded if the request has no violations. Error if there are violations.")
    violations: List[Violation] = Field(description="list of violations if any")
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
ad74fb1924b3b8c48c2d09b0bbc03cd105920500
|
af97a81a69d100e684b8e9f02bc1b117069b9895
|
/Testcases_Copilot/CWE-730/codeql_2.py
|
1c59968d10ace977410229a13e67c6c9188734a4
|
[] |
no_license
|
s2e-lab/SecurityEval
|
8de3e2f78fffa7058d79cce84fbcd8ff29c8468d
|
f637f67b8e8d52d641ab15903e46a41ead7e30a5
|
refs/heads/main
| 2023-06-17T22:55:08.047769
| 2023-06-16T16:54:54
| 2023-06-16T16:54:54
| 509,664,226
| 28
| 7
| null | 2023-05-27T04:55:05
| 2022-07-02T05:10:17
|
Python
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
from flask import request
import re
@app.route("/direct")
def direct():
    '''
    Compile a regular-expression pattern taken from the request query
    string and use it to search `text`.

    SECURITY (CWE-730): compiling and running an attacker-supplied pattern
    enables regular-expression denial of service (ReDoS); the input should
    be validated or escaped with re.escape() before use.  (This file is a
    deliberate vulnerability test case.)

    NOTE(review): `app` and `text` are not defined in this snippet — they
    must be supplied by the surrounding application; confirm before reuse.
    '''
    pattern = request.args.get('pattern')
    regex = re.compile(pattern)
    return regex.search(text)
|
[
"lsiddiqsunny@gmail.com"
] |
lsiddiqsunny@gmail.com
|
aeff8452dda485b474d6f78dec9db2f3f258e6ff
|
80d1f1f50b9f7f6be17302df7695154f54aa7a76
|
/pdp8/core.py
|
3691cb7deeb3ab2b53a7ab5d3ba6877bf6d1f838
|
[
"MIT"
] |
permissive
|
romilly/pdp8
|
af7f7df0136b31df03a4f53b709869a59f25ca2c
|
68e1025c5e4c6b6fa92a62cc81117d841c214137
|
refs/heads/master
| 2020-03-13T15:24:12.955602
| 2019-06-19T05:58:50
| 2019-06-19T05:58:50
| 131,176,107
| 4
| 1
| null | 2018-05-16T07:13:14
| 2018-04-26T15:31:01
|
HTML
|
UTF-8
|
Python
| false
| false
| 7,983
|
py
|
from io import StringIO
from pdp8.tracing import NullTracer
def octal(string):
    """Interpret *string* as a base-8 numeral and return its integer value."""
    return int(string, base=8)
# Decode masks for the PDP-8 OPR (operate) instruction, all 12-bit octal.
OPR_GROUP1 = octal('0400')
OPR_GROUP2 = octal('0001')
# Group 1 microinstruction bits (see the matching PDP8 methods below).
CLA1 = octal('0200')
CLL = octal('0100')
CMA = octal('0040')
CML = octal('0020')
RAR = octal('0010')
RAL = octal('0004')
RTR = octal('0012')
RTL = octal('0006')
IAC = octal('0001')
# Group 2 microinstruction bits.
HALT = octal('0002')
BIT8 = octal('0010')
# Memory-reference instruction addressing bits: page-zero and indirect.
Z_BIT = octal('0200')
I_BIT = octal('0400')
class PDP8:
    """Software model of a DEC PDP-8: 12-bit words, 4K memory, an
    accumulator plus 1-bit link, and a program counter.

    Instructions are dispatched through ``self.ops`` by their 3-bit
    opcode; opcode 7 (``opr``) is further decoded into Group 1 / Group 2
    microinstructions using the module-level bit masks.
    """
    # TODO simplify these, use constants rather than calculating?
    W_BITS = 12 # number of bits in a word
    W_MASK = 2 ** W_BITS - 1 # word mask
    OP_BITS = 3 # 3 bits in the opcode
    V_BITS = 7 # 7 bits for the value part of an instruction
    OP_MASK = (2 ** OP_BITS - 1) << W_BITS - OP_BITS
    V_MASK = 2 ** V_BITS - 1 # mask for instruction data
    MAX = 2 ** (V_BITS - 1)
    def __init__(self):
        self.memory = 2 ** self.W_BITS * [0]
        self.pc = 0
        self.accumulator = 0
        self.link = 0
        self.running = False
        self.debugging = False
        self.stepping = False
        self.ia = None
        self.instruction = None
        self.tape = StringIO('')
        # Device numbers for the paper-tape reader and punch.
        self.READER1 = 0o03
        self.PUNCH1 = 0o04
        self.punchflag = 0
        self.output = ''
        self.tracer = None
        # Opcode dispatch table, indexed by the 3-bit opcode (0-7).
        self.ops = [self.andi,
                    self.tad,
                    self.isz,
                    self.dca,
                    self.jms,
                    self.jmp,
                    self.iot,
                    self.opr]
    def __getitem__(self, address):
        # Memory read, truncated to a 12-bit word.
        return self.memory[address] & self.W_MASK # only 12 bits retrieved
    def is_group1(self):
        return 0 == self.i_mask(OPR_GROUP1)
    def i_mask(self, mask):
        # Test bits of the current instruction against `mask`.
        return self.instruction & mask
    def is_iac(self):
        return 0 != self.i_mask(IAC)
    def is_group2(self):
        return (not self.is_group1()) and 0 == self.i_mask(OPR_GROUP2)
    # Group 2
    def is_halt(self):
        return self.i_mask(HALT)
    def __setitem__(self, address, contents):
        # Memory write, truncated to a 12-bit word; notify the tracer when
        # debugging.
        self.memory[address] = contents & self.W_MASK # only 12 bits stored
        if self.debugging:
            self.tracer.setting(address, contents)
    def run(self, debugging=False, start=None, tape='', stepping=None, tracer=None):
        """Execute instructions until halted (or one step when stepping)."""
        self.running = True
        if tracer is not None:
            self.tracer = tracer
        else:
            if self.tracer is None:
                self.tracer = NullTracer()
        if start:
            self.pc = start
        # TODO: smarter tape creation to cope with text and binary tapes.
        self.tape = StringIO(tape)
        if stepping is not None:
            self.stepping = stepping
        self.debugging = debugging
        while self.running:
            self.execute()
            if self.stepping:
                self.running = False
    def execute(self):
        """Fetch, decode and dispatch the instruction at the current pc."""
        old_pc = self.pc # for debugging
        self.instruction = self[self.pc]
        self.ia = self.instruction_address()
        op = self.opcode()
        # pc is advanced before dispatch, so jumps simply overwrite it.
        self.pc += 1
        self.ops[op]()
        if self.debugging:
            self.tracer.instruction(old_pc, self.instruction, self.accumulator, self.link, self.pc)
    def opcode(self):
        # Extract the top 3 bits of the instruction word.
        bits = self.i_mask(self.OP_MASK)
        code = bits >> self.W_BITS - self.OP_BITS
        return code
    def andi(self):
        # AND memory operand into the accumulator.
        self.accumulator &= self[self.ia]
    def tad(self):
        # Two's-complement add of the memory operand into the accumulator.
        self.add_12_bits(self[self.ia])
    def add_12_bits(self, increment):
        # 12-bit add: the link records whether the sum overflowed a word.
        self.accumulator += increment
        total = self.accumulator
        self.accumulator &= octal('7777')
        if self.accumulator == total:
            self.link = 0
        else:
            self.link = 1
    def isz(self):
        # Increment the operand and skip the next instruction if it wraps
        # to zero.
        contents = self[self.ia]
        contents += 1
        self[self.ia] = contents # forces 12-bit value
        if self[self.ia] == 0:
            self.pc += 1 # skip
    def dca(self):
        # Deposit the accumulator in memory and clear it.
        self[self.ia] = self.accumulator
        self.accumulator = 0
    def jmp(self):
        self.pc = self.ia
    def jms(self):
        # Jump to subroutine: store the return address at ia, continue
        # at ia + 1.
        self[self.ia] = self.pc
        self.pc = self.ia + 1
    def iot(self):
        # I/O transfer: bits 3-8 select the device, bits 0-2 the operation.
        device = (self.instruction & 0o0770) >> 3
        io_op = self.instruction & 0o0007
        if device == self.READER1:
            self.reader(io_op)
        elif device == self.PUNCH1:
            self.punch(io_op)
        else:
            raise ValueError('uknown device')
    def opr(self):
        # Operate instruction: decode into Group 1 or Group 2
        # microinstructions.
        if self.is_group1():
            self.group1()
            return
        if self.is_group2():
            self.group2()
            return
        raise ValueError('Unknown opcode in instruction 0o%o at %d(%o)' % (self.instruction, self.pc-1, self.pc-1) )
    def instruction_address(self):
        """Resolve the operand address: page-relative unless the Z bit is
        set, with one level of indirection when the I bit is set."""
        o = self.i_mask(self.V_MASK)
        if not self.i_mask(Z_BIT):
            o += self.pc & 0o7600
        if self.i_mask(I_BIT):
            o = self[o]
        return o
    def cla(self):
        self.accumulator = 0
    def cll(self):
        self.link = 0
    def cma(self):
        # Complement the accumulator.
        self.accumulator ^= 0o7777
    def cml(self):
        # Complement the link.
        self.link = 1-self.link
    def rr(self):
        # Rotate right; bit 1 of the instruction selects a double rotate.
        self.rar(0 < self.i_mask(2))
    def rar(self, flag):
        # Rotate accumulator+link right once (twice when flag is set).
        count = 2 if flag else 1
        for i in range(count):
            new_link = self.accumulator & 0o0001
            self.accumulator = self.accumulator >> 1
            if self.link:
                self.accumulator |= 0o4000
            self.link = new_link
    def rl(self):
        # Rotate left; bit 1 of the instruction selects a double rotate.
        self.ral(self.i_mask(2))
    def ral(self, flag):
        # Rotate accumulator+link left once (twice when flag is set).
        count = 2 if flag else 1
        for i in range(count):
            new_link = 1 if self.accumulator & 0o4000 else 0
            self.accumulator = 0o7777 & self.accumulator << 1
            if self.link:
                self.accumulator |= 0o0001
            self.link = new_link
    def iac(self):
        # Increment the accumulator.
        self.add_12_bits(1)
    def halt(self):
        if self.debugging:
            print('Halted')
            self.tracer.halt(self.pc)
        self.running = False
    def group1(self):
        # Execute every Group 1 microinstruction whose bit is set, in the
        # order listed here.
        for (mask, ins) in zip([ CLA1, CLL, CMA, CML, IAC, RAR, RAL],
                               [self.cla, self.cll, self.cma, self.cml, self.iac,self.rr, self.rl]):
            if self.i_mask(mask):
                ins()
    def is_or_group(self):
        return not self.i_mask(BIT8)
    def is_and_group(self):
        return self.i_mask(BIT8)
    def group2(self):
        # Skip tests: OR-group skips if ANY selected condition holds;
        # AND-group skips if ALL selected conditions hold.
        if self.is_or_group() and (self.sma() or self.sza() or self.snl()):
            self.pc += 1
        if self.is_and_group() and self.spa() and self.sna() and self.szl():
            self.pc += 1
        if self.is_cla2():
            self.cla()
        if self.is_halt():
            self.halt()
    def sma(self):
        # Skip-on-minus-accumulator condition (when its bit is selected).
        return self.accumulator_is_negative() and (self.i_mask(octal('0100')))
    def accumulator_is_negative(self):
        # Sign bit of the 12-bit accumulator.
        return self.accumulator & octal('4000')
    def sza(self):
        # Skip-on-zero-accumulator condition.
        return self.accumulator == 0 and (self.i_mask(octal('0040')))
    def snl(self):
        # Skip-on-nonzero-link condition.
        return self.link == 1 and (self.i_mask(octal('0020')))
    def spa(self):
        # AND-group condition: true when the bit is unselected or the
        # accumulator is non-negative.
        return self.accumulator_is_positive() or not (self.i_mask(octal('0100')))
    def accumulator_is_positive(self):
        return not self.accumulator_is_negative()
    def sna(self):
        return self.accumulator != 0 or not (self.i_mask(octal('0040')))
    def szl(self):
        return self.link == 0 or not (self.i_mask(octal('0020')))
    def reader(self, io_op):
        # Paper-tape reader: not implemented yet.
        pass
    def punch(self, io_op):
        # Paper-tape punch: bit 0 skips when the punch flag is set, bit 1
        # clears the flag, bit 2 punches the accumulator as a character.
        if (io_op & 1) and self.punchflag:
            self.pc += 1
        if io_op & 2:
            self.punchflag = 0
        if io_op & 4:
            if self.accumulator != 0:
                self.output += str(chr(self.accumulator))
            self.punchflag = 1
    def is_cla2(self):
        return self.instruction & octal('0200')
|
[
"romilly.cocking@gmail.com"
] |
romilly.cocking@gmail.com
|
def4e0c9060cbb6946a984f723129a8064a91715
|
16679038c7a0b75097ffdd2d5b6be28ae8dae68f
|
/test/utilities/test_catch_exceptions.py
|
74b112680efb4b09be050e92f0ab2d6cc4bcdc2b
|
[
"MIT"
] |
permissive
|
elifesciences/profiles
|
d98e5c2391630f9877e0585e07143d7904f1e777
|
9cd2e523f9dfa864891511e6525381f191951b24
|
refs/heads/develop
| 2023-08-31T03:09:08.723797
| 2023-08-25T06:54:55
| 2023-08-25T06:54:55
| 94,993,646
| 2
| 0
|
MIT
| 2023-06-21T01:15:37
| 2017-06-21T10:43:52
|
Python
|
UTF-8
|
Python
| false
| false
| 959
|
py
|
import logging
from logging import Handler, Logger, Manager
from logging.handlers import BufferingHandler
from pytest import fixture
from profiles.utilities import catch_exceptions
@fixture
def logger(handler: Handler) -> Logger:
    """Build a DEBUG-level logger that writes only into the buffering handler."""
    logger = Logger('logger', logging.DEBUG)
    logger.addHandler(handler)
    # NOTE(review): installs a fresh Manager — presumably to detach this
    # logger from the global logging hierarchy during tests; confirm.
    logger.manager = Manager('root')
    return logger
@fixture
def handler() -> Handler:
    """In-memory handler that buffers up to 100 log records for inspection."""
    return BufferingHandler(100)
def test_it_catches_and_logs_exceptions(logger: Logger, handler: BufferingHandler):
    """A decorated function that raises returns None and logs one record."""
    @catch_exceptions(logger)
    def exploding():
        raise Exception('My exception')

    assert exploding() is None
    assert len(handler.buffer) == 1
def test_it_does_nothing_when_no_exception(logger: Logger, handler: BufferingHandler):
    """A clean function passes its return value through and logs nothing."""
    @catch_exceptions(logger)
    def well_behaved():
        return True

    assert well_behaved() is True
    assert len(handler.buffer) == 0
|
[
"noreply@github.com"
] |
elifesciences.noreply@github.com
|
d193d711f2be24fe4204a34d2b1a3b14eda09afd
|
d40ab8694389d1a0d80013a2b0ecd8c426e6e8f8
|
/graphs/scc.py
|
5847ec8cbff74c175a28bd22a6d879601af33ceb
|
[] |
no_license
|
lukebiggerstaff/Stanford-Algorithms-MOOC
|
b5b34c8d8ff7725461fd03bb3aac505c87a1012e
|
382a30f27dff6ca9d30c071a4d3418ff6333f4c3
|
refs/heads/master
| 2021-01-01T17:02:26.883373
| 2017-11-14T19:06:14
| 2017-11-14T19:06:14
| 97,980,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,171
|
py
|
import sys
import re
import resource
from collections import defaultdict
# Kosaraju's DFS passes are recursive over very large graphs: raise both
# Python's recursion limit and the OS stack-size limit for this process.
sys.setrecursionlimit(10 ** 6)
resource.setrlimit(resource.RLIMIT_STACK, (2 ** 29, 2 ** 30))
def dfsfirstpass(graph):
    """First Kosaraju pass: DFS every node, recording finishing order on a stack."""
    finish_order = []
    seen = set()
    for node in graph.keys():
        node_key = str(node)
        if node_key in graph:
            dfsfirstpassrecursive(graph, node_key, finish_order, seen)
    return finish_order
def dfsfirstpassrecursive(graph, start, stack, visited):
    """Recursive DFS from `start`; append each node to `stack` when finished."""
    if start in visited:
        return
    visited.add(start)
    for edge in graph.get(start, []):
        if edge not in visited:
            dfsfirstpassrecursive(graph, edge, stack, visited)
    stack.append(start)
def dfssecondpass(rgraph, stack):
    """Second Kosaraju pass: pop nodes in reverse finishing order; each new
    unvisited node leads a strongly connected component."""
    visited = set()
    leaderlist = defaultdict(list)
    while stack:
        start = stack.pop()
        if start in visited:
            continue
        visited.add(start)
        leaderlist[start].append(start)
        for edge in set(rgraph[start]) - visited:
            dfsrecursive(rgraph, edge, visited, leaderlist, start)
    return leaderlist
def dfsrecursive(graph, start, visited, leaderlist, leader):
    """Recursively mark `start` visited and assign it to `leader`'s component."""
    visited.add(start)
    leaderlist[leader].append(start)
    unexplored = set(graph[start]) - visited
    for edge in unexplored:
        dfsrecursive(graph, edge, visited, leaderlist, leader)
def return_top_five_scc(leaderlist):
    """Return the sizes of the five largest components, smallest first."""
    sizes = sorted(len(members) for members in leaderlist.values())
    return sizes[-5:]
def kosaraju(graph, rgraph):
    """Run Kosaraju's algorithm and return the five largest SCC sizes.

    graph maps node -> successor list; rgraph is the same graph with
    every edge reversed.
    """
    finish_stack = dfsfirstpass(rgraph)
    leaders = dfssecondpass(graph, finish_stack)
    return return_top_five_scc(leaders)
if __name__ == '__main__':
    graph = defaultdict(list)
    rgraph = defaultdict(list)
    # Input file (argv[1]): one edge per line, "tail head".  Build the
    # graph and its reverse; the reverse graph drives the first DFS pass.
    with open(sys.argv[1]) as f:
        for line in f:
            line_lst = re.findall(r'(\d+|\w+)',line)
            graph[line_lst[0]] += [line_lst[1]]
            rgraph[line_lst[1]] += [line_lst[0]]
    print(kosaraju(graph,rgraph))
|
[
"luke.biggerstaff@gmail.com"
] |
luke.biggerstaff@gmail.com
|
91c8298a8f35841bf72996c47795505cf4afd03a
|
65c001b5f572a6b0ca09dd9821016d628b745009
|
/frappe-bench/env/lib/python2.7/site-packages/watchdog/observers/polling.py
|
3039ceb3678ce611aeccc6d88d0586c3f632a5e2
|
[
"MIT"
] |
permissive
|
ibrahmm22/library-management
|
666dffebdef1333db122c2a4a99286e7c174c518
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
refs/heads/master
| 2022-10-30T17:53:01.238240
| 2020-06-11T18:36:41
| 2020-06-11T18:36:41
| 271,620,992
| 0
| 1
|
MIT
| 2022-10-23T05:04:57
| 2020-06-11T18:36:21
|
CSS
|
UTF-8
|
Python
| false
| false
| 4,687
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.polling
:synopsis: Polling emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
Classes
-------
.. autoclass:: PollingObserver
:members:
:show-inheritance:
.. autoclass:: PollingObserverVFS
:members:
:show-inheritance:
:special-members:
"""
from __future__ import with_statement
import os
import threading
from functools import partial
from watchdog.utils import stat as default_stat
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.events import (
DirMovedEvent,
DirDeletedEvent,
DirCreatedEvent,
DirModifiedEvent,
FileMovedEvent,
FileDeletedEvent,
FileCreatedEvent,
FileModifiedEvent
)
class PollingEmitter(EventEmitter):
"""
Platform-independent emitter that polls a directory to detect file
system changes.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
stat=default_stat, listdir=os.listdir):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._snapshot = None
self._lock = threading.Lock()
self._take_snapshot = lambda: DirectorySnapshot(
self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir)
def queue_events(self, timeout):
if not self._snapshot:
self._snapshot = self._take_snapshot()
# We don't want to hit the disk continuously.
# timeout behaves like an interval for polling emitters.
if self.stopped_event.wait(timeout):
return
with self._lock:
if not self.should_keep_running():
return
# Get event diff between fresh snapshot and previous snapshot.
# Update snapshot.
new_snapshot = self._take_snapshot()
events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
self._snapshot = new_snapshot
# Files.
for src_path in events.files_deleted:
self.queue_event(FileDeletedEvent(src_path))
for src_path in events.files_modified:
self.queue_event(FileModifiedEvent(src_path))
for src_path in events.files_created:
self.queue_event(FileCreatedEvent(src_path))
for src_path, dest_path in events.files_moved:
self.queue_event(FileMovedEvent(src_path, dest_path))
# Directories.
for src_path in events.dirs_deleted:
self.queue_event(DirDeletedEvent(src_path))
for src_path in events.dirs_modified:
self.queue_event(DirModifiedEvent(src_path))
for src_path in events.dirs_created:
self.queue_event(DirCreatedEvent(src_path))
for src_path, dest_path in events.dirs_moved:
self.queue_event(DirMovedEvent(src_path, dest_path))
class PollingObserver(BaseObserver):
"""
Platform-independent observer that polls a directory to detect file
system changes.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout)
class PollingObserverVFS(BaseObserver):
    """
    File system independent observer that polls a directory to detect changes.
    """

    def __init__(self, stat, listdir, polling_interval=1):
        """
        :param stat: stat function. See ``os.stat`` for details.
        :param listdir: listdir function. See ``os.listdir`` for details.
        :type polling_interval: float
        :param polling_interval: interval in seconds between polling the file system.
        """
        # Bind the injected stat/listdir into the emitter so snapshots go
        # through the caller-supplied (possibly virtual) file system.
        emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir)
        BaseObserver.__init__(self, emitter_class=emitter_cls, timeout=polling_interval)
|
[
"iabouelftouh@trudoc24x7.com"
] |
iabouelftouh@trudoc24x7.com
|
64bef1b8d66e25515d68a737b143f8d15d5675ce
|
7790e3a3f2de068fef343585ec856983591997a2
|
/bank/migrations/0021_followlawtype.py
|
67c1e5c0f8ed434aeb042dbf4b3e27f516602279
|
[] |
no_license
|
mehdi1361/tadbir
|
ce702a9a02672826f0bf06e8d5cf0644efe31949
|
c0a67710099f713cf96930e25df708625de89a6f
|
refs/heads/master
| 2021-06-04T07:35:37.624372
| 2018-07-23T05:25:04
| 2018-07-23T05:25:04
| 148,870,028
| 0
| 0
| null | 2019-10-22T21:40:28
| 2018-09-15T04:40:26
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-11 16:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11 (makemigrations); creates the
    # `follow_low_type` table backing the FollowLawType model.
    # Avoid hand-editing once this migration has been applied.

    dependencies = [
        ('bank', '0020_auto_20180510_1351'),
    ]

    operations = [
        migrations.CreateModel(
            name='FollowLawType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ایجاد')),
                ('update_at', models.DateTimeField(auto_now=True, verbose_name='تاریخ بروزرسانی')),
                ('type', models.CharField(max_length=100, verbose_name='نوع پیگیری')),
                ('enable', models.BooleanField(default=False, verbose_name='فعال')),
            ],
            options={
                'verbose_name': 'پیگیری حقوقی',
                'verbose_name_plural': 'پیگیری های حقوقی',
                'db_table': 'follow_low_type',
            },
        ),
    ]
|
[
"mhd.mosavi@gmail.com"
] |
mhd.mosavi@gmail.com
|
5497eed0b98d3d44dc25ed39c7376e7800f9fcaa
|
350cb6c7c7a7842e80aa06ee32bfffc5bc35ee03
|
/programming/language/python/python-pillow/actions.py
|
09179eb1424c0d20883d92c49aeb6480d96ba765
|
[] |
no_license
|
LimeLinux/Packages-2
|
f41d11343e8b39274ccd85b9850d0f4e76830031
|
356975df129f2097f12dbed3bc2604cadb5a6c64
|
refs/heads/master
| 2021-04-30T23:25:31.121967
| 2017-01-21T21:46:54
| 2017-01-21T21:46:54
| 79,139,920
| 0
| 2
| null | 2017-01-21T21:46:55
| 2017-01-16T17:02:37
|
Python
|
UTF-8
|
Python
| false
| false
| 823
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import pisitools
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
#WorkDir="Imaging-%s" % get.srcVERSION()
def install():
    """Patch the freetype include paths, then build/install the Python
    imaging module and its public headers for the pisi package."""
    # freetype headers moved under freetype2/; patch the sources before build.
    pisitools.dosed("_imagingft.c", "<freetype/freetype.h>", "<freetype2/freetype.h>")
    pisitools.dosed("_imagingft.c", "<freetype/fterrors.h>", "<freetype2/fterrors.h>")
    pythonmodules.install()
    #shelltools.cd("Sane")
    #pythonmodules.install()
    #shelltools.cd("..")
    # Ship the C headers so other packages can compile against Imaging.
    for header in ["Imaging.h","ImPlatform.h"]:
        pisitools.insinto("/usr/include/%s" % get.curPYTHON(), "libImaging/%s" % header)
    pisitools.dodoc("README.rst")
|
[
"ergunsalman@hotmail.com"
] |
ergunsalman@hotmail.com
|
c63f6c71799ea453d1f3eec67be2aff4089d9177
|
bc41457e2550489ebb3795f58b243da74a1c27ae
|
/fabfile.py
|
36e45dab0037e8a64b682e70626dadcb3e9d14de
|
[] |
no_license
|
SEL-Columbia/ss_sql_views
|
28a901d95fe779b278d2a51aec84d6bf51245c02
|
d146fd96849a4d165f3dc3f197aadda804a2f60a
|
refs/heads/master
| 2021-01-01T19:35:18.999147
| 2012-05-10T18:43:36
| 2012-05-10T18:43:36
| 3,020,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 1,649
|
py
|
'''
fabfile for offline gateway tasks
'''
import datetime as dt
from fabric.api import local, lcd, run, env
env.hosts = ['gateway.sharedsolar.org']
env.user = 'root'
def sync_db():
    """Download today's gateway DB dump, load it locally and rebuild views."""
    # NOTE(review): `time` and `file` shadow a module/builtin name; rename
    # when this file is next touched.
    time = dt.datetime.now().strftime('%y%m%d')
    file = 'gateway.' + time + '.sql.zip'
    url = 'root@gateway.sharedsolar.org'
    path = 'var/lib/postgresql/backups/'
    # Work inside a throwaway ./temp directory, removed afterwards.
    local('mkdir temp')
    with lcd('temp'):
        download_db(url, path, file)
        load_db(path, file)
        create_views()
    local('rm -rf temp')
    show_disk_space()
def download_db(url, path, file):
    """scp the zipped SQL dump from the gateway and unzip it in place."""
    # create local temp folder
    print 'Creating temporary folder ./temp'
    # create timestamp
    # create string for getting database
    # scp database
    print 'Downloading database from gateway'
    local('scp ' + url + ':/' + path + file + ' .')
    # locally unzip database
    print 'Expanding database'
    local('unzip ' + file)
def load_db(path, file):
    """Recreate the local `gateway` database from the downloaded dump."""
    # if database exists, dropdb
    local('dropdb gateway')
    # create db
    local('createdb gateway')
    # load database
    print 'Loading database'
    # file[:-4] strips the '.zip' suffix, leaving the plain .sql filename.
    local('psql -d gateway -f ' + path + file[:-4])
def create_views():
    """Run every SQL view-definition script against the local gateway DB."""
    print 'Executing create_views'
    # execute all sql files
    local('psql -d gateway -f views/create_view_primary_log.sql')
    local('psql -d gateway -f views/create_view_midnight.sql')
    local('psql -d gateway -f views/create_view_meter.sql')
    local('psql -d gateway -f views/create_view_alarms.sql')
    local('psql -d gateway -f views/create_view_solar.sql')
    local('psql -d gateway -f views/create_view_recharge.sql')
def show_disk_space():
    """Report free disk space on the remote gateway host."""
    run('df -h')
|
[
"danielrsoto@gmail.com"
] |
danielrsoto@gmail.com
|
b620e42042438f0ddf82969a5e2f05dcf02a8e23
|
3922c05b9434bb5a96f7833a987c50c8e3e29107
|
/news/admin.py
|
6881fe98a61e98e3099d1a8b53bfb646d84da9fa
|
[
"MIT"
] |
permissive
|
jasonmuchiri/moringa-tribune
|
e7769dca9aa2e7a9cdc62be56c3071104ba30f33
|
ceabe0cf9cc136b6eb5072253aef09f43bea7040
|
refs/heads/master
| 2020-05-23T23:19:55.111831
| 2019-05-18T21:32:44
| 2019-05-18T21:32:44
| 186,990,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from django.contrib import admin
from .models import Article,tags
# Register your models here.
class ArticleAdmin(admin.ModelAdmin):
    # Render the Article.tags many-to-many field with the two-pane
    # horizontal selector widget in the admin form.
    filter_horizontal = ('tags',)

# Expose Article (with the custom admin) and tags in the Django admin.
admin.site.register(Article,ArticleAdmin)
admin.site.register(tags)
|
[
"jasonmkinyua@gmail.com"
] |
jasonmkinyua@gmail.com
|
6bd5fb8e2cc28159a3d0726aa5efc0e21295b713
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/contour/_textsrc.py
|
43bd0d62ed17e92c16a553b953658aaf6d67f0be
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `contour.textsrc` property (a column-source ref)."""

    def __init__(self, plotly_name="textsrc", parent_name="contour", **kwargs):
        # Default the edit type to "none" unless the caller supplied one.
        kwargs.setdefault("edit_type", "none")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
6ad6148e7378b35ec5dbeeb2a493dbad852d7119
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/61f29ff81b664630acba807a6f4f14e9.py
|
b34cc9defb4248705933ec5d41f5347dc783be44
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
#
# Skeleton file for the Python "Bob" exercise.
#
def hey(what):
    """Respond to a remark per the Bob exercise rules.

    Shouting (all caps with letters) -> "Whoa, chill out!"; silence
    (empty/whitespace only) -> "Fine. Be that way!"; a question
    (ends with '?') -> "Sure."; anything else -> "Whatever.".
    """
    if what.upper() == what and any(c.isalpha() for c in what):
        return "Whoa, chill out!"
    # Fix: the original returned "Fine. Be that way!" for ANY remark shorter
    # than 7 characters; the rule is about silence (blank input), not length.
    if what.strip() == "":
        return "Fine. Be that way!"
    # Ignore trailing whitespace when checking for a question.
    if what.strip().endswith("?"):
        return "Sure."
    return "Whatever."
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
557c96972141d1a75b7f45e4289a642a6390440e
|
08dfaf714830a6310742dcd50848790d595e838e
|
/位运算/code_01_EvenTimesOddTimes.py
|
c16881e90ab21aa241caa096e317d2dd06fa949c
|
[] |
no_license
|
Tokyo113/leetcode_python
|
d9e0fb96a76efaadcec7aad08f5ef542d898d434
|
e86b3fb26aef1cf63727e3e5c9fd4ddc9bedb7f1
|
refs/heads/master
| 2020-08-10T15:36:10.364714
| 2020-04-13T08:28:53
| 2020-04-13T08:28:53
| 214,369,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
#coding:utf-8
'''
@Time: 2019/12/4 21:54
@author: Tokyo
@file: code_01_EvenTimesOddTimes.py
@desc:
1.一个数组中有一种数出现了奇数次,其他数都出现了偶数次,怎么找到这一个数
2.一个数组中有两种数出现了奇数次,其他数都出现了偶数次,怎么找到这两个数
'''
def findOddTimes1(arr):
    """Return the one value that occurs an odd number of times in arr.

    XOR of all elements: values with even counts cancel pairwise, leaving
    only the odd-count value.
    """
    result = 0
    for value in arr:
        result ^= value
    return result
def findOddTimes2(arr):
    """Return the two values (a, b) that each occur an odd number of times.

    XOR of everything yields a ^ b; its lowest set bit is a bit where a and
    b differ, which splits the array into two groups each containing exactly
    one odd-count value.
    """
    xor_all = 0
    for value in arr:
        xor_all ^= value
    # Lowest set bit of a ^ b (two's complement trick: x & -x).
    lowest_bit = xor_all & (-xor_all)
    first = 0
    for value in arr:
        if value & lowest_bit == 0:
            first ^= value
    return first, first ^ xor_all
if __name__ == '__main__':
    # Smoke test both helpers.
    a = [1, 2, 3, 2, 1, 2, 4, 4, 3, 2, 5]
    print(findOddTimes1(a))
    b = [4, 3, 4, 2, 2, 1, 4, 1, 1, 1, 3, 3, 1, 1, 1, 4, 2, 2]
    print(findOddTimes2(b))
    # Fix: removed `print(find2(b))` — `find2` is not defined anywhere in
    # this module, so that line raised NameError when the script was run.
|
[
"21810179@zju.edu.cn"
] |
21810179@zju.edu.cn
|
39c078ee69d1098e1c91f37879882232c475e2f0
|
59b0ebc4249f20edd0e87dc63784c6e8c138c7fd
|
/.history/fibonacci_20180603232558.py
|
0f355ae930f9f8d834a1e6a158738d3573e77163
|
[] |
no_license
|
Los4U/first_python_programs
|
f397da10be3ef525995f3f220e3b60012a6accaa
|
c3fc33a38c84abd292cb2e86de63e09434fc7fc4
|
refs/heads/master
| 2020-03-22T08:09:40.426118
| 2018-07-04T17:17:58
| 2018-07-04T17:17:58
| 139,748,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
# Print the first `user_input` Fibonacci numbers as "<index> <value>".
# NOTE(review): `i` is never updated, so the commented-out `if i < 30`
# guard (and its else branch) is dead code.
i = 0
j = 1
k = 0
fib = 0
user_input = int(input("How many numbers print out? : "))
for fn in range(user_input):
    #if i < 30:
    # Right-align the value in a 10-character column.
    print('{0:2d} {1:>10}'.format(fn, fib))
    #print(fib)
    # Advance the (j, k) pair: fib is the next term.
    fib = j+k
    j = k
    k = fib
    #else:
    # print("3")
|
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
85c2a8dc30a8c4d16a1497f4bad44935f7ca19d2
|
81485dc96f7539730bee976c7e8e3d5929c3df77
|
/ProjectEuler145.py
|
a3fd109802113ff296456c4d9fc9e471357fb859
|
[] |
no_license
|
zfhrp6/PE
|
6462621f5cb3812c7d8d9f591ad66382490661e2
|
4e64f6549bd50fb4c1ee5f580a76764935e35360
|
refs/heads/master
| 2020-04-05T13:45:56.936758
| 2015-12-11T08:29:50
| 2015-12-11T08:29:50
| 4,495,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
project euler problem 145
ある正の整数nについて、[n + reverse(n)]が奇数のみで表されるようなnが存在する。
えば、36 + 63 = 99, 409 + 904 = 1313 のように。この性質を持つ数を、reversibleと呼ぶことにする。
つまり、36, 63, 409, 904はrevesibleである。
先頭の0はnでもreverse(n)でも許されない。
1000未満には120個のreversibleな数が存在する。
10億(10^9)未満では、いくつのreversibleな数が存在するか。
"""
import time

t0 = time.time()
answer = 0
i = 0
# Count "reversible" numbers below 10^9: n where every digit of
# n + reverse(n) is odd. Brute force over all candidates.
while i < 10 ** 9:
    i += 1
    # reverse(n) may not have a leading zero, so skip multiples of 10.
    if i % 10 == 0:
        continue
    # Progress indicator, roughly once per million candidates.
    if i % 1000000 == 1:
        print(i)
    num = i + int(str(i)[::-1])
    # Reject if the sum contains any even digit (or zero).
    if "0" in str(num) or "2" in str(num) or "4" in str(num) or "6" in str(num) or "8" in str(num):
        continue
    else:
        answer += 1

print(answer)
print(time.time() - t0, "seconds")
|
[
"coricozizi@gmail.com"
] |
coricozizi@gmail.com
|
5b9ed6ed0530e8623a9bbac53c115fadbaf8fb92
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_recesses.py
|
a34a6d1f64dbe47f008faa9c0c762b260b8b828f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
#calss header
class _RECESSES():
def __init__(self,):
self.name = "RECESSES"
self.definitions = recess
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['recess']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0f6bff7af88112200164ee73a63e93548e0b7606
|
1094e533594d6fbdf4a0f605b06a1954336b52e8
|
/index/views.py
|
586009091d84dd75a9a807174d8ade7c1949bc90
|
[] |
no_license
|
leezhiyong08/friutday
|
ac424c31bc2dd54aa61e76f13b8264042b4ba741
|
16f6a25d827f64fe88a526adf3e51de543b1c2de
|
refs/heads/master
| 2020-04-24T01:14:11.321113
| 2019-02-16T13:40:16
| 2019-02-16T13:40:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,810
|
py
|
import json
from django.core import serializers
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import *
# Create your views here.
def index_views(request):
    """Render the site home page."""
    return render(request,'index.html')
# /login view: GET renders the login form (honouring session/cookie
# auto-login); POST authenticates against the Users table.
def login_views(request):
    url = '/'
    if request.method == 'GET':
        # Already logged in this session -> straight back home.
        if 'uid' in request.session and 'uphone' in request.session:
            return redirect(url)
        # "Remember me" cookies present -> restore the session and redirect.
        if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
            request.session['uid'] = request.COOKIES['uid']
            request.session['uphone'] = request.COOKIES['uphone']
            return redirect(url)
        # No login info anywhere: show the login form.
        form = LoginForm()
        return render(request, 'login.html', locals())
    # POST: look the credentials up in the database.
    uphone = request.POST['uphone']
    upwd = request.POST['upwd']
    uList = Users.objects.filter(uphone=uphone, upwd=upwd)
    if uList:
        # Fix: authenticate against the DB query result instead of the
        # hard-coded debug credentials ('13511225566'/'123456') and the
        # hard-coded uid '01' left in the original code; the intended
        # logic was present but commented out.
        uid = uList[0].id
        request.session['uid'] = uid
        request.session['uphone'] = uphone
        resp = redirect(url)
        if 'isSaved' in request.POST:
            # "Remember me": persist the login info in cookies for ~1 year.
            expires = 60 * 60 * 24 * 366
            resp.set_cookie('uid', uid, expires)
            resp.set_cookie('uphone', uphone, expires)
        return resp
    # Authentication failed: back to the login page with an error message.
    form = LoginForm()
    errMsg = "用户名或密码不正确"
    return render(request, 'login.html', locals())
# /register view
def register_views(request):
    """GET: show the registration form. POST: create the user and log them in."""
    if request.method == 'GET':
        return render(request,'register.html')
    else:
        # Collect the registration fields from the POST body.
        dic ={
            "uphone":request.POST['uphone'],
            "upwd":request.POST['upwd'],
            "uname":request.POST['uname'],
            "uemail":request.POST['uemail'],
        }
        # Insert the new user row.
        Users(**dic).save()
        # Re-query by phone number to get the generated primary key.
        u = Users.objects.get(uphone=request.POST['uphone'])
        # Mark the user as logged in by storing id/phone in the session.
        request.session['uid'] = u.id
        request.session['uphone'] = u.uphone
        return redirect('/')
# Check whether a phone number is already registered -> /check_uphone/
def check_uphone_views(request):
    """AJAX endpoint: JSON status "0" if the phone number exists, "1" if free."""
    # NOTE(review): a non-POST request falls through and returns None
    # (HTTP 500); confirm the endpoint is only ever called via POST.
    if request.method == 'POST':
        # Phone number posted by the front end.
        uphone = request.POST['uphone']
        uList = Users.objects.filter(uphone=uphone)
        if uList:
            # Already registered: status 0 plus a user-facing message.
            dic = {
                "status":"0",
                "text":'手机号码已存在',
            }
            return HttpResponse(json.dumps(dic))
        else:
            # Free to register.
            dic = {
                "status":"1",
                "text":"可以注册",
            }
            return HttpResponse(json.dumps(dic))
# Check whether the user is logged in; if so, return the user record as JSON.
def check_login_views(request):
    # Session holds login info -> user is logged in.
    if 'uid' in request.session and 'uphone' in request.session:
        # Look the user up by the session uid.
        uid = request.session['uid']
        user = Users.objects.get(id=uid)
        # Build the JSON response payload.
        dic = {
            "status":'1',
            'user':json.dumps(user.to_dict())
        }
        return HttpResponse(json.dumps(dic))
    else:
        # Fall back to the "remember me" cookies.
        if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
            # Restore the session from the cookies.
            uid = request.COOKIES['uid']
            uphone = request.COOKIES['uphone']
            request.session['uid']=uid
            request.session['uphone']=uphone
            # Return the matching user record, serialized as JSON.
            user = Users.objects.get(id=uid)
            jsonStr = json.dumps(user.to_dict())
            dic = {
                "status":"1",
                "user":jsonStr,
            }
            return HttpResponse(json.dumps(dic))
        else:
            # No login info in session or cookies.
            dic = {
                "status":0,
                'text':'用户尚未登录'
            }
            # NOTE(review): this POST branch builds tmp_resp with a cookie
            # but returns a fresh redirect instead, so the cookie is never
            # sent to the client — looks like leftover/debug code; confirm
            # the intent before cleaning it up.
            if request.method == 'POST':
                tmp_url = '/'
                uphone = request.POST['uphone']
                tmp_resp = redirect(tmp_url)
                tmp_expires = 60 * 60 * 24 * 366
                tmp_resp.set_cookie('uphone', uphone, tmp_expires)
                return redirect(tmp_url)
            return HttpResponse(json.dumps(dic))
# Log out:
# clear the login info from the session and cookies,
# then send the user back where they came from.
def logout_views(request):
    # Referer if available, otherwise the home page.
    url = request.META.get('HTTP_REFERER','/')
    resp = redirect(url)
    # Drop session login info, if present.
    if 'uid' in request.session and 'uphone' in request.session:
        del request.session['uid']
        del request.session['uphone']
    # Drop the "remember me" cookies, if present.
    if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
        resp.delete_cookie('uid')
        resp.delete_cookie('uphone')
    return resp
def type_goods_views(request):
    """Return every goods type with its goods as a JSON list of
    {"type": <type json>, "goods": <serialized goods list>} entries."""
    all_list=[]
    types=GoodsType.objects.all()
    # NOTE(review): the loop variable `type` shadows the builtin; rename
    # when this code is next touched.
    for type in types:
        type_json=json.dumps(type.to_dic())
        # Reverse FK: all goods belonging to this type.
        g_list=type.goods_set.all()
        g_list_json=serializers.serialize('json',g_list)
        dic={
            'type':type_json,
            'goods':g_list_json,
        }
        all_list.append(dic)
    return HttpResponse(json.dumps(all_list))
|
[
"lvze@tedu.cn"
] |
lvze@tedu.cn
|
980b466c28c5040171706e805a75717fbb69f66d
|
ed7342bcfd051d5280c444f5a625fac507ef9b53
|
/demo/basics/sum_of_numbers_v2.py
|
724f67c49fef9989060ad053d4ae302ff4759cd0
|
[] |
no_license
|
srikanthpragada/PYTHON_19_MAR_2021
|
55f86289e7d6be5398c18ad9f52bfd4d81563827
|
20cd95481c1fc4c156d1fed01e29cb3b09b03333
|
refs/heads/master
| 2023-04-06T02:37:52.657864
| 2021-05-05T03:01:31
| 2021-05-05T03:01:31
| 350,551,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
# Take numbers until 0 is given and display sum of numbers
# (0 itself is not added to the total).
total = 0
while True:
    num = int(input("Enter a number [0 to stop] :"))
    if num == 0:
        break  # Terminate loop
    total += num

print(f"Total = {total}")
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
d617aaac35275cf070b7f5bd47f28582080b01ae
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/kosmos-2/fairseq/fairseq/models/speech_to_speech/__init__.py
|
d34883552596496799514422e5a895376d02f735
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 248
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .modules import * # noqa
from .s2s_transformer import * # noqa
|
[
"1083127130@qq.com"
] |
1083127130@qq.com
|
ef835c8ab8f9b1d665e298b1da78b17ab7380731
|
135d2c02b3ad706573bdfafa75ebc14bd170ef97
|
/firedex-static/sdn-controller/sdn_controller.py
|
d19c338814f705b0ff1ecde6b5649d74806fa4f2
|
[] |
no_license
|
boulouk/firedex
|
4afc6467bd83e096051d941699e59f1be806a46c
|
187012986f4adf85d017e84a64db7c9bb1f447b0
|
refs/heads/master
| 2022-06-06T01:56:38.464322
| 2019-11-24T09:44:03
| 2019-11-24T09:44:03
| 138,659,150
| 2
| 1
| null | 2022-05-20T20:55:18
| 2018-06-25T23:09:54
|
Python
|
UTF-8
|
Python
| false
| false
| 376
|
py
|
from ryu.cmd import manager
# Ryu application modules to load: topology discovery plus flow management.
applications = ["topology_application", "flow_application"]

def run_controller(applications):
    """Start the Ryu SDN controller with the given application modules."""
    arguments = []
    arguments.extend(applications)
    # Emit link-discovery events and enable the remote debugger.
    arguments.append("--observe-links")
    arguments.append("--enable-debugger")
    # Hands control to Ryu's manager; does not return until shutdown.
    manager.main( args = arguments )

if __name__ == '__main__':
    run_controller(applications = applications)
|
[
"lucascalz8@gmail.com"
] |
lucascalz8@gmail.com
|
23766bceb270d73585937f8eb705efca167b4426
|
c3b739b07214507bf1023b926c19d30784623e98
|
/segme/model/cascade_psp/refine.py
|
b8419f1aa09101135ce9339c1be00c9ec1fa696d
|
[
"MIT"
] |
permissive
|
templeblock/segme
|
20a96787500c46483cb7af0db917207fcedafb0b
|
8192ed066558c1ea1e7283805b40da4baa5b3827
|
refs/heads/master
| 2023-08-30T12:31:39.327283
| 2021-11-11T17:08:40
| 2021-11-11T17:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,917
|
py
|
import cv2
import numpy as np
import tensorflow as tf
from keras import backend
from tensorflow_hub import KerasLayer
class Refiner:
    """Refine coarse segmentation masks with a CascadePSP TF-Hub model.

    Runs a resized "global" pass over the whole image, then (unless
    ``fast=True``) a sliding-window "local" pass at full resolution over
    crops whose mask is still ambiguous.
    """

    def __init__(self, hub_uri, max_size=900):
        # max_size bounds the longer image side fed to the network; it is
        # also the sliding-window crop size in the local step.
        self.model = KerasLayer(hub_uri)
        self.max_size = max_size
        # Reusable variables with dynamic spatial dims; inputs are assigned
        # into them in _safe_predict before each model call.
        self.image = tf.Variable(
            shape=(1, None, None, 3), dtype='uint8', initial_value=np.zeros((1, 0, 0, 3)).astype(np.uint8))
        self.mask = tf.Variable(
            shape=(1, None, None, 1), dtype='uint8', initial_value=np.zeros((1, 0, 0, 1)).astype(np.uint8))
        self.prev = tf.Variable(
            shape=(1, None, None, 1), dtype='uint8', initial_value=np.zeros((1, 0, 0, 1)).astype(np.uint8))

    def __call__(self, image, mask, fast=False):
        """Refine ``mask`` for ``image``; ``fast=True`` skips the local pass."""
        fine, coarse = self._global_step(image, mask)
        if fast:
            return fine

        return self._local_step(image, fine, coarse)

    def _global_step(self, image, mask):
        """Predict on the whole image resized so max side == max_size,
        then resize both outputs back to the original resolution."""
        height_width = image.shape[:2]
        if max(height_width) < self.max_size:
            # Upscaling: cubic for the image, linear for the mask.
            image = Refiner._resize_max_side(image, self.max_size, cv2.INTER_CUBIC)
            mask = Refiner._resize_max_side(mask, self.max_size, cv2.INTER_LINEAR)
        elif max(height_width) > self.max_size:
            # Downscaling: area interpolation avoids aliasing.
            image = Refiner._resize_max_side(image, self.max_size, cv2.INTER_AREA)
            mask = Refiner._resize_max_side(mask, self.max_size, cv2.INTER_AREA)

        fine, coarse = self._safe_predict(image, mask)

        # Invert the resize applied above.
        if max(height_width) < self.max_size:
            fine = Refiner._resize_fixed_size(fine, height_width, interpolation=cv2.INTER_AREA)
            coarse = Refiner._resize_fixed_size(coarse, height_width, interpolation=cv2.INTER_AREA)
        elif max(height_width) > self.max_size:
            fine = Refiner._resize_fixed_size(fine, height_width, interpolation=cv2.INTER_LINEAR)
            coarse = Refiner._resize_fixed_size(coarse, height_width, interpolation=cv2.INTER_LINEAR)

        return fine, coarse

    def _local_step(self, image, fine, coarse, padding=16):
        """Re-run prediction on overlapping full-resolution crops whose mask
        is ambiguous, averaging the overlapping results into ``fine``."""
        height, width = fine.shape[:2]
        # Accumulators: sum of predictions and how many crops covered a pixel.
        grid_mask = np.zeros_like(fine, dtype=np.uint32)
        grid_weight = np.zeros_like(fine, dtype=np.uint32)
        # Crops overlap by half the window plus the padding margins.
        step_size = self.max_size // 2 - padding * 2
        used_start_idx = set()

        for x_idx in range(width // step_size + 1):
            for y_idx in range(height // step_size + 1):
                start_x = x_idx * step_size
                start_y = y_idx * step_size
                end_x = start_x + self.max_size
                end_y = start_y + self.max_size

                # Shift when required
                if end_x > width:
                    end_x = width
                    start_x = width - self.max_size
                if end_y > height:
                    end_y = height
                    start_y = height - self.max_size

                # Bound x/y range
                start_x = max(0, start_x)
                start_y = max(0, start_y)
                end_x = min(width, end_x)
                end_y = min(height, end_y)

                # The same crop might appear twice due to bounding/shifting
                start_idx = start_y * width + start_x
                if start_idx in used_start_idx:
                    continue
                used_start_idx.add(start_idx)

                # Take crop
                part_image = image[start_y:end_y, start_x:end_x, :]
                part_mask = fine[start_y:end_y, start_x:end_x]
                part_prev = coarse[start_y:end_y, start_x:end_x]

                # Skip when it is not an interesting crop anyway
                # (almost fully foreground or fully background).
                part_mean = (part_mask > 127).astype(np.float32).mean()
                if part_mean > 0.9 or part_mean < 0.1:
                    continue

                grid_fine, _ = self._safe_predict(part_image, part_mask, part_prev)

                # Padding: trim the crop borders (unless at the image edge)
                # to discard boundary artifacts before accumulating.
                pred_sx = pred_sy = 0
                pred_ex = self.max_size
                pred_ey = self.max_size

                if start_x != 0:
                    start_x += padding
                    pred_sx += padding
                if start_y != 0:
                    start_y += padding
                    pred_sy += padding
                if end_x != width:
                    end_x -= padding
                    pred_ex -= padding
                if end_y != height:
                    end_y -= padding
                    pred_ey -= padding

                grid_mask[start_y:end_y, start_x:end_x] += grid_fine[pred_sy:pred_ey, pred_sx:pred_ex]
                grid_weight[start_y:end_y, start_x:end_x] += 1

        # Final full resolution output: average overlaps, keep the global
        # result where no crop was evaluated.
        grid_weight_ = grid_weight.astype(np.float32) + backend.epsilon()
        grid_mask = np.round(grid_mask.astype(np.float32) / grid_weight_).astype(np.uint8)
        fine = np.where(grid_weight == 0, fine, grid_mask)

        return fine

    def _safe_predict(self, image, mask, prev=None):
        """Validate dtypes/ranks, pad, run the model, crop and rescale to uint8."""
        if len(image.shape) != 3:
            raise ValueError('Wrong image supplied')
        if image.dtype != 'uint8':
            raise ValueError('Wrong image dtype')

        if len(mask.shape) != 2:
            raise ValueError('Wrong mask supplied')
        if mask.dtype != 'uint8':
            raise ValueError('Wrong mask dtype')

        if prev is not None and len(prev.shape) != 2:
            raise ValueError('Wrong prev supplied')
        if prev is not None and prev.dtype != 'uint8':
            raise ValueError('Wrong prev dtype')

        height, width = image.shape[:2]
        # NOTE(review): padding by `height % 8` does NOT round the size up to
        # a multiple of 8 (e.g. 10 -> 12); rounding up would be
        # `(-height) % 8`. Confirm the model's actual input constraint.
        _image = np.pad(image, ((0, height % 8), (0, width % 8), (0, 0)))
        _mask = np.pad(mask, ((0, height % 8), (0, width % 8)))
        # When no previous (coarse) mask is given, reuse the mask itself.
        _prev = _mask if prev is None else np.pad(prev, ((0, height % 8), (0, width % 8)))

        self.image.assign(_image[None, ...])
        self.mask.assign(_mask[None, ..., None])
        self.prev.assign(_prev[None, ..., None])

        fine, coarse = self.model([self.image, self.mask, self.prev])
        # Crop the padding back off and convert [0, 1] floats to uint8.
        fine, coarse = fine[0, :height, :width, 0], coarse[0, :height, :width, 0]
        fine = np.round(fine * 255).astype(np.uint8)
        coarse = np.round(coarse * 255).astype(np.uint8)

        return fine, coarse

    @staticmethod
    def _resize_max_side(image, max_size, interpolation=cv2.INTER_LINEAR):
        """Scale so the longer side equals max_size, preserving aspect ratio."""
        if len(image.shape) > 3 or len(image.shape) < 2:
            raise ValueError('Wrong image supplied')

        aspect = max_size / max(image.shape[:2])

        return cv2.resize(image, (0, 0), fx=aspect, fy=aspect, interpolation=interpolation)

    @staticmethod
    def _resize_fixed_size(image, height_width, interpolation=cv2.INTER_LINEAR):
        """Resize to an exact (height, width); cv2 expects (width, height)."""
        if len(image.shape) > 3 or len(image.shape) < 2:
            raise ValueError('Wrong image supplied')
        if len(height_width) != 2:
            raise ValueError('Wrong desired size supplied')

        return cv2.resize(image, height_width[::-1], interpolation=interpolation)
|
[
"shkarupa.alex@gmail.com"
] |
shkarupa.alex@gmail.com
|
771a2bf6caaa7ad3e08d7d92a9dd0f6c8d49b9a8
|
f74119a55ff5d4e89f5b7fb7da24a23828e1c203
|
/test_labeler.py
|
0ee0907d1d02f413876674b0d058a669f89f461d
|
[
"MIT"
] |
permissive
|
mdlaskey/yolo_labeler
|
3f15dd229f6a5e01e508c5141345ff9363717b94
|
93463ee54ee8773e7c2ce2368a95c4c1102e712c
|
refs/heads/master
| 2021-08-16T00:50:10.238386
| 2017-09-20T22:49:40
| 2017-09-20T22:49:40
| 96,812,011
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,526
|
py
|
import os,sys
import xml.etree.ElementTree as ET
import numpy as np
import cv2
import cPickle
import copy
import glob
import yolo.config as cfg
import cPickle as pickle
import IPython
class TestLabeler(object):
    """Sanity-check helpers for YOLO labels: dump a frame's pickled label,
    display its image, and build the grid label tensor (Python 2 code)."""

    def __init__(self):
        # Pull all paths and sizes from the YOLO config module.
        self.cache_path = cfg.CACHE_PATH
        self.image_path = cfg.IMAGE_PATH
        self.label_path = cfg.LABEL_PATH
        self.batch_size = cfg.BATCH_SIZE
        self.image_size = cfg.IMAGE_SIZE
        self.cell_size = cfg.CELL_SIZE
        self.classes = cfg.CLASSES
        # Map class name -> integer index.
        self.class_to_ind = dict(zip(self.classes, xrange(len(self.classes))))

    def check_label(self,frame):
        """Print class label and box index for every object in the frame's pickle."""
        label_path = cfg.LABEL_PATH+frame+'.p'
        label_data = pickle.load(open(label_path,'r'))
        for objs in label_data['objects']:
            box_ind = objs['box_index']
            class_label = objs['num_class_label']
            print "CLASS LABEL"
            print class_label
            print "BOX INDEX"
            print box_ind

    def check_frame(self,frame):
        """Show the frame's image in an OpenCV window (blocks until a key press)."""
        image_path = cfg.IMAGE_PATH+frame+'.png'
        image = cv2.imread(image_path)
        cv2.imshow('debug',image)
        cv2.waitKey(0)

    def image_read(self, imname, flipped=False):
        """Load an image, resize to the network input size and scale to [-1, 1]."""
        image = cv2.imread(imname)
        image = cv2.resize(image, (self.image_size, self.image_size))
        # cv2.imshow('debug',image)
        # cv2.waitKey(30)
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image = (image / 255.0) * 2.0 - 1.0
        if flipped:
            # Horizontal flip for augmentation.
            image = image[:, ::-1, :]
        return image

    def load_bbox_annotation(self, label):
        """
        Load image and bounding boxes info from XML file in the PASCAL VOC
        format.
        """
        # NOTE(review): despite the docstring, this reads a pickle produced
        # by the labeler, not a PASCAL VOC XML file.
        label_data = pickle.load(open(label,'r'))
        num_objs = label_data['num_labels']
        # One grid cell per (row, col): [confidence, box(4), one-hot class].
        label = np.zeros((self.cell_size, self.cell_size, 5+cfg.NUM_LABELS))
        for objs in label_data['objects']:
            box_ind = objs['box_index']
            class_label = objs['num_class_label']
            # Map the box position to its grid cell.
            x_ind = int(box_ind[0] * self.cell_size / self.image_size)
            y_ind = int(box_ind[1] * self.cell_size / self.image_size)
            label[y_ind, x_ind, 0] = 1
            label[y_ind, x_ind, 1:5] = box_ind
            label[y_ind, x_ind, 5 + class_label] = 1
        return label, num_objs
if __name__ == '__main__':
    # Manual smoke test on a single frame.
    tl = TestLabeler()
    frame = 'frame_1771'
    tl.check_label(frame)
    tl.check_frame(frame)
|
[
"mdlaskey@umich.edu"
] |
mdlaskey@umich.edu
|
1b5dde44a062a74cb90f2e60d15903012ccb7620
|
eff2fc11905f6118dcd70050392f168cd7aea086
|
/leetcode/5_longest_palindromic_substring/solution2.py
|
dc6f8c44f995cff0b89286e6dbc72af866bea932
|
[] |
no_license
|
algobot76/leetcode-python
|
28f1e1107fa941a3b40006f074eec6231e674ac1
|
ec8bff8978d6915bfdf187c760b97ee70f7515af
|
refs/heads/master
| 2021-07-05T17:06:40.581977
| 2020-09-19T22:02:38
| 2020-09-19T22:02:38
| 199,255,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
class Solution:
    def longestPalindrome(self, s):
        """Return the longest palindromic substring of s (O(n^2) interval DP)."""
        length = len(s)
        if length < 2:
            return s
        # is_pal[i][j] is True when s[i..j] is a palindrome.
        is_pal = [[False] * length for _ in range(length)]
        best = ""
        # Fill intervals right-to-left so is_pal[start + 1][end - 1] is ready.
        for start in range(length - 1, -1, -1):
            for end in range(start, length):
                if s[start] != s[end]:
                    continue
                # Length <= 2 with equal ends is trivially a palindrome;
                # longer spans require the inner interval to be one.
                if self._get_len(start, end) <= 2 or is_pal[start + 1][end - 1]:
                    is_pal[start][end] = True
                    if self._get_len(start, end) > len(best):
                        best = s[start:end + 1]
        return best

    def _get_len(self, i, j):
        """Length of the inclusive index interval [i, j]."""
        return j - i + 1
|
[
"xkaitian@gmail.com"
] |
xkaitian@gmail.com
|
4b7d04c5de2f897b35e6ea61fc5a14077a9d6ef7
|
9f91ce42e1982ded6f77e184a0c6e35331b9ad23
|
/greedy_color/main.py
|
9308c47eb7dcc321bf983e03e6c97dfc36b2951d
|
[
"MIT"
] |
permissive
|
dixler/graph-coloring
|
b5b1b5aeb91d24ba4f94fc1b837225019327c885
|
6a5e853b9a88bdddfd8a02c75dfe588f26eddaba
|
refs/heads/master
| 2020-04-10T14:17:53.701941
| 2018-12-15T09:44:36
| 2018-12-15T09:44:36
| 161,073,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
#!/usr/bin/env python3
# gonna make a stupid algorithm
import json
import sys
# Number of distinct colors used so far (updated by recursive_color).
num_colors = 0

# Adjacency list read from stdin as JSON: {"node_id": [neighbor_ids, ...]}.
graph = json.loads(input())
num_nodes = len(graph)

class Node():
    """A vertex: its assigned color (None until colored) and neighbor ids."""
    def __init__(self):
        self.color = None
        self.neighbors = set()

nodes = {int(i): Node() for i, val in graph.items()}

# add edges to graph
for k, val in graph.items():
    nodes[int(k)].neighbors = set(val)

# add inbound edges (make the adjacency symmetric / undirected)
for k, adj_list in graph.items():
    for endpoint in adj_list:
        nodes[endpoint].neighbors |= {int(k)}
def recursive_color(graph, start_index):
    'determines the color of interconnected nodes'
    # Greedy DFS coloring: give each node the smallest color unused by its
    # neighbors, then recurse into the neighbors. Mutates global num_colors.
    # NOTE(review): deep recursion can hit Python's recursion limit on
    # large connected components.
    global num_colors
    node = graph[start_index]
    if node.color != None:
        'we already colored it'
        return
    else:
        neighbor_colors = {graph[neighbor_id].color for neighbor_id in node.neighbors}
        # Smallest non-negative color not taken by a neighbor.
        new_color_id = 0
        while new_color_id in neighbor_colors:
            new_color_id += 1
        node.color = new_color_id
        num_colors = max(num_colors, new_color_id+1)
        for neighbor_id in node.neighbors:
            recursive_color(graph, neighbor_id)
        return
# make a stack of unvisited nodes
graph = {int(k): v for k, v in graph.items()}
unvisited = {k for k, v in graph.items()}
# Color one connected component per pass, starting from the
# highest-numbered still-uncolored node.
while unvisited != set():
    start_index = max(unvisited)
    recursive_color(nodes, start_index)
    unvisited = unvisited - {k for k, node in nodes.items() if node.color != None}

print('satisfiable with %d colors' % num_colors)
for k, node in nodes.items():
    print((k, node.color), end=', ')
|
[
"you@example.com"
] |
you@example.com
|
55452e8eaf3c675ee734d7d08b29328ed897b400
|
344b654cbb8b13d683bcd2cacf522c983287a5fe
|
/Exercises/fileExtension.py
|
295ca1b77df26281183deef41448b83bb4510202
|
[] |
no_license
|
tchaitanya2288/pyproject01
|
d869522584ab498008e67e81c209472ab20685c2
|
565660b73039db6f0e9ed986504c2f96ba674f9c
|
refs/heads/master
| 2020-03-15T13:18:21.480443
| 2018-06-19T18:44:47
| 2018-06-19T18:44:47
| 132,163,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
# Ask for a filename and print the text after the last '.'.
Filename = input('Enter your required filename:')
Extension = Filename.split('.')
# NOTE(review): a name containing no '.' prints the whole name as its
# "extension" — confirm whether that is acceptable for the exercise.
print("The Extension of file is:" +repr(Extension[-1]))
|
[
"tchaitanya.2288@gmail.com"
] |
tchaitanya.2288@gmail.com
|
f39ba693f9984287400dc51c6fd3384c2c8d4aad
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/92/usersdata/216/46367/submittedfiles/atividade.py
|
6f93a371a202140a4fcb7fb058a09a066cd9d666
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
# -*- coding: utf-8 -*-
import math
# Read an integer and accumulate a series of fractions i/n in `soma`.
# NOTE(review): `n` is decremented inside the loop while `i` is bumped, so the
# terms are 1/n, 2/(n-1), ..., n/1 for positive input; the inner else branch
# appears unreachable for n>0 since n stays >= 1 during the loop — confirm.
n=int(input('Digite um número:'))
soma=0
if n>0:
 for i in range(0,n,1):
  if n>=0:
   i=i+1
   soma=soma+((i)/(n))
   n=n-1
  else:
   # Would flip the sign before dividing, but n>=0 always holds here.
   n=n*(-1)
   i=i+1
   soma=soma+((i)/(n))
   n=n-1
else:
 # Non-positive input: only the sign is flipped; the sum stays 0.
 n=n*(-1)
print('%.5f'%soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
c7db867a68cfc633338475e43990083bb406cd98
|
1564d12d61f669ce9f772f3ef7563167f7fe13bf
|
/codeforces/educationalRound73/A-books.py
|
77e50053332612a3e54fa06049612ac125655ecd
|
[] |
no_license
|
sakshamk6999/codingPractice
|
73ec4873defb0f0d2e47173150a589ee12e5e0a1
|
f727aac6d87448b19fc9d48660dc6978fe5edc14
|
refs/heads/master
| 2020-12-01T20:22:36.299535
| 2020-02-04T05:55:53
| 2020-02-04T05:55:53
| 230,757,937
| 0
| 0
| null | 2020-02-12T20:38:12
| 2019-12-29T14:00:22
|
Python
|
UTF-8
|
Python
| false
| false
| 316
|
py
|
# For each test case: suffix[i] is n-1-i when a[i] differs from its successor,
# otherwise it is inherited from suffix[i+1]; print the values space-separated.
for _case in range(int(input())):
    size = int(input())
    values = list(map(int, input().split()))
    suffix = [0] * size
    for idx in range(size - 2, -1, -1):
        suffix[idx] = suffix[idx + 1] if values[idx] == values[idx + 1] else size - 1 - idx
    # Trailing '' reproduces the original trailing space before the newline.
    print(*suffix, '')
|
[
"sakshamkhatwani@gmail.com"
] |
sakshamkhatwani@gmail.com
|
9a378ac66d24667514820bb7ae2934ca7d3f4f35
|
e2242f78a129f2b87252a0bf1621e8190fd07442
|
/src/compas_vol/microstructures/tpms.py
|
6264e6a6389be9c6043785e4474fb65d97fa8cda
|
[
"MIT"
] |
permissive
|
ilmihur/compas_vol
|
751237e00f841f25546accf1bf1db782aa9a4559
|
8aedc611bd96acd95d26b9f34c805a8ff05020bf
|
refs/heads/master
| 2022-11-19T12:21:03.829785
| 2020-07-16T11:22:52
| 2020-07-16T11:22:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,724
|
py
|
from math import pi, sin, cos
from compas import PRECISION
class TPMS(object):
    """A triply periodic minimal surface (TPMS) defined by a type and a wavelength.

    Parameters
    ----------
    tpmstype : str or int
        Type of TPMS. Currently available are Gyroid, SchwartzP, Diamond,
        Neovius, Lidinoid and FischerKoch (or their index 0-5).
    wavelength : float
        The wavelength of the trigonometric function.

    Examples
    --------
    >>> a = TPMS(tpmstype='Gyroid', wavelength=5.0)
    """
    def __init__(self, tpmstype=0, wavelength=1.0):
        # Supported surface names; index in this list is the numeric type.
        self.tpmstypes = ['gyroid', 'schwartzp', 'diamond', 'neovius', 'lidinoid', 'fischerkoch']
        self._tpmstype = None
        self.tpmstype = tpmstype
        self._wavelength = None
        self.wavelength = wavelength
        # Scale factor mapping world coordinates onto the trig period.
        self._factor = self.wavelength/pi
    # ==========================================================================
    # descriptors
    # ==========================================================================
    @property
    def tpmstype(self):
        # int: index into self.tpmstypes of the active surface type.
        return self._tpmstype
    @tpmstype.setter
    def tpmstype(self, tpmstype):
        # Accept either a name (case-insensitive) or an integer index.
        if type(tpmstype) == str:
            if tpmstype.lower() in self.tpmstypes:
                self._tpmstype = self.tpmstypes.index(tpmstype.lower())
            else:
                # Unknown name falls back to Gyroid (index 0).
                self._tpmstype = 0
        elif type(tpmstype) == int:
            # Clamp integer input into the valid index range.
            self._tpmstype = max(0, min(tpmstype, len(self.tpmstypes) - 1))
    @property
    def wavelength(self):
        """float: The wavelength of the TPMS."""
        return self._wavelength
    @wavelength.setter
    def wavelength(self, wavelength):
        self._wavelength = float(wavelength)
        # Keep the cached coordinate scale in sync with the new wavelength.
        self._factor = self.wavelength/pi
    def __repr__(self):
        # PRECISION comes from compas (e.g. '3f'); its first character is the
        # digit count used as the float precision here.
        return 'TPMS({0},{1:.{2}f})'.format(self.tpmstype, self.wavelength, PRECISION[:1])
    # ==========================================================================
    # distance function
    # ==========================================================================
    def get_distance(self, point):
        """
        single point distance function

        Returns the signed scalar field value of the selected TPMS at `point`
        (an x, y, z triple); the implicit surface is the zero level set.
        """
        x, y, z = point
        # Rescale coordinates so one wavelength spans one trig period.
        px = x/self._factor
        py = y/self._factor
        pz = z/self._factor
        d = 0
        if self.tpmstype == 0:  # 'Gyroid':
            d = sin(px)*cos(py) + sin(py)*cos(pz) + sin(pz)*cos(px)
        elif self.tpmstype == 1:  # 'SchwartzP':
            d = cos(px) + cos(py) + cos(pz)
        elif self.tpmstype == 2:  # 'Diamond':
            d = (
                sin(px) * sin(py) * sin(pz) +
                sin(px) * cos(py) * cos(pz) +
                cos(px) * sin(py) * cos(pz) +
                cos(px) * cos(py) * sin(pz)
            )
        elif self.tpmstype == 3:  # 'Neovius':
            d = (3 * cos(px) + cos(py) + cos(pz) +
                 4 * cos(px) * cos(py) * cos(pz))
        elif self.tpmstype == 4:  # 'Lidinoid':
            # NOTE(review): the cos(py)/cos(px) factors mirror the numpy
            # version below; confirm against a Lidinoid reference formula.
            d = (0.5 * (sin(2*px) * cos(py) * sin(pz) +
                        sin(2*py) * cos(py) * sin(px) +
                        sin(2*pz) * cos(px) * sin(pz)) -
                 0.5 * (cos(2*px) * cos(2*py) +
                        cos(2*py) * cos(2*pz) +
                        cos(2*pz) * cos(2*px)) + 0.15)
        elif self.tpmstype == 5:  # 'FischerKoch':
            d = (cos(2*px) * sin(py) * cos(pz) +
                 cos(2*py) * sin(pz) * cos(px) +
                 cos(2*pz) * sin(px) * cos(py))
        return d
    def get_distance_numpy(self, x, y, z):
        """
        vectorized distance function

        Same field as `get_distance`, evaluated elementwise on (broadcastable)
        numpy arrays x, y, z.
        """
        import numpy as np
        px = x/self._factor
        py = y/self._factor
        pz = z/self._factor
        d = 0
        # Gyroid
        if self.tpmstype == 0:
            d = np.sin(px) * np.cos(py) + np.sin(py)*np.cos(pz) + np.sin(pz)*np.cos(px)
        # SchwartzP
        elif self.tpmstype == 1:
            d = np.cos(px) + np.cos(py) + np.cos(pz)
        # Diamond
        elif self.tpmstype == 2:
            d = (
                np.sin(px) * np.sin(py) * np.sin(pz) +
                np.sin(px) * np.cos(py) * np.cos(pz) +
                np.cos(px) * np.sin(py) * np.cos(pz) +
                np.cos(px) * np.cos(py) * np.sin(pz)
            )
        # Neovius
        elif self.tpmstype == 3:
            d = (3 * np.cos(px) + np.cos(py) + np.cos(pz) +
                 4 * np.cos(px) * np.cos(py) * np.cos(pz))
        # Lidinoid
        elif self.tpmstype == 4:
            d = (0.5 * (np.sin(2*px) * np.cos(py) * np.sin(pz) +
                        np.sin(2*py) * np.cos(py) * np.sin(px) +
                        np.sin(2*pz) * np.cos(px) * np.sin(pz)) -
                 0.5 * (np.cos(2*px) * np.cos(2*py) +
                        np.cos(2*py) * np.cos(2*pz) +
                        np.cos(2*pz) * np.cos(2*px)) + 0.15)
        # FischerKoch
        elif self.tpmstype == 5:
            d = (np.cos(2*px) * np.sin(py) * np.cos(pz) +
                 np.cos(2*py) * np.sin(pz) * np.cos(px) +
                 np.cos(2*pz) * np.sin(px) * np.cos(py))
        # IWP?
        return d
if __name__ == "__main__":
    # Demo: sample a SchwartzP field on a 3D grid and show one z-slice.
    # from compas.geometry import Point
    import numpy as np
    import matplotlib.pyplot as plt
    b = TPMS(tpmstype='schwartzP', wavelength=5)
    print(b)
    # ogrid with imaginary steps gives 112x96x80 evenly spaced samples.
    x, y, z = np.ogrid[-14:14:112j, -12:12:96j, -10:10:80j]
    m = b.get_distance_numpy(x, y, z)
    plt.imshow(m[:, :, 25].T, cmap='RdBu')  # transpose because numpy indexing is 1)row 2) column instead of x y
    plt.colorbar()
    plt.axis('equal')
    plt.show()
    # for y in range(-15, 15):
    #     s = ''
    #     for x in range(-30, 30):
    #         d = b.get_distance(Point(x*0.5, y, 1.))
    #         if d < 0:
    #             s += 'x'
    #         else:
    #             s += '.'
    #     print(s)
[
"bernhard@arch.ethz.ch"
] |
bernhard@arch.ethz.ch
|
73acba9528101c1bfa9187c8776c8d7234afbc3f
|
c6fca34b2c9cb973d9d65d23e58e40d4513e173a
|
/aoc2015/day18.py
|
65008c1bad113a40d1876343cbf348d6f612d6a1
|
[] |
no_license
|
tomkooij/AdventOfCode
|
8ff47c027c887194b0d441f61a8db172c4e260ea
|
7890d45a01498dcb48972a7e311888ce6f003bd2
|
refs/heads/master
| 2021-08-15T19:46:21.869137
| 2021-01-18T06:37:50
| 2021-01-18T06:37:50
| 48,421,868
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
# adventofcode.com
# day18
from copy import deepcopy
INPUT = ('input/input18', 100)
TESTCASE = ('input/test18', 4)
ON = '#'
OFF = '.'
def pretty_print(lights):
    """Print the grid, one row per line.

    Fix: the original used the Python-2-only ``print`` *statement*; with a
    single parenthesised argument the call form below behaves identically on
    Python 2 and is valid Python 3.
    """
    for row in lights:
        print(''.join(row).rstrip('\n'))
def count(lights):
    """Return the total number of lit ('#') cells across all rows."""
    return sum(row.count('#') for row in lights)
def get_neighbours(lights, x, y):
    """Return (row, col) pairs adjacent to (x, y), clipped to the square grid."""
    size = len(lights)
    cells = [(r, c)
             for r in range(max(y - 1, 0), min(y + 2, size))
             for c in range(max(x - 1, 0), min(x + 2, size))]
    # Drop the centre cell itself (present whenever (x, y) is in bounds).
    if (y, x) in cells:
        cells.remove((y, x))
    return cells
def count_neighbours(lights, x, y):
    """Count lit ('#') cells among the neighbours of (x, y)."""
    cells = [lights[r][c] for r, c in get_neighbours(lights, x, y)]
    return count(cells)
# Select the real input (use TESTCASE instead to run the small example).
FILENAME, STEPS = INPUT
if __name__ == '__main__':
    # Python 2: map() returns a list, so lights is a list of row lists.
    with open(FILENAME) as f:
        lights = map(list, f.read().splitlines())
    # Conway-style update: print the grid/count, then step it STEPS times
    # (the final updated grid is not printed by this loop).
    for _ in range(STEPS+1):
        old_lights = deepcopy(lights)
        pretty_print(lights)
        print count(lights)
        for y in range(0, len(lights)):
            for x in range(0, len(lights)):
                #print y, x, count_neighbours(lights, x, y)
                if old_lights[y][x] == ON:
                    # A lit cell stays on only with 2 or 3 lit neighbours.
                    if not count_neighbours(old_lights, x, y) in [2, 3]:
                        lights[y][x] = OFF
                elif old_lights[y][x] == OFF:
                    # An unlit cell turns on with exactly 3 lit neighbours.
                    if count_neighbours(old_lights, x, y) == 3:
                        lights[y][x] = ON
                else:
                    assert False, 'lp0 on fire! %d %d %c' % (x, y, lights[y][x])
|
[
"tomkooij@tomkooij.nl"
] |
tomkooij@tomkooij.nl
|
0099ea1a24cd0a7e27e7caa9bcd30ad25bb5fc29
|
d4b91d9ebb7c850f07b06e5c15794b2885f2e767
|
/6/Tema3(Циклы)/6.c_6.py
|
3d423e792c84c79c5c729e0ca3d5be2f25693867
|
[] |
no_license
|
Timur597/First6team
|
13b6dbb2d2e68d5df5c76c5bbba587d563a95957
|
4df85a6f20bad626ad76196cd5bc867ce27d0aac
|
refs/heads/master
| 2023-03-05T15:39:24.311784
| 2021-02-20T07:17:36
| 2021-02-20T07:17:36
| 340,588,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# Задание 6 (Task 6): print every second name from the tuple.
# Fix: the original first line was the bare text "6 Задание", which is a
# SyntaxError; it is preserved here as this comment instead.
names = ('Максат','Лязат','Данияр','Айбек','Атай','Салават','Адинай','Жоомарт','Алымбек','Эрмек','Дастан','Бекмамат','Аслан')
i = 0
while i < 12:
    print(names[i])
    i = i + 2
|
[
"khalilov.timur97@mail.ru"
] |
khalilov.timur97@mail.ru
|
1d87192e81d61530ae36b21063abb510bd089aee
|
fbaf44a5f4effe2838a03165f237a7a282284f64
|
/Practice/PIle_length-width_soilE/1.1 readODB.py
|
3f59145257606d79712227f140d6214a9b44a5d9
|
[] |
no_license
|
WangDooo/Python-in-Abaqus
|
b568f5499bbfd8bc4893f4510a233b9c0be30cf8
|
c7bcbd1adc3bcff9661e13c8ce883cb59269ceb8
|
refs/heads/master
| 2021-06-13T14:05:25.639543
| 2021-03-24T03:32:44
| 2021-03-24T03:32:44
| 173,902,521
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
# coding:utf8
# Read the pile-top vertical displacement (U2) from an Abaqus ODB and dump it
# to a CSV file, one row per node of the SET-PILETOPPOINT node set.
from odbAccess import *
odb = openOdb(path='Job-1.odb')
step = odb.steps['Step-1']
point = odb.rootAssembly.nodeSets['SET-PILETOPPOINT']
lastFrame = step.frames[-1]
u = lastFrame.fieldOutputs['U']
# Restrict the displacement field to the pile-top node set only.
u_point = u.getSubset(region=point)
# Fix: the original never closed the output file; ensure it is closed even if
# a write fails.
uFile = open('U2.csv','w')
try:
    uFile.write('nodeLabel,U2 \n')
    for uValue in u_point.values:
        # data[1] is the second displacement component (U2).
        uFile.write('NO.%s, %f \n' % (uValue.nodeLabel, uValue.data[1]))
finally:
    uFile.close()
|
[
"wangbc1993@163.com"
] |
wangbc1993@163.com
|
6d268fb1bb10e27331a3f7427f4e7ec31917a891
|
5e557741c8867bca4c4bcf2d5e67409211d059a3
|
/test/distributed/elastic/timer/local_timer_example.py
|
8d3702c9a70283500c437adc763c2e6090b382a9
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
Pandinosaurus/pytorch
|
a2bb724cfc548f0f2278b5af2fd8b1d2758adb76
|
bb8978f605e203fbb780f03010fefbece35ac51c
|
refs/heads/master
| 2023-05-02T20:07:23.577610
| 2021-11-05T14:01:30
| 2021-11-05T14:04:40
| 119,666,381
| 2
| 0
|
NOASSERTION
| 2021-11-05T19:55:56
| 2018-01-31T09:37:34
|
C++
|
UTF-8
|
Python
| false
| false
| 4,080
|
py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import signal
import time
import unittest
import torch.distributed.elastic.timer as timer
import torch.multiprocessing as torch_mp
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
IS_WINDOWS,
IS_MACOS,
sandcastle_skip_if,
)
# Configure root logging once at import time for this test module.
logging.basicConfig(
    level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s"
)
def _happy_function(rank, mp_queue):
    # Worker that finishes (0.5 s sleep) well within its 1 s expiration window,
    # so the timer server should never kill it.
    timer.configure(timer.LocalTimerClient(mp_queue))
    with timer.expires(after=1):
        time.sleep(0.5)
def _stuck_function(rank, mp_queue):
    # Worker that sleeps past its 1 s expiration window, so the timer server
    # is expected to reap (SIGKILL) it.
    timer.configure(timer.LocalTimerClient(mp_queue))
    with timer.expires(after=1):
        time.sleep(5)
# timer is not supported on macos or windows
if not (IS_WINDOWS or IS_MACOS):
    class LocalTimerExample(unittest.TestCase):
        """
        Demonstrates how to use LocalTimerServer and LocalTimerClient
        to enforce expiration of code-blocks.

        Since torch multiprocessing's ``start_process`` method currently
        does not take the multiprocessing context as parameter argument
        there is no way to create the mp.Queue in the correct
        context BEFORE spawning child processes. Once the ``start_process``
        API is changed in torch, then re-enable ``test_torch_mp_example``
        unittest. As of now this will SIGSEGV.
        """
        @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
        def test_torch_mp_example(self):
            # in practice set the max_interval to a larger value (e.g. 60 seconds)
            mp_queue = mp.get_context("spawn").Queue()
            server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
            server.start()
            world_size = 8
            # all processes should complete successfully
            # since start_process does NOT take context as parameter argument yet
            # this method WILL FAIL (hence the test is disabled)
            torch_mp.spawn(
                fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True
            )
            with self.assertRaises(Exception):
                # torch.multiprocessing.spawn kills all sub-procs
                # if one of them gets killed
                torch_mp.spawn(
                    fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True
                )
            server.stop()
        @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
        def test_example_start_method_spawn(self):
            self._run_example_with(start_method="spawn")
        # @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
        # def test_example_start_method_forkserver(self):
        #     self._run_example_with(start_method="forkserver")
        def _run_example_with(self, start_method):
            # Spawn a mix of workers: even ranks overrun their timer (and must
            # be SIGKILLed by the server), odd ranks finish normally.
            spawn_ctx = mp.get_context(start_method)
            mp_queue = spawn_ctx.Queue()
            server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
            server.start()
            world_size = 8
            processes = []
            for i in range(0, world_size):
                if i % 2 == 0:
                    p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))
                else:
                    p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))
                p.start()
                processes.append(p)
            for i in range(0, world_size):
                p = processes[i]
                p.join()
                if i % 2 == 0:
                    # Negative exit codes encode the killing signal.
                    self.assertEqual(-signal.SIGKILL, p.exitcode)
                else:
                    self.assertEqual(0, p.exitcode)
            server.stop()
if __name__ == "__main__":
    run_tests()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
31c75823ceccc46b7570986abb36366707a7b394
|
f995860ad78fc266d04b03c3478c74e989d8b568
|
/PE/pe0178.py
|
50751fc483b0c73fe0a8686c699f684a06e3cf11
|
[] |
no_license
|
196884/Python
|
edd0234fd72a40d7a0b3310776edcaa8bda74478
|
8dc2e7a32dd350227cde748600e713dc3eea3f4a
|
refs/heads/master
| 2016-09-06T19:26:19.860746
| 2015-11-09T00:09:23
| 2015-11-09T00:09:23
| 28,167,634
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
def initList():
    """Return a fresh 10x10 grid of zeroes; each row is an independent list."""
    return [[0] * 10 for _ in range(10)]
def solve():
    # dynamic programming:
    # after n steps, l[i][j] is the number of paths:
    # * of length n
    # * starting at 0
    # * going up or down by 1 at each step
    # * with minimum -i, and maximum +j
    r = 0
    l = initList()
    l[0][0] = 1
    for n in range(1, 40):
        lNew = initList()
        for i in range(0, 10):
            for j in range(0, 9):
                # Step down: minimum may deepen by 1, maximum grows by 1.
                lNew[max(0, i-1)][j+1] += l[i][j]
                # Mirror step (up), exploiting the up/down symmetry of l.
                lNew[j+1][max(0, i-1)] += l[j][i]
        l = lNew
        # Paths spanning exactly the 10 digits (min+max window == 9) are the
        # pandigital step numbers of this length.
        for i in range(1, 10): # The starting with a 0 is covered in the previous count!
            r += l[i][9-i]
    return r
if __name__ == "__main__":
    result = solve()
    # Python 2 print statement.
    print "Result: %d" % result
|
[
"regis.dupont+git@m4x.org"
] |
regis.dupont+git@m4x.org
|
962da2abca34985938d9ede37484fcea375e39e4
|
c730d4df20898a966b8ff215b2d3cce894bcf55e
|
/Linked_Lists/concatenate_circularLinkedList.py
|
9cf6ab5a0b417f94f29737d725ed6562f0d0d219
|
[] |
no_license
|
venukumarbv/Datastructure_Algorithms_using_Python
|
23a6996b171aafc0bcfc43f55e679ee6ef76c5d7
|
cd32691edbf9f7b6cdfc16ea742f78fbc5f003e4
|
refs/heads/master
| 2022-11-19T22:27:38.751963
| 2020-07-21T10:46:55
| 2020-07-21T10:46:55
| 281,368,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
class Node:
    """A single node of the circular linked list."""
    def __init__(self,value):
        # Payload value and pointer to the next node (set on insertion).
        self.info = value
        self.link = None
class CircularLinkedList:
    """Singly linked circular list that tracks only its last node.

    ``self.last`` points at the final node, so ``self.last.link`` is the head.
    An empty list is represented by ``last is None``.
    """
    def __init__(self):
        self.last = None
    def insert_a_node(self, value):
        """Append a node holding ``value`` at the end of the list."""
        temp = Node(value)
        if self.last is None:  # create a logical circular list when empty
            self.last = temp
            self.last.link = self.last
        # insert at end
        temp.link = self.last.link
        self.last.link = temp
        self.last = temp
    def create_list(self):
        """Build the list interactively from user input."""
        n = int(input("Enter number of Nodes: "))
        for i in range(n):
            value = int(input("Enter the vale of {} node ".format(i+1)))
            self.insert_a_node(value)
    def display(self):
        """Print the list from head to tail, or a message when empty."""
        if self.last is None:
            print("The List is Empty")
            return
        p = self.last.link  # head of the list
        while True:
            print('-->', p.info, end='')
            p = p.link
            if p == self.last.link:
                break
        print()
    def concatenate(self, list2):
        """Splice all nodes of ``list2`` onto the end of this list.

        Fix: when ``self`` was empty the original set ``self.last`` to
        ``list2.last.link`` (list2's *head*), leaving ``last`` pointing at the
        first node instead of the tail; it also raised AttributeError when
        both lists were empty. Checking ``list2`` first and adopting its tail
        corrects both cases.
        """
        if list2.last is None:  # nothing to append
            return
        if self.last is None:  # adopt list2 wholesale; last must be its tail
            self.last = list2.last
            return
        p = self.last.link
        self.last.link = list2.last.link
        list2.last.link = p
        self.last = list2.last
# Interactive driver: build two lists from user input, show them, then show
# their concatenation (list2's nodes are spliced onto list1 in place).
clist1 = CircularLinkedList()
clist2 = CircularLinkedList()
print("List 1")
clist1.create_list()
print("List 2")
clist2.create_list()
print("The List 1 is:")
clist1.display()
print("The List 2 is:")
clist2.display()
print("Concatenated List is :")
clist1.concatenate(clist2)
clist1.display()
|
[
"VKvision@venu.com"
] |
VKvision@venu.com
|
d0f7ae8b7499a9ca59ab3835244c320159fe0290
|
d6589ff7cf647af56938a9598f9e2e674c0ae6b5
|
/imagesearch-20201214/setup.py
|
3e51f0f9157b6734ebf9de9021339da732085c83
|
[
"Apache-2.0"
] |
permissive
|
hazho/alibabacloud-python-sdk
|
55028a0605b1509941269867a043f8408fa8c296
|
cddd32154bb8c12e50772fec55429a9a97f3efd9
|
refs/heads/master
| 2023-07-01T17:51:57.893326
| 2021-08-02T08:55:22
| 2021-08-02T08:55:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,885
|
py
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_imagesearch20201214.
Created on 20/05/2021
@author: Alibaba Cloud SDK
"""
# Distribution metadata for the generated Alibaba Cloud SDK package.
PACKAGE = "alibabacloud_imagesearch20201214"
# NOTE(review): the `or` fallback is dead — the left operand is a non-empty
# literal, so "alibabacloud-package" can never be selected (template artifact).
NAME = "alibabacloud_imagesearch20201214" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud image search (20201214) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
# Version is read from the package's own __version__ attribute at build time.
VERSION = __import__(PACKAGE).__version__
# Runtime dependencies, each pinned below its next major version.
REQUIRES = [
    "alibabacloud_tea_util>=0.3.3, <1.0.0",
    "alibabacloud_oss_sdk>=0.1.0, <1.0.0",
    "alibabacloud_tea_rpc>=0.1.0, <1.0.0",
    "alibabacloud_openplatform20191219>=1.1.1, <2.0.0",
    "alibabacloud_oss_util>=0.0.5, <1.0.0",
    "alibabacloud_tea_fileform>=0.0.3, <1.0.0",
    "alibabacloud_tea_openapi>=0.2.4, <1.0.0",
    "alibabacloud_openapi_util>=0.1.4, <1.0.0",
    "alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
# Use README.md as the long description when it exists next to setup.py.
if os.path.exists('./README.md'):
    with open("README.md", encoding='utf-8') as fp:
        LONG_DESCRIPTION = fp.read()
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache License 2.0",
    url=URL,
    keywords=["alibabacloud","imagesearch20201214"],
    packages=find_packages(exclude=["tests*"]),
    include_package_data=True,
    platforms="any",
    install_requires=REQUIRES,
    python_requires=">=3.6",
    classifiers=(
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        "Topic :: Software Development"
    )
)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
b56f2969e543d5827dc089cd6dcd23d2f694d788
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_chomps.py
|
b371d13ce61f330b9635ad464a74c0b37a02a7dd
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
from xai.brain.wordbase.verbs._chomp import _CHOMP
#calss header
class _CHOMPS(_CHOMP, ):
	"""Word entry for the verb form 'chomps'; thin wrapper over _CHOMP."""
	def __init__(self,):
		_CHOMP.__init__(self)
		# Identifying metadata for this inflected form.
		self.name = "CHOMPS"
		self.specie = 'verbs'
		self.basic = "chomp"
		self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7fa882dc540662fffa8f714c6124767e6bb8b1a6
|
7118862c20c0b503f9e901026e48a809e29f5cf5
|
/ar_markers/coding.py
|
87df7bd2c25243aa1dfe07fe9b784377cd8a6788
|
[
"BSD-3-Clause"
] |
permissive
|
pstraczynski/ar_markers
|
964c0405dd7b51ac12f6f4c042626514667f7324
|
408737244ef7a655607858a6852189d5aef02e9b
|
refs/heads/master
| 2022-11-17T22:08:42.885805
| 2020-07-16T11:46:38
| 2020-07-16T11:46:38
| 280,138,112
| 0
| 0
|
BSD-3-Clause
| 2020-07-16T11:38:53
| 2020-07-16T11:38:52
| null |
UTF-8
|
Python
| false
| false
| 2,924
|
py
|
# this is all hamming code stuff, no user stuff here ... move along, move along
from numpy import matrix, array
# Generator matrix G of the (7,4) Hamming code: G . data (mod 2) yields the
# 7-bit code word with parity bits interleaved at positions 1, 2 and 4.
GENERATOR_MATRIX = matrix([
    [1, 1, 0, 1],
    [1, 0, 1, 1],
    [1, 0, 0, 0],
    [0, 1, 1, 1],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
])
# Extracts the 4 data bits (positions 3, 5, 6, 7) from a 7-bit code word.
REGENERATOR_MATRIX = matrix([
    [0, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
    [0, 0, 0, 0, 0, 1, 0],
    [0, 0, 0, 0, 0, 0, 1],
])
# Parity-check matrix H: H . word (mod 2) gives the syndrome, which, read as
# binary, is the 1-based index of a single flipped bit (0 means no error).
PARITY_CHECK_MATRIX = matrix([
    [1, 0, 1, 0, 1, 0, 1],
    [0, 1, 1, 0, 0, 1, 1],
    [0, 0, 0, 1, 1, 1, 1],
])
# (row, col) cells of the marker image that carry the 21 hamming-code bits
# (presumably inside a 7x7 marker grid — confirm against the marker layout).
HAMMINGCODE_MARKER_POSITIONS = [
    [1, 2], [1, 3], [1, 4],
    [2, 1], [2, 2], [2, 3], [2, 4], [2, 5],
    [3, 1], [3, 2], [3, 3], [3, 4], [3, 5],
    [4, 1], [4, 2], [4, 3], [4, 4], [4, 5],
    [5, 2], [5, 3], [5, 4],
]
def encode(bits):
    """Hamming(7,4)-encode a '0'/'1' string whose length is a multiple of 4."""
    if len(bits) % 4 != 0:
        raise ValueError('Only a multiple of 4 as bits are allowed.')
    code_words = []
    for start in range(0, len(bits), 4):
        nibble = generate_bit_array(bits[start:start + 4])
        word = matrix_array_multiply_and_format(GENERATOR_MATRIX, nibble)
        code_words.append(''.join(word))
    return ''.join(code_words)
def decode(bits):
    """Decode Hamming(7,4) code words (length a multiple of 7), correcting
    at most one flipped bit per 7-bit word."""
    if len(bits) % 7 != 0:
        raise ValueError('Only a multiple of 7 as bits are allowed.')
    for bit in bits:
        if int(bit) not in [0, 1]:
            raise ValueError('The provided bits contain other values that 0 or 1: %s' % bits)
    data_chunks = []
    for start in range(0, len(bits), 7):
        raw_word = generate_bit_array(bits[start:start + 7])
        fixed_word = parity_correct(raw_word)
        data_bits = matrix_array_multiply_and_format(REGENERATOR_MATRIX, fixed_word)
        data_chunks.append(''.join(data_bits))
    return ''.join(data_chunks)
def parity_correct(bit_array):
    """Flip the single bit (if any) that the parity check flags, in place.

    The syndrome, read as a binary number, is the 1-based index of the
    erroneous bit; a zero syndrome means the word is already consistent.
    """
    syndrome = matrix_array_multiply_and_format(PARITY_CHECK_MATRIX, bit_array)
    if all(int(digit) == 0 for digit in syndrome):
        return bit_array
    error_position = int(''.join(syndrome), 2)
    for index, bit in enumerate(bit_array):
        if error_position == index + 1:
            bit_array[index] = 1 if bit == 0 else 0
    return bit_array
def matrix_array_multiply_and_format(matrix, array):
    """Multiply matrix by vector, reduce mod 2, and return '0'/'1' strings.

    NOTE(review): the parameter names shadow numpy's ``matrix``/``array``
    imported at module level; callers pass positionally, so this is benign.
    """
    raw_values = matrix.dot(array).tolist()[0]
    return [str(value % 2) for value in raw_values]
def generate_bit_array(bits):
    """Convert a string of '0'/'1' characters into a numpy integer array."""
    return array([int(character) for character in bits])
def extract_hamming_code(mat):
    """Read the marker's hamming-code bits from matrix `mat` in canonical order."""
    return ''.join(str(int(mat[row, col]))
                   for row, col in HAMMINGCODE_MARKER_POSITIONS)
|
[
"walchko@users.noreply.github.com"
] |
walchko@users.noreply.github.com
|
a6c223f868e4c11922e97249c425499dc397669a
|
9baa9f1bedf7bc973f26ab37c9b3046824b80ca7
|
/venv-bck/lib/python2.7/site-packages/pymongo/write_concern.py
|
d16f1d0b8e1ba18818c6bcc891bc21c10cae0f6b
|
[] |
no_license
|
shakthydoss/suriyan
|
58774fc5de1de0a9f9975c2ee3a98900e0a5dff4
|
8e39eb2e65cc6c6551fc165b422b46d598cc54b8
|
refs/heads/master
| 2020-04-12T05:36:59.957153
| 2017-01-08T06:12:13
| 2017-01-08T06:12:13
| 59,631,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,444
|
py
|
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with write concerns."""
from bson.py3compat import integer_types, string_type
from pymongo.errors import ConfigurationError
class WriteConcern(object):
"""WriteConcern
:Parameters:
- `w`: (integer or string) Used with replication, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<integer>` always includes the replica
set primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). **w=0 disables acknowledgement
of write operations and can not be used with other write concern
options.**
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Cannot be used in combination with `fsync`. Prior
to MongoDB 2.6 this option was ignored if the server was running
without journaling. Starting with MongoDB 2.6 write operations will
fail with an exception if this option is used when the server is
running without journaling.
- `fsync`: If ``True`` and the server is running without journaling,
blocks until the server has synced all data files to disk. If the
server is running with journaling, this acts the same as the `j`
option, blocking until write operations have been committed to the
journal. Cannot be used in combination with `j`.
"""
__slots__ = ("__document", "__acknowledged")
def __init__(self, w=None, wtimeout=None, j=None, fsync=None):
self.__document = {}
self.__acknowledged = True
if wtimeout is not None:
if not isinstance(wtimeout, integer_types):
raise TypeError("wtimeout must be an integer")
self.__document["wtimeout"] = wtimeout
if j is not None:
if not isinstance(j, bool):
raise TypeError("j must be True or False")
self.__document["j"] = j
if fsync is not None:
if not isinstance(fsync, bool):
raise TypeError("fsync must be True or False")
if j and fsync:
raise ConfigurationError("Can't set both j "
"and fsync at the same time")
self.__document["fsync"] = fsync
if self.__document and w == 0:
raise ConfigurationError("Can not use w value "
"of 0 with other options")
if w is not None:
if isinstance(w, integer_types):
self.__acknowledged = w > 0
elif not isinstance(w, string_type):
raise TypeError("w must be an integer or string")
self.__document["w"] = w
@property
def document(self):
"""The document representation of this write concern.
.. note::
:class:`WriteConcern` is immutable. Mutating the value of
:attr:`document` does not mutate this :class:`WriteConcern`.
"""
return self.__document.copy()
@property
def acknowledged(self):
"""If ``True`` write operations will wait for acknowledgement before
returning.
"""
return self.__acknowledged
def __repr__(self):
return ("WriteConcern(%s)" % (
", ".join("%s=%s" % kvt for kvt in self.document.items()),))
def __eq__(self, other):
return self.document == other.document
def __ne__(self, other):
return self.document != other.document
|
[
"shakthydoss@gmail.com"
] |
shakthydoss@gmail.com
|
19225bced8ac87070dfd4bf7df8d4fe653fba6af
|
0d59fa410624676908e1470fb9105cb8a280525c
|
/Algorithms/itertools/itertools_cycle.py
|
779bb2629e1349e4c5d5978c5e075686ef194ad3
|
[
"MIT"
] |
permissive
|
Nobodylesszb/python_module
|
122d41e776036dfc61a187e383dda821c35e25c4
|
37d2cdcf89a3ff02a9e560696a059cec9272bd1f
|
refs/heads/master
| 2020-05-31T07:48:57.695494
| 2019-07-29T11:32:17
| 2019-07-29T11:32:17
| 190,173,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
#该cycle()函数返回一个迭代器,它重复无限期给出的参数的内容。
# 由于它必须记住输入迭代器的全部内容,
# 如果迭代器很长,它可能会消耗相当多的内存
from itertools import *
for i in zip(range(7), cycle(['a', 'b', 'c'])):
print(i)
"""
output:
(0, 'a')
(1, 'b')
(2, 'c')
(3, 'a')
(4, 'b')
(5, 'c')
(6, 'a')
"""
|
[
"nobodylesszb@163.com"
] |
nobodylesszb@163.com
|
62cee17ddeb7c10ac5f70ed1eb57139892d7c7ca
|
a16236f9fbe72be1a8566d2067e4e66921a8a90e
|
/fbpmp/data_processing/attribution_id_combiner/attribution_id_spine_combiner_cpp.py
|
b6614231fe3d00730513a482b8b7b874faec7f16
|
[
"MIT"
] |
permissive
|
peking2/fbpcs-1
|
dc9e57afc5bab28f0d43ed537d4147e008f51030
|
234bc748f24046a13fbd14ee7794df5d70ab348b
|
refs/heads/main
| 2023-07-29T22:03:05.983480
| 2021-08-18T23:56:25
| 2021-08-18T23:57:19
| 397,813,444
| 0
| 0
|
MIT
| 2021-08-19T04:15:22
| 2021-08-19T04:15:22
| null |
UTF-8
|
Python
| false
| false
| 5,169
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import logging
from typing import Optional
from fbpcp.entity.container_instance import ContainerInstanceStatus
from fbpcp.service.onedocker import OneDockerService
from fbpmp.onedocker_binary_names import OneDockerBinaryNames
from fbpmp.pid.service.pid_service.pid_stage import PIDStage
# Default timeout for combiner containers: 10800 s = 3 hrs
DEFAULT_CONTAINER_TIMEOUT_IN_SEC = 10800
class CppAttributionIdSpineCombinerService:
def _get_combine_cmd_args_for_container(
self,
spine_path: str,
data_path: str,
output_path: str,
run_name: str,
tmp_directory: str,
padding_size: int,
sort_strategy: str,
) -> str:
# TODO: Probably put exe in an env variable?
# Try to align with existing paths
cmd_args = " ".join(
[
f"--spine_path={spine_path}",
f"--data_path={data_path}",
f"--output_path={output_path}",
f"--run_name={run_name}",
f"--tmp_directory={tmp_directory}",
f"--padding_size={padding_size}",
f"--sort_strategy={sort_strategy}",
]
)
return cmd_args
    def combine_on_container(
        self,
        spine_path: str,
        data_path: str,
        output_path: str,
        num_shards: int,
        run_name: str,
        onedocker_svc: OneDockerService,
        tmp_directory: str,
        padding_size: int,
        binary_version: str,
        sort_strategy: str = "sort",
        container_timeout: Optional[int] = None,
    ) -> None:
        # Synchronous convenience wrapper: drives the async implementation to
        # completion on a fresh event loop via asyncio.run.
        asyncio.run(
            self.combine_on_container_async(
                spine_path,
                data_path,
                output_path,
                num_shards,
                run_name,
                onedocker_svc,
                tmp_directory,
                padding_size,
                binary_version,
                sort_strategy,
                container_timeout,
            )
        )
async def combine_on_container_async(
self,
spine_path: str,
data_path: str,
output_path: str,
num_shards: int,
run_name: str,
onedocker_svc: OneDockerService,
tmp_directory: str,
padding_size: int,
binary_version: str,
sort_strategy: str = "sort",
container_timeout: Optional[int] = None,
) -> None:
logger = logging.getLogger(__name__)
timeout = container_timeout or DEFAULT_CONTAINER_TIMEOUT_IN_SEC
# TODO: Combiner could be made async so we don't have to spawn our
# own ThreadPoolExecutor here and instead use async primitives
cmd_args_list = []
for shard in range(num_shards):
# TODO: There's a weird dependency between these two services
# AttributionIdSpineCombiner should operate independently of PIDStage
next_spine_path = PIDStage.get_sharded_filepath(spine_path, shard)
next_data_path = PIDStage.get_sharded_filepath(data_path, shard)
next_output_path = PIDStage.get_sharded_filepath(output_path, shard)
cmd_args = self._get_combine_cmd_args_for_container(
next_spine_path,
next_data_path,
next_output_path,
run_name,
tmp_directory,
padding_size,
sort_strategy,
)
cmd_args_list.append(cmd_args)
containers = await onedocker_svc.start_containers_async(
package_name=OneDockerBinaryNames.ATTRIBUTION_ID_SPINE_COMBINER.value,
version=binary_version,
cmd_args_list=cmd_args_list,
timeout=timeout,
)
# Busy wait until all containers are finished
any_failed = False
for shard, container in enumerate(containers):
# Busy wait until the container is finished
status = ContainerInstanceStatus.UNKNOWN
logger.info(f"Task[{shard}] started, waiting for completion")
while status not in [
ContainerInstanceStatus.FAILED,
ContainerInstanceStatus.COMPLETED,
]:
container = onedocker_svc.get_containers([container.instance_id])[0]
status = container.status
# Sleep 5 seconds between calls to avoid an unintentional DDoS
logger.debug(f"Latest status: {status}")
await asyncio.sleep(5)
logger.info(
f"container_id({container.instance_id}) finished with status: {status}"
)
if status is not ContainerInstanceStatus.COMPLETED:
logger.error(f"Container {container.instance_id} failed!")
any_failed = True
if any_failed:
raise RuntimeError(
"One or more containers failed. See the logs above to find the exact container_id"
)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c4b3fd6c50a9c062239571170f6518b778e577d4
|
fb67e1b98f4170077da0e29617e34317d7d68d53
|
/main.py
|
4fa1d56aae636f2b2f3181cb77d4674c0efca12c
|
[] |
no_license
|
HadiGhazali/rock-paper-scissors
|
14151f518d0349bb07b4d22d88a2d423165c9553
|
5505a91f27fb448536364aab277f91a4193cf5a2
|
refs/heads/main
| 2023-02-01T10:04:22.179265
| 2020-12-20T16:12:59
| 2020-12-20T16:12:59
| 318,649,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
from random import choice
from constants import PLAYER_OPTIONS, PLAY_BUTTON, STATUS
from core import check_win, modify_scores, check_total
scores = {'user': 0, 'system': 0, 'total_user': 0, 'total_system': 0}
play = True
while play:
user_input = input('Enter your choice pleas')
system_input = choice(list(PLAYER_OPTIONS.keys()))
if user_input in PLAYER_OPTIONS.keys():
result = check_win(user_input, system_input)
current_scores = modify_scores(result, scores)
print('your choice:{}, system choice:{},result:{},\t {}-{}'.format(PLAYER_OPTIONS[user_input],
PLAYER_OPTIONS[system_input], STATUS[result],
current_scores['user'],
current_scores['system']))
check_total(current_scores)
elif user_input in PLAY_BUTTON.keys():
play = False
print('Bye!')
else:
print('Invalid input')
|
[
"hadivardanjani1378@gmail.com"
] |
hadivardanjani1378@gmail.com
|
ee143f1efcc713e6d0ebae48abd8d2f0e560c0ad
|
cf0c4657fd8198b904932a3c924f3c1f22bddd87
|
/setup.py
|
cf7c2a620b5ed21b92440e9125e1ae9bfc4fad00
|
[] |
no_license
|
Coconut-System-Engineer/Create-automatic-akun-instagram
|
5e46fd2df6c5a3dcd22058a9e009c972340208bd
|
00ec28ffed76428a9db7e05f5ad3e3023897ad87
|
refs/heads/master
| 2021-03-01T03:16:46.573225
| 2020-03-09T06:21:09
| 2020-03-09T06:21:09
| 245,750,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,003
|
py
|
import os
import random
import sys
import time
from time import sleep
os.system('clear')
def mengetik (s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(random.random() * 0.4)
os.system('clear')
sleep(0.1)
print ('Loading...')
sleep(0.1)
mengetik(' > > > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(1)
def main():
print "\033[1;00m==============================================================================================================="
sleep(0.1)
print "\033[1;91m*********************___________***Auto Create Akun Instagram***___________************************************"
sleep(0.1)
print "* 0000000000000 000000000000 000000000000 000000 000000 00000000000000000000 *"
sleep(0.1)
print "* 000 0000 000000000000 0000000000000 00000000000 000000000 00000000000000000000 *"
sleep(0.1)
print "* 000 00 0000 0000 0000 000000 000000 000000 000000 000000 00 00000 00 *"
sleep(0.1)
print "* 000 00000 0000 0000 000000 00000 00000 00000 00000 00000 *"
sleep(0.1)
print "* 000 000000000000 0000 00000 0000 0000 0000 0000 00000 *"
sleep(0.1)
print "* 000 00000 000000000000 00000000000 0000 0000 0000 0000 00000 *"
sleep(0.1)
print "* 000 00 0000 0000 00000000000 00000 00000 00000 00000 00000 *"
sleep(0.1)
print "* 000 0000 0000 0000 0000 000000 000000 000000 000000 000000 00000 *"
sleep(0.1)
print "* 000 0000 000000000000 0000 000000 00000000000 0000000000 00000 *"
sleep(0.1)
print "* 000000000000 000000000000 0000 000000 000000 000000 00000000000000 *"
sleep(0.1)
print "\033[00m \033[1;94m*********************___________****** C O C O N U T ******___________****************************************"
sleep(0.1)
print "\033[00m==============================================================================================================="
print '\n \033[1;92m > > > silakan tunggu proses penginstalan pakage < < < \n'
sleep(0.1)
os.system("apt-get update && apt-get install python-pip && pip install selenium")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
os.system("chmod 777 geckodriver-v0.26.0-linux64/geckodriver")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
os.system("cp geckodriver-v0.26.0-linux64/geckodriver /usr/local/bin/")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
print '\n \033[1;00m\033[1;94m*************** __________Selelsai__________ ***************'
main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
464ebf186e3319a72253e12fa4a37890c21aa4a0
|
06a2dab18197a13fc3371debd29b476ae99cb01c
|
/T3/inputs/dnn2017.py
|
3c0661637347e70ff855197b207c33c81ac06421
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
PandaPhysics/PandaAnalysis
|
397a031f9e8d399be1814ab04dd525d69b41f060
|
3167d106d41dfce58219c3e07d30e201ee823b55
|
refs/heads/master
| 2021-06-18T13:52:57.650900
| 2019-04-08T17:35:29
| 2019-04-08T17:35:29
| 168,376,672
| 0
| 0
|
NOASSERTION
| 2019-04-08T17:33:55
| 2019-01-30T16:34:09
|
C++
|
UTF-8
|
Python
| false
| false
| 2,252
|
py
|
#!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import system,getenv,path
from time import clock,time
import json
which = int(argv[1])
submit_id = int(argv[2])
sname = argv[0]
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Utils.load import *
import PandaCore.Tools.job_config as cb
import PandaAnalysis.Tagging.cfg_v8 as tagcfg
import PandaAnalysis.T3.job_utilities as utils
from PandaAnalysis.Flat.analysis import wlnhbb2017, breg
Load('PandaAnalyzer')
data_dir = getenv('CMSSW_BASE') + '/src/PandaAnalysis/data/'
def fn(input_name, isData, full_path):
logger.info(sname+'.fn','Starting to process '+input_name)
# now we instantiate and configure the analyzer
a = breg(True)
a.bjetBDTReg = True
a.bjetDeepReg = True
a.inpath = input_name
a.outpath = utils.input_to_output(input_name)
a.datapath = data_dir
a.isData = isData
utils.set_year(a, 2017)
a.processType = utils.classify_sample(full_path, isData)
if a.processType in {root.pa.kTT, root.pa.kH}:
a.reclusterGen = True # only turn on if necessary
skimmer = root.pa.PandaAnalyzer(a)
return utils.run_PandaAnalyzer(skimmer, isData, a.outpath)
if __name__ == "__main__":
sample_list = cb.read_sample_config('local.cfg',as_dict=False)
to_run = None #sample_list[which]
for s in sample_list:
if which==s.get_id():
to_run = s
break
if not to_run:
logger.error(sname,'Could not find a job for PROCID=%i'%(which))
exit(3)
outdir = getenv('SUBMIT_OUTDIR')
lockdir = getenv('SUBMIT_LOCKDIR')
outfilename = to_run.name+'_%i.root'%(submit_id)
processed = {}
utils.report_start(outdir,outfilename,to_run.files)
wd = utils.isolate()
utils.main(to_run, processed, fn)
utils.hadd(processed.keys())
utils.print_time('hadd')
ret = utils.stageout(outdir,outfilename)
utils.cleanup('*.root')
utils.un_isolate(wd)
utils.print_time('stageout and cleanup')
if not ret:
utils.report_done(lockdir,outfilename,processed)
utils.cleanup('*.lock')
utils.print_time('create lock')
else:
exit(-1*ret)
exit(0)
|
[
"sidn@mit.edu"
] |
sidn@mit.edu
|
02f22fe5f02b8df2182114217e0c398ecfda644f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_buyout.py
|
5ef4f182702e4982179b22670203e03692b7d3ff
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
#calss header
class _BUYOUT():
def __init__(self,):
self.name = "BUYOUT"
self.definitions = [u'(in business) a situation in which a person or group buys all the shares belonging to a company and so gets control of it: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7e862eae0d9148a1e0b88084c5981c3280296cc4
|
53b1cf89f3ac00d86add6dc6e103160d50e1b4ea
|
/pgadmin/pgadmin4/web/pgadmin/browser/server_groups/servers/tests/test_server_get.py
|
338f7fcfb45c96ee37238b621d1a4a0c92353062
|
[
"PostgreSQL"
] |
permissive
|
luvres/armhf
|
b5e9e59c0e5db7f4a280242a0d940c4066a47716
|
aa1ec48e246f1fb8e0f4099fa8d392eddcb414ad
|
refs/heads/master
| 2021-10-01T19:08:53.395884
| 2018-11-28T17:57:42
| 2018-11-28T17:57:42
| 79,672,248
| 10
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,629
|
py
|
# ##########################################################################
#
# #pgAdmin 4 - PostgreSQL Tools
#
# #Copyright (C) 2013 - 2016, The pgAdmin Development Team
# #This software is released under the PostgreSQL Licence
#
# ##########################################################################
from pgadmin.utils.route import BaseTestGenerator
from regression import test_utils as utils
from regression import parent_node_dict
class ServersGetTestCase(BaseTestGenerator):
"""
This class will fetch added servers under default server group
by response code.
"""
scenarios = [
# Fetch the default url for server node
('Default Server Node url', dict(url='/browser/server/obj/'))
]
def setUp(self):
"""This function add the server to test the GET API"""
self.server_id = utils.create_server(self.server)
server_dict = {"server_id": self.server_id}
utils.write_node_info("sid", server_dict)
def runTest(self):
""" This function will fetch the added servers to object browser. """
server_id = parent_node_dict["server"][-1]["server_id"]
if not server_id:
raise Exception("Server not found to test GET API")
response = self.tester.get(self.url + str(utils.SERVER_GROUP) + '/' +
str(server_id),
follow_redirects=True)
self.assertEquals(response.status_code, 200)
def tearDown(self):
"""This function delete the server from SQLite """
utils.delete_server_with_api(self.tester, self.server_id)
|
[
"luvres@hotmail.com"
] |
luvres@hotmail.com
|
a4c44f2e0343cc29ca7b39dda84c174ba0bae39a
|
01733042e84a768b77f64ec24118d0242b2f13b8
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/learnedfilter_dc8ce473700453874488c1ea95947fa8.py
|
0e44fc370da4d80e98b03eef81573a428644af64
|
[
"MIT"
] |
permissive
|
slieberth/ixnetwork_restpy
|
e95673905854bc57e56177911cb3853c7e4c5e26
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
refs/heads/master
| 2023-01-04T06:57:17.513612
| 2020-10-16T22:30:55
| 2020-10-16T22:30:55
| 311,959,027
| 0
| 0
|
NOASSERTION
| 2020-11-11T12:15:34
| 2020-11-11T12:06:00
| null |
UTF-8
|
Python
| false
| false
| 5,854
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedFilter(Base):
"""This object contains criteria for filtering the learned routes.
The LearnedFilter class encapsulates a required learnedFilter resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'learnedFilter'
_SDM_ATT_MAP = {
'Afi': 'afi',
'EnableAfiSafi': 'enableAfiSafi',
'EnablePrefix': 'enablePrefix',
'Safi': 'safi',
}
def __init__(self, parent):
super(LearnedFilter, self).__init__(parent)
@property
def Capabilities(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.capabilities_4db6ad32c315806e926b0bd131f64535.Capabilities): An instance of the Capabilities class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.capabilities_4db6ad32c315806e926b0bd131f64535 import Capabilities
return Capabilities(self)._select()
@property
def Prefix(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.prefix_14ff2c47c83ae14aa22718e67f21f827.Prefix): An instance of the Prefix class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.prefix_14ff2c47c83ae14aa22718e67f21f827 import Prefix
return Prefix(self)._select()
@property
def Afi(self):
"""
Returns
-------
- number: Address Family Identifier value. Identifies the network layer protocol to be used with these routes.
"""
return self._get_attribute(self._SDM_ATT_MAP['Afi'])
@Afi.setter
def Afi(self, value):
self._set_attribute(self._SDM_ATT_MAP['Afi'], value)
@property
def EnableAfiSafi(self):
"""
Returns
-------
- bool: If enabled, allows the user to set values to be used for BGP-MP - the user-specified AFI and SAFI values for the BGP MP_REACH_NLRI.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableAfiSafi'])
@EnableAfiSafi.setter
def EnableAfiSafi(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableAfiSafi'], value)
@property
def EnablePrefix(self):
"""
Returns
-------
- bool: If enabled, BGP Prefix Filters configured in this dialog will be used to filter for routes that match those filter entries. Only those routes will be stored in the routing table. If disabled, all learned BGP routes will be stored.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnablePrefix'])
@EnablePrefix.setter
def EnablePrefix(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnablePrefix'], value)
@property
def Safi(self):
"""
Returns
-------
- number: Subsequent Address Family Identifier value. Used with, and provides additional information about, the AFI in the NLRI, per RFC 2858.
"""
return self._get_attribute(self._SDM_ATT_MAP['Safi'])
@Safi.setter
def Safi(self, value):
self._set_attribute(self._SDM_ATT_MAP['Safi'], value)
def update(self, Afi=None, EnableAfiSafi=None, EnablePrefix=None, Safi=None):
"""Updates learnedFilter resource on the server.
Args
----
- Afi (number): Address Family Identifier value. Identifies the network layer protocol to be used with these routes.
- EnableAfiSafi (bool): If enabled, allows the user to set values to be used for BGP-MP - the user-specified AFI and SAFI values for the BGP MP_REACH_NLRI.
- EnablePrefix (bool): If enabled, BGP Prefix Filters configured in this dialog will be used to filter for routes that match those filter entries. Only those routes will be stored in the routing table. If disabled, all learned BGP routes will be stored.
- Safi (number): Subsequent Address Family Identifier value. Used with, and provides additional information about, the AFI in the NLRI, per RFC 2858.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
|
[
"andy.balogh@keysight.com"
] |
andy.balogh@keysight.com
|
8f1ad74885c3e26272c09d24c7a5c2073c619087
|
bb8ed8b5aeede5f503ff5dac3870cf3817619282
|
/trunk/soft/common/tools/change_pkg_proto.py
|
a4d8b24460f16c0c6a6455fd20e6e89f113f0b26
|
[] |
no_license
|
mengtest/idle
|
561da3b4542ceca8a1b983e9214a57d6ecb7d22d
|
6e7866d0f493155fbfc9c2c35062af833217cbd0
|
refs/heads/master
| 2022-03-01T00:07:51.808702
| 2019-10-31T11:09:22
| 2019-10-31T11:09:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
import sys
if __name__ == '__main__':
if len(sys.argv) > 1:
fname = sys.argv[1]
with open(fname, 'r') as f:
content = f.read()
flag = True
start = 0
while flag:
flag = False
i1 = content.find("_pb2 as ", start)
start = i1 + 21
if i1 >= 0:
flag = True
i2 = content.rfind("import", 0, i1)
content = content[0:i2 + 7] + "common.proto." + content[i2 + 7:]
with open(fname, 'w') as f:
f.write(content)
|
[
"rocketxyfb@163.com"
] |
rocketxyfb@163.com
|
0715fd0b7eb50cb61eb5b8b45cab73ceb41c0401
|
f4534e1f23add4255a810688cc2d1c6c10a4c9b3
|
/ch07/ex7-1.py
|
39a55a8d82466def138a5764f39a4b20b086866d
|
[
"MIT"
] |
permissive
|
jasonhuayen91/Introduction_to_Computing_and_Programming_Using_Python
|
610ee2c060dd45d04652fb823f29a88c6bca1c45
|
9f211e66f8711b6c35405a1f40f14fcf9637294a
|
refs/heads/master
| 2021-05-29T23:21:40.326647
| 2015-03-03T16:41:54
| 2015-03-03T16:41:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
def sumDigit(s):
"""sを文字列とする.
sの中の数字の合計を返す.
例えば, sが'a2b3c'ならば5を返す"""
ret = 0
for d in s:
try:
ret += int(d)
except ValueError:
pass
return ret
print(sumDigit('a2b3c') == 5)
|
[
"premier3next@yahoo.co.jp"
] |
premier3next@yahoo.co.jp
|
6a3623bd08a74a8f907ecbdfc4368e677f98e843
|
9d0195aa83cc594a8c61f334b90375961e62d4fe
|
/JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1433.py
|
a2fb809bcd5cf445b8aa9aa55ee1a7513b1de66e
|
[] |
no_license
|
rsk146/CMS
|
4e49592fc64f6438051544c5de18598db36ed985
|
5f8dab8c59ae556598b9747b52b88205fffc4dbe
|
refs/heads/master
| 2022-12-01T03:57:12.126113
| 2020-08-04T03:29:27
| 2020-08-04T03:29:27
| 284,863,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,293
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/AFA857F6-C77B-314D-B472-A50BFA0A7BAC.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1433.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
[
"rsk146@scarletmail.rutgers.edu"
] |
rsk146@scarletmail.rutgers.edu
|
3dd6b2986f6fd886dd1179e7b456bb349f201ad3
|
e9156143e706fa7981f531dafb4fec72f42d9d78
|
/snapflow_bi/functions/transaction_ltv_model/tests/test_transaction_ltv_model.py
|
637af7409b431737d5b34b8640b2f23d389eff06
|
[
"BSD-3-Clause"
] |
permissive
|
kvh/snapflow-bi
|
b5a00b4c8902e663b400e4831da53ce7d1888a21
|
2e0877b19fb0738ba384b798ad1c5c33c4b3111e
|
refs/heads/master
| 2023-06-07T20:27:16.467895
| 2021-06-18T15:17:20
| 2021-06-18T15:17:20
| 308,482,793
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
inputs = {
"transactions": dict(
data="""
customer_id,transacted_at,amount
1,2020-01-01 00:00:00,100
2,2020-02-01 00:00:00,100
2,2020-03-01 00:00:00,100
3,2020-01-01 00:00:00,300
3,2020-04-01 00:00:00,400
4,2020-01-01 00:00:00,100
4,2020-02-01 00:00:00,100
4,2020-03-01 00:00:00,50
5,2020-01-01 00:00:00,1000
""",
schema="bi.Transaction",
)
}
outputs = {
"default": """
customer_id,ltv
1,100
2,100
3,400
4,50
5,1000
"""
}
# from __future__ import annotations
# from dcp.storage.database.utils import get_tmp_sqlite_db_url
# from snapflow import Environment, graph, produce
# from snapflow.testing.utils import str_as_dataframe
# def test_ltv():
# from snapflow_bi import module as bi
# input_data = """
# customer_id,transacted_at,amount
# 1,2020-01-01 00:00:00,100
# 2,2020-02-01 00:00:00,100
# 2,2020-03-01 00:00:00,100
# 3,2020-01-01 00:00:00,300
# 3,2020-04-01 00:00:00,400
# 4,2020-01-01 00:00:00,100
# 4,2020-02-01 00:00:00,100
# 4,2020-03-01 00:00:00,50
# 5,2020-01-01 00:00:00,1000
# """
# env = Environment(metadata_storage=get_tmp_sqlite_db_url())
# txs = str_as_dataframe(env, input_data, nominal_schema=bi.schemas.Transaction)
# g = graph()
# df = g.create_node(
# "core.import_dataframe", params={"dataframe": txs, "schema": "bi.Transaction"}
# )
# ltv = g.create_node(bi.functions.transaction_ltv_model, upstream=df)
# blocks = produce(ltv, env=env, modules=[bi])
# output_df = blocks[0].as_dataframe()
# assert len(output_df) == 5
# assert set(output_df["customer_id"]) == set(i for i in range(1, 6))
|
[
"kenvanharen@gmail.com"
] |
kenvanharen@gmail.com
|
6e2124708a83e98ff77c0a59c40e0542ef09c006
|
ea5bc4fedbc076ce20fc51b0a6c0a231b1301fc0
|
/tests/test_topchef_client_end_to_end.py
|
8205f191f3715f810b426de384416170c960fbf3
|
[] |
no_license
|
TopChef/NMRClient
|
57f1c692014291aebcd6febf30d8f5d1bb4d8ec7
|
40d1ae3f6bc585ef3707c01f46d8bfbe576bd279
|
refs/heads/master
| 2020-09-18T19:14:38.566893
| 2016-09-06T17:27:45
| 2016-09-06T17:27:45
| 67,529,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
import sys
import time
LIBRARY_PATH = '/opt/topspin/exp/stan/nmr/py/user'
sys.path.append(LIBRARY_PATH)
from topchef_client import NetworkManager
from topchef_client import TopChefService
True = "1"
False = "0"
server_address = 'http://192.168.1.216/dev'
adder_service_id = '1cb40868-101f-11d9-9a55-000cf18a2ce6'
network = NetworkManager(server_address)
service = TopChefService(adder_service_id, network)
assert (service.has_timed_out() == False)
parameters = {'value': 10}
job = service.request_job(parameters)
result = service.get_result_for_job(job, polling_interval=5, timeout=30)
MSG(str(result))
|
[
"michalkononenko@gmail.com"
] |
michalkononenko@gmail.com
|
619937df9bc5ad69bb41fd822a6d57377e711e63
|
d659fb0db310793b918640fdb673b9bd755578bc
|
/third_party/text_analysis.py
|
77b623298203a576c583c2364375d44483e1c9d1
|
[
"MIT"
] |
permissive
|
astuk/python-snippets
|
562bdcdb23c537650a767fb0369388d9530a67ae
|
212f63f820b6f5842f74913ed08da18d41dfe7a4
|
refs/heads/master
| 2023-06-18T04:29:48.111537
| 2021-07-14T10:55:59
| 2021-07-14T10:55:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from pathlib import Path
from textblob import TextBlob
path = Path("src/text.txt")
with open(path) as f:
text = f.read()
blob = TextBlob(text)
for sentence in blob.sentences:
print(sentence.sentiment.polarity)
|
[
"f2dahlitz@freenet.de"
] |
f2dahlitz@freenet.de
|
00d78c4a4adeb9bd9683c99726c067a3d7829696
|
80d9806dfb09858875c77c285a3ce1ce496dbbcd
|
/setup.py
|
d418d3608286c45bad5380aed630c48c76ffa793
|
[] |
no_license
|
wkentaro/chainer-cyclegan
|
86e9a5a3c8aae03caf37940209aa432738478989
|
64b811773802e4d755eebb5110735f8953beb220
|
refs/heads/master
| 2021-10-23T15:33:26.856556
| 2019-03-18T13:00:07
| 2019-03-18T13:00:07
| 114,517,994
| 13
| 4
| null | 2018-03-30T14:40:41
| 2017-12-17T07:32:05
|
Python
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
import subprocess
import sys
from setuptools import find_packages
from setuptools import setup
version = '1.2.5'
if sys.argv[-1] == 'release':
commands = [
'python setup.py sdist upload',
'git tag v{0}'.format(version),
'git push origin master --tag',
]
for cmd in commands:
subprocess.call(cmd, shell=True)
sys.exit(0)
try:
import cv2 # NOQA
except ImportError:
print('Please install OpenCV.')
quit(1)
install_requires = []
with open('requirements.txt') as f:
for req in f:
if req.startswith('-e'):
continue
install_requires.append(req.strip())
setup(
name='chainer-cyclegan',
description='Chainer Implementation of CycleGAN.',
version=version,
packages=find_packages(),
install_requires=install_requires,
author='Kentaro Wada',
author_email='www.kentaro.wada@gmail.com',
url='https://github.com/wkentaro/chainer-cyclegan',
license='MIT',
)
|
[
"www.kentaro.wada@gmail.com"
] |
www.kentaro.wada@gmail.com
|
0305bffab91530450d963a852da22b235312750e
|
41d1e085dc3ec6c329b8d6443035e1e8a1c93bcc
|
/gridded/tests/test_pysgrid/test_processing_2d.py
|
dc315187cf10aeba2e0c9777265a7f8e7304e614
|
[
"Unlicense"
] |
permissive
|
Ocean1125/gridded
|
9252d3d89ecacc55c59a0ecf6fd60fe6ac0afd6e
|
90cca5edf4c8d9a47914c2b6d6f78180d9c280a5
|
refs/heads/master
| 2023-05-15T13:21:34.144583
| 2021-06-03T21:50:01
| 2021-06-03T21:50:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
"""
Created on Apr 3, 2015
@author: ayan
"""
from __future__ import (absolute_import, division, print_function)
import pytest
import numpy as np
from gridded.pysgrid.processing_2d import avg_to_cell_center, rotate_vectors, vector_sum
def test_vector_sum():
x_vector = np.array([3, 5, 9, 11])
y_vector = np.array([4, 12, 40, 60])
sum_result = vector_sum(x_vector, y_vector)
expected = np.array([5, 13, 41, 61])
np.testing.assert_almost_equal(sum_result, expected)
@pytest.fixture
def rotate_vectors_data():
x = np.array([3, 5, 9, 11])
y = np.array([4, 12, 40, 60])
angles_simple = np.array([0, np.pi / 2, 0, np.pi / 2])
angles_complex = np.array([np.pi / 6, np.pi / 5,
np.pi / 4, np.pi / 3])
return x, y, angles_simple, angles_complex
def test_vector_rotation_simple(rotate_vectors_data):
x, y, angles_simple, angles_complex = rotate_vectors_data
rotated_x, rotated_y = rotate_vectors(x, y, angles_simple)
expected_x = np.array([3, -12, 9, -60])
expected_y = np.array([4, 5, 40, 11])
np.testing.assert_almost_equal(rotated_x, expected_x, decimal=3)
np.testing.assert_almost_equal(rotated_y, expected_y, decimal=3)
def test_vector_rotation_complex(rotate_vectors_data):
x, y, angles_simple, angles_complex = rotate_vectors_data
rotated_x, rotated_y = rotate_vectors(x, y, angles_complex)
expected_x = np.array([0.5981, -3.0083, -21.9203, -46.4615])
expected_y = np.array([4.9641, 12.6471, 34.6482, 39.5263])
np.testing.assert_almost_equal(rotated_x, expected_x, decimal=3)
np.testing.assert_almost_equal(rotated_y, expected_y, decimal=3)
@pytest.fixture
def avg_center_data():
return np.array([[4, 5, 9, 10], [8, 39, 41, 20], [5, 29, 18, 71]])
def test_no_transpose(avg_center_data):
data = avg_center_data
avg_result = avg_to_cell_center(data, 1)
expected = np.array([[4.5, 7, 9.5],
[23.5, 40, 30.5],
[17, 23.5, 44.5]])
np.testing.assert_almost_equal(avg_result, expected, decimal=3)
def test_with_transpose(avg_center_data):
data = avg_center_data
avg_result = avg_to_cell_center(data, 0)
expected = np.array([[6, 22, 25, 15], [6.5, 34, 29.5, 45.5]])
np.testing.assert_almost_equal(avg_result, expected, decimal=3)
|
[
"Chris.Barker@noaa.gov"
] |
Chris.Barker@noaa.gov
|
8852a16d08a5a003bc41bff9adedcf3cc48f8f8d
|
ec34cd789c188573987741d478addc3c4a576f22
|
/BIOMD0000000500/model.py
|
11dfd41813aa524d632ef5d5903df4221ed7bffd
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
biomodels/BIOMD0000000500
|
49362f1fffbb49e07d8077a5aab81e3ec7072ab5
|
2e28e1c78e37f1bdb716300a0bf902c6e8a0056e
|
refs/heads/master
| 2018-12-31T19:25:22.954078
| 2014-10-16T05:27:55
| 2014-10-16T05:27:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000500.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
[
"stanleygu@gmail.com"
] |
stanleygu@gmail.com
|
59835f76410fdd430aaafe095baf7b9c493635fe
|
f848ebf1adb25cc6d188f43fb02c06dad1b01651
|
/api/employee.py
|
71e0dada8044141e1a869937b0bb167c0e182676
|
[] |
no_license
|
miao88318/day03_apiTestIHRM
|
673320c724d9a661fa9ed120a62e0d82118719d9
|
213e4a498055e693993b21ca2bc7942af2a25c74
|
refs/heads/master
| 2022-07-28T04:39:05.390142
| 2020-05-21T07:06:23
| 2020-05-21T07:06:23
| 265,769,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
# 导包
import requests
# 创建员工的api类
class TestEmployeeApi:
def __init__(self):
self.login_url = "http://ihrm-test.itheima.net" + "/api/sys/login"
self.emp_url = "http://ihrm-test.itheima.net" + "/api/sys/user"
def add_emp(self, headers, username, mobile):
response = requests.post(self.emp_url,
json={
"username": username,
"mobile": mobile,
"timeOfEntry": "2020-05-05",
"formOfEmployment": 1,
"workNumber": "123433",
"departmentName": "测试部",
"departmentId": "1063678149528784896",
"correctionTime": "2020-05-17T16:00:00.000Z"
}, headers=headers)
return response
def query_emp(self, emp_id, headers):
query_url = self.emp_url + "/" + emp_id
response = requests.get(query_url, headers=headers)
return response
def modify_emp(self,emp_id, headers, username):
modify_url = self.emp_url + "/" + emp_id
response = requests.put(url=modify_url,json={"username":username},
headers=headers)
return response
def delete_emp(self, emp_id, headers):
delete_url = self.emp_url + "/" + emp_id
response = requests.delete(url=delete_url, headers=headers)
return response
|
[
"stan@stan.com"
] |
stan@stan.com
|
8d7f11c56fe6bb5b741355a5dfad0460a1ea89f4
|
10b4db1d4f894897b5ee435780bddfdedd91caf7
|
/thrift/compiler/test/fixtures/basic-annotations/gen-py3/module/types.pyi
|
d60450c59a3809ab28d5574573d39ae4ae414318
|
[
"Apache-2.0"
] |
permissive
|
SammyEnigma/fbthrift
|
04f4aca77a64c65f3d4537338f7fbf3b8214e06a
|
31d7b90e30de5f90891e4a845f6704e4c13748df
|
refs/heads/master
| 2021-11-11T16:59:04.628193
| 2021-10-12T11:19:22
| 2021-10-12T11:20:27
| 211,245,426
| 1
| 0
|
Apache-2.0
| 2021-07-15T21:12:07
| 2019-09-27T05:50:42
|
C++
|
UTF-8
|
Python
| false
| false
| 4,129
|
pyi
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import folly.iobuf as _fbthrift_iobuf
import thrift.py3.types
import thrift.py3.exceptions
from thrift.py3.types import __NotSet, NOTSET
import typing as _typing
from typing_extensions import Final
import sys
import itertools
__property__ = property
class MyEnum(thrift.py3.types.Enum):
MyValue1: MyEnum = ...
MyValue2: MyEnum = ...
DOMAIN: MyEnum = ...
class MyStructNestedAnnotation(thrift.py3.types.Struct, _typing.Hashable):
class __fbthrift_IsSet:
name: bool
pass
name: Final[str] = ...
def __init__(
self, *,
name: _typing.Optional[str]=None
) -> None: ...
def __call__(
self, *,
name: _typing.Union[str, __NotSet, None]=NOTSET
) -> MyStructNestedAnnotation: ...
def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['MyStructNestedAnnotation'], bytes]]: ...
def __hash__(self) -> int: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __lt__(self, other: 'MyStructNestedAnnotation') -> bool: ...
def __gt__(self, other: 'MyStructNestedAnnotation') -> bool: ...
def __le__(self, other: 'MyStructNestedAnnotation') -> bool: ...
def __ge__(self, other: 'MyStructNestedAnnotation') -> bool: ...
class MyStruct(thrift.py3.types.Struct, _typing.Hashable):
class __fbthrift_IsSet:
major: bool
package: bool
annotation_with_quote: bool
class_: bool
annotation_with_trailing_comma: bool
empty_annotations: bool
pass
major: Final[int] = ...
package: Final[str] = ...
annotation_with_quote: Final[str] = ...
class_: Final[str] = ...
annotation_with_trailing_comma: Final[str] = ...
empty_annotations: Final[str] = ...
def __init__(
self, *,
major: _typing.Optional[int]=None,
package: _typing.Optional[str]=None,
annotation_with_quote: _typing.Optional[str]=None,
class_: _typing.Optional[str]=None,
annotation_with_trailing_comma: _typing.Optional[str]=None,
empty_annotations: _typing.Optional[str]=None
) -> None: ...
def __call__(
self, *,
major: _typing.Union[int, __NotSet, None]=NOTSET,
package: _typing.Union[str, __NotSet, None]=NOTSET,
annotation_with_quote: _typing.Union[str, __NotSet, None]=NOTSET,
class_: _typing.Union[str, __NotSet, None]=NOTSET,
annotation_with_trailing_comma: _typing.Union[str, __NotSet, None]=NOTSET,
empty_annotations: _typing.Union[str, __NotSet, None]=NOTSET
) -> MyStruct: ...
def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['MyStruct'], bytes]]: ...
def __hash__(self) -> int: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __lt__(self, other: 'MyStruct') -> bool: ...
def __gt__(self, other: 'MyStruct') -> bool: ...
def __le__(self, other: 'MyStruct') -> bool: ...
def __ge__(self, other: 'MyStruct') -> bool: ...
class SecretStruct(thrift.py3.types.Struct, _typing.Hashable):
class __fbthrift_IsSet:
id: bool
password: bool
pass
id: Final[int] = ...
password: Final[str] = ...
def __init__(
self, *,
id: _typing.Optional[int]=None,
password: _typing.Optional[str]=None
) -> None: ...
def __call__(
self, *,
id: _typing.Union[int, __NotSet, None]=NOTSET,
password: _typing.Union[str, __NotSet, None]=NOTSET
) -> SecretStruct: ...
def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['SecretStruct'], bytes]]: ...
def __hash__(self) -> int: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __lt__(self, other: 'SecretStruct') -> bool: ...
def __gt__(self, other: 'SecretStruct') -> bool: ...
def __le__(self, other: 'SecretStruct') -> bool: ...
def __ge__(self, other: 'SecretStruct') -> bool: ...
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
f6bdb6fdae81f13cfe121cc6e8b2f81bffc9cc72
|
485cf3c70fcaa68689a2b690b6465f1d6bcf21bd
|
/Python3_Selenium3/第7章/7.28.py
|
8d5897a67392f8b8f40cc6a250867a33283293b2
|
[] |
no_license
|
lxz0503/study_20190608
|
5ffe08c4704bb00ad8d1980baf16b8f5e7135ff4
|
47c37798140883b8d6dc21ec5da5bc7a20988ce9
|
refs/heads/master
| 2022-12-23T17:23:45.039015
| 2021-06-23T14:50:19
| 2021-06-23T14:50:19
| 190,884,812
| 1
| 3
| null | 2022-12-15T23:17:33
| 2019-06-08T12:22:56
|
Python
|
GB18030
|
Python
| false
| false
| 455
|
py
|
###
###配套视频已出版,学习有疑问联系作者qq:2574674466###
###
#coding=utf-8
dict_1 = {'Name': 'Jack','Age':18,'Score':100}
print("操作字典元素之前,遍历并打印字典元素如下:")
for (key,value) in dict_1.items():
print(key + ":" + str(value))
dict_1.clear()
print("操作字典元素之后,遍历并打印字典元素如下:")
print(dict_1)
for (key,value) in dict_1.items():
print(key + ":" + str(value))
|
[
"lxz_20081025@163.com"
] |
lxz_20081025@163.com
|
f185fb7d2592d7b702fbb0aa041313972b43ce49
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/모듈과 패키지/외장함수_20200711174751.py
|
99b2fc1648509015a8491be7758fc5ff48cd8b55
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279
| 2021-02-25T12:02:04
| 2021-02-25T12:02:04
| 342,230,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# list of python modules라고 검색한다
# Python Module Index라는 페이지를 들어간다.
# # glob : 경로 내의 폴더 / 파일 목록 조회(윈도우 dir)
# import glob
# print(glob.glob("*.py")) # 확장자가 py 인 모든 파일
# os : 운영체제에서 제공하는 기본 기능
import os
print(os,get)
|
[
"sangha0719@gmail.com"
] |
sangha0719@gmail.com
|
0bbdd4bf7a5f32254ed7f31f8c35606cae64ef68
|
3e5ecad4d2f681f2f4f749109cc99deea1209ea4
|
/tf114/tf11_2_diabetes.py
|
25d2c7c7cc73f3820f81ab3f4d6d2093ecf8625e
|
[] |
no_license
|
SunghoonSeok/Study
|
f41ede390079037b2090e6df20e5fb38f2e59b8f
|
50f02b9c9bac904cd4f6923b41efabe524ff3d8a
|
refs/heads/master
| 2023-06-18T06:47:55.545323
| 2021-07-05T00:47:55
| 2021-07-05T00:47:55
| 324,866,762
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
from sklearn.datasets import load_diabetes
import tensorflow as tf
tf.compat.v1.set_random_seed(66)
dataset = load_diabetes()
x_data = dataset.data
y_data = dataset.target
y_data = y_data.reshape(-1,1)
print(x_data.shape, y_data.shape) # (442, 10) (442,1)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, train_size=0.8, shuffle=True, random_state=66)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x = tf.compat.v1.placeholder(tf.float32, shape=[None, 10])
y = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
y_true = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
y_pred = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.random.normal([10,1]), name='weight')
b = tf.Variable(tf.random.normal([1]), name='bias')
# hypothesis = x * w + b
hypothesis = tf.matmul(x, w) + b
cost = tf.reduce_mean(tf.square(hypothesis - y)) # loss='mse'
train = tf.train.AdamOptimizer(learning_rate=0.002).minimize(cost) # optimizer + train
from sklearn.metrics import r2_score
# r2 = r2_score(y_true, y_pred)
# with문 사용해서 자동으로 sess가 닫히도록 할수도 있다.
import numpy as np
with tf.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
for step in range(150001):
cost_val, w_val, b_val, hy_val, _ = sess.run([cost,w,b,hypothesis,train], feed_dict={x:x_train,y:y_train})
if step %20 == 0:
print(step, "loss :",cost_val) # epoch, loss
y_predict = sess.run([hypothesis], feed_dict={x:x_test,y:y_test})
y_predict = np.array(y_predict)
y_predict = y_predict.reshape(-1,1)
print(r2_score(y_test, y_predict))
# 0.5063167888110058
|
[
"76455292+SunghoonSeok@users.noreply.github.com"
] |
76455292+SunghoonSeok@users.noreply.github.com
|
f451b1b4faea36c7f6d7ca4ceec46e4a325c2715
|
4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5
|
/test/shop/ShopInfoModifyTest.py
|
bf78087f7fbec6a3ef4fc0ab81ee164682b8ea35
|
[] |
no_license
|
shijingyu/sunningAPI
|
241f33b0660dc84635ce39688fed499f5c57a5da
|
4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5
|
refs/heads/master
| 2020-04-24T22:15:11.584028
| 2019-02-24T06:41:20
| 2019-02-24T06:41:20
| 172,305,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
#!usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2014-8-22
@author: suning
'''
import sys
sys.path.append("../../../api-sdk-python")
import suning.api as api
a=api.ShopInfoModifyRequest()
a.placard = "心心相印"
a.telphone = "010-11255555"
f = a.getResponse()
print(f)
|
[
"945090896@qq.com"
] |
945090896@qq.com
|
d2a35ea2668ab07a1748e0d2a8759317926dfa88
|
02495eeb56c436d1dbf9f4700c43658d16ffe0ca
|
/03_P💀Spoopy/pylindrome/docker/app.py
|
098301d96aed87ab4fa1b8e3ec47ee7f45351bbd
|
[] |
no_license
|
ce8so9/csr-2020-tasks
|
906a55c14bca0f7a14b228cbce08a38f7d2271eb
|
cd6ca7f98a40d5e7eb41c61f5b293537188b85c4
|
refs/heads/master
| 2023-01-12T09:33:02.928645
| 2020-11-10T16:19:30
| 2020-11-10T16:19:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
#!/usr/bin/env python3
import subprocess
def sandbox(toexec):
return subprocess.check_output(["sudo", "-u", "sandbox", "python3", "-c", toexec]).decode().strip()
try:
code = input()[:100]
for bad in ['#', '"""', "'''"]:
code = code.replace(bad, "")
assert code == code[::-1]
exec(sandbox(code))
except:
print(open(__file__,"r").read())
|
[
"lukas@schauer.so"
] |
lukas@schauer.so
|
0c490d56b46e3f0a4b6cb6f26b399042af3e6b37
|
b7f45072d056b80ed49e6bcde91877d8576e970d
|
/ImageJ/py/load_blobs.py
|
3037ef28495247737fab6bf5bc930be4277273f8
|
[] |
no_license
|
jrminter/tips
|
128a18ee55655a13085c174d532c77bcea412754
|
f48f8b202f8bf9e36cb6d487a23208371c79718e
|
refs/heads/master
| 2022-06-14T08:46:28.972743
| 2022-05-30T19:29:28
| 2022-05-30T19:29:28
| 11,463,325
| 5
| 8
| null | 2019-12-18T16:24:02
| 2013-07-17T00:16:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
"""
load_blobs.py
"""
from ij import IJ
IJ.run("Close All")
imp = IJ.openImage("http://imagej.nih.gov/ij/images/blobs.gif")
imp.show()
|
[
"jrminter@gmail.com"
] |
jrminter@gmail.com
|
9634eb466219c63cc085cd1895ec57eb62ce0188
|
94ed98b2f4eec63be1510cc1555dad064bcc8f13
|
/example/mypackage/gui.py
|
a7104897b98fd8b34cd2a7ddc4d9a617212b18c5
|
[
"MIT"
] |
permissive
|
axju/setuptools_freeze
|
dae496e66e5c6dc5c3d28876a056c8ddd8b570d9
|
c1d16bd714f5aec36ea07202f1a466eb0573d839
|
refs/heads/master
| 2020-07-24T05:43:06.920994
| 2019-09-11T13:32:18
| 2019-09-11T13:32:18
| 207,817,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,654
|
py
|
import random
from tkinter import Tk, Label, Button, Entry, StringVar, DISABLED, NORMAL, END, W, E
class ConfiguratorGUI:
def __init__(self, master):
self.master = master
master.title("Guessing Game")
self.secret_number = random.randint(1, 100)
self.guess = None
self.num_guesses = 0
self.message = "Guess a number from 1 to 100"
self.label_text = StringVar()
self.label_text.set(self.message)
self.label = Label(master, textvariable=self.label_text)
vcmd = master.register(self.validate) # we have to wrap the command
self.entry = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.guess_button = Button(master, text="Guess", command=self.guess_number)
self.reset_button = Button(master, text="Play again", command=self.reset, state=DISABLED)
self.label.grid(row=0, column=0, columnspan=2, sticky=W+E)
self.entry.grid(row=1, column=0, columnspan=2, sticky=W+E)
self.guess_button.grid(row=2, column=0)
self.reset_button.grid(row=2, column=1)
def validate(self, new_text):
if not new_text: # the field is being cleared
self.guess = None
return True
try:
guess = int(new_text)
if 1 <= guess <= 100:
self.guess = guess
return True
else:
return False
except ValueError:
return False
def guess_number(self):
self.num_guesses += 1
if self.guess is None:
self.message = "Guess a number from 1 to 100"
elif self.guess == self.secret_number:
suffix = '' if self.num_guesses == 1 else 'es'
self.message = "Congratulations! You guessed the number after %d guess%s." % (self.num_guesses, suffix)
self.guess_button.configure(state=DISABLED)
self.reset_button.configure(state=NORMAL)
elif self.guess < self.secret_number:
self.message = "Too low! Guess again!"
else:
self.message = "Too high! Guess again!"
self.label_text.set(self.message)
def reset(self):
self.entry.delete(0, END)
self.secret_number = random.randint(1, 100)
self.guess = 0
self.num_guesses = 0
self.message = "Guess a number from 1 to 100"
self.label_text.set(self.message)
self.guess_button.configure(state=NORMAL)
self.reset_button.configure(state=DISABLED)
def main():
root = Tk()
ConfiguratorGUI(root)
root.mainloop()
if __name__ == '__main__':
main()
|
[
"axel.juraske@short-report.de"
] |
axel.juraske@short-report.de
|
7a9eacaaff1dee09c8f626968b2da5d9c9330251
|
881041fab1b4d05f1c5371efed2f9276037eb609
|
/tasks/airport-polygon/depositor.py
|
901985aae38ae47c1dac4ee3d0ad64212ad37cc1
|
[] |
no_license
|
ResidentMario/urban-physiology-nyc-catalog
|
b568f3b6ee1a887a50c4df23c488f50c92e30625
|
cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c
|
refs/heads/master
| 2021-01-02T22:43:09.073952
| 2017-08-06T18:27:22
| 2017-08-06T18:27:22
| 99,377,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
import requests
r = requests.get("https://data.cityofnewyork.us/api/geospatial/xfhz-rhsk?method=export&format=GeoJSON")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/airport-polygon/data.geojson", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/airport-polygon/data.geojson"]
|
[
"aleksey.bilogur@gmail.com"
] |
aleksey.bilogur@gmail.com
|
d996405520f5fadcbb45bb17b636f2011447af94
|
f5b5a6e3f844d849a05ff56c497638e607f940e0
|
/capitulo 05/05.02.py
|
3c052b435e84ceae4eef942fd6fc518631fd4e89
|
[] |
no_license
|
alexrogeriodj/Caixa-Eletronico-em-Python
|
9237fa2f7f8fab5f17b7dd008af215fb0aaed29f
|
96b5238437c88e89aed7a7b9c34b303e1e7d61e5
|
refs/heads/master
| 2020-09-06T21:47:36.169855
| 2019-11-09T00:22:14
| 2019-11-09T00:22:14
| 220,563,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2019
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Terceira edição - Janeiro/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem3\capítulo 05\05.02.py
# Descrição:
##############################################################################
x = 1
print(x)
x = 2
print(x)
x = 3
print(x)
|
[
"noreply@github.com"
] |
alexrogeriodj.noreply@github.com
|
5a5e52126f3d65f4e181b73cf8ef52d1509c7bbe
|
49800e971c605d74d0841a9bb07a618ad1fc6e49
|
/web/apps/nosari/urls.py
|
d7af7bec90c2311f11dcb3cfe7f3bacf8d6b4a99
|
[] |
no_license
|
cerfelix/AntiSARI
|
ab0c9bd96c8044cd806d26db7b6eea67cf008f70
|
8a217390c367d2af65fd373cbf5794eaa841efea
|
refs/heads/master
| 2020-12-22T10:12:25.454134
| 2020-01-29T09:50:13
| 2020-01-29T09:50:13
| 236,748,324
| 0
| 0
| null | 2020-01-28T14:02:09
| 2020-01-28T14:02:08
| null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
# _*_coding:utf-8_*_
"""
@ProjectName: AntiSARI
@Author: Javen Yan
@File: urls.py
@Software: PyCharm
@Time : 2020/1/28 下午1:58
"""
from web.apps.nosari.controller import NoSariHandler
urlpatterns = [
(r'', NoSariHandler)
]
|
[
"2023335616@qq.com"
] |
2023335616@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.