blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c22bf817ee148fbc70da528dfb8cff5991cedb0
|
f12fac0dd5c9c9eeedff16377d1f57a3cd02ef32
|
/Python游戏编程入门/02.初识Pygame:Pie游戏/绘制弧形.py
|
8031255f9f3580e0e721331544bdda1f67ae9357
|
[] |
no_license
|
SesameMing/PythonPygame
|
61fe09a38d1729963b86f348b349572760676195
|
ca0554427cd30838d56630e8b1e04aa0e26834a1
|
refs/heads/master
| 2020-12-07T21:23:56.271193
| 2016-11-25T06:38:06
| 2016-11-25T06:38:06
| 66,639,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
#!/usr/bin/env python3
# -*-coding:utf-8-*-
# Author:SesameMing <blog.v-api.cn>
# Email:admin@v-api.cn
# Time:2016-11-25 12:51
"""Pygame demo: draws a thick half-circle arc on a blue background."""
import sys
import math
import pygame
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Drawing Arcs")
# Main loop: redraw every frame; exit on window close or any key press.
while True:
    for event in pygame.event.get():
        if event.type in (QUIT, KEYDOWN):
            sys.exit()
    screen.fill((0, 0, 200))  # blue background
    color = 255, 0, 255  # magenta arc
    position = 200, 150, 200, 200  # bounding rect: x, y, width, height
    start_angle = math.radians(0)  # arc spans 0..180 degrees (upper half)
    end_angle = math.radians(180)
    width = 8  # stroke thickness in pixels
    pygame.draw.arc(screen, color, position, start_angle, end_angle, width)
    pygame.display.update()
|
[
"admin@v-api.cn"
] |
admin@v-api.cn
|
04e3a1cfd126c0710557c5f5944b73240af4deec
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/vb25/plugins/TexSwirl.py
|
9ca7e67f86475efdb3be99c3fa816a582b516141
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,575
|
py
|
#
# V-Ray/Blender
#
# http://vray.cgdo.ru
#
# Author: Andrey M. Izrantsev (aka bdancer)
# E-Mail: izrantsev@cgdo.ru
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.
#
# Blender module
import bpy
from bpy.props import *
# V-Ray/Blender modules
from vb25.utils import *
from vb25.ui import ui
from vb25.plugins import *
from vb25.texture import *
from vb25.uvwgen import *
# Plugin registration metadata consumed by the vb25 plugin framework.
TYPE = 'TEXTURE'
ID = 'TexSwirl'
PLUG = 'TexSwirl'  # V-Ray plugin name emitted into the scene file
NAME = 'Swirl'  # UI label
DESC = "TexSwirl"
PID = 15  # plugin ordering index
# Parameter names serialized by write(), in output order; 'uvwgen' is
# resolved via write_uvwgen() rather than read from the property group.
PARAMS = (
	'uvwgen',
	'color1',
	'color2',
	'swirl_intensity',
	'color_contrast',
	'swirl_amount',
	'constant_detail',
	'center_x',
	'center_y',
	'random_seed',
	'twist',
)
def add_properties(rna_pointer):
	"""Register the TexSwirl property group and attach it to *rna_pointer*.

	Creates a bpy PropertyGroup holding every TexSwirl texture parameter
	and exposes it as ``rna_pointer.TexSwirl``.
	"""
	class TexSwirl(bpy.types.PropertyGroup):
		pass
	bpy.utils.register_class(TexSwirl)

	rna_pointer.TexSwirl= PointerProperty(
		name= "TexSwirl",
		type= TexSwirl,
		description= "V-Ray TexSwirl settings"
	)

	# Shared keyword arguments: colors are clamped to [0, 1]; scalar
	# parameters accept [0, 100] with a soft UI range of [0, 10].
	color_args = dict(subtype='COLOR', min=0.0, max=1.0, soft_min=0.0, soft_max=1.0)
	scalar_args = dict(min=0.0, max=100.0, soft_min=0.0, soft_max=10.0, precision=3)

	TexSwirl.color1= FloatVectorProperty(name="Color 1", description="First color", default=(1,1,1), **color_args)
	TexSwirl.color2= FloatVectorProperty(name="Color 2", description="Second color", default=(0,0,0), **color_args)
	TexSwirl.swirl_intensity= FloatProperty(name="Swirl Intensity", description="Swirl Intensity", default=2, **scalar_args)
	TexSwirl.color_contrast= FloatProperty(name="Color Contrast", description="Color Contrast", default=0.4, **scalar_args)
	TexSwirl.swirl_amount= FloatProperty(name="Swirl Amount", description="Swirl Amount", default=1, **scalar_args)
	TexSwirl.constant_detail= IntProperty(name="Constant Detail", description="Constant Detail", min=0, max=100, soft_min=0, soft_max=10, default=4)
	TexSwirl.center_x= FloatProperty(name="Center X", description="Center Position X", default=-0.5, **scalar_args)
	TexSwirl.center_y= FloatProperty(name="Center Y", description="Center Position Y", default=-0.5, **scalar_args)
	TexSwirl.random_seed= FloatProperty(name="Random Seed", description="Random Seed", default=0, **scalar_args)
	TexSwirl.twist= FloatProperty(name="Twist", description="Twist", default=1, **scalar_args)
'''
OUTPUT
'''
def write(bus):
	"""Serialize the TexSwirl plugin block for the current texture.

	Writes a ``TexSwirl <name> { ... }`` section to the textures file and
	returns the texture name so callers can reference the plugin.
	"""
	scene= bus['scene']
	output= bus['files']['textures']
	slot= bus['mtex']['slot']
	texture= bus['mtex']['texture']
	tex_name= bus['mtex']['name']

	uvwgen= write_uvwgen(bus)
	prop_group= getattr(texture.vray, PLUG)

	output.write("\n%s %s {"%(PLUG, tex_name))
	PLUGINS['TEXTURE']['TexCommon'].write(bus)
	for param in PARAMS:
		# 'uvwgen' comes from the generated mapping plugin, not the property group.
		value= uvwgen if param == 'uvwgen' else getattr(prop_group, param)
		output.write("\n\t%s= %s;"%(param, a(scene, value)))
	output.write("\n}\n")
	return tex_name
'''
GUI
'''
class VRAY_TP_TexSwirl(ui.VRayTexturePanel, bpy.types.Panel):
	"""Texture panel exposing the TexSwirl parameters."""
	bl_label = NAME
	COMPAT_ENGINES = {'VRAY_RENDER','VRAY_RENDER_PREVIEW'}

	@classmethod
	def poll(cls, context):
		# Show only for V-Ray textures of the TexSwirl type under a V-Ray engine.
		texture = context.texture
		return texture and texture.type == 'VRAY' and texture.vray.type == ID and ui.engine_poll(cls, context)

	def draw(self, context):
		layout = self.layout
		wide_ui = context.region.width > ui.narrowui
		swirl = getattr(context.texture.vray, PLUG)

		# Colors side by side when the panel is wide.
		split = layout.split()
		col = split.column()
		col.prop(swirl, 'color1', text="")
		if wide_ui:
			col = split.column()
		col.prop(swirl, 'color2', text="")

		split = layout.split()
		col = split.column(align=True)
		col.prop(swirl, 'swirl_amount', text="Amount")
		col.prop(swirl, 'swirl_intensity', text="Intensity")
		col.prop(swirl, 'color_contrast', text="Color Contrast")
		if not wide_ui:
			split = layout.split()
		col = split.column(align=True)
		col.prop(swirl, 'twist')
		col.prop(swirl, 'constant_detail')

		split = layout.split()
		row = split.row(align=True)
		row.prop(swirl, 'center_x')
		row.prop(swirl, 'center_y')

		split = layout.split()
		col = split.column()
		col.prop(swirl, 'random_seed', text="Seed")
def GetRegClasses():
	"""Classes this module registers with Blender."""
	return (VRAY_TP_TexSwirl,)

def register():
	"""Register all of this module's UI classes with bpy."""
	for cls in GetRegClasses():
		bpy.utils.register_class(cls)

def unregister():
	"""Unregister everything that register() registered."""
	for cls in GetRegClasses():
		bpy.utils.unregister_class(cls)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
c04479133e596d0015f9df6569bf7d2c2283e6d1
|
b23c6c02d9b54c987bca2e36c3506cf80fa28239
|
/python databse connectivity progs/bind variable.py
|
a9bf8a8d9dcc71bd722251121197416765b6ba4e
|
[] |
no_license
|
nishikaverma/Python_progs
|
21190c88460a79f5ce20bb25d1b35f732fadd642
|
78f0cadde80b85356b4cb7ba518313094715aaa5
|
refs/heads/master
| 2022-06-12T14:54:03.442837
| 2020-05-08T10:28:58
| 2020-05-08T10:28:58
| 262,293,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
import cx_Oracle

# Demo: list the Books table, insert one row using positional bind
# variables (:1, :2), commit, and list the table again.
conn = None  # pre-initialize so the finally block never hits a NameError
cur = None
try:
    conn = cx_Oracle.connect("system/oracle123@localhost/orcl")
    print("connection established")
    cur = conn.cursor()
    print("cursor created!")
    print("***********************")
    cur.execute("Select Book_name,Book_price from Books")
    for x in cur:
        print(x)
    print("***********************")
    name = input("enter book name : ")
    price = int(input("enter book price"))
    # Bind variables keep the SQL text constant and avoid SQL injection.
    cur.execute("Insert into Books (Book_name,Book_price)values(:1,:2)", (name, price))
    n = cur.rowcount
    print(n, 'rows inserted')
    conn.commit()
    cur.execute("Select Book_name,Book_price from Books")
    for x in cur:
        print(x)
    print("************************")
except cx_Oracle.DatabaseError as e:
    print("Error in connection: ", e)
finally:
    # Original crashed here with NameError when connect() failed (conn
    # undefined) and when cursor() failed (cur undefined); guard each.
    if cur is not None:
        cur.close()
        print("cursor closed!")
    if conn is not None:
        conn.close()
        print("connection closed!")
|
[
"nishika.verma@live.com"
] |
nishika.verma@live.com
|
b48a2e29d81c5d7ddbf5cc76cd714fe6c1483872
|
9e27f91194541eb36da07420efa53c5c417e8999
|
/twilio/twiml/messaging_response.py
|
abb58ff2c6d33ad1d66998d8f9520dd3786f329a
|
[] |
no_license
|
iosmichael/flask-admin-dashboard
|
0eeab96add99430828306b691e012ac9beb957ea
|
396d687fd9144d3b0ac04d8047ecf726f7c18fbd
|
refs/heads/master
| 2020-03-24T05:55:42.200377
| 2018-09-17T20:33:42
| 2018-09-17T20:33:42
| 142,508,888
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
import json
from admin.twilio.twiml import (
TwiML,
format_language,
)
class MessagingResponse(TwiML):
    """ <Response> TwiML for Messages """

    def __init__(self, **kwargs):
        super(MessagingResponse, self).__init__(**kwargs)
        self.name = 'Response'

    def message(self, body=None, to=None, from_=None, action=None, method=None,
                status_callback=None, **kwargs):
        """
        Create a <Message> element and nest it under this response.

        :param body: Message Body
        :param to: Phone Number to send Message to
        :param from_: Phone Number to send Message from
        :param action: Action URL
        :param method: Action URL Method
        :param status_callback: Status callback URL. Deprecated in favor of action.
        :param kwargs: additional attributes
        :returns: <Message> element
        """
        element = Message(body=body, to=to, from_=from_, action=action,
                          method=method, status_callback=status_callback, **kwargs)
        return self.nest(element)

    def redirect(self, url, method=None, **kwargs):
        """
        Create a <Redirect> element and nest it under this response.

        :param url: Redirect URL
        :param method: Redirect URL method
        :param kwargs: additional attributes
        :returns: <Redirect> element
        """
        return self.nest(Redirect(url, method=method, **kwargs))
class Redirect(TwiML):
    """ <Redirect> TwiML Verb """

    def __init__(self, url, **kwargs):
        super(Redirect, self).__init__(**kwargs)
        # The element's text content is the redirect target URL.
        self.name, self.value = 'Redirect', url
class Message(TwiML):
    """ <Message> TwiML Verb """

    def __init__(self, body=None, **kwargs):
        super(Message, self).__init__(**kwargs)
        self.name = 'Message'
        # An inline body becomes the element text; alternatively callers
        # can nest a <Body> child via body().
        if body:
            self.value = body

    def body(self, message, **kwargs):
        """
        Create a nested <Body> element carrying *message*.

        :param message: Message Body
        :param kwargs: additional attributes
        :returns: <Body> element
        """
        return self.nest(Body(message, **kwargs))

    def media(self, url, **kwargs):
        """
        Create a nested <Media> element pointing at *url*.

        :param url: Media URL
        :param kwargs: additional attributes
        :returns: <Media> element
        """
        return self.nest(Media(url, **kwargs))
class Media(TwiML):
    """ <Media> TwiML Noun """

    def __init__(self, url, **kwargs):
        super(Media, self).__init__(**kwargs)
        # The element's text content is the media URL.
        self.name, self.value = 'Media', url
class Body(TwiML):
    """ <Body> TwiML Noun """

    def __init__(self, message, **kwargs):
        super(Body, self).__init__(**kwargs)
        # The element's text content is the message body.
        self.name, self.value = 'Body', message
|
[
"michaelliu@iresearch.com.cn"
] |
michaelliu@iresearch.com.cn
|
cbd142b626698fe1debd6ecef0822cc0d7b13f7f
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_150/ch50_2020_04_13_03_25_44_929209.py
|
a262c1522f55ac719f56e8c2e06b6e69fde73ed5
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
def junta_nome_sobrenome(nome, sobrenome):
    """Join first and last names pairwise into "first last" strings.

    Pairs element i of *nome* with element i of *sobrenome*; extra
    elements of the longer list are ignored (zip semantics, matching
    the original index-bounded while loop).

    :param nome: list of first names
    :param sobrenome: list of last names
    :returns: list of "nome sobrenome" strings
    """
    # Idiomatic replacement for the manual while/index loop.
    return [f"{n} {s}" for n, s in zip(nome, sobrenome)]
|
[
"you@example.com"
] |
you@example.com
|
45f343096530fa01c5f2708f14403031fa6baa1f
|
5332fef91e044555e605bb37cbef7c4afeaaadb0
|
/hy-data-analysis-with-python-2020/part06-e07_binding_sites/src/binding_sites.py
|
6baad43f425d059dd9d258e457e1d88a1b708b0e
|
[] |
no_license
|
nopomi/hy-data-analysis-python-2019
|
f3baa96bbe9b6ee7f0b3e6f6b8b0f3adfc3b6cc8
|
464685cb377cfdeee890a008fbfbd9ed6e3bcfd0
|
refs/heads/master
| 2021-07-10T16:16:56.592448
| 2020-08-16T18:27:38
| 2020-08-16T18:27:38
| 185,044,621
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,379
|
py
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import scipy
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import accuracy_score
from sklearn.metrics import pairwise_distances
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
import scipy.spatial as sp
import scipy.cluster.hierarchy as hc
def find_permutation(n_clusters, real_labels, labels):
    """Map each cluster id to the most common true label inside it.

    :param n_clusters: number of clusters (cluster ids are 0..n_clusters-1)
    :param real_labels: array of ground-truth labels, aligned with *labels*
    :param labels: array of cluster assignments from the clustering model
    :returns: list where entry i is the majority true label of cluster i
    """
    permutation = []
    for i in range(n_clusters):
        idx = labels == i
        # Majority label of the cluster.  np.unique sorts its values and
        # argmax returns the first maximum, so ties resolve to the smallest
        # label -- the same tie-break scipy.stats.mode used.  This replaces
        # scipy.stats.mode(...)[0][0], which raises on SciPy >= 1.11 where
        # mode() returns scalars by default.
        values, counts = np.unique(real_labels[idx], return_counts=True)
        permutation.append(values[np.argmax(counts)])
    return permutation
# Canonical nucleotide ordering used for the integer encoding.
_NUCLEOTIDES = 'ACGT'

def toint(x):
    """Encode nucleotide *x* as its index in 'ACGT' (-1 if not found)."""
    return _NUCLEOTIDES.find(x)
def get_features_and_labels(filename):
    """Load a TSV of DNA sequences (column X) and labels (column y).

    Each sequence is integer-encoded character by character via the
    position of the nucleotide in 'ACGT'.

    :returns: tuple (features array of shape (n, seq_len), labels array)
    """
    frame = pd.read_csv(filename, sep="\t")
    encode = 'ACGT'.find  # same mapping as toint()
    features = [[encode(ch) for ch in seq] for seq in frame["X"]]
    return (np.array(features), np.array(frame["y"]))
def plot(distances, method='average', affinity='euclidean'):
    """Show a seaborn clustermap of a precomputed distance matrix.

    The same linkage matrix is used for rows and columns since the
    distance matrix is symmetric.
    """
    linkage_matrix = hc.linkage(sp.distance.squareform(distances), method=method)
    grid = sns.clustermap(distances, row_linkage=linkage_matrix, col_linkage=linkage_matrix)
    grid.fig.suptitle(f"Hierarchical clustering using {method} linkage and {affinity} affinity")
    plt.show()
def cluster_euclidean(filename):
    """Cluster the sequences with Euclidean affinity; return accuracy vs. y."""
    X, y = get_features_and_labels(filename)
    model = AgglomerativeClustering(linkage="average", affinity="euclidean")
    model.fit(X)
    # Rename cluster ids to the majority true label before scoring.
    mapping = find_permutation(2, y, model.labels_)
    relabeled = [mapping[cluster] for cluster in model.labels_]
    score = accuracy_score(y, relabeled)
    distances = pairwise_distances(X, metric="euclidean")
    # plot(distances)
    return score
def cluster_hamming(filename):
    """Cluster on a precomputed Hamming distance matrix; return accuracy."""
    X, y = get_features_and_labels(filename)
    distances = pairwise_distances(X, metric="hamming")
    model = AgglomerativeClustering(affinity="precomputed", linkage="average")
    model.fit_predict(distances)
    # Rename cluster ids to the majority true label before scoring.
    mapping = find_permutation(2, y, model.labels_)
    relabeled = [mapping[cluster] for cluster in model.labels_]
    score = accuracy_score(y, relabeled)
    # plot(distances, method="average", affinity="hamming")
    return score
def main():
    """Report clustering accuracy for both affinity choices."""
    euclidean_score = cluster_euclidean("src/data.seq")
    print("Accuracy score with Euclidean affinity is", euclidean_score)
    hamming_score = cluster_hamming("src/data.seq")
    print("Accuracy score with Hamming affinity is", hamming_score)

if __name__ == "__main__":
    main()
|
[
"miska.noponen@gmail.com"
] |
miska.noponen@gmail.com
|
2ee2ccec5dbf7843302c65bae409bb7fdcc29b2a
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_noisy3325.py
|
ba3c7f7f745af20e6283d8398fd4aeb577461651
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,323
|
py
|
# qubit number=4
# total number=44
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings; the result string is reversed."""
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f over n control qubits and one target.

    For every n-bit pattern with f(pattern) == "1", a multi-controlled
    Toffoli flips the target; X gates temporarily map the pattern's zero
    bits onto the all-ones control condition and are undone afterwards.
    NOTE: uses multi_control_toffoli_gate in 'noancilla' mode.
    https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        pattern = np.binary_repr(i, n)
        if f(pattern) != "1":
            continue
        zero_bits = [j for j in range(n) if pattern[j] == "0"]
        for j in zero_bits:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in zero_bits:
            oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the benchmark circuit on n qubits and measure them all.

    The layout resembles a Deutsch-Jozsa circuit (H layer, oracle for f,
    H layer) with extra auto-generated gates interleaved; each
    '# number=N' tag identifies the generation step that added the gate,
    so the exact gate order is intentional.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=13
    prog.h(input_qubit[3]) # number=23
    prog.cz(input_qubit[0],input_qubit[3]) # number=24
    prog.y(input_qubit[1]) # number=37
    prog.h(input_qubit[3]) # number=25
    prog.x(input_qubit[3]) # number=18
    prog.cx(input_qubit[3],input_qubit[1]) # number=40
    prog.cx(input_qubit[0],input_qubit[3]) # number=19
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5
    # Oracle over the first n-1 qubits with the last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=32
    prog.h(input_qubit[0]) # number=41
    prog.cz(input_qubit[3],input_qubit[0]) # number=42
    prog.h(input_qubit[0]) # number=43
    prog.cx(input_qubit[3],input_qubit[0]) # number=26
    prog.z(input_qubit[3]) # number=27
    prog.h(input_qubit[0]) # number=29
    prog.cz(input_qubit[3],input_qubit[0]) # number=30
    prog.h(input_qubit[0]) # number=31
    prog.h(input_qubit[0]) # number=33
    prog.cz(input_qubit[3],input_qubit[0]) # number=34
    prog.h(input_qubit[0]) # number=35
    prog.h(input_qubit[2]) # number=36
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.y(input_qubit[2]) # number=38
    prog.y(input_qubit[2]) # number=39
    # circuit end
    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Oracle input: f(rep) = (a . rep) xor b, built from the helpers above.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the mock noisy backend, then transpile for it separately.
    backend = FakeVigo()
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump measured counts plus the transpiled circuit for offline comparison.
    writefile = open("../data/startQiskit_noisy3325.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
deaa0857f040e4558c3a3aa27b0b1ff32bf995cc
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/CJ_16_1/16_1_3_ka_ya_c.py
|
7735ad455887347c1c5a1e4c3582e3531bafa93a
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
def solve(n, fs):
    """Return the answer for one case of a 'BFFs'-style problem
    (Code Jam 2016 R1A C, presumably -- n kids, kid i's best friend is
    fs[i], 1-based): the largest circle is the longer of (a) the longest
    cycle in the friendship graph and (b) the total length of all mutual
    pairs (2-cycles) each extended by its longest incoming chains.
    """
    fs = [f-1 for f in fs]  # convert to 0-based indices
    # lp[p] = (cycle_length, best_incoming_tail) for nodes on a cycle.
    lp = [None for p in xrange(n)]
    # Pass 1: walk from every node; if the walk returns to its start, the
    # start lies on a cycle of length cnt -- mark the whole cycle.
    for i in xrange(n):
        chk = [False for p in xrange(n)]
        p = i
        cnt = 0
        while not chk[p] and not lp[p]:
            chk[p] = True
            p = fs[p]
            cnt += 1
        if p == i:
            while not lp[p]:
                lp[p] = (cnt, 0)
                p = fs[p]
    # Pass 2: for each cycle node, record the longest tail leading into it.
    for i in xrange(n):
        p = i
        cnt = 0
        while not lp[p]:
            p = fs[p]
            cnt += 1
        l, b = lp[p]
        if cnt > b:
            lp[p] = (l, cnt)
    # Combine: 2-cycles plus their tails can all be chained together (tmp);
    # each pair is counted from both of its members, hence tmp / 2 below.
    # Longer cycles cannot take tails, so they compete with their bare length.
    res = 0
    tmp = 0
    for i in xrange(n):
        if lp[i]:
            l, b = lp[i]
            if l == 2:
                j = fs[i]
                _, bj = lp[j]
                tmp += l + b + bj
            else:
                if l > res:
                    res = l
    if tmp / 2 > res:
        res = tmp / 2
    return res
# Python 2 driver: read T cases; each case is N followed by the list Fs
# of 1-based best-friend indices, then print in Code Jam output format.
T = input()
for i in xrange(1, T+1):
    N = input()
    Fs = map(int, raw_input().split())
    print 'Case #{}: {}'.format(i, solve(N, Fs))
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
9646ac4cc55d9a5e30e41d7546f3ca1df7b888f9
|
f0d9ba8456cdad2b2fa711fa8975b41da7af1784
|
/worms/tests/__init__.py
|
2b9503765bab2d60bb03f655ddf70c5209239ab5
|
[
"Apache-2.0"
] |
permissive
|
willsheffler/worms
|
f1d893d4f06b421abdd4d1e526b43c2e132e19a2
|
27993e33a43474d647ecd8277b210d4206858f0b
|
refs/heads/master
| 2023-04-08T01:18:33.656774
| 2022-06-09T20:04:55
| 2022-06-09T20:04:55
| 118,678,808
| 6
| 5
|
NOASSERTION
| 2021-10-05T22:28:24
| 2018-01-23T22:30:45
|
Python
|
UTF-8
|
Python
| false
| false
| 670
|
py
|
# -*- coding: utf-8 -*-
"""Unit test package for worms."""
import os
import pytest
# Feature-detect optional heavy dependencies and expose decorators that
# either pass a test through unchanged (identity lambda) or skip it when
# the dependency is missing.
try:
    import pyrosetta
    HAVE_PYROSETTA = True
    only_if_pyrosetta = lambda x: x  # identity: dependency available
    try:
        import pyrosetta.distributed
        HAVE_PYROSETTA_DISTRIBUTED = True
        only_if_pyrosetta_distributed = lambda x: x
    except ImportError:
        HAVE_PYROSETTA_DISTRIBUTED = False
        only_if_pyrosetta_distributed = pytest.mark.skip
except ImportError:
    HAVE_PYROSETTA = HAVE_PYROSETTA_DISTRIBUTED = False
    only_if_pyrosetta = only_if_pyrosetta_distributed = pytest.mark.skip
only_if_jit = lambda x: x
# Honor the environment switch that disables the numba JIT (e.g. in CI).
if "NUMBA_DISABLE_JIT" in os.environ:
    only_if_jit = pytest.mark.skip
|
[
"willsheffler@gmail.com"
] |
willsheffler@gmail.com
|
297467e64e5b45612d4fe55253b3388b8442f79f
|
770d4df866b9e66a333f3ffeacdd659b8553923a
|
/results/0193/config.py
|
fbbe800c6116da5429a209d219fc7846de53d1e2
|
[] |
no_license
|
leojo/ResultsOverview
|
b2062244cbd81bc06b99963ae9b1695fa9718f90
|
a396abc7a5b4ab257150c0d37c40b646ebb13fcf
|
refs/heads/master
| 2020-03-20T19:52:37.217926
| 2018-08-05T12:50:27
| 2018-08-05T12:50:27
| 137,656,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,322
|
py
|
import os
import numpy as np
import waveUtils
class config(object):
    """Experiment configuration for the audio autoencoder runs.

    Hyper-parameters live as class attributes; instantiating the class
    eagerly loads and prepares the training audio for every source.
    """
    def __init__(self):
        self.prepare_data()

    # Bsub arguments
    bsub_mainfile = "main.py"
    bsub_processors = 4
    bsub_timeout = "4:00"
    bsub_memory = 8000

    # Epoch and batch config
    batch_size = 128
    latent_dim = 100
    epochs = 100
    epoch_updates = 100

    # Network structure
    input_s = 16000
    n_ae = 5
    n_conv_layers = 3
    n_deconv_layers = 3
    first_size = input_s // (2 ** n_deconv_layers)
    final_decoder_filter_size = 3

    # Model
    load_model = False
    model_path = os.path.join("models", "0103", "model")  # only used if load_model=True

    # Miscellaneous constants
    sample_rate = 8000
    reconstruction_mult = 1
    learning_rate_min = 1e-3
    learning_rate_max = 1e-3
    learning_rate_scaling_factor = 0  # controlls the shape of the scaling curve from max to min learning rate
    learning_rate = 1e-3  # legacy
    kl_loss_mult = 1e-7
    kl_extra_mult = 2
    kl_extra_exponent = 2
    keep_prob = 1
    use_square = False
    data_sources = ["sax-baritone", "violin"]
    data = None

    # Functions
    def prepare_data(self):
        """Ensure the audio for every configured source is loaded."""
        self.load_data()

    def load_and_prepare_audio(self, source):
        """Load, trim and downsample the wav files for one instrument source."""
        duration = self.input_s / float(self.sample_rate)
        audio_dir = os.path.join("wav_files", source)
        waves, original_sample_rate = waveUtils.loadAudioFiles(audio_dir)
        # Keep only the loudest window of each clip, then reduce its quality.
        trimmed = waveUtils.extractHighestMeanIntensities(
            waves, sample_rate=original_sample_rate, duration=duration)
        del waves
        prepared = waveUtils.reduceQuality(trimmed, self.sample_rate, duration)
        del trimmed
        return prepared

    def load_data(self):
        """Populate self.data once; subsequent calls are no-ops."""
        if self.data is None:
            self.data = [self.load_and_prepare_audio(source) for source in self.data_sources]

    def get_training_batch(self):
        """Return (mixtures, originals) arrays for one training batch.

        Each mixture averages one randomly chosen clip per source;
        originals keeps the unmixed clips in source order.
        """
        mixtures = []
        originals = []
        num_sources = len(self.data_sources)
        shape = self.data[0][0].shape
        for _ in range(self.batch_size):
            picks = [self.data[s][np.random.randint(len(self.data[s]))]
                     for s in range(num_sources)]
            mixture = sum(picks, np.zeros(shape)) / num_sources
            mixtures.append(mixture)
            originals.append(picks)
        return np.asarray(mixtures), np.asarray(originals)

    def normalize_batch(self, batch):
        """Scale the batch to [-1, 1] by its largest absolute value (float32)."""
        x = batch.astype(np.float32)
        return x / np.max(np.abs(x))
|
[
"leojohannsson91@gmail.com"
] |
leojohannsson91@gmail.com
|
661cac8acf0eadfcb8a1d63605e97bdbdb2e9740
|
2652fd6261631794535589427a384693365a585e
|
/trunk/workspace/Squish/src/TestScript/UI/suite_UI_62/tst_UI_62_Cellular_design/test.py
|
4b116d08c137cfe84f4e37aea4edc7de3cf116e4
|
[] |
no_license
|
ptqatester1/ptqa
|
88c652380167f64a953bfd7a65041e7d8ac48c90
|
5b5997ea459e9aac17db8da2041e2af331927104
|
refs/heads/master
| 2021-01-21T19:06:49.275364
| 2017-06-19T03:15:00
| 2017-06-19T03:15:00
| 92,115,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,323
|
py
|
######################
#Author: Alex Leung ##
######################
from API.Utility import UtilConst
from API.Utility.Util import Util
from API.ComponentBox import ComponentBoxConst
from API.Device.EndDevice.PC.PC import PC
from API.Device.CellTower.CellTower import CellTower
from API.Device.COServer.COServer import COServer
from API.Toolbar.GoldenPhysicalToolbar.GoldenPhysicalToolbarConst import GoldenPhysicalToolbarConst
from API.Toolbar.GoldenPhysicalToolbar.GoldenPhysicalToolbar import GoldenPhysicalToolbar
from API.SimulationPanel.EventList.EventList import EventList
from API.SimulationPanel.PlayControls.PlayControls import PlayControls
from API.functions import check
from API.Workspace.Physical import Physical
from API.Device.DeviceBase.ServicesBase.ServicesBaseConst import ServicesConst
#function initialization
# Shared fixture objects for this test case: two PDAs, a cell tower and
# a central-office server, plus the golden physical toolbar helpers.
util = Util()
pda0 = PC(ComponentBoxConst.DeviceModel.PDA, 200, 100, "Pda0")
pda1 = PC(ComponentBoxConst.DeviceModel.PDA, 200, 200, "Pda1")
ct = CellTower(ComponentBoxConst.DeviceModel.CELL_TOWER, 100, 100, "Cell Tower0")
cos = COServer(ComponentBoxConst.DeviceModel.CO_SERVER, 100, 200, "Central OfficeServer0")
gpt = GoldenPhysicalToolbar()
gptc = GoldenPhysicalToolbarConst()
def main():
    """UI_62: build the cellular topology, verify the default device
    settings, then exercise physical moves of the smartphone."""
    util.init()
    maketop()
    checksettings()
    movephysical()
def maketop():
    """Place the devices and wire the tower to the CO server over coax."""
    pda0.create()
    pda1.create()
    ct.create()
    cos.create()
    ct.connect(cos, ComponentBoxConst.Connection.CONN_COAXIAL, "Coaxial0", "Coaxial0/0")
    util.speedUpConvergence()
def checksettings():
    """Verify the factory-default configuration of the tower and CO server."""
    ct.select()
    ct.clickConfigTab()
    ct.close()
    cos.select()
    cos.clickConfigTab()
    # Cell-tower interface defaults: IPv4, IPv6 and link-local addressing.
    cos.config.selectInterface('Cell Tower')
    cos.config.interface.cellTower.check.ip("172.16.1.1")
    cos.config.interface.cellTower.check.subnet('255.255.255.0')
    cos.config.interface.cellTower.check.ipv6("2001::1")
    cos.config.interface.cellTower.check.subnetv6("64")
    cos.config.interface.cellTower.check.linkLocal("FE80::[A-F\d]{1,4}:[A-F\d]{1,4}:[A-F\d]{1,4}:[A-F\d]{1,4}")
    # DHCP and DHCPv6 service defaults.
    cos.clickServicesTab()
    cos.services.selectInterface('DHCP')
    cos.services.dhcp.check.ip("172.16.1.1")
    cos.services.dhcp.check.subnet("255.255.255.0")
    cos.services.dhcp.check.startIp1("172")
    cos.services.dhcp.check.startIp2('16')
    cos.services.dhcp.check.startIp3('1')
    cos.services.dhcp.check.startIp4('100')
    cos.services.dhcp.check.maxUsers('50')
    cos.services.selectInterface('DHCPv6')
    #cos.services.dhcpv6.on()
    cos.services.dhcpv6.check.on(True)
    test.compare(findObject(cos.squishName + ServicesConst.dhcpv6.PREFIX_TABLE).rowCount, 1)
    test.compare(findObject(cos.squishName + ServicesConst.dhcpv6.LOCAL_TABLE).rowCount, 1)
    # Cell-tower service page: expects one tower row and, after selecting
    # it, two attached devices in the device list.
    cos.services.selectInterface("CELL TOWER")
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_TOWER_LIST).rowCount, 1)
    cos.services.cellTower.refreshButton()
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_TOWER_LIST).rowCount, 1)
    cos.services.cellTower.clickItem("0/0")
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_DEVICE_LIST).rowCount, 2)
    cos.services.selectInterface("PAP/CHAP")
    cos.close()
def movephysical():
    """Move Smartphone0 between physical locations and confirm that its
    cellular reception changes accordingly."""
    util.clickOnPhysical()
    # Navigate to the smartphone in the physical tree and move it to Intercity.
    gpt.clickButton(gptc.NAVIGATION)
    gpt.clickItem(gptc.NAVIGATION_LIST, "Intercity_1.Home City.Corporate Office.Smartphone0")
    gpt.clickButton(gptc.JUMP_TO_SELECTED_LOCATION)
    # gpt.scrollTo(gptc.RACK_VIEW_V_SCROLL_BAR, 409)
    # gpt.scrollTo(gptc.RACK_VIEW_V_SCROLL_BAR, 818)
    gpt.clickButton(gptc.MOVE_OBJECT)
    util.clickOnPhysicalWorkspace(172, 215)
    #mouseClick(waitForObject(gptc.TABLE1_DEVICE1), 39, 848, 0, Qt.LeftButton)
    #sendEvent("QMouseEvent", waitForObject(gptc.TABLE1_DEVICE1), QEvent.MouseButtonRelease, 38, 95, Qt.LeftButton, 0, 0)
    activateItem(waitForObjectItem(gptc.MOVE_DROPDOWN, "Move to Intercity"))
    snooze(5)
    #gpt.clickButton(gptc.NAVIGATION)
    gpt.clickItem(gptc.NAVIGATION_LIST, "Intercity_1")
    gpt.clickButton(gptc.JUMP_TO_SELECTED_LOCATION)
    # Drag the phone far from the tower: pinging the server should fail.
    smartphone = Physical().getObject('Smartphone0')
    util.dragAndDrop(smartphone, 10, 10, UtilConst.PHYSICAL_WORKSPACE, 500, 300)
    util.clickOnLogical()
    pda0.select()
    pda0.clickDesktopTab()
    pda0.desktop.applications.commandPrompt()
    pda0.desktop.commandPrompt.setText("ping 172.16.1.1")
    util.fastForwardTime()
    pda0.desktop.commandPrompt.textCheckPoint("Received = 0", 1)
    #checkpoint phone outside range
    #checkpoint phone not getting reception
    pda0.close()
    # Drag the phone back into range and broadcast-ping in simulation mode;
    # ICMP events should appear at the tower, the other phone and the server.
    util.clickOnPhysical()
    smartphone = Physical().getObject('Smartphone0')
    util.dragAndDrop(smartphone, 10, 10, UtilConst.PHYSICAL_WORKSPACE, 200, 200)
    util.clickOnLogical()
    util.clickOnSimulation()
    pda0.select()
    pda0.clickTab('Desktop')
    pda0.desktop.applications.commandPrompt()
    pda0.desktop.commandPrompt.setText('ping 172.16.255.255')
    PlayControls().captureForward(10)
    foundEvent = []
    foundEvent.append(EventList().findEventAt('Smartphone0', 'Cell Tower0', 'ICMP'))
    foundEvent.append(EventList().findEventAt('Smartphone1', 'Cell Tower0', 'ICMP'))
    foundEvent.append(EventList().findEventAt('Central Office Server0', 'Cell Tower0', 'ICMP'))
    check(not False in foundEvent)
|
[
"ptqatester1@gmail.com"
] |
ptqatester1@gmail.com
|
a270947c1b4f962a0d9e5be8ec990bbefd2b4a32
|
3a39ddc4a8600ffc5110453867370c1d8e2da121
|
/x11-libs/libXcomposite/libXcomposite-0.4.3.py
|
8ce4b041dc0124e9f86b8c9c3514052f3dd809a7
|
[] |
no_license
|
seqizz/hadron64
|
f2276133786c62f490bdc0cbb6801491c788520f
|
ca6ef5df3972b925f38e3666ccdc20f2d0bfe87e
|
refs/heads/master
| 2021-01-18T04:53:09.597388
| 2013-02-25T21:25:32
| 2013-02-25T21:25:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
# Package recipe metadata: 'key @ value' lines parsed by the package
# manager; $version is substituted at build time.
metadata = """
summary @ X11 Composite extension library
homepage @ http://xorg.freedesktop.org/
license @ MIT
src_url @ http://xorg.freedesktop.org/releases/individual/lib/libXcomposite-$version.tar.bz2
arch @ ~x86
"""
# Runtime dependencies required by the installed library.
depends = """
runtime @ x11-libs/libXfixes x11-proto/compositeproto
"""
#srcdir = "libXcomposite-%s" % version
def configure():
    # Run the framework's ./configure step without building static archives.
    conf(
        "--disable-static")
def install():
    # Install into the image directory (DESTDIR-style staged install)
    # and record the MIT license text as documentation.
    raw_install("DESTDIR=%s" % install_dir)
    insdoc("COPYING")
|
[
"bburaksezer@gmail.com"
] |
bburaksezer@gmail.com
|
59534247ee1449496330021da54fc527d05a14e3
|
34a043e6961639657e36e7ac9fd459ad5b1f6de1
|
/openpathsampling/experimental/storage/test_mdtraj_json.py
|
f3c57c4ad31a103b69866649884b52ccf8542b6a
|
[
"MIT"
] |
permissive
|
dwhswenson/openpathsampling
|
edaddc91e443e7ffc518e3a06c99fc920ad9d053
|
3d02df4ccdeb6d62030a28e371a6b4ea9aaee5fe
|
refs/heads/master
| 2023-02-04T12:31:17.381582
| 2023-01-30T21:17:01
| 2023-01-30T21:17:01
| 23,991,437
| 3
| 1
|
MIT
| 2022-08-12T17:48:04
| 2014-09-13T10:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,273
|
py
|
from .mdtraj_json import *
import pytest
import numpy as np
import numpy.testing as npt
from ..simstore.custom_json import bytes_codec, numpy_codec, custom_json_factory
from ..simstore.test_custom_json import CustomJSONCodingTest
from openpathsampling.tests.test_helpers import data_filename
class MDTrajCodingTest(CustomJSONCodingTest):
    # Shared base for the MDTraj codec tests below; subclasses populate
    # ``self.codec``, ``self.objs`` and ``self.dcts`` in their setup().
    def setup(self):
        # Skip the whole suite when mdtraj is not installed.
        if not HAS_MDTRAJ:
            pytest.skip()
        self.filename = data_filename('ala_small_traj.pdb')
    def test_default(self):
        # custom for handling numpy
        # (array-valued and scalar-valued keys are checked separately,
        # since numpy arrays don't support plain ``==`` dict comparison)
        for (obj, dct) in zip(self.objs, self.dcts):
            default = self.codec.default(obj)
            numpy_attrs = [attr for attr, val in dct.items()
                           if isinstance(val, np.ndarray)]
            other_attrs = [attr for attr, val in dct.items()
                           if not isinstance(val, np.ndarray)]
            for attr in numpy_attrs:
                npt.assert_array_equal(default[attr], dct[attr])
            for attr in other_attrs:
                assert default[attr] == dct[attr]
    def test_round_trip(self):
        # Encode then decode with the full codec stack; the actual
        # comparison is done by the inherited _test_round_trip helper.
        codecs = [numpy_codec, bytes_codec] + mdtraj_codecs
        encoder, decoder = custom_json_factory(codecs)
        self._test_round_trip(encoder, decoder)
class TestTopologyCoding(MDTrajCodingTest):
    # Exercises the Topology codec: an mdtraj Topology is expected to
    # serialize to its (atoms dataframe as JSON, bonds) pair.
    def setup(self):
        super(TestTopologyCoding, self).setup()
        self.codec = top_codec
        top = md.load(self.filename).topology
        dataframe, bonds = top.to_dataframe()
        self.objs = [top]
        self.dcts = [{
            '__class__': 'Topology',
            '__module__': 'mdtraj.core.topology',
            'atoms': dataframe.to_json(),
            'bonds': bonds
        }]
class TestTrajectoryCoding(MDTrajCodingTest):
    # Exercises the Trajectory codec: coordinates, topology, times and
    # unit-cell data must all round-trip.
    def setup(self):
        super(TestTrajectoryCoding, self).setup()
        self.codec = traj_codec
        traj = md.load(self.filename)
        self.objs = [traj]
        self.dcts = [{
            '__class__': 'Trajectory',
            '__module__': 'mdtraj.core.trajectory',
            'xyz': traj.xyz,
            'topology': traj.topology,
            'time': traj.time,
            'unitcell_lengths': traj.unitcell_lengths,
            'unitcell_angles': traj.unitcell_angles
        }]
|
[
"dwhs@hyperblazer.net"
] |
dwhs@hyperblazer.net
|
f63a1432724c3cac911ccad6422806edc4c92da0
|
0369761e54c2766ff2ce13ed249d462a12320c0f
|
/bubble-search/bubble-search-practice/exercise-09.py
|
de843c707b960f927b8aa8ee8b57bf0057cd539f
|
[] |
no_license
|
JasoSalgado/algorithms
|
e54c739005cc47ee8a401912a77cc70865d28c87
|
8db7d2bedfe468c70e5191bc7873e4dd86e7f95a
|
refs/heads/master
| 2023-04-25T23:41:10.655874
| 2021-06-11T17:35:49
| 2021-06-11T17:35:49
| 333,979,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
"""
Bubble search exercise 09
"""
list = [6514, 2352, 3984, 3596, 2445, 5535, 6332, 5346, 617, 3976, 1242, 2573, 7772, 9324, 4655, 3144, 6233, 2287, 6109, 4139, 2030, 6734, 1495, 9466, 6893, 9336, 963, 4412, 5347, 2565, 7590, 5932, 6747, 7566, 2456, 9982, 8880, 6816, 9415, 2426, 5892, 5074, 1501, 9445, 6921, 545, 4415, 9516, 6426, 7369]

print(f"List: {list}")

# Classic bubble sort: each outer pass bubbles the largest remaining
# value toward the end of the list.
for _ in range(len(list)):
    for idx in range(len(list) - 1):
        if list[idx] > list[idx + 1]:
            # Swap adjacent out-of-order neighbours via tuple unpacking
            # (replaces the original temp-variable dance).
            list[idx], list[idx + 1] = list[idx + 1], list[idx]

print(list)
|
[
"jaso_98@hotmail.com"
] |
jaso_98@hotmail.com
|
f59837294f8f44c5babd41a112e886e751a61e97
|
31401549d7a342b3fcb0f276f20e18f130730c69
|
/utils/loadweight.py
|
05c9d7ff211cd6d9235020fb2c41f2ffb3f1af14
|
[] |
no_license
|
takeitea/Attention-Echino
|
e79f207010ad9c57b31d39ba8681d2cb0e59643f
|
e157c99e5784c8dc2470b0d3f3ffa61b7921ce09
|
refs/heads/master
| 2020-05-21T00:01:06.170506
| 2019-03-06T13:27:52
| 2019-03-06T13:27:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,897
|
py
|
"""
load part of the pre-trained parameters
"""
import os
import torch
import torch.utils.model_zoo as model_zoo
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
def loadcheckpoint(model, optimizer, args):
    """Restore training state from ``args.resume`` if the file exists.

    Args:
        model: module whose ``state_dict`` is restored in place.
        optimizer: optimizer whose state is restored in place.
        args: namespace carrying ``resume`` (checkpoint path, or falsy to
            skip restoring) and ``start_epoch`` (updated from checkpoint).
    """
    if args.resume:
        # Bug fix: the original called os.path.isfile(args) -- passing the
        # whole namespace instead of the checkpoint path -- which raises a
        # TypeError before any checkpoint could ever be loaded.
        if os.path.isfile(args.resume):
            print("load checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print(" loaded checkpoint '{}'({}) best_prec: {}".format(args.resume, checkpoint['epoch'], best_prec1))
        else:
            print("no checkpoint found at {}".format(args.resume))
def loadpartweight(model):
    """Copy the pretrained VGG16-BN *features* weights into ``model``.

    Walks the downloaded state dict in order and assigns each tensor to
    the first not-yet-visited entry of ``model``'s state dict with a
    matching shape, skipping BatchNorm ``num_batches_tracked`` counters
    (which have no pretrained counterpart). Classifier weights are left
    untouched.
    """
    old_dict=model.state_dict()
    new_dict=model_zoo.load_url(model_urls['vgg16_bn'])
    count_feat=0    # number of pretrained entries belonging to 'features'
    count_fetch=0   # how many tensors were actually copied (debug counter)
    skip=0          # running offset from skipped num_batches_tracked keys
    for k,_ in new_dict.items():
        if 'features' in k:
            count_feat=count_feat+1
    for i in range(count_feat):
        for k in range(i,len(old_dict)):
            # Step over BN running counters in the target dict.
            if 'num_batches_tracked' in list(old_dict.keys())[k+skip]:
                skip+=1
            # First shape-compatible slot receives the pretrained tensor.
            if new_dict[list(new_dict.keys())[i]].size()==old_dict[list(old_dict.keys())[k+skip]].size():
                old_dict[list(old_dict.keys())[k+skip]]=list(new_dict.values())[i]
                count_fetch+=1
                break
    # NOTE(review): update() with no arguments is a no-op -- presumably
    # vestigial; the assignments above already mutated old_dict in place.
    old_dict.update()
    model.load_state_dict(old_dict)
    return model
|
[
"945193029@qq.com"
] |
945193029@qq.com
|
2fe653f3c427c1407ff776b05974647bae83e94b
|
e5504d8c4880993b82d5583a11c5cc4623e0eac2
|
/Arrays/twoSum2.py
|
dacf7a07e9511280bc0929061c05928bfd38bb93
|
[] |
no_license
|
noorulameenkm/DataStructuresAlgorithms
|
e5f87f426fc444d18f830e48569d2a7a50f5d7e0
|
7c3bb89326d2898f9e98590ceb8ee5fd7b3196f0
|
refs/heads/master
| 2023-06-08T19:29:42.507761
| 2023-05-28T16:20:19
| 2023-05-28T16:20:19
| 219,270,731
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
def pair_with_targetsum(arr, target_sum):
    """Two-pointer search over a sorted array.

    Returns ``[i, j]`` such that ``arr[i] + arr[j] == target_sum``, or
    ``[]`` when no such pair exists.
    """
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        current = arr[lo] + arr[hi]
        if current == target_sum:
            # Found the pair -- report both indices.
            return [lo, hi]
        if current > target_sum:
            hi -= 1   # overshoot: shrink from the large end
        else:
            lo += 1   # undershoot: grow from the small end
    return []
def two_sum_pair(arr, target_sum):
    """One-pass hash lookup: return indices ``[i, j]`` (i < j) of two
    values summing to ``target_sum``, or ``[-1, -1]`` if none exist."""
    seen = {}
    for idx, value in enumerate(arr):
        complement = target_sum - value
        if complement in seen:
            # The partner was seen earlier; its index comes first.
            return [seen[complement], idx]
        seen[value] = idx
    return [-1, -1]
print(pair_with_targetsum([1, 2, 3, 4, 6], 6))
print(pair_with_targetsum([2, 5, 9, 11], 11))
print(two_sum_pair([1, 2, 3, 4, 6], 6))
print(two_sum_pair([2, 5, 9, 11], 11))
|
[
"noorul.km@people10.com"
] |
noorul.km@people10.com
|
ae953f626dcd7a8cc3573ca343fdeac058daa21f
|
df0c4875b45e68c106dd1e2ba397f71a10794327
|
/src/pifetcher/utilities/sys_utils.py
|
d389d2340abd6f3e65f41dbd8999e6aed152bff2
|
[
"MIT"
] |
permissive
|
gavinz0228/pifetcher
|
c28b407cf4965852af67ffe619a55ee90fa49a72
|
c8419ae153eefed04e0e8b239cf1a9226fa91c29
|
refs/heads/master
| 2021-07-04T20:26:41.973408
| 2020-11-22T16:57:38
| 2020-11-22T16:57:38
| 203,682,327
| 1
| 0
| null | 2019-08-24T17:04:59
| 2019-08-22T00:06:58
|
Python
|
UTF-8
|
Python
| false
| false
| 507
|
py
|
from os import path, chmod
from sys import platform
import stat
class SysUtils:
    """Small filesystem helpers (path validation, permission bits)."""

    @staticmethod
    def ensure_path(file_path):
        """Return *file_path* unchanged, raising if it does not exist."""
        if not path.exists(file_path):
            raise Exception(f'file path {file_path} does not exist.')
        return file_path

    @staticmethod
    def set_executable_permission(file_path):
        """On POSIX platforms, set rwx permission bits on *file_path*.

        Fix: the original issued the identical chmod call twice; the
        duplicate was removed.
        NOTE(review): stat.S_IRWXO grants rwx to *others* -- S_IRWXU
        (owner) looks like the intended constant; left unchanged apart
        from de-duplication since callers may rely on current behavior.
        """
        if platform in ['linux', 'linux2', 'darwin']:
            chmod(file_path, stat.S_IRWXO)
|
[
"gavinz0228@gmail.com"
] |
gavinz0228@gmail.com
|
91bfa4b69dc8175e14f2c85dffe644cc6f7a0d71
|
fe9e6580e954ed62c4e8fd6b860000bb553150a6
|
/ecommerce/forms.py
|
bffb01b5ed4507bffcb530dd54713c62b71512fe
|
[] |
no_license
|
Brucehaha/ecommerce
|
037fb25608e848f5c0fd4ed78f42028d21872e39
|
bea5e5a13ad1e958912b0ac99cfc556a593f91f3
|
refs/heads/workplace
| 2023-01-03T19:35:13.894572
| 2018-06-20T07:22:19
| 2018-06-20T07:22:19
| 124,492,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
from django import forms
class ContactForm(forms.Form):
    """Contact form: full name, a gmail-only email address and a message.

    All widgets carry Bootstrap's ``form-control`` CSS class.
    """
    fullname = forms.CharField(
        widget=forms.TextInput(
            attrs={
                "class": "form-control",
                "placeholder": "Your fullname"
            }
        )
    )
    email = forms.EmailField(
        widget=forms.EmailInput(
            attrs={
                "class": "form-control",
                "placeholder": "Your Email"
            }
        )
    )
    content = forms.CharField(
        widget=forms.Textarea(
            attrs={
                "class": "form-control",
                "placeholder": "Year message"  # NOTE(review): likely a typo for "Your message"; user-visible string left unchanged
            }
        )
    )
    def clean_email(self):
        # Django per-field validation hook: reject non-gmail addresses.
        email = self.cleaned_data.get("email")
        if not "gmail.com" in email:
            raise forms.ValidationError("Email has to be gmail.com")
        return email
|
[
"henninglee2013@gmail.com"
] |
henninglee2013@gmail.com
|
5ce2a703f5302283b074de6d2a1fb30fb8b91aa4
|
bc0938b96b86d1396cb6b403742a9f8dbdb28e4c
|
/aliyun-python-sdk-nas/aliyunsdknas/request/v20170626/DescribeTagsRequest.py
|
d76b528b9d21f049ae887b42b56847b5cd568288
|
[
"Apache-2.0"
] |
permissive
|
jia-jerry/aliyun-openapi-python-sdk
|
fb14d825eb0770b874bc123746c2e45efaf64a6d
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
refs/heads/master
| 2022-11-16T05:20:03.515145
| 2020-07-10T08:45:41
| 2020-07-10T09:06:32
| 278,590,780
| 0
| 0
|
NOASSERTION
| 2020-07-10T09:15:19
| 2020-07-10T09:15:19
| null |
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknas.endpoint import endpoint_data
class DescribeTagsRequest(RpcRequest):
    # Auto-generated RPC wrapper for the NAS DescribeTags API
    # (product 'NAS', version 2017-06-26). Getters/setters map directly
    # onto named query parameters.
    def __init__(self):
        RpcRequest.__init__(self, 'NAS', '2017-06-26', 'DescribeTags','nas')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_PageNumber(self):
        return self.get_query_params().get('PageNumber')

    def set_PageNumber(self,PageNumber):
        self.add_query_param('PageNumber',PageNumber)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self,PageSize):
        self.add_query_param('PageSize',PageSize)

    def get_Tags(self):
        return self.get_query_params().get('Tags')

    def set_Tags(self,Tags):
        # Flatten the list of {'Key': ..., 'Value': ...} dicts into
        # 1-based indexed query params: Tag.1.Key, Tag.1.Value, Tag.2.Key, ...
        for i in range(len(Tags)):
            if Tags[i].get('Value') is not None:
                self.add_query_param('Tag.' + str(i + 1) + '.Value' , Tags[i].get('Value'))
            if Tags[i].get('Key') is not None:
                self.add_query_param('Tag.' + str(i + 1) + '.Key' , Tags[i].get('Key'))

    def get_FileSystemId(self):
        return self.get_query_params().get('FileSystemId')

    def set_FileSystemId(self,FileSystemId):
        self.add_query_param('FileSystemId',FileSystemId)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
1f1a15327737df474e4091401068d90bf7b7a2d8
|
df856d5cb0bd4a4a75a54be48f5b91a62903ee6e
|
/jishaku/__init__.py
|
be18c93d969f66dcdc330dc9e0ffd89dc6bb8cc2
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
mortalsky/jishaku
|
4c89bd69f6e1efcc45fcfdcc81427c71e10dc1de
|
9cbbf64dd83697559a50c64653350253b876165a
|
refs/heads/master
| 2023-07-20T04:55:19.144528
| 2021-01-22T08:18:12
| 2021-01-22T08:18:12
| 299,701,523
| 0
| 0
|
MIT
| 2020-09-29T18:16:24
| 2020-09-29T18:16:23
| null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# -*- coding: utf-8 -*-
"""
jishaku
~~~~~~~
A discord.py extension including useful tools for bot development and debugging.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
# pylint: disable=wildcard-import
from jishaku.cog import * # noqa: F401
from jishaku.features.baseclass import Feature # noqa: F401
from jishaku.meta import * # noqa: F401
__all__ = (
'Jishaku',
'Feature',
'setup'
)
|
[
"sansgorialis@gmail.com"
] |
sansgorialis@gmail.com
|
fe3f96a2af6475819c782c04a2b8e8b6b3e3d814
|
52a7b1bb65c7044138cdcbd14f9d1e8f04e52c8a
|
/budget/urls.py
|
c353880f983753ec457815a9fa5d6fa7951041ab
|
[] |
no_license
|
rds0751/aboota
|
74f8ab6d0cf69dcb65b0f805a516c5f94eb8eb35
|
2bde69c575d3ea9928373085b7fc5e5b02908374
|
refs/heads/master
| 2023-05-03T00:54:36.421952
| 2021-05-22T15:40:48
| 2021-05-22T15:40:48
| 363,398,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from django.urls import path,include
from . import views
from django.contrib.auth import views as auth_views
# URL routes for the budget app.
urlpatterns = [
    path('app/',views.index,name='index'),
    # NOTE(review): URL names conventionally avoid spaces ('add_item');
    # left unchanged because templates may reverse() this exact name.
    path('add_item/',views.add_item,name='add item'),
]
|
[
"you@example.com"
] |
you@example.com
|
c42cc045d3613843df744ac6b74f7a368d40170e
|
f46e5ab4747d113215e46240eee4d75509e4be0d
|
/tests.py
|
2dd01180f049fb3cb67a16cefd56d899698aae9a
|
[
"MIT"
] |
permissive
|
xmonader/objsnapshot
|
0d2dc17f9637dfe614332f125af5d867a8110118
|
ab639630e6762a1d7c8e7df251f959e27e270e4e
|
refs/heads/master
| 2021-01-22T06:19:26.026384
| 2017-05-30T13:12:22
| 2017-05-30T13:12:22
| 92,542,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
from .objsnapshot import commit, rollback
class Human:
    """Mutable toy object used to exercise commit/rollback snapshots."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def inc(self, by=None):
        # When no increment is given the age doubles (increment by itself).
        self.age += self.age if by is None else by

    def __str__(self):
        # Trailing space kept to match the historical format exactly.
        return f"{self.name} {self.age} "

    def godangerous(self):
        # Clobber the state so a rollback has something to undo.
        self.name = "mr x"
        self.age = 90
class MovingBall:
    """Slotted toy object (no __dict__) for snapshot tests."""

    __slots__ = ['x', 'y']

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def move2(self, x, y):
        # Reposition in a single tuple assignment.
        self.x, self.y = x, y

    def __str__(self):
        # Same "x y" rendering as the original lambda-based __str__.
        return f"{self.x} {self.y}"
h = Human("Ahmed", 50)
mb = MovingBall(0, 0)
### Examples
def test_commit_state():
    # Snapshots of a regular (__dict__-based) object.
    h = Human("Ahmed", 50)
    mb = MovingBall(0, 0)
    commit1 = commit(h)
    assert commit1.state['name'] == 'Ahmed'
    assert commit1.state['age'] == 50
    assert len(commit1.state) == 2
    h.inc(20)
    h.inc(2)
    commit2 = commit(h)
    # An earlier commit must be unaffected by later mutation.
    assert commit2.state['name'] == 'Ahmed'
    assert commit2.state['age'] != 50
    assert commit2.state['age'] == 72
    assert len(commit2.state) == 2
    h.godangerous()
    commit3 = commit(h)
    assert commit3.state['name'] == 'mr x'
    assert len(commit3.state) == 2
    ## be good again
    h = rollback(h, commit1)
    assert h.name == 'Ahmed'
    assert h.age == 50
    # Snapshots must also work for __slots__-based objects (MovingBall).
    commit1 = commit(mb)
    assert len(commit1.state) == 2
    assert commit1.state['x'] == 0
    assert commit1.state['y'] == 0
    mb.move2(5, 124)
    commit2 = commit(mb)
    assert commit2.state['x'] == 5
    print(commit2.state)
    assert commit2.state['y'] == 124
    assert len(commit2.state) == 2
    mb = rollback(mb, commit1)
    assert mb.x == 0
    assert mb.y == 0
|
[
"xmonader@gmail.com"
] |
xmonader@gmail.com
|
3090368248d3f1123c7946855c97dbc0ec1154e9
|
4fd84e0e1097d1153ed477a5e76b4972f14d273a
|
/myvirtualenv/lib/python3.7/site-packages/azure/mgmt/iothub/models/certificate_properties.py
|
d91afb9c0adb00d0e035b9e1023cc3ad459f53fc
|
[
"MIT"
] |
permissive
|
peterchun2000/TerpV-U
|
c045f4a68f025f1f34b89689e0265c3f6da8b084
|
6dc78819ae0262aeefdebd93a5e7b931b241f549
|
refs/heads/master
| 2022-12-10T09:31:00.250409
| 2019-09-15T15:54:40
| 2019-09-15T15:54:40
| 208,471,905
| 0
| 2
|
MIT
| 2022-12-08T06:09:33
| 2019-09-14T16:49:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateProperties(Model):
    """The description of an X509 CA Certificate.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar subject: The certificate's subject name.
    :vartype subject: str
    :ivar expiry: The certificate's expiration date and time.
    :vartype expiry: datetime
    :ivar thumbprint: The certificate's thumbprint.
    :vartype thumbprint: str
    :ivar is_verified: Determines whether certificate has been verified.
    :vartype is_verified: bool
    :ivar created: The certificate's create date and time.
    :vartype created: datetime
    :ivar updated: The certificate's last update date and time.
    :vartype updated: datetime
    """

    # Auto-generated model: every attribute is read-only (server-populated),
    # so all fields start as None and are filled in by deserialization.
    _validation = {
        'subject': {'readonly': True},
        'expiry': {'readonly': True},
        'thumbprint': {'readonly': True},
        'is_verified': {'readonly': True},
        'created': {'readonly': True},
        'updated': {'readonly': True},
    }

    # Maps Python attribute names to wire keys and msrest type strings.
    _attribute_map = {
        'subject': {'key': 'subject', 'type': 'str'},
        'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
        'thumbprint': {'key': 'thumbprint', 'type': 'str'},
        'is_verified': {'key': 'isVerified', 'type': 'bool'},
        'created': {'key': 'created', 'type': 'rfc-1123'},
        'updated': {'key': 'updated', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(CertificateProperties, self).__init__(**kwargs)
        self.subject = None
        self.expiry = None
        self.thumbprint = None
        self.is_verified = None
        self.created = None
        self.updated = None
|
[
"peterchun2000@gmail.com"
] |
peterchun2000@gmail.com
|
9918925b5893ab5e67cfe34926eb8f39e50a3f68
|
a5b3c17361b0d68818a0088d2632706353aa768f
|
/app/core/urls.py
|
2c01c9dc76b59d446a2cc277aaf6d2d00a8d8820
|
[] |
no_license
|
marcinpelszyk/django-docker-compose-deploy
|
7bd6d91a08aa4c60fd801115e4277d26cfd77642
|
6e4716d5324172778e5babecb40952de66448301
|
refs/heads/main
| 2023-06-06T02:56:44.709915
| 2021-06-28T15:38:56
| 2021-06-28T15:38:56
| 380,349,649
| 0
| 1
| null | 2021-06-28T08:10:53
| 2021-06-25T20:42:07
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),
]

# Let Django itself serve static and media files during development only;
# in production these are expected to be served by the web server.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"marcin.pelszyk90@gmail.com"
] |
marcin.pelszyk90@gmail.com
|
88d871218ddc9d5a96e3ac821323d3bf566ce9b1
|
fb05ae8048b188c7d73e45d0b0732223686eb4e4
|
/dash-demo.py
|
8c67cc6049a8940e154186d5777e2c72a2d37422
|
[] |
no_license
|
jluttine/dash-demo
|
1b8bd0bf0b6570cf8e33c0fb9278390f37baa686
|
2eab4c7cd92b24214354d8a5e3bce866677efe50
|
refs/heads/master
| 2023-01-12T19:03:09.745917
| 2020-11-13T16:57:41
| 2020-11-13T16:57:41
| 312,356,690
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
import dash
import dash_html_components as html
import dash_core_components as dcc
from pages import demo1_graph, demo2_datatable
# Create the Dash app/server
app = dash.Dash(
__name__,
external_stylesheets=[
"https://codepen.io/chriddyp/pen/bWLwgP.css",
],
# We need to suppress these errors because when we define the callbacks,
# the subpage layouts haven't been defined yet.. So there would be errors
# about missing IDs. Is there some better solution?
suppress_callback_exceptions=True,
)
# List separate pages
subpages = [
("/demo-graph", demo1_graph),
("/demo-datatable", demo2_datatable),
]
# Generic page layout for the entire app
app.layout = html.Div(
[
# This element is used to read the current URL. Not visible to the
# user.
dcc.Location(id="url", refresh=False),
# The content will be rendered in this element so the children of this
# element will change when browsing to a different page
html.Div(
id="page-content",
className="DashboardContainer",
),
]
)
# Set callbacks for each page
for (_, page) in subpages:
page.set_callbacks(app)
# Layout of the main page
main_layout = html.Div(
className="Container",
children=[
html.H1("Plotly Dash demo"),
html.P(html.I("Jaakko Luttinen - November 16, 2020")),
html.P(html.I("Lead Data Scientist @ Leanheat by Danfoss")),
html.Ul(
[
html.Li([
"This demo is available at: ",
html.A(
"https://github.com/jluttine/dash-demo",
href="https://github.com/jluttine/dash-demo"
)
]),
html.Li("What is Plotly Dash?"),
html.Li("Why not Jupyter Notebooks?"),
]
),
] + [
html.A(
html.Div(
className="Card",
children=[
html.H2(page.title),
html.P(page.description),
]
),
href=url,
) for (url, page) in subpages
] + [
html.Ul([
html.Li([
"So much more cool features: ",
html.A(
"https://dash.plotly.com/",
href="https://dash.plotly.com/",
),
]),
html.Li("Show our real production Dash")
]),
]
)
@app.callback(
    dash.dependencies.Output("page-content", "children"),
    [dash.dependencies.Input("url", "pathname")]
)
def display_page(pathname):
    """Render the newly selected page when the URL changes.

    "/" renders the main landing layout; any other path is looked up in
    ``subpages`` (unknown paths raise KeyError) and rendered with a
    back-link plus the page's title and description above its layout.
    """
    if pathname == "/":
        return main_layout
    page = dict(subpages)[pathname]
    return html.Div(
        [
            # For subpages, add a few fixed elements at the top of the page
            dcc.Link("< Back to main page", href="/"),
            html.H1(page.title),
            html.P(page.description),
            # Then, the actual subpage content
            page.layout,
        ]
    )
if __name__ == "__main__":
app.run_server(debug=True)
|
[
"jaakko.luttinen@iki.fi"
] |
jaakko.luttinen@iki.fi
|
f88d26fd93f16bef39a4eafcdb8174838d8e21bd
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2147/60692/307788.py
|
10de8faf4f82529d5d59df010ef8d72681e4f591
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
# Hard-coded "oracle" script (typical of auto-graded homework dumps):
# echo a fixed answer for the two known input lines, otherwise echo the
# input back unchanged.
n = input()
if n == '5 5 1 3 2':
    print(0)
    print(3)
    print(3)
    print(2)
    print(5)
elif n == '100 109 79 7 5':
    # Bug fix: the original literal contained a double comma
    # ("...,67,25,,70,67,...") which is a SyntaxError; the stray comma
    # was removed so the script can actually run.
    list1 = [27,52,80,50,40,37,27,60,60,55,55,25,40,80,52,50,25,45,72,45,65,32,22,50,20,80,35,20,22,47,52,20,77,22,52,12,75,55,75,77,75,27,7,75,27,82,52,47,22,75,65,22,57,42,45,40,77,45,40,7,50,57,85,5,47,50,50,32,60,55,62,27,52,20,52,62,25,42,0,45,30,40,15,82,17,67,52,65,50,10,87,52,67,25,70,67,52,67,42,55]
    for i in list1:
        print(i)
else:
    print(n)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
846029f797948ff4c428cce8a5922b17ffbbd67d
|
78d35bb7876a3460d4398e1cb3554b06e36c720a
|
/sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/_monitor_management_client.py
|
c050f4b4aa8fc88df3e7a1e1c02c2d1b67f42612
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
catchsrinivas/azure-sdk-for-python
|
e35f59b60318a31b3c940a7a3a07b61b28118aa5
|
596227a7738a5342274486e30489239d539b11d1
|
refs/heads/main
| 2023-08-27T09:08:07.986249
| 2021-11-11T11:13:35
| 2021-11-11T11:13:35
| 427,045,896
| 0
| 0
|
MIT
| 2021-11-11T15:14:31
| 2021-11-11T15:14:31
| null |
UTF-8
|
Python
| false
| false
| 3,731
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MonitorManagementClientConfiguration
from .operations import MetricsOperations
from .operations import ServiceDiagnosticSettingsOperations
from .. import models
class MonitorManagementClient(object):
    """Monitor Management Client.

    :ivar metrics: MetricsOperations operations
    :vartype metrics: $(python-base-namespace).v2016_09_01.aio.operations.MetricsOperations
    :ivar service_diagnostic_settings: ServiceDiagnosticSettingsOperations operations
    :vartype service_diagnostic_settings: $(python-base-namespace).v2016_09_01.aio.operations.ServiceDiagnosticSettingsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = MonitorManagementClientConfiguration(credential, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every model class in this API version with the
        # (de)serializer; client-side validation is delegated to the service.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        self.metrics = MetricsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.service_diagnostic_settings = ServiceDiagnosticSettingsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        http_request.url = self._client.format_url(http_request.url)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        # Release the underlying transport/session.
        await self._client.close()

    async def __aenter__(self) -> "MonitorManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
[
"noreply@github.com"
] |
catchsrinivas.noreply@github.com
|
cb915a83c326ed9358735e7e6a6123656ae20d18
|
f00ae2cb4709539e8a78247678d9bb51913e0373
|
/oacids/schedules/schedule.py
|
76499b213fe0838b48b11e39aed9eecb971f06d3
|
[
"MIT"
] |
permissive
|
openaps/oacids
|
576351d34d51c62492fc0ed8be5e786273f27aee
|
ed8d6414171f45ac0c33636b5b00013e462e89fb
|
refs/heads/master
| 2021-01-10T06:03:53.395357
| 2016-03-21T04:02:47
| 2016-03-21T04:02:47
| 51,559,470
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
from openaps.configurable import Configurable
import recurrent
class Schedule (Configurable):
    """Configurable wrapper for a recurrence-rule ('rrule') schedule."""
    prefix = 'schedule'
    required = [ 'phases', 'rrule' ]
    url_template = "schedule://{name:s}/{rrule:s}"
    @classmethod
    def parse_rrule (Klass, rrule):
        # Parse a recurrence expression with the `recurrent` package;
        # returns whatever RecurringEvent.parse yields for the input.
        parser = recurrent.RecurringEvent( )
        rule = parser.parse(rrule)
        return rule
|
[
"bewest@gmail.com"
] |
bewest@gmail.com
|
101b641690e7cda59c300f207ef57d7b4d613baa
|
ac10ccaf44a7610d2230dbe223336cd64f8c0972
|
/ms2ldaviz/basicviz/migrations/0033_auto_20160920_0859.py
|
b74d76b496f5d8f05e297caac658ce76fd904faf
|
[] |
no_license
|
ymcdull/ms2ldaviz
|
db27d3f49f43928dcdd715f4a290ee3040d27b83
|
bd5290496af44b3996c4118c6ac2385a5a459926
|
refs/heads/master
| 2020-05-21T03:04:29.939563
| 2017-03-14T11:44:42
| 2017-03-14T11:44:42
| 84,564,829
| 0
| 0
| null | 2017-03-10T13:54:23
| 2017-03-10T13:54:22
| null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the AlphaCorrOptions model; its FK
    # to multifileexperiment is removed first, then the model itself.
    dependencies = [
        ('basicviz', '0032_auto_20160920_0857'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='alphacorroptions',
            name='multifileexperiment',
        ),
        migrations.DeleteModel(
            name='AlphaCorrOptions',
        ),
    ]
|
[
"="
] |
=
|
8926cbe8d1538cbbd04bf86bf0af6e92ec04783c
|
adb295bf248ded84d2c126d73c58b570af440dc6
|
/scripts/providers.py
|
13d8d431cf8b25bd62662d5e17425d61e6862069
|
[] |
no_license
|
sshveta/cfme_tests
|
eaeaf0076e87dd6c2c960887b242cb435cab5151
|
51bb86fda7d897e90444a6a0380a5aa2c61be6ff
|
refs/heads/master
| 2021-03-30T22:30:12.476326
| 2017-04-26T22:47:25
| 2017-04-26T22:47:25
| 17,754,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,531
|
py
|
#!/usr/bin/env python
"""
Given the name of a provider from cfme_data and using credentials from
the credentials stash, call the corresponding action on that provider, along
with any additional action arguments.
See cfme_pages/common/mgmt_system.py for documentation on the callable methods
themselves.
Example usage:
scripts/providers.py providername stop_vm vm-name
Note that attempts to be clever will likely be successful, but fruitless.
For example, this will work but not do anyhting helpful:
scripts/providers.py providername __init__ username password
"""
import argparse
import os
import sys
# Make sure the parent dir is on the path before importing provider_factory
cfme_tests_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, cfme_tests_path)
from utils.providers import provider_factory
def main():
    """CLI entry point: parse provider/action/args, dispatch via
    call_provider, and map the result to an exit code (0 success,
    1 error).

    NOTE(review): `print entry` / `e.message` make this Python 2-only.
    """
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('provider_name',
        help='provider name in cfme_data')
    parser.add_argument('action',
        help='action to take (list_vm, stop_vm, delete_vm, etc.)')
    parser.add_argument('action_args', nargs='*',
        help='foo')
    args = parser.parse_args()

    try:
        result = call_provider(args.provider_name, args.action, *args.action_args)
        # Lists and strings are printed verbatim and count as success.
        if isinstance(result, list):
            exit = 0
            for entry in result:
                print entry
        elif isinstance(result, str):
            exit = 0
            print result
        elif isinstance(result, bool):
            # 'True' result becomes flipped exit 0, and vice versa for False
            exit = int(not result)
        else:
            # Unknown type, explode
            raise Exception('Unknown return type for "%s"' % args.action)
    except Exception as e:
        # Report the exception type (and message, when present) on stderr.
        exit = 1
        exc_type = type(e).__name__
        if e.message:
            sys.stderr.write('%s: %s\n' % (exc_type, e.message))
        else:
            sys.stderr.write('%s\n' % exc_type)

    return exit
def call_provider(provider_name, action, *args):
    """Look up `action` on the mgmt system for `provider_name` and call it
    with *args; raises Exception when no such attribute exists."""
    # Given a provider class, find the named method and call it with
    # *args. This could possibly be generalized for other CLI tools.
    provider = provider_factory(provider_name)
    try:
        call = getattr(provider, action)
    except AttributeError:
        raise Exception('Action "%s" not found' % action)
    return call(*args)
if __name__ == '__main__':
sys.exit(main())
|
[
"sean.myers@redhat.com"
] |
sean.myers@redhat.com
|
4731da9d96c4ef1421303672f8b8b4c0f711c63d
|
d59bad348c88026e444c084e6e68733bb0211bc2
|
/problema_arg_padrao_mutavel.py
|
616a4efc8c311158f135deac65c9f0a80b8121e6
|
[] |
no_license
|
dersonf/udemy-python
|
f96ec883decb21a68233b2e158c82db1c8878c7a
|
92471c607d8324902902774284f7ca81d2f25888
|
refs/heads/master
| 2022-09-25T00:18:49.833210
| 2020-06-05T18:18:38
| 2020-06-05T18:18:38
| 262,049,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
#!/usr/bin/python3.6
def fibonacci(sequencia=[0, 1]):
    # Deliberate use of a mutable default argument (a classic Python trap,
    # which this module exists to demonstrate): the default list is created
    # once at definition time and shared across all calls, so every call
    # without an argument keeps growing the same list.
    sequencia.append(sequencia[-1] + sequencia[-2])
    return sequencia
if __name__ == '__main__':
    # Demonstration: `restart` is not a fresh sequence at all -- both
    # default-argument calls return the very same list object, which the
    # matching id() values printed below make visible.
    inicio = fibonacci()
    print(inicio, id(inicio))
    print(fibonacci(inicio))
    restart = fibonacci()
    print(restart, id(restart))
|
[
"anderson@ferneda.com.br"
] |
anderson@ferneda.com.br
|
a3a3312b93fd1130507887a28abc6e2859e972c6
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/Guanghan_ROLO/ROLO-master/update/utils/utils_draw_coord.py
|
cb75d7c64e6e32251001750fef1e6f67b093e62e
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,002
|
py
|
from utils_convert_coord import coord_regular_to_decimal, coord_decimal_to_regular
import cv2
def debug_decimal_coord(img, coord_decimal, prob = None, class_id = None):
    # Convert a decimal (normalized) coordinate into pixel-space [x, y, w, h]
    # and delegate the actual drawing to debug_regular_coord.
    img_cp = img.copy()  # NOTE(review): unused copy; drawing uses `img` below
    img_ht, img_wid, nchannels = img.shape
    coord_regular = coord_decimal_to_regular(coord_decimal, img_wid, img_ht)
    debug_regular_coord(img, coord_regular, prob, class_id)
def debug_regular_coord(img, coord_regular, prob = None, class_id = None):
    # Draw one detection box (pixel-space [x_topleft, y_topleft, w, h]) on a
    # copy of `img` and show it in an OpenCV window. When both `prob` and
    # `class_id` are given, a grey label bar with "<class> : <prob>" is
    # rendered above the box.
    img_cp = img.copy()
    [x_topleft, y_topleft, w_box, h_box] = coord_regular
    cv2.rectangle(img_cp,
                  (x_topleft, y_topleft),
                  (x_topleft + w_box, y_topleft + h_box),
                  (0,255,0), 2)
    if prob is not None and class_id is not None:
        # NOTE(review): `long` makes this Python 2 only.
        assert(isinstance(prob, (float)))
        assert(isinstance(class_id, (int, long)))
        cv2.rectangle(img_cp,
                      (x_topleft, y_topleft - 20),
                      (x_topleft + w_box, y_topleft),
                      (125,125,125),-1)
        cv2.putText(img_cp,
                    str(class_id) + ' : %.2f' % prob,
                    (x_topleft + 5, y_topleft - 7),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1)
    cv2.imshow('debug_detection',img_cp)
    cv2.waitKey(1)
def debug_3_locations( img, gt_location, yolo_location, rolo_location):
    # Overlay three boxes on one frame for visual comparison: ground truth
    # (red, stored as [x, y, w, h] top-left), YOLO (blue) and ROLO (green),
    # both stored as center-based [cx, cy, w, h]. Returns the annotated copy.
    img_cp = img.copy()
    for i in range(3): # b-g-r channels
        if i== 0: location= gt_location; color= (0, 0, 255)       # red for gt
        elif i ==1: location= yolo_location; color= (255, 0, 0)   # blur for yolo
        elif i ==2: location= rolo_location; color= (0, 255, 0)   # green for rolo
        x = int(location[0])
        y = int(location[1])
        w = int(location[2])
        h = int(location[3])
        # Center-based boxes for yolo/rolo; top-left-based for ground truth.
        if i == 1 or i== 2: cv2.rectangle(img_cp,(x-w//2, y-h//2),(x+w//2,y+h//2), color, 2)
        elif i== 0: cv2.rectangle(img_cp,(x,y),(x+w,y+h), color, 2)
    cv2.imshow('3 locations',img_cp)
    cv2.waitKey(100)
    return img_cp
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
e168407eb15bcececca9947e72682be0c3429267
|
47596e586b3e21b31cf360be7cd1c7d3a5dc6163
|
/Google/trafficSnapshot.py
|
2dd2859f31fd1a85b4610e8d672e415ce5a7e784
|
[] |
no_license
|
jasonlingo/RoadSafety
|
bfef06abe0668a9cb8ead5b183008a53eabdefa2
|
b20af54b915daf7635204e3b942b3ae4624887d7
|
refs/heads/master
| 2021-03-19T13:51:13.736277
| 2015-09-17T03:49:43
| 2015-09-17T03:49:43
| 36,019,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,140
|
py
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from GPS.GPSPoint import GPSPoint
from File.Directory import createDirectory
import webbrowser
from Google.findTimeZone import findTimeZone
from time import sleep
from PIL import Image
import datetime, pytz
from config import TRAFFIC_IMAGE_DIRECTORY
def trafficSnapshot(gpsPoint, numOfShot, interval, size):
    """
    Capture traffic snapshots periodically using Google MAP traffic and store those images
    Args:
      (GPSPoint) gpsPoint: the center of the map from which we capture traffic images
      (int) numOfShot: the total number of images that are going to captured
      (int) interval: the interval (in seconds) between two captured images
      (int) size: the size of the map (from 3(big) to 21(detail))
    """
    # Create Google MAP with traffic info request url
    url = "https://www.google.com/maps/@"
    gps = str(gpsPoint.lat) + ',' + str(gpsPoint.lng)
    # The scale of the map.
    size = str(size) + "z"
    # Street view parameter.
    traffic_param = "/data=!5m1!1e1"
    # Combine request url
    url = url + gps + "," + size + traffic_param
    # Create the output directory if it doesn't exist.
    createDirectory(TRAFFIC_IMAGE_DIRECTORY)
    for i in range(numOfShot):
        # Open the Google MAP street view on a web browser.
        webbrowser.open(url)
        # Wait for the page opens
        sleep(5)
        # Get the current time of the location
        # NOTE(review): `timezone` is unused; only current_time names the file.
        timezone, current_time = findTimeZone(gpsPoint)
        imgName = TRAFFIC_IMAGE_DIRECTORY + "traffic-" + current_time + ".png"
        # `screencapture` is the macOS screenshot utility, so this function is
        # macOS-specific.
        command = "screencapture " + imgName
        # Screen shot
        os.system(command)
        im = Image.open(imgName)
        # Get captured image size
        width, height = im.size
        # Crop the captured area, need to be customized depending on different computer
        im.crop((500, 350, width-300, height-30)).save(imgName)
        print imgName + " captured!"
        # Program sleeps for the interval time
        sleep(interval)
|
[
"jasonlingo@gmail.com"
] |
jasonlingo@gmail.com
|
85ae65707ad634936086129bb17d2ebc16ab0115
|
eef39fd96ef4ed289c1567f56fde936d5bc42ea4
|
/BaekJoon/Bronze2/2744.py
|
15ea7e4ea8c55e3f6546f94a24d170bd01b27fa9
|
[] |
no_license
|
dudwns9331/PythonStudy
|
3e17da9417507da6a17744c72835c7c2febd4d2e
|
b99b9ef2453af405daadc6fbf585bb880d7652e1
|
refs/heads/master
| 2023-06-15T12:19:56.019844
| 2021-07-15T08:46:10
| 2021-07-15T08:46:10
| 324,196,430
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
# 대소문자 바꾸기
"""
2021-01-20 오후 4:09
안영준
문제
영어 소문자와 대문자로 이루어진 단어를 입력받은 뒤, 대문자는 소문자로, 소문자는 대문자로 바꾸어 출력하는 프로그램을 작성하시오.
입력
첫째 줄에 영어 소문자와 대문자로만 이루어진 단어가 주어진다. 단어의 길이는 최대 100이다.
출력
첫째 줄에 입력으로 주어진 단어에서 대문자는 소문자로, 소문자는 대문자로 바꾼 단어를 출력한다.
"""
String = input()
# str.swapcase() flips the case of every cased character and leaves other
# characters untouched -- exactly what the original per-character loop,
# temporary list, and join accomplished, but in a single C-level pass.
print(String.swapcase())
|
[
"dudwns1045@naver.com"
] |
dudwns1045@naver.com
|
51ef475926c1fe3bb2fb1c490a227bcaa3740d0b
|
21bd66da295baa48603ca9f169d870792e9db110
|
/cgp/utils/failwith.py
|
3647d91543301dbab771107a4c9d604d07544190
|
[] |
no_license
|
kristto/cgptoolbox
|
e6c01ccea1da06e35e26ffbca227258023377e48
|
8bbaf462e9c1320f237dd3c1ae6d899e1d01ade7
|
refs/heads/master
| 2021-01-16T20:38:45.097722
| 2012-03-01T09:18:10
| 2012-03-01T09:18:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,964
|
py
|
"""Modify a function to return a default value in case of error."""
from functools import wraps
import logging
from contextlib import contextmanager
import numpy as np
class NullHandler(logging.Handler):
    # No-op handler attached so importing applications without logging
    # configured don't get "no handlers could be found" warnings. (This is
    # the classic pre-2.7 recipe; logging.NullHandler exists since 2.7.)
    def emit(self, record):
        pass
# Module-level logger used by the decorators below; silent by default.
logger = logging.getLogger("failwith")
logger.addHandler(NullHandler())
@contextmanager
def silenced(logger, level=logging.CRITICAL):
    """
    Silence a logger for the duration of the 'with' block.
    >>> logger.error("Error as usual.")
    Error as usual.
    >>> with silenced(logger):
    ...     logger.error("Silenced error.")
    >>> logger.error("Back to normal.")
    Back to normal.
    You may specify a different temporary level if you like.
    >>> with silenced(logger, logging.INFO):
    ...     logger.error("Breaking through the silence.")
    Breaking through the silence.
    """
    # Save the current level and restore it even if the block raises.
    oldlevel = logger.level
    try:
        logger.setLevel(level)
        yield logger
    finally:
        logger.setLevel(oldlevel)
def nans_like(x):
    """
    Returns an array of nans with the same shape and type as a given array.
    This also works recursively with tuples, lists or dicts whose leaf nodes
    are arrays.
    >>> x = np.arange(3.0)
    >>> nans_like(x)
    array([ nan, nan, nan])
    >>> y = x.view([(k, float) for k in "a", "b", "c"])
    >>> nans_like(y)
    array([(nan, nan, nan)], dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    >>> nans_like(y.view(np.recarray))
    rec.array([(nan, nan, nan)], dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    Tuple, list, dict.
    >>> nans_like((x, y))
    [array([ nan, nan, nan]), array([(nan, nan, nan)],
    dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])]
    >>> nans_like([x, y])
    [array([ nan, nan, nan]), array([(nan, nan, nan)],
    dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])]
    >>> nans_like(dict(a=x, b=y))
    {'a': array([ nan, nan, nan]), 'b': array([(nan, nan, nan)],
    dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])}
    Nested list and dict.
    >>> nans_like([x, [x, y]])
    [array([ nan, nan, nan]), [array([ nan, nan, nan]), array([(nan, nan, nan)],
    dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])]]
    >>> nans_like(dict(a=x, b=dict(c=x, d=y)))
    {'a': array([ nan, nan, nan]),
    'b': {'c': array([ nan, nan, nan]), 'd': array([(nan, nan, nan)],
    dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])}}
    Note that there is no nan for integers.
    >>> nans_like((1, 2, 3))
    Traceback (most recent call last):
    AssertionError: nan is only defined for float types, not int...
    This works because the 1.0 makes Numpy interpret the tuple as a float array.
    >>> nans_like((1.0, 2, 3))
    array([ nan, nan, nan])
    """
    try:
        # Dict-like input (Python 2 iteritems): recurse over the values.
        return dict((k, nans_like(v)) for k, v in x.iteritems())
    except AttributeError:
        try:
            xc = np.copy(x)
            try:
                # Preserve ndarray subclasses such as np.recarray.
                xc = x.__array_wrap__(xc)
            except AttributeError:
                pass
            msg = "nan is only defined for float types, not %s" % xc.dtype
            assert not xc.dtype.kind == "i", msg
            # Fill via a flat float view so structured dtypes work too.
            # NOTE(review): np.float is an alias removed in NumPy >= 1.24.
            xc.view(np.float).fill(np.nan)
            return xc
        except TypeError:
            # Not viewable as float: treat x as a plain sequence and recurse.
            return [nans_like(i) for i in x]
def failwith(default=None):
    """
    Modify a function to return a default value in case of error.
    >>> @failwith("Default")
    ... def f(x):
    ...     raise Exception("Failure")
    >>> f(1)
    'Default'
    Exceptions are logged, but the default handler doesn't do anything.
    This example adds a handler so exceptions are logged to :data:`sys.stdout`.
    >>> import sys
    >>> logger.addHandler(logging.StreamHandler(sys.stdout))
    >>> f(2)
    Failure in <function f at 0x...>. Default: Default. args = (2,), kwargs = {}
    Traceback (most recent call last):...
    Exception: Failure
    'Default'
    >>> del logger.handlers[-1] # Removing the handler added by the doctest
    """
    # Decorator factory: `default` is captured by the closure below.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                result = func(*args, **kwargs)
            # NOTE(review): Python 2 except syntax throughout this module.
            except Exception, exc:
                # Log the full traceback, then swallow and substitute.
                msg = "Failure in %s. Default: %s. args = %s, kwargs = %s"
                logger.exception(msg, func, default, args, kwargs)
                result = default
            return result
        return wrapper
    return decorator
def failwithnanlikefirst(func):
    """
    Like :func:`failwith`, but the default is set to `nan` + result on first evaluation.
    >>> @failwithnanlikefirst
    ... def f(x):
    ...     return 1.0 / x
    >>> f(1)
    1.0
    >>> f(0)
    array(nan)
    Exceptions are logged, but the default handler doesn't do anything.
    This example adds a handler so exceptions are logged to :data:`sys.stdout`.
    >>> import sys
    >>> logger.addHandler(logging.StreamHandler(sys.stdout))
    >>> f(0)
    Failure in <function f at 0x...>. Default: nan. args = (0,), kwargs = {}
    Traceback (most recent call last):...
    ZeroDivisionError: float division...
    array(nan)
    If the first evaluation fails, the exception is logged with an explanatory
    note, then re-raised.
    >>> @failwithnanlikefirst
    ... def g():
    ...     raise Exception("Failure")
    >>> try:
    ...     g()
    ... except Exception, exc:
    ...     print "Caught exception:", exc
    <function g at 0x...> failed on first evaluation, or result could not be
    interpreted as array of float. args = (), kwargs = {}
    Traceback (most recent call last):...Exception: Failure
    Caught exception: Failure
    """
    d = {} # mutable container to store the default between evaluations
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not d:
            # First evaluation
            try:
                result = func(*args, **kwargs)
                # Remember a nan-filled twin of the first result as default.
                d["default"] = nans_like(result)
            except Exception, exc:
                msg = "%s failed on first evaluation, "
                msg += "or result could not be interpreted as array of float. "
                msg += "args = %s, kwargs = %s"
                logger.exception(msg, func, args, kwargs)
                # Without a result there is no default to fall back to.
                raise
        else:
            # Not first evaluation, so default is defined
            try:
                result = func(*args, **kwargs)
            except Exception, exc:
                msg = "Failure in %s. Default: %s. args = %s, kwargs = %s"
                logger.exception(msg, func, d["default"], args, kwargs)
                result = d["default"]
        return result
    return wrapper
def failwithnan_asfor(*args, **kwargs):
    """
    Like :func:`failwith`, but the default is set to `nans_like(func(*args, **kwargs))`.
    >>> @failwithnan_asfor(2.0, 3)
    ... def f(value, length):
    ...     return [value] * length
    >>> f()
    array([ nan, nan, nan])
    """
    def decorator(func):
        # Evaluate func once at decoration time to learn the result's shape,
        # then fall back to nans of that shape on any later failure.
        default = nans_like(func(*args, **kwargs))
        return failwith(default)(func)
    return decorator
def failwithdefault_asfor(*args, **kwargs):
    """
    Like :func:`failwith`, but the default is set to `func(*args, **kwargs)`.
    >>> @failwithdefault_asfor(2, 3)
    ... def f(value, length):
    ...     return [value] * length
    >>> f()
    [2, 2, 2]
    """
    def decorator(func):
        # The decoration-time result itself becomes the fallback value.
        default = func(*args, **kwargs)
        return failwith(default)(func)
    return decorator
if __name__ == "__main__":
    # Run the embedded doctests; ELLIPSIS is needed for the "0x..." reprs
    # and NORMALIZE_WHITESPACE for the wrapped dict/array outputs.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
|
[
"jonovik@gmail.com"
] |
jonovik@gmail.com
|
c7cc769036318b5263632ef6db922b0a4ffa72cf
|
0533d0ceb5966f7327f40d54bbd17e08e13d36bf
|
/python/HashMap/Maximum Number of Balloons/Maximum Number of Balloons.py
|
485eee52af17f72c857b5f35d3beacd6b25b3591
|
[] |
no_license
|
danwaterfield/LeetCode-Solution
|
0c6178952ca8ca879763a87db958ef98eb9c2c75
|
d89ebad5305e4d1a185b0c6f101a88691602b523
|
refs/heads/master
| 2023-03-19T01:51:49.417877
| 2020-01-11T14:17:42
| 2020-01-11T14:17:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
from collections import Counter
class Solution(object):
    def maxNumberOfBalloons(self, text):
        """
        :type text: str
        :rtype: int
        Count how many times "balloon" can be assembled from the letters of
        text: the answer is the bottleneck letter, i.e. the minimum over each
        required letter of available count divided by required count.
        """
        have = Counter(text)
        need = Counter("balloon")
        return min(have[letter] // times for letter, times in need.items())
|
[
"zjuzjj@gmail.com"
] |
zjuzjj@gmail.com
|
3b5771126a3de74c7f3d369f13baba24a89456bb
|
1b300019417ea1e25c59dd6f00fbffb60ec5a123
|
/python/example/run_demo.py
|
1bcd9cc24764d75872a145135941ce238fefc7d5
|
[
"MIT"
] |
permissive
|
Wendong-Huo/diff_stokes_flow
|
9176210b162e9a8c7b9910274fe4c699814fa7d7
|
55eb7c0f3a9d58a50c1a09c2231177b81e0da84e
|
refs/heads/master
| 2023-03-16T13:16:17.028974
| 2020-12-11T03:55:44
| 2020-12-11T03:55:44
| 576,797,332
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,729
|
py
|
import sys
sys.path.append('../')
from pathlib import Path
import numpy as np
from importlib import import_module
import scipy.optimize
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
import pickle
import os
from py_diff_stokes_flow.common.common import print_info, print_ok, print_error, print_warning, ndarray
from py_diff_stokes_flow.common.grad_check import check_gradients
from py_diff_stokes_flow.common.display import export_gif
# Update this dictionary if you would like to add new demos.
all_demo_names = {
    # ID: (module name, class name).
    # The class is resolved at runtime from py_diff_stokes_flow.env.<module>.
    'amplifier': ('amplifier_env_2d', 'AmplifierEnv2d'),
    'flow_averager': ('flow_averager_env_3d', 'FlowAveragerEnv3d'),
    'superposition_gate': ('superposition_gate_env_3d', 'SuperpositionGateEnv3d'),
    'funnel': ('funnel_env_3d', 'FunnelEnv3d'),
    'fluidic_twister': ('fluidic_twister_env_3d', 'FluidicTwisterEnv3d'),
    'fluidic_switch': ('fluidic_switch_env_3d', 'FluidicSwitchEnv3d'),
}
if __name__ == '__main__':
    # Driver pipeline: read config -> random global search -> L-BFGS-B local
    # optimization -> plot progress -> render per-frame images and GIFs.
    # Input check.
    if len(sys.argv) != 2:
        print_error('Usage: python run_demo.py [demo_name]')
        sys.exit(0)
    demo_name = sys.argv[1]
    assert demo_name in all_demo_names
    # Hyperparameters which are loaded from the config file.
    config_file_name = 'config/{}.txt'.format(demo_name)
    config = {}
    with open(config_file_name, 'r') as f:
        lines = f.readlines()
        for line in lines:
            key, val = line.strip().split(':')
            key = key.strip()
            val = val.strip()
            config[key] = val
    seed = int(config['seed'])
    sample_num = int(config['sample_num'])
    solver = config['solver']
    rel_tol = float(config['rel_tol'])
    max_iter = int(config['max_iter'])
    enable_grad_check = config['enable_grad_check'] == 'True'
    spp = int(config['spp'])
    fps = int(config['fps'])
    # Load class.
    module_name, env_name = all_demo_names[demo_name]
    Env = getattr(import_module('py_diff_stokes_flow.env.{}'.format(module_name)), env_name)
    env = Env(seed, demo_name)
    # Global search: randomly sample initial guesses and pick the best.
    samples = []
    losses = []
    best_sample = None
    best_loss = np.inf
    print_info('Randomly sampling initial guesses...')
    for _ in tqdm(range(sample_num)):
        x = env.sample()
        loss, _ = env.solve(x, False, { 'solver': solver })
        losses.append(loss)
        samples.append(ndarray(x).copy())
        if loss < best_loss:
            best_loss = loss
            best_sample = np.copy(x)
    unit_loss = np.mean(losses)
    # Persist the sampling stage so it can be re-loaded without re-running.
    pickle.dump((losses, samples, unit_loss, best_sample), open('{}/sample.data'.format(demo_name), 'wb'))
    # Load from file.
    losses, _, unit_loss, best_sample = pickle.load(open('{}/sample.data'.format(demo_name), 'rb'))
    print_info('Randomly sampled {:d} initial guesses.'.format(sample_num))
    print_info('Loss (min, max, mean): ({:4f}, {:4f}, {:4f}).'.format(
        np.min(losses), np.max(losses), np.mean(losses)
    ))
    print_info('Normalized loss (min, max, mean): ({:4f}, {:4f}, {:4f}).'.format(
        np.min(losses) / unit_loss, np.max(losses) / unit_loss, 1
    ))
    # Local optimization: run L-BFGS from best_sample.
    x_init = np.copy(best_sample)
    bounds = scipy.optimize.Bounds(env.lower_bound(), env.upper_bound())
    def loss_and_grad(x):
        # Objective for scipy: loss and gradient, both normalized by the
        # mean sampled loss so ftol behaves comparably across demos.
        t_begin = time.time()
        loss, grad, _ = env.solve(x, True, { 'solver': solver })
        # Normalize loss and grad.
        loss /= unit_loss
        grad /= unit_loss
        t_end = time.time()
        print('loss: {:3.6e}, |grad|: {:3.6e}, time: {:3.6f}s'.format(loss, np.linalg.norm(grad), t_end - t_begin))
        return loss, grad
    if enable_grad_check:
        print_info('Checking gradients...')
        # Sanity check gradients.
        success = check_gradients(loss_and_grad, x_init)
        if success:
            print_ok('Gradient check succeeded.')
        else:
            print_error('Gradient check failed.')
        sys.exit(0)
    # File index + 1 = len(opt_history).
    loss, grad = loss_and_grad(x_init)
    opt_history = [(x_init.copy(), loss, grad.copy())]
    pickle.dump(opt_history, open('{}/{:04d}.data'.format(demo_name, 0), 'wb'))
    def callback(x):
        # Called by scipy after every L-BFGS-B iteration: log and checkpoint.
        loss, grad = loss_and_grad(x)
        # NOTE(review): `global` is unnecessary here -- only append() is used.
        global opt_history
        cnt = len(opt_history)
        print_info('Summary of iteration {:4d}'.format(cnt))
        opt_history.append((x.copy(), loss, grad.copy()))
        print_info('loss: {:3.6e}, |grad|: {:3.6e}, |x|: {:3.6e}'.format(
            loss, np.linalg.norm(grad), np.linalg.norm(x)))
        # Save data to the folder.
        pickle.dump(opt_history, open('{}/{:04d}.data'.format(demo_name, cnt), 'wb'))
    results = scipy.optimize.minimize(loss_and_grad, x_init.copy(), method='L-BFGS-B', jac=True, bounds=bounds,
        callback=callback, options={ 'ftol': rel_tol, 'maxiter': max_iter})
    if not results.success:
        print_warning('Local optimization fails to reach the optimal condition and will return the last solution.')
    print_info('Data saved to {}/{:04d}.data.'.format(demo_name, len(opt_history) - 1))
    # Load results from demo_name.
    # Scan for the highest-numbered checkpoint file.
    cnt = 0
    while True:
        data_file_name = '{}/{:04d}.data'.format(demo_name, cnt)
        if not os.path.exists(data_file_name):
            cnt -= 1
            break
        cnt += 1
    data_file_name = '{}/{:04d}.data'.format(demo_name, cnt)
    print_info('Loading data from {}.'.format(data_file_name))
    opt_history = pickle.load(open(data_file_name, 'rb'))
    # Plot the optimization progress.
    plt.rc('pdf', fonttype=42)
    plt.rc('font', size=18)
    plt.rc('axes', titlesize=18)
    plt.rc('axes', labelsize=18)
    fig = plt.figure(figsize=(18, 12))
    ax_loss = fig.add_subplot(121)
    ax_grad = fig.add_subplot(122)
    ax_loss.set_position((0.12, 0.2, 0.33, 0.6))
    iterations = np.arange(len(opt_history))
    ax_loss.plot(iterations, [l for _, l, _ in opt_history], color='tab:red')
    ax_loss.set_xlabel('Iteration')
    ax_loss.set_ylabel('Loss')
    ax_loss.set_yscale('log')
    ax_loss.grid(True, which='both')
    ax_grad.set_position((0.55, 0.2, 0.33, 0.6))
    # NOTE(review): np.float is removed in NumPy >= 1.24; np.finfo(float).eps
    # is the modern spelling.
    ax_grad.plot(iterations, [np.linalg.norm(g) + np.finfo(np.float).eps for _, _, g in opt_history],
        color='tab:green')
    ax_grad.set_xlabel('Iteration')
    ax_grad.set_ylabel('|Gradient|')
    ax_grad.set_yscale('log')
    ax_grad.grid(True, which='both')
    plt.show()
    fig.savefig('{}/progress.pdf'.format(demo_name))
    # Render the results.
    print_info('Rendering optimization history in {}/'.format(demo_name))
    # 000k.png renders opt_history[k], which is also the last element in 000k.data.
    cnt = len(opt_history)
    for k in range(cnt - 1):
        # Linearly interpolate between consecutive iterates for fps frames.
        xk0, _, _ = opt_history[k]
        xk1, _, _ = opt_history[k + 1]
        for i in range(fps):
            t = i / fps
            xk = (1 - t) * xk0 + t * xk1
            env.render(xk, '{:04d}.png'.format(k * fps + i), { 'solver': solver, 'spp': spp })
            print_info('{}/mode_[0-9]*/{:04d}.png is ready.'.format(demo_name, k * fps + i))
    env.render(opt_history[-1][0], '{:04d}.png'.format((cnt - 1) * fps), { 'solver': solver, 'spp': spp })
    print_info('{}/mode_[0-9]*/{:04d}.png is ready.'.format(demo_name, (cnt - 1) * fps))
    # Get mode number.
    mode_num = 0
    while True:
        mode_folder = Path(demo_name) / 'mode_{:04d}'.format(mode_num)
        if not mode_folder.exists():
            break
        export_gif(mode_folder, '{}_{:04d}.gif'.format(demo_name, mode_num), fps=fps)
        print_info('Video {}_{:04d}.gif is ready.'.format(demo_name, mode_num))
        mode_num += 1
|
[
"taodu@csail.mit.edu"
] |
taodu@csail.mit.edu
|
73dde30ee3e5e9b336b4af24f9c38c43d0e0cf60
|
a5698f82064aade6af0f1da21f504a9ef8c9ac6e
|
/huaweicloud-sdk-cce/huaweicloudsdkcce/v3/region/cce_region.py
|
8075aff2ddabc7a62cba30087f4176a99207fa16
|
[
"Apache-2.0"
] |
permissive
|
qizhidong/huaweicloud-sdk-python-v3
|
82a2046fbb7d62810984399abb2ca72b3b47fac6
|
6cdcf1da8b098427e58fc3335a387c14df7776d0
|
refs/heads/master
| 2023-04-06T02:58:15.175373
| 2021-03-30T10:47:29
| 2021-03-30T10:47:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,907
|
py
|
# coding: utf-8
import types
from huaweicloudsdkcore.region.region import Region
class CceRegion:
    """Static registry of CCE service regions, keyed by region id."""

    def __init__(self):
        pass

    CN_NORTH_1 = Region(id="cn-north-1", endpoint="https://cce.cn-north-1.myhuaweicloud.com")
    CN_NORTH_4 = Region(id="cn-north-4", endpoint="https://cce.cn-north-4.myhuaweicloud.com")
    CN_SOUTH_1 = Region(id="cn-south-1", endpoint="https://cce.cn-south-1.myhuaweicloud.com")
    CN_EAST_2 = Region(id="cn-east-2", endpoint="https://cce.cn-east-2.myhuaweicloud.com")
    CN_EAST_3 = Region(id="cn-east-3", endpoint="https://cce.cn-east-3.myhuaweicloud.com")
    CN_SOUTHWEST_2 = Region(id="cn-southwest-2", endpoint="https://cce.cn-southwest-2.myhuaweicloud.com")
    AP_SOUTHEAST_1 = Region(id="ap-southeast-1", endpoint="https://cce.ap-southeast-1.myhuaweicloud.com")
    AP_SOUTHEAST_2 = Region(id="ap-southeast-2", endpoint="https://cce.ap-southeast-2.myhuaweicloud.com")
    AP_SOUTHEAST_3 = Region(id="ap-southeast-3", endpoint="https://cce.ap-southeast-3.myhuaweicloud.com")
    AF_SOUTH_1 = Region(id="af-south-1", endpoint="https://cce.af-south-1.myhuaweicloud.com")

    # Read-only view: callers cannot mutate the registry.
    static_fields = types.MappingProxyType({
        "cn-north-1": CN_NORTH_1,
        "cn-north-4": CN_NORTH_4,
        "cn-south-1": CN_SOUTH_1,
        "cn-east-2": CN_EAST_2,
        "cn-east-3": CN_EAST_3,
        "cn-southwest-2": CN_SOUTHWEST_2,
        "ap-southeast-1": AP_SOUTHEAST_1,
        "ap-southeast-2": AP_SOUTHEAST_2,
        "ap-southeast-3": AP_SOUTHEAST_3,
        "af-south-1": AF_SOUTH_1,
    })

    @staticmethod
    def value_of(region_id, static_fields=static_fields):
        """Return the Region registered under *region_id*.

        Raises KeyError when region_id is empty/None or not registered.
        (Fixes the original double dict lookup and its truthiness-based
        missing-key test, which would mis-fire on any falsy stored value.)
        """
        if not region_id:
            raise KeyError("Unexpected empty parameter: region_id.")
        region = static_fields.get(region_id)
        if region is None:
            raise KeyError("Unexpected region_id: " + region_id)
        return region
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
dbbab268c0f12ac2bcfab7eab23967dd84e060e4
|
0252a277036b9ac7f95e5db3cad6c1a94b89c4ef
|
/eaif4_ws/build/turtlebot_apps/turtlebot_rapps/catkin_generated/pkg.installspace.context.pc.py
|
5910a9329ceee27595e6b34e8f4452ce3011c710
|
[] |
no_license
|
maxwelldc/lidar_slam
|
1e5af586cd2a908474fa29224b0d9f542923c131
|
560c8507ea1a47844f9ce6059f48937b0627967b
|
refs/heads/master
| 2020-07-01T03:15:42.877900
| 2019-08-07T10:25:27
| 2019-08-07T10:25:27
| 201,025,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by the catkin build; regenerate via the build system
# rather than editing these values by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_rapps"
PROJECT_SPACE_DIR = "/home/wenhou/eaif4_ws/install"
PROJECT_VERSION = "2.3.7"
|
[
"374931377@qq.com"
] |
374931377@qq.com
|
c42484aa0e251a858cba80f1b7cbda8c5b61ad40
|
b6fa182321756b891b84958e2b2c01e63b3f88b2
|
/stepik/product _of_numbers.py
|
61d44cf81b636fd8b2f1484dd3cedb783f9c8444
|
[] |
no_license
|
carden-code/python
|
872da0dff5466070153cf945c428f1bc8309ea2b
|
64e4df0d9893255ad362a904bb5d9677a383591c
|
refs/heads/master
| 2023-07-05T05:14:16.479392
| 2021-08-22T21:27:36
| 2021-08-22T21:27:36
| 305,476,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
# Напишите программу для определения, является ли число произведением двух чисел из данного набора,
# выводящую результат в виде ответа «ДА» или «НЕТ».
#
# Формат входных данных
# В первой строке подаётся число n, (0 < n < 1000) – количество чисел в наборе.
# В последующих n строках вводятся целые числа, составляющие набор (могут повторяться).
# Затем следует целое число, которое является или не является произведением двух каких-то чисел из набора.
#
# Формат выходных данных
# Программа должна вывести «ДА» или «НЕТ» в соответствии с условием задачи.
#
# Примечание.
# Само на себя число из набора умножиться не может, другими словами, два множителя должны иметь разные номера в наборе.
amount_numbers = int(input())
numbers_list = [int(input()) for _ in range(amount_numbers)]
product = int(input())
# any() short-circuits on the first matching pair, unlike the original
# exhaustive double loop. `i != j` enforces "two different positions in the
# set": a number may not be multiplied by itself, but equal values at
# different positions are allowed.
yes = any(
    num * other == product
    for i, num in enumerate(numbers_list)
    for j, other in enumerate(numbers_list)
    if i != j
)
print('ДА' if yes else 'НЕТ')
|
[
"carden.ruby@gmail.com"
] |
carden.ruby@gmail.com
|
4c005fbd4e54f24c9b1a2f8d6364a336338e0c60
|
0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a
|
/python3/19_Concurrency_and_Parallel_Programming/02_multiprocessing/example2.py
|
b775c924c64d2785f7f994c9c8c606e50a2ae97e
|
[] |
no_license
|
udhayprakash/PythonMaterial
|
3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156
|
e72f44e147141ebc9bf9ec126b70a5fcdbfbd076
|
refs/heads/develop
| 2023-07-08T21:07:33.154577
| 2023-07-03T10:53:25
| 2023-07-03T10:53:25
| 73,196,374
| 8
| 5
| null | 2023-05-26T09:59:17
| 2016-11-08T14:55:51
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
import collections
import multiprocessing as mp
# One-way message: an event name plus its positional arguments.
Msg = collections.namedtuple("Msg", ["event", "args"])


class BaseProcess(mp.Process):
    """A process backed by an internal queue for simple one-way message passing.

    Events sent with send() are dispatched to ``do_<event>`` methods on the
    subclass; run() loops forever pulling messages off the queue.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue = mp.Queue()

    def send(self, event, *args):
        """Put the event and its args on the queue as a `Msg`."""
        self.queue.put(Msg(event, args))

    def dispatch(self, msg):
        """Route one `Msg` to the matching ``do_<event>`` handler."""
        event, args = msg
        handler = getattr(self, "do_%s" % event, None)
        if handler is None:
            raise NotImplementedError("Process has no handler for [%s]" % event)
        handler(*args)

    def run(self):
        # Consume messages forever; the process ends only when terminated.
        while True:
            self.dispatch(self.queue.get())
# usage
class MyProcess(BaseProcess):
    # Handlers are discovered by name: send("helloworld", ...) in the parent
    # dispatches to do_helloworld in the child process.
    def do_helloworld(self, arg1, arg2):
        print(arg1, arg2)
if __name__ == "__main__":
    process = MyProcess()
    process.start()
    # NOTE(review): run() loops forever and nothing joins or terminates the
    # child here, so this demo leaves the worker running.
    process.send("helloworld", "hello", "world")
|
[
"uday3prakash@gmail.com"
] |
uday3prakash@gmail.com
|
f10cdd86dd40f18b8d7c02cf3eabfd28b6204cf2
|
9f61f361a545825dd6ff650c2d81bc4d035649bd
|
/tests/test_document.py
|
e95f63d807e367b91125f2d53fc4b1218a64b17d
|
[
"MIT"
] |
permissive
|
cassj/dexy
|
53c9e7ce3f601d9af678816397dcaa3a111ba670
|
fddfeb4db68c362a4126f496dbd019f4639d07ba
|
refs/heads/master
| 2020-12-25T11:52:35.144908
| 2011-06-05T20:52:52
| 2011-06-05T20:52:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
from dexy.controller import Controller
from dexy.document import Document
from dexy.artifacts.file_system_json_artifact import FileSystemJsonArtifact
import os
def setup_controller():
    """Build and run a Controller over a single in-memory doc (simple.py|pyg)."""
    controller = Controller()
    controller.artifacts_dir = 'artifacts'
    if not os.path.isdir(controller.artifacts_dir):
        os.mkdir(controller.artifacts_dir)
    controller.artifact_class = FileSystemJsonArtifact
    controller.allow_remote = True
    # Virtual document ("@" prefix) supplied inline rather than read from disk.
    controller.config = {
        'tests/data' : {
            "@simple.py|pyg" : {
                "contents" : "x = 5\nx^2"
            }
        }
    }
    controller.setup_and_run()
    return controller
def setup_doc():
    """Return the Document the controller produced for simple.py|pyg."""
    controller = setup_controller()
    doc = controller.members['tests/data/simple.py|pyg']
    assert isinstance(doc, Document)
    return doc
def setup_artifact():
    """Return the final (last-filter) artifact of the test document."""
    doc = setup_doc()
    return doc.final_artifact()
def test_artifact_hash_dict():
    """Hash dict keys must be whitelisted, and re-setting the hashstring
    must not change its value."""
    artifact = setup_artifact()
    hash_dict = artifact.hash_dict()
    for k in hash_dict.keys():
        assert k in artifact.HASH_WHITELIST

    # hashstring shouldn't change
    hashstring = artifact.hashstring
    # BUG FIX: the original `artifact.set_hashstring` was a bare attribute
    # access (missing parentheses), so the method never ran and the final
    # assertion was vacuous.
    artifact.set_hashstring()
    assert artifact.hashstring == hashstring
def test_init():
    """document: filters should be processed correctly"""
    doc = Document(FileSystemJsonArtifact, "data/test.py|abc")
    assert doc.name == "data/test.py"
    assert doc.filters == ['abc']
    # Filters appended after construction must extend the key in order.
    doc.filters += ['def', 'xyz']
    assert doc.filters == ['abc', 'def', 'xyz']
    assert doc.key() == "data/test.py|abc|def|xyz"
def test_complete():
    """document: after controller has run"""
    # End-to-end: the key reflects the configured path and filter chain.
    doc = setup_doc()
    assert doc.key() == "tests/data/simple.py|pyg"
|
[
"ana@ananelson.com"
] |
ana@ananelson.com
|
0d87632a4b2c03e675bb8726a5f7622be7f35e49
|
06e897ed3b6effc280eca3409907acc174cce0f5
|
/plugins/pelican_unity_webgl/config.py
|
d7250678123196715136c46cfa982901234d38d6
|
[
"LicenseRef-scancode-other-permissive",
"MIT",
"AGPL-3.0-only"
] |
permissive
|
JackMcKew/jackmckew.dev
|
ae5a32da4f1b818333ae15c6380bca1329d38f1e
|
b5d68070b6f15677a183424c84e30440e128e1ea
|
refs/heads/main
| 2023-09-02T14:42:19.010294
| 2023-08-15T22:08:19
| 2023-08-15T22:08:19
| 213,264,451
| 15
| 8
|
MIT
| 2023-02-14T21:50:28
| 2019-10-07T00:18:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 201
|
py
|
# unity webgl options
# Default embed geometry and alignment used when not overridden
# (presumably pixels -- confirm against the plugin's template usage).
DEFAULT_WIDTH = 960
DEFAULT_HEIGHT = 600
DEFAULT_ALIGN = "center"
# paths
GAMES_ROOT_DIR = "/games" # directory with games
TEMPLATE_PATH = "/games/utemplate" # template path
|
[
"jackmckew2@gmail.com"
] |
jackmckew2@gmail.com
|
8418693b0b7f600bc206c9513a976a8685d46f52
|
7a7ed5656b3a162523ba0fd351dd551db99d5da8
|
/x11/library/wayland/actions.py
|
73d6d6bb7fcd4510e4e0b35f12090d0231dd9fe0
|
[] |
no_license
|
klaipedetis/PisiLinux
|
acd4953340ebf14533ea6798275b8780ad96303b
|
3384e5dfa1acd68fa19a26a6fa1cf717136bc878
|
refs/heads/master
| 2021-01-24T22:59:30.055059
| 2013-11-08T21:43:39
| 2013-11-08T21:43:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 TUBITAK/BILGEM
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
# 32-bit compatibility builds install under /usr/lib32.
# NOTE(review): Libdir appears unused in this file -- confirm before removing.
Libdir = "/usr/lib32" if get.buildTYPE() == "emul32" else "/usr/lib"
def setup():
    # Regenerate the build system, then configure without docs or static libs.
    autotools.autoreconf("-vif")
    autotools.configure("--disable-documentation --disable-static")
def build():
    # Plain `make` with the configured tree.
    autotools.make()
def install():
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    # Install the docs only once: skip them for the emul32 (32-bit) pass.
    if get.buildTYPE() == "emul32":
        return
    pisitools.dodoc("COPYING", "TODO", "README")
|
[
"namso-01@hotmail.it"
] |
namso-01@hotmail.it
|
85d65df06168b2114299f77d388cbe712b4b7085
|
458c487a30df1678e6d22ffdb2ea426238197c88
|
/ubcsp/add_gc.py
|
e6be0db995f628f5c02f95391bfd50d28fde12ec
|
[
"MIT"
] |
permissive
|
pluck992/ubc
|
04062d2cdeef8d983da1bfaa0ff640a3b25c72c2
|
54fc89ae6141775321d5ea770e973ff09be51c0c
|
refs/heads/master
| 2023-02-19T05:01:42.401329
| 2021-01-21T06:32:15
| 2021-01-21T06:32:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
import pp
from gdslib import plot_circuit
from simphony.library import siepic
from simphony.netlist import Subcircuit
def add_gc_te(circuit, gc=siepic.ebeam_gc_te1550):
    """ add input and output gratings
    Args:
        circuit: needs to have `input` and `output` pins
        gc: grating coupler
    Returns:
        a new Subcircuit with the circuit sandwiched between two couplers.
    """
    c = Subcircuit(f"{circuit}_gc")
    gc = pp.call_if_func(gc)
    # One coupler instance is reused for both the input (gci) and output (gco).
    c.add([(gc, "gci"), (gc, "gco"), (circuit, "circuit")])
    c.connect_many(
        [("gci", "n1", "circuit", "input"), ("gco", "n1", "circuit", "output")]
    )
    # c.elements["circuit"].pins["input"] = "input_circuit"
    # c.elements["circuit"].pins["output"] = "output_circuit"
    # Expose the free grating ports as the new circuit-level input/output.
    c.elements["gci"].pins["n2"] = "input"
    c.elements["gco"].pins["n2"] = "output"
    return c
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from ubc.cm.mzi import mzi
    # Demo: wrap an MZI with TE grating couplers and plot its response.
    c1 = mzi()
    c2 = add_gc_te(c1)
    plot_circuit(c2)
    plt.show()
|
[
"j"
] |
j
|
c808420814784eb74158420818d1e193c2cff1fe
|
eed5c6267fe9ac9031c21eae6bc53010261505ac
|
/tests/metrics/test_default_metrics.py
|
9609d6d88e052ff6a942b412ee06c84c93ff3b82
|
[
"MIT"
] |
permissive
|
voxmedia/thumbor
|
3a07ae182143b5a850bf63c36887a1ee8e3ad617
|
29b92b69e4c241ddd5ba429f8269d775a1508e70
|
refs/heads/master
| 2022-08-25T13:07:12.136876
| 2022-08-18T16:15:00
| 2022-08-18T16:15:00
| 22,433,808
| 6
| 0
|
MIT
| 2019-09-13T18:05:03
| 2014-07-30T15:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import mock
from preggy import expect
import thumbor.metrics
from thumbor.importer import Importer
from tests.base import TestCase
class DefaultMetricsTestCase(TestCase):
def get_importer(self):
importer = Importer(self.config)
importer.import_modules()
return importer
def test_can_create_context_with_default_metrics(self):
expect(self.context).not_to_be_null()
expect(self.context.metrics).to_be_instance_of(thumbor.metrics.logger_metrics.Metrics)
@mock.patch('thumbor.metrics.BaseMetrics.initialize')
def test_can_initizalize_when_request_comes(self, mocked_initialize):
expect(mocked_initialize.call_count).to_equal(0)
self.fetch('/unsafe/smart/image.jpg')
expect(mocked_initialize.call_count).to_equal(1)
|
[
"rflorianobr@gmail.com"
] |
rflorianobr@gmail.com
|
8792f9fb40411dda7586be8db31e4e63b961154c
|
2dd814284a1408706459e7dd6295a4575617c0c6
|
/cupyx/scipy/special/digamma.py
|
af54d2a7fd9ec2e5072f91abcaa7fd7cf6a903c3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
dendisuhubdy/cupy
|
4e31c646fa697f69abbb07f424910cc8e5f0e595
|
b612827e858b8008455a76e8d9b396386c1e4467
|
refs/heads/master
| 2021-01-23T10:56:45.639699
| 2018-07-12T17:41:26
| 2018-07-12T17:41:26
| 93,111,021
| 0
| 0
|
MIT
| 2019-12-09T06:55:54
| 2017-06-02T00:31:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,681
|
py
|
# This source code contains SciPy's code.
# https://github.com/scipy/scipy/blob/master/scipy/special/cephes/psi.c
#
#
# Cephes Math Library Release 2.8: June, 2000
# Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier
#
#
# Code for the rational approximation on [1, 2] is:
#
# (C) Copyright John Maddock 2006.
# Use, modification and distribution are subject to the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import cupy
from cupy import core
_digamma_kernel = None
polevl_definition = '''
template<int N> static __device__ double polevl(double x, double coef[])
{
double ans;
double *p;
p = coef;
ans = *p++;
for (int i = 0; i < N; ++i){
ans = ans * x + *p++;
}
return ans;
}
'''
psi_definition = '''
__constant__ double A[] = {
8.33333333333333333333E-2,
-2.10927960927960927961E-2,
7.57575757575757575758E-3,
-4.16666666666666666667E-3,
3.96825396825396825397E-3,
-8.33333333333333333333E-3,
8.33333333333333333333E-2
};
__constant__ double PI = 3.141592653589793;
__constant__ double EULER = 0.5772156649015329;
__constant__ float Y = 0.99558162689208984f;
__constant__ double root1 = 1569415565.0 / 1073741824.0;
__constant__ double root2 = (381566830.0 / 1073741824.0) / 1073741824.0;
__constant__ double root3 = 0.9016312093258695918615325266959189453125e-19;
__constant__ double P[] = {
-0.0020713321167745952,
-0.045251321448739056,
-0.28919126444774784,
-0.65031853770896507,
-0.32555031186804491,
0.25479851061131551
};
__constant__ double Q[] = {
-0.55789841321675513e-6,
0.0021284987017821144,
0.054151797245674225,
0.43593529692665969,
1.4606242909763515,
2.0767117023730469,
1.0
};
static __device__ double digamma_imp_1_2(double x)
{
/*
* Rational approximation on [1, 2] taken from Boost.
*
* Now for the approximation, we use the form:
*
* digamma(x) = (x - root) * (Y + R(x-1))
*
* Where root is the location of the positive root of digamma,
* Y is a constant, and R is optimised for low absolute error
* compared to Y.
*
* Maximum Deviation Found: 1.466e-18
* At double precision, max error found: 2.452e-17
*/
double r, g;
g = x - root1 - root2 - root3;
r = polevl<5>(x - 1.0, P) / polevl<6>(x - 1.0, Q);
return g * Y + g * r;
}
static __device__ double psi_asy(double x)
{
double y, z;
if (x < 1.0e17) {
z = 1.0 / (x * x);
y = z * polevl<6>(z, A);
}
else {
y = 0.0;
}
return log(x) - (0.5 / x) - y;
}
double __device__ psi(double x)
{
double y = 0.0;
double q, r;
int i, n;
if (isnan(x)) {
return x;
}
else if (isinf(x)){
if(x > 0){
return x;
}else{
return nan("");
}
}
else if (x == 0) {
return -1.0/0.0;
}
else if (x < 0.0) {
/* argument reduction before evaluating tan(pi * x) */
r = modf(x, &q);
if (r == 0.0) {
return nan("");
}
y = -PI / tan(PI * r);
x = 1.0 - x;
}
/* check for positive integer up to 10 */
if ((x <= 10.0) && (x == floor(x))) {
n = (int)x;
for (i = 1; i < n; i++) {
y += 1.0 / i;
}
y -= EULER;
return y;
}
/* use the recurrence relation to move x into [1, 2] */
if (x < 1.0) {
y -= 1.0 / x;
x += 1.0;
}
else if (x < 10.0) {
while (x > 2.0) {
x -= 1.0;
y += 1.0 / x;
}
}
if ((1.0 <= x) && (x <= 2.0)) {
y += digamma_imp_1_2(x);
return y;
}
/* x is large, use the asymptotic series */
y += psi_asy(x);
return y;
}
'''
def _get_digamma_kernel():
global _digamma_kernel
if _digamma_kernel is None:
_digamma_kernel = core.ElementwiseKernel(
'T x', 'T y',
"""
y = psi(x)
""",
'digamma_kernel',
preamble=polevl_definition+psi_definition
)
return _digamma_kernel
def digamma(x):
"""The digamma function.
Args:
x (cupy.ndarray): The input of digamma function.
Returns:
cupy.ndarray: Computed value of digamma function.
.. seealso:: :data:`scipy.special.digamma`
"""
if x.dtype.char in '?ebBhH':
x = x.astype(cupy.float32)
elif x.dtype.char in 'iIlLqQ':
x = x.astype(cupy.float64)
y = cupy.zeros_like(x)
_get_digamma_kernel()(x, y)
return y
|
[
"yoshikawa@preferred.jp"
] |
yoshikawa@preferred.jp
|
1fa1a301a80606168abdda73ff6ba0c7c75eb089
|
0c6c7365d6ff8b694bc906ec5f74c741e8bb0d37
|
/Algorithms/1-Two-Sum.py
|
5a8065a1ff799e35aa89d3fd7283348dbcfd26ad
|
[] |
no_license
|
XiongQiuQiu/leetcode-slove
|
d58ab90caa250c86b7a1ade8b60c669821d77995
|
60f0da57b8ea4bfb937e2fe0afe3caea719cd7e4
|
refs/heads/master
| 2021-01-23T11:21:15.069080
| 2019-07-08T15:42:48
| 2019-07-08T15:42:48
| 93,133,558
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
'''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
have = {}
for i in xrange(len(nums)):
if target - nums[i] in have:
return (have[target - nums[i]], i)
else:
have[nums[i]] = i
|
[
"zjw2goo@gmail.com"
] |
zjw2goo@gmail.com
|
9082848ae2d0cc2948f499a7e0d5ab47e3aea76a
|
7109eecfb78e0123b534ef960dbf42be38e49514
|
/x7-src/engine/engine/db/__init__.py
|
092a2b6c0406d609cd15150f7c8c97faf8669621
|
[
"Apache-2.0"
] |
permissive
|
wendy-king/x7_compute_venv
|
a6eadd9a06717090acea3312feebcbc9d3925e88
|
12d74f15147868463954ebd4a8e66d5428b6f56d
|
refs/heads/master
| 2016-09-06T16:58:13.897069
| 2012-01-31T01:26:27
| 2012-01-31T01:26:27
| 3,310,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DB abstraction for Engine
"""
from engine.db.api import *
|
[
"king_wendy@sina.com"
] |
king_wendy@sina.com
|
aced241806907aec705128d3774a0a81da9b26ed
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5706278382862336_0/Python/neilw4/base.py
|
6cd2e86d9431e61edda3533f65f23cfb2d36240a
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
#!/usr/bin/python
import sys
def memo(f):
cache = {}
def memf(*x):
if not x in cache:
cache[x] = f(*x)
return cache[x]
return memf
def memo(*x):
if not x in cache:
cache[x] = f(*x)
return cache[x]
return memf
def valid(p, q, g):
return (p * (2**g)) % q == 0
def solve(l):
l = l.split('/')
p = int(l[0])
q = int(l[1])
g = 40
if not valid(p, q, g):
return "impossible"
for i in xrange(0, g):
if p * (2**i) >= q:
return i
#needs an input file
infname = sys.argv[1]
inf = open(infname)
#assumes infname ends with .in
outfname = infname[:-3] + ".out"
#output file can be specified separately
if len(sys.argv) > 2:
outfname = sys.argv[2]
outf = open(outfname, "w")
case = 1
#ignore 1st line
inf.readline()
while True:
line = inf.readline()
if line == '':
break
sol = "Case #" + str(case) + ": " + str(solve(line.strip()))
print sol
outf.write(sol + "\n")
case += 1
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
512f01a1261eb1c96485dc9c80c20b5d387c5e0a
|
71ddc215db07f311e7028cedcaaaaa08b92d5022
|
/how_to_find_in_list_int_float_str.py
|
61b074edfa7607a79552fc823c11540059116f88
|
[] |
no_license
|
kabitakumari20/list_logical
|
026a17e80c8feeeccf9f4141882eb6a31b80b082
|
af86c6609a2b20f0019e0bd33e498ab34c546fbd
|
refs/heads/main
| 2023-05-31T23:49:08.922831
| 2021-06-08T11:15:30
| 2021-06-08T11:15:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
list=[2, 3.5,4.3,"hello world", 5, 4.3]
empty1=[]
empty2=[]
empty3=[]
i = 0
while i<len(list):
if list[i]==str(list[i]):
empty1.append(list[i])
elif list[i]==int(list[i]):
empty2.append(list[i])
elif list[i]==float(list[i]):
empty3.append(list[i])
else:
print(i)
i+=1
print(empty1)
print(empty2)
print(empty3)
|
[
"kabita20@navgurukul.org"
] |
kabita20@navgurukul.org
|
fdba97aa3f723173a174712b445c40df7b64abcd
|
3a642fa1fc158d3289358b53770cdb39e5893711
|
/src/xlsxwriter/test/comparison/test_print_area02.py
|
8dc1c8ed62b42654997cba02f26ba5b02274c02d
|
[] |
no_license
|
andbar-ru/traceyourself.appspot.com
|
d461277a3e6f8c27a651a1435f3206d7b9307d9f
|
5f0af16ba2727faceb6b7e1b98073cd7d3c60d4c
|
refs/heads/master
| 2020-07-23T14:58:21.511328
| 2016-12-26T22:03:01
| 2016-12-26T22:03:01
| 73,806,841
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'print_area02.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
worksheet.print_area('A1:G1')
worksheet.write('A1', 'Foo')
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
|
[
"andrey@voktd-andbar.int.kronshtadt.ru"
] |
andrey@voktd-andbar.int.kronshtadt.ru
|
b468b68150bb6fd52e90e01fcf615bdf01f04f4b
|
3b50605ffe45c412ee33de1ad0cadce2c5a25ca2
|
/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py
|
58248d325b1452e0525f68f20276017e7ad7e814
|
[
"Apache-2.0"
] |
permissive
|
Superjomn/Paddle
|
f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1
|
7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188
|
refs/heads/develop
| 2023-02-04T20:27:54.244843
| 2023-01-26T15:31:14
| 2023-01-26T15:31:14
| 66,896,049
| 4
| 1
|
Apache-2.0
| 2023-04-14T02:29:52
| 2016-08-30T01:45:54
|
C++
|
UTF-8
|
Python
| false
| false
| 6,958
|
py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ["WITH_DISTRIBUTE"] = "ON"
import unittest
import paddle
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
import paddle.fluid as fluid
paddle.enable_static()
# For Net
base_lr = 0.2
emb_lr = base_lr * 3
dict_dim = 1500
emb_dim = 128
hid_dim = 128
margin = 0.1
sample_rate = 1
batch_size = 4
# this unittest is tested for SparseSharedAdamSGDRule
class TestPSPassWithBow(unittest.TestCase):
def net(self):
def get_acc(cos_q_nt, cos_q_pt, batch_size):
cond = paddle.less_than(cos_q_nt, cos_q_pt)
cond = fluid.layers.cast(cond, dtype='float64')
cond_3 = paddle.sum(cond)
acc = paddle.divide(
cond_3,
fluid.layers.fill_constant(
shape=[1], value=batch_size * 1.0, dtype='float64'
),
name="simnet_acc",
)
return acc
def get_loss(cos_q_pt, cos_q_nt):
loss_op1 = paddle.subtract(
fluid.layers.fill_constant_batch_size_like(
input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
),
cos_q_pt,
)
loss_op2 = paddle.add(loss_op1, cos_q_nt)
loss_op3 = paddle.maximum(
fluid.layers.fill_constant_batch_size_like(
input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
),
loss_op2,
)
avg_cost = paddle.mean(loss_op3)
return avg_cost
is_distributed = False
is_sparse = True
# query
q = paddle.static.data(
name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1
)
# embedding
q_emb = fluid.contrib.layers.sparse_embedding(
input=q,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01),
name="__emb__",
learning_rate=emb_lr,
),
)
q_emb = paddle.reshape(q_emb, [-1, emb_dim])
# vsum
q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
q_ss = paddle.nn.functional.softsign(q_sum)
# fc layer after conv
q_fc = paddle.static.nn.fc(
x=q_ss,
size=hid_dim,
weight_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01),
name="__q_fc__",
learning_rate=base_lr,
),
)
# label data
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
# pt
pt = paddle.static.data(
name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
)
# embedding
pt_emb = fluid.contrib.layers.sparse_embedding(
input=pt,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01),
name="__emb__",
learning_rate=emb_lr,
),
)
pt_emb = paddle.reshape(pt_emb, [-1, emb_dim])
# vsum
pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
pt_ss = paddle.nn.functional.softsign(pt_sum)
# fc layer
pt_fc = paddle.static.nn.fc(
x=pt_ss,
size=hid_dim,
weight_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01),
name="__fc__",
learning_rate=base_lr,
),
bias_attr=fluid.ParamAttr(name="__fc_b__"),
)
# nt
nt = paddle.static.data(
name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
)
# embedding
nt_emb = fluid.contrib.layers.sparse_embedding(
input=nt,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01),
name="__emb__",
learning_rate=emb_lr,
),
)
nt_emb = paddle.reshape(nt_emb, [-1, emb_dim])
# vsum
nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
nt_ss = paddle.nn.functional.softsign(nt_sum)
# fc layer
nt_fc = paddle.static.nn.fc(
x=nt_ss,
size=hid_dim,
weight_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01),
name="__fc__",
learning_rate=base_lr,
),
bias_attr=fluid.ParamAttr(name="__fc_b__"),
)
cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc)
cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc)
# loss
avg_cost = get_loss(cos_q_pt, cos_q_nt)
# acc
acc = get_acc(cos_q_nt, cos_q_pt, batch_size)
return [avg_cost, acc, cos_q_pt]
def test(self):
os.environ["PADDLE_PSERVER_NUMS"] = "2"
os.environ["PADDLE_TRAINERS_NUM"] = "2"
os.environ["POD_IP"] = "127.0.0.1"
os.environ["PADDLE_PORT"] = "36001"
os.environ["PADDLE_TRAINER_ID"] = "0"
os.environ["PADDLE_TRAINERS_NUM"] = "2"
os.environ[
"PADDLE_PSERVERS_IP_PORT_LIST"
] = "127.0.0.1:36001,127.0.0.2:36001"
os.environ["TRAINING_ROLE"] = "PSERVER"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
loss, acc, _ = self.net()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
configs = {}
configs['__emb__'] = {
"table_parameters.__emb__.accessor.embed_sgd_param.name": "SparseSharedAdamSGDRule",
"table_parameters.__emb__.accessor.embedx_sgd_param.name": "SparseSharedAdamSGDRule",
}
strategy.sparse_table_configs = configs
optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(loss)
fleet.init_server()
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
Superjomn.noreply@github.com
|
7ed4c2eb2c224f3d1a91789faff26ab73a083d63
|
6821339070e85305875633abca1c3d6c90881ede
|
/flaskWeb/flask_demo/blue_print/index.py
|
ebd3377ee3bac19028f4335aaccdf5e7338cc9be
|
[] |
no_license
|
Abel-Fan/uaif1901
|
07cda7ea5675ec52ae92c0021f713951c62bd198
|
f6d81a44b658e61b2c3ae6b4b604faebc1fb136a
|
refs/heads/master
| 2020-05-03T01:05:46.289805
| 2019-04-30T10:16:53
| 2019-04-30T10:16:53
| 178,328,172
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
from flask import Blueprint,render_template
from flaskWeb.flask_demo.db.connectdb import database,cursor
from flaskWeb.flask_demo.settings import INDEX_STATIC
indexblue = Blueprint("index",__name__,url_prefix="/")
@indexblue.route("/",methods=["GET"])
def index():
data = {}
sql = "select * from produces limit 3"
cursor.execute(sql) # 执行sql语句
tuijians = cursor.fetchall() # 获取数据
data['tuijian'] = tuijians
return render_template("index/index.html",data=data,index_static=INDEX_STATIC)
@indexblue.route("/<pagename>.html",methods=["GET"])
def getpage(pagename):
return render_template("index/%s.html"%pagename)
|
[
"842615663@qq.com"
] |
842615663@qq.com
|
1d2fcfdd3bd3561748484b153ccd79db0d2f6603
|
ca850269e513b74fce76847310bed143f95b1d10
|
/build/navigation/move_slow_and_clear/catkin_generated/pkg.installspace.context.pc.py
|
e8dee1765968cad46f6536a7c38fe58f630c2d73
|
[] |
no_license
|
dvij542/RISS-2level-pathplanning-control
|
f98f2c83f70c2894d3c248630159ea86df8b08eb
|
18390c5ab967e8649b9dc83681e9090a37f3d018
|
refs/heads/main
| 2023-06-15T03:58:25.293401
| 2021-06-20T20:20:30
| 2021-06-20T20:20:30
| 368,553,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;nav_core;pluginlib;roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmove_slow_and_clear".split(';') if "-lmove_slow_and_clear" != "" else []
PROJECT_NAME = "move_slow_and_clear"
PROJECT_SPACE_DIR = "/home/dvij5420/catkin_ws/install"
PROJECT_VERSION = "1.14.9"
|
[
"dvij.kalaria@gmail.com"
] |
dvij.kalaria@gmail.com
|
2bce411c35e912e6ed7c250789f2f2259956fe8f
|
6679fd1102802bf190294ef43c434b6047840dc2
|
/openconfig_bindings/bgp/global_/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/__init__.py
|
912ccb145ff12843af0245b01ed67e1ee0f21e7d
|
[] |
no_license
|
robshakir/pyangbind-openconfig-napalm
|
d49a26fc7e38bbdb0419c7ad1fbc590b8e4b633e
|
907979dc14f1578f4bbfb1c1fb80a2facf03773c
|
refs/heads/master
| 2023-06-13T17:17:27.612248
| 2016-05-10T16:46:58
| 2016-05-10T16:46:58
| 58,091,515
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,217
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import config
import state
class prefix_limit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-bgp - based on the path /bgp/global/afi-safis/afi-safi/l2vpn-vpls/prefix-limit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__config','__state',)
_yang_name = 'prefix-limit'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
helper = kwargs.pop("path_helper", None)
if helper is False:
self._path_helper = False
elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
self._path_helper = helper
elif hasattr(self, "_parent"):
helper = getattr(self._parent, "_path_helper", False)
self._path_helper = helper
else:
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'bgp', u'global', u'afi-safis', u'afi-safi', u'l2vpn-vpls', u'prefix-limit']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /bgp/global/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/config (container)
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /bgp/global/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
try:
t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /bgp/global/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state (container)
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /bgp/global/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
try:
t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
config = property(_get_config, _set_config)
state = property(_get_state, _set_state)
_pyangbind_elements = {'config': config, 'state': state, }
|
[
"rjs@jive.com"
] |
rjs@jive.com
|
7111ddfb6acf2732a7fac3581369ead18f23ff53
|
109ac2988a85c85ce0d734b788caca1c3177413b
|
/senlin/tests/__init__.py
|
1634fd8f1ae8335f9341c3e1fcb454027b088cb8
|
[
"Apache-2.0"
] |
permissive
|
tengqm/senlin
|
481c16e19bc13911625d44819c6461a7c72e41cd
|
aa59c55c098abb13590bc4308c753338ce4a70f4
|
refs/heads/master
| 2021-01-19T04:51:17.010414
| 2015-03-16T10:06:09
| 2015-03-16T10:06:09
| 28,478,662
| 2
| 5
| null | 2015-03-04T07:05:00
| 2014-12-25T10:22:18
|
Python
|
UTF-8
|
Python
| false
| false
| 912
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
def fake_translate_msgid(msgid, domain, desired_locale=None):
return msgid
oslo_i18n.enable_lazy()
#To ensure messages don't really get translated while running tests.
#As there are lots of places where matching is expected when comparing
#exception message(translated) with raw message.
oslo_i18n._translate_msgid = fake_translate_msgid
|
[
"tengqim@cn.ibm.com"
] |
tengqim@cn.ibm.com
|
4ad44bcde9b6556481cdb983363a5b9757ecef01
|
e1b09ae83920656b20cad0e84f21b741752e926d
|
/sams/check_dupl_def2.py
|
29943740c0b63b607eb174d6f368341eced7c57f
|
[] |
no_license
|
yeongsun/cute
|
5c46729d43f13967cdf4bda0edd100362de90c70
|
3150d7387c04c15e3569dc821562564cd8f9d87c
|
refs/heads/master
| 2020-04-25T10:38:41.833479
| 2018-11-29T05:42:46
| 2018-11-29T05:42:46
| 156,344,910
| 0
| 0
| null | 2018-11-06T07:41:03
| 2018-11-06T07:41:03
| null |
UTF-8
|
Python
| false
| false
| 2,231
|
py
|
import os, sys
import logging
import concurrent.futures
import ys_logger
sys.path.append(os.path.abspath('..'))
logger = logging.getLogger('root')
logger.setLevel("INFO")
logger.addHandler(ys_logger.MyHandler())
logger.info("Finish setting logger")
class check_dupl_conc():
def __init__(self):
self.f1 = open("delivered_data/sum.tsv", "r")
# 박영선 a
# 이원문 b
# 카카오 c
# 박영선 d
# 이원문 e
self.f2 = open("not_dup_head_conc.txt", "w")
self.f3 = open("dup_head_conc.txt", "w")
self.lst = list()
def preproc(self):
l1 = list
for ff in self.f1:
ff = ff.replace("\n", "")
i = ff.split("\t")
if len(i) == 9:
q1 = i[4].strip().replace("?", "")
q2 = i[5].strip().replace("?", "")
ans = i[6].strip()
l1 = q1, q2, ans
elif len(i) == 5:
q1 = i[1].strip().replace("?", "")
q2 = i[2].strip().replace("?", "")
ans = i[3].strip()
l1 = q1, q2, ans
self.lst.append(l1)
self.f1.close()
logger.info("Finish load f1")
def comp(self, f):
for line in f:
item = line.split("\t")
q1 = item[5].strip().replace("?", "")
q2 = item[13].strip().replace("?", "")
ans = item[6].strip()
flag = True
for l in self.lst:
if q1 == l[0] and q2 == l[1] and ans == l[2]:
flag = False
self.f3.write(line)
break
if flag:
self.f2.write(line)
def main(self):
with open("select3.txt", "r") as f:
# 박영선 parkys a
# 이원문 moon b
# 카카오 kakao c
# 박영선 ylunar x
# 이원문 moon y
self.comp(f)
logger.info("Finish All")
self.f2.close()
self.f3.close()
if __name__ == "__main__":
a = check_dupl_conc()
a.preproc()
a.main()
|
[
"ylunar@naver.com"
] |
ylunar@naver.com
|
a868f06ffc94c8e8f5374027fa9157e9edf75fed
|
9d5ae8cc5f53f5aee7247be69142d9118769d395
|
/582. Kill Process.py
|
f6d2712a589e4d1bded42a8fccb55a00c2de168e
|
[] |
no_license
|
BITMystery/leetcode-journey
|
d4c93319bb555a7e47e62b8b974a2f77578bc760
|
616939d1599b5a135747b0c4dd1f989974835f40
|
refs/heads/master
| 2020-05-24T08:15:30.207996
| 2017-10-21T06:33:17
| 2017-10-21T06:33:17
| 84,839,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
class Solution(object):
def killProcess(self, pid, ppid, kill):
"""
:type pid: List[int]
:type ppid: List[int]
:type kill: int
:rtype: List[int]
"""
d = {}
for i in xrange(len(ppid)):
if ppid[i] in d:
d[ppid[i]] += [pid[i]]
else:
d[ppid[i]] = [pid[i]]
res = []
stack = [kill]
while stack:
k = stack.pop()
res += [k]
if k in d:
stack += d[k]
return res
s = Solution()
print s.killProcess([1, 3, 10, 5], [3, 0, 5, 3], 5)
|
[
"noreply@github.com"
] |
BITMystery.noreply@github.com
|
c462013ed3ab5ba561d890a7be8d9df5ed9bdf6f
|
c362623e7bd0d656ad3a5a87cff8c2f2f4d64c30
|
/example/wikidocs_exam_11_20.py
|
b96e7d53b878744a881b52ea3ed6b05932a6a7b8
|
[] |
no_license
|
bbster/PracticeAlgorithm
|
92ce418e974e4be8e95b0878b2e349bf8438de5f
|
171fa1880fb2635c5bac55c18a6981a656470292
|
refs/heads/master
| 2021-07-10T16:17:24.088996
| 2020-12-09T10:47:46
| 2020-12-09T10:47:46
| 222,721,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
# https://wikidocs.net/7014
# 011
삼성전자 = 50000
print("평가금액", 삼성전자 * 10)
# 012
시가총액 = 298000000000
현재가 = 50000
PER = 15.79
print("시가총액:", 시가총액, "현재가:", 현재가, "PER:", PER)
# 답안지
# 시가총액 = 298000000000000
# 현재가 = 5000
# PER = 15.79
# print(시가총액, type(시가총액))
# print(현재가, type(현재가))
# print(PER, type(PER))
# type(변수) - 변수의 데이터 타입을 알수있다. int형인지 float인지 등등
# 013
s = "hello"
t = "python"
print(s, end="! ");print(t)
# 답안지
# s = "hello"
# t = "python"
# print(s+"!", t)
# 014
print(2+2*3)
# 015
a = "128"
print(type(a))
# class 'str'
# 016
num_str = "720"
num_int_casting = int("720")
print(num_str, type(num_str))
print(num_int_casting, type(num_int_casting))
# 017
num = 100
str_casting = str(100)
str_casting2 = str(num)
print(str_casting, type(str_casting))
print(str_casting2, type(str_casting2))
# 018
str_a = "15.79"
float_casting = float(str_a)
print(float_casting, type(float_casting))
# 019
year = "2020"
print(year, type(year))
year_int_casting = int(year)
print(year_int_casting, type(year_int_casting))
# 020
air_conditioner = 48584
term = 36
print(air_conditioner * term)
|
[
"bbster12@naver.com"
] |
bbster12@naver.com
|
3b0d6a8455a25f85ab87e64585230366a5e647bc
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5744014401732608_0/Python/veluca/sol.py
|
bd10179fcb95ce05a54755b6ee878bca104f9dda
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
#!/usr/bin/env pypy3
import sys
def solve():
B, M = map(int, input().split())
if M > 2**(B-2):
return "IMPOSSIBLE"
sol = [['0' for i in range(B)] for i in range(B)]
for i in range(B-1):
for j in range(0, i):
sol[j][i] = '1'
if M == 2**(B-2):
sol[0][B-1] = '1'
M -= 1
for i in range(B-2):
if M & (2**i):
sol[1+i][B-1] = '1'
return "POSSIBLE\n" + "\n".join("".join(sol[i]) for i in range(B))
T = int(input())
for l in range(1, T+1):
print("Case #%d:" % l, end=" ")
print(solve())
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
2ed301967dcb7f052a8c51f56ef1b0bdc1ca357e
|
fa54359c670fd9d4db543505819ce26481dbcad8
|
/setup.py
|
4d01cb7c22b59ecad2520a5c62baf9bba188d3c2
|
[
"MIT"
] |
permissive
|
masasin/decorating
|
4b961e7b2201b84a1cf0553c65e4d0c0768723d5
|
c19bc19b30eea751409f727b03e156123df704e1
|
refs/heads/master
| 2021-01-20T16:35:43.333543
| 2016-05-18T08:22:48
| 2016-05-18T08:22:48
| 59,138,136
| 0
| 0
| null | 2016-05-18T17:43:23
| 2016-05-18T17:43:23
| null |
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
#!/usr/bin/env python
# coding=utf-8
#
# Python Script
#
# Copyright © Manoel Vilela
#
#
from setuptools import setup, find_packages
from codecs import open # To use a consistent encoding
from os import path
from warnings import warn
import decorating
try:
import pypandoc
except ImportError:
warn("Only-for-developers: you need pypandoc for upload "
"correct reStructuredText into PyPI home page")
here = path.abspath(path.dirname(__file__))
readme = path.join(here, 'README.md')
if 'pypandoc' in globals():
long_description = pypandoc.convert(readme, 'rst', format='markdown')
else:
# Get the long description from the relevant file
with open(readme, encoding='utf-8') as f:
long_description = f.read()
setup(
name='decorating',
version=decorating.__version__,
description="A useful collection of decorators (focused in animation)",
long_description=long_description,
classifiers=[
"Environment :: Console",
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Operating System :: Unix",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='decorating animation decorators decorator',
author=decorating.__author__,
author_email=decorating.__email__,
url=decorating.__url__,
download_url="{u}/archive/v{v}.tar.gz".format(u=decorating.__url__,
v=decorating.__version__),
zip_safe=False,
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples',
'tests', 'docs', '__pycache__']),
platforms='unix',
install_requires=[
x.strip() for x in open('requirements.txt').readlines()
],
entry_points={ # no entry-points yet
# 'console_scripts': [
# 'decorating = decorating.cli:main'
# ]
}
)
|
[
"manoel_vilela@engineer.com"
] |
manoel_vilela@engineer.com
|
8939aa5cea12440890c866f83eaff3e3468a5fb9
|
9c79c683196e0d42b41a831a6e37bb520a75e269
|
/bin/read_csv.py
|
cd747d2de7527220c0d51ccbc09642e1e551c460
|
[] |
no_license
|
YutingYao/crater_lakes
|
7714cf64cd3649bd93b2c3cafcc8c73b4a3ff05b
|
b57ac0c18ce37b0f71f59fc8d254fa12890090ee
|
refs/heads/master
| 2023-05-14T08:45:02.290369
| 2017-05-13T00:55:48
| 2017-05-13T00:55:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
read_csv.py
Created on Fri Feb 10 08:48:07 2017
@author: sam
"""
import os
import pandas as pd
import numpy as np
import datetime
def read_csv(target):
try:
os.chdir('/home/sam/git/crater_lakes/atmcorr/results/'+target)
df = pd.read_csv(target+'.csv')
return {
'r':np.clip(df.red.values,0,1),
'g':np.clip(df.green.values,0,1),
'b':np.clip(df.blue.values,0,1),
'dT':df.dBT.values,
'timestamps':df.timestamp.values,
'datetimes':[datetime.datetime.fromtimestamp(t) for t in df.timestamp.values],
'satellites':df.satellite.values
}
except:
print('File IO error for :'+target)
|
[
"samsammurphy@gmail.com"
] |
samsammurphy@gmail.com
|
f16bb51a8835137aba50c21bb060c677a7604e02
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_musses.py
|
b1ce3e681289e77be9498786f527b925bf9b01de
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#calss header
class _MUSSES():
def __init__(self,):
self.name = "MUSSES"
self.definitions = muss
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['muss']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
6b55598316455e43f008e4b6dad8851ba4ed3aa7
|
e9a3f4a6f8828597dae8af8ea318b444af1798ba
|
/mag_ng/users/migrations/0003_auto_20200818_0517.py
|
f4d959172de433cee25454c2887bbea24208b12e
|
[] |
no_license
|
kinsomaz/Online-Magazine-Website
|
c4a0b3b067a28202763a3646e02db9355e2e98a7
|
dbb02225af2202913ea7dcc076f5af0052db117c
|
refs/heads/master
| 2022-12-04T00:46:31.619920
| 2020-08-21T12:53:58
| 2020-08-21T12:53:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
# Generated by Django 3.1 on 2020-08-18 04:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20200818_0506'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='username',
field=models.CharField(max_length=20, unique=True, verbose_name='username'),
),
]
|
[
"alameenraji31@gmail.com"
] |
alameenraji31@gmail.com
|
30dbf2c9ddf45492b2c4906ac69c6fdaf6cf3b0c
|
9547f82dc5a81bdc19ba5442d41518a81b518825
|
/consecucion_traspaso/models.py
|
e3468724b015cae28f71774b7f879788abe68b5d
|
[] |
no_license
|
luisfarfan/capacitacion
|
12784f95564eda1dc38dc22aa518b99d4b315c75
|
c93e4502476c02bb3755a68d84404453b2c2dd81
|
refs/heads/master
| 2021-01-11T04:17:15.476849
| 2017-02-14T01:13:27
| 2017-02-14T01:13:27
| 71,189,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class PersonalCapacitacion(models.Model):
id_per = models.IntegerField(primary_key=True)
dni = models.CharField(max_length=8, blank=True, null=True)
ape_paterno = models.CharField(max_length=100, blank=True, null=True, db_column='ape_paterno')
ape_materno = models.CharField(max_length=100, blank=True, null=True, db_column='ape_materno')
nombre = models.CharField(max_length=100, blank=True, null=True, db_column='nombre')
id_cargofuncional = models.IntegerField()
id_convocatoriacargo = models.IntegerField()
zona = models.CharField(max_length=5, blank=True, null=True)
contingencia = models.IntegerField(blank=True, null=True)
ubigeo = models.CharField(max_length=6)
class Meta:
managed = False
db_table = 'v_personal_capacitacion'
class MetaSeleccion(models.Model):
ccdd = models.CharField(max_length=2, blank=True, null=True)
ccpp = models.CharField(max_length=2, blank=True, null=True)
ccdi = models.CharField(max_length=2, blank=True, null=True)
ubigeo = models.CharField(max_length=6, blank=True, null=True)
id_convocatoriacargo = models.IntegerField()
id_cargofuncional = models.IntegerField()
meta = models.IntegerField()
class Meta:
managed = False
db_table = 'meta_seleccion'
# bandaprob
# 3 = ALTA
# 4 = BAJA
class Ficha177(models.Model):
id_per = models.IntegerField(primary_key=True)
id_convocatoriacargo = models.IntegerField()
capacita = models.IntegerField()
notacap = models.FloatField()
seleccionado = models.IntegerField()
sw_titu = models.IntegerField()
bandaprob = models.IntegerField()
class Meta:
managed = False
db_table = 'ficha_177'
|
[
"lucho.farfan9@gmail.com"
] |
lucho.farfan9@gmail.com
|
6fae34308cd664decc0ad86974d5ad045c8d9d68
|
7af5288111965b8bbcdfcd21fcf9db1f2e886741
|
/point_to_path_measurement.py
|
742e4e4ebcc00750b26d9257ebc1950227237cc5
|
[] |
no_license
|
GeoTecINIT/CyclingPathAnalysis
|
fc65b506da5f9365ed1fa7595fa3e16a3e54c581
|
fb54af19b6dd217ffd224b4ec87e18ab8045c35e
|
refs/heads/master
| 2020-03-14T02:39:14.968754
| 2018-04-27T17:11:56
| 2018-04-27T17:11:56
| 131,403,393
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,722
|
py
|
"""
This script allow us to convert a list of coordinates into a string geometry
It does not consider the information of trips
It just considers location, distance and time
Author: Diego Pajarito
"""
import datetime
import data_setup as data
import geojson
from LatLon import LatLon, Latitude, Longitude
from geojson import FeatureCollection, Feature, LineString
import pandas as pd
location = data.getLocation()
measurement = data.getMeasurement()
def build_feature(ftr_geometry, ftr_properties):
ftr = Feature(properties=ftr_properties, geometry=ftr_geometry)
if ftr.is_valid:
return ftr
else:
print(ftr)
return False
def get_start_stop_linestring(point):
tp = []
tp.append(point)
tp.append(point)
return LineString(tp)
def get_generic_linestring():
pt = (0, 0)
pt1 = (0.0001, 0.001)
return LineString([pt, pt1])
def build_trip_feature(properties, points):
linestring = LineString(points)
if linestring.is_valid:
feature = build_feature(linestring, properties)
else:
if len(points) == 1:
ls = LineString(get_start_stop_linestring(points[0]))
feature = build_feature(ls, properties)
print ("trip with only one point: " + str(properties))
else:
ls = LineString(get_generic_linestring())
feature = build_feature(ls, properties)
print ("Trip with empty Linestring: " + str(properties))
return feature
def build_segment_feature(properties, start_point, end_point):
ls = LineString([start_point, end_point])
if ls.is_valid:
feature = build_feature(ls, properties)
else:
ls = LineString(get_generic_linestring())
feature = build_feature(ls, properties)
print ("Segment with empty Linestring: " + str(properties))
return feature
def get_distance(point1, point2):
point1_coordinates = LatLon(Latitude(point1[1]), Longitude(point1[0]))
point2_coordinates = LatLon(Latitude(point2[1]), Longitude(point2[0]))
distance = point1_coordinates.distance(point2_coordinates)
return distance * 1000
def get_last_speed(device, time):
values = measurement[measurement.measurement == 'speed']
values = values[values.device == device]
values = values[values.time_device < time]
if values.size > 1:
values_sort = values.sort_values('time_device', ascending=False)
value = values_sort['value'].iloc[0] * 3.6
else:
value = -1
return value
def get_last_distance_a(device, time):
values = measurement[measurement.measurement == 'distance']
values = values[values.device == device]
values = values[values.time_device < time]
if values.size > 1:
values_sort = values.sort_values('time_device', ascending=False)
value = values_sort['value'].iloc[0]
else:
value = -1
return value
def get_last_distance_b(device, time):
values = measurement[measurement.measurement == 'last_distance']
values = values[values.device == device]
values = values[values.time_device < time]
if values.size > 1:
values_sort = values.sort_values('time_device', ascending=False)
value = values_sort['value'].iloc[0]
else:
value = -1
return value
def main():
trip_points = []
feature_segments = []
feature_trips = []
new_trip = True
trip_count = 0
location_sort = location.sort_values(['device', 'time_gps'])
for i, row in location_sort.iterrows():
lat = location['latitude'][i]
lon = location['longitude'][i]
alt = location['altitude'][i]
device = location['device'][i]
precision = location['precision'][i]
timestamp = pd.to_datetime(location_sort['time_gps'][i])
point = (lon, lat, alt)
if new_trip:
new_trip = False
segment_count = 1
trip_count = trip_count + 1
trip_points.append(point)
segment_start = timestamp
trip_start = timestamp
last_point = point
last_device = device
last_timestamp = timestamp
else:
distance = get_distance(last_point, point)
time_difference_min = pd.Timedelta(timestamp - last_timestamp).total_seconds() / 60
if distance > 500 or time_difference_min > 5 or last_device != device:
properties_trip = {'device': last_device, 'start_time': str(trip_start), 'end_time': str(last_timestamp),
'trip_count': trip_count, 'point_count': len(trip_points)}
feature_trip = build_trip_feature(properties_trip, trip_points)
if feature_trip:
feature_trips.append(feature_trip)
trip_count = trip_count + 1
trip_start = timestamp
trip_points = [point]
segment_start = timestamp
segment_count = 1
last_point = point
last_device = device
last_timestamp = timestamp
else:
last_distance_a = get_last_distance_a(device, location_sort['time_gps'][i])
last_distance_b = get_last_distance_b(device, location_sort['time_gps'][i])
last_speed = get_last_speed(device, location_sort['time_gps'][i])
if time_difference_min == 0:
speed_geometry = 0
else:
speed_geometry = (distance / 1000) / (time_difference_min / 60)
# get last distance
properties_segment = {'device': device, 'start_time': str(segment_start), 'end_time': str(timestamp),
'segment_count': segment_count, 'distance_geometry': distance,
'last_distance_a': last_distance_a, 'last_distance_b': last_distance_b,
'speed_geometry': speed_geometry, 'last_speed': last_speed,
'precision_end': precision, 'trip_count': trip_count}
feature_segment = build_segment_feature(properties_segment, last_point, point)
if feature_segment:
feature_segments.append(feature_segment)
trip_points.append(point)
segment_start = timestamp
segment_count = segment_count + 1
last_point = point
last_device = device
last_timestamp = timestamp
# last point to build a trip
properties_trip = {'device': last_device, 'start_time': str(trip_start), 'end_time': str(last_timestamp),
'trip_count': trip_count, 'point_count': len(trip_points)}
feature_trip = build_trip_feature(properties_trip, trip_points)
if feature_trip:
feature_trips.append(feature_trip)
feature_collection_trips = FeatureCollection(feature_trips)
print("Trips Feature collection is valid: " + str(feature_collection_trips.is_valid))
with open('./output/trips_raw.geojson', 'w') as outfile:
geojson.dump(feature_collection_trips, outfile)
feature_collection_segments = FeatureCollection(feature_segments)
print("Segments Feature collection is valid: " + str(feature_collection_segments.is_valid))
with open('./output/segments_raw.geojson', 'w') as outfile:
geojson.dump(feature_collection_segments, outfile)
print("Processed %d points, finished at %s" % {location.size, str(datetime.datetime.now().time())})
if __name__ == "__main__":
print ("Processing started at %s" % str(datetime.datetime.now().time()))
main()
|
[
"diegopajarito@gmail.com"
] |
diegopajarito@gmail.com
|
77160378e0aff096aa646eaca4addb171b24a317
|
59de7788673ade984b9c9fbc33664a7cbdba67d3
|
/res_bw/scripts/common/lib/encodings/hz.py
|
fc3d801e512648fcedb54a7c040b1b2914c9941b
|
[] |
no_license
|
webiumsk/WOT-0.9.15-CT
|
3fa24ab37a6c91b7073034afb2f355efa5b7fe36
|
fbd194fbaa6bdece51c7a68fc35bbb5257948341
|
refs/heads/master
| 2020-12-24T21:27:23.175774
| 2016-05-01T13:47:44
| 2016-05-01T13:47:44
| 57,600,180
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,131
|
py
|
# 2016.05.01 15:29:55 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/encodings/hz.py
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('hz')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(name='hz', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\hz.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:29:55 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
3544578b5eba352958bb896b645b4312ea39834f
|
769c8cac5aea3c9cb1e7eeafb1e37dbe9ea4d649
|
/TaskScheduler/hotel_list_task.py
|
0bee18d0d9cf36192d1c2f1f2dd5ddf676443a6a
|
[] |
no_license
|
20113261/p_m
|
f0b93b516e4c377aaf8b1741671759822ee0ec1a
|
ca7713de005c4c10e5cae547851a38a13211b71d
|
refs/heads/master
| 2020-03-20T01:03:29.785618
| 2018-03-17T11:06:49
| 2018-03-17T11:06:49
| 137,065,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/11 下午5:30
# @Author : Hou Rong
# @Site :
# @File : hotel_list_task.py
# @Software: PyCharm
import Common.DateRange
import dataset
from Common.DateRange import dates_tasks
from TaskScheduler.TaskInsert import InsertTask
Common.DateRange.DATE_FORMAT = '%Y%m%d'
db = dataset.connect('mysql+pymysql://reader:mioji1109@10.19.118.147/source_info?charset=utf8')
if __name__ == '__main__':
with InsertTask(worker='hotel_list', task_name='ctrip_hotel_list_0711') as it:
for line in db.query('''SELECT city_id
FROM hotel_suggestions_city
WHERE source = 'ctrip' AND select_index != -1 AND annotation != -1;'''):
city_id = line['city_id']
for day in dates_tasks(90, day_step=10, ignore_days=20):
args = {'source': 'ctrip', 'city_id': city_id, 'check_in': day,
'part': '20170711'}
it.insert_task(args)
|
[
"nmghr9@gmail.com"
] |
nmghr9@gmail.com
|
004867de305d55875c7b5d8dc93e22bff54fff86
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/restore-the-array-from-adjacent-pairs.py
|
91aa1ba0ebb1c185e6625d0352c4f6985e14a576
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 571
|
py
|
# Time: O(n)
# Space: O(n)
import collections
class Solution(object):
def restoreArray(self, adjacentPairs):
"""
:type adjacentPairs: List[List[int]]
:rtype: List[int]
"""
adj = collections.defaultdict(list)
for u, v in adjacentPairs:
adj[u].append(v)
adj[v].append(u)
result = next([x, adj[x][0]] for x in adj if len(adj[x]) == 1)
while len(result) != len(adjacentPairs)+1:
result.append(adj[result[-1]][adj[result[-1]][0] == result[-2]])
return result
|
[
"noreply@github.com"
] |
kamyu104.noreply@github.com
|
105947379a933fb3d9c7594e0f9ee5edef5ec989
|
659836ef3a9ac558538b016dbf4e128aa975ae7c
|
/backend/ingredient/models.py
|
ba8262719d98f47795c66d3d2646c01dcfba676b
|
[] |
no_license
|
zzerii/save_your_ingredients
|
fda1c769d158bca9dfd3c28ac9ff34ed7ae4e6a3
|
5ebde82255c1a6edf0c19d9032015d05c9d0abc9
|
refs/heads/master
| 2023-02-21T22:19:28.954594
| 2021-01-22T11:39:16
| 2021-01-22T11:39:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
from django.db import models
# Create your models here.
class Ingredient(models.Model):
name = models.CharField(max_length=255)
info = models.CharField(max_length=255)
trim = models.CharField(max_length=255)
|
[
"jinsoo941010@naver.com"
] |
jinsoo941010@naver.com
|
7ac936ecd5083f62b8a3b206f7e560a01d51ac58
|
e0a9dcd4f53aa6bf4472efe451e226663212abda
|
/core/execute.py
|
d8d444c3f1a16fa7af00f3de0f4f8ca5d7541d09
|
[] |
no_license
|
dilawar/ghonchu
|
f0505dce8ba76402e7c58c7fc4efd0412ce3503a
|
5527b4d444f113b0ab51f758fc809e8ab81c5a72
|
refs/heads/master
| 2016-09-02T05:33:07.167106
| 2014-12-12T12:07:50
| 2014-12-12T12:07:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
"""execute.py: Execute core action.
Last modified: Sat Jan 18, 2014 05:01PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
from notes import note
def new_note(title):
n = note.Note(title)
n.write()
|
[
"dilawars@ncbs.res.in"
] |
dilawars@ncbs.res.in
|
9116fbcd17562627c4d5504fdc5b28015b3d830d
|
6fe2d3c27c4cb498b7ad6d9411cc8fa69f4a38f8
|
/algorithms/algorithms-python/leetcode/Question_111_Minimum_Depth_of_Binary_Tree.py
|
20e53e489f88b9f32c07604bd8be49b4895f2660
|
[] |
no_license
|
Lanceolata/code
|
aae54af632a212c878ce45b11dab919bba55bcb3
|
f7d5a7de27c3cc8a7a4abf63eab9ff9b21d512fb
|
refs/heads/master
| 2022-09-01T04:26:56.190829
| 2021-07-29T05:14:40
| 2021-07-29T05:14:40
| 87,202,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
#!/usr/bin/python
# coding: utf-8
from TreeNode import *
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
left = self.minDepth(root.left)
right = self.minDepth(root.right)
return left + right + 1 if left == 0 or right == 0 else min(left, right) + 1
|
[
"lanceolatayuan@gmail.com"
] |
lanceolatayuan@gmail.com
|
aa893b07c3613f505969019869fe7e5913d60a10
|
8634b4f7f2293bf431ba8ed59e95f80abc59483f
|
/Homework/10/orderdict.py
|
fae771bb2e90cba4047e19dc516c8e03b0f7b948
|
[] |
no_license
|
TitanVA/Metiz
|
e1e2dca42118f660356254c39c7fadc47f772719
|
e54f10b98226e102a5bb1eeda7f1e1eb30587c32
|
refs/heads/master
| 2020-12-22T11:44:58.746055
| 2020-02-10T14:41:16
| 2020-02-10T14:41:16
| 236,770,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
from _collections import OrderedDict
favorite_languages = OrderedDict()
favorite_languages['jen'] = 'python'
favorite_languages['sarah'] = 'c'
favorite_languages['edward'] = 'ruby'
favorite_languages['phil'] = 'python'
for name, language in favorite_languages.items():
print(name.title() + '\'s favorite language is',
language.title() + '.')
|
[
"viktorbezai@gmail.com"
] |
viktorbezai@gmail.com
|
d86da89a7837039de5cc9432332391c1929d6f86
|
d2e8ad203a37b534a113d4f0d4dd51d9aeae382a
|
/django_graphene_authentication/django_graphene_authentication/signals.py
|
47adcc189eddf36fa915f1ac41f05cdf7b2ebd8f
|
[
"MIT"
] |
permissive
|
Koldar/django-koldar-common-apps
|
40e24a7aae78973fa28ca411e2a32cb4b2f4dbbf
|
06e6bb103d22f1f6522e97c05ff8931413c69f19
|
refs/heads/main
| 2023-08-17T11:44:34.631914
| 2021-10-08T12:40:40
| 2021-10-08T12:40:40
| 372,714,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
from django.dispatch import Signal
# providing_args=['request', 'refresh_token']
refresh_token_revoked = Signal()
# providing_args=['request', 'refresh_token', 'refresh_token_issued']
refresh_token_rotated = Signal()
|
[
"massimobono1@gmail.com"
] |
massimobono1@gmail.com
|
9eb53df032e3c06138e6c43f5b306169140d64a0
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part006719.py
|
42aa4358fcc37db511e0345b6fdde91a2bd9246d
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher47811(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.2.2.1.0_1', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({}), [
(VariableWithCount('i2.2.1.1', 1, 1, None), Mul),
(VariableWithCount('i2.3.2.2.1.0_1', 1, 1, S(1)), Mul)
]),
2: (2, Multiset({}), [
(VariableWithCount('i2.3.2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.2.2.1.0_2', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher47811._instance is None:
CommutativeMatcher47811._instance = CommutativeMatcher47811()
return CommutativeMatcher47811._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 47810
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
754d441707341b8ba8d827ed526ecce1b52c54ed
|
fd4dd0ce51eb1c9206d5c1c29d6726fc5f2cb122
|
/src/kafka_consumer.py
|
2c15842317f104c1081a9e44920ee8bec1234986
|
[] |
no_license
|
kbaseapps/relation_engine_sync
|
0a9ae11326245b98bd173d77203ff49ccd222165
|
def99d329d0d4101f3864e21a3e1a6ecb34fa6e0
|
refs/heads/master
| 2020-04-12T13:07:27.771094
| 2019-08-05T23:53:50
| 2019-08-05T23:53:50
| 162,512,534
| 0
| 0
| null | 2019-08-05T23:53:51
| 2018-12-20T01:56:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,996
|
py
|
"""
Consume workspace update events from kafka.
"""
import json
import traceback
from confluent_kafka import Consumer, KafkaError
from src.utils.logger import log
from src.utils.config import get_config
from src.utils.workspace_client import download_info
from src.utils.re_client import check_doc_existence
from src.import_object import import_object
_CONFIG = get_config()
def run():
"""Run the main event loop, ie. the Kafka Consumer, dispatching to self._handle_message."""
topics = [
_CONFIG['kafka_topics']['workspace_events'],
_CONFIG['kafka_topics']['re_admin_events']
]
log('INFO', f"Subscribing to: {topics}")
log('INFO', f"Client group: {_CONFIG['kafka_clientgroup']}")
log('INFO', f"Kafka server: {_CONFIG['kafka_server']}")
consumer = Consumer({
'bootstrap.servers': _CONFIG['kafka_server'],
'group.id': _CONFIG['kafka_clientgroup'],
'auto.offset.reset': 'earliest',
'enable.auto.commit': True
})
consumer.subscribe(topics)
while True:
msg = consumer.poll(timeout=0.5)
if msg is None:
continue
if msg.error():
if msg.error().code() == KafkaError._PARTITION_EOF:
log('INFO', 'End of stream.')
else:
log('ERROR', f"Kafka message error: {msg.error()}")
continue
val = msg.value().decode('utf-8')
try:
msg = json.loads(val)
log('INFO', f'New message: {msg}')
_handle_msg(msg)
except Exception as err:
log('ERROR', '=' * 80)
log('ERROR', f"Error importing:\n{type(err)} - {err}")
log('ERROR', msg)
log('ERROR', err)
# Prints to stderr
traceback.print_exc()
log('ERROR', '=' * 80)
consumer.close()
def _handle_msg(msg):
"""Receive a kafka message."""
event_type = msg.get('evtype')
wsid = msg.get('wsid')
if not wsid:
raise RuntimeError(f'Invalid wsid in event: {wsid}')
if not event_type:
raise RuntimeError(f"Missing 'evtype' in event: {msg}")
log('INFO', f'Received {msg["evtype"]} for {wsid}/{msg.get("objid", "?")}')
if event_type in ['IMPORT', 'NEW_VERSION', 'COPY_OBJECT', 'RENAME_OBJECT']:
_import_obj(msg)
elif event_type == 'IMPORT_NONEXISTENT':
_import_nonexistent(msg)
elif event_type == 'OBJECT_DELETE_STATE_CHANGE':
_delete_obj(msg)
elif event_type == 'WORKSPACE_DELETE_STATE_CHANGE':
_delete_ws(msg)
elif event_type in ['CLONE_WORKSPACE', 'IMPORT_WORKSPACE']:
_import_ws(msg)
elif event_type == 'SET_GLOBAL_PERMISSION':
_set_global_perms(msg)
else:
raise RuntimeError(f"Unrecognized event {event_type}.")
def _import_obj(msg):
log('INFO', 'Downloading obj')
obj_info = download_info(msg['wsid'], msg['objid'], msg.get('ver'))
import_object(obj_info)
def _import_nonexistent(msg):
"""Import an object only if it does not exist in RE already."""
upa = ':'.join([str(p) for p in [msg['wsid'], msg['objid'], msg['ver']]])
log('INFO', f'_import_nonexistent on {upa}') # TODO
_id = 'wsfull_object_version/' + upa
exists = check_doc_existence(_id)
if not exists:
_import_obj(msg)
def _delete_obj(msg):
"""Handle an object deletion event (OBJECT_DELETE_STATE_CHANGE)"""
log('INFO', '_delete_obj TODO') # TODO
raise NotImplementedError()
def _delete_ws(msg):
"""Handle a workspace deletion event (WORKSPACE_DELETE_STATE_CHANGE)."""
log('INFO', '_delete_ws TODO') # TODO
raise NotImplementedError()
def _import_ws(msg):
"""Import all data for an entire workspace."""
log('INFO', '_import_ws TODO') # TODO
raise NotImplementedError()
def _set_global_perms(msg):
"""Set permissions for an entire workspace (SET_GLOBAL_PERMISSION)."""
log('INFO', '_set_global_perms TODO') # TODO
raise NotImplementedError()
|
[
"jayrbolton@gmail.com"
] |
jayrbolton@gmail.com
|
cccac8d820d9d534647989e6cfc573f5a94e1876
|
5c15aba2bdcd4348c988245f59817cbe71b87749
|
/src/trial.py
|
00cd0826415c55ab5e87e90071586c86ffae075a
|
[] |
no_license
|
chengshaozhe/commitmentBenefits
|
f7db038333ee95217713d1d4b2a1fb3d0c295fdd
|
0388803960bc9995ffbcfb6435c134e488a98b63
|
refs/heads/master
| 2023-03-27T02:31:01.522997
| 2021-01-12T10:18:12
| 2021-01-12T10:18:12
| 310,592,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,356
|
py
|
import numpy as np
import pygame as pg
from pygame import time
import collections as co
import pickle
import random
def calculateGridDis(grid1, grid2):
    """Return the Manhattan (L1) distance between two grid points as an int."""
    diff = np.asarray(grid1) - np.asarray(grid2)
    return int(np.abs(diff).sum())
def creatRect(coor1, coor2):
    """List every integer cell (x, y) inside the axis-aligned rectangle
    spanned by the two corner coordinates, in row-major order."""
    (x_lo, x_hi), (y_lo, y_hi) = (sorted(pair) for pair in zip(coor1, coor2))
    return [(x, y) for x in range(x_lo, x_hi + 1) for y in range(y_lo, y_hi + 1)]
def calculateAvoidCommitmnetZone(playerGrid, target1, target2):
    """Cells (excluding the player's own cell) common to the two
    player-to-target rectangles, non-empty only when both targets are
    equidistant from the player."""
    if calculateGridDis(playerGrid, target1) != calculateGridDis(playerGrid, target2):
        return []
    shared = set(creatRect(playerGrid, target1)) & set(creatRect(playerGrid, target2))
    zone = list(shared)
    zone.remove(tuple(playerGrid))
    return zone
def inferGoal(originGrid, aimGrid, targetGridA, targetGridB):
    """Infer which target a step moves toward: 1 for A, 2 for B, 0 if neutral.

    Progress toward a target = (distance before step) - (distance after step).
    """
    progressA = calculateGridDis(targetGridA, originGrid) - calculateGridDis(targetGridA, aimGrid)
    progressB = calculateGridDis(targetGridB, originGrid) - calculateGridDis(targetGridB, aimGrid)
    if progressA > progressB:
        return 1
    if progressB > progressA:
        return 2
    return 0
def checkTerminationOfTrial(bean1Grid, bean2Grid, humanGrid):
    """Return True while the player has reached neither bean (trial continues);
    False once either bean's cell is occupied."""
    reached = (calculateGridDis(humanGrid, bean1Grid) == 0
               or calculateGridDis(humanGrid, bean2Grid) == 0)
    return not reached
class SingleGoalTrial():
    """Run one single-bean trial: the player walks toward a single target.

    Injected collaborators (callables):
        controller(playerGrid, goalA, goalB) -> (aimGrid, aimAction)
        drawNewState(bean1, bean2, playerGrid, obstacles): renders the board
        drawText(text, color, gridPos): renders a text cue (fixation cross)
        normalNoise(playerGrid, aimAction, noiseSteps, stepCount)
            -> (nextGrid, realAction): may perturb the chosen step
        checkBoundary(grid) -> grid: presumably clamps positions onto the
            board — confirm against the injected implementation
    """

    def __init__(self, controller, drawNewState, drawText, normalNoise, checkBoundary):
        self.controller = controller
        self.drawNewState = drawNewState
        self.drawText = drawText
        self.normalNoise = normalNoise
        self.checkBoundary = checkBoundary

    def __call__(self, beanGrid, playerGrid, designValues):
        """Run the trial; return an OrderedDict of stringified logs.

        beanGrid: the single target; it is passed for both goal slots so the
            two-goal helpers degenerate to the single-goal case.
        playerGrid: start position.
        designValues: how many noise steps to sample.
        """
        obstacles = []  # single-goal trials have no obstacles
        initialPlayerGrid = playerGrid
        reactionTime = list()  # ms elapsed from trial start at each step
        trajectory = [initialPlayerGrid]
        results = co.OrderedDict()
        aimActionList = list()
        # Total L1 path length bounds the candidate noise steps [2, totalStep).
        totalStep = int(np.linalg.norm(np.array(playerGrid) - np.array(beanGrid), ord=1))
        noiseStep = random.sample(list(range(2, totalStep)), designValues)
        stepCount = 0
        goalList = list()  # NOTE(review): never used in this trial type
        # Fixation cross, pause, then the initial board state.
        self.drawText("+", [0, 0, 0], [7, 7])
        pg.time.wait(1300)
        self.drawNewState(beanGrid, beanGrid, initialPlayerGrid, obstacles)
        pg.event.set_allowed([pg.KEYDOWN, pg.KEYUP, pg.QUIT])
        realPlayerGrid = initialPlayerGrid
        pause = True
        initialTime = time.get_ticks()
        while pause:
            # Same bean in both goal slots: the controller sees one target.
            aimPlayerGrid, aimAction = self.controller(realPlayerGrid, beanGrid, beanGrid)
            reactionTime.append(time.get_ticks() - initialTime)
            stepCount = stepCount + 1
            # Possibly perturb the chosen action, then keep the player on-board.
            noisePlayerGrid, realAction = self.normalNoise(realPlayerGrid, aimAction, noiseStep, stepCount)
            realPlayerGrid = self.checkBoundary(noisePlayerGrid)
            self.drawNewState(beanGrid, beanGrid, realPlayerGrid, obstacles)
            trajectory.append(list(realPlayerGrid))
            aimActionList.append(aimAction)
            pause = checkTerminationOfTrial(beanGrid, beanGrid, realPlayerGrid)
        pg.time.wait(500)
        pg.event.set_blocked([pg.KEYDOWN, pg.KEYUP])
        # Values are stringified for flat (CSV-friendly) logging.
        results["reactionTime"] = str(reactionTime)
        results["trajectory"] = str(trajectory)
        results["aimAction"] = str(aimActionList)
        results["noisePoint"] = str(noiseStep)
        return results
class NormalTrial():
    """Run one two-bean trial: the player chooses between two targets while
    the trial logs inferred goal commitment at every step.

    Collaborators are the same injected callables as SingleGoalTrial
    (controller, drawNewState, drawText, normalNoise, checkBoundary).
    """

    def __init__(self, controller, drawNewState, drawText, normalNoise, checkBoundary):
        self.controller = controller
        self.drawNewState = drawNewState
        self.drawText = drawText
        self.normalNoise = normalNoise
        self.checkBoundary = checkBoundary

    def __call__(self, bean1Grid, bean2Grid, playerGrid, obstacles, designValues):
        """Run the trial; return an OrderedDict of stringified logs.

        designValues: how many noise steps to sample from [2, leastStep).
        """
        initialPlayerGrid = playerGrid
        reactionTime = list()  # ms elapsed from trial start at each step
        trajectory = [initialPlayerGrid]
        results = co.OrderedDict()
        aimActionList = list()
        aimPlayerGridList = []
        # Noise steps are drawn below the shorter of the two bean distances.
        leastStep = min([calculateGridDis(playerGrid, beanGrid) for beanGrid in [bean1Grid, bean2Grid]])
        noiseStep = sorted(random.sample(list(range(2, leastStep)), designValues))
        stepCount = 0
        goalList = list()  # per-step inferred goal: 1, 2, or 0 (neutral)
        # Fixation cross, pause, then the initial board state.
        self.drawText("+", [0, 0, 0], [7, 7])
        pg.time.wait(1300)
        self.drawNewState(bean1Grid, bean2Grid, initialPlayerGrid, obstacles)
        pg.event.set_allowed([pg.KEYDOWN, pg.KEYUP, pg.QUIT])
        realPlayerGrid = initialPlayerGrid
        pause = True
        initialTime = time.get_ticks()
        while pause:
            aimPlayerGrid, aimAction = self.controller(realPlayerGrid, bean1Grid, bean2Grid)
            reactionTime.append(time.get_ticks() - initialTime)
            # Which bean does this intended step move toward?
            goal = inferGoal(trajectory[-1], aimPlayerGrid, bean1Grid, bean2Grid)
            goalList.append(goal)
            stepCount = stepCount + 1
            noisePlayerGrid, realAction = self.normalNoise(realPlayerGrid, aimAction, noiseStep, stepCount)
            # A step into an obstacle is cancelled: the player stays put.
            if noisePlayerGrid in obstacles:
                noisePlayerGrid = tuple(trajectory[-1])
            realPlayerGrid = self.checkBoundary(noisePlayerGrid)
            self.drawNewState(bean1Grid, bean2Grid, realPlayerGrid, obstacles)
            trajectory.append(list(realPlayerGrid))
            aimActionList.append(aimAction)
            pause = checkTerminationOfTrial(bean1Grid, bean2Grid, realPlayerGrid)
        pg.time.wait(500)
        pg.event.set_blocked([pg.KEYDOWN, pg.KEYUP])
        # Values are stringified for flat (CSV-friendly) logging.
        results["reactionTime"] = str(reactionTime)
        results["trajectory"] = str(trajectory)
        results["aimPlayerGridList"] = str(aimPlayerGridList)
        results["aimAction"] = str(aimActionList)
        results["noisePoint"] = str(noiseStep)
        results["goal"] = str(goalList)
        return results
|
[
"shaozhecheng@outlook.com"
] |
shaozhecheng@outlook.com
|
797a8815744350425e025a5f0309849676b9691c
|
e27333261b8e579564016c71d2061cc33972a8b8
|
/.history/api/IR_engine_20210728213929.py
|
ddcc939eb070ba750cc5357a2d6a5aa401fe3e9a
|
[] |
no_license
|
Dustyik/NewsTweet_InformationRetrieval
|
882e63dd20bc9101cbf48afa6c3302febf1989b1
|
d9a6d92b51c288f5bcd21ea1cc54772910fa58f7
|
refs/heads/master
| 2023-07-01T09:12:53.215563
| 2021-08-12T08:28:33
| 2021-08-12T08:28:33
| 382,780,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,136
|
py
|
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from IPython.display import display
'''
Functions to write:
1. tf-idf with cosine sim/Euclidean distance
- represent terms in each document with its tf-idf weights,
2. VSM with cosine sim/Euclidean distance
3. BIM
4. BM25
5. BERT
Test Titles:
f7ca322d-c3e8-40d2-841f-9d7250ac72ca Worcester breakfast club for veterans gives hunger its marching orders
609772bc-0672-4db5-8516-4c025cfd54ca Jumpshot Gives Marketers Renewed Visibility Into Paid and Organic Keywords With Launch of Jumpshot Elite
1aa9d1b0-e6ba-4a48-ad0c-66552d896aac The Return Of The Nike Air Max Sensation Has 80’s Babies Hyped!
719699f9-47be-4bc7-969b-b53a881c95ae This New Dating App Will Ruin Your Internet Game
Test Titles Stemmed:
worcest breakfast club for veteran give hunger it march order
jumpshot give market renew visibl into paid and organ keyword with launch of jumpshot elit
the return of the nike air max sensat ha s babi hype
thi new date app will ruin your internet game
'''
# Input CSVs: pre-stemmed article titles and the scraped tweet dataset.
# NOTE(review): hard-coded absolute Windows paths — consider making these
# configurable (env var or CLI argument).
titles_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\article_titles_stemmed.csv"
tweets_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\dataset_scrapped.csv"

# Short codes -> human-readable labels for the supported search models.
SEARCH_MODELS = {
    "tfcs": "Tf-idf w Cosine Sim",
    "tfed": "Tf-idf w Euclidean Dist"
}


def returnTweetsBasedOnSearchModel(article_id, searchModel):
    # TODO: stub — should dispatch to the model selected by `searchModel`
    # and return the ranked tweets for `article_id`.
    return
class DataProcessor:
    """Load and hold the article-titles and tweets CSVs (rows with NaNs dropped).

    Tokenizing/stemming of titles is done beforehand (see the commented-out
    helpers below, kept for reference).
    """

    def __init__(self):
        # Load both datasets eagerly and drop incomplete rows up front.
        self.titles_data = pd.read_csv(titles_file_path)
        self.titles_data = self.titles_data.dropna()
        self.tweets_data = pd.read_csv(tweets_file_path)
        self.tweets_data = self.tweets_data.dropna()
        #self.data.title = self.data.title.astype(str)
        #self.porter = PorterStemmer()
        #self.get_clean_data()
        print ("Data Processor up and ready...")

    '''
    Tokenizing of article titles should be done beforehand
    def tokenize_stem_lower(self, text):
        tokens = word_tokenize(text)
        tokens = list(filter(lambda x: x.isalpha(), tokens))
        tokens = [self.porter.stem(x.lower()) for x in tokens]
        return ' '.join(tokens)

    def get_clean_data(self):
        self.data['clean_text'] = self.data.apply(lambda x: self.tokenize_stem_lower(x.title), axis=1)
        return self.data
    '''
class CosineSimilarity:
    """Rank tweets against a query via cosine similarity over a term matrix.

    NOTE(review): this class looks mid-refactor (file is a .history snapshot):
    `get_result` reads `self.matrix`, `self.data` and `self.query_id`, none
    of which are assigned by `__init__`, and `query()` has its matrix-building
    lines commented out. Confirm the intended state before using.
    """

    def __init__(self, titles, tweets, type='tfidf'):
        self.titles = titles #contains titles data
        self.tweets = tweets #contains tweets data
        # `type` selects the vectorizer: 'tfidf' or 'dt' (document-term counts).
        self.vectorizer = self.change_matrix_type(type)

    def get_result(self, return_size):
        """Collect the `return_size` documents most similar to the query.

        Assumes row 0 of `self.matrix` is the query row.
        """
        cos_sim = cosine_similarity(self.matrix, self.matrix)
        # Highest similarity first; skip index 0 (the query itself).
        top_ind = np.flip(np.argsort(cos_sim[0]))[1:return_size+1]
        top_id = [list(self.matrix.index)[i] for i in top_ind]
        # print(top_10_ind ,top_10_id)
        self.result = []
        for i in top_id:
            filt = self.data[self.data.document==i]
            for ind, r in filt.iterrows():
                # `rel` is presumably a relevance judgement and `topic` the
                # article the tweet was judged against — confirm schema.
                rel = r['rel']
                text = r['text']
                related = r['topic']
                # score: +1 if on-topic with rel>0, -1 if on-topic with rel==0,
                # 0 otherwise.
                score = 0
                if related==self.query_id and rel>0:
                    score = 1
                if related==self.query_id and rel==0:
                    score = -1
                self.result.append({'tweet_id':i, 'text': text, 'related_article':related,'score': score})

    def query(self, query_id, query_text, return_size=40):
        """Fit the vectorizer on [query] + tweets.

        NOTE(review): matrix construction and result assembly are commented
        out, so `term_doc` is currently unused and nothing is returned.
        """
        self.query_id = query_id
        term_doc = self.vectorizer.fit_transform([query_text]+list(self.data.tweets))
        #ind = ['query'] + list(self.documents)
        #self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.tweets.get_feature_names(), index=ind)
        #self.get_result(return_size)
        #return pd.DataFrame(self.result)

    def change_matrix_type(self, type):
        """Return the vectorizer for `type`; prints (and implicitly returns
        None) for an unknown type."""
        if type == 'tfidf':
            return TfidfVectorizer()
        elif type == 'dt':
            return CountVectorizer() #transforms the entire word matrix into a set of vectors
        else:
            print('Type is invalid')

    def get_matrix(self):
        # NOTE(review): `self.matrix` is never assigned anywhere in this class.
        return self.matrix
class EuclideanDistance:
    """Rank documents by Euclidean distance between the query row (row 0 of
    the term matrix) and every document row — smaller distance = closer."""

    def __init__(self, data, type='tfidf'):
        # `data` is expected to expose .clean_text, .document and the
        # judgement columns rel/text/topic — TODO confirm against caller.
        self.data = data
        self.change_matrix_type(type)
        self.matrix = None

    def get_result(self, return_size):
        """Collect the `return_size` documents closest to the query row."""
        euclidean = euclidean_distances(self.matrix.values[1:], [self.matrix.values[0]])
        top_ind = np.argsort(euclidean.T[0])[:return_size]
        # NOTE(review): `top_ind` indexes rows of values[1:], but is applied
        # below to the full index (whose first entry is 'query') — this looks
        # off by one; verify.
        top_id = [list(self.matrix.index)[i] for i in top_ind]
        # print(sorted(euclidean[:20]),top_10_ind ,top_10_id)
        self.result = []
        for i in top_id:
            filt = self.data[self.data.document==i]
            for ind, r in filt.iterrows():
                rel = r['rel']
                text = r['text']
                related = r['topic']
                # score: +1 if on-topic with rel>0, -1 if on-topic with rel==0,
                # 0 otherwise.
                score = 0
                if related==self.query_id and rel>0:
                    score = 1
                if related==self.query_id and rel==0:
                    score = -1
                self.result.append({'tweet_id':i, 'text': text, 'related_article':related,'score': score})

    def query(self, query_id, query_text, return_size=10):
        """Build the [query] + documents term matrix, rank, and return the
        results as a DataFrame."""
        self.query_id = query_id
        term_doc = self.vec.fit_transform([query_text]+list(self.data.clean_text))
        ind = ['query'] + list(self.data.document)
        self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.vec.get_feature_names(), index=ind)
        self.get_result(return_size)
        return pd.DataFrame(self.result)

    def change_matrix_type(self, type):
        """Set `self.vec` for `type`; prints a message for an unknown type
        (leaving `self.vec` unset)."""
        if type == 'tfidf':
            self.vec = TfidfVectorizer()
        elif type == 'dt':
            self.vec = CountVectorizer()
        else:
            print('Type is invalid')

    def get_matrix(self):
        return self.matrix
# Smoke-test driver: load the data and exercise the cosine-similarity model.
dataProcessor = DataProcessor()
tweets = dataProcessor.tweets_data
titles = dataProcessor.titles_data
#display(tweets.head())
#display(titles.head())

sample_query_id = "f7ca322d-c3e8-40d2-841f-9d7250ac72ca"
sample_query_text = "Worcester breakfast club for veterans gives hunger its marching orders"

# BUG FIX: the instance was previously named `cosine_similarity`, shadowing
# sklearn's `cosine_similarity` function imported at the top of the file and
# thereby breaking CosineSimilarity.get_result (which calls that function).
cosine_model = CosineSimilarity(titles=titles, tweets=tweets)
cosine_model.vectorizer.fit_transform([sample_query_text])
print(cosine_model.vectorizer.get_feature_names())
#cosine_model.query(sample_query_id, sample_query_text)
|
[
"chiayik_tan@mymail.sutd.edu.sg"
] |
chiayik_tan@mymail.sutd.edu.sg
|
3f27767e32d95a71d36747e6db0b0d8e9bfabfc9
|
f0a65d21d5ba16888f131fe99ed8baf0a85cf7dd
|
/pygmsh/volume_base.py
|
d3a22878fde32ff32a8b8924022e7a8096963a9b
|
[
"MIT"
] |
permissive
|
mjredmond/pygmsh
|
d4a1e4e418af931eccbe73db01813a70efc2924a
|
972e1164d77ecbf6c2b50b93fec9dc48c8d913e6
|
refs/heads/master
| 2021-01-19T07:52:53.057151
| 2017-04-06T09:48:21
| 2017-04-06T09:48:21
| 87,581,937
| 0
| 0
| null | 2017-04-07T19:52:56
| 2017-04-07T19:52:56
| null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
# -*- coding: utf-8 -*-
#
class VolumeBase(object):
    """Base class that assigns every volume a Gmsh identifier.

    Explicit ids are used verbatim; otherwise an id of the form ``v<N>`` is
    generated from a class-level counter (incremented only when used, so
    explicit ids do not burn counter values).
    """

    # Class-level counter backing the auto-generated ids.
    _ID = 0

    def __init__(self, id=None):
        # BUG FIX: compare against None instead of truthiness, so falsy but
        # valid ids (0, '', ...) are honored rather than silently replaced.
        if id is not None:
            self.id = id
        else:
            self.id = 'v%d' % VolumeBase._ID
            VolumeBase._ID += 1
|
[
"nico.schloemer@gmail.com"
] |
nico.schloemer@gmail.com
|
c1396ab21dabc56b8319ae076980db2b18e388c6
|
e2e7b6ae6f8897a75aaa960ed36bd90aa0743710
|
/swagger_client/models/post_deployment.py
|
2e5931f02a8d25c5931c6afa88ad097e5ca01832
|
[
"Apache-2.0"
] |
permissive
|
radon-h2020/radon-ctt-cli
|
36912822bc8d76d52b00ea657ed01b8bfcc5056f
|
3120b748c73e99d81d0cac5037e393229577d640
|
refs/heads/master
| 2023-08-19T10:54:01.517243
| 2021-09-15T15:38:51
| 2021-09-15T15:38:51
| 299,571,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,461
|
py
|
# coding: utf-8
"""
RADON CTT Server API
This is API of the RADON Continuous Testing Tool (CTT) Server: <a href=\"https://github.com/radon-h2020/radon-ctt\">https://github.com/radon-h2020/radon-ctt<a/> # noqa: E501
OpenAPI spec version: 1.0.0-oas3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class POSTDeployment(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared swagger type.
    swagger_types = {'testartifact_uuid': 'str'}
    # Attribute name -> JSON key in the API definition.
    attribute_map = {'testartifact_uuid': 'testartifact_uuid'}

    def __init__(self, testartifact_uuid=None):  # noqa: E501
        """POSTDeployment - a model defined in Swagger"""  # noqa: E501
        self._testartifact_uuid = None
        self.discriminator = None
        # Runs the validating setter: a None uuid raises ValueError.
        self.testartifact_uuid = testartifact_uuid

    @property
    def testartifact_uuid(self):
        """Gets the testartifact_uuid of this POSTDeployment.  # noqa: E501

        :return: The testartifact_uuid of this POSTDeployment.  # noqa: E501
        :rtype: str
        """
        return self._testartifact_uuid

    @testartifact_uuid.setter
    def testartifact_uuid(self, testartifact_uuid):
        """Sets the testartifact_uuid of this POSTDeployment.

        :param testartifact_uuid: The testartifact_uuid of this POSTDeployment.  # noqa: E501
        :type: str
        """
        if testartifact_uuid is None:
            raise ValueError("Invalid value for `testartifact_uuid`, must not be `None`")  # noqa: E501
        self._testartifact_uuid = testartifact_uuid

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        if issubclass(POSTDeployment, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, POSTDeployment) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"duellmann@iste.uni-stuttgart.de"
] |
duellmann@iste.uni-stuttgart.de
|
a4e16aa3029986e19186a08d10ba6756a749ef85
|
865bd5e42a4299f78c5e23b5db2bdba2d848ab1d
|
/Python/75.sort-colors.132268888.ac.python3.py
|
420999a7c4e65d780fb46607f6690cc3de47a52b
|
[] |
no_license
|
zhiymatt/Leetcode
|
53f02834fc636bfe559393e9d98c2202b52528e1
|
3a965faee2c9b0ae507991b4d9b81ed0e4912f05
|
refs/heads/master
| 2020-03-09T08:57:01.796799
| 2018-05-08T22:01:38
| 2018-05-08T22:01:38
| 128,700,683
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
#
# [75] Sort Colors
#
# https://leetcode.com/problems/sort-colors/description/
#
# algorithms
# Medium (38.90%)
# Total Accepted: 217.5K
# Total Submissions: 559.1K
# Testcase Example: '[0]'
#
#
# Given an array with n objects colored red, white or blue, sort them so that
# objects of the same color are adjacent, with the colors in the order red,
# white and blue.
#
#
#
# Here, we will use the integers 0, 1, and 2 to represent the color red, white,
# and blue respectively.
#
#
#
# Note:
# You are not suppose to use the library's sort function for this problem.
#
#
# click to show follow up.
#
#
# Follow up:
# A rather straight forward solution is a two-pass algorithm using counting
# sort.
# First, iterate the array counting number of 0's, 1's, and 2's, then overwrite
# array with total number of 0's, then 1's and followed by 2's.
# Could you come up with an one-pass algorithm using only constant space?
#
#
#
class Solution:
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # One-pass overwrite: zero_end is where the next 0 belongs, one_end
        # where the next 0-or-1 belongs.  Each cell is first stamped 2, then
        # demoted to 1 and/or 0 by the two prefix writers.
        zero_end = one_end = 0
        for idx, color in enumerate(nums):
            nums[idx] = 2
            if color < 2:
                nums[one_end] = 1
                one_end += 1
            if color == 0:
                nums[zero_end] = 0
                zero_end += 1
|
[
"miylolmiy@gmail.com"
] |
miylolmiy@gmail.com
|
a100678014c55766c07b94ae81cf67b691c11c59
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=6/sched.py
|
3605875026286563e51a9292de1d94125c66f6dc
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
-S 1 -X RUN -Q 0 -L 2 106 400
-S 0 -X RUN -Q 0 -L 2 86 400
-S 0 -X RUN -Q 0 -L 2 74 250
-S 0 -X RUN -Q 0 -L 2 59 250
-S 2 -X RUN -Q 1 -L 1 54 200
-S 3 -X RUN -Q 1 -L 1 44 175
-S 2 -X RUN -Q 1 -L 1 40 200
-S 2 -X RUN -Q 1 -L 1 37 125
-S 4 -X RUN -Q 2 -L 1 35 125
-S 4 -X RUN -Q 2 -L 1 33 125
-S 4 -X RUN -Q 2 -L 1 32 300
-S 4 -X RUN -Q 2 -L 1 30 100
-S 5 -X RUN -Q 3 -L 1 30 300
-S 5 -X RUN -Q 3 -L 1 28 150
-S 5 -X RUN -Q 3 -L 1 24 300
-S 5 -X RUN -Q 3 -L 1 19 125
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
f55df027f5a380a2302722b0a432c76857f85315
|
a1a43879a2da109d9fe8d9a75f4fda73f0d7166b
|
/api/tests/equal_all.py
|
1f1a1f3cf9a2c23dd214b96ee1e53b5c0fc00069
|
[] |
no_license
|
PaddlePaddle/benchmark
|
a3ed62841598d079529c7440367385fc883835aa
|
f0e0a303e9af29abb2e86e8918c102b152a37883
|
refs/heads/master
| 2023-09-01T13:11:09.892877
| 2023-08-21T09:32:49
| 2023-08-21T09:32:49
| 173,032,424
| 78
| 352
| null | 2023-09-14T05:13:08
| 2019-02-28T03:14:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
@benchmark_registry.register("equal_all")
class EqualAllConfig(APIConfig):
    """Benchmark configuration for the `equal_all` op."""

    def __init__(self):
        super(EqualAllConfig, self).__init__("equal_all")
        # No TensorFlow counterpart is benchmarked for this op.
        self.run_tf = False
@benchmark_registry.register("equal_all")
class PaddleEqualAll(PaddleOpBenchmarkBase):
    def build_graph(self, config):
        """Build the paddle.equal_all benchmark graph for one config."""
        lhs = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        rhs = self.variable(name='y', shape=config.y_shape, dtype=config.y_dtype)
        out = paddle.equal_all(x=lhs, y=rhs)
        self.feed_list = [lhs, rhs]
        self.fetch_list = [out]
@benchmark_registry.register("equal_all")
class TorchEqualAll(PytorchOpBenchmarkBase):
    def build_graph(self, config):
        """Build the torch.equal benchmark graph for one config."""
        lhs = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        rhs = self.variable(name='y', shape=config.y_shape, dtype=config.y_dtype)
        # torch.equal returns a Python bool; wrap it in a tensor so it can
        # be fetched like the paddle result.
        out = torch.tensor(torch.equal(input=lhs, other=rhs))
        self.feed_list = [lhs, rhs]
        self.fetch_list = [out]
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
6153ed244acbd1deac19c433cbd01c43350d4ff4
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/2gFkEsAqNZrs4yeck_13.py
|
0e96182a63f5aad185cacd1b5bcad33ff13d32f2
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
"""
Write a function that returns all the elements in an array that are **strictly
greater** than their adjacent left and right neighbors.
### Examples
mini_peaks([4, 5, 2, 1, 4, 9, 7, 2]) ➞ [5, 9]
# 5 has neighbours 4 and 2, both are less than 5.
mini_peaks([1, 2, 1, 1, 3, 2, 5, 4, 4]) ➞ [2, 3, 5]
mini_peaks([1, 2, 3, 4, 5, 6]) ➞ []
### Notes
* Do not count boundary numbers, since they only have **one** left/right neighbor.
* If no such numbers exist, return an empty array.
"""
def mini_peaks(lst):
    """Return the interior elements strictly greater than both neighbors.

    Boundary elements are never counted (they have only one neighbor).
    """
    return [lst[i] for i in range(1, len(lst) - 1)
            if lst[i - 1] < lst[i] > lst[i + 1]]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
20fb226181a168dd6671f5f065e241134074e33a
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/route53_write_f/dns-answer_test.py
|
f8d48671d2f746eca041962279f310374a54a8cc
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
# Script entry point: delegates to the shared helper, presumably to collect
# and write parameters for the `aws route53 test-dns-answer` CLI command —
# see common.execute_command.write_parameter.
if __name__ == '__main__':
    """
    """
    write_parameter("route53", "test-dns-answer")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
c1b26cced7bf736c91ff5349abd7750a5eefa8d8
|
e60487a8f5aad5aab16e671dcd00f0e64379961b
|
/python_stack/Algos/numPy/updateNumpy.py
|
764b3af48fbc0778e1b980e0ca73c7c9f9fe3f14
|
[] |
no_license
|
reenadangi/python
|
4fde31737e5745bc5650d015e3fa4354ce9e87a9
|
568221ba417dda3be7f2ef1d2f393a7dea6ccb74
|
refs/heads/master
| 2021-08-18T08:25:40.774877
| 2021-03-27T22:20:17
| 2021-03-27T22:20:17
| 247,536,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
import numpy as np
x=np.array([12,34,56,78,99])
y=np.array([[1,2,3],[4,5,6],[7,8,9]])
print(f"Orginal array{x}")
# access: first element, last element (two equivalent spellings), second-to-last
print(x[0],x[len(x)-1],x[-1],x[-2])
# Modify: double every element in place
for i in range(len(x)):
    x[i]=x[i]*2
# delete first and last element (np.delete returns a new array, so rebind)
x=np.delete(x,[0,4])
print(x)
print(y)
# delete first row (x axis)
y=np.delete(y,[0],axis=0)
print(y)
# delete first col(y axis)
y=np.delete(y,[0],axis=1)
print(y)
# append: mixing in a float (14.5) upcasts the whole array's dtype to float64
print(x.dtype)
x=np.append(x,[14.5,243])
print(x)
print(x.dtype)
# insert 58 at position 1, then 3 at position 2
x=np.insert(x,1,58)
print(x)
x=np.insert(x,2,3)
print(x)
# insert a column filled with 34 at column index 1
y=np.insert(y,1,34,axis=1)
print(y)
# stacking - vstack/hstack
# It's important that size of stacks are same
x=np.array([1,2,3])
y=np.array([30,40,50])
z=np.vstack((x,y))
print(z)
# hstack - Horizontal
z=np.hstack((x,y))
print(z)
|
[
"reena.dangi@gmail.com"
] |
reena.dangi@gmail.com
|
284ce95f34b4a10c66e71f2e3477dda5167fac94
|
b6d2354b06732b42d3de49d3054cb02eb30298c4
|
/finance/models/score.py
|
df2c1a647c8480e32ca35a6f81dc0cb04266d188
|
[] |
no_license
|
trivvet/finplanner
|
52ad276839bfae67821b9684f7db549334ef0a59
|
1d82d1a09da6f04fced6f71b53aeb784af00f758
|
refs/heads/master
| 2020-03-17T23:24:25.071311
| 2018-10-28T10:12:07
| 2018-10-28T10:12:07
| 134,043,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class ScorePrototype(models.Model):
    """Abstract base for monthly money records: an amount tied to a Month,
    plus an optional computed remainder."""

    class Meta:
        abstract=True

    # The month this record belongs to; deleting the month cascades.
    month = models.ForeignKey(
        'Month',
        on_delete=models.CASCADE,
        blank=False,
        null=False,
        verbose_name="Month"
    )
    # Monetary amount (integer units — presumably whole currency; confirm).
    amount = models.IntegerField(
        blank=False,
        null=False,
        verbose_name="Money Amount"
    )
    # Optional remainder left over for the month.
    remainder = models.IntegerField(
        blank=True,
        null=True,
        verbose_name="Money Remainder"
    )
class Score(ScorePrototype):
    """Month-end balance of a specific bank account."""

    class Meta:
        # NOTE(review): "Belance" looks like a typo for "Balance"; fixing it
        # would require a migration, so it is only flagged here.
        verbose_name = "Belance"
        verbose_name_plural = "Belances"

    # The bank account the balance belongs to; deleting it cascades.
    account = models.ForeignKey(
        'Account',
        on_delete=models.CASCADE,
        blank=False,
        null=False,
        verbose_name="Bank Account"
    )

    # NOTE(review): __unicode__ is the Python 2 convention; Django on
    # Python 3 uses __str__ — confirm the target runtime.
    def __unicode__(self):
        return u"Залишок за %s по %s" % (self.month.name, self.account)
class PlannedExpense(ScorePrototype):
    """A named expense planned for a given month."""

    class Meta:
        verbose_name = "Planned Expense"
        verbose_name_plural = "Planned Expenses"

    # Short description of what the expense is for.
    title = models.CharField(
        max_length=256,
        blank=False,
        null=False,
        verbose_name="Title"
    )

    # NOTE(review): __unicode__ is the Python 2 convention; Django on
    # Python 3 uses __str__ — confirm the target runtime.
    def __unicode__(self):
        return u"Заплановані витрати на %s за %s" % (self.title, self.month.name)
|
[
"trivvet@gmail.com"
] |
trivvet@gmail.com
|
4bc6b2ded7a42b226ac3a04ee7c6be4878dd796e
|
8ca4992e5c7f009147875549cee21c0efb7c03eb
|
/mmseg/models/decode_heads/nl_head.py
|
bbbe70b5fb7233fd840941678657950119fda43e
|
[
"Apache-2.0"
] |
permissive
|
JiayuZou2020/DiffBEV
|
0ada3f505fc5106d8b0068c319f0b80ed366b673
|
527acdb82ac028061893d9d1bbe69e589efae2a0
|
refs/heads/main
| 2023-05-23T07:25:39.465813
| 2023-04-04T02:53:05
| 2023-04-04T02:53:05
| 613,895,691
| 181
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import NonLocal2d
from ..builder import HEADS
from .fcn_head import FCNHead
@HEADS.register_module()
class NLHead(FCNHead):
    """Non-local Neural Networks.

    This head is the implementation of `NLNet
    <https://arxiv.org/abs/1711.07971>`_.

    Args:
        reduction (int): Reduction factor of projection transform. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            sqrt(1/inter_channels). Default: True.
        mode (str): The nonlocal mode. Options are 'embedded_gaussian',
            'dot_product'. Default: 'embedded_gaussian.'.
    """

    def __init__(self,
                 reduction=2,
                 use_scale=True,
                 mode='embedded_gaussian',
                 **kwargs):
        # num_convs=2: one conv before and one after the non-local block
        # (see forward below).
        super(NLHead, self).__init__(num_convs=2, **kwargs)
        self.reduction = reduction
        self.use_scale = use_scale
        self.mode = mode
        self.nl_block = NonLocal2d(
            in_channels=self.channels,
            reduction=self.reduction,
            use_scale=self.use_scale,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            mode=self.mode)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        output = self.convs[0](x)
        # Non-local attention applied between the two conv layers.
        output = self.nl_block(output)
        output = self.convs[1](output)
        if self.concat_input:
            # Fuse the original input with the processed features before
            # the classification conv.
            output = self.conv_cat(torch.cat([x, output], dim=1))
        output = self.cls_seg(output)
        return output
|
[
"noreply@github.com"
] |
JiayuZou2020.noreply@github.com
|
256bb7942ddc5136f4fa22e73823cc34bb46d2c0
|
0156514d371c04da404b50994804ede8d264042a
|
/rest_batteries/exception_handlers.py
|
15824e0dd41058bf34e5dd42c73220ed016ef552
|
[
"MIT"
] |
permissive
|
defineimpossible/django-rest-batteries
|
68b074f18fcae304b9bac4a242f9a9eea98c6e9c
|
951cc7ec153d1342a861d7f6468862000d5ea9f3
|
refs/heads/master
| 2023-07-21T10:45:18.133691
| 2023-07-11T02:52:45
| 2023-07-11T02:52:45
| 284,420,681
| 21
| 0
|
MIT
| 2023-07-11T02:34:06
| 2020-08-02T08:19:39
|
Python
|
UTF-8
|
Python
| false
| false
| 397
|
py
|
from rest_framework.views import exception_handler
from .errors_formatter import ErrorsFormatter
def errors_formatter_exception_handler(exc, context):
    """DRF exception handler that rewrites error payloads via ErrorsFormatter.

    Falls back to DRF's default behavior (returns None) for unexpected
    errors (server error, etc.).
    """
    response = exception_handler(exc, context)
    if response is None:
        # Unexpected error (server error, etc.): let DRF's 500 handling run.
        return None
    response.data = ErrorsFormatter(exc)()
    return response
|
[
"denis.orehovsky@gmail.com"
] |
denis.orehovsky@gmail.com
|
69bc2b87b4e297ce71f450a7c46c546972fa3449
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/PSg77AZJGACk4a7gt_6.py
|
5bcd00492613d502e7d26232c6bfe6cf615fc660
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
"""
For this challenge, forget how to add two numbers together. The best
explanation on what to do for this function is this meme:

### Examples
meme_sum(26, 39) ➞ 515
# 2+3 = 5, 6+9 = 15
# 26 + 39 = 515
meme_sum(122, 81) ➞ 1103
# 1+0 = 1, 2+8 = 10, 2+1 = 3
# 122 + 81 = 1103
meme_sum(1222, 30277) ➞ 31499
### Notes
N/A
"""
def meme_sum(a, b):
    """'Meme-add' two non-negative ints: concatenate the per-column digit sums.

    e.g. meme_sum(26, 39) -> 515 because 2+3 = 5 and 6+9 = 15.
    """
    # Left-pad the shorter number with zeros so columns line up; a digit
    # summed with 0 reproduces itself, matching a plain copy of the prefix.
    longer, shorter = str(max(a, b)), str(min(a, b))
    padded = '0' * (len(longer) - len(shorter)) + shorter
    pieces = [str(int(x) + int(y)) for x, y in zip(longer, padded)]
    return int(''.join(pieces))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
2f2a1743222841ff34512aa1889a1587bd61b5ce
|
c759ca98768dd8fd47621e3aeda9069d4e0726c6
|
/codewof/users/forms.py
|
211e37a2e4797d3fa23af236d3215009c7f787c4
|
[
"MIT"
] |
permissive
|
lucyturn3r/codewof
|
50fc504c3a539c376b3d19906e92839cadabb012
|
acb2860c4b216013ffbba5476d5fac1616c78454
|
refs/heads/develop
| 2020-06-24T08:25:28.788099
| 2019-08-12T02:50:35
| 2019-08-12T02:50:35
| 198,912,987
| 0
| 0
|
MIT
| 2019-08-07T03:22:21
| 2019-07-25T23:17:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
"""Forms for user application."""
from django.forms import ModelForm
from django.contrib.auth import get_user_model, forms
User = get_user_model()
class SignupForm(ModelForm):
    """Sign up for user registration."""

    class Meta:
        """Metadata for SignupForm class."""

        model = get_user_model()
        fields = ['first_name', 'last_name']

    def signup(self, request, user):
        """Extra logic when a user signs up.

        Required by django-allauth.

        Copies the validated name fields collected by this form onto the
        new user record and persists it.
        """
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        user.save()
class UserChangeForm(forms.UserChangeForm):
    """Form class for changing user."""

    class Meta(forms.UserChangeForm.Meta):
        """Metadata for UserChangeForm class."""

        model = User
        # NOTE(review): 'first_name' is absent here but present in
        # UserCreationForm — confirm this asymmetry is intentional.
        fields = ('email', 'last_name')
class UserCreationForm(forms.UserCreationForm):
    """Form class for creating user."""

    class Meta(forms.UserCreationForm.Meta):
        """Metadata for UserCreationForm class."""

        model = User
        fields = ('email', 'first_name', 'last_name')
|
[
"jackmorgannz@gmail.com"
] |
jackmorgannz@gmail.com
|
f0305eec604f96a1c795b04494e5e2bd3d1ca417
|
14df5d90af993150634e596c28cecf74dffe611f
|
/imghdr_test.py
|
2c67ccbd0946c5e2ff7d38098fb675ccc446307d
|
[] |
no_license
|
mamaker/IntroPy
|
7a0614905b95ab5c15ac94b1245278c3ae5d4ce0
|
dfea20eb465077e3512c878c549529a4b9282297
|
refs/heads/master
| 2020-05-09T18:26:16.681103
| 2019-04-23T01:05:31
| 2019-04-23T01:05:31
| 181,342,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
# -*- coding: utf-8 -*-
"""
imghdr_test.py
Created on Sat Apr 20 11:19:17 2019
@author: madhu
"""
import imghdr
file_name = 'oreilly.png'
print('File', file_name,'is a:', imghdr.what(file_name))
|
[
"madhuvasudevan@yahoo.com"
] |
madhuvasudevan@yahoo.com
|
4f48a8ed86212b4798e38875b2970b4d6d92420d
|
7e9b15d1793aaee5873d0047ed7dd0f47f01d905
|
/series_tiempo_ar_api/apps/analytics/elasticsearch/constants.py
|
0626fc0e4cfa298137da7d090e046ca718473e69
|
[
"MIT"
] |
permissive
|
SantiagoPalay/series-tiempo-ar-api
|
9822b7eac5714c1ed07ee11664b3608f1fc3e9cf
|
c0c665fe4caf8ce43a5eb12962ee36a3dd6c2aa4
|
refs/heads/master
| 2020-04-24T19:41:02.857554
| 2019-02-21T14:43:23
| 2019-02-21T14:43:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
from series_tiempo_ar_api.libs.indexing.constants import \
VALUE, CHANGE, PCT_CHANGE, CHANGE_YEAR_AGO, PCT_CHANGE_YEAR_AGO
SERIES_QUERY_INDEX_NAME = 'query'
REP_MODES = [
VALUE,
CHANGE,
PCT_CHANGE,
CHANGE_YEAR_AGO,
PCT_CHANGE_YEAR_AGO,
]
AGG_DEFAULT = 'avg'
AGG_SUM = 'sum'
AGG_END_OF_PERIOD = 'end_of_period'
AGG_MAX = 'max'
AGG_MIN = 'min'
AGGREGATIONS = [
AGG_DEFAULT,
AGG_SUM,
AGG_END_OF_PERIOD,
AGG_MAX,
AGG_MIN,
]
PARAM_REP_MODE = 'representation_mode'
PARAM_COLLAPSE_AGG = 'collapse_aggregation'
|
[
"19612265+lucaslavandeira@users.noreply.github.com"
] |
19612265+lucaslavandeira@users.noreply.github.com
|
a665bef85088b02f9afefbab6d33cec9c86181e8
|
b7cfdeb15b109220017a66ed6094ce890c234b74
|
/AI/deep_learning_from_scratch/numpy_prac/multidimensional_array.py
|
f4bcc03331c3e16239389b8444d40b2f660af3db
|
[] |
no_license
|
voidsatisfaction/TIL
|
5bcde7eadc913bdf6f5432a30dc9c486f986f837
|
43f0df9f8e9dcb440dbf79da5706b34356498e01
|
refs/heads/master
| 2023-09-01T09:32:04.986276
| 2023-08-18T11:04:08
| 2023-08-18T11:04:08
| 80,825,105
| 24
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
import numpy as np
B = np.array([[1, 2], [3, 4], [5, 6]])
B
np.ndim(B) # 2
B.shape # (3,2) 3x2 행렬
A = np.array([[1,2,3], [4,5,6]])
B = np.array([[1,2], [3,4], [5,6]])
np.dot(A, B)
|
[
"lourie@naver.com"
] |
lourie@naver.com
|
5909b8a429dde3c3db85365a4a2fcafe8504a73c
|
6febd920ced70cbb19695801a163c437e7be44d4
|
/leetcode_oj/string/strStr.py
|
b0bc9a66ec3e9adf67f316f37ee2da101b6c25ef
|
[] |
no_license
|
AngryBird3/gotta_code
|
b0ab47e846b424107dbd3b03e0c0f3afbd239c60
|
b9975fef5fa4843bf95d067bea6d064723484289
|
refs/heads/master
| 2021-01-20T16:47:35.098125
| 2018-03-24T21:31:01
| 2018-03-24T21:31:01
| 53,180,336
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
if not haystack and not needle:
return 0
for i in range(len(haystack) - len(needle) + 1):
match = True
for j in range(len(needle)):
if haystack[i+j] != needle[j]:
match = False
break
if match:
return i
return -1
|
[
"dhaaraa.darji@gmail.com"
] |
dhaaraa.darji@gmail.com
|
a7e448139f2bd614be72df1a7ece9dde49e3be0f
|
2a7e44adc8744c55a25e3cafcc2fa19a1607e697
|
/settings_inspector/management/commands/inspect_settings.py
|
6f7437af5ca147fdd91a75feddb2467cdbec5bf7
|
[
"MIT"
] |
permissive
|
fcurella/django-settings_inspector
|
45529288dc8dde264383739c55abe6a9d2077ded
|
69a6295de865f540d024e79aab4d211ce3c1d847
|
refs/heads/master
| 2020-06-04T01:57:17.216783
| 2012-01-05T19:05:12
| 2012-01-05T19:05:12
| 2,989,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
from settings_inspector.parser import Setting
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle(self, *args, **options):
root_setting = Setting('django.conf')
import ipdb; ipdb.set_trace()
|
[
"flavio.curella@gmail.com"
] |
flavio.curella@gmail.com
|
a04179ec631fa9ee2c77775b4b950d00ead1cff3
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py
|
c8b1ce0c2a7ca85de612a46ed698cd5daf7180dc
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,674
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
python get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = MachineLearningServicesMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-1111-2222-3333-444444444444",
)
response = client.registry_code_containers.get(
resource_group_name="testrg123",
registry_name="testregistry",
code_name="testContainer",
)
print(response)
# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/get.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
da080bc3ffe0ad4f0d4461acf3bf439970b3713b
|
d706f83450d32256e568ea2e279649b9d85ddb94
|
/accounts/views.py
|
8cd59810b95abf689b8f6bdf3151729484d2fb7d
|
[] |
no_license
|
celord/advacneddjango
|
146d3d4ae351803b37e8599225b38b948e42a8b7
|
044d172fb10556cdeede6888dcec5f466097754d
|
refs/heads/main
| 2023-08-18T19:26:07.230821
| 2021-09-26T17:58:45
| 2021-09-26T17:58:45
| 406,921,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# accounts/views.py
from django.urls import reverse_lazy
from django.views import generic
from .forms import CustomUserCreationForm
class SignupPageView(generic.CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'registration/signup.html'
|
[
"celord@gmail.com"
] |
celord@gmail.com
|
db62acc5b5c6704db566b47448faeaed2132e6ba
|
bb64d7194d9f7e8ef6fc2dbfdbc0569713d1079c
|
/FocalLoss.py
|
74a05c5aa62338c5c30e91a1981482671095182f
|
[] |
no_license
|
scott-mao/Top-Related-Meta-Learning-Method-for-Few-Shot-Detection
|
471e7d6e71255333d9b4c929023d7e43ef19fdd2
|
49bfd702f41deaec60fa95314436f69b4e217e6f
|
refs/heads/main
| 2023-04-11T13:00:13.358560
| 2021-04-27T02:24:23
| 2021-04-27T02:24:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,767
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# Written by Chao CHEN (chaochancs@gmail.com)
# Created On: 2017-08-11
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
r"""
This criterion is a implemenation of Focal Loss, which is proposed in
Focal Loss for Dense Object Detection.
Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
The losses are averaged across observations for each minibatch.
Args:
alpha(1D Tensor, Variable) : the scalar factor for this criterion
gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
putting more focus on hard, misclassified examples
size_average(bool): size_average(bool): By default, the losses are averaged over observations for each minibatch.
However, if the field size_average is set to False, the losses are
instead summed for each minibatch.
"""
def __init__(self, class_num, alpha=None, gamma=2, size_average=False):
super(FocalLoss, self).__init__()
if alpha is None:
self.alpha = Variable(torch.Tensor([[0.25]]*class_num))
else:
if isinstance(alpha, Variable):
self.alpha = alpha
else:
self.alpha = Variable(alpha)
self.gamma = gamma
self.class_num = class_num
self.size_average = size_average
def forward(self, inputs, targets):
N = inputs.size(0)
#print(N)
C = inputs.size(1)
P = F.softmax(inputs,dim=1)
#class_mask = inputs.data.new(N, C).fill_(0)
#class_mask = Variable(class_mask)
ids = targets.unsqueeze(-1)
#class_mask.scatter_(1, ids.data, 1.)
#class_mask = Variable(class_mask)
#print(class_mask)
class_mask=Variable(torch.zeros(N,C).scatter_(1,ids,1.0).cuda())
if inputs.is_cuda and not self.alpha.is_cuda:
self.alpha = self.alpha.cuda()
#print(self.alpha,Variable(ids).data.view(-1))
alpha = self.alpha[ids.squeeze(-1).cuda()]
probs = (P*class_mask).sum(1).view(-1,1)
log_p = probs.log()
#print('probs size= {}'.format(probs.size()))
#print(probs)
batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p
#print('-----bacth_loss------')
#print(batch_loss)
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
if __name__ == "__main__":
alpha = torch.rand(21, 1)
print(alpha)
FL = FocalLoss(class_num=5, gamma=0 )
CE = nn.CrossEntropyLoss()
N = 4
C = 5
inputs = torch.rand(N, C)
targets = torch.LongTensor(N).random_(C)
inputs_fl = Variable(inputs.clone(), requires_grad=True)
targets_fl = Variable(targets.clone())
inputs_ce = Variable(inputs.clone(), requires_grad=True)
targets_ce = Variable(targets.clone())
print('----inputs----')
print(inputs)
print('---target-----')
print(targets)
fl_loss = FL(inputs_fl, targets_fl)
ce_loss = CE(inputs_ce, targets_ce)
print('ce = {}, fl ={}'.format(ce_loss.data[0], fl_loss.data[0]))
fl_loss.backward()
ce_loss.backward()
#print(inputs_fl.grad.data)
print(inputs_ce.grad.data)
|
[
"noreply@github.com"
] |
scott-mao.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.