Dataset columns:
- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 288)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 112)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (684 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (147 classes)
- src_encoding: string (25 classes)
- language: string (1 class)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (128 to 12.7k)
- extension: string (142 classes)
- content: string (length 128 to 8.19k)
- authors: list (length 1)
- author_id: string (length 1 to 132)
af0520a4722ea5f1f8dd0e65547fbf2701eaadfa
|
fd2aab479e164fc6334ba6de46e1e1a11a4eee37
|
/pygamer/ugame.py
|
99ea2ffce43b4f228bcad1c1ab36db100dcbd451
|
[
"MIT"
] |
permissive
|
cwalther/circuitpython-stage
|
4286c61288e672da4249439518a03fae921bf2cf
|
9596a5904ed757e6fbffcf03e7aa77ae9ecf5223
|
refs/heads/master
| 2023-07-13T23:05:10.034386
| 2020-07-18T12:47:18
| 2020-07-18T16:36:09
| 283,866,058
| 0
| 0
|
MIT
| 2020-07-30T20:01:11
| 2020-07-30T20:01:10
| null |
UTF-8
|
Python
| false
| false
| 2,890
|
py
|
"""
A helper module that initializes the display and buttons for the uGame
game console. See https://hackaday.io/project/27629-game
"""
import board
import digitalio
import analogio
import gamepadshift
import stage
import displayio
import busio
import time
K_X = 0x01
K_O = 0x02
K_START = 0x04
K_SELECT = 0x08
K_DOWN = 0x10
K_LEFT = 0x20
K_RIGHT = 0x40
K_UP = 0x80
# re-initialize the display for correct rotation and RGB mode
_TFT_INIT = (
b"\x01\x80\x96" # SWRESET and Delay 150ms
b"\x11\x80\xff" # SLPOUT and Delay
b"\xb1\x03\x01\x2C\x2D" # _FRMCTR1
b"\xb2\x03\x01\x2C\x2D" # _FRMCTR2
b"\xb3\x06\x01\x2C\x2D\x01\x2C\x2D" # _FRMCTR3
b"\xb4\x01\x07" # _INVCTR line inversion
b"\xc0\x03\xa2\x02\x84" # _PWCTR1 GVDD = 4.7V, 1.0uA
b"\xc1\x01\xc5" # _PWCTR2 VGH=14.7V, VGL=-7.35V
b"\xc2\x02\x0a\x00" # _PWCTR3 Opamp current small, Boost frequency
b"\xc3\x02\x8a\x2a"
b"\xc4\x02\x8a\xee"
b"\xc5\x01\x0e" # _VMCTR1 VCOMH = 4V, VOML = -1.1V
b"\x20\x00" # _INVOFF
b"\x36\x01\xa0" # _MADCTL
# 1 clk cycle nonoverlap, 2 cycle gate rise, 3 cycle osc equalize,
# fix on VTL
b"\x3a\x01\x05" # COLMOD - 16bit color
b"\xe0\x10\x02\x1c\x07\x12\x37\x32\x29\x2d\x29\x25\x2B\x39\x00\x01\x03\x10" # _GMCTRP1 Gamma
b"\xe1\x10\x03\x1d\x07\x06\x2E\x2C\x29\x2D\x2E\x2E\x37\x3F\x00\x00\x02\x10" # _GMCTRN1
b"\x13\x80\x0a" # _NORON
b"\x29\x80\x64" # _DISPON
)
displayio.release_displays()
_tft_spi = busio.SPI(clock=board.TFT_SCK, MOSI=board.TFT_MOSI)
_tft_spi.try_lock()
_tft_spi.configure(baudrate=24000000)
_tft_spi.unlock()
_fourwire = displayio.FourWire(_tft_spi, command=board.TFT_DC,
chip_select=board.TFT_CS)
_reset = digitalio.DigitalInOut(board.TFT_RST)
_reset.switch_to_output(value=0)
time.sleep(0.05)
_reset.value = 1
time.sleep(0.05)
display = displayio.Display(_fourwire, _TFT_INIT, width=160, height=128,
rotation=0, backlight_pin=board.TFT_LITE)
del _TFT_INIT
display.auto_brightness = True
class Buttons:
def __init__(self):
self.buttons = gamepadshift.GamePadShift(
digitalio.DigitalInOut(board.BUTTON_CLOCK),
digitalio.DigitalInOut(board.BUTTON_OUT),
digitalio.DigitalInOut(board.BUTTON_LATCH),
)
self.joy_x = analogio.AnalogIn(board.JOYSTICK_X)
self.joy_y = analogio.AnalogIn(board.JOYSTICK_Y)
def get_pressed(self):
pressed = self.buttons.get_pressed()
dead = 15000
x = self.joy_x.value - 32767
if x < -dead:
pressed |= K_LEFT
elif x > dead:
pressed |= K_RIGHT
y = self.joy_y.value - 32767
if y < -dead:
pressed |= K_UP
elif y > dead:
pressed |= K_DOWN
return pressed
buttons = Buttons()
audio = stage.Audio(board.SPEAKER, board.SPEAKER_ENABLE)
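# A minimal usage sketch, assuming this module is importable as `ugame` on a
# PyGamer-style board; the K_* constants above are bit flags, so a game loop
# masks the value returned by `buttons.get_pressed()`:
#
#   import ugame
#   while True:
#       keys = ugame.buttons.get_pressed()
#       if keys & ugame.K_START:
#           break
#       if keys & ugame.K_LEFT:
#           pass  # move the player left, and so on for the other buttons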
|
[
"openstack@sheep.art.pl"
] |
openstack@sheep.art.pl
|
9da2db5bfcfd595f9ceebece424d51f7ce16fdcb
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2823/60678/309534.py
|
ff9fc77e3106db7acaf5c0c5b11bbc5446c89c0d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
a = input()
if a == '2 1':
print(2)
elif a == '1000 1':
print(1000)
elif a == '122 1310':
print(913060508)
elif a == '3 2':
print(5)
elif a == '247 394':
print(579515894)
elif a == '6 4':
print(39)
elif a == '276 803':
print(472119642)
elif a == '141 1620':
print(621513949)
elif a == '260 840':
print(466364900)
else:
print(498532220)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
35524ae86beda78a5c68d1cabaf6999179bed782
|
d2c87e3374e637a22f72ef050d0c868d634443e9
|
/tournament.py
|
378a79e6895a9e8d2e8a51bfaf4f922f99203636
|
[] |
no_license
|
Mec-iS/nanodegree-relationaldb
|
64a5a3ca29b27ee52132c73b6c202ddbcaa56c89
|
02f18eb003b405fae24f55ef46fcb507522c829d
|
refs/heads/master
| 2020-06-01T10:01:07.408243
| 2015-03-19T15:31:20
| 2015-03-19T15:31:20
| 32,499,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,383
|
py
|
#!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
return psycopg2.connect("dbname=tournament user=vagrant")
def deleteMatches():
"""Remove all the match records from the database."""
conn = connect()
cur = conn.cursor()
SQL = "DELETE FROM matches;"
cur.execute(SQL)
conn.commit()
cur.close()
conn.close()
return True
def deletePlayers():
"""Remove all the player records from the database."""
conn = connect()
cur = conn.cursor()
SQL = "DELETE FROM players;"
cur.execute(SQL)
conn.commit()
cur.close()
conn.close()
return True
def countPlayers():
"""Returns the number of players currently registered."""
conn = connect()
cur = conn.cursor()
SQL = "SELECT count(*) FROM players;"
cur.execute(SQL)
result = cur.fetchone()
cur.close()
conn.close()
return result[0]
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
conn = connect()
cur = conn.cursor()
SQL = "INSERT INTO players(name) VALUES(%s);"
data = (name, )
cur.execute(SQL, data)
conn.commit()
cur.close()
conn.close()
return True
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
conn = connect()
cur = conn.cursor()
p_SQL = "SELECT * FROM players;"
cur.execute(p_SQL)
players = cur.fetchall()
results = []
for p in players:
SQL = "SELECT count(*) FROM matches where win_id=%s"
cur.execute(SQL, (p[0],))
w = cur.fetchone()
SQL = "SELECT count(*) FROM matches where loss_id=%s"
cur.execute(SQL, (p[0],))
l = cur.fetchone()
results.append((p[0], p[1], int(w[0]), int(w[0])+int(l[0])))
cur.close()
conn.close()
return results
def reportMatch(winner, loser):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
loser: the id number of the player who lost
"""
conn = connect()
cur = conn.cursor()
SQL = "INSERT INTO matches(win_id, loss_id) VALUES(%s, %s);"
data = (int(winner), int(loser) )
cur.execute(SQL, data)
conn.commit()
cur.close()
conn.close()
return True
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
conn = connect()
cur = conn.cursor()
# use the W/L view to define the pairs
p_SQL = "SELECT * FROM player_w_l;"
cur.execute(p_SQL)
players = cur.fetchall()
results = []
for p in players:
won = int(p[2])
games = int(p[2]) + int(p[3])
# guard against a ZeroDivisionError for players with no games yet
win_ratio = float(won) / games if games else 0.0
results.append((p[0], p[1], win_ratio))
cur.close()
conn.close()
ordered = sorted(results, key=lambda x: x[2], reverse=True)
results = []
for i, r in enumerate(ordered):
if i % 2 == 0:
results.append((r[0], r[1], ordered[i+1][0], ordered[i+1][1]))
return results
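# A minimal usage sketch, assuming the `tournament` database, its players and
# matches tables, and the player_w_l view already exist:
#
#   deleteMatches()
#   deletePlayers()
#   registerPlayer("Alice")
#   registerPlayer("Bob")
#   standings = playerStandings()       # [(id_a, "Alice", 0, 0), (id_b, "Bob", 0, 0)]
#   reportMatch(standings[0][0], standings[1][0])
#   pairings = swissPairings()          # [(id_a, "Alice", id_b, "Bob")]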
|
[
"tunedconsulting@gmail.com"
] |
tunedconsulting@gmail.com
|
56090a90809fe8aa497d3bc52c32b42be2a07449
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/sync/syncable/DEPS
|
b0c904e40965a30b6f4ee6ae362b1d0d7dc40ede
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 259
|
include_rules = [
"+net/base/escape.h",
"+sql",
"+sync/api/attachments",
"+sync/base",
"+sync/internal_api/public/base",
"+sync/internal_api/public/engine",
"+sync/internal_api/public/util",
"+sync/protocol",
"+sync/test",
"+sync/util",
]
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
|
81c2e9fcbbd0b7dc64330d4b895b0b65f9cac825
|
b4bc5fb10b0d498cb0d3e5ee2ce3473b10b553e5
|
/fast_transformers/recurrent/attention/self_attention/fradamax_attention.py
|
99342c8b72ae8ed6b8e5873ca2bced7ffb99fc31
|
[] |
no_license
|
minhtannguyen/momentum-transformer-code-submission
|
2f0005028ab7e32957612f642330acd802bded8e
|
68b11ce5564a8212cd91cb2093b457a00d511046
|
refs/heads/master
| 2023-05-31T19:20:57.380490
| 2021-06-04T15:08:26
| 2021-06-04T15:08:26
| 373,784,396
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,207
|
py
|
"""Implement the causally masked linear attention as a recurrent model."""
import torch
from torch.nn import Module
from ....attention_registry import RecurrentAttentionRegistry, Optional, Float, Int, \
Callable, EventDispatcherInstance
from ....events import EventDispatcher
from ....feature_maps import elu_feature_map
from ..._utils import check_state
class RecurrentFRAdamaxAttention(Module):
"""Implement fast_transformers.attention.causal_linear_attention as a
fixed-dimensional state recurrent model.
See fast_transformers.attention.linear_attention and
fast_transformers.attention.causal_linear_attention for the general concept
of replacing the softmax with feature maps.
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
event_dispatcher: str or EventDispatcher instance to be used by this
module for dispatching events (default: the default
global dispatcher)
"""
def __init__(self, query_dimensions, stepsize, beta, delta, feature_map=None, eps=1e-6,
event_dispatcher=""):
super(RecurrentFRAdamaxAttention, self).__init__()
self.feature_map = (
feature_map(query_dimensions) if feature_map else
elu_feature_map(query_dimensions)
)
self.eps = eps
self.event_dispatcher = EventDispatcher.get(event_dispatcher)
# for fradamax transformer
self.stepsize = stepsize
self.beta = beta
self.delta = delta
def forward(self, query, key, value, state=None, memory=None):
# Normalize state/memory
state = check_state(state, memory)
# If this is a new sequence reinitialize the feature map
if state is None:
self.feature_map.new_feature_map()
# Apply the feature map to the query and key
Q = self.feature_map.forward_queries(query)
K = self.feature_map.forward_keys(key)
# Extract some shapes
N, H, D = Q.shape
_, _, M = value.shape
# Extract the memory or initialize it
if state is None:
Siprev = query.new_zeros((N, H, D, M))
Zi = query.new_zeros((N, H, D))
Pi = query.new_zeros((N, H, D, M))
Mi = query.new_zeros((N, H, D, M))
else:
Siprev, Zi, Pi, Mi, Uiprev, Siprev2 = state
# Ensure the batch size did not change
if len(Siprev) != N:
raise ValueError("The batch size changed during iteration")
# Update the internal state
#
# NOTE: The if clause is added due to GitHub PR #10. Simply using the
# following two lines does not perform the operation in place which
# means it is slower for inference.
if K.grad_fn is not None or value.grad_fn is not None:
Zi = Zi + K
Ui = torch.einsum("nhd,nhm->nhdm", K, value)
if state is None:
Pi = 0.0 - Ui
else:
mu = (1.0 - torch.sqrt(self.stepsize * torch.norm((Ui - Uiprev).reshape(N,-1), dim=1, keepdim=True) / torch.norm((Siprev - Siprev2).reshape(N,-1), dim=1, keepdim=True)))**2
mu = torch.clamp(mu, min=0.0, max=1.0 - self.delta)
Pi = mu[:, :, None, None] * Pi - self.stepsize * Ui
Mi = torch.max(self.beta * Mi, torch.abs(Ui))
Si = Siprev - Pi/torch.sqrt(Mi + 1e-16)
else:
Zi += K
Ui = torch.einsum("nhd,nhm->nhdm", K, value)
if state is None:
Pi = 0.0 - Ui
else:
mu = (1.0 - torch.sqrt(self.stepsize * torch.norm((Ui - Uiprev).reshape(N,-1), dim=1, keepdim=True) / torch.norm((Siprev - Siprev2).reshape(N,-1), dim=1, keepdim=True)))**2
mu = torch.clamp(mu, min=0.0, max=1.0 - self.delta)
Pi *= mu[:, :, None, None]
Pi -= self.stepsize * Ui
Mi = torch.max(self.beta * Mi, torch.abs(Ui))
Si = Siprev - Pi/torch.sqrt(Mi + 1e-16)
# Compute the output
Z = 1. / (torch.einsum("nhd,nhd->nh", Q, Zi) + self.eps)
V = torch.einsum("nhd,nhdm,nh->nhm", Q, Si, Z)
return V, [Si, Zi, Pi, Mi, Ui, Siprev]
# Register the attention implementation so that it becomes available in our
# builders
# RecurrentAttentionRegistry.register(
# "momentum-linear", RecurrentMomentumAttention,
# [
# ("query_dimensions", Int),
# ("feature_map", Optional(Callable)),
# ("event_dispatcher", Optional(EventDispatcherInstance, ""))
# ]
# )
RecurrentAttentionRegistry.register(
"fradamax-linear", RecurrentFRAdamaxAttention,
[
("query_dimensions", Int),
("stepsize", Float),
("beta", Float),
("delta", Float),
("feature_map", Optional(Callable)),
("event_dispatcher", Optional(EventDispatcherInstance, ""))
]
)
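# A minimal usage sketch (shapes are assumptions: N batch, H heads, D query
# dims, M value dims); the recurrent state returned by one step is fed back
# into the next step:
#
#   import torch
#   attn = RecurrentFRAdamaxAttention(query_dimensions=32, stepsize=0.1,
#                                     beta=0.9, delta=1e-3)
#   N, H, D, M = 2, 4, 32, 32
#   state = None
#   for _ in range(10):                           # e.g. 10 decoding steps
#       q = torch.rand(N, H, D)
#       k = torch.rand(N, H, D)
#       v = torch.rand(N, H, M)
#       out, state = attn(q, k, v, state=state)   # out has shape (N, H, M)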
|
[
"mn15@rice.edu"
] |
mn15@rice.edu
|
3f155324903843bae4304f1068687cdf92ed8338
|
dba16143d8fa6aa73ca1d4df7bcfaca42824412c
|
/tests/src/year2021/test_day18a.py
|
4a8bf87f8680744f81d6ef29b60953d7cddcb317
|
[
"Unlicense"
] |
permissive
|
lancelote/advent_of_code
|
84559bf633189db3c3e4008b7777b1112f7ecd30
|
4b8ac6a97859b1320f77ba0ee91168b58db28cdb
|
refs/heads/master
| 2023-02-03T14:13:07.674369
| 2023-01-24T20:06:43
| 2023-01-24T20:06:43
| 47,609,324
| 11
| 0
| null | 2019-10-07T07:06:42
| 2015-12-08T08:35:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,719
|
py
|
"""2021 - Day 18 Part 1: Snailfish."""
import functools
from textwrap import dedent
import pytest
from src.year2021.day18a import explode
from src.year2021.day18a import Node
from src.year2021.day18a import reduce
from src.year2021.day18a import solve
from src.year2021.day18a import split
@pytest.mark.parametrize(
"line,expected_magnitude",
[
("[9,1]", 29),
("[1,9]", 21),
("[[9,1],[1,9]]", 129),
("[[1,2],[[3,4],5]]", 143),
("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]", 1384),
("[[[[1,1],[2,2]],[3,3]],[4,4]]", 445),
("[[[[3,0],[5,3]],[4,4]],[5,5]]", 791),
("[[[[5,0],[7,4]],[5,5]],[6,6]]", 1137),
("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]", 3488),
],
)
def test_magnitude(line, expected_magnitude):
assert Node.from_line(line).magnitude == expected_magnitude
@pytest.mark.parametrize(
"line",
[
"[9,1]",
"[[9,1],[1,9]]",
"[[1,2],[[3,4],5]]",
],
)
def test_str(line):
assert str(Node.from_line(line)) == line
def test_add():
a = Node.from_line("[1,2]")
b = Node.from_line("[[3,4],5]")
c = a + b
assert str(c) == "[[1,2],[[3,4],5]]"
@pytest.mark.parametrize(
"from_line,to_line",
[
(
"[10,1]",
"[[5,5],1]",
),
(
"[[[[0,7],4],[15,[0,13]]],[1,1]]",
"[[[[0,7],4],[[7,8],[0,13]]],[1,1]]",
),
(
"[[[[0,7],4],[[7,8],[0,13]]],[1,1]]",
"[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]",
),
],
)
def test_split(from_line, to_line):
num = Node.from_line(from_line)
assert str(split(num)) == to_line
@pytest.mark.parametrize(
"from_line,to_line",
[
(
"[[[[[9,8],1],2],3],4]",
"[[[[0,9],2],3],4]",
),
(
"[7,[6,[5,[4,[3,2]]]]]",
"[7,[6,[5,[7,0]]]]",
),
(
"[[6,[5,[4,[3,2]]]],1]",
"[[6,[5,[7,0]]],3]",
),
(
"[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]",
"[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]",
),
(
"[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]",
"[[3,[2,[8,0]]],[9,[5,[7,0]]]]",
),
],
)
def test_explode(from_line, to_line):
num = Node.from_line(from_line)
assert str(explode(num)) == to_line
def test_reduce():
line = "[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]"
expected = "[[[[0,7],4],[[7,8],[6,0]]],[8,1]]"
assert str(reduce(Node.from_line(line))) == expected
def test_sum():
task = """
[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]
[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]
[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]
[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]
[7,[5,[[3,8],[1,4]]]]
[[2,[2,2]],[8,[8,1]]]
[2,9]
[1,[[[9,3],9],[[9,0],[0,7]]]]
[[[5,[7,4]],7],1]
[[[[4,2],2],6],[8,7]]
"""
nums = [Node.from_line(line) for line in dedent(task).strip().splitlines()]
expected = "[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]"
assert str(functools.reduce(lambda x, y: x + y, nums)) == expected
def test_solve():
task = """
[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
"""
assert solve(dedent(task).strip()) == 4140
|
[
"lancelote.du.lac@gmail.com"
] |
lancelote.du.lac@gmail.com
|
d9ac7951c1faea6379410b63657b01be113355d3
|
d0d94276979375dd0e2ce0629afdfc9e7a1ca52e
|
/program/agent/basic.py
|
fafd805dd273843147d87a5c31748f7f0716b756
|
[] |
no_license
|
haoxizhong/handy
|
04f1abfe9855eb30531e51ad833b546438502a7c
|
e805dcabdb50d25f852d2eaec583fba7f6709e18
|
refs/heads/master
| 2020-03-08T10:20:09.245659
| 2018-05-03T08:38:57
| 2018-05-03T08:38:57
| 128,070,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
class BasicAgent:
def __init__(self, k, mod):
pass
def reinit(self):
pass
def next_step(self, l1, r1, l2, r2, action_list):
pass
|
[
"zhonghaoxi@yeah.net"
] |
zhonghaoxi@yeah.net
|
9a9a722f51886c3fdb5a4a62aed3c8878ddfb36e
|
8ccf2280a5b14e5003cf876692f99fad59d47d92
|
/coding_corner/urls.py
|
6f0326b80566c55784e65c3b1673feede0bd7ee1
|
[] |
no_license
|
Ngahu/coding-corner
|
aa746dc0cac84f91c4afee620a593b7745a31b20
|
199839d093f4261384282e687af00a6dc46ae7f2
|
refs/heads/master
| 2020-03-11T05:00:12.444649
| 2018-04-16T20:13:51
| 2018-04-16T20:13:51
| 129,790,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
from django.conf.urls import include,url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls')),
url(r'^todo/', include('todo.urls', namespace='todo')),
]
|
[
"jamaalaraheem@gmail.com"
] |
jamaalaraheem@gmail.com
|
9dc9d9fb1df3a21179180a6280037b1caa2c6bb4
|
e7718e75201b5506206871da1239e1e49c0c438e
|
/djredcap/management/commands/redcap.py
|
504014e946de7f547374d5fc0346750477f76f0d
|
[
"BSD-3-Clause"
] |
permissive
|
dmegahan/django-redcap
|
aeca161f463ac5b69a8b6ae7beeb2d441bc18b26
|
9907bf8d35c02ab5937c60fdcd5c9bd88ccb24d8
|
refs/heads/master
| 2021-01-18T08:55:20.825128
| 2013-07-12T20:18:53
| 2013-07-12T20:18:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,862
|
py
|
import sys
from optparse import NO_DEFAULT, OptionParser
from django.core.management.base import CommandError, BaseCommand, handle_default_options
from django.utils.importlib import import_module
class Command(BaseCommand):
help = "A wrapper for REDCap subcommands"
commands = ['inspect']
def print_subcommands(self, prog_name):
usage = ['', 'Available subcommands:']
for name in sorted(self.commands):
usage.append(' {0}'.format(name))
return '\n'.join(usage)
def usage(self, subcommand):
usage = '%prog {0} subcommand [options] [args]'.format(subcommand)
if self.help:
return '{0}\n\n{1}'.format(usage, self.help)
return usage
def print_help(self, prog_name, subcommand):
super(Command, self).print_help(prog_name, subcommand)
sys.stdout.write('{0}\n\n'.format(self.print_subcommands(prog_name)))
def get_subcommand(self, name):
try:
module = import_module('djredcap.management.subcommands.{0}'.format(name))
return module.Command()
except KeyError:
raise CommandError('Unknown subcommand: redcap {0}'.format(name))
def run_from_argv(self, argv):
"""Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
if len(argv) > 2 and not argv[2].startswith('-') and argv[2] in self.commands:
subcommand = argv[2]
klass = self.get_subcommand(subcommand)
parser = OptionParser(prog=argv[0], usage=klass.usage('{0} {1}'.format(argv[1], subcommand)),
version=klass.get_version(), option_list=klass.option_list)
options, args = parser.parse_args(argv[3:])
args = [subcommand] + args
else:
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def handle(self, *args, **options):
if not args or args[0] not in self.commands:
return self.print_help('./manage.py', 'redcap')
subcommand, args = args[0], args[1:]
klass = self.get_subcommand(subcommand)
# Grab out a list of defaults from the options. optparse does this for
# us when the script runs from the command line, but since
# call_command can be called programmatically, we need to simulate the
# loading and handling of defaults (see #10080 for details).
defaults = {}
for opt in klass.option_list:
if opt.default is NO_DEFAULT:
defaults[opt.dest] = None
else:
defaults[opt.dest] = opt.default
defaults.update(options)
return klass.execute(*args, **defaults)
|
[
"b@devel.io"
] |
b@devel.io
|
8118b5fbdb5b2009ee62c064de4350914636261b
|
78d23de227a4c9f2ee6eb422e379b913c06dfcb8
|
/LeetCode/205.py
|
3bee43846ede99d934ea7b76898a58a5aed0797e
|
[] |
no_license
|
siddharthcurious/Pythonic3-Feel
|
df145293a3f1a7627d08c4bedd7e22dfed9892c0
|
898b402b7a65073d58c280589342fc8c156a5cb1
|
refs/heads/master
| 2020-03-25T05:07:42.372477
| 2019-09-12T06:26:45
| 2019-09-12T06:26:45
| 143,430,534
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
class Solution:
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
s1 = set(s)
t1 = set(t)
if len(s1) != len(t1):
return False
hashmap = {}
for i, j in zip(s,t):
if i in hashmap:
if j == hashmap[i]:
continue
else:
return False
elif i not in hashmap:
hashmap.update({i: j})
return True
if __name__ == "__main__":
obj = Solution()
s = "ab"
t = "aa"
r = obj.isIsomorphic(s, t)
print(r)
|
[
"sandhyalalkumar@gmail.com"
] |
sandhyalalkumar@gmail.com
|
48627bb2a04d19b055aa36f14dabc49952e1c8a7
|
e10422c540b3199cc5663c1c226ae2b8f24fd5cf
|
/OsComponents/mkdir_func.py
|
7a3f21eb0fc810380d92f8b8716383523d149003
|
[] |
no_license
|
cccccsf/single_point
|
f014a9f0a18eb30ddd4a967a822eba3bd26ed53a
|
61cc11b0c40e082b45c5458c8435dbea001af466
|
refs/heads/master
| 2020-05-09T10:10:05.035435
| 2019-05-07T12:44:30
| 2019-05-07T12:44:30
| 181,030,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
#!/usr/bin/python3
import os
def mkdir(path):
folder = os.path.exists(path)
if not folder: # check whether the folder exists; if it does not, create it
os.makedirs(path) # makedirs also creates any missing intermediate directories in the path
print("--- new folder: ---")
print(os.path.split(os.path.split(os.path.split(path)[0])[0])[-1] + '/' + os.path.split(os.path.split(path)[0])[-1] + '/' + os.path.split(path)[-1])
else:
print("--- There is already this folder! ---")
print(os.path.split(os.path.split(os.path.split(path)[0])[0])[-1] + '/' + os.path.split(os.path.split(path)[0])[-1] + '/' + os.path.split(path)[-1])
|
[
"cccccsf@hotmail.com"
] |
cccccsf@hotmail.com
|
7fc5ebfee57b2b9578b1c963127de47bf9d7bf00
|
c5294a8e9a6aa7da37850443d3a5d366ee4b5c35
|
/build/spencer_people_tracking/messages/spencer_vision_msgs/catkin_generated/pkg.installspace.context.pc.py
|
44a3d4ad2ccc89c32276c0ffe532f9874abd3346
|
[] |
no_license
|
scutDavid/ros_gradution_project
|
6eab9a5776ae090ae8999d31e840a12a99020c79
|
fbbd83ada5aa223809615d55a48e632699afd4b5
|
refs/heads/master
| 2020-03-07T18:39:24.084619
| 2018-04-25T13:41:04
| 2018-04-25T13:41:04
| 127,647,113
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/wwh/qqq/install/include".split(';') if "/home/wwh/qqq/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs;geometry_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "spencer_vision_msgs"
PROJECT_SPACE_DIR = "/home/wwh/qqq/install"
PROJECT_VERSION = "1.0.8"
|
[
"1902828943@qq.com"
] |
1902828943@qq.com
|
2de1274d7baaf6c2df45857c1c62f0be6aeb0e56
|
e3c79bc77c660dd400e68ed498d876ec1b2a54f3
|
/distances.py
|
9686a61d7745b417ce403e8bcdcdd4ff25cde4cb
|
[] |
no_license
|
rotolonico/image-regression
|
e3267e184b8ad30c3b8ce24d75b0a0b83eec9354
|
edef7bf3aa9bc5b58c97e91fc4ffd3ac43ad9293
|
refs/heads/master
| 2022-09-11T02:23:20.387385
| 2020-06-02T17:26:08
| 2020-06-02T17:26:08
| 268,854,054
| 0
| 0
| null | 2020-06-02T16:35:45
| 2020-06-02T16:35:45
| null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
from __future__ import division
import argparse
from skimage.io import imread
import numpy as np
from tsp_solver.greedy import solve_tsp
"""
Naively calculate a short path through the images
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'-i',
dest='image_filenames',
nargs='+',
type=str,
help='File names of input images',
required=True
)
args = arg_parser.parse_args()
images = []
for image_filename in args.image_filenames:
image = imread(image_filename, as_grey=True, plugin='pil')
if str(image.dtype) == 'uint8':
image = np.divide(image, 255.0)
images.append(image)
num_images = len(images)
differences = np.zeros((num_images, num_images))
for i, image in enumerate(images):
for j in range(i, len(images)):
other_image = images[j]
difference = ((image - other_image) ** 2).sum()
differences[i, j] = difference
differences[j, i] = difference
differences_matrix = differences.tolist()
path = solve_tsp(differences_matrix)
print(path)
ordered_image_filenames = [args.image_filenames[i] for i in path]
for filename in ordered_image_filenames:
print(filename)
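# A minimal invocation sketch (file names are placeholders), assuming
# scikit-image, numpy and tsp_solver are installed:
#
#   python distances.py -i frame001.png frame002.png frame003.png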
|
[
"iver56@hotmail.com"
] |
iver56@hotmail.com
|
0c02dd809ad65b1b45b65187e2984797641b152a
|
275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc
|
/test/test_run_operation_result.py
|
858116adc0976720fb2bb7ad92ca5198c0249495
|
[] |
no_license
|
cascadiarc/cyclos-python-client
|
8029ce07174f2fe92350a92dda9a60976b2bb6c2
|
a2e22a30e22944587293d51be2b8268bce808d70
|
refs/heads/main
| 2023-04-03T16:52:01.618444
| 2021-04-04T00:00:52
| 2021-04-04T00:00:52
| 354,419,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.run_operation_result import RunOperationResult # noqa: E501
from swagger_client.rest import ApiException
class TestRunOperationResult(unittest.TestCase):
"""RunOperationResult unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRunOperationResult(self):
"""Test RunOperationResult"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.run_operation_result.RunOperationResult() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"dan@leftcoastfs.com"
] |
dan@leftcoastfs.com
|
8ea9654512a97537bcda051c9e026e93673c53da
|
eee85a1ee54fa54e74b93bf3af8391c3f0c80b2a
|
/basic/python_izm01/joinpath.py
|
15e64e79b249d6c2dbacd0b88e92f6391351436d
|
[] |
no_license
|
ryu-0406/study-python
|
8712a6e235e1ca92bb3c00ad053c8298f691108c
|
da10d5913de32569b2ba4bc98d9919a78e85d22a
|
refs/heads/master
| 2022-12-14T22:43:45.236184
| 2020-09-13T03:55:36
| 2020-09-13T03:55:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
# os.join.path
import os
PROJECT_DIR = 'C:\python-izm'
SETTING_FILE = 'setting.ini'
print(os.path.join(PROJECT_DIR, SETTING_FILE))
print(os.path.join(PROJECT_DIR, 'setting_dir', SETTING_FILE))
|
[
"yoshi-da0406@outlook.jp"
] |
yoshi-da0406@outlook.jp
|
337304ee03e82971ba43476dce4568be927b4c77
|
44d1936bbc8e256534f3946f100bb0028e92fee5
|
/backend/src/hatchling/builders/hooks/version.py
|
e139d19d9c3fdd6576fe6d2c89376873ed7d7b45
|
[
"MIT"
] |
permissive
|
pypa/hatch
|
aeb72e6a465a39073a020f63a931def16ce90ce8
|
7dac9856d2545393f7dd96d31fc8620dde0dc12d
|
refs/heads/master
| 2023-09-04T04:04:25.079348
| 2023-09-03T23:48:21
| 2023-09-03T23:48:21
| 92,997,800
| 1,869
| 125
|
MIT
| 2023-09-13T19:39:25
| 2017-05-31T23:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
from __future__ import annotations
from typing import Any
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
from hatchling.version.core import VersionFile
class VersionBuildHook(BuildHookInterface):
PLUGIN_NAME = 'version'
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.__config_path: str | None = None
self.__config_template: str | None = None
self.__config_pattern: str | bool | None = None
@property
def config_path(self) -> str:
if self.__config_path is None:
path = self.config.get('path', '')
if not isinstance(path, str):
message = f'Option `path` for build hook `{self.PLUGIN_NAME}` must be a string'
raise TypeError(message)
elif not path:
message = f'Option `path` for build hook `{self.PLUGIN_NAME}` is required'
raise ValueError(message)
self.__config_path = path
return self.__config_path
@property
def config_template(self) -> str:
if self.__config_template is None:
template = self.config.get('template', '')
if not isinstance(template, str):
message = f'Option `template` for build hook `{self.PLUGIN_NAME}` must be a string'
raise TypeError(message)
self.__config_template = template
return self.__config_template
@property
def config_pattern(self) -> str | bool:
if self.__config_pattern is None:
pattern = self.config.get('pattern', '')
if not isinstance(pattern, (str, bool)):
message = f'Option `pattern` for build hook `{self.PLUGIN_NAME}` must be a string or a boolean'
raise TypeError(message)
self.__config_pattern = pattern
return self.__config_pattern
def initialize(self, version: str, build_data: dict[str, Any]) -> None:
version_file = VersionFile(self.root, self.config_path)
if self.config_pattern:
version_file.read(self.config_pattern)
version_file.set_version(self.metadata.version)
else:
version_file.write(self.metadata.version, self.config_template)
build_data['artifacts'].append(f'/{self.config_path}')
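# A minimal configuration sketch, assuming the hook is enabled from
# pyproject.toml; `path` is required, `template` and `pattern` are optional:
#
#   [tool.hatch.build.hooks.version]
#   path = "src/mypkg/_version.py"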
|
[
"noreply@github.com"
] |
pypa.noreply@github.com
|
922d426fc1139ee74ca4893cc71971950691e447
|
cec68acfc0187b7d92fb7d6e5107058e3f8269ea
|
/OOPRehber.py
|
ca98535ddc792915a8aac56bbfe827b36e878c99
|
[] |
no_license
|
vektorelpython/Python8
|
441575224100a687467c4934f7c741aa0c4bd087
|
d135fbf1444d56a0da38c42fd2e8feda48646f49
|
refs/heads/master
| 2022-01-18T12:17:40.387422
| 2019-09-07T13:47:55
| 2019-09-07T13:47:55
| 205,534,765
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,727
|
py
|
from datetime import datetime
import os
class OOPDosya():
dosyaFormat = ".csv"
def __init__(self,**kwargs):
self.dosya = None
for key,value in kwargs.items():
if key == "dosya":
self.adres =os.getcwd()+os.sep+value+self.dosyaFormat
if key == "veriler":
self.degiskenler = value
def dosyaAcma(self):
if os.path.exists(self.adres):
kip = "r+"
else:
kip = "w+"
self.dosya = open(self.adres,kip)
def dosyaAcma2(self,adres=""):
if os.path.exists(adres):
kip = "r+"
else:
kip = "w+"
self.dosya = open(adres,kip)
def HataLog(self,Mesaj="",HataMesaji="",HataYer=""):
try:
adim = "Log1"
self.dosyaAcma2(os.getcwd()+os.sep+"hata.csv")
adim = "Log2"
hata = "{};{};{};{}\n".format(Mesaj,HataMesaji,HataYer,str(datetime.now()))
adim = "Log3"
self.dosya.read()
adim = "Log4"
self.dosya.write(hata)
except Exception as hata:
print("Log Hatası",hata,adim)
finally:
self.dosya.close()
def dosyaOkuma(self):
try:
adim = "dO1A"
self.dosyaAcma()
adim = "dO1A1"
print("-"*20)
tumListe = self.dosya.readlines()
adim = "dO1A3"
for item in tumListe:
adim = "dO1A3_for"
liste = item.split(";")
print("{}-{}-{}-{}".format(tumListe.index(item)+1,liste[0],liste[1],liste[2]))
print("-"*20)
adim = "dO1A4"
except Exception as hata:
print(adim)
self.HataLog("Dosya Okuma",hata,adim)
def dosyaSilDuzelt(self,islem = 0):
self.dosyaOkuma()
kayitNum = input("Kayıt Seçiniz")
self.dosyaAcma()
liste = self.dosya.readlines()
if islem == 0:
kayit = self.veriTopla()
liste[int(kayitNum)-1] = kayit
elif islem == 1:
liste.pop(int(kayitNum)-1)
self.dosyaKayit(liste)
print("Kayıt İşlemi Gerçekleşti")
def dosyaYazma(self):
self.dosyaAcma()
kayit = self.veriTopla()
liste = self.dosya.readlines()
liste.append(kayit)
self.dosyaKayit(liste)
print("Kayıt İşlemi Gerçekleşti")
def veriTopla(self):
kayit = ""
for item in self.degiskenler:
kayit += input(item+" Giriniz")
if self.dosyaFormat == ".csv":
kayit += ";"
else:
kayit += "\t"
return kayit
def dosyaKayit(self,liste):
self.dosya.seek(0)
self.dosya.truncate()
self.dosya.writelines(liste)
self.dosya.close()
def dosyaArama(self):
arama = input("Aramak istediğiniz metni giriniz")
self.dosyaAcma()
liste = self.dosya.readlines()
sonuc = []
for item in liste:
eleman = item.split(";")
if arama in eleman[0] or arama in eleman[1] or arama in eleman[2]:
sonuc.append(item)
for item in sonuc:
liste = item.split(";")
print("{}-{}-{}-{}".format(sonuc.index(item)+1,liste[0],liste[1],liste[2]))
def dosyaYazma(self):
self.dosyaAcma()
kayit = self.veriTopla()
liste = self.dosya.readlines()
liste.append(kayit)
self.dosya.seek(0)
self.dosya.truncate()
self.dosya.writelines(liste)
self.dosya.close()
print("Kayıt İşlemi Gerçekleşti")
def Menu(self):
adim = ""
metin = """
1 - Arama
2 - Ekleme
3 - Silme
4 - Düzeltme
5 - Listeleme
6 - Çıkış
"""
while True:
print(metin)
try:
islem = int(input("İşlem Seçiniz"))
if islem == 1:
self.dosyaArama()
elif islem == 2:
self.dosyaYazma()
elif islem == 3:
self.dosyaSilDuzelt(1)
elif islem == 4:
self.dosyaSilDuzelt()
elif islem == 5:
adim = "AnaI5A"
self.dosyaOkuma()
adim = "AnaI5B"
elif islem == 6:
break
except Exception as hata:
self.HataLog("Ana Menü",hata,adim)
if __name__=="__main__":
defter = OOPDosya(dosya ="banka", veriler=["Adı","Soyadı","Banka Hesap No","Bakiye"])
defter.Menu()
|
[
"Kurs"
] |
Kurs
|
0077d6ec93d6be2f47c4029e0d2c475b5640c47f
|
3a4fbde06794da1ec4c778055dcc5586eec4b7d2
|
/@lib/12-13-2011-01/vyperlogix/ssl/__init__.py
|
eb74e37cfe6c8bc07909756a2517d38c03690188
|
[] |
no_license
|
raychorn/svn_python-django-projects
|
27b3f367303d6254af55c645ea003276a5807798
|
df0d90c72d482b8a1e1b87e484d7ad991248ecc8
|
refs/heads/main
| 2022-12-30T20:36:25.884400
| 2020-10-15T21:52:32
| 2020-10-15T21:52:32
| 304,455,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
__copyright__ = """\
(c). Copyright 2008-2014, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def fetch_from_web(url):
import requests
from vyperlogix.misc import _utils
ioBuf = _utils.stringIO()
request = requests.get(url)
for block in request.iter_content(1024):
if not block:
break
print >>ioBuf, block
return ioBuf.getvalue()
def fetch_from_ssl(url):
return fetch_from_web(url)
|
[
"raychorn@gmail.com"
] |
raychorn@gmail.com
|
3ecf8dfe7d9571bffd4b64faea23576dd474e7a9
|
14fc2ee47e1081416f0465e8afa18da33169095f
|
/src/PP4E/Internet/Other/http-getfile.py
|
457616b78f1fe71aee22b13b3dcdf0b7839d82a9
|
[] |
no_license
|
madtyn/progPython
|
d95ea8021b1a54433e7b73de9d3b11d53a3096b7
|
f3a1169149afdeb5191dd895462139f60d21d458
|
refs/heads/master
| 2021-07-09T13:35:27.519439
| 2017-10-04T14:46:57
| 2017-10-04T14:46:57
| 104,866,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
"""
fetch a file from an HTTP (web) server over sockets via http.client; the filename
parameter may have a full directory path, and may name a CGI script with ? query
parameters on the end to invoke a remote program; fetched file data or remote
program output could be saved to a local file to mimic FTP, or parsed with str.find
or html.parser module; also: http.client request(method, url, body=None, hdrs={});
"""
import sys, http.client
showlines = 6
try:
servername, filename = sys.argv[1:] # cmdline args?
except:
servername, filename = 'learning-python.com', '/index.html'
print(servername, filename)
server = http.client.HTTPConnection(servername) # connect to http site/server
server.putrequest('GET', filename) # send request and headers
server.putheader('Accept', 'text/html') # POST requests work here too
server.endheaders() # as do CGI script filenames
reply = server.getresponse() # read reply headers + data
if reply.status != 200: # 200 means success
print('Error sending request', reply.status, reply.reason)
else:
data = reply.readlines() # file obj for data received
reply.close() # show lines with eoln at end
for line in data[:showlines]: # to save, write data to file
print(line) # line already has \n, but bytes
|
[
"madtyn@gmail.com"
] |
madtyn@gmail.com
|
a5c236042dbe21c66ff0f9cabec1cdaf04ff4535
|
24353bdd2695f7d277f00f1397b2fcc06a1413fe
|
/omsdk/http/sdkrestpdu.py
|
1ef2eabd17449d6aab16f19fd08a9be4b26d8016
|
[
"Apache-2.0",
"GPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
DanielFroehlich/omsdk
|
f129eb43a9335e29a9b1bb01b29a5c886138ea82
|
475d925e4033104957fdc64480fe8f9af0ab6b8a
|
refs/heads/master
| 2020-04-10T00:16:48.223458
| 2018-12-06T14:00:33
| 2018-12-06T14:00:33
| 160,680,831
| 0
| 0
|
Apache-2.0
| 2018-12-06T13:46:07
| 2018-12-06T13:46:07
| null |
UTF-8
|
Python
| false
| false
| 4,655
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
import subprocess
import io
from xml.dom.minidom import parse
import xml.dom.minidom
import json
import re
import uuid
import sys
import xml.etree.ElementTree as ET
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
class RestRequest:
envAttrs = {
'xmlns:enc': 'http://www.w3.org/2003/05/soap-encoding',
'xmlns:env': 'http://www.w3.org/2003/05/soap-envelope',
'xmlns:tns': 'http://schemas.microsoft.com/wmx/2005/06',
# xmlns:a = xmlns:wsa
'xmlns:a': 'http://schemas.xmlsoap.org/ws/2004/08/addressing',
'xmlns:wse': 'http://schemas.xmlsoap.org/ws/2004/08/eventing',
# xmlns:n = xmlns:wsen
'xmlns:n': 'http://schemas.xmlsoap.org/ws/2004/09/enumeration',
# xmlns:w = xmlns:wsman
'xmlns:w': 'http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd',
# xmlns:b = xmlns:wsmb
'xmlns:b': 'http://schemas.dmtf.org/wbem/wsman/1/cimbinding.xsd',
'xmlns:wsmid': 'http://schemas.dmtf.org/wbem/wsman/identity/1/wsmanidentity.xsd',
# xmlns:x = xmlns:wxf
'xmlns:x': 'http://schemas.xmlsoap.org/ws/2004/09/transfer',
'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xmlns:p': 'http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd',
}
def __init__(self):
self.root = {}
self.selector = None
def enumerate(self, to, ruri, selectors, envSize=512000, mid=None, opTimeout=60):
return self
def set_header(self, to, ruri, action, envSize=512000, mid=None, opTimeout=60):
return self
def add_selectors(self, selectors):
return self
def add_body(self, ruri, action, args):
self.root = {}
sample = {
"ExportFormat": "XML",
"ShareParameters": {
"Target": "ALL",
"IPAddress": "10.9.9.9",
"ShareName": "sammba",
"ShareType": 0,
"UserName": "root",
"Password": "calvin",
"FileName": "/root/file.xml",
}
}
for i in args:
self.root[i] = str(args[i])
return self
def add_error(self, ex):
self.root = {
"Body": {
"ClientFault": {
"Reason": {
"Text": str(ex)
}
}
}
}
return self
def identify(self):
return self
def get_text(self):
return json.dumps(self.root)
class RestResponse:
def __init__(self):
pass
def strip_ns(self, s, stripNS):
return (re.sub(".*:", "", s) if stripNS else s)
def execute_str(self, value, stripNS=True):
return json.loads(value)
def get_message(self, fault):
msg = None
while fault != None and msg == None:
if not isinstance(fault, dict):
msg = fault
elif "Message" in fault:
if isinstance(fault["Message"], dict):
fault = fault["Message"]
else:
msg = fault["Message"]
elif "WSManFault" in fault:
fault = fault["WSManFault"]
else:
for field in fault:
if field.startswith("Fault"):
m = self.get_message(fault[field])
if not m is None:
msg = m
break
elif field == "Text":
msg = fault[field]
return msg
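# A minimal usage sketch (argument values are placeholders); `add_body` simply
# collects the passed arguments and `get_text` serializes them to JSON:
#
#   req = RestRequest().add_body(None, None, {"ExportFormat": "XML"})
#   payload = req.get_text()        # '{"ExportFormat": "XML"}'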
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
ecae9cbc32b6ec9ff2a4302c98197f778c67bb43
|
b3d86713ed58e0b7fe3c1191324e36659c0d9d78
|
/RegressionProgram/Testcode/kmeans/kmeans_test1.py
|
6cd86f98bfc94b55fd5f3dc719af5851cedc2e96
|
[] |
no_license
|
Kose-i/machine_learning_tutorial
|
3d6cb30a20d65c66aa6efcba0e693de75791507a
|
def223fecb459ad1a6e7f9f36b3d733a89efd378
|
refs/heads/master
| 2021-07-03T10:37:26.809388
| 2020-07-27T12:53:19
| 2020-07-27T12:53:19
| 174,057,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import kmeans
np.random.seed(0)
points1 = np.random.randn(50,2)
points2 = np.random.randn(50,2) + np.array([5,0])
points3 = np.random.randn(50,2) + np.array([5,5])
points = np.r_[points1, points2, points3]
np.random.shuffle(points)
model = kmeans.KMeans(3)
model.fit(points)
markers = ["+", "*", "o"]
for i in range(3):
p = points[model.labels_ == i, :]
plt.scatter(p[:,0], p[:,1], color="k", marker=markers[i])
plt.show()
|
[
"tamura.kosei905@mail.kyutech.jp"
] |
tamura.kosei905@mail.kyutech.jp
|
a3275208520062eedf2819e57546d3a412078bde
|
f1a5905649c19688f2d01998da805dbdd5b73a5d
|
/supervised_learning/0x06-keras/7-train.py.bak
|
834bd89ded9a75a8f806b9a74ca732608ed8c39f
|
[] |
no_license
|
Aishaharrash/holbertonschool-machine_learning
|
4d388daab993848b8c354af33478e14b04a6ef25
|
8a3792da58e6102293cd2b4aadc938c264ec3928
|
refs/heads/main
| 2023-06-07T07:16:37.643164
| 2021-07-09T19:14:47
| 2021-07-09T19:14:47
| 358,292,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,430
|
bak
|
#!/usr/bin/env python3
"""
7-train.py
Module that defines a function called train_model
"""
import tensorflow.keras as K
def train_model(network, data, labels, batch_size, epochs,
validation_data=None, early_stopping=False,
patience=0, learning_rate_decay=False, alpha=0.1,
decay_rate=1, verbose=True, shuffle=False):
"""
Function that trains a model using mini-batch gradient descent
Args:
network (keras model): model to train
data (np.ndarray): matrix of shape (m, nx) containing the input data
labels (np.ndarray): one hot matrix of shape (m, classes) containing
the labels of data
batch_size (int): size of the batch used for mini-batch gradient
descent
epochs (int): number of passes through data for mini-batch gradient
descent
validation_data (tuple): data to validate the model with, if not None
early_stopping(bool): indicates whether early stopping should be used
patience (int): the patience used for early stopping
learning_rate_decay (bool): indicates whether learning rate decay
should be used
alpha (float): learning rate
decay_rate (int): the decay rate
verbose (bool): determines if output should be printed during training
shuffle (bool): determines whether to shuffle the batches every epoch
Returns:
The History object generated after training the model.
"""
def step_decay(epoch):
"""Function that calculates the step decay"""
return alpha / (1 + decay_rate * epoch)
callbacks = []
if validation_data and learning_rate_decay:
callbacks.append(K.callbacks.LearningRateScheduler(step_decay,
verbose=1))
if validation_data and early_stopping:
callbacks.append(K.callbacks.EarlyStopping(monitor="val_loss",
patience=patience))
return network.fit(x=data,
y=labels,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
validation_data=validation_data,
shuffle=shuffle,
callbacks=callbacks)
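# A minimal usage sketch, assuming `network` is a compiled Keras model and the
# arrays below are preloaded numpy data (labels one-hot encoded):
#
#   history = train_model(network, X_train, Y_train_oh, batch_size=64, epochs=5,
#                         validation_data=(X_valid, Y_valid_oh),
#                         early_stopping=True, patience=2,
#                         learning_rate_decay=True, alpha=0.001)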
|
[
"vagrant@precise32.(none)"
] |
vagrant@precise32.(none)
|
cc1137e006408026a166b891fd18a04af1c92d3a
|
c0b10aa2dbb20d916cf77c31aab9f27c3003ecdb
|
/constructbinarysearchtreefrompreordertraversal1008.py
|
e8b70142385656e327035da093e1706b8af6380d
|
[] |
no_license
|
cc13ny/LeetcodePractices
|
f6ba4881ebaa6d739cc01217d21653ae971f837d
|
95f3344a14e10a8ba7816632a6d2177c6c81b8a3
|
refs/heads/master
| 2021-01-14T14:38:12.559455
| 2020-02-24T03:59:38
| 2020-02-24T03:59:38
| 242,645,175
| 0
| 0
| null | 2020-02-24T04:19:18
| 2020-02-24T04:19:17
| null |
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def bstFromPreorder(self, preorder):
#tree construction. insertion
if preorder == []:
return None
self.root = TreeNode(preorder[0])
for i in range(1, len(preorder),1):
self.insert(preorder[i])
return self.root
def insert(self,newItem):
pre = self.root
while pre:
if pre.val > newItem:
preP = pre
pre = pre.left
elif pre.val < newItem:
preP = pre
pre = pre.right
currentNode = TreeNode(newItem)
if preP.val > newItem:
preP.left = currentNode
else:
preP.right = currentNode
class Solution1:
def bstFromPreorder(self,preorder):
#recursive solution.
self.index = 0
def helper(lower = float('-inf'), upper = float('inf')):
if self.index>= len(preorder) or preorder[self.index]> upper or preorder[self.index] < lower:
return None
root = TreeNode(preorder[self.index])
self.index += 1
root.left = helper(lower, root.val)
root.right = helper(root.val, upper)
return root
return helper()
class Solution2:
def bstFromPreorder(self,preorder):
#iterative solution written by self.
if preorder == []:
return None
root = TreeNode(preorder[0])
stackList = [root]
for i in range(1, len(preorder)):
currentNode = TreeNode(preorder[i])
if stackList!=[] and preorder[i] > stackList[-1].val:
while stackList and currentNode.val > stackList[-1].val:
last = stackList.pop(-1)
last.right = currentNode
stackList.append(currentNode)
elif stackList!=[] and preorder[i] < stackList[-1].val:
stackList[-1].left = currentNode
stackList.append(currentNode)
elif stackList == []:
stackList.append(currentNode)
return root
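# A minimal usage sketch, assuming LeetCode's TreeNode class (commented out at
# the top of this file) is defined:
#
#   root = Solution2().bstFromPreorder([8, 5, 1, 7, 10, 12])
#   # root.val == 8, root.left.val == 5, root.right.val == 10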
|
[
"yiq.shang@gmail.com"
] |
yiq.shang@gmail.com
|
4177d7077e1bed4f0b9976c80926b62327c04b29
|
62ccdb11daefaecc8e63f235c7519cc7594f705a
|
/images/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/compute/sole_tenancy/node_groups/flags.py
|
e1dc5a919cd87800bfb7e546356817c7a02ba3cf
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
hiday1979/kalabasa-mas
|
eccc869bfe259bb474f9d2a4dc4b8561a481f308
|
53a9818eb2a6f35ee57c4df655e7abaaa3e7ef5b
|
refs/heads/master
| 2021-07-05T16:34:44.962142
| 2018-07-10T10:22:24
| 2018-07-10T10:22:24
| 129,709,974
| 0
| 1
| null | 2020-07-24T22:15:29
| 2018-04-16T08:27:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,212
|
py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for the `compute sole-tenancy node-groups` commands."""
from __future__ import absolute_import
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.command_lib.compute import flags as compute_flags
def MakeNodeGroupArg():
return compute_flags.ResourceArgument(
resource_name='node group',
zonal_collection='compute.nodeGroups',
zone_explanation=compute_flags.ZONE_PROPERTY_EXPLANATION)
def AddNoteTemplateFlagToParser(parser, required=True):
parser.add_argument(
'--node-template',
required=required,
help='The name of the node template resource to be set for this node '
'group.')
def AddCreateArgsToParser(parser):
"""Add flags for creating a node group to the argument parser."""
parser.add_argument(
'--description',
help='An optional description of this resource.')
AddNoteTemplateFlagToParser(parser)
parser.add_argument(
'--target-size',
required=True,
type=int,
help='The target initial number of nodes in the node group.')
def AddUpdateArgsToParser(parser):
"""Add flags for updating a node group to the argument parser."""
update_node_count_group = parser.add_group(mutex=True)
update_node_count_group.add_argument(
'--add-nodes',
type=int,
help='The number of nodes to add to the node group.')
update_node_count_group.add_argument(
'--delete-nodes',
metavar='NODE_INDEX',
type=arg_parsers.ArgList(element_type=int),
help='The indexes of the nodes to remove from the group.')
AddNoteTemplateFlagToParser(parser, required=False)
|
[
"accounts@wigitech.com"
] |
accounts@wigitech.com
|
362a7f37cbf41cc98839d61d62cb53d2eb8d9c2e
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/resolve/NumpyDocstringAttributeNameResolvesToInheritedClassAttribute.py
|
63f5ca2af921064b5e58ab99eb759e16d26333fb
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
class Foo:
"""
Attributes
----------
bar
Something cool
"""
bar = 1
class Baz(Foo):
"""
Attributes
----------
bar
<ref>
Re-documented but does exist still.
"""
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
84c352a89e220a60388723a844f438c3b940417c
|
caf8cbcafd448a301997770165b323438d119f5e
|
/.history/chapter01/python_05_if_condition_20201128215853.py
|
959c6f854e6f730e5993c42e442d673bb15cbb06
|
[
"MIT"
] |
permissive
|
KustomApe/nerdape
|
03e0691f675f13ce2aefa46ee230111247e90c72
|
aef6fb2d1f8c364b26d91bf8570b4487a24de69a
|
refs/heads/main
| 2023-01-23T10:13:26.584386
| 2020-11-28T22:29:49
| 2020-11-28T22:29:49
| 309,897,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
"""[if文について]
もし〜だったら、こうして
"""
# if 条件:
# 実行するブロック
# 条件によって処理を適応したい場合
# 3000kmごとにオイル交換しないといけない
distance = 3403
# if distance > 3000:
# print('オイル交換時期です')
# comparing strings / comparing lists
# if 'abc' == 'ABC':
# print('1同類です')
# if 'CDE' == 'CDE':
# print('2同類です')
# if 'あいうえお' == 'あいうえお':
# print('3同類です')
# if ['apple', 'banana'] == ['apple', 'banana']:
# print('1リスト同類')
# if ['apple', 'banana'] == ['APPLE', 'BANANA']:
# print('2リスト同類')
# if [1, 2, 3] == ['1', '2', '3']:
# print('3リスト同類')
# if [1, 2, 3] == [1, 2, 3]:
# print('4リスト同類')
# searching within a string / searching for list elements
if 'abc' in 'ABC':
print('1ヒットしました!')
if 'ドリフト' in '僕はドリフトが好きです':
print('2ヒットしました!')
if 'japan' in 'japanese domestic market vehicle':
print('3ヒットしました!')
if 12 in [12, 3, 4]:
print('1あります!')
if 345 in [3, 4, 5]:
print('2あります!')
# the else statement
# the elif statement
|
[
"kustomape@gmail.com"
] |
kustomape@gmail.com
|
8b09045d5961b139ce54084cacb2092c4503929d
|
352b4d34a5d6f9b5fb6949f92f32cb1154c738c3
|
/bin/quota-alignment/scripts/bed_utils.py
|
52af9c802e37b56f44eb4667f6be7ef0494dfab8
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
LyonsLab/coge
|
f03d812273a5ff18880c61663a4114f3cbc6d3d5
|
1d9a8e84a8572809ee3260ede44290e14de3bdd1
|
refs/heads/master
| 2022-01-21T22:29:57.269859
| 2021-12-21T19:56:46
| 2021-12-21T19:56:46
| 11,158,196
| 41
| 24
| null | 2017-02-03T20:57:44
| 2013-07-03T18:28:31
|
Perl
|
UTF-8
|
Python
| false
| false
| 3,433
|
py
|
"""
Classes to handle the .bed file and .raw file
"""
# get the gene order given a Bed object
get_order = lambda bed: dict((f['accn'], (i, f)) for (i, f) in enumerate(bed))
class BedLine(object):
# the Bed format supports more columns. we only need
# the first 4, but keep the information in 'stuff'.
__slots__ = ("seqid", "start", "end", "accn", "stuff")
def __init__(self, sline):
args = sline.strip().split("\t")
self.seqid = args[0]
self.start = int(args[1])
self.end = int(args[2])
self.accn = args[3]
self.stuff = args[4:] if len(args) > 4 else None
def __str__(self):
s = "\t".join(map(str, [getattr(self, attr) \
for attr in BedLine.__slots__[:-1]]))
if self.stuff:
s += "\t" + "\t".join(self.stuff)
return s
def __getitem__(self, key):
return getattr(self, key)
class Bed(object):
def __init__(self, filename):
self.filename = filename
self.beds = []
for line in open(filename):
if line[0] == "#": continue
if line.startswith('track'): continue
self.beds.append(BedLine(line))
self.seqids = sorted(set(b.seqid for b in self.beds))
self.beds.sort(key=lambda a: (a.seqid, a.start, a.accn))
def __getitem__(self, i):
return self.beds[i]
def __len__(self):
return len(self.beds)
def __iter__(self):
for b in self.beds:
yield b
def get_order(self):
return dict((f.accn, (i, f)) for (i, f) in enumerate(self))
def get_simple_bed(self):
return [(b.seqid, i) for (i, b) in enumerate(self)]
class RawLine(object):
__slots__ = ("seqid_a", "pos_a", "seqid_b", "pos_b", "score")
def __init__(self, sline):
args = sline.strip().split("\t")
self.seqid_a = args[0]
self.pos_a = int(args[1])
self.seqid_b = args[2]
self.pos_b = int(args[3])
self.score = int(args[4])
def __str__(self):
return "\t".join(map(str, [getattr(self, attr) \
for attr in RawLine.__slots__]))
def __getitem__(self, key):
return getattr(self, key)
class Raw(list):
def __init__(self, filename):
self.filename = filename
for line in open(filename):
if line[0] == "#": continue
self.append(RawLine(line))
class BlastLine(object):
__slots__ = ('query', 'subject', 'pctid', 'hitlen', 'nmismatch', 'ngaps', \
'qstart', 'qstop', 'sstart', 'sstop', 'evalue', 'score', \
'qseqid', 'sseqid', 'qi', 'si')
def __init__(self, sline):
args = sline.split("\t")
self.query = args[0]
self.subject = args[1]
self.pctid = float(args[2])
self.hitlen = int(args[3])
self.nmismatch = int(args[4])
self.ngaps = int(args[5])
self.qstart = int(args[6])
self.qstop = int(args[7])
self.sstart = int(args[8])
self.sstop = int(args[9])
self.evalue = float(args[10])
self.score = float(args[11])
def __repr__(self):
return "BlastLine('%s' to '%s', eval=%.3f, score=%.1f)" % \
(self.query, self.subject, self.evalue, self.score)
def __str__(self):
return "\t".join(map(str, [getattr(self, attr) \
for attr in BlastLine.__slots__][:-4]))
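# A minimal usage sketch of the Bed class (the "example.bed" path is hypothetical,
# not part of this module):
if __name__ == "__main__":
    bed = Bed("example.bed")        # parse a 4+ column BED file
    order = bed.get_order()         # {accn: (index, BedLine)}
    simple = bed.get_simple_bed()   # [(seqid, index), ...]
    print(len(bed), len(order), len(simple))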
|
[
"lyonsden@gmail.com"
] |
lyonsden@gmail.com
|
dfe9d224723292a459b863bc082a0024999c5ff6
|
f971b59661f080752f5ff09daf1afc6eed855c25
|
/genteams.py
|
da3c97bf653d54f382f847d46da7b92f8745b586
|
[] |
no_license
|
firstwiki/_scripts
|
ec11790e788e6627612711f018108d872e0edde0
|
c7471955d4dc2368489c5270cbf05a8db714aec0
|
refs/heads/master
| 2020-04-06T04:24:33.468256
| 2018-05-12T06:52:12
| 2018-05-12T06:52:12
| 56,561,704
| 1
| 2
| null | 2016-12-18T00:13:13
| 2016-04-19T03:28:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,972
|
py
|
#!/usr/bin/env python
from collections import OrderedDict
import csv
import sys
import os
from os.path import abspath, dirname, exists, join
import optparse
import frontmatter
import code_from_gh
import yaml
def read_team_csv(csv_fname):
with open(csv_fname) as fp:
reader = csv.reader(fp)
for row in reader:
yield [r.strip() for r in row]
def add_maybe(d, f, v):
if not v:
if f not in d:
d[f] = None
else:
d[f] = v
def add_maybe_web(d, k, nv):
if nv:
v = d.get(k)
if v is None or v.lower().strip('/') != nv.lower().strip('/'):
d[k] = nv
def main():
# input is teams csv datafile from TBA
# -> https://github.com/the-blue-alliance/the-blue-alliance-data
csv_fname = abspath(sys.argv[1])
max_team = int(sys.argv[2])
mode = sys.argv[3]
if mode not in ['new', 'update']:
print("Error: invalid mode")
return
os.chdir(abspath(join(dirname(__file__), '..')))
cwd = os.getcwd()
for row in read_team_csv(csv_fname):
# this changes on occasion...
number, name, sponsors, l1, l2, l3, website, rookie_year, \
facebook, twitter, youtube, github, instagram, periscope = row
name = name
rookie_year = rookie_year
if rookie_year:
rookie_year = int(rookie_year)
number = number[3:]
if int(number) > max_team:
continue
d1 = '%04d' % (int(int(number)/1000)*1000,)
d2 = '%03d' % (int(int(number)/100)*100,)
f = join(cwd, 'frc%s' % d1, '_frc', d2, '%s.md' % number)
if mode == 'new' and exists(f):
continue
if 'firstinspires' in website:
website = ''
if l3:
location = '%s, %s, %s' % (l1, l2, l3)
elif l2:
location = '%s, %s' % (l1, l2)
else:
location = l1
sponsors = [s.strip() for s in sponsors.split('/')]
if sponsors == ['']:
sponsors = None
else:
if '&' in sponsors[-1]:
sN = sponsors[-1].split('&')
del sponsors[-1]
sponsors += [s.strip() for s in sN]
if mode == 'update':
try:
fm = frontmatter.load(f)
except:
print("Error at %s" % f)
raise
reformatted = str(frontmatter.dumps(fm))
if 'team' not in fm.metadata:
raise Exception("Error in %s" % f)
team = fm.metadata['team']
if 'links' not in fm.metadata['team']:
links = OrderedDict()
else:
links = fm.metadata['team']['links']
else:
data = OrderedDict()
team = OrderedDict()
links = OrderedDict()
data['title'] = 'FRC Team %s' % number
data['team'] = team
team['type'] = 'FRC'
team['number'] = int(number)
add_maybe(team, 'name', name)
add_maybe(team, 'rookie_year', rookie_year)
add_maybe(team, 'location', location)
if sponsors and mode != 'update':
team['sponsors'] = sponsors
if 'Github' in links:
links['GitHub'] = links['Github']
del links['Github']
add_maybe_web(links, 'Website', website)
add_maybe_web(links, 'Facebook', facebook)
add_maybe_web(links, 'Twitter', twitter)
add_maybe_web(links, 'YouTube', youtube)
add_maybe_web(links, 'GitHub', github)
add_maybe_web(links, 'Instagram', instagram)
add_maybe_web(links, 'Periscope', periscope)
if mode == 'update':
if links:
fm.metadata['team']['links'] = links
if fm.content.strip() == 'No content has been added for this team':
fm.content = '{% include remove_this_line_and_add_a_paragraph %}'
page = str(frontmatter.dumps(fm))
if reformatted == page:
# don't make gratuitious changes
continue
elif mode == 'new':
if links:
team['links'] = links
page = '---\n%s\n---\n\n{%% include remove_this_line_and_add_a_paragraph %%}\n' % (
yaml.safe_dump(data)
)
# roundtrip through frontmatter to get the formatting consistent
page = frontmatter.dumps(frontmatter.loads(page))
if not exists(dirname(f)):
os.makedirs(dirname(f))
with open(f, 'w') as fp:
fp.write(page)
if __name__ == '__main__':
main()
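# Example invocation sketch, inferred from the sys.argv usage above (the CSV path
# and team cutoff are hypothetical):
#   python genteams.py teams.csv 9999 update
# argv[1] = TBA teams CSV, argv[2] = highest team number to process, argv[3] = 'new' or 'update'.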
|
[
"dustin@virtualroadside.com"
] |
dustin@virtualroadside.com
|
0bfc3d132f23f8bb46e283bf3c17aac860f485f1
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/errorlog_for_lts_request.py
|
7d39c203c27e140395863f4f45c45340b5c45009
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 8,936
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ErrorlogForLtsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'start_time': 'str',
'end_time': 'str',
'level': 'str',
'line_num': 'str',
'limit': 'int',
'search_type': 'str'
}
attribute_map = {
'start_time': 'start_time',
'end_time': 'end_time',
'level': 'level',
'line_num': 'line_num',
'limit': 'limit',
'search_type': 'search_type'
}
def __init__(self, start_time=None, end_time=None, level=None, line_num=None, limit=None, search_type=None):
"""ErrorlogForLtsRequest
The model defined in huaweicloud sdk
        :param start_time: Start time, in the format "yyyy-mm-ddThh:mm:ssZ", where T separates the date from the time and Z is the time zone offset (for example, +0800 for Beijing time).
        :type start_time: str
        :param end_time: End time, in the same "yyyy-mm-ddThh:mm:ssZ" format. Only logs from within the last month can be queried.
        :type end_time: str
        :param level: Log level; defaults to ALL.
        :type level: str
        :param line_num: Sequence number of a log line. Not needed for the first query, but required for subsequent paged queries; take it from the previous response. line_num must fall between start_time and end_time.
        :type line_num: str
        :param limit: Number of records (query results) per page, in the range 1-100; defaults to 10 when omitted.
        :type limit: int
        :param search_type: Search direction; defaults to forwards. Used together with line_num to search forwards or backwards starting from line_num.
        :type search_type: str
"""
self._start_time = None
self._end_time = None
self._level = None
self._line_num = None
self._limit = None
self._search_type = None
self.discriminator = None
self.start_time = start_time
self.end_time = end_time
if level is not None:
self.level = level
if line_num is not None:
self.line_num = line_num
if limit is not None:
self.limit = limit
if search_type is not None:
self.search_type = search_type
@property
def start_time(self):
"""Gets the start_time of this ErrorlogForLtsRequest.
        Start time, in the format "yyyy-mm-ddThh:mm:ssZ", where T separates the date from the time and Z is the time zone offset (for example, +0800 for Beijing time).
:return: The start_time of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ErrorlogForLtsRequest.
        Start time, in the format "yyyy-mm-ddThh:mm:ssZ", where T separates the date from the time and Z is the time zone offset (for example, +0800 for Beijing time).
:param start_time: The start_time of this ErrorlogForLtsRequest.
:type start_time: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ErrorlogForLtsRequest.
        End time, in the format "yyyy-mm-ddThh:mm:ssZ" (T separates the date from the time; Z is the time zone offset, e.g. +0800 for Beijing time). Only logs from within the last month can be queried.
:return: The end_time of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ErrorlogForLtsRequest.
        End time, in the format "yyyy-mm-ddThh:mm:ssZ" (T separates the date from the time; Z is the time zone offset, e.g. +0800 for Beijing time). Only logs from within the last month can be queried.
:param end_time: The end_time of this ErrorlogForLtsRequest.
:type end_time: str
"""
self._end_time = end_time
@property
def level(self):
"""Gets the level of this ErrorlogForLtsRequest.
        Log level; defaults to ALL.
:return: The level of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._level
@level.setter
def level(self, level):
"""Sets the level of this ErrorlogForLtsRequest.
        Log level; defaults to ALL.
:param level: The level of this ErrorlogForLtsRequest.
:type level: str
"""
self._level = level
@property
def line_num(self):
"""Gets the line_num of this ErrorlogForLtsRequest.
        Sequence number of a log line. Not needed for the first query, but required for subsequent paged queries; take it from the previous response. line_num must fall between start_time and end_time.
:return: The line_num of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._line_num
@line_num.setter
def line_num(self, line_num):
"""Sets the line_num of this ErrorlogForLtsRequest.
        Sequence number of a log line. Not needed for the first query, but required for subsequent paged queries; take it from the previous response. line_num must fall between start_time and end_time.
:param line_num: The line_num of this ErrorlogForLtsRequest.
:type line_num: str
"""
self._line_num = line_num
@property
def limit(self):
"""Gets the limit of this ErrorlogForLtsRequest.
        Number of records (query results) per page, in the range 1-100; defaults to 10 when omitted.
:return: The limit of this ErrorlogForLtsRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ErrorlogForLtsRequest.
        Number of records (query results) per page, in the range 1-100; defaults to 10 when omitted.
:param limit: The limit of this ErrorlogForLtsRequest.
:type limit: int
"""
self._limit = limit
@property
def search_type(self):
"""Gets the search_type of this ErrorlogForLtsRequest.
        Search direction; defaults to forwards. Used together with line_num to search forwards or backwards starting from line_num.
:return: The search_type of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._search_type
@search_type.setter
def search_type(self, search_type):
"""Sets the search_type of this ErrorlogForLtsRequest.
        Search direction; defaults to forwards. Used together with line_num to search forwards or backwards starting from line_num.
:param search_type: The search_type of this ErrorlogForLtsRequest.
:type search_type: str
"""
self._search_type = search_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorlogForLtsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
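# A minimal usage sketch (the timestamp and field values below are illustrative,
# not taken from the SDK documentation):
if __name__ == "__main__":
    _example = ErrorlogForLtsRequest(
        start_time="2023-01-01T00:00:00+0800",
        end_time="2023-01-02T00:00:00+0800",
        level="ERROR",
        limit=10,
    )
    print(_example.to_dict())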
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
c06ba9ae8cbb221882a80e62e7997bda650aa489
|
3a69627c55058a4c0933d16b29d0d7a7c91e172c
|
/students/management/commands/enroll_reminder.py
|
60b687eb6604cecdd298fc70012469af8e5fe88a
|
[] |
no_license
|
xor0x/educa
|
bb3647eba80fbabd07cb3604dfb3bb76e3a7fe12
|
6b97146d2e9f412645ccf04b63bfaee246a43b9e
|
refs/heads/master
| 2020-11-28T03:29:37.144939
| 2020-01-05T10:17:50
| 2020-01-05T10:17:50
| 229,693,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.mail import send_mass_mail
from django.contrib.auth.models import User
from django.db.models import Count
"""
from django.core import management
management.call_command('enroll_reminder', days=20)
"""
class Command(BaseCommand):
    help = 'Sends an e-mail reminder to users registered more than ' \
           'N days ago who are not enrolled in any courses yet'
def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int)
def handle(self, *args, **options):
emails = []
subject = 'Enroll in a course'
date_joined = datetime.date.today() - datetime.timedelta(days=options['days'])
users = User.objects.annotate(course_count=Count('courses_joined'))\
.filter(course_count=0, date_joined__lte=date_joined)
for user in users:
message = f"Dear {user.first_name},\n\n We noticed that you didn't" \
f"enroll in any courses yet. What are you waiting for?"
emails.append((subject,
message,
settings.DEFAULT_FROM_EMAIL,
[user.email]))
send_mass_mail(emails)
self.stdout.write(f'Sent {len(emails)} reminders')
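# Equivalent command-line invocation sketch (assumes the app providing this
# command is listed in INSTALLED_APPS of a standard Django project):
#   python manage.py enroll_reminder --days 20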
|
[
"you@example.com"
] |
you@example.com
|
e08edfe5a814aa3fdc8decd50ddae5f935b47d4f
|
f462679e25ee5dbae2a761f0222bc547f7b9da65
|
/backup/srcPython/srcPython_desk_100119/out_minxss_sma_average.py
|
780a1e8e0761b427b46f5edd893a2329c3362ac5
|
[
"Apache-2.0"
] |
permissive
|
FengYongQ/spock
|
f31a2f9cac58fbb1912f8e7b066b5318e0223835
|
08c01c01521429a70b5387e8769558e788f7cd3e
|
refs/heads/master
| 2021-06-13T03:09:23.903196
| 2020-01-25T03:32:41
| 2020-01-25T03:32:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
# Reads the output of minxss_sma_average.py
from read_input_file import *
from read_output_file import *
from orbit_average import *
def out_minxss_sma_average(main_input_filename_list, date_ini):
    pass
|
[
"bussyvirat@gmail.com"
] |
bussyvirat@gmail.com
|
00935ac28db148eb7bb1523af3c2be6cecafadc1
|
fcdfe976c9ed60b18def889692a17dc18a8dd6d7
|
/python/torch/list_cuda.py
|
c612de25cf8031c6597471eb13d2694a2b9c0425
|
[] |
no_license
|
akihikoy/ay_test
|
4907470889c9bda11cdc84e8231ef3156fda8bd7
|
a24dfb720960bfedb94be3b4d147e37616e7f39a
|
refs/heads/master
| 2023-09-02T19:24:47.832392
| 2023-08-27T06:45:20
| 2023-08-27T06:45:20
| 181,903,332
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
#!/usr/bin/python3
#\file list_cuda.py
#\brief List available CUDA devices.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Oct.01, 2021
import torch
if __name__=='__main__':
print('Number of CUDA devices:', torch.cuda.device_count())
for i in range(torch.cuda.device_count()):
print(' cuda:{}: {}'.format(i,torch.cuda.get_device_name('cuda:{}'.format(i))))
|
[
"info@akihikoy.net"
] |
info@akihikoy.net
|
5ad34a82314714deb74abfef98c2187642cbb641
|
ac1fdf53359b53e183fb9b2602328595b07cf427
|
/ParlAI/parlai/mturk/tasks/turn_annotations/constants.py
|
643cf8e3310559f96493d3bed502bd3c31b66074
|
[] |
no_license
|
Ufukdogann/MasterThesis
|
780410c5df85b789136b525bce86ba0831409233
|
b09ede1e3c88c4ac3047800f5187c671eeda18be
|
refs/heads/main
| 2023-01-24T18:09:52.285718
| 2020-11-27T16:14:29
| 2020-11-27T16:14:29
| 312,416,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f604cb6189404aba2ec507ce5de08423d480fef202b1e438be8dfa3fbc7537bb
size 3113
|
[
"134679852Ufuk*"
] |
134679852Ufuk*
|
eeacb3481567c6c734549b300e411fe3b2b860d9
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/python/eager/remote_test.py
|
edd00bc1f3f14fe6cc0d3a71becd864c3a8016f0
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 8,185
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for remote execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import server_lib
class SingleWorkerTest(test.TestCase):
def setUp(self):
super(SingleWorkerTest, self).setUp()
workers, _ = test_util.create_local_cluster(1, 0)
remote.connect_to_remote_host(workers[0].target)
def testMultiDeviceFunctionBasic(self):
@def_function.function
def basic(i):
with ops.device('/job:localhost/replica:0/task:0/cpu:0'):
a = constant_op.constant([2]) + i
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
b = constant_op.constant([1])
return a + b
self.assertAllEqual(basic(constant_op.constant([2])).numpy(), [5])
self.assertAllEqual(basic(constant_op.constant([1])).numpy(), [4])
def testMultiDeviceFunctionVariable(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def with_variable(i):
return i + variable_b
self.assertAllEqual(with_variable(constant_op.constant([2])).numpy(), [3])
def testMultiDeviceFunctionRemoteOutput(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def remote_output(i):
return variable_b, i + variable_b
with self.assertRaises(errors.UnimplementedError) as cm:
remote_output(constant_op.constant([1]))
self.assertIn(
'Currently, outputting tensors on remote devices is not supported.',
cm.exception.message)
def testMultiDeviceFunctionAmbiguousDevice(self):
@def_function.function
def ambiguous_device(i):
with ops.device('cpu:0'):
return i + constant_op.constant([2])
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
self.assertAllEqual(
ambiguous_device(constant_op.constant([2])).numpy(), [3])
self.assertIn('the output node must match exactly one device',
cm.exception.message)
class MultiWorkersTest(test.TestCase):
def setUp(self):
super(MultiWorkersTest, self).setUp()
workers, _ = test_util.create_local_cluster(3, 0)
remote.connect_to_remote_host(
[workers[0].target, workers[1].target, workers[2].target])
def testMultiDeviceFunctionOnLocalDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
c = a + 1.0
return c
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testMultiDeviceFunctionOnRemoteDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
c = a + 1.0
return c
context.context().mirroring_policy = context.MIRRORING_NONE
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
context.context().mirroring_policy = context.MIRRORING_ALL
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testMultiDeviceWhileLoopOnRemoteDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
def body(i, _):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
return a + 1.0, 1
return control_flow_ops.while_loop_v2(lambda _, d: d < 1, body, [i, 0])[0]
context.context().mirroring_policy = context.MIRRORING_NONE
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
context.context().mirroring_policy = context.MIRRORING_ALL
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testSimpleParameterServer(self):
with ops.device('/job:worker/task:2/device:CPU:0'):
v1 = variables.Variable(initial_value=0)
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
_GRPC_PREFIX = 'grpc://'
class MultiJobsTest(test.TestCase):
def setUp(self):
super(MultiJobsTest, self).setUp()
workers, ps = test_util.create_local_cluster(2, 1)
cluster = {
'my_worker': [
_strip_prefix(workers[0].target, _GRPC_PREFIX),
_strip_prefix(workers[1].target, _GRPC_PREFIX),
],
'my_ps': [_strip_prefix(ps[0].target, _GRPC_PREFIX)],
}
remote.connect_to_cluster(server_lib.ClusterSpec(cluster))
def testSimpleParameterServer(self):
with ops.device('/job:my_ps/task:0/device:CPU:0'):
v1 = variables.Variable(initial_value=0)
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:my_worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:my_worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
def _strip_prefix(s, prefix):
return s[len(prefix):] if s.startswith(prefix) else s
if __name__ == '__main__':
test.main()
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
44719b0024dc4b15ac8a0ec83ddb63cfba8e3093
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-4/d278160c1fd0e8c3b24ee96c4fd91dddbbfab668-<_print_figure_tex>-bug.py
|
60b91491bf1a7503ebe33d299abe5c10dfd4340c
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,481
|
py
|
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor, orientation, isLandscape, papertype, metadata=None, **kwargs):
"\n If text.usetex is True in rc, a temporary pair of tex/eps files\n are created to allow tex to manage the text layout via the PSFrags\n package. These files are processed to yield the final ps or eps file.\n\n metadata must be a dictionary. Currently, only the value for\n the key 'Creator' is used.\n "
isEPSF = (format == 'eps')
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
else:
raise ValueError('outfile must be a path or a file-like object')
self.figure.dpi = 72
(width, height) = self.figure.get_size_inches()
xo = 0
yo = 0
(l, b, w, h) = self.figure.bbox.bounds
llx = xo
lly = yo
urx = (llx + w)
ury = (lly + h)
bbox = (llx, lly, urx, ury)
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get('dryrun', False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = io.StringIO()
_bbox_inches_restore = kwargs.pop('bbox_inches_restore', None)
ps_renderer = self._renderer_class(width, height, self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure, width, height, dpi, ps_renderer, bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun:
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
if ((metadata is not None) and ('Creator' in metadata)):
creator_str = metadata['Creator']
else:
creator_str = (('matplotlib version ' + __version__) + ', http://matplotlib.org/')
(fd, tmpfile) = mkstemp()
with io.open(fd, 'w', encoding='latin-1') as fh:
print('%!PS-Adobe-3.0 EPSF-3.0', file=fh)
if title:
print(('%%Title: ' + title), file=fh)
print(('%%Creator: ' + creator_str), file=fh)
source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
if source_date_epoch:
source_date = datetime.datetime.utcfromtimestamp(int(source_date_epoch)).strftime('%a %b %d %H:%M:%S %Y')
else:
source_date = time.ctime()
print(('%%CreationDate: ' + source_date), file=fh)
print(('%%%%BoundingBox: %d %d %d %d' % bbox), file=fh)
print('%%EndComments', file=fh)
Ndict = len(psDefs)
print('%%BeginProlog', file=fh)
print(('/mpldict %d dict def' % Ndict), file=fh)
print('mpldict begin', file=fh)
for d in psDefs:
d = d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
print('end', file=fh)
print('%%EndProlog', file=fh)
print('mpldict begin', file=fh)
print(('%s translate' % _nums_to_str(xo, yo)), file=fh)
print(('%s clipbox' % _nums_to_str((width * 72), (height * 72), 0, 0)), file=fh)
print(self._pswriter.getvalue(), file=fh)
print('end', file=fh)
print('showpage', file=fh)
fh.flush()
if isLandscape:
isLandscape = True
(width, height) = (height, width)
bbox = (lly, llx, ury, urx)
if isEPSF:
(paperWidth, paperHeight) = self.figure.get_size_inches()
if isLandscape:
(paperWidth, paperHeight) = (paperHeight, paperWidth)
else:
temp_papertype = _get_papertype(width, height)
if (papertype == 'auto'):
papertype = temp_papertype
(paperWidth, paperHeight) = papersize[temp_papertype]
else:
(paperWidth, paperHeight) = papersize[papertype]
if (((width > paperWidth) or (height > paperHeight)) and isEPSF):
(paperWidth, paperHeight) = papersize[temp_papertype]
verbose.report(('Your figure is too big to fit on %s paper. %s paper will be used to prevent clipping.' % (papertype, temp_papertype)), 'helpful')
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag, font_preamble, custom_preamble, paperWidth, paperHeight, orientation)
if (rcParams['ps.usedistiller'] == 'ghostscript'):
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox, rotated=psfrag_rotated)
elif (rcParams['ps.usedistiller'] == 'xpdf'):
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox, rotated=psfrag_rotated)
elif rcParams['text.usetex']:
if False:
pass
else:
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox, rotated=psfrag_rotated)
if is_writable_file_like(outfile):
if file_requires_unicode(outfile):
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read().decode('latin-1'))
else:
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with io.open(outfile, 'wb') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
6e4c7c1f9873362de85924eedfa8590b9c5b2ebb
|
d7ee76b7f1d6cd038982335792f15959a58a8395
|
/SWEA/3234. 준환이의 양팔저울.py
|
4a705c6bef72acc0c16a6712a19c0da4eed31725
|
[] |
no_license
|
min1378/-algorithm
|
1c5dea6b2f03e4d376275cfccbf11b240bc659d9
|
bfb720277160077a816deec21469a7e597c62d14
|
refs/heads/master
| 2021-08-02T06:54:10.478501
| 2021-07-31T14:03:01
| 2021-07-31T14:03:01
| 202,688,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
def make_per(k):
if k == N:
scale(0, 0, 0)
else:
for i in range(N):
if visited[i]:
continue
visited[i] = True
per[k] = mass[i]
make_per(k + 1)
visited[i] = False
def scale(k, left, right):
global cnt
if k == N:
cnt += 1
return
else:
a = per[k]
k += 1
if left + a >= half and k < N:
remain = (N - k)
cnt += (2 ** remain)
else:
scale(k, left + a, right)
if left >= right + a:
if left >= half and k < N:
remain = (N - k)
cnt += (2 ** remain)
return
scale(k, left, right + a)
T = int(input())
for tc in range(1, T + 1):
N = int(input())
mass = list(map(int, input().split()))
mass_sum = sum(mass)
if mass_sum % 2 == 0:
half = mass_sum // 2
else:
half = mass_sum // 2 + 1
left = 0
right = 0
cnt = 0
k = 0
visited = [False] * N
per = [0] * N
make_per(k)
print("#%d %d" % (tc, cnt))
|
[
"qwes123@naver.com"
] |
qwes123@naver.com
|
e2e9258cf1c302d8bd7e00834cca4cb299126b6b
|
a95cf706c3111069c75055d558a710dfe8538195
|
/collective/dexteritytextindexer/tests/test_utils.py
|
ee435d7e486a1d2b1037d5c9026fa6f0d0928a0f
|
[] |
no_license
|
gusunavarro/collective.dexteritytextindexer
|
2854409eff0f843be8ed92febbbb6698e452c4d4
|
34394c1c9b2016a14985ae3314d45b3a695790eb
|
refs/heads/master
| 2021-01-16T21:28:00.404619
| 2012-03-16T11:49:09
| 2012-03-16T11:49:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
from collective.dexteritytextindexer.directives import SEARCHABLE_KEY
from collective.dexteritytextindexer.utils import searchable
from plone.directives import form
from plone.supermodel.utils import mergedTaggedValueList
from unittest2 import TestCase
from zope import schema
class IExample(form.Schema):
foo = schema.TextLine(title=u'foo')
class TestUtils(TestCase):
"""Test utils module.
"""
def test_marking_field_as_searchable(self):
self.assertEquals([], mergedTaggedValueList(IExample, SEARCHABLE_KEY))
searchable(IExample, u'foo')
self.assertEquals([(IExample, 'foo', 'true')],
mergedTaggedValueList(IExample, SEARCHABLE_KEY))
|
[
"jone@jone.ch"
] |
jone@jone.ch
|
04ec7b26a157c4907b015ca5d1c3f74ae18fd6f0
|
4ad0cfa350552458df8a0270038ed436bd1d06f4
|
/interface/login.py
|
c25249f7829acc58a9d598047ec4c22ca03e182d
|
[] |
no_license
|
fzk466569/python_tkinter
|
4b2e505f91bc4f73d632bb4fe029bd3a3b07c590
|
8c63ac171d171cd13c7891426841279f2ef53262
|
refs/heads/master
| 2021-01-21T11:26:38.127214
| 2017-08-31T13:15:27
| 2017-08-31T13:15:27
| 102,001,271
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
from tkinter import *
import tkinter.messagebox
from repository.user import login_check
from interface.main_form import MainForm
class Login(object):
def __init__(self):
self.login = Tk()
self.login.title('基于webshell的校园网络安全系统')
self.login.iconbitmap('../images/title.ico')
input = LabelFrame(self.login, text='输入你的个人账号', padx=5, pady=5)
input.pack(padx=10, pady=10)
Label(input, text='账号:').grid(row=0, column=0, sticky=W, padx=5, pady=10)
Label(input, text='密码:').grid(row=1, column=0, sticky=W, padx=5, pady=10)
self.username = Entry(input)
self.username.grid(row=0, column=1, padx=5, pady=10)
self.password = Entry(input, show='*')
self.password.grid(row=1, column=1, padx=5, pady=10)
commit = Button(input, text='提交', width=10,
command=self.confirm)
commit.grid(row=2, columnspan=3, pady=5)
mainloop()
def confirm(self):
name = self.username.get()
passwd = self.password.get()
if login_check(name, passwd):
self.login.destroy()
# self.login.withdraw()
MainForm()
else:
tkinter.messagebox._show(title='ERROR!', message='账号或密码错误')
if __name__ == '__main__':
Login()
|
[
"fzk466569"
] |
fzk466569
|
298c771a14c37a067196a174028e535fb052e119
|
47d1beba77ebde115c5d41b25a15ef144068c930
|
/news/forms.py
|
886b4b2d2460532d39973216f0431677c60234a9
|
[] |
no_license
|
uchicago-library/library_website
|
f32d7dcaf793b4646cac37ba7270715dccf84820
|
e5912a17ed2de3a61ede2fbebda4a258664ff696
|
refs/heads/master
| 2023-08-16T20:20:45.063253
| 2023-08-10T21:19:12
| 2023-08-10T21:19:12
| 39,917,251
| 5
| 4
| null | 2023-08-10T21:19:14
| 2015-07-29T21:27:58
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
from django import forms
class EmailNotificationTestForm(forms.Form):
email_from = forms.EmailField(label='From email address:')
email_to = forms.EmailField(label='To email address:')
num_days = forms.IntegerField(label='Number of days of news stories to summarize:')
email_as_if_date = forms.DateField(label='Send the message as if it were the following date. (Use YYYY-MM-DD format.)')
|
[
"jej@moss.lib.uchicago.edu"
] |
jej@moss.lib.uchicago.edu
|
6cbb7370e69db54a2b4d182c802e8fcd78aa82c8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_cosmologies.py
|
e9122b4cb835c96b392e497a2cc11aa129f2ef0e
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
# class header
class _COSMOLOGIES():
def __init__(self,):
self.name = "COSMOLOGIES"
self.definitions = cosmology
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['cosmology']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1c71026b627e3c90fc1188ef5c29c34a20fee5de
|
991cd70073c162f637fbec3a9e921707aa434b8e
|
/opentelemetry-resourcedetector-gcp/src/opentelemetry/resourcedetector/gcp_resource_detector/_gke.py
|
85588604c1b20ff45c51d068a2f19032307303ed
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/opentelemetry-operations-python
|
700a59c954cd18ae6428032339d01908580a4f2d
|
b0ca7decb6a5bb01409822e746b7463f4a7a76ba
|
refs/heads/main
| 2023-08-18T11:24:59.282098
| 2023-08-15T17:02:54
| 2023-08-15T17:02:54
| 244,484,614
| 49
| 42
|
Apache-2.0
| 2023-09-01T14:42:48
| 2020-03-02T22:00:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,677
|
py
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass
from opentelemetry.resourcedetector.gcp_resource_detector import (
_gce,
_metadata,
)
# TODO: remove when Python 3.7 is dropped
from typing_extensions import Literal
KUBERNETES_SERVICE_HOST_ENV = "KUBERNETES_SERVICE_HOST"
def on_gke() -> bool:
return os.environ.get(KUBERNETES_SERVICE_HOST_ENV) is not None
def host_id() -> str:
return _gce.host_id()
def cluster_name() -> str:
return _metadata.get_metadata()["instance"]["attributes"]["cluster-name"]
@dataclass
class ZoneOrRegion:
type: Literal["zone", "region"]
value: str
def availability_zone_or_region() -> ZoneOrRegion:
cluster_location = _metadata.get_metadata()["instance"]["attributes"][
"cluster-location"
]
hyphen_count = cluster_location.count("-")
if hyphen_count == 1:
return ZoneOrRegion(type="region", value=cluster_location)
if hyphen_count == 2:
return ZoneOrRegion(type="zone", value=cluster_location)
raise Exception(
f"unrecognized format for cluster location: {cluster_location}"
)
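# Illustration of the hyphen heuristic above (the locations are hypothetical examples):
#   "us-central1"   -> ZoneOrRegion(type="region", value="us-central1")
#   "us-central1-a" -> ZoneOrRegion(type="zone",   value="us-central1-a")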
|
[
"noreply@github.com"
] |
GoogleCloudPlatform.noreply@github.com
|
064eff60e1077689e1a891e530570737476af76d
|
749faa57b9adbe1ee762b0ad0e5f8fa1f71e1f20
|
/python/tests/utils_test.py
|
a552d125000c72741270f610a5266754f5b25655
|
[
"Apache-2.0"
] |
permissive
|
bitcard/olm-mirror
|
8c97257bdca8f9083a0234fd37e941883b298a9d
|
769d013ef7b20757d2f83ab2e933f660e38de2a7
|
refs/heads/master
| 2022-01-05T22:09:17.326695
| 2019-04-30T22:25:21
| 2019-04-30T22:25:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
import base64
import hashlib
from future.utils import bytes_to_native_str
from hypothesis import given
from hypothesis.strategies import text
from olm import sha256
from olm._compat import to_bytes
class TestClass(object):
@given(text(), text())
def test_sha256(self, input1, input2):
first_hash = sha256(input1)
second_hash = sha256(input2)
hashlib_hash = base64.b64encode(
hashlib.sha256(to_bytes(input1)).digest()
)
hashlib_hash = bytes_to_native_str(hashlib_hash[:-1])
if input1 == input2:
assert first_hash == second_hash
else:
assert first_hash != second_hash
assert hashlib_hash == first_hash
|
[
"poljar@termina.org.uk"
] |
poljar@termina.org.uk
|
3342dd166c1a4b52cda6b0d424dc3bfcc3d8b674
|
677002b757c0a1a00b450d9710a8ec6aeb9b9e9a
|
/tiago_public_ws/build/openslam_gmapping/catkin_generated/pkg.installspace.context.pc.py
|
00673387ed81d830eb08b81f6a79356ed99d2359
|
[] |
no_license
|
mrrocketraccoon/tiago_development
|
ce686c86459dbfe8623aa54cf4279021342887fb
|
a0539bdcf21b67ab902a4649b516dcb929c54042
|
refs/heads/main
| 2023-06-16T19:39:33.391293
| 2021-07-08T21:20:03
| 2021-07-08T21:20:03
| 384,249,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lgridfastslam;-lscanmatcher;-lsensor_base;-lsensor_range;-lsensor_odometry;-lutils".split(';') if "-lgridfastslam;-lscanmatcher;-lsensor_base;-lsensor_range;-lsensor_odometry;-lutils" != "" else []
PROJECT_NAME = "openslam_gmapping"
PROJECT_SPACE_DIR = "/tiago_public_ws/install"
PROJECT_VERSION = "0.1.2"
|
[
"ricardoxcm@hotmail.com"
] |
ricardoxcm@hotmail.com
|
5c9577919a2bb8b1da8842933a4f2f8656fe7c2c
|
c63fa9ad899c461aa6550a5404ffddb2d868a674
|
/scripts/simulator_scripts/simple_international_simulator.py
|
cf96c290d24922ef5d7c76a84d31d714f0a0e017
|
[] |
no_license
|
cooperoelrichs/model_of_australia
|
4f7dc065f8afad456c9700a33f11399731c91eff
|
40eb9d8bf093aa7af0ae939108e1ed13fe08e6a2
|
refs/heads/master
| 2021-07-20T03:43:29.293008
| 2017-10-26T04:30:14
| 2017-10-26T04:30:14
| 103,367,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
# Imports inferred from the usage below (pd, plt); SimpleInternationalGDPSimulator,
# un_gdp_pc and the model parameters are assumed to come from the surrounding project.
import pandas as pd
import matplotlib.pyplot as plt
n_years = 20
n_iter = 1e4
simple_internation_gdp_sim = SimpleInternationalGDPSimulator.run(
un_gdp_pc['Australia'].values,
shared_variance_international_gdp_model_parameters,
n_years, n_iter
)
subset = un_gdp_pc.columns.difference(['date'])
fig = plt.figure(figsize=(20, 10))
plt.plot(un_gdp_pc['date'], un_gdp_pc[subset]) # un_gdp_pc['Australia'])
date_range = pd.date_range(un_gdp_pc['date'].max() + pd.Timedelta(1, 'Y'), periods=n_years, freq='BAS')
plt.plot(date_range, simple_internation_gdp_sim.T)
plt.show()
print('Done.')
|
[
"c.oelrichs@gmail.com"
] |
c.oelrichs@gmail.com
|
454ad9a3d78b7229f76b2ccc28782e5ec2a0a0a2
|
ecf62aae48e02420cd99008f58c4725c6da56d22
|
/models/city.py
|
3cf1c50199c3fed43886a70749accf1730c301a9
|
[] |
no_license
|
ThibautBernard/AirBnB_clone
|
e3110415acd98b56134928eee0d2befb6bd68a25
|
d495dd85add4332880eacf00b338704c2799d3e5
|
refs/heads/main
| 2023-03-08T15:51:46.968249
| 2021-03-03T15:58:29
| 2021-03-03T15:58:29
| 337,568,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
#!/usr/bin/python3
from models.base_model import BaseModel
"""
Class that represents a city
"""
class City(BaseModel):
state_id = ""
name = ""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
[
"thibautbernard@sfr.fr"
] |
thibautbernard@sfr.fr
|
b5e0a9f369d3fccfbf3ceeae1aacce0df53aed4a
|
e3f64d087afb4f6dfd09940370d77e724a1886d7
|
/ex12.py
|
30b8c31a817dec36e526eb26c0ebe73fc45b2122
|
[] |
no_license
|
wangyi26/lpthw
|
41e95d0414bb706b8c85d61737be982cd8c712f5
|
8e8705bf5f0eb070cacf7e82648d70c5637e6ec4
|
refs/heads/master
| 2020-03-26T19:27:17.014680
| 2018-08-19T02:04:16
| 2018-08-19T02:04:16
| 145,265,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
age = input("How old are you?")
height = input("How old are you?")
weight = input("How much do you weigh?")
print(f"So,you're {age} old,{height} tall and {weight} heavy.")
|
[
"you@example.com"
] |
you@example.com
|
107ffaf4a6924bb92f6a0915474629036599fe63
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Basic Programming/Implementation/Basics of Implementation/Very Cool Numbers/solution.py
|
d0b76a17de7dac94d7100683084222e2f12c7eb2
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
from collections import Counter
from re import findall
t = int(input())
cases = []
x = 0
for _ in range(t):
r, k = map(int, input().strip().split())
cases.append((r, k))
x = max(x, r)
cools = []
for i in range(x + 1):
cools.append(len(findall("(?=101)", bin(i)[2:])))
cache = {}
for case in cases:
if case in cache:
print(cache[case])
continue
r, k = case
counter = Counter(cools[: r + 1])
occur = 0
for i, v in counter.items():
if i >= k:
occur += v
cache[(r, k)] = occur
print(occur)
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
ff921394e8df92a8806a79e4023bf057ce4b5314
|
b7125b27e564d2cc80a2ce8d0a6f934aa22c8445
|
/.history/sudoku_20201101151526.py
|
388887e85bc5a316b4aaff19358fd78c533e9129
|
[] |
no_license
|
JensVL96/Puzzle-solver-for-fun
|
4c15dcd570c3705b7ac555efb56b52913e81083c
|
6d8a4378a480372213a596a336a4deca727a00fc
|
refs/heads/master
| 2021-07-15T05:19:42.185495
| 2020-11-08T13:59:49
| 2020-11-08T13:59:49
| 224,855,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,354
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
global input_lock
input_lock = lock
global row_index
row_index = row
global col_index
    col_index = col
    global blk_index
    blk_index = blk
def get_cord(pos):
global box_index_x
box_index_x = (pos[0] - TOP_LX)//BLOCK_SIZE
global box_index_y
box_index_y = (pos[1] - TOP_LY)//BLOCK_SIZE
def valid(grid, x, y, val, increase):
input_lock = 0
row = col = blk = (0, 0)
for index in range(9):
# Check if value in column
if grid[x][index] == val:
col = (x, index)
input_lock = 1
# Check if value in row
if grid[index][y] == val:
row = (index, y)
input_lock = 1
# Finds the block
index_x = x // 3 # integer division
index_y = y // 3
# Check if value in block
for i in range(index_x * 3, index_x * 3 + 3):
for j in range (index_y * 3, index_y * 3 + 3):
if grid[i][j] == val:
blk = (i, j)
input_lock = 1
if input_lock == 1:
set_highlight(row, col, blk, input_lock)
return False
return True
class Main():
def __init__(self):
self.board = []
self.run()
def run(self):
pg.init()
self.screen = pg.display.set_mode(SCREEN_RES)
pg.display.set_caption('Sudoku solver')
display = Display_board(self.screen)
flag1 = 0
val = 0
pos = (0, 0)
input_lock = 0
get_cord((0, 0))
set_highlight((0, 0), (0, 0), (0, 0), input_lock)
board = create_board().board
while 1:
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
exit()
if event.type == pg.MOUSEBUTTONDOWN:
flag1 = 1
pos = pg.mouse.get_pos()
get_cord(pos)
if event.type == pg.KEYDOWN and input_lock != 1:
if event.key == pg.K_1:
val = 1
if event.key == pg.K_2:
val = 2
if event.key == pg.K_3:
val = 3
if event.key == pg.K_4:
val = 4
if event.key == pg.K_5:
val = 5
if event.key == pg.K_6:
val = 6
if event.key == pg.K_7:
val = 7
if event.key == pg.K_8:
val = 8
if event.key == pg.K_9:
val = 9
elif event.type == pg.KEYDOWN and input_lock == 1:
if event.key == pg.K_BACKSPACE:
val = 0
set_highlight((0, 0), (0, 0), (0, 0), 0)
if val != 0:
display.draw_val(val, box_index_x, box_index_y)
if valid(board, int(box_index_x), int(box_index_y), val, display):
board[int(box_index_x)][int(box_index_y)] = val
else:
board[int(box_index_x)][int(box_index_y)] = 0
val = 0
pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
self.screen.fill(BEIGE)
display.draw(board)
cell = display.find_cell(box_index_x, box_index_y)
alpha = display.blink()
            rect = pg.Surface((int(cell[2] - cell[0]), int(cell[3] - cell[1])))
            rect.set_alpha(alpha)
            self.screen.blit(rect, (cell[0], cell[1]))
# print(box_index_x, box_index_y)
if input_lock == 1:
display.update(board, row_index, col_index, blk_index)
# display.draw_box()
pg.display.update()
self.solution = solve_board(board)
self.solution.assign_flags(board)
if __name__ == '__main__':
Main()
|
[
"jle040@uit.no"
] |
jle040@uit.no
|
71800abbf276d7b24cd982bdac42bd2ef4473a07
|
b8fb00ee277478c368f5b7512bfd265f3ecea356
|
/python/if_condition/venv/Scripts/pip3.7-script.py
|
9ef9b0fdd160350cb17bebb111a37435f82b0d3d
|
[] |
no_license
|
DharmilShahJBSPL/DharmilShah
|
574477c38a8b76616618130f3b0679a23a9c1af8
|
0d197189c8dcf794d38145e8f1edba6766b02df9
|
refs/heads/master
| 2021-07-07T11:47:03.770219
| 2019-01-19T13:13:38
| 2019-01-19T13:13:38
| 152,415,037
| 0
| 1
| null | 2020-07-20T10:44:20
| 2018-10-10T11:48:36
|
Python
|
UTF-8
|
Python
| false
| false
| 414
|
py
|
#!E:\dharmil\python_task\if_condition\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"dharmil@jbspl.com"
] |
dharmil@jbspl.com
|
b7ba8e7bdee898506b7d76ce65bc25678a96140d
|
fe22e8ffdb1b2f1e11becc027e71a7a512fe56eb
|
/util/merge.py
|
83aafc267e2cfcc1d70ca06e26f9f32cd1dd0443
|
[] |
no_license
|
HEP-KBFI/stpol
|
3cdb5dc125bb0394f4531abfdfe9629b0c8d0fa4
|
962837a3341dd26391025b9a07a9c1c93084bf64
|
refs/heads/master
| 2020-06-03T16:15:14.743807
| 2015-08-05T09:00:28
| 2015-08-05T09:00:28
| 5,716,481
| 0
| 1
| null | 2015-03-04T08:23:28
| 2012-09-07T12:27:30
|
Python
|
UTF-8
|
Python
| false
| false
| 426
|
py
|
from subprocess import check_call
from os import walk
from os.path import join
import sys
from glob import glob
if __name__=="__main__":
ind = sys.argv[1]
for root, dirs, items in walk(ind):
items_in_dirs = map(lambda x: glob(join(root, x, "*.root")), dirs)
tot = sum(map(lambda x: len(x), items_in_dirs))
if tot>0:
for d, i in zip(dirs, items_in_dirs):
print d, i
|
[
"joosep.pata@gmail.com"
] |
joosep.pata@gmail.com
|
4550cce6732f67469786d7760b44a1040f883e0d
|
0e647273cffc1fb6cbd589fa3c7c277b221ba247
|
/configs/hpt-pretrain/bdd_crop_blur/moco_v2_800ep_basetrain/5000-iters.py
|
1294176225d78d0b2d97f068e432c64d14c478c4
|
[
"Apache-2.0"
] |
permissive
|
Berkeley-Data/OpenSelfSup
|
e9976bf011b69ebf918506ba184f464b1073ec13
|
221191b88d891de57725b149caf237ffef72e529
|
refs/heads/master
| 2023-05-12T07:34:52.268476
| 2021-04-08T00:58:37
| 2021-04-08T00:58:37
| 343,654,823
| 0
| 1
|
Apache-2.0
| 2021-04-08T00:58:37
| 2021-03-02T05:20:27
|
Python
|
UTF-8
|
Python
| false
| false
| 222
|
py
|
_base_="../base-bdd_crop_blur-config.py"
# this will merge with the parent
model=dict(pretrained='data/basetrain_chkpts/moco_v2_800ep.pth')
# epoch related
total_iters=5000
checkpoint_config = dict(interval=total_iters)
|
[
"taeil.goh@gmail.com"
] |
taeil.goh@gmail.com
|
9bbf7a1e9839497b6c4bf70667b9bdd845fc9d37
|
552bc626603a1757cf7836401cff5f0332a91504
|
/flask/doit_JumpToFlask/chap03/03-8/회원가입_기능추가/views/question_views.py
|
613d79cb4fa7e8e0f4c9bf1896a9b710c5702646
|
[] |
no_license
|
anifilm/webapp
|
85f3d0aae34f46917b3c9fdf8087ec8da5303df1
|
7ef1a9a8c0dccc125a8c21b22db7db4b9d5c0cda
|
refs/heads/master
| 2023-08-29T18:33:00.323248
| 2023-08-26T07:42:39
| 2023-08-26T07:42:39
| 186,593,754
| 1
| 0
| null | 2023-04-21T12:19:59
| 2019-05-14T09:49:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,448
|
py
|
from datetime import datetime
from flask import Blueprint, render_template, request, url_for, g
from werkzeug.utils import redirect
from pybo import db
from pybo.models import Question
from pybo.forms import QuestionForm, AnswerForm
from pybo.views.auth_views import login_required
bp = Blueprint("question", __name__, url_prefix="/question")
@bp.route("/list/")
def _list():
    page = request.args.get("page", type=int, default=1)  # page number
question_list = Question.query.order_by(Question.create_date.desc())
question_list = question_list.paginate(page, per_page=10)
return render_template("question/question_list.html", question_list=question_list)
@bp.route("/detail/<int:question_id>/")
def detail(question_id):
form = AnswerForm()
question = Question.query.get_or_404(question_id)
return render_template(
"question/question_detail.html", question=question, form=form
)
@bp.route("/create/", methods=("GET", "POST"))
@login_required
def create():
form = QuestionForm()
if request.method == "POST" and form.validate_on_submit():
question = Question(
subject=form.subject.data,
content=form.content.data,
create_date=datetime.now(),
user=g.user,
)
db.session.add(question)
db.session.commit()
return redirect(url_for("main.index"))
return render_template("question/question_form.html", form=form)
|
[
"anifilm02@gmail.com"
] |
anifilm02@gmail.com
|
cabf99b46a69479e6f2d4ebc046f5591e50b087b
|
09c39de5aad7b283cfac2f09a2b93e43086846d2
|
/Unit 10 Advanced Topics in Python/01 Advanced Topics in Python/Review/15-Iterating Over Dictionaries.py
|
f3e71da8c2d4077bb403646af94b8975a0d49188
|
[
"MIT"
] |
permissive
|
lpython2006e/python-samples
|
b4e84080259faf75b41fb2fd4fb9d2fbc9f857aa
|
b94ba67ce0d7798ecf796dadae206aa75da58301
|
refs/heads/master
| 2023-01-21T13:16:13.295163
| 2020-11-29T11:01:50
| 2020-11-29T11:01:50
| 278,653,779
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
movies = {
"Monty Python and the Holy Grail": "Great",
"Monty Python's Life of Brian": "Good",
"Monty Python's Meaning of Life": "Okay"
}
print(movies.items())
|
[
"lent@hivetech.vn"
] |
lent@hivetech.vn
|
5855ed722d94538a3f665167b38f410e54779d80
|
c51eef37bb983a9c35635c7ccc96a0cf689a7438
|
/sites/ncbi/04_links.py
|
8f13b4f8f330a1d06c2cd610f90644165411818d
|
[] |
no_license
|
Kyeongrok/python_crawler
|
0a717b43be36584af1b0f7c1ad0c79108a5d11e0
|
5a5da8af7bb080f752a9a066741ac8adab136a3a
|
refs/heads/master
| 2022-09-13T03:15:08.053639
| 2022-08-02T15:45:03
| 2022-08-02T15:45:03
| 124,719,435
| 40
| 34
| null | 2019-02-27T08:29:52
| 2018-03-11T03:20:32
|
HTML
|
UTF-8
|
Python
| false
| false
| 564
|
py
|
from bs4 import BeautifulSoup
def getLinks(pageStr):
bs_obj = BeautifulSoup(pageStr, "html.parser")
rprts = bs_obj.findAll("div", {"class":"rprt"})
links = []
for item in rprts:
atag = item.find("a")
links.append(atag["href"].replace("/pubmed/", ""))
return links
links = []
for num in range(2, 194):
print(num)
file = open("./links_page/"+str(num) + ".html")
result = getLinks(file.read())
links = links + result
links.sort()
file = open("./links.txt", "w+")
for link in links:
file.write(link + "\n")
|
[
"oceanfog1@gmail.com"
] |
oceanfog1@gmail.com
|
cdbcfbbd7551dcd8dd76c054b1f050e4cc635f35
|
5e9bb224cc0e79670016c78f5262f530a343f71a
|
/run/get_rede_3_class_counts.py
|
d28773b0a2251ba3271deaba6ac6d518753d6a22
|
[
"MIT"
] |
permissive
|
olavosamp/semiauto-video-annotation
|
6f86bb3b8b98bb1b910be1c95abf5474cd8526bb
|
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
|
refs/heads/master
| 2020-06-06T02:48:16.677814
| 2020-03-10T05:22:49
| 2020-03-10T05:22:49
| 192,616,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
import numpy as np
import pandas as pd
from tqdm import tqdm
from glob import glob
from pathlib import Path
# from copy import copy
import libs.dirs as dirs
import libs.utils as utils
import libs.dataset_utils as dutils
import libs.commons as commons
''' Get class counts from rede 3 dataset csv file resulting from fuse_binary_datasets script '''
# rede = int(input("\nEnter desired net number.\n"))
rede = 3
classList = commons.rede3_classes
compiledPositivesPath = Path(dirs.iter_folder) / "dataset_rede_{}_positives_binary.csv".format(rede)
datasetDf = pd.read_csv(compiledPositivesPath)
datasetGroup = datasetDf.groupby('rede3')
print(datasetGroup.count()['FrameHash'])
countDf = pd.DataFrame(datasetGroup.count()['FrameHash'])
countDf['Counts'] = countDf['FrameHash']
total = countDf['Counts'].sum()
countDf['Percentage'] = countDf['Counts'].apply(lambda x: x/total)
print(countDf)
print(total)
countDf.drop("FrameHash", axis=1, inplace=True)
countDf.to_excel(compiledPositivesPath.with_name("semiauto_class_counts.xlsx"))
|
[
"olavosamp@poli.ufrj.br"
] |
olavosamp@poli.ufrj.br
|
fbff749542a7d8d29c63a1b8284959aa9c8f310d
|
865aeaf85b7cf0a27b04b5c563dee2b79443e6b7
|
/docs/support/trace_support.py
|
f5044d28a57d4fbd35f16e3094092849c63f2f4c
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
pmacosta/peng
|
d8cc6e8757d62fb9968a42eaf11b5b87227a8b47
|
ab05fac3c0a6c0f8c70ab3e456b5cc57f0484389
|
refs/heads/master
| 2021-01-21T14:32:42.863574
| 2019-06-11T14:30:38
| 2019-06-11T14:30:38
| 58,596,416
| 0
| 2
|
MIT
| 2019-03-08T15:49:44
| 2016-05-12T01:24:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
# trace_support.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0411,E0401,E0611,W0212
# Standard library imports
from __future__ import print_function
import collections
import copy
import datetime
import os
import warnings
# PyPI imports
with warnings.catch_warnings():
from _pytest.warning_types import PytestWarning
warnings.filterwarnings("ignore", category=PytestWarning)
import pytest
import pmisc
import pexdoc.exdoc
###
# Functions
###
def trace_pars(mname):
"""Define trace parameters."""
pickle_fname = os.path.join(os.path.dirname(__file__), "{0}.pkl".format(mname))
ddir = os.path.dirname(os.path.dirname(__file__))
moddb_fname = os.path.join(ddir, "moddb.json")
in_callables_fname = moddb_fname if os.path.exists(moddb_fname) else None
out_callables_fname = os.path.join(ddir, "{0}.json".format(mname))
noption = os.environ.get("NOPTION", None)
exclude = ["_pytest", "execnet"]
partuple = collections.namedtuple(
"ParTuple",
[
"pickle_fname",
"in_callables_fname",
"out_callables_fname",
"noption",
"exclude",
],
)
return partuple(
pickle_fname, in_callables_fname, out_callables_fname, noption, exclude
)
def run_trace(
mname,
fname,
module_prefix,
callable_names,
no_print,
module_exclude=None,
callable_exclude=None,
debug=False,
):
"""Run module tracing."""
# pylint: disable=R0913
module_exclude = [] if module_exclude is None else module_exclude
callable_exclude = [] if callable_exclude is None else callable_exclude
par = trace_pars(mname)
start_time = datetime.datetime.now()
with pexdoc.exdoc.ExDocCxt(
exclude=par.exclude + module_exclude,
pickle_fname=par.pickle_fname,
in_callables_fname=par.in_callables_fname,
out_callables_fname=par.out_callables_fname,
_no_print=no_print,
) as exdoc_obj:
fname = os.path.realpath(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"tests",
"test_{0}.py".format(fname),
)
)
test_cmd = (
["--color=yes"]
+ (["-s", "-vv"] if debug else ["-q", "-q", "-q"])
+ ["--disable-warnings"]
+ ["-x"]
+ ([par.noption] if par.noption else [])
+ ["-m " + mname]
+ [fname]
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=PytestWarning)
if pytest.main(test_cmd):
raise RuntimeError("Tracing did not complete successfully")
stop_time = datetime.datetime.now()
if not no_print:
print(
"Auto-generation of exceptions documentation time: {0}".format(
pmisc.elapsed_time_string(start_time, stop_time)
)
)
for callable_name in callable_names:
callable_name = module_prefix + callable_name
print("\nCallable: {0}".format(callable_name))
print(exdoc_obj.get_sphinx_doc(callable_name, exclude=callable_exclude))
print("\n")
return copy.copy(exdoc_obj)
|
[
"pmasdev@gmail.com"
] |
pmasdev@gmail.com
|
3ef27a31c380abbfdb38ff95db3f8d2b9b8d285f
|
36e12b65922ebbb6d95aff6cbac0777c47e24153
|
/distinctregions_cGcC.py
|
352b42f3b5be614d3c66d47944522d02075bcf1d
|
[
"MIT"
] |
permissive
|
NailouZhang/AnalysisScripts
|
d0d00174f642d6722cc907f9a392084600630780
|
3df37d2f8fca9bc402afe5ea870c42200fca1ed3
|
refs/heads/master
| 2023-06-06T08:14:39.064920
| 2021-06-22T16:46:26
| 2021-06-22T16:46:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,677
|
py
|
#Want to look at cGcC scores for "distinct" 3' UTR regions to get weighted cGcC scores based on the "distalness" of a sequence.
import re
import sys
from Bio import SeqIO
import numpy as np
import gffutils
import os
import gzip
#Need to get UTR regions that are distinct to each isoform
#Start with gff of 3' UTR regions
def getdistinctregions(gff, genomefasta):
distinctregions = {} #{geneid : {transcriptid(s) : [3UTR number, distinctUTRseq]}}
print 'Indexing gff...'
gff_fn = gff
db_fn = os.path.basename(gff_fn) + '.db'
if os.path.isfile(db_fn) == False:
gffutils.create_db(gff_fn, db_fn, merge_strategy = 'merge', verbose = True)
db = gffutils.FeatureDB(db_fn)
print 'Done indexing!'
print 'Indexing genome sequence...'
seq_dict = SeqIO.to_dict(SeqIO.parse(gzip.open(genomefasta), 'fasta'))
print 'Done indexing!'
genes = db.features_of_type('gene')
for gene in genes:
        distinctseqs = {} #{transcriptid(s) : [pA site counter (may differ from the number of UTRs because not all UTRs are represented here), distinctUTRseq]}
seenseqs = []
utrcounter = 0
mostdownstreamcoord = 0 #The most downstream coordinate of any UTR we've seen so far for this gene.
geneid = str(gene.id).replace('gene:', '').split('.')[0]
if gene.strand == '+':
for UTR3 in db.children(gene, featuretype = 'UTR3', level = 1, order_by = 'end'):
distinctseq = ''
UTRid = str(UTR3.id).replace('UTR3:', '').split('.')[0]
#If this is the first UTR for this gene
if utrcounter == 0:
for exon in db.children(UTR3, featuretype = 'exon', level = 1, order_by = 'start'):
exonseq = seq_dict[exon.chrom].seq[exon.start-1:exon.end].upper()
distinctseq += exonseq
mostdownstreamcoord = UTR3.end
utrcounter +=1
distinctseqs[UTRid] = [utrcounter, str(distinctseq)]
elif utrcounter >= 1:
for exon in db.children(UTR3, featuretype = 'exon', level = 1, order_by = 'start'):
#If this exon is somehow contained within the last one (should not be possible), skip it
if exon.end <= mostdownstreamcoord:
pass
elif exon.end > mostdownstreamcoord:
if exon.start < mostdownstreamcoord:
exonseq = seq_dict[exon.chrom].seq[mostdownstreamcoord:exon.end].upper()
distinctseq += exonseq
elif exon.start >= mostdownstreamcoord:
exonseq = seq_dict[exon.chrom].seq[exon.start - 1:exon.end].upper()
distinctseq += exonseq
mostdownstreamcoord = UTR3.end
#Only going to call something a new polyA site if it's at least 50 nt away from the previous one
#As a proxy for this, it must have at least 50 nt of "distinct" sequence
if len(str(distinctseq)) >= 50:
utrcounter +=1
distinctseqs[UTRid] = [utrcounter, str(distinctseq)]
elif gene.strand == '-':
for UTR3 in db.children(gene, featuretype = 'UTR3', level = 1, order_by = 'start', reverse = True):
distinctseq = ''
UTRid = str(UTR3.id).replace('UTR3:', '').split('.')[0]
#If this is the first UTR for this gene
if utrcounter == 0:
for exon in db.children(UTR3, featuretype = 'exon', level = 1, order_by = 'end', reverse = True):
exonseq = seq_dict[exon.chrom].seq[exon.start-1:exon.end].reverse_complement().upper()
#Must prepend instead of append this time
distinctseq = distinctseq + exonseq
mostdownstreamcoord = UTR3.start
utrcounter +=1
distinctseqs[UTRid] = [utrcounter, str(distinctseq)]
elif utrcounter >= 1:
for exon in db.children(UTR3, featuretype = 'exon', level = 1, order_by = 'end', reverse = True):
#If this exon is somehow contained within the last one (should not be possible), skip it
if exon.start >= mostdownstreamcoord:
continue
elif exon.start < mostdownstreamcoord:
if exon.end > mostdownstreamcoord:
exonseq = seq_dict[exon.chrom].seq[exon.start-1:mostdownstreamcoord-1].reverse_complement().upper()
distinctseq = distinctseq + exonseq
elif exon.start <= mostdownstreamcoord:
exonseq = seq_dict[exon.chrom].seq[exon.start-1:exon.end].reverse_complement().upper()
distinctseq = distinctseq + exonseq
mostdownstreamcoord = UTR3.start
if len(str(distinctseq)) >= 50:
utrcounter +=1
distinctseqs[UTRid] = [utrcounter, str(distinctseq)]
distinctregions[geneid] = distinctseqs
return distinctregions
def getcGcC(seq):
#Do we want cGcC over the whole seq?
#Mean over 80 bp windows?
#Max score over all windows?
windowsize = 50
cGcCscores = []
for i in range(len(seq) - windowsize + 1):
window = seq[i:i+windowsize]
if window.count('G') == 0:
maxG = 0
else:
maxG = max(len(s) for s in re.findall(r'G+', window))
if window.count('C') == 0:
maxC = 0
else:
maxC = max(len(s) for s in re.findall(r'C+', window))
longestrun = max(maxG, maxC)
cGscore = 0
cCscore = 0
#First get the cG score
for i in range(1, longestrun + 1):
searchstring = 'G' * i
matches = re.findall(r'(?=({0}))'.format(searchstring), window)
score = len(matches) * i
cGscore += score
#Now the cC score
for i in range(1, longestrun + 1):
searchstring = 'C' * i
matches = re.findall(r'(?=({0}))'.format(searchstring), window)
score = len(matches) * i
cCscore += score
if cCscore == 0:
cGcCscore = cGscore
else:
cGcCscore = cGscore / float(cCscore)
cGcCscores.append(cGcCscore)
meanscore = np.mean(cGcCscores)
maxscore = max(cGcCscores)
return maxscore
#For every transcript with > 1 distinct region, calculate scores for each region. Then multiply that score by position factor.
#Take sum of weighted scores and divide by sum of all scores to produce a "PSI" value.
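#A minimal sketch of that weighting step (an assumption: the "position factor" is not
#defined in this file, so the pA-site order stored in distinctseqs is used as the weight;
#swap in the intended "distalness" factor as needed).
def getweightedpsi(distinctseqs):
    #distinctseqs = {UTRid : [pA-site order, distinctUTRseq]}
    weightedsum = 0.0
    totalsum = 0.0
    for utrid in distinctseqs:
        order, seq = distinctseqs[utrid]
        score = getcGcC(seq)
        weightedsum += score * order #assumed position factor = pA-site order
        totalsum += score
    if totalsum == 0:
        return None
    return weightedsum / totalsum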
|
[
"taliaferrojm@gmail.com"
] |
taliaferrojm@gmail.com
|
356b615d823669751feac2502978c4b41465f695
|
17ec70a0387905f84f7fc1e3ee7f3428dd4e7874
|
/Aoj/dpl/dpl_1_b.py
|
b1eb71bc3859fb849884a5eabde842e4d01b4db4
|
[] |
no_license
|
onikazu/ProgramingCompetitionPractice
|
da348e984b6bcb79f96f461d9df15a33730169b2
|
5a682943976bcac8646176feef9b70a6784abd8a
|
refs/heads/master
| 2021-02-09T06:27:54.994621
| 2020-03-14T02:28:50
| 2020-03-14T02:28:50
| 244,252,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
n, limit = map(int, input().split())
items = [tuple(map(int, input().split())) for _ in range(n)]
dp = [([0] + [0] * limit) for _ in range(n+1)]
for i in range(1, n+1):
v, w = items[i-1]
for j in range(limit+1):
if j < w:
dp[i][j] = dp[i-1][j]
else:
dp[i][j] = max(dp[i-1][j], dp[i-1][j-w]+v)
print(dp[-1][-1])
|
[
"programingmanagement@gmail.com"
] |
programingmanagement@gmail.com
|
40bf41cc14776bfc93ee66abe4f2e8c67807d90d
|
3090b3e964601e0392a03c903d28f324b4351936
|
/src/demo/urls.py
|
fd264174fd4d5bde963b1d4b28312df4909f73b0
|
[] |
no_license
|
infoxchange/django-verification
|
eeebb4f7372ed95d43d8afd6f7b20ebdaa0e295e
|
51ac7a648863393d44fe7a2813eccbfbee2eb615
|
refs/heads/master
| 2021-01-24T23:42:01.913727
| 2014-07-29T08:29:12
| 2014-07-29T08:29:12
| 24,980,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
from __future__ import unicode_literals
from django.contrib import admin
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = staticfiles_urlpatterns()
urlpatterns += patterns("",
url(r'^$', TemplateView.as_view(template_name='index.html')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('demo.projectapp.urls')),
url(r'^verify/', include('verification.urls')),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', name='logout'),
)
|
[
"kaleissin@gmail.com"
] |
kaleissin@gmail.com
|
e9406c5e1be4ae6d6959da2fb43a7d0f3fdd50d9
|
779afab3a8fd338a8a5b82284ca1269090ff92ee
|
/3 Processing/2D/BubbleSort/BubbleSort.pyde
|
77c8f6c2685f9e8c88d84b4121f49d1b271ba1ac
|
[] |
no_license
|
vitroid/PythonTutorials
|
341ea037d168f744bee42c26c1c9408763b4bb50
|
32dd7325ca7099954f5eb33927ec9e122bb54066
|
refs/heads/master
| 2023-08-03T05:15:48.254380
| 2023-08-01T08:29:52
| 2023-08-01T08:29:52
| 36,958,329
| 3
| 0
| null | 2016-05-06T01:13:23
| 2015-06-05T22:22:43
|
Python
|
UTF-8
|
Python
| false
| false
| 835
|
pyde
|
"""
A visualization of the sorting process.
"""
def bubblesort(data):
for x in range(len(data)):
for y in range(x+1,len(data)):
if data[x] > data[y]:
data[x],data[y] = data[y],data[x]
yield data
pix = 8
import random
data = [random.random() for i in range(50)]
iter = bubblesort(data)
def setup():
size(pix*len(data),pix*len(data))
#frameRate(3)
def draw():
d = next(iter, None)
if d == None:
noLoop()
else:
background(0)
fill(255)
noStroke()
textSize(24)
text("{0}".format(frameCount+1),10,20)
noFill()
drawone(d)
def drawone(d):
noFill()
stroke(255)
strokeWeight(2)
for i in range(len(d)):
line(0,pix*i,d[i]*width,pix*i)
|
[
"vitroid@gmail.com"
] |
vitroid@gmail.com
|
e6d8834ca974ac3aabfaa64cbd846a640643e735
|
d57ebd908a78ad6b34f6b217f14d53fc894ff791
|
/Algorithm/1836.py
|
e826069535f81c8de1cf17c21aeef474811d8727
|
[] |
no_license
|
ruanhq/Leetcode
|
df732730b63988b808a382ad17bddc49e00bc3a5
|
fcb4ccbc9e84e1d2bd4541efc040bd957a96a3a2
|
refs/heads/master
| 2023-07-05T04:01:43.097980
| 2021-08-08T06:12:14
| 2021-08-08T06:12:14
| 320,983,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
#1836. Remove duplicates from an unsorted linked list:
import collections
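# ListNode is provided by the LeetCode judge and is not defined in this snippet;
# a minimal stand-in (an assumption, only for running the file locally):
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next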
class Solution:
def deleteDuplicatesUnsorted(self, head: ListNode) -> ListNode:
currentHead = head
freqMap = {}
#Construct a frequency map of the element in the linked list by scanning through.
while currentHead:
if currentHead.val in freqMap:
freqMap[currentHead.val] += 1
else:
freqMap[currentHead.val] = 1
currentHead = currentHead.next
result = ListNode(None)
currentResult = result
currentHead = head
#Only maintain those element which has frequency 1 in the linked list:
while currentHead:
if freqMap[currentHead.val] == 1:
currentResult.next = ListNode(currentHead.val)
currentResult = currentResult.next
currentHead = currentHead.next
#Return the head of the newly constructed linked list:
return result.next
#Different methodology using defaultdict:
def deleteDuplicatesUnsorted(self, head: ListNode) -> ListNode:
dicts = collections.defaultdict(int)
currHead = head
while currHead:
dicts[currHead.val] += 1
currHead = currHead.next
        dummyNode = ListNode(None)
        dummyNode.next = head  # keep a sentinel in front of the list so deletions at the head work
previousNode = dummyNode
while head:
if dicts[head.val] > 1:
previousNode.next = head.next
else:
previousNode = previousNode.next
head = head.next
return dummyNode.next
reducing size headaches: why use stride of 1 in CONV?
Why use padding?
Compromising based on memory constraints?
Bi-directional LSTM -> LSTM:
Compare with the logistic regression ->
Took a calculated risk
Worked beyond your responsibility?
Talk about a true failure?
|
[
"hruan@ucdavis.edu"
] |
hruan@ucdavis.edu
|
e38ce9a9d274b921ae53a30071c5f87e314be6a9
|
67487a6df520e3ddbd918fdf4b9c8dcd4c783147
|
/PythonExercicios/Mundo 3/17_modulos_e_pacotes/ex111/teste.py
|
a08e94b1c22d868580dc033e894c3bfcd761062e
|
[
"MIT"
] |
permissive
|
GuilhermoCampos/Curso-Python3-curso-em-video
|
c7d7a25f87f5a8fd706f34030282f12b1dc88e01
|
723767bc6069e9c1fa9e28fe412e694f9eb8d05e
|
refs/heads/master
| 2021-05-18T22:33:48.062467
| 2020-09-20T10:52:40
| 2020-09-20T10:52:40
| 251,457,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# Create a package called utilidadesCeV that has two internal
# modules called moeda and dado.
# Move the functions used in challenges 107, 108 and 109
# into the first package and keep everything working.
from utilidadescev import moeda
# Main program
num = float(input('Digite um valor: '))
moeda.resumo(num, 80, 35)
|
[
"GuilhermoCampos@users.noreply.github.com"
] |
GuilhermoCampos@users.noreply.github.com
|
352740fa47b921639904c9a26c20c6b293eda782
|
61357ffa1e3b1174864a52da12c23500cfbc462a
|
/chatter.py
|
02689dcecbb652d21784630c84d74085dc2c3d74
|
[] |
no_license
|
alexcg1/wechat-tools
|
6968229a6c3440552b5612f9f9402bd3e613dc46
|
0709dedec7127a78a21eab2d56c49958aedd199f
|
refs/heads/master
| 2022-12-13T12:00:43.164734
| 2018-10-17T04:06:14
| 2018-10-17T04:06:14
| 149,095,345
| 0
| 0
| null | 2022-12-08T02:56:23
| 2018-09-17T08:45:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,843
|
py
|
#!/usr/bin/env python3
import pyperclip
import readline
import threading
import time
import sys
import os
import itchat
from datetime import datetime
from itchat.content import *
from wem_functions import *
from pprint import pprint
from colorama import Fore, Back, Style
global chat_partner
App.start()
global output_widget
# download_dir = os.getenv("HOME")+'/Downloads/wechat'
# download_dir = File.download_dir
command_char = "\\"
to_me, from_me = Fore.GREEN, Fore.BLUE
unstyle = Style.RESET_ALL
Account.login()
friends = Account.friends_and_rooms()
me = friends[0]
print("Using WeChat as"+from_me,me['NickName'],me['UserName']+unstyle)
print("Files will be downloaded to",File.download_dir)
# @itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO])
# def msg_show(msg):
# print(datetime.now().strftime('%H:%M:%S'+": "), end="", flush=True)
# msg = Contact.give_name(msg) # Give a contact name to the message
# # pprint(msg)
# # Dig out who message was from and assign their human-readable name
# FromUser = {}
# FromUser['UserName'] = msg['FromUserName']
# for i in friends:
# if i['UserName'] == msg['FromUserName']:
# FromUser['Name'] = i['Name']
# # What to do for different message types
# if msg.type == "Text":
# print(to_me+FromUser['Name']+": "+unstyle+msg.text)
# Message.notify(FromUser['Name'],msg.text)
# elif msg.type in ['Attachment', 'Picture', 'Video']:
# download_files(msg, download_dir)
# print(to_me+FromUser['Name']+": "+unstyle+" ["+msg['FileName']+"] "+Style.DIM + "downloaded to "+download_dir+Style.RESET_ALL)
# global last_file
# last_file = msg['FileName']
# MessageStuff['FileName'] = msg['FileName']
# Message.notify(FromUser['Name'],"[File] "+msg['FileName'])
# else:
# print(to_me+FromUser['Name']+": "+unstyle+" ["+msg.type+"]")
@itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO])
def msg_show(msg):
FromUser = Message.get_sender_human(msg, friends)
Message.indicator = Message.date_human+": "+to_me+FromUser['Name']+": "+unstyle
msg = Contact.give_name(msg) # Give a contact name to the message
msg_show.msg_text = msg.text
msg_show.from_name = FromUser['Name']
msg_show.from_username = msg.FromUserName
msg_show.from_username_stack = []
if msg.FromUserName != me.UserName:
msg_show.from_username_stack.append(msg.FromUserName)
if msg.type == "Text":
Message.display_text = Message.indicator+msg.text
Message.notification_text = msg.text
elif msg.type in ['Attachment', 'Picture', 'Video']:
download_files(msg, File.download_dir)
Message.display_text = Message.indicator+" ["+msg.FileName+"] "+Style.DIM + "downloaded to "+File.download_dir+unstyle
Message.notification_text = msg.FileName
msg_show.filename = msg.FileName
if msg.type in ['Picture', 'Video']:
print(Err.vidpic_issue)
else:
Message.display_text = Message.indicator+"["+msg.type+"]"
Message.notification_text = msg.type
# print(Message.separator)
print(Message.display_text)
try:
if msg_show.from_username_stack[-1] != msg_show.from_username_stack[-2]:
print(Message.separator) # Print separator if sender is different to last sender
except:
pass
Message.notify(FromUser['Name'], Message.notification_text)
# @itchat.msg_register([ATTACHMENT, PICTURE, VIDEO])
# def msg_show(msg):
# msg = Contact.give_name(msg) # Give a contact name to the message
# FromUser = Message.get_sender_human(msg, friends)
# if msg.type =
# download_files(msg, download_dir)
# print(to_me+FromUser['Name']+": "+unstyle+" ["+msg['FileName']+"] "+Style.DIM + "downloaded to "+download_dir+Style.RESET_ALL)
# Message.notify(FromUser['Name'],"[File] "+msg['FileName'])
# msg_show.msg_text = msg.text
# msg_show.from_name = FromUser['Name']
# msg_show.from_username = msg.FromUserName
# if msg.type in ['Picture', 'Attachment', 'Video']:
# msg_show.filename = msg.FileName
# @itchat.msg_register([MAP, CARD, NOTE, SHARING, RECORDING])
# def msg_show(msg):
def msg_receiver():
App.listen()
# now threading1 runs regardless of user input
threading1 = threading.Thread(target=msg_receiver)
threading1.daemon = True
threading1.start()
while True:
if 'user_to_contact' in locals():
prompt = input(from_me+"To: "+user_to_contact[1]+": "+unstyle)
else:
prompt = input("> ")
if prompt.startswith(command_char):
command = prompt[1:] # Cuts off the command char, to give us raw command text
if command in ["quit", "q"]:
App.quit()
elif command == "open":
File.open(msg.FileName)
elif command in ["pp", "paste"]:
if 'recipient' in locals():
Message.paste(pyperclip.paste(), recipient)
else:
print(Err.no_recipient)
elif command in ['yy', 'copy']:
Message.copy(msg_show.msg_text)
elif command.startswith("contact "):
arg = prompt[9:]
user_to_contact = Contact.chooser(arg, friends)
if user_to_contact != None:
recipient = user_to_contact[0]
else:
del user_to_contact
elif command in ['translate', 'tr']:
Message.translate(msg_show.msg_text)
elif command == "stack":
print(msg_show.from_username_stack)
# Debug.userstack()
elif command.startswith("send "):
filename = prompt[6:]
try:
File.send(filename, recipient)
except:
print(Err.no_recipient)
else:
print(Err.unrecognized_command)
# Now, if there's no command, send a message to selected recipient
else:
if 'recipient' in locals():
message = itchat.send_msg(msg=prompt, toUserName=recipient)
if message['BaseResponse']['RawMsg'] != "请求成功":
print("Message failed with error:",message['BaseResponse']['ErrMsg'])
# print(me['NickName']+": "+prompt) # For now we still see the prompt above where I typed the message, so disabling for now
else:
print(Err.no_recipient)
|
[
"alexcg@outlook.com"
] |
alexcg@outlook.com
|
03f1ca426fc4cd4120dc4c58bf06a5217f7229ac
|
282b1d7bce60803580f9e0e234606dfc69a3be9e
|
/cougar/graphs/loss/triplet_loss.py
|
99ca2e66090e3da78d7b8a820ed097ac7837906d
|
[
"MIT"
] |
permissive
|
Swall0w/cougar
|
4b2894089ac4857dcd319e388077b7330c228512
|
9161b2b1d0c256f4bb952ec190351684f28ec1b7
|
refs/heads/master
| 2022-11-01T14:57:18.077650
| 2019-09-17T14:30:56
| 2019-09-17T14:30:56
| 205,391,913
| 1
| 0
|
MIT
| 2022-10-18T19:27:35
| 2019-08-30T13:51:48
|
Python
|
UTF-8
|
Python
| false
| false
| 771
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TripletLoss(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = F.cosine_similarity(anchor, positive) #Each is batch X 512
distance_negative = F.cosine_similarity(anchor, negative) # .pow(.5)
losses = (1- distance_positive)**2 + (0 - distance_negative)**2 #Margin not used in cosine case.
if size_average:
return losses.mean()
else:
return losses.sum()
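# A small usage sketch (hypothetical batch size and embedding width, not part of the
# original file): the module maps three embedding batches to a scalar loss.
if __name__ == "__main__":
    criterion = TripletLoss(margin=0.2)  # margin is kept for the API but unused by this cosine variant
    anchor, positive, negative = torch.randn(8, 512), torch.randn(8, 512), torch.randn(8, 512)
    print(criterion(anchor, positive, negative).item())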
|
[
"technext.jpn@gmail.com"
] |
technext.jpn@gmail.com
|
68915f0176b12bb9db7d9f9e48a878345db3dade
|
1f96392824039390531a17b43156b2a26a956d4e
|
/treeserver/manage.py
|
02da027d3b243dbf10e99cfffe063c9e7598ae1e
|
[] |
no_license
|
benthomasson/tree
|
9eca1e61406effa95b3d707fde44fe0f309180f6
|
4ccf7fa47e614f1a263a312f477c08af5f2f8d0b
|
refs/heads/master
| 2020-04-10T22:46:04.574886
| 2012-12-22T22:11:05
| 2012-12-22T22:11:05
| 5,841,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "treeserver.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"ben.thomasson@gmail.com"
] |
ben.thomasson@gmail.com
|
00b77addd4d58bcfcf481433d1298aaedd8b394f
|
bbd4d550b5f60763872b2ecd58c5b2bde21ecd0b
|
/test2/file_workflow.py
|
7b5ed83826e9c4a2c7b196c1f39780e76410aa0d
|
[] |
no_license
|
MaLei666/workflow_test
|
4894a96f1b9858e6beb454f62cb6acad34e8fc3e
|
312d23e36086bb0967f20b290a3f9b1f5f6be184
|
refs/heads/master
| 2022-07-31T00:13:46.307736
| 2020-05-24T14:32:18
| 2020-05-24T14:32:18
| 266,557,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,510
|
py
|
# #-*- coding:utf-8 -*-
# # @author : MaLei
# # @datetime : 2020/4/21 7:22 PM
# # @file : file_workflow.py
# # @software : PyCharm
#
# from SpiffWorkflow.specs import WorkflowSpec,ExclusiveChoice,Simple,Cancel
# from SpiffWorkflow.serializer.json import JSONSerializer
# from SpiffWorkflow.operators import Equal,Attrib
# from SpiffWorkflow import Workflow
# import json
# def file_open(msg):
# print("file:",msg)
#
#
# class DoubleCheck(WorkflowSpec):
# '''1. If a subclass does not override __init__, instantiating the subclass automatically calls the __init__ defined by the parent class.
#
# 2. If a subclass overrides __init__, instantiating the subclass no longer calls the __init__ already defined by the parent class.
#
# 3. To use or extend the parent class's behaviour, the parent's __init__ must be called explicitly, in one of two ways: 1) call the unbound parent constructor, or 2) use super().'''
# def __init__(self):
# WorkflowSpec.__init__(self) # call the unbound superclass constructor (the parent constructor must be called explicitly, otherwise it never runs)
# people1_choice=ExclusiveChoice(self,'people1') # define an exclusive-choice task
# self.start.connect(people1_choice) # self.start is the StartTask placed at the beginning of the workflow; it has no inputs and at least one output
# cancel=Cancel(self,'workflow_aborted') # define cancellation of the workflow
# people1_choice.connect(cancel)
# people2_choice=ExclusiveChoice(self,'people2')
# cond=Equal(Attrib('confirmation'),'yes') # Equal operator; Attrib marks a value so it can be resolved as an attribute via valueof()
# people1_choice.connect_if(cond,people2_choice) # connect the task spec to execute if the condition matches
# people2_choice.connect(cancel) # the task spec to execute if no other condition matches
# open=Simple(self,'file_open')
# people2_choice.connect_if(cond,open)
# open.completed_event.connect(file_open)
#
# # spec=DoubleCheck()
# #
# # serializer=JSONSerializer()
# # """
# # The engine that executes the workflow.
# #
# # It is essentially a tool that manages all of the branches.
# #
# # The workflow is also where the data of a running workflow is stored.
# # """
# # workflow=Workflow(spec)
# # data=workflow.serialize(serializer)
# # pretty=json.dumps(json.loads(data),indent=4,separators=(',',':'))
# # open('workflow-spec.json','w').write(pretty)
#
# serializer = JSONSerializer()
# with open('workflow-spec.json') as fp:
# workflow_json = fp.read()
# spec = WorkflowSpec.deserialize(serializer, workflow_json)
# open('workflow-spec.py','w').write(spec)
from __future__ import print_function
from SpiffWorkflow.specs import WorkflowSpec, ExclusiveChoice, Simple, Cancel
from SpiffWorkflow.operators import Equal, Attrib
def my_nuclear_strike(msg):
print("Launched:", msg)
class DoubleDeckBox(WorkflowSpec):
def __init__(self):
WorkflowSpec.__init__(self)
peopleA_choice = ExclusiveChoice(self, 'peopleA')
self.start.connect(peopleA_choice)
cancel = Cancel(self, 'workflow_aborted')
peopleA_choice.connect(cancel)
peopleB_choice = ExclusiveChoice(self, 'peopleB')
cond = Equal(Attrib('confirmation'), 'yes')
peopleA_choice.connect_if(cond, peopleB_choice)
peopleB_choice.connect(cancel)
strike = Simple(self, 'nuclear_strike')
peopleB_choice.connect_if(cond, strike)
strike.completed_event.connect(my_nuclear_strike)
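# A minimal usage sketch, mirroring the commented-out example above: build the spec,
# hand it to the Workflow engine, and dump it with the JSON serializer.
if __name__ == "__main__":
    from SpiffWorkflow import Workflow
    from SpiffWorkflow.serializer.json import JSONSerializer
    workflow = Workflow(DoubleDeckBox())
    print(workflow.serialize(JSONSerializer()))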
|
[
"826908021@qq.com"
] |
826908021@qq.com
|
200d1b874c07190b35bb0877add010ffb64f521a
|
780900f95f56d9272a01bd50f01642efa771bd16
|
/scrape.py
|
95fbb4de529518f0b2404683a92f94fd932e7c8b
|
[] |
no_license
|
pallih/veidigjald-signatures
|
107ab19153807ee45ab0cdbfe4142e5c9731a49e
|
78e301e55f76c3ff5c3672a7bf21b779080864a9
|
refs/heads/master
| 2021-01-22T02:49:02.968416
| 2013-06-20T00:37:59
| 2013-06-20T00:37:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,786
|
py
|
import scraperwiki
import requests
import lxml.html
url = 'http://www.petitions24.com/signatures/obreytt_veidigjald/start/%s'
headers = {'User-Agent': 'Mozilla/5.0'}
#setup at start
'''
record = {}
record['last_page'] = '0'
scraperwiki.sqlite.save(['last_page'], data=record, table_name='runtime_info')
exit()
'''
selection_statement = 'last_page from runtime_info'
last_page = int(scraperwiki.sqlite.select(selection_statement)[0]['last_page'])
s = requests.Session()
s.headers.update(headers)
def scrape(last_page):
    print(last_page)
response = s.get(url % str(int(last_page)*10).strip())
html = response.text
root = lxml.html.fromstring(html)
signatures = root.xpath('//table[@id="signatures"]/tr')
batch = []
for signature in signatures:
data = {}
data['nr'] = signature[0].text_content().strip()
data['name'] = signature[1].text_content()
if data['name'] != 'The signatory decided not to show his/her name on the Internet.':
data['place'] = signature[2].text_content()
data['place_url'] = signature[2][0].attrib['href']
data['kt'] = signature[3].text_content()
data['date'] = signature[4].text_content()
batch.append(data)
scraperwiki.sqlite.save(['nr'], data=batch, table_name='veidigjald')
update_statement= 'update runtime_info SET last_page=' + str(last_page)
scraperwiki.sqlite.execute(update_statement)
scraperwiki.sqlite.commit()
pagination = root.xpath('//div[@class="pagination"]/a[@class="go_next"]')
if pagination:
return True
else:
return False
for x in range(last_page, 10000000):  # How crappy is this? Probably 11.
    result = scrape(x)
    if not result:
        break
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
03151d8300aa740a122197a386f399d7e9451450
|
d272b041f84bbd18fd65a48b42e0158ef6cceb20
|
/catch/datasets/tho.py
|
58fee5408125281b49c221f55d5e91b94fc7a0a6
|
[
"MIT"
] |
permissive
|
jahanshah/catch
|
bbffeadd4113251cc2b2ec9893e3d014608896ce
|
2fedca15f921116f580de8b2ae7ac9972932e59e
|
refs/heads/master
| 2023-02-19T13:30:13.677960
| 2021-01-26T03:41:10
| 2021-01-26T03:41:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
"""Dataset with 'T'Ho virus' sequences.
A dataset with 1 'T'Ho virus' genomes.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetSingleChrom
ds = GenomesDatasetSingleChrom(__name__, __file__, __spec__)
ds.add_fasta_path("data/tho.fasta.gz", relative=True)
sys.modules[__name__] = ds
|
[
"hmetsky@gmail.com"
] |
hmetsky@gmail.com
|
cf7c9dcfbb59460a88b0b3e568306304f7583811
|
ff853d7b3773db8de783fd26a76bd92742f85384
|
/0x0F-python-object_relational_mapping/2-my_filter_states.py
|
c480b9cf408630749b250433478da0e0b9fde823
|
[] |
no_license
|
stuartses/holbertonschool-higher_level_programming
|
1b3315f624f9c2dc0c63ee3481021c5ed093a81d
|
40497b632bf71c3b877cb61fce79b9d82b4519da
|
refs/heads/master
| 2020-09-29T00:51:57.791491
| 2020-05-14T16:51:44
| 2020-05-14T16:51:44
| 226,905,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
#!/usr/bin/python3
"""2. Filter states by user input
This module makes a MySQL query using MySQLdb
Filters states matching an input name
Holberton School
Foundations - Higher-level programming - Python
By Stuart Echeverry
"""
if __name__ == "__main__":
import sys
import MySQLdb
len_args = len(sys.argv) - 1
args = sys.argv
db = MySQLdb.connect(host='localhost', user=args[1], passwd=args[2],
db=args[3])
cur = db.cursor()
cur.execute("SELECT * FROM states WHERE BINARY name='{}'"
" ORDER BY id ASC".format(args[4]))
rows = cur.fetchall()
for row in rows:
print(row)
cur.close()
db.close()
|
[
"stuart.ses@hotmail.com"
] |
stuart.ses@hotmail.com
|
8ce62413b34f91d34f9f8084f3428a42d543ee3b
|
037d5d18b9b81205305e158d7d9fdad131d318cb
|
/tests/revert/test_many_to_many_relationships.py
|
973dd97bac9f110e036e38a0e9b662a28abc0024
|
[] |
permissive
|
kvesteri/sqlalchemy-continuum
|
ee7acf2c961b27eab3dd8f61598d9159d801ee21
|
a7a6bd7952185b1f82af985c0271834d886a617c
|
refs/heads/master
| 2023-08-24T09:14:33.515416
| 2022-11-17T05:41:09
| 2023-07-24T23:37:12
| 10,312,759
| 479
| 134
|
BSD-3-Clause
| 2023-09-12T20:07:04
| 2013-05-27T10:30:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,806
|
py
|
import sqlalchemy as sa
from tests import TestCase
class TestRevertManyToManyRelationship(TestCase):
def create_models(self):
class Article(self.Model):
__tablename__ = 'article'
__versioned__ = {
'base_classes': (self.Model, )
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
article_tag = sa.Table(
'article_tag',
self.Model.metadata,
sa.Column(
'article_id',
sa.Integer,
sa.ForeignKey('article.id', ondelete='CASCADE'),
primary_key=True,
),
sa.Column(
'tag_id',
sa.Integer,
sa.ForeignKey('tag.id', ondelete='CASCADE'),
primary_key=True
)
)
class Tag(self.Model):
__tablename__ = 'tag'
__versioned__ = {
'base_classes': (self.Model, )
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
Tag.articles = sa.orm.relationship(
Article,
secondary=article_tag,
backref='tags'
)
self.Article = Article
self.Tag = Tag
def test_revert_remove(self):
article = self.Article()
article.name = u'Some article'
tag = self.Tag(name=u'some tag')
article.tags.append(tag)
self.session.add(article)
self.session.commit()
assert len(article.versions[0].tags) == 1
article.tags.remove(tag)
self.session.commit()
self.session.refresh(article)
assert article.tags == []
article.versions[0].revert(relations=['tags'])
self.session.commit()
assert article.name == u'Some article'
assert len(article.tags) == 1
assert article.tags[0].name == u'some tag'
def test_revert_remove_with_multiple_parents(self):
article = self.Article(name=u'Some article')
tag = self.Tag(name=u'some tag')
article.tags.append(tag)
self.session.add(article)
article2 = self.Article(name=u'Some article')
tag2 = self.Tag(name=u'some tag')
article2.tags.append(tag2)
self.session.add(article2)
self.session.commit()
article.tags.remove(tag)
self.session.commit()
self.session.refresh(article)
assert len(article.tags) == 0
article.versions[0].revert(relations=['tags'])
self.session.commit()
assert article.name == u'Some article'
assert len(article.tags) == 1
assert article.tags[0].name == u'some tag'
|
[
"konsta.vesterinen@gmail.com"
] |
konsta.vesterinen@gmail.com
|
6d355c5c5093df76111c6227f1d90df7885f252b
|
2744fbd0c33c181f6bb71abbb26982f57a07ae9a
|
/config.py
|
ff0067d0d32bc4e56fcd10d7032f29d1dfc7de97
|
[] |
no_license
|
mehulchopradev/yannick-webapp
|
f2d777cfc23786b142551deefb2437cd24fb7592
|
4eb18a574177fb3f2d595255b4d3d421d5518944
|
refs/heads/master
| 2020-04-01T21:01:15.273151
| 2018-10-18T14:01:47
| 2018-10-18T14:01:47
| 153,633,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
class Config:
SQLALCHEMY_DATABASE_URI='mysql+mysqlconnector://root:root@localhost/yannickwebapp_db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
|
[
"mehul.chopra.dev@gmail.com"
] |
mehul.chopra.dev@gmail.com
|
317568248fa7b20af7cf4cfac25cb622ab9e5eb1
|
46244bb6af145cb393846505f37bf576a8396aa0
|
/algorithms/ch4/4.1.13.py
|
1d5cc8e306e2a491210d8e21daa6a91adca28254
|
[] |
no_license
|
aoeuidht/homework
|
c4fabfb5f45dbef0874e9732c7d026a7f00e13dc
|
49fb2a2f8a78227589da3e5ec82ea7844b36e0e7
|
refs/heads/master
| 2022-10-28T06:42:04.343618
| 2022-10-15T15:52:06
| 2022-10-15T15:52:06
| 18,726,877
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import graph
class BreadthFirstPaths:
def __init__(self, g, s):
self.g = g
self.s = s
self.marked = set()
    def dist_to(self, v):
        # the queue for the vertex list
        return self.dist_to_wrapper(self.s, v, 0)
def dist_to_wrapper(self, s, v, depth):
vertex_queue = [(s, 0)]
while vertex_queue:
s, depth = vertex_queue.pop(0)
self.marked.add(s)
for _s in self.g.adj(s):
if _s in self.marked:
continue
# add them to the queue
self.marked.add(_s)
# check if target
if _s == v:
return depth + 1
                vertex_queue.append((_s, depth + 1))
        return -1  # target not reachable from the source
|
[
"sockpuppet.lea@gmail.com"
] |
sockpuppet.lea@gmail.com
|
ebc5a7e1b4004327a673f2af979efc3495396138
|
9fcd6a91132fd12731d259fe7d709cdf222381bb
|
/2020/19/foo.py
|
8c0e0c373ff1f394ff39b4c49fbfbf9a2b6b2b61
|
[] |
no_license
|
protocol7/advent-of-code
|
f5bdb541d21414ba833760958a1b9d05fc26f84a
|
fa110cef83510d86e82cb5d02f6af5bb7016f2c7
|
refs/heads/master
| 2023-04-05T15:33:26.146031
| 2023-03-18T14:22:43
| 2023-03-18T14:22:43
| 159,989,507
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
import sys
from collections import *
import re
rs, msgs = sys.stdin.read().split("\n\n")
rules = defaultdict(list)
for r in rs.split("\n"):
a, b = r.split(": ")
if '"' in b:
rules[a] = b[1:-1]
else:
for o in b.split(" | "):
rules[a].append(o.split())
def build(rule):
reg = ""
r = rules[rule]
if type(r) == str:
return r
else:
for oi, ors in enumerate(r):
if oi > 0:
reg += "|"
for rx in ors:
reg += build(rx)
return "(%s)" % reg
r = "^%s$" % build("0")
print(sum(bool(re.match(r, m)) for m in msgs.split()))
|
[
"niklas@protocol7.com"
] |
niklas@protocol7.com
|
ed6a9ce70978a4f43af6c5e0df07abcb7cda0242
|
c61a28aba19f7cdf9a5127e8a782bf115c265e70
|
/apps/recruitpro/recruitpro/recruitpro/doctype/project_user/project_user.py
|
2485ab02a13cefca8d30d07a52de8ce60c0592d9
|
[
"MIT"
] |
permissive
|
sharmilaviji/RecruitPRO-NEW
|
fa72c8fc00f469a41798b1047c11dcc470fbc495
|
dcfaedebe56b45acd6ddcab7e24c939b853a2c8c
|
refs/heads/master
| 2021-05-26T12:14:12.611154
| 2020-04-27T04:40:50
| 2020-04-27T04:40:50
| 254,125,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, teampro and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ProjectUser(Document):
pass
|
[
"sharmiviji1997@gmail.com"
] |
sharmiviji1997@gmail.com
|
7977a21d9cd1ff00ad98f7bdf786cad24b051af1
|
423ba09a145b3468a322acf4ddf7d2c2446e227d
|
/atcoder/abc119/abc119_c.py
|
4d8b05eaf0c2223837bb66ee8d6761db24fbe938
|
[] |
no_license
|
hirosuzuki/procon
|
13d3bc332d6e4368fd54fec742b32b09729658ed
|
533f40e13456542b202905a61814ad926c3c206e
|
refs/heads/master
| 2021-05-12T10:42:51.920561
| 2020-05-20T15:49:05
| 2020-05-20T15:49:05
| 117,356,746
| 0
| 0
| null | 2018-01-29T14:46:28
| 2018-01-13T15:56:40
|
Python
|
UTF-8
|
Python
| false
| false
| 832
|
py
|
N, A, B, C = [int(_) for _ in input().split()]
L = [int(input()) for i in range(N)]
from itertools import combinations
def combi(xs, left):
for i in range(len(xs) - left):
for r in combinations(xs, i + 1):
yield r
def diff(a, b):
result = a[:]
for x in b:
if x in result:
result.remove(x)
return result
def calc(xs):
result = 10**100
for ra in combi(xs, 2):
sa = abs(A - sum(ra)) + len(ra) * 10 - 10
xsa = diff(xs, ra)
for rb in combi(xsa, 1):
sb = abs(B - sum(rb)) + len(rb) * 10 - 10
xsb = diff(xsa, rb)
for rc in combi(xsb, 0):
sc = abs(C - sum(rc)) + len(rc) * 10 - 10
total = sa + sb + sc
result = min(result, total)
return result
print(calc(L))
|
[
"hirosuzuki@users.noreply.github.com"
] |
hirosuzuki@users.noreply.github.com
|
cb551e3a871a6f00f0bc6847baa76f890601f945
|
6ad55b80c944e123fef29468aa018e15505cd4a3
|
/references/lr/parser/adder.py
|
41435d2272f198ac21eec7fa984834313768a71b
|
[] |
no_license
|
ramalho/kaminpy
|
c7799d58edd81ada1ba7914528d16872ecb771f2
|
2f5038a9ebacb378fc45fd7fd840a50ac47c940e
|
refs/heads/master
| 2021-01-13T02:22:16.765278
| 2018-11-19T12:34:45
| 2018-11-19T12:34:45
| 2,436,569
| 17
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
import token as py_token
import util
class Token:
def __init__(self, src, start, end, line):
self.src = src
self.start = start
self.end = end
self.line = line
self.evaluate()
def evaluate(self):
pass
def nud(self):
raise SyntaxError('unexpected %r in line:\n%s' % (self.src, self.line))
class Literal(Token):
def evaluate(self):
self.value = int(self.src)
def nud(self):
return self.value
class OperatorAdd(Token):
lbp = 10
def led(self, left):
right = expression(10)
return left + right
class End(Token):
lbp = 0
TOKENS = {
py_token.NUMBER: Literal,
py_token.PLUS: OperatorAdd,
py_token.ENDMARKER: End,
}
def tokenize(src):
for token_info in util.tokenize(src):
token_class = TOKENS[token_info.type]
yield token_class(*token_info[1:])
token = Ellipsis
def expression(rbp=0):
global token
t = token
token = next()
left = t.nud()
while rbp < token.lbp:
t = token
token = next()
left = t.led(left)
return left
def evaluate(src):
"""
>>> evaluate("1 + 2")
3
"""
global token, next
next = tokenize(src).__next__
token = next()
if isinstance(token, End):
return None
try:
return expression()
except StopIteration:
raise SyntaxError('unexpected end of source')
|
[
"luciano@ramalho.org"
] |
luciano@ramalho.org
|
044e6cd2bcf67dd9722c851da59f18e8d33316c7
|
c6dfa13fad324443e1e93a6a3e165938f6114bfe
|
/mysite_register/views.py
|
8f05f5163c8230d657018d89d436baab542e335a
|
[] |
no_license
|
keystonecyberstop/testTimePass
|
d94687647b2ad5b933a1ac99db856778e96a4acd
|
ff505f12889157c69694b648fe608de9f9511e43
|
refs/heads/master
| 2022-12-24T03:24:52.013959
| 2020-10-11T11:32:38
| 2020-10-11T11:32:38
| 303,101,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
from django.shortcuts import render
from django.views.generic import ListView
from .models import Entry
class HomeView(ListView):
model = Entry
template_name = 'entries/index.html'
# def get_context_data(self, **kwargs):
# print(kwargs)
# context = super().get_context_data(**kwargs)
# print('>>>>>>>>>>>')
# print(context)
# print('>>>>>>>>>>>')
# return context
# def home(request):
# """
# docstring
# """
# context = {
# 'entry_list' : Entry.objects.all()
# }
# return render(request, 'entries/index.html', context=context)
|
[
"--global"
] |
--global
|
492309989d063058f709494cce513673f29adee7
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2190/48117/253305.py
|
9bedb5ef7bc7e20973cbbca26cc70d059c294ec9
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
questNum = int(input())
for i in range(questNum):
quest = input().split(' ')
s = quest[0]
k = int(quest[1])
sa = []
index = 0
for count in range(1, len(s) + 1):
for i in range(len(s)):
if i + count <= len(s):
sa.append(s[i:i + count])
countList = [0] * len(sa)
for string in sa:
count = sa.count(string)
countList[len(string) - 1] = count
lengthList = [0]*len(s)
for index in range(len(sa)):
if countList[index] == k:
lengthList[len(sa[index]) - 1] += 1
ans = -1
for l in lengthList:
if l != 0 :
if l > ans:
ans = l
print(ans)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
2ed3942ae4abf416e93c8b60d8e80353a2aeb6ce
|
62b066da43081895098e43b31e843a1af3700546
|
/CNN/FractalNet/test.py
|
136d42b6cc9bbe3229ba7c4cb1573cdeec72665b
|
[
"MIT"
] |
permissive
|
bssrdf/Paper2Code
|
6945251cf508e99c2d7d40b2b8639e993ab30bff
|
2fb18b3d4ed66b68cab86139c4d9a0dcbf1eba39
|
refs/heads/master
| 2022-03-30T16:52:21.834511
| 2020-04-18T13:25:59
| 2020-04-18T13:25:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
import os
import glob
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.datasets import cifar10
from keras.layers import Activation, Input, Dense, Flatten
from keras.models import Model
from keras.optimizers import SGD, RMSprop, Adam, Nadam
from keras.utils import plot_model
from keras.utils.np_utils import to_categorical
from keras import backend as K
from fractalnet import FractalNet
# paper implementation details
NB_CLASSES = 10
NB_EPOCHS = 400
LEARN_START = 0.02
BATCH_SIZE = 100
MOMENTUM = 0.9
Dropout = [0., 0.1, 0.2, 0.3, 0.4]
CONV = [(3, 3, 64), (3, 3, 128), (3, 3, 256), (3, 3, 512), (2, 2, 512)]
# cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
Y_train = to_categorical(y_train, NB_CLASSES)
Y_test = to_categorical(y_test, NB_CLASSES)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.
X_test /= 255.
def learning_rate(epoch):
if epoch < 200:
return 0.02
if epoch < 300:
return 0.002
if epoch < 350:
return 0.0002
if epoch < 375:
return 0.00002
return 0.000002
# build network
im_in = Input(shape=(32, 32, 3))
output = FractalNet(B=5, C=3, conv=CONV, drop_path=0.15, dropout=Dropout, deepest=False)(im_in)
output = Flatten()(output)
output = Dense(NB_CLASSES, init='glorot_normal')(output)
output = Activation('softmax')(output)
model = Model(im_in, output)
optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])
plot_model(model, to_file='model.png', show_shapes=True)
# train
learn = LearningRateScheduler(learning_rate)
model.fit(x=X_train, y=Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCHS,
validation_data=(X_test, Y_test), callbacks=[learn])
|
[
"cutz309@gmail.com"
] |
cutz309@gmail.com
|
c72e1e6f41a202b2b1cd09a03a1bee06830d0410
|
1f1e8b335470065b67cce28338cfb4a6da503c95
|
/0x08-python-more_classes/1-rectangle.py
|
3c53e4b25f9cde658c140933cf6de96b3862d9a7
|
[] |
no_license
|
guxal/holbertonschool-higher_level_programming
|
48353071e719a509e10f3067f0c3f88cb44bd27d
|
fffd287c510602dc45e36df486f60cdfa1205335
|
refs/heads/master
| 2020-07-22T23:40:03.266880
| 2020-02-14T02:42:11
| 2020-02-14T02:42:11
| 207,370,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
#!/usr/bin/python3
class Rectangle:
"""
    The Rectangle class creates a new rectangle object
Args:
width (int): integer value of the width
height (int): integer value of the height
Attributes:
width (int): integer value of the width
height (int): integer value of the height
"""
def __init__(self, width=0, height=0):
self.width = width
self.height = height
@property
def width(self):
"""Get width"""
return self.__width
@width.setter
def width(self, value):
"""Set width"""
if isinstance(value, int) is False:
raise TypeError("width must be an integer")
elif value < 0:
raise ValueError("width must be >= 0")
else:
self.__width = value
@property
def height(self):
"""Get height"""
return self.__height
@height.setter
    def height(self, value):
        """Set height"""
if isinstance(value, int) is False:
raise TypeError("height must be an integer")
elif value < 0:
raise ValueError("height must be >= 0")
else:
self.__height = value
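# A short usage sketch (not part of the original exercise file): the setters
# validate their inputs and raise on bad values.
if __name__ == "__main__":
    r = Rectangle(3, 4)
    print(r.width, r.height)  # 3 4
    try:
        r.width = -1
    except ValueError as err:
        print(err)  # width must be >= 0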
|
[
"jonathanacp93@gmail.com"
] |
jonathanacp93@gmail.com
|
a79112df59f5d14d3ae00fe0a7bcfb6ee357574e
|
87af51366dd4597fb6ecf7887e44a53dacf67364
|
/juaphe/wsgi.py
|
19cbcbef0a0746d16594e05a93fdf826304ac291
|
[] |
no_license
|
MoTechStore/CIS
|
8707e17c1a1702dfdf17dbbba08e6ccf232acb45
|
b169cf374efdf1db315fde345f865f0831e58694
|
refs/heads/master
| 2023-06-24T07:43:14.144910
| 2021-04-22T15:31:50
| 2021-04-22T15:31:50
| 360,562,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for juaphe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'juaphe.settings')
application = get_wsgi_application()
|
[
"mosesnoel02@gmail.com"
] |
mosesnoel02@gmail.com
|
15e6250031174b4e039ad1e59a431cd4543e313f
|
25e481ef7fba79285f4c8a7fa2e81c8b2b7f9cce
|
/saleor/product/urls.py
|
792a3322de14affc3b6b5778fce32134a08c3d1e
|
[
"BSD-2-Clause"
] |
permissive
|
arslanahmd/Ghar-Tameer
|
59e60def48a14f9452dfefe2edf30e362878191d
|
72401b2fc0079e6d52e844afd8fcf57122ad319f
|
refs/heads/master
| 2023-01-31T04:08:26.288332
| 2018-06-07T18:02:01
| 2018-06-07T18:02:01
| 136,231,127
| 0
| 0
|
NOASSERTION
| 2023-01-11T22:21:42
| 2018-06-05T20:28:11
|
Python
|
UTF-8
|
Python
| false
| false
| 423
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$',
views.product_details, name='details'),
url(r'^category/(?P<path>[a-z0-9-_/]+?)-(?P<category_id>[0-9]+)/$',
views.category_index, name='category'),
url(r'(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/add/$',
views.product_add_to_cart, name="add-to-cart"),
]
|
[
"arslanahmad085@gmail.com"
] |
arslanahmad085@gmail.com
|
65784261a12349ca676e73e89cd86d05c20a0261
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/N/NicolaHughes/oecd_international_ngo_identifiers.py
|
9f0a72e55aef8b2b9a0796f890d51a673f3013f3
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
import scraperwiki
from bs4 import BeautifulSoup
url = "http://old.iatistandard.org/codelists/organisation_identifier_ingo"
html = scraperwiki.scrape(url)
soup = BeautifulSoup(html)
table = soup.find("tbody")
for td in table.find_all("tr"):
code = td.find("td", "column-1").get_text()
country = td.find("td", "column-2").get_text()
abbrev = td.find("td", "column-3").get_text()
name = td.find("td", "column-4").get_text()
organisation = "international NGO"
data = {"Code": code, "Country": country, "Abbreviation": abbrev, "Name": name, "Organisation_type": organisation}
    scraperwiki.sqlite.save(["Name"], data)
import scraperwiki
from bs4 import BeautifulSoup
url = "http://old.iatistandard.org/codelists/organisation_identifier_ingo"
html = scraperwiki.scrape(url)
soup = BeautifulSoup(html)
table = soup.find("tbody")
for td in table.find_all("tr"):
code = td.find("td", "column-1").get_text()
country = td.find("td", "column-2").get_text()
abbrev = td.find("td", "column-3").get_text()
name = td.find("td", "column-4").get_text()
organisation = "international NGO"
data = {"Code": code, "Country": country, "Abbreviation": abbrev, "Name": name, "Organisation_type": organisation}
scraperwiki.sqlite.save(["Name"], data)
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
2d2260b9fe36da45dcd750354ca6cccbdf07ab61
|
d49f90aac10fe247d3dab988860c47fc4cb6a38e
|
/test_coverage_site/wsgi.py
|
874b39ec3bf368563be03bcda8346571b0c4c8d2
|
[] |
no_license
|
slobdell/test-coverage-site
|
ae12220c8302f683f5513372920d471897b3c07f
|
95c9caff146061f602a1283e3bf87486daf9a8c6
|
refs/heads/master
| 2021-01-16T19:20:47.312548
| 2015-05-04T23:41:40
| 2015-05-04T23:41:40
| 34,973,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
"""
WSGI config for test_coverage_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_coverage_site.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
|
[
"slobdell@hearsaycorp.com"
] |
slobdell@hearsaycorp.com
|
7fea7b0f0a2dc5011bde9c1504e953d155324690
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/oauthlib/oauthlib/oauth2/rfc6749/grant_types/base.pyi
|
ba8171b0111fc0f9461d642587b7be5a8fd91df1
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 1,127
|
pyi
|
from typing import Any
log: Any
class ValidatorsContainer:
pre_auth: Any
post_auth: Any
pre_token: Any
post_token: Any
def __init__(self, post_auth, post_token, pre_auth, pre_token) -> None: ...
@property
def all_pre(self): ...
@property
def all_post(self): ...
class GrantTypeBase:
error_uri: Any
request_validator: Any
default_response_mode: str
refresh_token: bool
response_types: Any
def __init__(self, request_validator: Any | None = ..., **kwargs) -> None: ...
def register_response_type(self, response_type) -> None: ...
def register_code_modifier(self, modifier) -> None: ...
def register_token_modifier(self, modifier) -> None: ...
def create_authorization_response(self, request, token_handler) -> None: ...
def create_token_response(self, request, token_handler) -> None: ...
def add_token(self, token, token_handler, request): ...
def validate_grant_type(self, request) -> None: ...
def validate_scopes(self, request) -> None: ...
def prepare_authorization_response(self, request, token, headers, body, status): ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
6eb33391091d6da8cb07157f81214fd33aad700a
|
1424812c4f211d3d5e356e8b3889a689162062f3
|
/arcade/python/65_calc_bonuses.py
|
f7049b69442fdc3417020fec21d30b0c0719e3bf
|
[] |
no_license
|
nazomeku/codefights
|
cb7d3c40be0809695ec524a87c88dbebcf5b47bc
|
b23f6816f9b5b0720feac1c49c31163923e0a554
|
refs/heads/master
| 2021-01-22T12:49:35.905165
| 2017-11-21T19:03:37
| 2017-11-21T19:03:37
| 102,357,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
"""Given the bonuses the player got, your task is to return his final
score for the level."""
def calc_bonuses(bonuses, n):
it = (x for x in bonuses)
res = 0
try:
for _ in range(n):
res += next(it)
except StopIteration:
res = 0
return res
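# A small usage sketch (hypothetical values): the sum of the first n bonuses,
# or 0 when fewer than n bonuses were collected.
if __name__ == "__main__":
    print(calc_bonuses([5, 7, 10], 2))  # 12
    print(calc_bonuses([5, 7], 3))      # 0 (not enough bonuses)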
|
[
"cols.nazo@gmail.com"
] |
cols.nazo@gmail.com
|
a68323c8d34f11fe9205e298a3142449f9f35bd1
|
999879f8d18e041d7fa313132408b252aded47f8
|
/01-codes/scipy-master/benchmarks/benchmarks/go_benchmark_functions/go_funcs_Z.py
|
948d35ef731d2bb98ed71725b8de543e3329add0
|
[
"MIT"
] |
permissive
|
QPanProjects/Surrogate-Model
|
ebcaf05728e82dcbcd924c2edca1b490ab085173
|
848c7128201218b0819c9665e2cec72e3b1d29ac
|
refs/heads/master
| 2022-10-11T19:03:55.224257
| 2020-06-09T14:37:35
| 2020-06-09T14:37:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,799
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, sign, arange
from .go_benchmark import Benchmark
class Zacharov(Benchmark):
r"""
Zacharov objective function.
This class defines the Zacharov [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zacharov}}(x) = \sum_{i=1}^{n} x_i^2 + \left ( \frac{1}{2}
\sum_{i=1}^{n} i x_i \right )^2
+ \left ( \frac{1}{2} \sum_{i=1}^{n} i x_i
\right )^4
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-1, 1], [-1, 1])
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
u = sum(x ** 2)
v = sum(arange(1, self.N + 1) * x)
return u + (0.5 * v) ** 2 + (0.5 * v) ** 4
class ZeroSum(Benchmark):
r"""
ZeroSum objective function.
This class defines the ZeroSum [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{ZeroSum}}(x) = \begin{cases}
0 & \textrm{if} \sum_{i=1}^n x_i = 0 \\
1 + \left(10000 \left |\sum_{i=1}^n x_i\right|
\right)^{0.5} & \textrm{otherwise}
\end{cases}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` where :math:`\sum_{i=1}^n x_i = 0`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
if abs(sum(x)) < 3e-16:
return 0.0
return 1.0 + (10000.0 * abs(sum(x))) ** 0.5
class Zettl(Benchmark):
r"""
Zettl objective function.
This class defines the Zettl [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Zettl}}(x) = \frac{1}{4} x_{1} + \left(x_{1}^{2} - 2 x_{1}
+ x_{2}^{2}\right)^{2}
with :math:`x_i \in [-1, 5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.0037912` for :math:`x = [-0.029896, 0.0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-0.02989597760285287, 0.0]]
self.fglob = -0.003791237220468656
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + x[1] ** 2 - 2 * x[0]) ** 2 + 0.25 * x[0]
class Zimmerman(Benchmark):
r"""
Zimmerman objective function.
This class defines the Zimmerman [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zimmerman}}(x) = \max \left[Zh1(x), Zp(Zh2(x))
\textrm{sgn}(Zh2(x)), Zp(Zh3(x))
\textrm{sgn}(Zh3(x)),
Zp(-x_1)\textrm{sgn}(x_1),
Zp(-x_2)\textrm{sgn}(x_2) \right]
Where, in this exercise:
.. math::
\begin{cases}
Zh1(x) = 9 - x_1 - x_2 \\
Zh2(x) = (x_1 - 3)^2 + (x_2 - 2)^2 \\
Zh3(x) = x_1x_2 - 14 \\
Zp(t) = 100(1 + t)
\end{cases}
Where :math:`x` is a vector and :math:`t` is a scalar.
Here, :math:`x_i \in [0, 100]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [7, 2]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO implementation from Gavana
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [100.0] * self.N))
self.custom_bounds = ([0.0, 8.0], [0.0, 8.0])
self.global_optimum = [[7.0, 2.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
Zh1 = lambda x: 9.0 - x[0] - x[1]
Zh2 = lambda x: (x[0] - 3.0) ** 2.0 + (x[1] - 2.0) ** 2.0 - 16.0
Zh3 = lambda x: x[0] * x[1] - 14.0
Zp = lambda x: 100.0 * (1.0 + x)
return max(Zh1(x),
Zp(Zh2(x)) * sign(Zh2(x)),
Zp(Zh3(x)) * sign(Zh3(x)),
Zp(-x[0]) * sign(x[0]),
Zp(-x[1]) * sign(x[1]))
class Zirilli(Benchmark):
r"""
    Zirilli objective function.
This class defines the Zirilli [1]_ global optimization problem. This is a
unimodal minimization problem defined as follows:
.. math::
f_{\text{Zirilli}}(x) = 0.25x_1^4 - 0.5x_1^2 + 0.1x_1 + 0.5x_2^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.3523` for :math:`x = [-1.0465, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
self.global_optimum = [[-1.0465, 0.0]]
self.fglob = -0.35238603
def fun(self, x, *args):
self.nfev += 1
return 0.25 * x[0] ** 4 - 0.5 * x[0] ** 2 + 0.1 * x[0] + 0.5 * x[1] ** 2
|
[
"quanpan302@hotmail.com"
] |
quanpan302@hotmail.com
|
9324e5dad5c5d41d9712524e5664e5b589a4683a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02735/s229136257.py
|
7525f09b2d7be32d9b898770a7896fc24d111e97
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
H,W = map(int,input().split())
s = []
for i in range(H):
S = list(input())
s.append(S)
dis = [[float("inf")] * W for i in range(H)]
if s[0][0] == "#":
dis[0][0] = 1
else:
dis[0][0] = 0
for i in range(H):
for j in range(W):
if i != H-1:
if s[i][j] == "." and s[i+1][j] == "#":
dis[i+1][j] = min(dis[i][j] + 1,dis[i+1][j])
else:
dis[i+1][j] = min(dis[i][j],dis[i+1][j])
if j != W-1:
if s[i][j] == "." and s[i][j+1] == "#":
dis[i][j+1] = min(dis[i][j] + 1,dis[i][j+1])
else:
dis[i][j+1] = min(dis[i][j],dis[i][j+1])
print (dis[-1][-1])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e7c530faea76708714903acd3aee7445bd19cd73
|
6bf336bc8d6ba061e0c707bdd8595368dee4d27b
|
/algorithms/implementation/chocolate_feast.py
|
167c71bac7ce61c4730dfdc418b5534bdd26088e
|
[
"MIT"
] |
permissive
|
avenet/hackerrank
|
aa536214dbccf5a822a30ea226e1dbaac9afb243
|
e522030a023af4ff50d5fc64bd3eba30144e006c
|
refs/heads/master
| 2021-01-01T20:15:06.647873
| 2017-11-24T23:59:19
| 2017-11-24T23:59:19
| 98,801,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
n = int(input())
for i in range(n):
money, item_price, exchange_wrapper = [int(x) for x in input().split(' ')]
bought = money // item_price
answer = bought
wrappers = bought
while wrappers >= exchange_wrapper:
extra_items = wrappers // exchange_wrapper
answer += extra_items
wrappers = (wrappers % exchange_wrapper) + extra_items
print(int(answer))
|
[
"andy.venet@gmail.com"
] |
andy.venet@gmail.com
|
f9f5c7d65ca52302442ac3af9842e3e0c2658298
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4042/261004042.py
|
270e503879fc4b824982174d8abaee91802da7e1
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
from bots.botsconfig import *
from records004042 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'ME',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'TRN', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 3, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 10},
]},
{ID: 'LX', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'AM1', MIN: 0, MAX: 99999},
{ID: 'DTP', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'LN1', MIN: 0, MAX: 99999},
{ID: 'AMT', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'PWK', MIN: 0, MAX: 99999},
{ID: 'NTE', MIN: 0, MAX: 99999},
{ID: 'NM1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 10},
{ID: 'DTP', MIN: 0, MAX: 2},
]},
{ID: 'NX1', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'NX2', MIN: 1, MAX: 99999},
{ID: 'DTP', MIN: 1, MAX: 7},
{ID: 'YNQ', MIN: 0, MAX: 16},
{ID: 'REF', MIN: 0, MAX: 5},
{ID: 'PDS', MIN: 0, MAX: 99999},
{ID: 'PDE', MIN: 0, MAX: 99999},
{ID: 'PEX', MIN: 0, MAX: 5},
{ID: 'REC', MIN: 0, MAX: 1},
{ID: 'REA', MIN: 0, MAX: 1},
{ID: 'III', MIN: 0, MAX: 30},
{ID: 'AM1', MIN: 0, MAX: 99999},
{ID: 'API', MIN: 0, MAX: 10},
{ID: 'AMT', MIN: 0, MAX: 10},
{ID: 'QTY', MIN: 0, MAX: 10},
{ID: 'PCT', MIN: 0, MAX: 4},
{ID: 'NTE', MIN: 0, MAX: 10},
{ID: 'PWK', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 2},
]},
{ID: 'IN1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'III', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'DTM', MIN: 0, MAX: 2},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
2e93b69a71388f075c9082fa5e2e275073673eb8
|
edeeb2fa2ece9f2f19a792bedbf842319e90e417
|
/model/model.py
|
d77c10ee6a5a18179c282acd64d92cee86ba17fa
|
[] |
no_license
|
ywl0911/TextCnn
|
9461023cba84b24cf46d79c42720822baebe0c4c
|
15af406ba05c71f9d18929ecd6d958216a1b53c2
|
refs/heads/master
| 2021-02-06T07:14:55.221200
| 2018-12-15T00:30:09
| 2018-12-15T00:30:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,474
|
py
|
from __future__ import print_function
from model.text_cnn import TextCnn
import tensorflow as tf
from tensorflow.contrib import layers
import json
import os
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
config_path = os.path.join(path, 'config')
params_path = os.path.join(config_path, 'kaggle_movie_review.json')
with open(params_path, 'r') as fin:
options = json.load(fin)
config = tf.contrib.training.HParams(**options)
class Model:
def __init__(self):
pass
def model_fn(self, mode, features, labels, params):
self.dtype = tf.float32
self.mode = mode
self.params = params
self.loss, self.train_op, self.metrics, self.predictions = None, None, None, None
self._init_placeholder(features, labels)
self.build_graph()
# train mode: required loss and train_op
# eval mode: required loss
# predict mode: required predictions
return tf.estimator.EstimatorSpec(
mode=mode,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=self.metrics,
predictions={"prediction": self.predictions})
def _init_placeholder(self, features, labels):
self.input_data = features
if type(features) == dict:
self.input_data = features["input_data"]
self.targets = labels
def build_graph(self):
graph = TextCnn(self.mode)
output = graph.build(self.input_data)
self._build_prediction(output)
if self.mode != tf.estimator.ModeKeys.PREDICT:
self._build_loss(output)
self._build_optimizer()
self._build_metric()
def _build_loss(self, output):
self.loss = tf.losses.softmax_cross_entropy(
self.targets,
output,
scope="loss")
def _build_prediction(self, output):
tf.argmax(output[0], name='train/pred_0') # for print_verbose
self.predictions = tf.argmax(output, axis=1)
def _build_optimizer(self):
self.train_op = layers.optimize_loss(
self.loss, tf.train.get_global_step(),
optimizer='Adam',
learning_rate=config.train['learning_rate'],
summaries=['loss', 'learning_rate'],
name="train_op")
def _build_metric(self):
self.metrics = {
"accuracy": tf.metrics.accuracy(tf.argmax(self.targets, axis=1), self.predictions)
}
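# Usage sketch (added comment, not part of the original module; my_input_fn and
# the model_dir value are placeholders): model_fn follows the TF 1.x Estimator
# contract, so it would typically be wired up roughly as
#   estimator = tf.estimator.Estimator(model_fn=Model().model_fn, model_dir='out')
#   estimator.train(input_fn=my_input_fn, steps=1000)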
|
[
"1318394945@qq.com"
] |
1318394945@qq.com
|
024c1ce5c3908c2e84353f3e0600a60e88bec43e
|
59aed92059824381f34e1585e9600878d91cadb0
|
/supervised_learning/0x0D-RNNs/5-bi_forward.py
|
3bea5f82bfbcd5608895cf8bd8aabce38d00fccf
|
[] |
no_license
|
jdarangop/holbertonschool-machine_learning
|
3509eaa6d191f6887be9bbbb5a1df565b0b51cc8
|
75274394adb52d740f6cd4000cc00bbde44b9b72
|
refs/heads/master
| 2021-05-17T04:51:44.675655
| 2020-12-02T02:52:31
| 2020-12-02T02:52:31
| 250,633,089
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
#!/usr/bin/env python3
""" Bidirectional Cell Forward """
import numpy as np
class BidirectionalCell(object):
""" BidirectionalCell class. """
def __init__(self, i, h, o):
""" Initializer.
Args:
i: the dimensionality of the data.
h: the dimensionality of the hidden states.
o: the dimensionality of the outputs.
"""
self.Whf = np.random.normal(size=(i + h, h))
self.bhf = np.zeros((1, h))
self.Whb = np.random.normal(size=(i + h, h))
self.bhb = np.zeros((1, h))
self.Wy = np.random.normal(size=(i + h + o, o))
self.by = np.zeros((1, o))
def forward(self, h_prev, x_t):
""" calculates the hidden state in the forward
direction for one time step.
Args:
            h_prev: (numpy.ndarray) containing the previous hidden state.
            x_t: (numpy.ndarray) containing the data input for the cell.
Returns:
h_next: is the next hidden state.
"""
X = np.concatenate((h_prev, x_t), axis=1)
h_next = np.tanh(np.matmul(X, self.Whf) + self.bhf)
return h_next
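    # Shape note (added comment): given the weights above, h_prev is expected
    # to have shape (m, h) and x_t shape (m, i); the concatenation X is then
    # (m, i + h), so np.matmul(X, self.Whf) + self.bhf yields h_next of shape
    # (m, h).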
|
[
"juandiegoap33@gmail.com"
] |
juandiegoap33@gmail.com
|
579bff385dfa8e145f010cd9a5919a7d939ee332
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/ShopOrderModifyResult.py
|
9082302886be32ebbbbaf87862cab8d4893c57bb
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,321
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ShopOrderModifyResult(object):
def __init__(self):
self._ext_infos = None
self._result_code = None
self._shop_id = None
self._store_id = None
@property
def ext_infos(self):
return self._ext_infos
@ext_infos.setter
def ext_infos(self, value):
self._ext_infos = value
@property
def result_code(self):
return self._result_code
@result_code.setter
def result_code(self, value):
self._result_code = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def store_id(self):
return self._store_id
@store_id.setter
def store_id(self, value):
self._store_id = value
def to_alipay_dict(self):
params = dict()
if self.ext_infos:
if hasattr(self.ext_infos, 'to_alipay_dict'):
params['ext_infos'] = self.ext_infos.to_alipay_dict()
else:
params['ext_infos'] = self.ext_infos
if self.result_code:
if hasattr(self.result_code, 'to_alipay_dict'):
params['result_code'] = self.result_code.to_alipay_dict()
else:
params['result_code'] = self.result_code
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.store_id:
if hasattr(self.store_id, 'to_alipay_dict'):
params['store_id'] = self.store_id.to_alipay_dict()
else:
params['store_id'] = self.store_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ShopOrderModifyResult()
if 'ext_infos' in d:
o.ext_infos = d['ext_infos']
if 'result_code' in d:
o.result_code = d['result_code']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'store_id' in d:
o.store_id = d['store_id']
return o
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
78336ba51d32b62181a4ce2bb2a6dbf2229c8c53
|
dde7259b842de982208caff054d9c9cb3c18c7be
|
/debug .rock.py
|
71e80dfc08454845c7b70946650d47577162cdad
|
[] |
no_license
|
pooja-pichad/more_excersise
|
d1137ab4b757e68bf8109405d7a8cf47e9a0bbfd
|
f9f94b04ad533a5a5ca983d33775b94af8bd146e
|
refs/heads/main
| 2023-06-18T22:21:03.690142
| 2021-07-20T06:56:34
| 2021-07-20T06:56:34
| 387,700,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
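# Added explanatory comment: the loop below plays rock-paper-scissors against
# a random computer move; each win/lose branch checks the player's and the
# computer's choices together, and play repeats until the user answers 'n'.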
from random import randint
def win():
print ('You win!')
def lose():
print ('You lose!')
while True:
player_choice = input('What do you pick? (rock, paper, scissors)')
    player_choice = player_choice.strip()
random_move = randint(0, 2)
moves = ['rock', 'paper', 'scissors']
computer_choice = moves[random_move]
print(computer_choice)
if player_choice == computer_choice:
print ('Draw!')
    elif player_choice == 'rock' and computer_choice == 'scissors':
        win()
    elif player_choice == 'paper' and computer_choice == 'scissors':
        lose()
    elif player_choice == 'scissors' and computer_choice == 'paper':
        win()
    elif player_choice == 'scissors' and computer_choice == 'rock':
        lose()
    elif player_choice == 'paper' and computer_choice == 'rock':
        win()
    elif player_choice == 'rock' and computer_choice == 'paper':
        lose()
aGain = input('Do you want to play again? (y or n)').strip()
if aGain == 'n':
break
# import random
# while True:
# user_action = input("Enter a choice (rock, paper, scissors): ")
# possible_actions = ["rock", "paper", "scissors"]
# computer_action = random.choice(possible_actions)
# print(f"\nYou chose {user_action}, computer chose {computer_action}.\n")
# if user_action == computer_action:
# print(f"Both players selected {user_action}. It's a tie!")
# elif user_action == "rock":
# if computer_action == "scissors":
# print("Rock smashes scissors! You win!")
# else:
# print("Paper covers rock! You lose.")
# elif user_action == "paper":
# if computer_action == "rock":
# print("Paper covers rock! You win!")
# else:
# print("Scissors cuts paper! You lose.")
# elif user_action == "scissors":
# if computer_action == "paper":
# print("Scissors cuts paper! You win!")
# else:
# print("Rock smashes scissors! You lose.")
# play_again = input("Play again? (y/n): ")
# if play_again.lower() != "y":
# break
|
[
"noreply@github.com"
] |
pooja-pichad.noreply@github.com
|
b1ae0c007b11135e8f16664aa1a43c6d13c5c89c
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/9276216/snippet.py
|
27e3f6d7e286538a706d09b067ed2b79a247d7b7
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
#!/usr/bin/python
''' Python command line argument example using argparse module
Example output:
./parser.py --server=pyserver --port=8080,443,25,22,21 --keyword=pyisgood
Server name: [ pyserver ]
Port: [ 8080 ]
Port: [ 443 ]
Port: [ 25 ]
Port: [ 22 ]
Port: [ 21 ]
Keyword assigned: [ pyisgood ]
'''
import argparse
__author__ = 'Jason Vasquez Orona'
def get_args():
'''This function parses and return arguments passed in'''
# Assign description to the help doc
parser = argparse.ArgumentParser(
description='Script retrieves schedules from a given server')
# Add arguments
parser.add_argument(
'-s', '--server', type=str, help='Server name', required=True)
parser.add_argument(
'-p', '--port', type=str, help='Port number', required=True, nargs='+')
parser.add_argument(
'-k', '--keyword', type=str, help='Keyword search', required=False, default=None)
# Array for all arguments passed to script
args = parser.parse_args()
# Assign args to variables
server = args.server
port = args.port[0].split(",")
keyword = args.keyword
# Return all variable values
return server, port, keyword
# Run get_args()
# get_args()
# Match return values from get_arguments()
# and assign to their respective variables
server, port, keyword = get_args()
# Print the values
print "\nServer name: [ %s ]\n" % server
for p in port:
print "Port: [ %s ]" % p
print "\nKeyword assigned: [ %s ]\n" % keyword
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
746f58db6baa85c1093e025491da54c87739e1c6
|
dd6c5f31a2a14f610bd9ae7ebffa37586cfdf6fa
|
/jingfen_app/manage.py
|
7a8b7fb2debbfdb45fa14d397a32c43df9c8696f
|
[] |
no_license
|
wean/jingfen
|
534945e8fe12686e25a3fd9788b29bca2ba49be1
|
ed9cc88f57b83c5c77ff85fab58ddf5094f7793f
|
refs/heads/master
| 2020-04-05T07:11:46.721676
| 2018-04-10T05:21:33
| 2018-04-10T05:21:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
#! /usr/bin/python
# -*- coding:utf-8 -*-
# @Time : 2018/3/12 9:21 PM
# from jingfen.jingfen_app import create_app, db
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return 'hello world!'
pass
if __name__ == '__main__':
app.run(debug=True)
|
[
"448290415@qq.com"
] |
448290415@qq.com
|
6df0cb04a29969a2e6fb06c94560c3eb5217a2e8
|
5e22728a45dc131b5abcdde3c10928557177898b
|
/tests/nb_test.py
|
0634d44b1e6eb8abdd7c5d0d810154d9280e95fc
|
[
"MIT"
] |
permissive
|
microsoft/msticnb
|
74fc9636964be68900702ee0c85b0c992f0779ad
|
cefc4ee5a22285d33e7abd91371c617fe42f8129
|
refs/heads/main
| 2023-06-30T02:00:29.253130
| 2023-03-16T20:14:27
| 2023-03-16T20:14:27
| 250,417,186
| 25
| 11
|
MIT
| 2023-03-16T20:14:28
| 2020-03-27T02:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,942
|
py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Notebooklet for Host Summary."""
from typing import Any, Dict, Iterable, Optional
import pandas as pd
from msticpy.common.timespan import TimeSpan
from msticpy.datamodel import entities
from msticnb import nb_metadata
from msticnb._version import VERSION
from msticnb.common import nb_print, set_text
from msticnb.notebooklet import NBMetadata, Notebooklet, NotebookletResult
__version__ = VERSION
__author__ = "Ian Hellen"
_CLS_METADATA: NBMetadata
_CELL_DOCS: Dict[str, Any]
_CLS_METADATA, _CELL_DOCS = nb_metadata.read_mod_metadata(__file__, __name__)
# pylint: disable=too-few-public-methods
class TstSummaryResult(NotebookletResult):
"""Test Results."""
def __init__(
self,
description: Optional[str] = None,
timespan: Optional[TimeSpan] = None,
notebooklet: Optional["Notebooklet"] = None,
):
"""
Create new Notebooklet result instance.
Parameters
----------
description : Optional[str], optional
Result description, by default None
timespan : Optional[TimeSpan], optional
TimeSpan for the results, by default None
        notebooklet : Optional[Notebooklet], optional
Originating notebooklet, by default None
"""
super().__init__(description, timespan, notebooklet)
self.host_entity: entities.Host = None
self.related_alerts: pd.DataFrame = None
self.related_bookmarks: pd.DataFrame = None
self.default_property: pd.DataFrame = None
self.optional_property: pd.DataFrame = None
# pylint: disable=too-few-public-methods
class TstNBSummary(Notebooklet):
"""Test Notebooklet class."""
metadata = _CLS_METADATA
__doc__ = nb_metadata.update_class_doc(__doc__, metadata)
_cell_docs = _CELL_DOCS
# pylint: disable=too-many-branches
@set_text(docs=_CELL_DOCS, key="run") # noqa MC0001
def run(
self,
value: Any = None,
data: Optional[pd.DataFrame] = None,
timespan: Optional[TimeSpan] = None,
options: Optional[Iterable[str]] = None,
**kwargs,
) -> TstSummaryResult:
"""Return host summary data."""
super().run(
value=value, data=data, timespan=timespan, options=options, **kwargs
)
# pylint: disable=attribute-defined-outside-init
self._last_result = TstSummaryResult(
notebooklet=self, description=self.metadata.description, timespan=timespan
)
host_entity = entities.Host(HostName="testhost")
_test_inline_text(host_entity)
_test_yaml_text(host_entity)
self._last_result.host_entity = host_entity
self._last_result.related_alerts = pd.DataFrame()
self._last_result.related_bookmarks = pd.DataFrame()
if "default_opt" in self.options:
self._last_result.default_property = pd.DataFrame()
if "optional_opt" in self.options:
self._last_result.optional_property = pd.DataFrame()
return self._last_result
# %%
# Get IP Information from Heartbeat
@set_text(
title="Host Entity details",
hd_level=3,
text="""
These are the host entity details gathered from Heartbeat
and, if applicable, AzureNetworkAnalytics and Azure management
API.
    The data shows OS information, IP Addresses assigned to the
host and any Azure VM information available.
""",
md=True,
)
def _test_inline_text(host_entity):
nb_print("TestInline")
nb_print(host_entity)
@set_text(docs=_CELL_DOCS, key="show_host_entity")
def _test_yaml_text(host_entity):
nb_print("TestYaml")
nb_print(host_entity)
|
[
"ianhelle@microsoft.com"
] |
ianhelle@microsoft.com
|
41fc9b8079910692b8adde95dc4604182ba9dbc7
|
58ade65dffc7cbe103d93d7c769096a20d9f9815
|
/src/smach_based_introspection_framework/offline_part/anomaly_classification_feature_selection/msg_filters_with_scaling_and_clip.py
|
a375c305a1c8e01a004943519b8b7a4c2b4adc1a
|
[
"BSD-3-Clause"
] |
permissive
|
birlrobotics/smach_based_introspection_framework
|
2cff69ecec030a5b5046dea99f9e15105f52361b
|
f16742339cddfc86effba4dbf6e5062304704b89
|
refs/heads/master
| 2021-05-09T12:02:26.946473
| 2019-05-29T02:46:47
| 2019-05-29T02:46:47
| 119,001,821
| 7
| 1
| null | 2018-07-05T04:58:40
| 2018-01-26T03:37:58
|
Python
|
UTF-8
|
Python
| false
| false
| 634
|
py
|
from rostopics_to_timeseries import TopicMsgFilter
import numpy as np
class TactileStaticStdScaleClipMaxFilter(TopicMsgFilter):
def __init__(self):
super(TactileStaticStdScaleClipMaxFilter, self).__init__()
def convert(self, msg):
ret = np.array([
np.std(msg.taxels[0].values),
np.std(msg.taxels[1].values),
])
return [np.clip(ret/60.0, -1, 1).max()]
@staticmethod
def vector_size():
return 1
@staticmethod
def vector_meaning():
return [
'tactile_static_data.left.std.clip(ret/60.0, -1, 1).max()', \
]
|
[
"sk.law.lsq@gmail.com"
] |
sk.law.lsq@gmail.com
|
724e03976eb11b0ba1df23167f2f294516dc6dad
|
a6719f4815ff41d3a1f09e9a63a64c4582d03702
|
/error_handling/try-except.py
|
881cf6cfddef1e2ef9f597573159206f16b57db8
|
[
"MIT"
] |
permissive
|
thanh-vt/python-basic-programming
|
8136007b8435dae6339ae33015fe536e21b19d1d
|
5fe817986fbef2649b4b03955f07b59d2a2035d8
|
refs/heads/main
| 2023-01-30T12:57:36.819687
| 2020-12-13T17:27:05
| 2020-12-13T17:27:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
try:
print(x)
except NameError:
print("Variable x is not defined")
except Exception as ex:
print(ex.__cause__)
print("An exception occurred")
try:
print("Hello")
except Exception as ex:
print(ex)
print("Something went wrong")
else:
print("Nothing went wrong")
|
[
"thanhvt@vissoft.vn"
] |
thanhvt@vissoft.vn
|
5ed01a03bd11edf9b7d88470801ecb14f19ac080
|
a1119965e2e3bdc40126fd92f4b4b8ee7016dfca
|
/trunk/repy/tests/ut_repytests_veryslownetsend-testsend.py
|
29cb9e30a07108c0c3fa576534605471fbacbae1
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
SeattleTestbed/attic
|
0e33211ddf39efdbcf5573d4fc7fa5201aa7310d
|
f618a962ce2fd3c4838564e8c62c10924f5df45f
|
refs/heads/master
| 2021-06-10T23:10:47.792847
| 2017-05-15T12:05:43
| 2017-05-15T12:05:43
| 20,154,061
| 0
| 1
| null | 2014-10-16T17:21:06
| 2014-05-25T12:34:00
|
Python
|
UTF-8
|
Python
| false
| false
| 331
|
py
|
#pragma repy restrictions.veryslownetsend
def foo(ip,port,mess, ch):
stopcomm(ch)
exitall()
if callfunc == 'initialize':
ip = getmyip()
recvmess(ip,<messport>,foo)
sleep(.1)
sendmess(ip,<messport>,'hi')
sendmess(ip,<messport>,'Hello, this is too long of a message to be received in such a short time')
print "hi"
|
[
"USER@DOMAIN"
] |
USER@DOMAIN
|