blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
275a965beacc898d34762e7483c622411e29df6e
|
008c0a630ffa5bc412571acef2b7462e22fce196
|
/tests/test_console.py
|
3ec9d656f2396736bf0840eec5723aff6aeb2a8d
|
[
"BSD-2-Clause",
"Python-2.0",
"BSD-2-Clause-Views"
] |
permissive
|
Jacktwist/python-tcod
|
dea56c330f5c27d85e71a2c44074b0b2c4536675
|
d271cc9892a6bdcd931f7a9984fffc754170b36f
|
refs/heads/master
| 2020-04-28T12:57:01.653126
| 2019-03-11T01:39:52
| 2019-03-11T01:39:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,786
|
py
|
import pickle
import numpy as np
from numpy import array
import pytest
import tcod
@pytest.mark.filterwarnings("ignore:Directly access a consoles")
def test_array_read_write():
    """Verify the console ch/fg/bg numpy arrays stay in sync with the
    legacy ``tcod.console_*`` functions, in both directions."""
    console = tcod.console.Console(width=12, height=10)
    FG = (255, 254, 253)
    BG = (1, 2, 3)
    CH = ord('&')
    # Write through the legacy API, read back through the arrays.
    tcod.console_put_char_ex(console, 0, 0, CH, FG, BG)
    assert console.ch[0, 0] == CH
    assert tuple(console.fg[0, 0]) == FG
    assert tuple(console.bg[0, 0]) == BG
    # Array indexing is [y, x]; the legacy API takes (x, y).
    tcod.console_put_char_ex(console, 1, 2, CH, FG, BG)
    assert console.ch[2, 1] == CH
    assert tuple(console.fg[2, 1]) == FG
    assert tuple(console.bg[2, 1]) == BG
    # clear() resets every cell to a space in white-on-black.
    console.clear()
    assert console.ch[1, 1] == ord(' ')
    assert tuple(console.fg[1, 1]) == (255, 255, 255)
    assert tuple(console.bg[1, 1]) == (0, 0, 0)
    # Write through array views/slices, read back through the legacy API.
    ch_slice = console.ch[1, :]
    ch_slice[2] = CH
    console.fg[1, ::2] = FG
    console.bg[...] = BG
    assert tcod.console_get_char(console, 2, 1) == CH
    assert tuple(tcod.console_get_char_foreground(console, 2, 1)) == FG
    assert tuple(tcod.console_get_char_background(console, 2, 1)) == BG
@pytest.mark.filterwarnings("ignore:.")
def test_console_defaults():
    """Round-trip each deprecated console default through setter and getter."""
    console = tcod.console.Console(width=12, height=10)
    cases = [
        ("default_bg", [2, 3, 4], (2, 3, 4)),
        ("default_fg", (4, 5, 6), (4, 5, 6)),
        ("default_bg_blend", tcod.BKGND_ADD, tcod.BKGND_ADD),
        ("default_alignment", tcod.RIGHT, tcod.RIGHT),
    ]
    for attr, value, expected in cases:
        setattr(console, attr, value)
        assert getattr(console, attr) == expected
@pytest.mark.filterwarnings("ignore:Parameter names have been moved around,")
@pytest.mark.filterwarnings("ignore:Pass the key color to Console.blit instea")
def test_console_methods():
    """Smoke-test the drawing/printing API; only checks nothing raises."""
    console = tcod.console.Console(width=12, height=10)
    console.put_char(0, 0, ord('@'))
    console.print_(0, 0, 'Test')
    console.print_rect(0, 0, 2, 8, 'a b c d e f')
    console.get_height_rect(0, 0, 2, 8, 'a b c d e f')
    console.rect(0, 0, 2, 2, True)
    console.hline(0, 1, 10)
    console.vline(1, 0, 10)
    console.print_frame(0, 0, 8, 8, 'Frame')
    # Blitting a console onto itself exercises the self-blit code path.
    console.blit(0, 0, 0, 0, console, 0, 0)
    console.blit(0, 0, 0, 0, console, 0, 0, key_color=(0, 0, 0))
    console.set_key_color((254, 0, 254))
def test_console_pickle():
    """A pickled console must round-trip all three data channels intact."""
    original = tcod.console.Console(width=12, height=10)
    original.ch[...] = ord('.')
    original.fg[...] = (10, 20, 30)
    original.bg[...] = (1, 2, 3)
    restored = pickle.loads(pickle.dumps(original))
    for channel in ("ch", "fg", "bg"):
        assert (getattr(original, channel) == getattr(restored, channel)).all()
def test_console_pickle_fortran():
    """Pickling must preserve Fortran-order (column-major) memory layout."""
    original = tcod.console.Console(2, 3, order='F')
    restored = pickle.loads(pickle.dumps(original))
    for channel in ("ch", "fg", "bg"):
        assert getattr(original, channel).strides == getattr(restored, channel).strides
def test_console_repr():
    """repr() of a console must be valid Python that reconstructs it."""
    array  # Needed for eval: the repr spells numpy data as ``array(...)``.
    eval(repr(tcod.console.Console(10, 2)))
def test_console_str():
    """str() renders the console text framed by ``<``/``>`` and ``|``."""
    console = tcod.console.Console(10, 2)
    console.print_(0, 0, "Test")
    assert str(console) == ("<Test |\n"
                            "| >")
def test_console_fortran_buffer():
    """A Console must accept an externally supplied Fortran-ordered buffer."""
    backing = np.zeros((1, 2), order="F", dtype=tcod.console.Console.DTYPE)
    tcod.console.Console(width=1, height=2, order="F", buffer=backing)
def test_console_clear():
    """clear() must replace the default colors with the ones supplied."""
    console = tcod.console.Console(1, 1)
    # Before clearing: white foreground over black background.
    assert console.fg[0, 0].tolist() == [255, 255, 255]
    assert console.bg[0, 0].tolist() == [0, 0, 0]
    fg, bg = (7, 8, 9), (10, 11, 12)
    console.clear(fg=fg, bg=bg)
    assert console.fg[0, 0].tolist() == list(fg)
    assert console.bg[0, 0].tolist() == list(bg)
|
[
"4b796c65@gmail.com"
] |
4b796c65@gmail.com
|
be4111fbce90624d076a4b5716314151ab4cc46e
|
d4e38b8a1438c0509f3f160a2ceb9aa166ac3ed1
|
/quizzes/quiz1/server.py
|
1f8c6d93d9cce2d4febe28260c52823c3b82da31
|
[] |
no_license
|
yulu9206/cmpe273-spring2018
|
e7ffa36818cb87596de351b36d5dc33ec387bf1b
|
8e0eaa0ce951b718dae195753dca0d9fc07b5a97
|
refs/heads/master
| 2021-04-06T11:17:44.918483
| 2018-03-15T20:26:35
| 2018-03-15T20:26:35
| 125,421,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
import time
import grpc
import ping_pb2
import ping_pb2_grpc
from concurrent import futures
class PingServer(ping_pb2_grpc.PingPongServicer):
    """gRPC servicer that answers every ping with a 'Pong' response."""

    def ping(self, request, context):
        # The request payload is ignored; the reply is always 'Pong'.
        return ping_pb2.Response(data='Pong')
def run(host, port):
    """Start the PingPong gRPC server on host:port and block forever.

    Blocks the calling thread in a sleep loop until KeyboardInterrupt,
    then stops the server with no grace period.
    """
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    ping_pb2_grpc.add_PingPongServicer_to_server(PingServer(), server)
    server.add_insecure_port('%s:%d' % (host, port))
    server.start()
    _ONE_DAY_IN_SECONDS = 60 * 60 * 24
    # Fix: announce startup once. The original printed this message from
    # inside the sleep loop, i.e. again every 24 hours.
    print("Server started at...%d" % port)
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
if __name__ == '__main__':
    # Listen on all interfaces, port 3000.
    run('0.0.0.0', 3000)
|
[
"yulu9206@Lus-MacBook-Air.local"
] |
yulu9206@Lus-MacBook-Air.local
|
f787f4e893e031beb971965ab9fcd5ad7fea6217
|
15e5b2e39d8f1c08b34b36783cc5504638e3e434
|
/TODO-PROJECT/todo-back/todos/migrations/0001_initial.py
|
318ee532b77426a5c00753188598859bd9f21d93
|
[] |
no_license
|
ghdus4185/Vue_intro
|
62e0ded5b4a23ef34869d8e0bb7b337a7e9c934f
|
b999f19a4140b2456614625f8063ff919b8cdfed
|
refs/heads/master
| 2023-01-13T09:42:15.804784
| 2019-11-21T00:01:38
| 2019-11-21T00:01:38
| 219,454,656
| 0
| 0
| null | 2023-01-07T21:19:32
| 2019-11-04T08:41:36
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,393
|
py
|
# Generated by Django 2.2.6 on 2019-11-18 06:18
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema: a custom ``User`` model mirroring Django's default
    auth user, plus a ``Todo`` model owned by a user."""

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                # Standard Django user manager (create_user / create_superuser).
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('completed', models.BooleanField(default=False)),
                # Deleting a user cascades to their todos.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"ckdghdus@naver.com"
] |
ckdghdus@naver.com
|
8b07c0d19f6538fff2340e3c06e09aba7bac2636
|
0ca210752cd5b926201f3fb40ee4aadc6da4f537
|
/code/test_hd22879.py
|
bdca611a496161a373724a5b3bd0c099afc339a0
|
[
"MIT"
] |
permissive
|
andycasey/precise-objective-differential-spectroscopy
|
f45f93dfdaeb78d138d7792c439cf7b6065882c5
|
658b0d226300330375570dd2450bb284f1cf167a
|
refs/heads/master
| 2021-01-19T20:24:48.539975
| 2014-07-09T13:41:32
| 2014-07-09T13:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
import cPickle as pickle

# BUG FIX: this script uses np.array / np.empty / np.nan / np.isfinite / np.sum
# throughout but never imported numpy, so it crashed with NameError.
import numpy as np

from stellar_parameters import Star
from channel import SpectralChannel


class spectrum(object):
    """Bare container for dispersion/flux/variance arrays."""
    pass

import sick

# NOTE(review): the second load overwrites the first -- presumably a quick
# toggle between test spectra; confirm before removing either line.
spec = sick.specutils.Spectrum.load("spectra/hermes-sun.fits")
spec = sick.specutils.Spectrum.load("spectra/uvessun1.txt")

blue_channel = spectrum()
blue_channel.dispersion = spec.disp
blue_channel.flux = spec.flux
blue_channel.variance = spec.variance

# NOTE(review): same toggle pattern -- the second pickle overwrites the first.
with open("transitions.pkl", "rb") as fp:
    transitions = pickle.load(fp)
with open("sousa-transitions.pkl", "rb") as fp:
    transitions = pickle.load(fp)

# Get just blue channel ones
transition_indices = (blue_channel.dispersion[-1] > transitions["rest_wavelength"]) * (transitions["rest_wavelength"] > blue_channel.dispersion[0])

# Wavelength windows (start, end) in Angstroms to keep in the fit mask.
use_regions = np.array([
    [4731.3, 4731.65],
    [4742.65, 4742.93],
    [4757.95, 4748.31],  # NOTE(review): start > end -- searchsorted yields an empty slice for this row; likely a typo.
    [4759.1, 4759.56],
    [4764.43, 4764.47],
    [4778.08, 4778.41],
    [4779.78, 4780.2],
    [4781.59, 4781.92],
    [4788.41, 4789],
    [4789.91, 4790.19],
    [4795.24, 4795.66],
    [4798.39, 4798.64],
    [4802.69, 4803.2],
    [4805.3, 4805.71],
    [4807.95, 4808.35],
    [4820.23, 4820.6],
    [4847.89, 4848.02],
    [4869.85, 4870.3],
    [4873.88, 4874.19],
    [4884.95, 4885.25],
    [4889.9, 4892.67],
    [4894.7, 4895.0]
])
#use_regions = np.array([
#    [4705, 4850.],
#    [4880., 5000.]
#])

# Mask is NaN everywhere except inside the use_regions windows (set to 1).
mask = np.empty(len(blue_channel.dispersion))
mask[:] = np.nan
for row in use_regions:
    indices = blue_channel.dispersion.searchsorted(row)
    mask[indices[0]:indices[1] + 1] = 1.
print(np.sum(np.isfinite(mask)))

blue = SpectralChannel(blue_channel, transitions[transition_indices], mask=mask, redshift=False, continuum_order=-1,
                       wl_tolerance=0.10, wl_cont=2, outliers=True)
xopt = blue.optimise(plot_filename="blue_optimise.pdf", plot_clobber=True)

star = Star("/Users/arc/atmospheres/castelli-kurucz-2004/a???at*.dat", channels=[blue])
star.infer({"Teff": 5700., "logg": 4.0, "[M/H]": 0.1, "xi": 0.9}, walkers=200, burn=450, sample=50)
|
[
"andycasey@gmail.com"
] |
andycasey@gmail.com
|
7bfb340afa9df5a5df6c0f09a08cba7269997bbb
|
ecd0cffe45c6fee6ce02b70fb5721caac66a7b37
|
/Data_Structures/Graphs/DijkstraAlgorithm.py
|
d16d8b75f887ff9deefb3181f6df5ca25839d837
|
[] |
no_license
|
johngaitho05/Interview-Questions
|
75925ba5e3326857a13cfe6e1add27be3d1aa83e
|
979f6960bc44186208c629403fa4ed73f72673b0
|
refs/heads/master
| 2021-05-21T13:39:48.290629
| 2020-06-18T05:51:30
| 2020-06-18T05:51:30
| 252,669,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,447
|
py
|
import sys
import heapq
def cmp(x, y):
    """
    Replacement for built-in function cmp that was removed in Python 3

    Compare the two objects x and y and return an integer according to
    the outcome. The return value is negative if x < y, zero if x == y
    and strictly positive if x > y.
    """
    if x < y:
        return -1
    if x > y:
        return 1
    return 0
class Edge:
    """Directed, weighted edge between two vertices."""

    def __init__(self, weight, startVertex, targetVertex):
        # Attribute names are part of the public interface used by
        # DijkstraAlgorithm -- keep them unchanged.
        self.startVertex, self.targetVertex = startVertex, targetVertex
        self.weight = weight
class Node:
    """Graph vertex carrying Dijkstra bookkeeping state."""

    def __init__(self, name):
        self.name = name
        self.visited = False        # not consulted by the current algorithm
        self.predecessor = None     # previous vertex on the shortest path
        self.adjacenciesList = []   # outgoing Edge objects
        self.minDistance = sys.maxsize

    def __cmp__(self, otherVertex):
        # Python 2 relic; __lt__ below is what heapq actually uses.
        return cmp(self.minDistance, otherVertex.minDistance)

    def __lt__(self, other):
        # Order nodes by their current tentative distance.
        return self.minDistance < other.minDistance
class DijkstraAlgorithm:
    """Lazy-deletion Dijkstra over Node/Edge graphs."""

    def calculateShortestPath(self, startVertex):
        """Relax edges outward from startVertex, filling each reachable
        node's minDistance and predecessor in place."""
        startVertex.minDistance = 0
        heap = [startVertex]
        while heap:
            current = heapq.heappop(heap)
            for edge in current.adjacenciesList:
                source, target = edge.startVertex, edge.targetVertex
                candidate = source.minDistance + edge.weight
                if candidate < target.minDistance:
                    # Found a shorter route; re-push rather than decrease-key.
                    target.minDistance = candidate
                    target.predecessor = source
                    heapq.heappush(heap, target)

    def getShortestPathTo(self, targetVertex):
        """Print the distance to targetVertex, then the path back to the
        start by following predecessor links (target first)."""
        print("Shortest path to vertex is: ", targetVertex.minDistance)
        node = targetVertex
        while node is not None:
            print(node.name)
            node = node.predecessor
if __name__ == '__main__':
    # Build the demo graph: 8 vertices A-H, 16 weighted directed edges.
    node1 = Node("A")
    node2 = Node("B")
    node3 = Node("C")
    node4 = Node("D")
    node5 = Node("E")
    node6 = Node("F")
    node7 = Node("G")
    node8 = Node("H")
    edge1 = Edge(5, node1, node2)
    edge2 = Edge(8, node1, node8)
    edge3 = Edge(9, node1, node5)
    edge4 = Edge(15, node2, node4)
    edge5 = Edge(12, node2, node3)
    edge6 = Edge(4, node2, node8)
    edge7 = Edge(7, node8, node3)
    edge8 = Edge(6, node8, node6)
    edge9 = Edge(5, node5, node8)
    edge10 = Edge(4, node5, node6)
    edge11 = Edge(20, node5, node7)
    edge12 = Edge(1, node6, node3)
    edge13 = Edge(13, node6, node7)
    edge14 = Edge(3, node3, node4)
    edge15 = Edge(11, node3, node7)
    edge16 = Edge(9, node4, node7)
    # Attach each edge to its start vertex's adjacency list.
    node1.adjacenciesList.append(edge1)
    node1.adjacenciesList.append(edge2)
    node1.adjacenciesList.append(edge3)
    node2.adjacenciesList.append(edge4)
    node2.adjacenciesList.append(edge5)
    node2.adjacenciesList.append(edge6)
    node8.adjacenciesList.append(edge7)
    node8.adjacenciesList.append(edge8)
    node5.adjacenciesList.append(edge9)
    node5.adjacenciesList.append(edge10)
    node5.adjacenciesList.append(edge11)
    node6.adjacenciesList.append(edge12)
    node6.adjacenciesList.append(edge13)
    node3.adjacenciesList.append(edge14)
    node3.adjacenciesList.append(edge15)
    node4.adjacenciesList.append(edge16)
    # NOTE(review): vertexList1 is built but never used.
    vertexList1 = (node1, node2, node3, node4, node5, node6, node7, node8)
    algorithm = DijkstraAlgorithm()
    algorithm.calculateShortestPath(node1)
    algorithm.getShortestPathTo(node7)
|
[
"johngaitho05@gmail.com"
] |
johngaitho05@gmail.com
|
163ef3f305c3772d2d7644c28856b2fc13b47f3b
|
55a2e62805cca90f46f3ac9c9501aa3386ab3109
|
/games/urls.py
|
f8a56294778ab42f63bedfc70f5c669ad53da1b9
|
[] |
no_license
|
profmcdan/games-service
|
2bf5d533b797d46e6a27ae63eac9c367cf144497
|
24049d08cfc645bd750839bcd0eba16fa7d7ee2d
|
refs/heads/master
| 2022-12-16T01:19:09.859633
| 2019-08-06T15:11:54
| 2019-08-06T15:11:54
| 200,827,749
| 0
| 0
| null | 2022-12-08T01:48:16
| 2019-08-06T10:14:54
|
Python
|
UTF-8
|
Python
| false
| false
| 984
|
py
|
# from django.conf.urls import url
from django.urls import path
# from .views import game_collection, game_detail
from . import views
# URL routes for the games service. Every route's name is taken from the
# view class itself so reverse() stays in sync with the view.
urlpatterns = [
    path('esrb-ratings/', views.EsrbRatingList.as_view(),
         name=views.EsrbRatingList.name),
    path('esrb-ratings/<int:pk>/', views.EsrbRatingDetail.as_view(),
         name=views.EsrbRatingDetail.name),
    # BUG FIX: GameList was registered without .as_view(); Django requires a
    # view *callable*, and every sibling route here calls .as_view().
    path('games/', views.GameList.as_view(), name=views.GameList.name),
    path('games/<int:pk>/', views.GameDetail.as_view(),
         name=views.GameDetail.name),
    path('players/', views.PlayerList.as_view(), name=views.PlayerList.name),
    path('players/<int:pk>/', views.PlayerDetail.as_view(),
         name=views.PlayerDetail.name),
    path('player-scores/', views.PlayerScoreList.as_view(),
         name=views.PlayerScoreList.name),
    path('player-scores/<int:pk>/', views.PlayerScoreDetail.as_view(),
         name=views.PlayerScoreDetail.name),
    path('', views.ApiRoot.as_view(), name=views.ApiRoot.name),
]
|
[
"danielale9291@gmail.com"
] |
danielale9291@gmail.com
|
f5b439565e4463e6269798259de653eacdfd482e
|
03195a6f98396fd27aedc3c06d81f1553fb1d16b
|
/pandas/_libs/properties.pyi
|
b2ba55aefb8a57e9a884c9c07d3882b1e3014f78
|
[
"BSD-3-Clause"
] |
permissive
|
huaxz1986/pandas
|
a08d80d27726fe141d449835b9a09265bca5b5e0
|
ba2473834fedcf571d3f8245b4b24796873f2736
|
refs/heads/master
| 2023-06-11T02:20:14.544220
| 2022-01-12T04:40:06
| 2022-01-12T04:40:06
| 131,370,494
| 3
| 4
|
BSD-3-Clause
| 2018-04-28T03:51:05
| 2018-04-28T03:51:05
| null |
UTF-8
|
Python
| false
| false
| 330
|
pyi
|
# pyright: reportIncompleteStub = false
from typing import Any
# note: this is a lie to make type checkers happy (they special
# case property). cache_readonly uses attribute names similar to
# property (fget) but it does not provide fset and fdel.
cache_readonly = property  # see note above: typed as property for checkers only

def __getattr__(name: str) -> Any: ...  # incomplete: catch-all for unstubbed module attributes
|
[
"noreply@github.com"
] |
huaxz1986.noreply@github.com
|
f9106238b4ff20bec1e7b0835e8bd33b0db2acf4
|
8e69eee9b474587925e22413717eb82e4b024360
|
/v1.0.0.test/toontown/uberdog/DistributedDataStoreManagerUD.py
|
a96c02427ed6514633f660973bad639449564c3f
|
[
"MIT"
] |
permissive
|
TTOFFLINE-LEAK/ttoffline
|
afaef613c36dc3b70514ccee7030ba73c3b5045b
|
bb0e91704a755d34983e94288d50288e46b68380
|
refs/heads/master
| 2020-06-12T15:41:59.411795
| 2020-04-17T08:22:55
| 2020-04-17T08:22:55
| 194,348,185
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD
class DistributedDataStoreManagerUD(DistributedObjectUD):
    """UberDOG-side data store manager; every handler is an unimplemented stub.

    The ``todoN`` parameter names suggest the skeleton was auto-generated
    from a DC file -- confirm before renaming them.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedDataStoreManagerUD')

    def startStore(self, todo0):
        pass

    def stopStore(self, todo0):
        pass

    def queryStore(self, todo0, todo1):
        pass

    def receiveResults(self, todo0, todo1):
        pass

    def deleteBackupStores(self):
        pass
|
[
"s0mberdemise@protonmail.com"
] |
s0mberdemise@protonmail.com
|
73a145d26a657841c35d7ea4b5ab7b210955a4ee
|
b3879bc761ac38dab903da57c4061ad79fd70c6d
|
/курсы пайтон модуль 8/задание 9.py
|
6957678c459752ce7e24854b4f53dc592a0069d7
|
[] |
no_license
|
Ruslan5252/all-of-my-projects-byPyCharm
|
4df70cc3a31c4a5d97560fa858a706edcc856299
|
817d5f711408590ea141590ae52c6d888dfa2015
|
refs/heads/master
| 2023-05-03T01:06:30.156731
| 2021-05-29T13:51:16
| 2021-05-29T13:51:16
| 371,970,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,957
|
py
|
class Car():
    """Base vehicle; ``ride`` prints the car's details (messages in Russian)."""

    def __init__(self, name, model, maxSpeed, year, volume):
        self.name = name
        self.model = model
        self.maxSpeed = maxSpeed
        self.year = year
        self.volume = volume

    def ride(self):
        # NOTE(review): the braces build one-element *sets* of each attribute,
        # so the output shows e.g. {'BMW'}; the f-prefix has no placeholders.
        # Left untouched to preserve the program's exact output.
        print(f"машина названием ", {self.name}, "моделью ", {self.model}, "с максимальной скоростью ", {self.maxSpeed},
              "с годом выпуска ", {self.year}, "с объемом", {self.volume}, 'is riding')
class Toyota(Car):
    """Car that also records its manufacturer country."""

    def __init__(self, name, model, maxSpeed, year, volume, manufacturer):
        # Reuse the base initializer instead of re-assigning every field.
        super().__init__(name, model, maxSpeed, year, volume)
        # NOTE(review): keeps the original misspelled attribute name
        # ("manufcturer") because ride() and any external code read it.
        self.manufcturer = manufacturer

    def ride(self):
        # Braces build one-element sets, as in Car.ride -- output preserved.
        print(f"машина с названием ", {self.name}, "моделью ", {self.model}, "с максимальной скоростью ", {self.maxSpeed},
              "с годом выпуска ", {self.year}, "с объемом", {self.volume},
              'произведенная в ', {self.manufcturer}, 'is riding')
class Mercedes(Car):
    """Car that also records its class type (e.g. S-class)."""

    def __init__(self, name, model, maxSpeed, year, volume, class_Type):
        # Reuse the base initializer instead of re-assigning every field.
        super().__init__(name, model, maxSpeed, year, volume)
        self.class_Type = class_Type

    def ride(self):
        # Braces build one-element sets, as in Car.ride -- output preserved.
        print(f"машина с названием ", {self.name}, "моделью ", {self.model}, "с максимальной скоростью ", {self.maxSpeed},
              "с годом выпуска ", {self.year}, "с объемом", {self.volume},
              'имеющая ', {self.class_Type}, 'класс is riding')
class Main():
    # NOTE(review): this is not really a class -- the loop below lives in the
    # class *body*, so three rounds of input() prompts run at import time and
    # block. Consider moving this into a main() function guarded by
    # ``if __name__ == '__main__':``.
    b = []  # collected Car/Toyota/Mercedes instances
    i = 3   # number of prompt rounds
    while i != 0:
        c = Car(input("Введите название машины "), input("Введите модель"), input("Введите максимальную скорость "),
                input("введите год выпуска машины "),
                input("Введите объем машины "))
        p = Toyota(input("Введите название машины "), input("Введите модель"), input("Введите максимальную скорость "),
                   input("введите год выпуска машины "),
                   input("Введите объем машины "), input("Введите страну производства "))
        a = Mercedes(input("Введите название машины "), input("Введите модель"), input("Введите максимальную скорость "),
                     input("введите год выпуска машины "),
                     input("Введите объем машины "), input("введите класс машины "))
        b.append(p)
        b.append(a)
        b.append(c)
        i -= 1
    # Print each collected car's description.
    for i in b:
        i.ride()
|
[
"r.u.s_2000@mail.ru"
] |
r.u.s_2000@mail.ru
|
8fca49fbe0da62e740f590084e3aea24dc479f4e
|
fcd29745ed7a66b46f5039c2ad07f2fa5cb457a2
|
/6_files/files_project/app.py
|
758c311e0fb3ecf125e133b27a725f2b0261621f
|
[
"MIT"
] |
permissive
|
PacktPublishing/The-Complete-Python-Course
|
17489ec6939b5c6c20b92d5bb2d15a71a6444f8e
|
b880ef9c0e281316f4080531d3690435a686e9c0
|
refs/heads/master
| 2023-02-19T04:22:33.451524
| 2023-02-02T06:04:40
| 2023-02-02T06:04:40
| 204,631,924
| 56
| 72
|
MIT
| 2023-02-15T23:21:39
| 2019-08-27T06:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 239
|
py
|
# Read the whole of data.txt, echo it, then overwrite the file with the
# user's name. Context managers guarantee the handles are closed even if a
# read/write raises (the original leaked the write handle on error).
with open('data.txt', 'r') as my_file:
    file_content = my_file.read()

print(file_content)

user_name = input('Enter your name: ')

with open('data.txt', 'w') as my_file_writing:
    my_file_writing.write(user_name)
|
[
"dwayned@packtpub.com"
] |
dwayned@packtpub.com
|
2bc2d9a96d32d718cd7212819c018fb6c1d25567
|
5cc4a73d6fb144d72e74b07a10b60fc36bfe50ec
|
/shops/migrations/0002_auto_20190330_1916.py
|
df99c08c2f3e516d78ab25dd75133d1b5afcbeba
|
[] |
no_license
|
pedrofolch/digitalsoil
|
79d9497dcbb54df3c7df64f9da35d71d592fe580
|
7b6d1ffd34e991cf87c91342e5336a97fa1cf59b
|
refs/heads/master
| 2022-12-11T00:47:01.728729
| 2019-04-11T03:34:12
| 2019-04-11T03:34:12
| 120,937,159
| 0
| 0
| null | 2022-12-08T04:58:09
| 2018-02-09T17:49:10
|
CSS
|
UTF-8
|
Python
| false
| false
| 659
|
py
|
# Generated by Django 2.1.5 on 2019-03-31 02:16
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Make the elevation raster and the shop polygon fields optional
    (nullable and blank-allowed), both in SRID 4326 (WGS 84)."""

    dependencies = [
        ('shops', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='elevation',
            name='rast',
            field=django.contrib.gis.db.models.fields.RasterField(blank=True, null=True, srid=4326),
        ),
        migrations.AlterField(
            model_name='shop',
            name='poly',
            field=django.contrib.gis.db.models.fields.PolygonField(blank=True, null=True, srid=4326),
        ),
    ]
|
[
"pedro.folch@gmail.com"
] |
pedro.folch@gmail.com
|
e4f668c29f8509034aa848c7bc5ab56a68eb64c4
|
ca0e761b2948b2bd93d46e5bab610901f4a9936c
|
/data/convert_to_json_file.py
|
6e7ef5a2f0f1f046e2029916e023b0653ab42ed7
|
[
"MIT"
] |
permissive
|
dojinkimm/go-krx
|
7f122321f69a119594de1ee184be57eeb4f148f7
|
b565696a7c13427f3320c6c43a529638ea06682e
|
refs/heads/main
| 2023-03-11T22:06:00.291164
| 2021-02-28T13:29:51
| 2021-02-28T13:29:51
| 300,895,170
| 7
| 2
| null | 2021-02-23T12:34:18
| 2020-10-03T14:09:40
|
Go
|
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
import json
import pandas as pd
# Download the KRX corporate listing; header=0 takes the first row as column
# names and [0] selects the first table on the page.
dfstockcode = pd.read_html(
    "http://kind.krx.co.kr/corpgeneral/corpList.do?method=download", header=0
)[0]

stock_information = list()

# Idiom fix: the original wrapped this zip in enumerate() but never used the
# index, so the counter is dropped.
for (
    name,
    symbol,
    sector,
    industry,
    listing_date,
    settlement_month,
    representative,
    homepage,
    region,
) in zip(
    dfstockcode.get("회사명"),
    dfstockcode.get("종목코드"),
    dfstockcode.get("업종"),
    dfstockcode.get("주요제품"),
    dfstockcode.get("상장일"),
    dfstockcode.get("결산월"),
    dfstockcode.get("대표자명"),
    dfstockcode.get("홈페이지"),
    dfstockcode.get("지역"),
):
    # Missing cells come back from pandas as float NaN; replace each with
    # "없음" ("none"). isinstance replaces the non-idiomatic type() == float.
    if isinstance(sector, float):
        sector = "없음"
    if isinstance(industry, float):
        industry = "없음"
    if isinstance(settlement_month, float):
        settlement_month = "없음"
    if isinstance(representative, float):
        representative = "없음"
    if isinstance(homepage, float):
        homepage = "없음"
    if isinstance(region, float):
        region = "없음"
    # Ticker symbols are 6 digits; restore leading zeros lost by the parser.
    symbol = str(symbol).zfill(6)
    stock_information.append(
        {
            "name": name,
            "symbol": symbol,
            "sector": sector,
            "industry": industry,
            "listing_date": listing_date,
            "settlement_month": settlement_month,
            "representative": representative,
            "homepage": homepage,
            "region": region,
        }
    )

with open("data.json", "w", encoding='utf-8') as file:
    # ensure_ascii=False keeps the Korean text readable in the output file.
    json.dump(stock_information, file, indent=4, ensure_ascii=False)
    file.write("\n")
|
[
"dojinkim119@gmail.com"
] |
dojinkim119@gmail.com
|
7c691685311f964776bd731d24ea73ab2268ea4a
|
a6719f4815ff41d3a1f09e9a63a64c4582d03702
|
/file_handling/read_file_demo.py
|
b131427b8bc801751761bb301ff7f1a6d3fecacc
|
[
"MIT"
] |
permissive
|
thanh-vt/python-basic-programming
|
8136007b8435dae6339ae33015fe536e21b19d1d
|
5fe817986fbef2649b4b03955f07b59d2a2035d8
|
refs/heads/main
| 2023-01-30T12:57:36.819687
| 2020-12-13T17:27:05
| 2020-12-13T17:27:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
f = open('demo_file.txt', 'r')
print(f.read())
# read only part of a file (first 5 characters)
f = open('demo_file.txt', 'r')
print(f.read(5))
|
[
"thanhvt@vissoft.vn"
] |
thanhvt@vissoft.vn
|
686006acd784aeb64f48aa38eeb51d5c566319c7
|
1d11ff770c5530de4c18e83d9474d4c09c4376d2
|
/igor/std-plugins/philips/scripts/philips.py
|
0a6d1b43a4de640d5a4642c054379da4b21d6527
|
[
"MIT"
] |
permissive
|
bobandrey37/igor
|
6660508639d90e7f44ea85146581685513b99ca2
|
41e163c8fa3da8ef13a337e1fe4268cf6fd7d07a
|
refs/heads/master
| 2020-05-01T06:27:36.954089
| 2019-03-04T14:45:26
| 2019-03-04T14:45:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,929
|
py
|
#!/usr/bin/python
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import object
import socket
import struct
import select
import json
import urllib.request, urllib.parse, urllib.error
import sys
DEBUG=False

# Subnet prefixes, apparently in preference order; not referenced anywhere in
# this file -- confirm whether a caller uses it before deleting.
ORDER = [
    ('192', '168', '1'),
    ('10', '0', '1'),
    ('10', '0', '2')
]

JOINTSPACE_PORT=1925  # JointSpace HTTP/JSON API port
VOODOO_PORT=2323      # UDP "voodoo" discovery port
VOODOO_VERSION=0x03010401
VPMT_DISCOVER=1
# NOTE(review): with ``unicode_literals`` in effect these string arguments are
# unicode, so struct.pack('...16s...') fails on Python 3 (needs bytes);
# this line only works under Python 2.
VOODOO_DISCOVER = struct.pack('<l28xll16s96s96s96s', VOODOO_VERSION, VPMT_DISCOVER, 0, '1234567890123456', 'Python Control', 'Jack', 'Philips.py')
class JointSpaceRemote(object):
    """Minimal client for a Philips JointSpace TV: UDP "voodoo" discovery
    plus the HTTP/JSON API on port 1925.

    Each ``cmd_*`` method is a CLI subcommand; ``cmd_help`` lists them.
    """

    def __init__(self, ipaddr=None):
        # NOTE(review): ipaddr is accepted but ignored; connect() always scans.
        self.tv = None  # IP address of the TV once discovered

    def connect(self):
        """Locate the TV. Returns True on success, False if not found."""
        # Despite the ``while``, this makes a single findTV() attempt:
        # success breaks out of the loop, failure returns False from inside it.
        while not self.tv:
            self.tv = self.findTV()
            if self.tv:
                break
            if DEBUG: print("TV not found, is it turned on?'")
            return False
        return True

    def findTV(self, ipaddr=None):
        """Send a voodoo discovery packet (unicast if ipaddr is given, else
        broadcast) and return the responding TV's IP, or None on timeout."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.bind(('', VOODOO_PORT))
        if ipaddr:
            sock.sendto(VOODOO_DISCOVER, (ipaddr, VOODOO_PORT))
        else:
            sock.sendto(VOODOO_DISCOVER, ('<broadcast>', VOODOO_PORT))
        while True:
            # Wait up to 5 seconds for any reply.
            result = select.select([sock], [], [], 5)
            if sock in result[0]:
                msg, sender = sock.recvfrom(2000)
                if DEBUG: print('Got message from', sender[0])
                # We also receive our own broadcast; skip senders that
                # resolve to this host's own addresses.
                myHostName = socket.gethostname()
                if not '.' in myHostName:
                    myHostName = myHostName + '.local'
                if not sender[0] in socket.gethostbyname_ex(myHostName)[2]:
                    # It is not our own message. It must be the Philips TV.
                    return sender[0]
            else:
                break
        return None

    def getData(self, path):
        """GET /1/<path> from the TV and return the decoded JSON object."""
        assert self.tv
        url = 'http://%s:1925/1/%s' % (self.tv, path)
        if DEBUG: print('GET', url)
        data = urllib.request.urlopen(url).read()
        ##print 'RAW', data
        data = json.loads(data)
        ##print 'DECODED', data
        return data

    def putData(self, path, data):
        """POST ``data`` (JSON-encoded) to /1/<path> on the TV."""
        assert self.tv
        url = 'http://%s:1925/1/%s' % (self.tv, path)
        data = json.dumps(data)
        if DEBUG: print('POST %s DATA %s' % (url, data))
        # Passing a body to urlopen makes this a POST.
        data = urllib.request.urlopen(url, data).read()
        if data:
            if DEBUG: print('PUTDATA RETURNED', data)

    def curWatching(self):
        """Return (source-id, human-readable name) for what is on screen."""
        assert self.tv
        data = self.getData('sources/current')
        source = data['id']
        if source == 'tv':
            # For the tuner, resolve the current channel's name.
            chanID = self.getData('channels/current')['id']
            chanInfo = self.getData('channels/%s' % chanID)
            name = chanInfo['name']
        else:
            names = self.getData('sources')
            name = names[source]['name']
        return source, name

    def cmd_sources(self):
        """List available input sources"""
        assert self.tv
        data = self.getData('sources')
        for source, descr in list(data.items()):
            print('%s\t%s' % (source, descr['name']))

    def cmd_channels(self):
        """List available TV channels"""
        assert self.tv
        data = self.getData('channels')
        all = []
        for fingerprint, descr in list(data.items()):
            all.append((int(descr['preset']), descr['name']))
        all.sort()  # order by preset number
        for preset, name in all:
            print('%s\t%s' % (preset, name))

    def cmd_source(self, source=None):
        """Set to the given input source (or print current source)"""
        assert self.tv
        if source:
            self.putData('sources/current', {'id' : source })
        else:
            data = self.getData('sources/current')
            print(data['id'])

    def cmd_channel(self, channel=None):
        """Set to the given TV channel, by name, number or ID (or list current channel)"""
        assert self.tv
        if channel:
            data = self.getData('channels')
            # Accept a channel ID, preset number, or display name.
            for chID, chDescr in list(data.items()):
                if chID == channel or chDescr['preset'] == channel or chDescr['name'] == channel:
                    self.putData('channels/current', { 'id' : chID })
                    self.putData('sources/current', {'id' : 'tv' })
                    return
            print('No such channel: %s' % channel, file=sys.stderr)
        else:
            data = self.getData('channels/current')
            chID = data['id']
            data = self.getData('channels')
            print('%s\t%s' % (data[chID]['preset'], data[chID]['name']))

    def cmd_volume(self, volume=None):
        """Change volume on the TV"""
        assert self.tv
        if volume is None:
            data = self.getData('audio/volume')
            muted = ' (muted)' if data['muted'] else ''
            print('%d%s' % (data['current'], muted))
        else:
            volume = int(volume)
            # Setting a volume also unmutes.
            self.putData('audio/volume', { 'muted' : False, 'current' : volume })

    def cmd_json(self, data=None):
        """Return all data as a JSON object"""
        if data is None:
            data = {}
            volumeData = self.getData('audio/volume')
            data['volume'] = volumeData['current']
            data['muted'] = volumeData['muted']
            data['source'] = self.getData('sources/current')['id']
            data['power'] = True
            data['ip-address'] = self.tv
            data['url'] = 'http://%s:1925/1/' % (self.tv)
        else:
            # NOTE(review): applying state from JSON is unimplemented.
            jData = json.loads(data)
            assert 0
        print(json.dumps(data))

    def cmd_help(self):
        """List available commands"""
        # Each cmd_* method's docstring doubles as its help text.
        for name in dir(self):
            if name[:4] == 'cmd_':
                method = getattr(self, name)
                doc = method.__doc__
                print('%s\t%s' % (name[4:], doc))
def main():
    """CLI entry point: ``philips.py [-d] [command [args...]]``."""
    # -d enables debug tracing; strip it before command dispatch.
    if len(sys.argv) > 1 and sys.argv[1] == '-d':
        global DEBUG
        DEBUG=True
        del sys.argv[1]
    tv = JointSpaceRemote()
    if not tv.connect():
        # The json subcommand gets machine-readable "off" state instead of
        # a human-oriented error.
        if len(sys.argv) == 2 and sys.argv[1] == 'json':
            print('{"power":false}')
            sys.exit(0)
        print("TV not found, is it turned on?", file=sys.stderr)
        sys.exit(1)
    if len(sys.argv) <= 1:
        # No command: report what is currently on screen.
        print(tv.curWatching())
    else:
        # Dispatch to the matching cmd_* method with the remaining args.
        cmdName = 'cmd_' + sys.argv[1]
        if not hasattr(tv, cmdName):
            print('Unknown command: %s. Use help for help' % sys.argv[1], file=sys.stderr)
            sys.exit(2)
        cmd = getattr(tv, cmdName)
        cmd(*sys.argv[2:])

if __name__ == '__main__':
    main()
|
[
"Jack.Jansen@cwi.nl"
] |
Jack.Jansen@cwi.nl
|
dd289bbe11d653c04e5f33bf697ff022530a0ef8
|
b7eb8279ebe2f525d27849d6ca24cc7270d30433
|
/processing/b2_demultiplex_stats.py
|
c941dc97629b4495d6d94f77ebdff996cd4bb1a9
|
[] |
no_license
|
maxwshen/prime-peptide
|
d0da277521537c6e09dfeca4afbe3297893ed61b
|
d72244e85683583c812d3bd106b6874da0a17b80
|
refs/heads/main
| 2023-04-07T19:07:03.371146
| 2021-04-09T20:36:07
| 2021-04-09T20:36:07
| 356,391,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,136
|
py
|
#
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util
import pandas as pd
# Default params
inp_dir = _config.OUT_PLACE + 'b_demultiplex/'
NAME = util.get_fn(__file__)
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)
exp_design = pd.read_csv(_config.DATA_DIR + 'exp_design.csv')
##
# Functions
##
def demultiplex_stats(nm):
    """Print the number of reads in the demultiplexed R1 fastq files of *nm*.

    Sums line counts over every R1 file in <inp_dir>/<nm>/ and prints
    lines / 4 (one fastq record = 4 lines: header, sequence, '+', quality).
    """
    num_lines = 0
    for fn in os.listdir(inp_dir + nm + '/'):
        if 'R1' not in fn:
            continue
        lc = util.line_count(inp_dir + nm + '/' + fn)
        # A well-formed fastq has a multiple of 4 lines. The old check
        # (lc % 2 == 1) missed truncations of an even number of lines.
        if lc % 4 != 0:
            print('Error: fq num lines not a multiple of 4')
        num_lines += lc
    # divide by 4 for fastq
    num_reads = num_lines / 4
    print(f'{nm}: {num_reads} reads')
    return
##
# qsub
##
# def gen_qsubs():
# # Generate qsub shell scripts and commands for easy parallelization
# print('Generating qsub scripts...')
# qsubs_dir = _config.QSUBS_DIR + NAME + '/'
# util.ensure_dir_exists(qsubs_dir)
# qsub_commands = []
# num_scripts = 0
# for idx in range(0, 60):
# command = 'python %s.py %s' % (NAME, idx)
# script_id = NAME.split('_')[0]
# # Write shell scripts
# sh_fn = qsubs_dir + 'q_%s_%s.sh' % (script_id, idx)
# with open(sh_fn, 'w') as f:
# f.write('#!/bin/bash\n%s\n' % (command))
# num_scripts += 1
# # Write qsub commands
# qsub_commands.append('qsub -V -wd %s %s' % (_config.SRC_DIR, sh_fn))
# # Save commands
# with open(qsubs_dir + '_commands.txt', 'w') as f:
# f.write('\n'.join(qsub_commands))
# print('Wrote %s shell scripts to %s' % (num_scripts, qsubs_dir))
# return
##
# Main
##
@util.time_dec
def main():
    """Print read counts for every sample in the experiment design.

    Iterates the 'Name' column of exp_design plus the catch-all 'other'
    bin, and returns this step's output directory.
    """
    print(NAME)
    for nm in exp_design['Name']:
        demultiplex_stats(nm)
    demultiplex_stats('other')
    return out_dir
if __name__ == '__main__':
    # Bug fix: main() takes no parameters, so the previous
    # main(split=sys.argv[1]) branch raised TypeError whenever a CLI
    # argument was supplied. Extra arguments are now simply ignored.
    main()
|
[
"maxwshen@gmail.com"
] |
maxwshen@gmail.com
|
6f6bbd7824afebb390fcad7b60006d07593eaeb0
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part005963.py
|
536d9214289d3cb10209ba7b567a2e1a915c7dca
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher141988(CommutativeMatcher):
    """Machine-generated matchpy commutative matcher (singleton).

    NOTE(review): this class is emitted by matchpy's code generator for
    the rubi rules — do not edit the tables by hand.
    """
    _instance = None
    # Pattern table: pattern id -> (constant count, constant multiset,
    # sequence-variable slots captured under Mul).
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.3.3.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.3.3.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily create and reuse the shared singleton instance.
        if CommutativeMatcher141988._instance is None:
            CommutativeMatcher141988._instance = CommutativeMatcher141988()
        return CommutativeMatcher141988._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 141987
        # The unreachable `yield` after `return` makes this a generator
        # that yields nothing: an intentionally empty match iterator.
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
bf4938d9e73a26fe0757893e1a32b04c141a9cdb
|
d1845a132213f2239fb0fea3502982dcfbdaca08
|
/youtube-favourites-export.py
|
965eff77359c96636c638fe0f16b20fabf00c131
|
[
"MIT"
] |
permissive
|
dw/scratch
|
361e9dac7693061b66ccd064633f4ed09875e1b2
|
c22c84d4d2d0347283e70192458ea50e08efcadb
|
refs/heads/master
| 2021-01-17T12:21:52.423557
| 2019-06-11T00:09:30
| 2019-06-11T00:09:30
| 3,239,854
| 30
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
# Export a YouTube account's favourites and subscriptions to XML files via
# the gdata v2 API. Python 2 only (print statements).
import gdata.youtube.client
client = gdata.youtube.client.YouTubeClient()
# NOTE(review): placeholder credentials — must be replaced before running.
client.client_login('email@gmail.com', 'password', 'exporter')
# Page through the favourites feed 50 entries at a time, accumulating all
# entries, then write the combined feed to disk.
entries = []
uri = 'https://gdata.youtube.com/feeds/api/users/default/favorites'
while True:
    print 'Fetch', uri
    feed = client.get_videos(uri=uri, **{'max-results': 50})
    entries += feed.entry
    if not feed.get_next_link():
        break
    uri = feed.get_next_link().href
# Stuff every page's entries back into the last feed object so one
# to_string() call serializes the whole collection.
feed.entry = entries
print 'total', len(entries)
with open('youtube-favorites.xml', 'w') as fp:
    fp.write(feed.to_string())
# get subs
#
# Same pagination dance for the subscriptions feed.
entries = []
uri = 'https://gdata.youtube.com/feeds/api/users/default/subscriptions'
while True:
    print 'Fetch', uri
    feed = client.get_feed(uri=uri, **{'max-results': 50})
    entries += feed.entry
    if not feed.get_next_link():
        break
    uri = feed.get_next_link().href
feed.entry = entries
print 'total', len(entries)
with open('youtube-subs.xml', 'w') as fp:
    fp.write(feed.to_string())
|
[
"dw@botanicus.net"
] |
dw@botanicus.net
|
676b57edf2543587624cb7fb53630425c91c775f
|
7c1892d60f07848756cefe0dea0cce7292c7c572
|
/database/add.py
|
a4e5e1046441fc9f119b92398f0c094ccabc923e
|
[] |
no_license
|
cherryMonth/BWC
|
31d92a583b0ff35a18368a2c2ccfdb8d549dd7e1
|
187430bbc9e81d1cbc8721fd423f9b0488e0e78d
|
refs/heads/master
| 2021-01-01T17:29:17.655717
| 2017-07-28T04:40:20
| 2017-07-28T04:40:20
| 98,082,540
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
# coding=utf-8
import csv
import os
import collections
class Add(object):
    """Append rows (a list of dicts) to a CSV file, writing a header row
    only when the file is newly created."""

    @staticmethod
    def add(filename, key_list=None):
        """Append *key_list* to *filename*.

        Each dict's columns are ordered by the sum of the ordinals of the
        key's characters, so every row uses the same deterministic column
        order. Returns True on success, False on an I/O error.

        Changes from the original: Python-2-only `print` statement replaced
        by the (behaviorally identical) call form, leftover debug prints
        removed, `index` renamed to `write_header`, and the redundant
        explicit close() inside the `with` block dropped.
        """
        # Header is only needed when the file does not exist yet.
        write_header = not os.path.exists(filename)
        try:
            with open(filename, 'ab') as csv_file:
                if not key_list:
                    return True  # nothing to write

                def num(string):
                    # Deterministic column weight: sum of character ordinals.
                    count = 0
                    for n in string:
                        count = count + ord(n)
                    return count

                ordered_rows = []
                for key in key_list:
                    d = collections.OrderedDict()
                    for k in sorted(key.items(), key=lambda x: num(x[0])):
                        d[k[0]] = k[1]
                    ordered_rows.append(d)
                key_list = ordered_rows

                row_name = key_list[0].keys()  # column names
                writer = csv.DictWriter(csv_file, fieldnames=row_name)
                if write_header:
                    writer.writerow(dict(zip(row_name, row_name)))  # header row
                for key in key_list:
                    writer.writerow(key)  # data rows
                return True
        except IOError:
            print("File open error : " + filename + "\nplease check the filename")
            return False
if __name__ == '__main__':
Add().add('b.csv',[{'WeChatID': 'wonka80', 'TeacherName': '王珂'}])
|
[
"1115064450@qq.com"
] |
1115064450@qq.com
|
fcb0ac9a2e90fb3003f163171bdf3f9429306a81
|
e43ff8f429a6938a4f16edc4b2c94976acbff157
|
/ABC/HELLO SPACE/c.py
|
7e33c484056d96fed727123096a19a47f8c58635
|
[] |
no_license
|
Soule50431/AtCoder
|
4fcd6ab6b771d55c90dc62aedd75eb81fd067466
|
118ac5d03630ce143fb50153402eee38e988ae0c
|
refs/heads/master
| 2023-06-18T13:07:13.843361
| 2021-07-14T01:56:20
| 2021-07-14T01:56:20
| 357,827,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
n = int(input())
team = [tuple(map(int, input().split())) for i in range(n)]
def check(x, members=None):
    """Return True if some 3 members together reach level x in all 5 skills.

    Each member is a 5-tuple of skill levels; a member is reduced to a
    5-bit mask of skills at level >= x, and we test whether any three
    masks OR together to 0b11111.

    Fixes: the original reused the parameter name `x` as the loop variable
    over masks, shadowing it; the module-level `team` is now only a
    default (backward compatible — existing check(mid) calls still work).
    """
    if members is None:
        members = team  # module-level roster read from stdin by the script
    masks = set()
    for member in members:
        masks.add(sum(1 << i for i in range(5) if member[i] >= x))
    for a in masks:
        for b in masks:
            for c in masks:
                if a | b | c == 31:
                    return True
    return False
ok = 0
ng = 10**9 + 1
while ng - ok > 1:
mid = (ng + ok) // 2
if check(mid):
ok = mid
else:
ng = mid
print(ok)
|
[
"h.ekcero.el6no11@outlook.jp"
] |
h.ekcero.el6no11@outlook.jp
|
9f7513aceb03d3a629148eb93790f2bd922608ca
|
6c2ecefb12be6b04f597e3fb887d9389050aa7e1
|
/DjangoCourse/第三周/djangologin/djangologin/settings.py
|
ca207ee45368d5f381d99a9266ac9e571e9357b6
|
[] |
no_license
|
GmyLsh/learngit
|
99d3c75843d2b0b873f26e098025832985c635b3
|
3e7993c7119b79216fea24e5e35035336e4f5f5b
|
refs/heads/master
| 2020-04-12T09:11:55.068312
| 2018-12-19T07:19:42
| 2018-12-19T07:19:42
| 162,395,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,616
|
py
|
"""
Django settings for djangologin project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5cwed$8ury*r$q%)b-vm$(x@z_sqrja($d)nxu#of#&+(3zwg1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'login.apps.LoginConfig',
'hashlogin.apps.HashloginConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangologin.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangologin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'login',
'USER':'root',
'PASSWORD':'123456',
'HOST':'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# URL prefix for per-app static files
STATIC_URL = '/static/'
# Additional static directory under the project root
STATICFILES_DIRS=[os.path.join(BASE_DIR,'static')]
# Override the default user model with a custom one.
# Syntax: '<app label>.<custom user model class name>'
AUTH_USER_MODEL='login.UserModel'
# The @login_required decorator requires LOGIN_URL: Django redirects
# unauthenticated users to this address automatically.
LOGIN_URL='/login/'
|
[
"469192981@qq.com"
] |
469192981@qq.com
|
b7ea7c196a657c03362e5a72b8dc3b5a15f15f9c
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/ad_group_criterion_simulation_service/transports/base.py
|
ecc835952d95a794ce1333feaca5eb673c52f842
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,855
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import ad_group_criterion_simulation
from google.ads.googleads.v6.services.types import ad_group_criterion_simulation_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads-googleads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AdGroupCriterionSimulationServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AdGroupCriterionSimulationService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_ad_group_criterion_simulation: gapic_v1.method.wrap_method(
self.get_ad_group_criterion_simulation,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_ad_group_criterion_simulation(self) -> typing.Callable[
[ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest],
ad_group_criterion_simulation.AdGroupCriterionSimulation]:
raise NotImplementedError
__all__ = (
'AdGroupCriterionSimulationServiceTransport',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
49c05d2676b8eed51218f2ef3306bf504397a1b1
|
0f0a7adfae45e07a896c5cd5648ae081d4ef7790
|
/python数据结构/python黑马数据结构/排序于搜索/桶排序2.py
|
081bee496e92f52adc6aa7b5f6d0b08d0687b4c3
|
[] |
no_license
|
renlei-great/git_window-
|
e2c578544c7a8bdd97a7a9da7be0464d6955186f
|
8bff20a18d7bbeeaf714aa49bf15ab706153cc28
|
refs/heads/master
| 2021-07-19T13:09:01.075494
| 2020-06-13T06:14:37
| 2020-06-13T06:14:37
| 227,722,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
lista = [12, 4, 5, 6, 22, 3, 3, 3, 3, 43, 654, 765, 7, 234]
def pail_sort(alist):
    """Counting ("bucket") sort for non-negative ints; returns a new sorted list.

    Fixes over the original:
    - the maximum was found with a fragile adjacent-compare loop that could
      leave max_num unbound (NameError on already-ascending input such as
      [1, 2, 3]); use max() instead.
    - the output list was assembled by exec()-ing generated source; use
      list.extend() directly.
    - empty input now returns [] instead of crashing; debug prints removed.
    """
    if not alist:
        return []
    # counts[v] = number of occurrences of value v.
    counts = [0] * (max(alist) + 1)
    for value in alist:
        counts[value] += 1
    result = []
    for value, count in enumerate(counts):
        if count:
            result.extend([value] * count)
    return result
if __name__ == "__main__":
new_li = pail_sort(lista)
print(new_li)
|
[
"1415977534@qq.com"
] |
1415977534@qq.com
|
b12de6c619935508db19c3f39260210233e6a4ab
|
a0801d0e7325b31f0383fc68517e208680bb36d6
|
/Kattis/rijeci.py
|
7d0bdadf29031e7404c2c5a61ad6cc2e938add57
|
[] |
no_license
|
conormccauley1999/CompetitiveProgramming
|
bd649bf04438817c7fa4755df2c2c7727273b073
|
a7e188767364be40f625612af3d16182f2d8d4de
|
refs/heads/master
| 2023-05-14T13:19:32.678134
| 2023-05-11T16:07:33
| 2023-05-11T16:07:33
| 179,089,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
# Python 2 script: reads K and prints a pair produced by a Fibonacci-like
# recurrence (each step the new pair is the elementwise sum of the
# previous two pairs).
K = int(raw_input())
if K == 1:
    # Base case: first pair is printed directly.
    print 0, 1
else:
    # x, y hold two consecutive pairs of the recurrence.
    x = [0, 1]
    y = [1, 1]
    for i in range(0, K - 1):
        t = [y[0]+x[0], y[1]+x[1]]
        x = y
        y = t
    print x[0], y[0]
|
[
"conormccauley1999@gmail.com"
] |
conormccauley1999@gmail.com
|
50f5477a0bbb10e0d356fbe8aa777cae29d9dffa
|
6ec91b363b077bffd33f15300a0935124e9fb915
|
/Cracking_the_Code_Interview/Leetcode/14.DP/120.Triangle.py
|
a7e2879f610fe04793e1b1f2c35318dc4b3ff0fc
|
[] |
no_license
|
lzxyzq/Cracking_the_Coding_Interview
|
03232515ae8eb50394d46322d36b230d1a626fcf
|
79dee7dab41830c4ff9e38858dad229815c719a0
|
refs/heads/master
| 2023-06-05T19:52:15.595289
| 2021-06-23T22:46:02
| 2021-06-23T22:46:02
| 238,068,000
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
'''
@Author: your name
@Date: 2020-06-30 18:43:37
@LastEditTime: 2020-06-30 19:39:19
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /Cracking_the_Code_Interview/Leetcode/14.DP/120.Triangle.py
'''
# Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
# For example, given the following triangle
# [
# [2],
# [3,4],
# [6,5,7],
# [4,1,8,3]
# ]
# The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
class Solution:
    def minimumTotal(self, triangle: List[List[int]]) -> int:
        """Minimum top-to-bottom path sum in a triangle.

        Folds the triangle bottom-up in place: each cell becomes the best
        sum reachable from it, so the apex ends up holding the answer.
        """
        for row in range(len(triangle) - 2, -1, -1):
            current, below = triangle[row], triangle[row + 1]
            for col, value in enumerate(current):
                current[col] = value + min(below[col], below[col + 1])
        return triangle[0][0]
|
[
"lzxyzq@gmail.com"
] |
lzxyzq@gmail.com
|
bf73bd5eda0d1303716e539c0d40f57d6ab13de8
|
22fe6ed51715486ebbc09e404504ed4d7a28c37d
|
/python-katas/57_CountHi.py
|
6ef69b2da8a4251f4d619f0a62ab8c3d5042d32a
|
[] |
no_license
|
Jethet/Practice-more
|
1dd3ff19dcb3342a543ea1553a1a6fb0264b9c38
|
8488a679730e3406329ef30b4f438d41dd3167d6
|
refs/heads/master
| 2023-01-28T14:51:39.283741
| 2023-01-06T10:14:41
| 2023-01-06T10:14:41
| 160,946,017
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Return the number of times the string 'hi' appears in a given string.
def count_hi(str):
    """Return how many (non-overlapping) times 'hi' occurs in str."""
    occurrences = str.count('hi')
    return occurrences
# CodingBat solution:
def count_hi(str):
    """Count occurrences of 'hi' by scanning two-character windows."""
    total = 0
    for start in range(len(str) - 1):
        if str[start:start + 2] == 'hi':
            total = total + 1
    return total
print(count_hi('abc hi ho'))
print(count_hi('ABChi hi'))
print(count_hi('hihi'))
|
[
"henriette.hettinga@gmail.com"
] |
henriette.hettinga@gmail.com
|
13ab0721b3a33f3abbaaf46d0378e8b4649ba27f
|
d1f15554df2d5c0f74ddbcba6e870359841f682b
|
/wagtail/migrations/0057_page_locale_fields_notnull.py
|
8f18589b5c9f794cba254c26312dd2d73645c5f1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
wagtail/wagtail
|
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
|
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
|
refs/heads/main
| 2023-09-04T06:22:51.601208
| 2023-09-01T15:22:00
| 2023-09-01T15:22:00
| 16,479,108
| 12,974
| 3,580
|
BSD-3-Clause
| 2023-09-14T10:45:04
| 2014-02-03T12:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 793
|
py
|
# Generated by Django 2.2.10 on 2020-07-13 10:17
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Tighten Page.locale and Page.translation_key to non-null.

    Follows 0056 (which populated these fields), so every existing row
    already has a value when the constraint is applied.
    """

    dependencies = [
        ("wagtailcore", "0056_page_locale_fields_populate"),
    ]

    operations = [
        # locale: non-editable FK; PROTECT prevents deleting a Locale that
        # still has pages.
        migrations.AlterField(
            model_name="page",
            name="locale",
            field=models.ForeignKey(
                editable=False,
                on_delete=django.db.models.deletion.PROTECT,
                related_name="+",
                to="wagtailcore.Locale",
            ),
        ),
        # translation_key: UUID generated per page, not user-editable.
        migrations.AlterField(
            model_name="page",
            name="translation_key",
            field=models.UUIDField(default=uuid.uuid4, editable=False),
        ),
    ]
|
[
"matt@west.co.tt"
] |
matt@west.co.tt
|
83852e477286aff2176a0246871748edca6bcef8
|
c733e6b433914a8faba256c7853f5cf2cd39c62a
|
/Python/Leetcode Daily Practice/Heap/692. Top K Frequent Words.py
|
db9a25d3ab733cd3cdd4dd640983c8602e54fffe
|
[] |
no_license
|
YaqianQi/Algorithm-and-Data-Structure
|
3016bebcc1f1356b6e5f3c3e588f3d46c276a805
|
2e1751263f484709102f7f2caf18776a004c8230
|
refs/heads/master
| 2021-10-27T16:29:18.409235
| 2021-10-14T13:57:36
| 2021-10-14T13:57:36
| 178,946,803
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
class Solution(object):
    def topKFrequent(self, words, k):
        """Return the k most frequent words, most frequent first; ties are
        broken alphabetically (via the (-freq, word) tuple ordering).

        Bug fix: the original built the list of (-freq, word) pairs but
        never heapified it, so heapq.heappop was applied to a plain list
        whose heap invariant did not hold and could return wrong elements.

        Complexity: O(n) to count and heapify, O(k log n) for the pops.
        """
        from collections import Counter
        import heapq
        counts = Counter(words)                                   # O(n)
        heap = [(-freq, word) for word, freq in counts.items()]   # O(n)
        heapq.heapify(heap)                                       # O(n)
        return [heapq.heappop(heap)[1] for _ in range(k)]         # O(k log n)
print(Solution().topKFrequent(words=["i", "love", "leetcode", "i", "love", "coding"], k = 2))
|
[
"alicia.qyq@gmail.com"
] |
alicia.qyq@gmail.com
|
55b52764902ce153ec4c19dc6da9439dee543669
|
9a0eb3e292d57b59198c7c66a994372ced9cfa5b
|
/nodes/1.x/python/String.ReplaceIllegalFilenameCharacters.py
|
a922b676f1485306810fd884001c9016638051ed
|
[
"MIT"
] |
permissive
|
andydandy74/ClockworkForDynamo
|
544ddf0893f5c0072fca7934f4e128001771f767
|
528400c667c4c3f2b51814af84e85c8fab8a8059
|
refs/heads/master
| 2023-08-19T03:07:33.489926
| 2023-08-13T04:31:17
| 2023-08-13T04:31:17
| 15,043,988
| 184
| 100
|
MIT
| 2023-09-04T18:47:40
| 2013-12-09T10:11:01
|
Python
|
UTF-8
|
Python
| false
| false
| 430
|
py
|
# Dynamo node body: IN[0] = list of strings, IN[1] = replacement string.
# Replaces every character that is illegal in a Windows file name
# (plus '^') with the replacement.
strings = IN[0]
replace = IN[1]

# Same characters, in the same order, as the original chain of ten
# .replace() calls — order matters if `replace` itself contains one of them.
ILLEGAL_CHARS = '/?<>\\:*|"^'

strlist = []
for text in strings:  # renamed from `str`, which shadowed the builtin
    for ch in ILLEGAL_CHARS:
        text = text.replace(ch, replace)
    strlist.append(text)
OUT = strlist
|
[
"dieckmann@caad.arch.rwth-aachen.de"
] |
dieckmann@caad.arch.rwth-aachen.de
|
48b15be505f68c01bcbe37105ce08e8d80a90959
|
93b704572dd4f36ae488f931fbe8372a215b13ad
|
/clean_solutions/day3.py
|
d56d9cf4949958790cfde7211c768208ff456079
|
[] |
no_license
|
Goldenlion5648/AdventOfCode2020Live
|
7cfdf6804402fdf42d10c70742579522c487f501
|
e3f5908e8747991b50bdde339ad9ecba527b1168
|
refs/heads/master
| 2023-04-04T12:48:21.318124
| 2021-04-08T16:42:13
| 2021-04-08T16:42:13
| 317,414,264
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
'''
Right 1, down 1.
Right 3, down 1. (This is the slope you already checked.)
Right 5, down 1.
Right 7, down 1.
Right 1, down 2.
'''
from collections import *
with open("input3.txt") as f:
# a = list(map(int,f.read().strip().split("\n")))
board = f.read().strip().split("\n")
def slide(xChange, yChange, grid=None):
    """Count trees ('#') hit while stepping right xChange / down yChange.

    Bug fix: the original ignored both parameters and always moved
    (3, 1), so every part-2 slope silently returned the part-1 answer.

    grid defaults to the module-level `board`, so the existing
    slide(x, y) calls keep working; the column wraps modulo the row width.
    """
    if grid is None:
        grid = board
    width = len(grid[0])
    posX = 0
    posY = 0
    count = 0
    while posY < len(grid):
        if grid[posY][posX] == "#":
            count += 1
        posX = (posX + xChange) % width
        posY += yChange
    return count
print("part 1", slide(3, 1))
#part 2
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
nums = []
for x, y in slopes:
nums.append(slide(x, y))
answer = 1
for i in nums:
answer *= i
print("part 2", answer)
|
[
"coboudinot@gmail.com"
] |
coboudinot@gmail.com
|
eabc327817af3553828fe0ffc5f9a44d5e5d1951
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/mushroomPicker_20200729130815.py
|
f6b760f3a1112376d639f98641c3cd38b7ba4176
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
'''
You are given a non-empty, zero-indexed array A of n (1 ≤ n ≤ 100 000) integers
a0, a1, . . . , an−1 (0 ≤ ai ≤ 1 000). This array represents the number of mushrooms growing on the
consecutive spots along a road. You are also given integers k and m (0 ≤ k, m < n).
A mushroom picker is at spot number k on the road and should perform m moves. In
one move she moves to an adjacent spot. She collects all the mushrooms growing on spots
she visits. The goal is to calculate the maximum number of mushrooms that the mushroom
picker can collect in m moves.
For example, consider array A such that:
'''
def count_totals(p,x,y):
    # NOTE(review): ignores x and only reads p[y+1]; looks unfinished —
    # a prefix-sum range query would normally use both bounds.
    return p[y+1]
def mushroom(A,k,m):
    """WIP: maximum mushrooms collectable in m moves starting at spot k.

    NOTE(review): visibly unfinished — `result` is initialized but never
    updated or returned, and the loop only prints the right window bound.
    """
    # A - the array of mushroom counts per spot
    # k - the picker's starting position (e.g. 4)
    # m - number of moves they can make (e.g. 6)
    n = len(A)
    result = 0
    # pref[i] = A[0] + ... + A[i] (prefix sums for O(1) range totals).
    pref = [0] * n
    pref[0] = A[0]
    for i in range(1,n):
        pref[i] = pref[i-1] + A[i]
    # Try every number p of initial steps to the left, then walk right.
    for p in range(min(m,k) + 1):
        # p----> 0,1,2,3,4
        # k ----> 4 ,k-p ->4,3,2,1,0
        left_pos = k-p
        right_pos = min(n-1,max(k,k+m-2 *p))
        print('right',right_pos)
        # print(left_pos)
mushroom([2,3,7,5,1,3,9],4,6)
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
ad5d5361d58d186ea6682f1b01c9158b0e151206
|
1255b4c76aa2def0d8ca07ff75ef264383de36e3
|
/main.py
|
8b482da2a9acf7567e56eec60c0e4c881703abac
|
[] |
no_license
|
thepixelboy/flask-auth
|
bcbe2ce182e54743acfa70860f975b059952c65c
|
e49903b65c9451891b61138e1b5453ea29f733d1
|
refs/heads/main
| 2023-07-10T00:41:46.442728
| 2021-08-23T16:56:39
| 2021-08-23T16:56:39
| 399,182,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,337
|
py
|
from os import name
from flask import Flask, render_template, request, url_for, redirect, flash, send_from_directory
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user
app = Flask(__name__)
app.config["SECRET_KEY"] = "flown-actinium-cam-algae"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///users.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the session's stored id."""
    return User.query.get(int(user_id))
##CREATE TABLE IN DB
class User(UserMixin, db.Model):
    """Application user; UserMixin supplies the Flask-Login interface."""
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    email = db.Column(db.String(100), unique=True)  # login identifier
    password = db.Column(db.String(100))  # salted pbkdf2:sha256 hash (set in register())
    name = db.Column(db.String(1000))  # display name
# Line below only required once, when creating DB.
# db.create_all()
@app.route("/")
def home():
return render_template("index.html", logged_in=current_user.is_authenticated)
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "POST":
if User.query.filter_by(email=request.form.get("email")).first():
# User already exists
flash("You've already signed up with that email, log-in instead.")
return redirect(url_for("login"))
hash_and_salted_password = generate_password_hash(
request.form.get("password"), method="pbkdf2:sha256", salt_length=8
)
new_user = User(
email=request.form.get("email"), name=request.form.get("name"), password=hash_and_salted_password
)
db.session.add(new_user)
db.session.commit()
# Log-in and authenticate user after adding new user data to the database
login_user(new_user)
return redirect(url_for("secrets"))
return render_template("register.html", logged_in=current_user.is_authenticated)
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
email = request.form.get("email")
password = request.form.get("password")
# Find user by email
user = User.query.filter_by(email=email).first()
# Email doesn't exist
if not user:
flash("That email does not exist, please try again.")
return redirect(url_for("login"))
# Password incorrect
elif not check_password_hash(user.password, password):
flash("Password incorrect, please try again.")
return redirect(url_for("login"))
# Email exists and password correct
else:
login_user(user)
return redirect(url_for("secrets"))
return render_template("login.html", logged_in=current_user.is_authenticated)
@app.route("/secrets")
@login_required
def secrets():
return render_template("secrets.html", name=current_user.name, logged_in=True)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for("home"))
@app.route("/download")
@login_required
def download():
return send_from_directory("static", path="files/cheat_sheet.pdf", as_attachment=True)
if __name__ == "__main__":
app.run(debug=True)
|
[
"34570952+thepixelboy@users.noreply.github.com"
] |
34570952+thepixelboy@users.noreply.github.com
|
5317b35317ba1ab2da314d6bd8ad9be085d19480
|
13f4a06cd439f579e34bf38406a9d5647fe7a0f3
|
/nn_ns/parsing/FS/readme.py
|
d02accd2f405f4744acde147164ec31870528870
|
[] |
no_license
|
edt-yxz-zzd/python3_src
|
43d6c2a8ef2a618f750b59e207a2806132076526
|
41f3a506feffb5f33d4559e5b69717d9bb6303c9
|
refs/heads/master
| 2023-05-12T01:46:28.198286
| 2023-05-01T13:46:32
| 2023-05-01T13:46:32
| 143,530,977
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
3 froms:
FSM, [Rule], regular_expression
FSM:
TotalState : FSM
formal - FSM{initial :: TotalState,
finals :: Set TotalState,
error :: TotalState,
transition :: Map TotalState (Map Symbol TotalState)
}
informal - NFSM{initials :: Set PartialState,
finals :: Set PartialState,
error :: Set PartialState, # empty_set
transition :: Map PartialState (Map (Maybe Symbol) (Set PartialState))
}
PartialState : FA
# esp cleaned_dfa
formal - DFA {initial :: Maybe PartialState,
finals :: Set PartialState,
error :: Maybe PartialState, # nothing
transition :: Map PartialState (Map Symbol PartialState)
}
informal - NDFA {initials :: Set PartialState,
finals :: Set PartialState,
error :: Set PartialState, # empty_set
transition :: Map PartialState (Map (Maybe Symbol) (Set PartialState))
}
{initials::Set PartialState, transition::[Rule]}: # NDFA-RuleForm # a direct map into/from a NDFA
FormalNDFARule :: (PartialState, Maybe (Maybe Symbol, PartialState))
(a, Nothing) -> [a in finals]
(a, Just (maybe_symbol, b)) -> "a = maybe_symbol b"
InformalNDFARule :: (Nonterminal, [Symbol], Maybe Nonterminal)
where PartialState = (Nonterminal, Integer)
(a, ls, Nothing) -> [(a, len(ls)) in finals]
regular_expression: # RE-RuleForm # using star but without recur (even tail-recur) # DAG
BasicRe a = ReConcat [BasicRe a]
| ReUnion [BasicRe a]
| ReStar (BasicRe a)
| ReSymbol a
ExtendedRe a = BasicRe a
| ReComplement a
| ReIntersect a
|
[
"wuming_zher@zoho.com.cn"
] |
wuming_zher@zoho.com.cn
|
51a7abc5c786abeb3e55dc95ed53aef57e85b34a
|
94df6050f2a262da23f62dd678ccc4366b7657fc
|
/temporary/bin/tqdm
|
9efa8c44cdb5ce4f8dc9684a0b3710267894d570
|
[] |
no_license
|
EkenePhDAVHV/phd-autonomous-cars-frank
|
29cc2fc608db53d4d060422022dc5019cf6360f0
|
1daed3425bfad99dac31543fbeb7950e25aa2878
|
refs/heads/main
| 2023-04-29T06:02:59.444072
| 2021-05-23T11:04:07
| 2021-05-23T11:04:07
| 357,157,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
#!/home/ekene/PycharmProjects/phd-autonomous-cars-frank/temporary/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools for the `tqdm` CLI.
import re
import sys
from tqdm.cli import main
if __name__ == '__main__':
    # Strip the '-script.pyw' / '.exe' suffix Windows launchers append to argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"e.f.ozioko@pgr.reading.ac.uk"
] |
e.f.ozioko@pgr.reading.ac.uk
|
|
b3673d87cd687e139daef7c90d95e0a9126b841d
|
954df5fb1ceaf64fe3004e0b072b78024065cdd0
|
/virtual/Lib/site-packages/future/moves/_markupbase.py
|
41c4f96c55552b677772b61ba497a16ba84b3df8
|
[] |
no_license
|
chelseyrandolph/cs440_DatabaseUI
|
7dc5b4c3d0a4e72023db61f4a613fc889bc69f86
|
28355cdfe0f4732568f1f8e43e2ce7809b4fc260
|
refs/heads/master
| 2022-06-06T19:18:31.819483
| 2020-05-05T20:51:58
| 2020-05-05T20:51:58
| 259,436,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
# python-future compatibility shim for the renamed stdlib module:
# Python 2's `markupbase` became `_markupbase` in Python 3.
from __future__ import absolute_import
from future.utils import PY3
if PY3:
    # Python 3: nothing to re-export; importers use stdlib _markupbase.
    pass
else:
    __future_module__ = True
    # Python 2: re-export everything from the old module name.
    from markupbase import *
|
[
"chelseyrrandolph@gmail.com"
] |
chelseyrrandolph@gmail.com
|
17af078221d30f88e222eb9d6c5861dc1a20e88a
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_disintegrating.py
|
532e7be5907664219ed247bb6f5173a80c0ad3de
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
from xai.brain.wordbase.verbs._disintegrate import _DISINTEGRATE


# Word-form entry: the present participle "disintegrating".
class _DISINTEGRATING(_DISINTEGRATE):
    """Inflected form of the base verb entry `_DISINTEGRATE`."""

    def __init__(self):
        _DISINTEGRATE.__init__(self)
        # Identifying metadata for this word form.
        self.specie = 'verbs'
        self.basic = "disintegrate"
        self.name = "DISINTEGRATING"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
50ac6709acfc86d952d4ef089c648926671f477b
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/sacremoses/corpus.py
|
721a5bbd1be20eb165825ba3c6dae6936c425b69
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:d72f15b792d94c6d388af9f8c568d412c657f055c925abdae378464c275c54a4
size 5016
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
d31f11bddf9791dee17880f0c0425c13ad495a90
|
ab6c6559d9cfac36c3c4ece192fa2300767662d1
|
/Python Game Development for Beginners - Working Files/Chapter 5/Increasing Challenge with Levels Part I/main.py
|
38ecd797fc5c5f6c15d8d84cfd91391e4842c047
|
[] |
no_license
|
Igor-Nosatov/PythonGameDev_Trinket
|
962b86572c74c64652a24768dfec2101fcae221f
|
e6166f69307ded6880b0aaa3299c0a151807bb9c
|
refs/heads/master
| 2020-06-24T20:22:57.187289
| 2016-05-03T10:33:26
| 2016-05-03T10:33:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
# Import the turtle module so we can use all the neat code it contains.
import turtle
from helpercode import BoxTurtle, printwin, checkpos, maketurtles
from time import sleep
from random import randint, choice
# Container for our BoxTurtle objects (the helper builds and places them).
boxturtles = maketurtles()
# `tina` is the player-controlled Turtle(); shaped like a turtle, pen lifted
# so she does not draw while moving.
tina = turtle.Turtle()
tina.shape('turtle')
tina.penup()
# `screen` is the Screen() object that will handle key presses.
screen = turtle.Screen()
# Keyboard controls: rotate in 11-degree steps.
def go_left():
    """Rotate tina 11 degrees counter-clockwise."""
    tina.left(11)
def go_right():
    """Rotate tina 11 degrees clockwise."""
    tina.right(11)
# Movement handlers also check box intersections and screen bounds.
def go_forward():
    """Move tina forward 10 units, then run collision and bounds checks."""
    tina.forward(10)
    check_intersect()
    checkpos([tina])
def go_backward():
    """Move tina backward 10 units, then run collision and bounds checks."""
    tina.backward(10)
    check_intersect()
    checkpos([tina])
# This function loops through the `boxturtles` list and uses each
# box's `intersect()` method to check whether it intersects
# with tina.
def check_intersect():
    """Mark and flash any not-yet-hit box that tina currently touches."""
    for box in boxturtles:
        if not box.hit and box.intersect(tina):
            box.hit = True
            box.flash()
# Tell the program which functions go with which keys.
screen.onkey(go_left, 'Left')
screen.onkey(go_right, 'Right')
screen.onkey(go_forward, 'Up')
screen.onkey(go_backward, 'Down')
# Debugging function - press 'w' to hit all but one turtle.
def win():
    """Cheat/debug helper: flash and mark every box except the first."""
    for t in boxturtles[1:]:
        screen.tracer(0)
        t.flash()
        t.hit = True
    screen.tracer(1)
screen.onkey(win, 'w')
# Main loop: re-schedules itself every .1 seconds and returns once the
# player has hit every box (the win condition, not a loss).
def play():
    """One game tick: check the win condition, then move or wake a random box."""
    # Tell the screen to listen for key presses.
    screen.listen()
    # Collect each box's hit state.
    hits = []
    for box in boxturtles:
        hits.append(box.hit)
    # If all boxes are hit, the game is over!
    if False not in hits:
        printwin(tina)
        return
    mover = choice(boxturtles)
    if not mover.hit:
        mover.move()
    # Sometimes (5% chance per tick) an already-hit turtle will awaken.
    else:
        if randint(0,100) < 5:
            mover.awaken()
    checkpos(boxturtles)
    # Start the function over in 100 milliseconds (.1 seconds).
    screen.ontimer(play, 100)
play()
turtle.done()
|
[
"lrbeaver@gmail.com"
] |
lrbeaver@gmail.com
|
a3f236ba9acc0a4b6555b96f6a332662b412630d
|
4591b4c66f443a2a54c858a8f3b529b8f388a5e4
|
/workshops/migrations/0009_auto_20141201_0016.py
|
63d5f5e8ede381a4c432f6a8b3b7406a26f704cf
|
[
"MIT"
] |
permissive
|
sburns/amy
|
39e11b48212304c7620e56a66c2f585d3d5951ae
|
7a315ba934f45e2234aaf1ea0e953b88a6239e10
|
refs/heads/master
| 2020-12-28T20:31:22.103801
| 2015-01-20T20:27:31
| 2015-01-20T20:27:31
| 27,539,122
| 0
| 1
| null | 2015-01-27T17:43:06
| 2014-12-04T12:18:40
|
Python
|
UTF-8
|
Python
| false
| false
| 451
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# Auto-generated Django migration: relaxes Person.email so it may be NULL
# while remaining unique when present. Generated files are kept byte-for-byte.
class Migration(migrations.Migration):
    dependencies = [
        ('workshops', '0008_person'),
    ]
    operations = [
        migrations.AlterField(
            model_name='person',
            name='email',
            field=models.CharField(max_length=100, unique=True, null=True),
            preserve_default=True,
        ),
    ]
|
[
"gvwilson@third-bit.com"
] |
gvwilson@third-bit.com
|
ff27d1695dcafdf6c0990e339bae4ebdc384fe83
|
c5a921726a3805663d26a2dbaa47e49497931d4e
|
/Algorithms/challenges/lc437_path_sum_3.py
|
3c2379a605bdceaccb345b85e6736d43f336db08
|
[] |
no_license
|
snowdj/cs_course
|
a50d07548198b4202e8abde01ec572e2cce38ab3
|
fa6504cb5145d10952f4615478fa745f4b35ba13
|
refs/heads/master
| 2020-03-17T15:18:52.190747
| 2018-05-13T08:08:51
| 2018-05-13T08:08:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,111
|
py
|
"""
Time: O(n)
Space: O(n)
You are given a binary tree in which each node contains an integer value.
Find the number of paths that sum to a given value.
The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).
The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000.
Example:
root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
10
/ \
5 -3
/ \ \
3 2 11
/ \ \
3 -2 1
Return 3. The paths that sum to 8 are:
1. 5 -> 3
2. 5 -> 2 -> 1
3. -3 -> 11
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node: an integer payload plus left/right child links."""

    def __init__(self, x):
        self.val = x      # node payload
        self.left = None  # left child (TreeNode or None)
        self.right = None # right child (TreeNode or None)
# Brute-force DFS. Pre-order traversal.
# Time: O(nlg(n)), worst O(n^2) Space: O(lg(n)), worst O(n)
class Solution:
    def pathSum(self, root, target):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int

        Count downward paths summing to `target` by keeping the root-to-node
        value path and testing every suffix of it at each node.
        """
        count = 0
        path = []  # values on the path from the root to the current node

        def visit(node, running):
            nonlocal count
            if node is None:
                return
            running += node.val
            # Path that starts at the root and ends here.
            if running == target:
                count += 1
            path.append(node.val)
            remainder = running
            # Drop each proper prefix; a match is a path starting below the
            # root (the node itself is excluded so the empty path is never
            # counted when target == 0).
            for idx in range(len(path) - 1):
                remainder -= path[idx]
                if remainder == target:
                    count += 1
            visit(node.left, running)
            visit(node.right, running)
            path.pop()

        visit(root, 0)
        return count
# Pre-order DFS with 2-sum hash table
# Time: O(n) Space: O(n+lg(n))
from collections import defaultdict
class Solution2:
    def pathSum(self, root, target):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int

        Count downward paths summing to `target` using prefix-sum counting:
        a path ending at the current node exists for every earlier prefix
        whose sum equals (running - target).
        """
        count = 0
        seen = defaultdict(int)  # prefix-sum -> occurrences on current path
        seen[0] = 1              # empty prefix

        def walk(node, running):
            nonlocal count
            if node is None:
                return
            running += node.val
            count += seen[running - target]
            # Increment only after updating `count` so a zero-length path is
            # never counted when target == 0.
            seen[running] += 1
            walk(node.left, running)
            walk(node.right, running)
            seen[running] -= 1

        walk(root, 0)
        return count
# Same as solution 1 brute-force, but using recursion instead of nodes stack.
# Time: O(nlg(n)), worst O(n^2) Space: O(lg(n)), worst O(n)
class Solution3:
    def pathSum(self, root, target):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int

        Count paths starting at every node by combining paths anchored at
        the root with recursive counts for both subtrees.
        """
        if root is None:
            return 0
        anchored = self.sumup(root, 0, target)
        return (anchored
                + self.pathSum(root.left, target)
                + self.pathSum(root.right, target))

    def sumup(self, node, pre, target):
        """Count downward paths starting at `node`'s parent-prefix `pre`."""
        if node is None:
            return 0
        total = pre + node.val
        hit = 1 if total == target else 0
        return (hit
                + self.sumup(node.left, total, target)
                + self.sumup(node.right, total, target))
|
[
"jesse@liu.onl"
] |
jesse@liu.onl
|
2113063d729a811629f3bc376ba4bf53a6246231
|
ea2f7efb514b7e33eb205519cfffc356f58a9816
|
/Clases en Python/__repr()__.py
|
6f40939cad236fb3207cb6550a444771c025da4d
|
[] |
no_license
|
MGijon/Learning-Python
|
fa79071bf53172743e96d2c614be2963a5107a9d
|
728f8d7e30729a965c5a093e08005d715aa6e46b
|
refs/heads/master
| 2021-06-28T15:44:34.082472
| 2019-02-23T17:58:06
| 2019-02-23T17:58:06
| 105,207,614
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
''' __repr__: returns a string with the unambiguous representation of an
object. It is useful, for example, when debugging an error.
------------
The representation can be obtained in two ways: with the repr() function or
(in Python 2) with backticks (``).
If __repr__ is not defined, Python will not raise an error; instead it
generates an automatic representation of the object, showing the name of its
class and its position in memory.
'''
class Triangulo(object):
    def __init__(self, base, altura):
        self.base = base
        self.altura = altura
    def __str__(self):
        # Human-readable form: class name plus base and height.
        clase = type(self).__name__
        mensaje = '{0} con base {1} y altura {2}.'.format(clase, self.base, self.altura)
        return mensaje
t = Triangulo(12, 124)
print(t)
print('en este caso no hemos definido __repr()__, Python lo generará automáticamente...')
print(repr(t))
import math
class Circulo(object):
    def __init__(self, radio):
        self.radio = radio
    @property
    def area(self):
        # NOTE(review): 2*pi*r is the circumference, not the area (pi*r**2);
        # kept as-is because the printed outputs below depend on it.
        return 2 * math.pi * self.radio
    def __str__(self):
        # Human-readable form for end users.
        clase = type(self).__name__
        mensaje = '{0} de radio {1} y área {2}'.format(clase, self.radio, self.area)
        return mensaje
    def __repr__(self):
        # Unambiguous form: evaluating it re-creates an equivalent object.
        clase = type(self).__name__
        mensaje = '{0}({1})'.format(clase, self.radio)
        return mensaje
c = Circulo(131)
print(c) # Circulo de radio 131 y área 823.0972752405258
print(repr(c)) # Circulo(131)
print(eval(repr(c))) # Circulo de radio 131 y área 823.0972752405258
##################### MORAL ##############################################
# --------- #
# #
# __str__ : FOR USERS #
# __repr__ : FOR DEVELOPERS #
# #
###########################################################################################
|
[
"mgijon94@gmail.com"
] |
mgijon94@gmail.com
|
70d28bdb9d82aa11081654760958d50a0e9b5ae3
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/client/gui/battle_control/controllers/consumables/__init__.py
|
de6cdb4912e1bd8a0b0ace2de737e8453afc24ad
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293
| 2016-11-19T19:12:37
| 2016-11-19T19:12:37
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,212
|
py
|
# 2016.11.19 19:48:19 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/battle_control/controllers/consumables/__init__.py
# NOTE: decompiled artifact — code kept byte-identical; only comments added.
from gui.battle_control.controllers.consumables import ammo_ctrl
from gui.battle_control.controllers.consumables import equipment_ctrl
from gui.battle_control.controllers.consumables import opt_devices_ctrl
def createAmmoCtrl(setup):
    """Factory for the ammo controller.

    Replay recording/playback sessions get replay-aware variants wired to
    setup.replayCtrl; otherwise the plain controller is returned.
    """
    if setup.isReplayRecording:
        return ammo_ctrl.AmmoReplayRecorder(setup.replayCtrl)
    if setup.isReplayPlaying:
        return ammo_ctrl.AmmoReplayPlayer(setup.replayCtrl)
    return ammo_ctrl.AmmoController()
def createEquipmentCtrl(setup):
    """Factory for the equipment controller (replay-player variant when
    a replay is being played back)."""
    if setup.isReplayPlaying:
        clazz = equipment_ctrl.EquipmentsReplayPlayer
    else:
        clazz = equipment_ctrl.EquipmentsController
    return clazz()
def createOptDevicesCtrl():
    """Factory for the optional-devices controller (no replay variant)."""
    return opt_devices_ctrl.OptionalDevicesController()
__all__ = ('createAmmoCtrl', 'createEquipmentCtrl', 'createOptDevicesCtrl')
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\battle_control\controllers\consumables\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:48:19 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
7d72e289cd1a204ce8d9543b02b422fad79372c9
|
9848584d5f1858692fb4cdbe793bc91ed3be920e
|
/coding/00239-sliding-window-max/solution.py
|
aef03fa50138e58d6f572230081501d70f98fcf3
|
[] |
no_license
|
misaka-10032/leetcode
|
1212223585cc27d3dfc6d2ca6a27770f06e427e3
|
20580185c6f72f3c09a725168af48893156161f5
|
refs/heads/master
| 2020-12-12T09:45:31.491801
| 2020-09-14T00:18:19
| 2020-09-14T00:18:19
| 50,267,669
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
#!/usr/bin/env python3
# encoding: utf-8
import collections
from typing import List
class DecreasingGarbageCollectionQueue:
    """A queue whose stored values stay in decreasing order and whose
    entries expire once they are older than a fixed time-to-live."""

    def __init__(self, ttl: int):
        # Entries inserted at time t are valid while now < t + ttl.
        self._ttl = ttl
        self._q = collections.deque()  # (timestamp, value) pairs

    def append(self, t: int, v: int):
        """Record value v seen at time t, evicting stale/smaller entries."""
        entries = self._q
        # Evict entries whose lifetime has elapsed by time t.
        while entries and entries[0][0] + self._ttl <= t:
            entries.popleft()
        # Evict values <= v so the stored values remain decreasing.
        while entries and entries[-1][1] <= v:
            entries.pop()
        entries.append((t, v))

    def peek(self) -> int:
        """Return the maximum value still inside the window (queue head)."""
        return self._q[0][1]
class Solution:
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        """Return the maximum of every length-k sliding window of nums."""
        # A monotonic queue with TTL k keeps exactly the candidates of the
        # current window, with the maximum at its head.
        window = DecreasingGarbageCollectionQueue(k)
        maxima = []
        for idx, value in enumerate(nums):
            window.append(idx, value)
            # The first full window ends at index k-1.
            if idx >= k - 1:
                maxima.append(window.peek())
        return maxima
|
[
"longqicai@gmail.com"
] |
longqicai@gmail.com
|
ed33f94bbd108c9000ac2d9dc0d03f9bc890dcbc
|
1f689e448d8b510ea6575590cb6920048b4e9aea
|
/leetcode/202_happy_number.py
|
238115bb7972505ac6b64021c56ccdb3faf05303
|
[] |
no_license
|
lijenpan/python
|
52c6061ff90c611efd039b1858339edbefdb5ad0
|
7f67045a83bd2592ccc399420194094fb78404b8
|
refs/heads/master
| 2020-05-30T10:53:15.634090
| 2016-12-02T20:50:28
| 2016-12-02T20:50:28
| 7,646,477
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
"""
Write an algorithm to determine if a number is "happy".
A happy number is a number defined by the following process: Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers.
Example: 19 is a happy number
12 + 92 = 82
82 + 22 = 68
62 + 82 = 100
12 + 02 + 02 = 1
==============================
This question shouldn't be easy. The naive approach will get you stuck in the loop.
Until you found out that (through repetitions) happy numbers contains 4, you are in for
a hell of a coding session.
"""
def isHappy(n):
    """
    Return True if n is a happy number.

    Repeatedly replace n by the sum of the squares of its digits. Happy
    numbers reach 1; every unhappy number eventually falls into a cycle
    that contains 4, so the loop can stop at either fixed point.

    :type n: int
    :rtype: bool
    """
    while n != 1 and n != 4:
        total = 0
        while n:
            digit = n % 10
            total += digit * digit
            # Floor division keeps n an int; the original `n /= 10` was
            # Python-2 semantics and produces floats (breaking the digit
            # loop) on Python 3.
            n //= 10
        n = total
    return n == 1
|
[
"noreply@github.com"
] |
lijenpan.noreply@github.com
|
34457b9f1292450d30115f4b973ae6c397ad444b
|
f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb
|
/weapp/tools/weather/views.py
|
bc7bb0580ac24124ade857e06b690326ad36e083
|
[] |
no_license
|
chengdg/weizoom
|
97740c121724fae582b10cdbe0ce227a1f065ece
|
8b2f7befe92841bcc35e0e60cac5958ef3f3af54
|
refs/heads/master
| 2021-01-22T20:29:30.297059
| 2017-03-30T08:39:25
| 2017-03-30T08:39:25
| 85,268,003
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,497
|
py
|
# -*- coding: utf-8 -*-
__author__ = "liupeiyu"
import time
from datetime import timedelta, datetime, date
import urllib, urllib2
import os
import json
from django.http import HttpResponseRedirect, HttpResponse
from django.template import Context, RequestContext
from django.contrib.auth.decorators import login_required, permission_required
from django.conf import settings
from django.shortcuts import render_to_response
from django.contrib.auth.models import User, Group, Permission
from django.contrib import auth
from django.db.models import Q
import httplib
from core.jsonresponse import JsonResponse, create_response, decode_json_str
from core import dateutil
from core.exceptionutil import full_stack
from tools.models import *
from watchdog.utils import watchdog_fatal
WATCHDOG_TYPE = 'WHETHER_API'
########################################################################
# get_weather_info: view returning cached weather data, refreshing the
# cache from the remote API when the stored entry has expired.
########################################################################
def get_weather_info(request):
    weathers = Weather.objects.all()
    response = create_response(200)
    city_code = "101180801"
    morning_time = 6 # hour at which daytime starts
    night_time = 18 # hour at which night starts
    today_date = datetime.now()
    try:
        # No cached row yet: fetch and store; otherwise refresh only when
        # the cached row is older than its update interval.
        if weathers.count() == 0:
            weather_info, weather = __get_weather_info(city_code)
        else:
            weather = weathers[0]
            if __is_out_time_span(weather.update_time, weather.update_span):
                weather_info, weather = __get_weather_info(city_code, weather_id=weather.id)
            else:
                weather_info = json.loads(weather.info)
        response.data.weather_info = weather_info
        response.data.today_date = today_date.strftime("%Y年%m月%d日")
        response.data.create_time = weather.update_time.strftime("%Y年%m月%d日 %H:%M")
        # Day vs. night flag derived from the cache timestamp's hour:
        # True for daytime (06:00-17:59), False otherwise.
        hour = int(weather.update_time.strftime("%H"))
        if morning_time <= hour and hour < night_time:
            response.data.is_daytime = True
        else:
            response.data.is_daytime = False
        # Current temperature comes from a separate live API call.
        response.data.current_temp = __get_current_temp(city_code)
    except:
        # Any failure is reported as a 500 with the traceback attached and
        # an alert raised via the watchdog.
        response = create_response(500)
        response.errMsg = u'获取失败'
        response.innerErrMsg = full_stack()
        watchdog_fatal(u'代码错误!%s' % response.innerErrMsg, WATCHDOG_TYPE)
    return response.get_response()
########################################################################
# __get_weather_info: fetch the 6-day forecast from the remote API and
# persist it (create a row, or update the row given by weather_id).
########################################################################
def __get_weather_info(city_code, weather_id = 0):
    data_str, error_info = __get_http_response_data("m.weather.com.cn", "/data/%s.html" % city_code)
    weather_info = []
    weather = None
    if data_str:
        info_json = decode_json_str(data_str)
        weather_json = info_json['weatherinfo']
        # Work out today's weekday index from the API's week field.
        weeks = [u'一', u'二', u'三', u'四', u'五', u'六', u'日']
        week_index = __get_week_index(weeks, weather_json['week'])
        # Build the list of the next 6 dates starting today.
        today_date = datetime.now()
        total_days, low_date, cur_date, high_date = dateutil.get_date_range(dateutil.get_today(), '6', 6)
        date_list = dateutil.get_date_range_list(datetime.date(today_date), high_date)
        for i in range(1,7):
            data = dict()
            data['date'] = date_list[i-1].strftime("%Y年%m月%d日")
            data['weather'] = weather_json['weather%d' % i]
            data['temp'] = weather_json['temp%d' % i]
            data['week'] = u'周%s' % weeks[week_index]
            # Advance the weekday index, wrapping from Sunday back to Monday.
            week_index = week_index + 1 if week_index + 1 < len(weeks) else 0
            weather_info.append(data)
        # Update the existing cache row when given, otherwise create one.
        if weather_id:
            weather = Weather.objects.get(id=weather_id)
            weather.info = json.dumps(weather_info)
            weather.update_time = today_date
            weather.save()
        else:
            weather = Weather.objects.create(info=json.dumps(weather_info), city_code = city_code)
    else:
        # API unavailable: fall back to the stale cached row when one
        # exists; either way raise a watchdog alert.
        if weather_id:
            weather = Weather.objects.get(id=weather_id)
            weather_info = json.loads(weather.info)
            watchdog_fatal(u'更新数据,天气的api不可用!%s' % error_info, WATCHDOG_TYPE)
        else:
            watchdog_fatal(u'首次获取数据,天气的api不可用!%s' % error_info, WATCHDOG_TYPE)
    return weather_info, weather
########################################################################
# __get_current_temp: fetch the current temperature for a city. Returns
# '' (empty string) when the API call fails.
########################################################################
def __get_current_temp(city_code):
    data_str, error_info = __get_http_response_data("www.weather.com.cn", "/data/sk/%s.html" % city_code)
    temp = ''
    if data_str:
        info_json = decode_json_str(data_str)
        # Current temperature as reported by the API.
        temp = info_json['weatherinfo']['temp']
    else:
        watchdog_fatal(u'获取当前天气温度,发送请求失败!%s' % error_info, WATCHDOG_TYPE)
    return temp
########################################################################
# __is_out_time_span: True when update_time is more than update_span
# minutes in the past (i.e. the cached entry has expired).
########################################################################
def __is_out_time_span(update_time, update_span):
    update_span = update_span * 60 * 1000
    create_time = long(time.mktime(update_time.timetuple()))*1000
    now = long(time.time()) * 1000
    if now-create_time > update_span:
        return True
    else:
        return False
########################################################################
# __get_http_response_data: perform an HTTP request; returns
# (body, None) on HTTP 200 and (None, error_details) otherwise.
########################################################################
def __get_http_response_data(domain, url, method="GET"):
    error_info = None
    conn = httplib.HTTPConnection(domain)
    try:
        conn.request(method, url)
        r1 = conn.getresponse()
        print r1.status
        # NOTE(review): `is not 200` is an identity test that only works
        # because CPython caches small ints; `!= 200` would be robust.
        if r1.status is not 200:
            error_info = r1.read()
            data_str = None
        else:
            data_str = r1.read()
    except:
        data_str = None
        error_info = full_stack()
    finally:
        conn.close()
    return data_str, error_info
########################################################################
# __get_week_index: index of the weekday character (last char of the
# API's week string) inside `weeks`; implicitly None when not found.
########################################################################
def __get_week_index(weeks, string):
    string = string[-1:]
    for i in range(len(weeks)):
        if weeks[i] == string:
            return i
|
[
"jiangzhe@weizoom.com"
] |
jiangzhe@weizoom.com
|
7fc1aab1de73aa78dbb82daf249adb798a862e6e
|
ac0e9a702e73739209b24ba3f6d9297647e06b76
|
/Example Files/Intermediate/phonebook_example_unittest/test_phonebook.py
|
877ed597231b563f8b52e0fd04a0c7d5d40c137e
|
[] |
no_license
|
afettouhi/PyStudentManager
|
9c256c38b20136f10b86fb2e2270bb5848be802d
|
71343bc52e5426b2267f068bd6af2e66c0807f08
|
refs/heads/master
| 2020-05-14T16:53:09.501889
| 2019-06-07T14:22:44
| 2019-06-07T14:22:44
| 181,881,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
import unittest
from phonebook import Phonebook
class PhonebookTest(unittest.TestCase):
    """Unit tests for Phonebook lookup and consistency rules.

    A phonebook is consistent when no stored number duplicates, or is a
    prefix of, another stored number.
    """

    def setUp(self):
        self.phonebook = Phonebook()

    def test_lookup_entry_by_name(self):
        self.phonebook.add("Bob", "12345")
        self.assertEqual("12345", self.phonebook.lookup("Bob"))

    def test_missing_entry_raises_KeyError(self):
        with self.assertRaises(KeyError):
            self.phonebook.lookup("missing")

    def test_empty_phonebook_is_consistent(self):
        # Fixed: an empty phonebook has no conflicting numbers, so it is
        # consistent (the previous assertFalse contradicted the test name).
        self.assertTrue(self.phonebook.is_consistent())

    def test_phonebook_with_normal_entries_is_consistent(self):
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "012345")
        self.assertTrue(self.phonebook.is_consistent())

    def test_phonebook_with_duplicate_entries_is_inconsistent(self):
        # Fixed: two names sharing one number must be inconsistent, as the
        # test name states (was assertTrue).
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "12345")
        self.assertFalse(self.phonebook.is_consistent())

    def test_phonebook_with_numbers_that_prefix_one_another_is_inconsistent(self):
        # Fixed: "123" prefixes "12345", which must be inconsistent, as the
        # test name states (was assertTrue).
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "123")
        self.assertFalse(self.phonebook.is_consistent())

    def test_phonebook_adds_names_and_numbers(self):
        self.phonebook.add("Sue", "12345")
        self.assertIn("Sue", self.phonebook.get_names())
        self.assertIn("12345", self.phonebook.get_numbers())
|
[
"A.Fettouhi@gmail.com"
] |
A.Fettouhi@gmail.com
|
35757bf0f4d8afe1c0b99428daee2cf27e28c9fd
|
97af3c1e09edbb09dfabe0dd8cb5334735d874b6
|
/code/lib/python/console/clint/textui/progress.py
|
960c35b9cb5d9319ca98f0dd9a3e887086ff01bf
|
[] |
no_license
|
joyrexus/ldp
|
31d3e155110e3249ad0f7c97f1b663120c6a125d
|
d0e15f051bb175fc66a4647b3001b31702aa16f3
|
refs/heads/master
| 2021-01-17T14:30:46.115805
| 2015-05-05T20:20:14
| 2015-05-05T20:20:14
| 11,434,923
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
# -*- coding: utf-8 -*-
"""
clint.textui.progress
~~~~~~~~~~~~~~~~~
This module provides the progressbar functionality.
"""
from __future__ import absolute_import
import sys
STREAM = sys.stderr

BAR_TEMPLATE = '%s[%s%s] %i/%i\r'
BAR_EMPTY_CHAR = '-'
BAR_FILLED_CHAR = '='
DOTS_CHAR = '.'


def bar(it, label='', width=32, hide=False):
    """Progress iterator. Wrap your iterables with it.

    Yields the items of *it* unchanged while redrawing a progress bar on
    STREAM after each item. *it* must support len(). With hide=True nothing
    is drawn (the stream is still flushed, as before).
    """
    count = len(it)

    def _show(_i):
        x = int(width * _i / count)
        if not hide:
            STREAM.write(BAR_TEMPLATE % (
                label, BAR_FILLED_CHAR * x, BAR_EMPTY_CHAR * (width - x), _i, count))
        STREAM.flush()

    if count:
        _show(0)
    for i, item in enumerate(it):
        yield item
        _show(i + 1)
    if not hide:
        STREAM.write('\n')
        STREAM.flush()


def dots(it, label='', hide=False):
    """Progress iterator. Prints a dot for each item being iterated."""
    if not hide:
        STREAM.write(label)
    for item in it:
        if not hide:
            STREAM.write(DOTS_CHAR)
            # Bug fix: flush the configured STREAM, not sys.stderr directly,
            # so rebinding STREAM affects all output consistently.
            STREAM.flush()
        yield item
    # Bug fix: the trailing newline used to be written even when hide=True;
    # hidden mode now emits nothing at all.
    if not hide:
        STREAM.write('\n')
        STREAM.flush()
|
[
"joyrexus@gmail.com"
] |
joyrexus@gmail.com
|
f32f2075cffb1ee258d2840c969615cb58be0bbf
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/qM6zWQM7gdfPgE9Hh_10.py
|
ac926ffffc597d07e0e765dc6f988e28824815d1
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
"""
Given a _dictionary_ of some items with _star ratings_ and a _specified star
rating_ , return a new dictionary of items **which match the specified star
rating**. Return `"No results found"` if _no item_ matches the _star rating_
given.
### Examples
filter_by_rating({
"Luxury Chocolates" : "*****",
"Tasty Chocolates" : "****",
"Aunty May Chocolates" : "*****",
"Generic Chocolates" : "***"
}, "*****") ➞ {
"Luxury Chocolates" : "*****",
"Aunty May Chocolates" : "*****"
}
filter_by_rating({
"Continental Hotel" : "****",
"Big Street Hotel" : "**",
"Corner Hotel" : "**",
"Trashviews Hotel" : "*",
"Hazbins" : "*****"
}, "*") ➞ {
"Trashviews Hotel" : "*"
}
filter_by_rating({
"Solo Restaurant" : "***",
"Finest Dinings" : "*****",
"Burger Stand" : "***"
}, "****") ➞ "No results found"
### Notes
N/A
"""
def filter_by_rating(d, rating):
    """Return the entries of `d` whose value equals `rating`.

    Falls back to the string "No results found" when nothing matches.
    (Fixes: shadowing of the builtin `dict`, a dead `b = ...` alias, and
    the `== {}` emptiness comparison.)
    """
    matches = {key: value for key, value in d.items() if value == rating}
    return matches if matches else 'No results found'
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
c93abfeac1a23ee94be4cfa675344b58b62a7439
|
42a812ac785752921dcdddd4ae56064b51452b39
|
/bulletin/post/tests/test_post_view.py
|
cd2fe8bc9b73e4f73864e1875b41fc2744fb8149
|
[] |
no_license
|
Pre-Onboarding-Listerine/aimmo-assignment-team-1
|
e4a15d3e71f1985febf911360691389f5996f0fb
|
d94dd7482f065ac1b020bb500984740c13af14e6
|
refs/heads/main
| 2023-09-02T12:23:49.693075
| 2021-11-03T00:25:18
| 2021-11-03T00:25:18
| 423,444,898
| 1
| 3
| null | 2021-11-02T16:35:38
| 2021-11-01T11:46:19
|
Python
|
UTF-8
|
Python
| false
| false
| 4,881
|
py
|
import json
import unittest
from datetime import datetime
from http import HTTPStatus
from unittest import mock
from unittest.mock import MagicMock
import jwt
from assertpy import assert_that
from django.conf import settings
from django.test import Client
from member.models import Member
from ..dto.deleted_post_id import DeletedPostId
from ..dto.post_changes import PostChanges
from ..dto.post_content import PostContents
from ..dto.post_details import PostDetails
from ..models.posting import Posting
from ..service import PostService
from member.service import MemberService
class PostViewTest(unittest.TestCase):
    """HTTP-level tests for the /posts endpoints.

    The service layer (PostService / MemberService) is mocked out, so each
    test verifies only the view's response status and that the service was
    called with the DTOs built from the request body and the JWT identity.
    """

    def setUp(self):
        self.client = Client()

    @mock.patch.object(MemberService, 'get_member')
    @mock.patch.object(PostService, 'write')
    def test_create_post_with_post_contents(self, write, get_member):
        """POST /posts returns 201 and forwards PostContents + author."""
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        # Token signed the same way the view verifies it.
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        response = self.client.post(
            "/posts",
            data=json.dumps({
                "title": "json title",
                "content": "json content",
                "category": "json"
            }),
            content_type="application/json",
            **headers
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.CREATED)
        write.assert_called_with(
            PostContents(
                title="json title",
                content="json content",
                category="json"
            ),
            Member(
                username="asd",
                password="123qwe"
            )
        )

    @mock.patch.object(PostService, 'edit')
    @mock.patch.object(MemberService, 'get_member')
    def test_update_post_with_author(self, get_member, edit):
        """PATCH /posts returns 200 and forwards PostChanges + updater."""
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        response = self.client.patch(
            "/posts",
            data=json.dumps({
                "id": 1,
                "title": "json title",
                "content": "json content",
            }),
            content_type="application/json",
            **headers
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.OK)
        changes = PostChanges(
            id=1,
            title="json title",
            content="json content"
        )
        updater = Member(
            username="asd",
            password="123qwe"
        )
        edit.assert_called_with(changes, updater)

    @mock.patch.object(PostService, 'remove')
    @mock.patch.object(MemberService, 'get_member')
    def test_delete_with_author(self, get_member, remove):
        """DELETE /posts returns 204 and forwards DeletedPostId + deleter."""
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        response = self.client.delete(
            "/posts",
            data=json.dumps({
                "id": 1
            }),
            content_type="application/json",
            **headers
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.NO_CONTENT)
        deleted_post_id = DeletedPostId(
            id=1
        )
        deleter = Member(
            username="asd",
            password="123qwe"
        )
        remove.assert_called_with(deleted_post_id, deleter)

    @mock.patch.object(PostService, 'details')
    def test_get_details_with_post_id(self, details):
        """GET /posts/<id> returns 200 and asks the service for details."""
        author = Member(
            username="asd",
            password="123qwe"
        )
        details.return_value = PostDetails(
            id=1,
            author=author.username,
            title="before title",
            content="before content",
            category="before",
            created_at=datetime.utcnow().strftime("%m-%d-%Y, %H:%M:%S"),
            updated_at=datetime.utcnow().strftime("%m-%d-%Y, %H:%M:%S"),
            comments=[],
            hits=0
        )
        response = self.client.get(
            "/posts/1"
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.OK)
        # Second argument is the (absent) authenticated member.
        details.assert_called_with(1, None)
|
[
"rlawndhks217@gmail.com"
] |
rlawndhks217@gmail.com
|
74458f6a29b52a4aba737448865b8f86ca8a360b
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/lib/surface/version.py
|
7d7321ca5431114f3472d2997a60ebba92f03cde
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908
| 2020-12-24T00:36:28
| 2020-12-24T00:36:28
| 79,285,433
| 1
| 1
|
Apache-2.0
| 2020-12-24T00:38:09
| 2017-01-18T00:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,488
|
py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to print version information for Cloud SDK components.
"""
from googlecloudsdk.calliope import base
from googlecloudsdk.core import config
from googlecloudsdk.core.updater import update_manager
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Version(base.Command):
  """Print version information for Cloud SDK components.

  This command prints version information for each installed Cloud SDK
  component and prints a message if updates are available.
  """

  def Run(self, args):
    """Build a mapping of component name -> installed version."""
    versions = {}
    sdk_root = config.Paths().sdk_root
    if sdk_root:
      # Component information only exists for a built/installed Cloud SDK.
      manager = update_manager.UpdateManager()
      versions.update(manager.GetCurrentVersionsInformation())
    versions['Google Cloud SDK'] = config.CLOUD_SDK_VERSION
    return versions

  def Format(self, args):
    # One "name version" pair per line, unpadded.
    return 'flattened[no-pad,separator=" "]'
|
[
"toork@uw.edu"
] |
toork@uw.edu
|
d7f2ac70b8cb10c2f05a112b0c00c9af580c876b
|
fcb87e969a3989f2023f3847a5f0e1289a0a8694
|
/sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object_list.py
|
15f0af7ef8ff43197a068b6a31c7c792caf4f15b
|
[
"BSD-3-Clause"
] |
permissive
|
SKA-ScienceDataProcessor/integration-prototype
|
299eb0aa41ba9c7f683f5bac101af5a19fccb171
|
5875dc0489f707232534ce75daf3707f909bcd15
|
refs/heads/master
| 2021-05-01T05:02:16.697902
| 2019-07-28T22:32:05
| 2019-07-28T22:32:05
| 58,473,707
| 3
| 10
|
BSD-3-Clause
| 2021-03-25T22:21:08
| 2016-05-10T15:41:14
|
C++
|
UTF-8
|
Python
| false
| false
| 3,436
|
py
|
# -*- coding: utf-8 -*-
"""Base class for list of scheduling or processing block data objects."""
from typing import List
from ._scheduling_object import SchedulingObject
from .. import ConfigDb
from .._events.event_queue import EventQueue
from .._events.pubsub import get_subscribers, publish, subscribe
DB = ConfigDb()
class SchedulingObjectList:
"""Base class for SBI and PB data objects API."""
def __init__(self, object_type: str):
"""Initialise variables.
Args:
object_type (str): Object Type
"""
self.type = object_type
@property
def num_active(self) -> int:
"""Get the number of active scheduling objects."""
return len(self.active)
@property
def num_aborted(self) -> int:
"""Get the number of aborted scheduling objects."""
return len(self.aborted)
@property
def num_completed(self) -> int:
"""Get the number of completed scheduling objects."""
return len(self.completed)
@property
def active(self) -> List[str]:
"""Get list of active scheduling objects.
Returns:
list, list of object ids
"""
return DB.get_list('{}:active'.format(self.type))
@property
def aborted(self) -> List[str]:
"""Get list of aborted scheduling objects.
Returns:
list, list of object ids
"""
return DB.get_list('{}:aborted'.format(self.type))
@property
def completed(self) -> List[str]:
"""Get list of completed scheduling objects.
Returns:
list, list of object ids
"""
return DB.get_list('{}:completed'.format(self.type))
def set_complete(self, object_id: str):
"""Mark the specified object as completed."""
if object_id in self.active:
DB.remove_from_list('{}:active'.format(self.type), object_id)
DB.append_to_list('{}:completed'.format(self.type), object_id)
###########################################################################
# Pub/sub events functions
###########################################################################
def subscribe(self, subscriber: str) -> EventQueue:
"""Subscribe to scheduling object events.
Args:
subscriber (str): Subscriber name.
Returns:
events.EventQueue, Event queue object for querying PB events.
"""
return subscribe(self.type, subscriber)
def get_subscribers(self) -> List[str]:
"""Get the list of subscribers.
Get the list of subscribers to Scheduling Block Instance (SBI) or
Processing Block events.
Returns:
List[str], list of subscriber names.
"""
return get_subscribers(self.type)
def publish(self, object_id: str, event_type: str,
event_data: dict = None):
"""Publish a scheduling object event.
Args:
object_id (str): ID of the scheduling object
event_type (str): Type of event.
event_data (dict, optional): Event data.
"""
object_key = SchedulingObject.get_key(self.type, object_id)
publish(event_type=event_type,
event_data=event_data,
object_type=self.type,
object_id=object_id,
object_key=object_key,
origin=None)
|
[
"ben.mort@gmail.com"
] |
ben.mort@gmail.com
|
fcdbe8b38ae560684105297029179656a604f2db
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/222/222.count-complete-tree-nodes.233499249.Accepted.leetcode.py
|
07968f423174e017c6f9c7febffa58330579725a
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504
| 2023-07-16T07:38:36
| 2023-07-16T07:38:36
| 143,352,065
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def countNodes(self, root):
if not root:
return 0
left_subtree = self.left_depth(root.left)
right_subtree = self.left_depth(root.right)
if left_subtree == right_subtree:
return 2**left_subtree + self.countNodes(root.right)
else:
return 2**right_subtree + self.countNodes(root.left)
def left_depth(self, node):
depth = 0
while node:
node = node.left
depth += 1
return depth
|
[
"huangyingw@gmail.com"
] |
huangyingw@gmail.com
|
102cfb4a48484d5440f4765e4468f290cddc203a
|
ea9f2c578e479fcaebbba84d2a1fe63e96f9145d
|
/src/common/models/user.py
|
4d4c9b4f978ae046c363d45934812a5da49ed9b4
|
[] |
no_license
|
spandey2405/onlinecoderbackend
|
1a6bd278f725ae5b1ad1c57b951ac5f9f87b71eb
|
afffd81c027a46247dd47e2ca02ab981e124b09a
|
refs/heads/master
| 2021-01-17T07:57:03.077054
| 2016-08-01T13:41:50
| 2016-08-01T13:41:50
| 64,668,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
from django.db import models
from src.common.libraries.constants import *
import binascii, os, uuid
class UserManager(models.Manager):
def generate_userid(self):
return str(uuid.uuid4())
def generate_salt(self):
return binascii.hexlify(os.urandom(SALT_LENGTH/2)).decode()
class User(models.Model):
user_id = models.CharField(max_length=UID_LENGTH, primary_key=True, editable=False)
name = models.EmailField(max_length=200)
email = models.EmailField(max_length=MAX_EMAIL_LENGTH, unique=True)
password_hash = models.CharField(max_length=MAX_PASSWORD_LENGTH)
phoneno = models.CharField(max_length=10, default=0)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
salt = models.CharField(max_length=SALT_LENGTH)
objects = UserManager()
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def save(self, *args, **kwargs):
if not self.user_id:
self.user_id = User.objects.generate_userid()
if not self.salt:
self.salt = User.objects.generate_salt()
return super(User, self).save(*args, **kwargs)
def __unicode__(self):
return self.user_id
class Meta:
db_table = 'user'
app_label = 'common'
|
[
"spandey2405@gmail.com"
] |
spandey2405@gmail.com
|
e3d8cb3403b6a91ceba70ae0162d75363b5c0a9d
|
01abb5fe2d6a51e8ee4330eaead043f4f9aad99d
|
/Repo_Files/Zips/plugin.video.streamhub/resources/lib/smodules/trailer.py
|
729c4e3dd4f78c4945f1e6ce4a8b48274938d418
|
[] |
no_license
|
MrAnhell/StreamHub
|
01bb97bd3ae385205f3c1ac6c0c883d70dd20b9f
|
e70f384abf23c83001152eae87c6897f2d3aef99
|
refs/heads/master
| 2021-01-18T23:25:48.119585
| 2017-09-06T12:39:41
| 2017-09-06T12:39:41
| 87,110,979
| 0
| 0
| null | 2017-04-03T19:09:49
| 2017-04-03T19:09:49
| null |
UTF-8
|
Python
| false
| false
| 3,905
|
py
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,json,urlparse,base64,random
from resources.lib.smodules import client
from resources.lib.smodules import control
class trailer:
def __init__(self):
self.base_link = 'http://www.youtube.com'
self.key_link = random.choice(['QUl6YVN5RDd2aFpDLTYta2habTVuYlVyLTZ0Q0JRQnZWcnFkeHNz', 'QUl6YVN5Q2RiNEFNenZpVG0yaHJhSFY3MXo2Nl9HNXBhM2ZvVXd3'])
self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link)
self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&maxResults=5&q=%s'
self.youtube_search = 'https://www.googleapis.com/youtube/v3/search?q='
self.youtube_watch = 'http://www.youtube.com/watch?v=%s'
def play(self, name, url=None):
try:
url = self.worker(name, url)
if url == None: return
title = control.infoLabel('listitem.title')
if title == '': title = control.infoLabel('listitem.label')
icon = control.infoLabel('listitem.icon')
item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
try: item.setArt({'icon': icon})
except: pass
item.setInfo(type='Video', infoLabels = {'title': title})
control.player.play(url, item)
except:
pass
def worker(self, name, url):
try:
if url.startswith(self.base_link):
url = self.resolve(url)
if url == None: raise Exception()
return url
elif not url.startswith('http://'):
url = self.youtube_watch % url
url = self.resolve(url)
if url == None: raise Exception()
return url
else:
raise Exception()
except:
query = name + ' trailer'
query = self.youtube_search + query
url = self.search(query)
if url == None: return
return url
def search(self, url):
try:
query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
url = self.search_link % urllib.quote_plus(query) + self.key_link
result = client.request(url)
items = json.loads(result)['items']
items = [(i['id']['videoId']) for i in items]
for url in items:
url = self.resolve(url)
if not url is None: return url
except:
return
def resolve(self, url):
try:
id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
result = client.request('http://www.youtube.com/watch?v=%s' % id)
message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
message = ''.join(message)
alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
return url
except:
return
|
[
"mediahubiptv@gmail.com"
] |
mediahubiptv@gmail.com
|
b843de38c9488e62441a89a633f1336a972f423a
|
0d91c86aa0c70115d70f09e3e45460df73dcc652
|
/alpha_a.py
|
d8263fba6f7a743eb66fc076ec23ea33da0d66a6
|
[] |
no_license
|
Michael-Gong/DLA_project
|
589791a3ca5dba7a7d5b9a170c9e2ad712a3ae36
|
3a6211451cc404d772246f9c2b60e0c97576cfef
|
refs/heads/master
| 2021-04-27T08:11:37.414851
| 2019-01-18T05:24:40
| 2019-01-18T05:24:40
| 122,650,552
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,172
|
py
|
%matplotlib inline
#import sdf
import matplotlib
import matplotlib as mpl
mpl.style.use('https://raw.githubusercontent.com/Michael-Gong/DLA_project/master/style')
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
from optparse import OptionParser
import os
from mpl_toolkits.mplot3d import Axes3D
import random
from mpl_toolkits import mplot3d
from matplotlib import rc
import matplotlib.transforms as mtransforms
import sys
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
font = {'family' : 'Carlito',
'color' : 'black',
'weight' : 'normal',
'size' : 25,
}
#plt.scatter(theta_x/np.pi*180, arg_gg, c=np.linspace(1,np.size(theta_x),np.size(theta_x))[np.newaxis,:], s=20, cmap='nipy_spectral', edgecolors='None')
#cbar=plt.colorbar(ticks=np.linspace(1, np.size(theta_x), 5), shrink=1)# orientation='horizontal', shrink=0.2)
#cbar.set_label(r'$Nth$', fontdict=font)
#plt.xlim(-45,45)
##print(theta_x)
#plt.xlabel(r'$\theta\ [degree]$',fontdict=font)
#plt.ylabel(r'$\gamma$',fontdict=font)
##plt.xticks(fontsize=30); plt.yticks(fontsize=30);
##plt.ylim(0,2000.0)
a0=np.linspace(10,210,1001)
#alpha=0.04**1.5*a0/(4.6**0.75)
alpha= (179.0**0.5*a0**2/2.3e6-9.6*a0**2/2.03e6-1.3e1/2.03e6)**0.5
#plt.plot(a0,alpha,'-k',linewidth=4)
plt.plot(a0,(a0**2-6.5)**0.5/1000.0,'-k',linewidth=4)
alpha=0.04**1.5*a0/(4.6**0.75)
#plt.plot(a0,alpha,'--b',linewidth=4)
u = 1.0/12.5
a0_1=np.array([10,25,50,75,100,125,150,200])
alpha_1=np.array([-2+2*u,-2+6*u,-2+10*u,-2+11*u,-1+1.5*u,-1+3*u,-1+4*u,-1+5*u])
plt.scatter(a0_1,10**(alpha_1-0.25*u),marker='+',s=40,color='r')
plt.xlabel(r'$a_0$',fontdict=font)
plt.ylabel(r'$\alpha$',fontdict=font)
plt.xticks(fontsize=30); plt.yticks(fontsize=30);
plt.yscale('log')
plt.ylim(10**-2,10**0)
fig = plt.gcf()
#fig.set_size_inches(30, 15)
fig.set_size_inches(8, 4)
#fig.savefig('./bunch_theta_en.png',format='png',dpi=160)
#plt.close("all")
|
[
"noreply@github.com"
] |
Michael-Gong.noreply@github.com
|
ff4ae30a5bc2aa2818fcf1314ca8b8c98913fbaf
|
c8be7becd7466bd6639382156e0886fce3cfb386
|
/array_list_repeat.py
|
cd49a02546a3accdc328440da4354653614c9424
|
[] |
no_license
|
wlgud0402/pyfiles
|
864db71827aba5653d53320322eb8de8b0a5fc49
|
0e8b96c4bbfb20e1b5667ce482abe75061662299
|
refs/heads/master
| 2021-02-28T00:42:51.321207
| 2020-03-09T10:48:52
| 2020-03-09T10:48:52
| 245,648,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
#반복문을 사용한 리스트 생성
array = []
for i in range(0,20,2):
array.append(i * i)
print(array)
print()
#리스트 안에 for문 사용하기
list_a = [z * z for z in range(0, 20, 2)] #최종결과를 앞에 작성 z*z
print(list_a)
print()
#if문도 추가하기
newarray = [1,2,3,4,5,6,7,8,9]
output = [number for number in newarray if number != 3]
print(output)
|
[
"wlgudrlgus@naver.com"
] |
wlgudrlgus@naver.com
|
3fabf4f4ba845759d4b8fc8576fc5bc284056ab8
|
a4dfbafdb2d1cc39534a481747fe9746ebb4ef7a
|
/src/models/base_models/resnest_model.py
|
158eb4a786862e66ce97c979d9f509c5c8e10334
|
[] |
no_license
|
huangchuanhong/dist_face_pytorch
|
3f41045f662de0f9826bc5041bdd2b9abbcb9558
|
dc662b713564b2c3f5a61d4ad0e8a78e4aa54a84
|
refs/heads/master
| 2022-12-31T23:01:26.997504
| 2020-10-26T08:29:39
| 2020-10-26T08:29:39
| 264,177,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
import torch.nn as nn
from .backbones import ResNest
from ..registry import BASE_MODEL
from ..utils import constant_init, normal_init, kaiming_init
@BASE_MODEL.register_module
class ResNestModel(nn.Module):
def __init__(self,
feature_dim,
**kwargs):
super(ResNestModel, self).__init__()
self.backbone = ResNest(**kwargs)
self.gdc = nn.Conv2d(2048, 2048, groups=2048//16, kernel_size=(7, 7), stride=(1, 1), padding=(0, 0), bias=False)
self.bn = nn.BatchNorm2d(2048)
self.fc = nn.Linear(2048, feature_dim)
def init_weights(self, pretrained=None):
self.backbone.init_weights(pretrained=pretrained)
kaiming_init(self.gdc)
constant_init(self.bn, 1)
#normal_init(self.fc, std=0.01)
def forward(self, input):
output = self.backbone(input)
output = self.gdc(output)
output = self.bn(output)
output = output.view([-1, 2048])
output = self.fc(output)
return output
def train(self, mode):
self.backbone.train(mode)
self.bn.train(mode)
|
[
"huangchuanhong@xgrobotics.com"
] |
huangchuanhong@xgrobotics.com
|
d0303d53dd3eba23fd2b686900359aa35a47c0bb
|
18aee5d93a63eab684fe69e3aa0abd1372dd5d08
|
/python/paddle/vision/models/alexnet.py
|
4239395c03319dd88ea9923153eb9cc250de73f0
|
[
"Apache-2.0"
] |
permissive
|
Shixiaowei02/Paddle
|
8d049f4f29e281de2fb1ffcd143997c88078eadb
|
3d4d995f26c48f7792b325806ec3d110fc59f6fc
|
refs/heads/develop
| 2023-06-26T06:25:48.074273
| 2023-06-14T06:40:21
| 2023-06-14T06:40:21
| 174,320,213
| 2
| 1
|
Apache-2.0
| 2022-12-28T05:14:30
| 2019-03-07T10:09:34
|
C++
|
UTF-8
|
Python
| false
| false
| 7,002
|
py
|
# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn.functional as F
from paddle import nn
from paddle.fluid.param_attr import ParamAttr
from paddle.nn import Conv2D, Dropout, Linear, MaxPool2D, ReLU
from paddle.nn.initializer import Uniform
from paddle.utils.download import get_weights_path_from_url
model_urls = {
"alexnet": (
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams",
"7f0f9f737132e02732d75a1459d98a43",
)
}
__all__ = []
class ConvPoolLayer(nn.Layer):
def __init__(
self,
input_channels,
output_channels,
filter_size,
stride,
padding,
stdv,
groups=1,
act=None,
):
super().__init__()
self.relu = ReLU() if act == "relu" else None
self._conv = Conv2D(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=groups,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
def forward(self, inputs):
x = self._conv(inputs)
if self.relu is not None:
x = self.relu(x)
x = self._pool(x)
return x
class AlexNet(nn.Layer):
"""AlexNet model from
`"ImageNet Classification with Deep Convolutional Neural Networks"
<https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf>`_.
Args:
num_classes (int, optional): Output dim of last fc layer. If num_classes <= 0, last fc layer
will not be defined. Default: 1000.
Returns:
:ref:`api_paddle_nn_Layer`. An instance of AlexNet model.
Examples:
.. code-block:: python
import paddle
from paddle.vision.models import AlexNet
alexnet = AlexNet()
x = paddle.rand([1, 3, 224, 224])
out = alexnet(x)
print(out.shape)
# [1, 1000]
"""
def __init__(self, num_classes=1000):
super().__init__()
self.num_classes = num_classes
stdv = 1.0 / math.sqrt(3 * 11 * 11)
self._conv1 = ConvPoolLayer(3, 64, 11, 4, 2, stdv, act="relu")
stdv = 1.0 / math.sqrt(64 * 5 * 5)
self._conv2 = ConvPoolLayer(64, 192, 5, 1, 2, stdv, act="relu")
stdv = 1.0 / math.sqrt(192 * 3 * 3)
self._conv3 = Conv2D(
192,
384,
3,
stride=1,
padding=1,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
stdv = 1.0 / math.sqrt(384 * 3 * 3)
self._conv4 = Conv2D(
384,
256,
3,
stride=1,
padding=1,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
stdv = 1.0 / math.sqrt(256 * 3 * 3)
self._conv5 = ConvPoolLayer(256, 256, 3, 1, 1, stdv, act="relu")
if self.num_classes > 0:
stdv = 1.0 / math.sqrt(256 * 6 * 6)
self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
self._fc6 = Linear(
in_features=256 * 6 * 6,
out_features=4096,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
self._fc7 = Linear(
in_features=4096,
out_features=4096,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
self._fc8 = Linear(
in_features=4096,
out_features=num_classes,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
def forward(self, inputs):
x = self._conv1(inputs)
x = self._conv2(x)
x = self._conv3(x)
x = F.relu(x)
x = self._conv4(x)
x = F.relu(x)
x = self._conv5(x)
if self.num_classes > 0:
x = paddle.flatten(x, start_axis=1, stop_axis=-1)
x = self._drop1(x)
x = self._fc6(x)
x = F.relu(x)
x = self._drop2(x)
x = self._fc7(x)
x = F.relu(x)
x = self._fc8(x)
return x
def _alexnet(arch, pretrained, **kwargs):
model = AlexNet(**kwargs)
if pretrained:
assert (
arch in model_urls
), "{} model do not have a pretrained model now, you should set pretrained=False".format(
arch
)
weight_path = get_weights_path_from_url(
model_urls[arch][0], model_urls[arch][1]
)
param = paddle.load(weight_path)
model.load_dict(param)
return model
def alexnet(pretrained=False, **kwargs):
"""AlexNet model from
`"ImageNet Classification with Deep Convolutional Neural Networks"
<https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf>`_.
Args:
pretrained (bool, optional): Whether to load pre-trained weights. If True, returns a model pre-trained
on ImageNet. Default: False.
**kwargs (optional): Additional keyword arguments. For details, please refer to :ref:`AlexNet <api_paddle_vision_AlexNet>`.
Returns:
:ref:`api_paddle_nn_Layer`. An instance of AlexNet model.
Examples:
.. code-block:: python
import paddle
from paddle.vision.models import alexnet
# build model
model = alexnet()
# build model and load imagenet pretrained weight
# model = alexnet(pretrained=True)
x = paddle.rand([1, 3, 224, 224])
out = model(x)
print(out.shape)
# [1, 1000]
"""
return _alexnet('alexnet', pretrained, **kwargs)
|
[
"noreply@github.com"
] |
Shixiaowei02.noreply@github.com
|
af1b04d6cf97703519e4498002d19f6698381301
|
5c8139f1e57e06c7eaf603bd8fe74d9f22620513
|
/PartB/Py判断是否为合理的括号.py
|
2a3605334e6226a0c403baec737ed955220c4db7
|
[] |
no_license
|
madeibao/PythonAlgorithm
|
c8a11d298617d1abb12a72461665583c6a44f9d2
|
b4c8a75e724a674812b8a38c0202485776445d89
|
refs/heads/master
| 2023-04-03T07:18:49.842063
| 2021-04-11T12:02:40
| 2021-04-11T12:02:40
| 325,269,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
class Solution:
def isValid(self, s: str) -> bool:
dic = {'{': '}', '[': ']', '(': ')', '?': '?'}
stack = ['?']
for c in s:
if c in dic: stack.append(c)
elif dic[stack.pop()] != c: return False
return len(stack) == 1
if __name__ == '__main__':
s = Solution()
print(s.isValid("(){}"))
|
[
"2901429479@qq.com"
] |
2901429479@qq.com
|
01e81a1f99193030b8a12ff979b36ab877ecbdbd
|
9dd14d428b2513376f0e1c3ec06a3b06fc60fc73
|
/solution/operators/sdi_pandas_0.0.37/sdi_pandas_0.0.36/content/files/vflow/subengines/com/sap/python36/operators/sdi_pandas/sample/sample.py
|
41fd2ff0d55d9875b9f252b305861c73eef12369
|
[
"MIT"
] |
permissive
|
thhapke/sdi_pandas
|
38b1a3a688c62621fb858f03e4ac2f3bcbc20b88
|
7a9108007459260a30ea7ee404a76b42861c81c5
|
refs/heads/master
| 2020-07-24T10:40:05.643337
| 2020-04-08T06:59:52
| 2020-04-08T06:59:52
| 207,894,698
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,795
|
py
|
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import pandas as pd
EXAMPLE_ROWS = 5
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if isinstance(msg,api.Message) :
print('Port: ', port)
print('Attributes: ', msg.attributes)
print('Body: ', str(msg.body))
else :
print(str(msg))
return msg
def call(config,msg):
api.config = config
return process(msg)
def set_port_callback(port, callback) :
df = pd.DataFrame(
{'icol': [1, 2, 3, 4, 5], 'xcol2': ['A', 'A', 'B', 'B', 'C'], \
'xcol3': ['K', 'L', 'M', 'N', 'O'], 'xcol4': ['a1', 'a1', 'b1', 'b1', 'b1']})
default_msg = api.Message(attributes = {'format': 'pandas', 'name': 'test'}, body=df)
callback(default_msg)
class config:
## Meta data
config_params = dict()
version = '0.0.17'
tags = {'pandas': '','sdi_utils':''}
operator_description = "Sample from Dataframe"
operator_description_long = "Sampling over a DataFrame but keeps datasets with the same value of the \
defined column as set and not splitting them, e.g. sampling with the invariant_column='date' samples \
but ensures that all datasets of a certain date are taken or none. This leads to the fact that the \
sample_size is only a guiding target. Depending on the size of the datasets with the same value of \
the *invariant_column* compared to the *sample_size* this could deviate a lot. "
add_readme = dict()
add_readme["References"] = "[pandas doc: sample](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html)"
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
sample_size = 0.1
config_params['sample_size'] = {'title': 'Sample size', 'description': 'Sample size', 'type': 'number'}
random_state = 1
config_params['random_state'] = {'title': 'Random state', 'description': 'Random state', 'type': 'integer'}
invariant_column = ''
config_params['invariant_column'] = {'title': 'Invariant column', 'description': 'Column where all the same value records should be kept as a whole in a sample', 'type': 'string'}
def process(msg) :
att_dict = dict()
att_dict['config'] = dict()
att_dict['operator'] = 'sample'
if api.config.debug_mode == True:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='DEBUG')
else:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='INFO')
logger.info("Process started")
time_monitor = tp.progress()
# start custom process definition
# test if body refers to a DataFrame type
prev_att = msg.attributes
df = msg.body
if not isinstance(df, pd.DataFrame):
logger.error('Message body does not contain a pandas DataFrame')
raise TypeError('Message body does not contain a pandas DataFrame')
att_dict = dict()
att_dict['config'] = dict()
###### start calculation
sample_size = api.config.sample_size
if sample_size < 1 :
sample_size = int(sample_size * df.shape[0])
if sample_size < 1 :
sample_size = 1
logger.warning("Fraction of sample size too small. Set sample size to 1.")
elif sample_size > df.shape[0]:
logger.warning("Sample size larger than number of rows")
logger.debug("Samples_size: {}/() ({})".format(sample_size,df.shape[0],sample_size/df.shape[0]))
random_state = api.config.random_state
invariant_column = tfp.read_value(api.config.invariant_column)
if invariant_column and sample_size < df.shape[0]:
# get the average number of records for each value of invariant
sc_df = df.groupby(invariant_column)[invariant_column].count()
sample_size_invariant = int(sample_size / sc_df.mean())
sample_size_invariant = 1 if sample_size_invariant == 0 else sample_size_invariant # ensure minimum
sc_df = sc_df.sample(n=sample_size_invariant, random_state=random_state).to_frame()
sc_df.rename(columns={invariant_column: 'sum'}, inplace=True)
# sample the df by merge 2 df
df = pd.merge(df, sc_df, how='inner', right_index=True, left_on=invariant_column)
df.drop(columns=['sum'], inplace=True)
else:
df = df.sample(n=sample_size, random_state=random_state)
###### end calculation
##############################################
# final infos to attributes and info message
##############################################
if df.empty:
raise ValueError('DataFrame is empty')
logger.info('End of Process: {}'.format(time_monitor.elapsed_time()))
att_dict['memory'] = df.memory_usage(deep=True).sum() / 1024 ** 2
att_dict['columns'] = str(list(df.columns))
att_dict['shape'] = df.shape
att_dict['id'] = str(id(df))
logger.debug('Columns: {}'.format(str(df.columns)))
logger.debug('Shape (#rows - #columns): {} - {}'.format(df.shape[0], df.shape[1]))
logger.debug('Memory: {} kB'.format(att_dict['memory']))
example_rows = EXAMPLE_ROWS if df.shape[0] > EXAMPLE_ROWS else df.shape[0]
for i in range(0, example_rows):
att_dict['row_' + str(i)] = str([str(i)[:10].ljust(10) for i in df.iloc[i, :].tolist()])
logger.debug('Head data: {}'.format(att_dict['row_' + str(i)]))
# end custom process definition
log = log_stream.getvalue()
msg = api.Message(attributes=att_dict,body=df)
return log, msg
inports = [{'name': 'data', 'type': 'message.DataFrame',"description":"Input data"}]
outports = [{'name': 'log', 'type': 'string',"description":"Logging data"}, \
{'name': 'data', 'type': 'message.DataFrame',"description":"Output data"}]
def call_on_input(msg) :
log, msg = process(msg)
api.send(outports[0]['name'], log)
api.send(outports[1]['name'], msg)
api.set_port_callback([inports[0]['name']], call_on_input)
def main() :
print('Test: Default')
api.set_port_callback([inports[0]['name']], call_on_input)
|
[
"53856509+thhapke@users.noreply.github.com"
] |
53856509+thhapke@users.noreply.github.com
|
1131b28b0a153d0d74427cea61cfce5a5b8d28f4
|
90cc37b6cc67bce397411631587a14be72085d2e
|
/tests/unit/test_deployment.py
|
6cda651e7bc2a1406caa35b50b68d8220d34e492
|
[
"Unlicense"
] |
permissive
|
michaeljoseph/righteous
|
49d36bb895945a26d5db4b3d13a2e303aef3ef93
|
ba95c574a94df85aca33397cc77b053e7f545705
|
refs/heads/master
| 2016-09-06T11:01:57.478168
| 2013-10-17T11:00:27
| 2013-10-17T11:00:27
| 2,584,142
| 2
| 1
| null | 2013-10-18T14:53:04
| 2011-10-16T00:07:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,389
|
py
|
from righteous.compat import urlencode
from .base import ApiTestCase
import righteous
class DeploymentTestCase(ApiTestCase):
def setUp(self):
self.setup_patching('righteous.api.deployment._request')
super(DeploymentTestCase, self).setUp()
def test_list_deployments(self):
righteous.init(
'user', 'pass', 'account_id', default_deployment_id='foo')
self.response.content = '{}'
righteous.list_deployments()
self.request.assert_called_once_with('/deployments.js')
def test_find_deployment_no_result(self):
self.response.content = '[]'
deployment = righteous.find_deployment('bruce')
request_url = '/deployments.js?filter=nickname=bruce'
self.request.assert_called_once_with(request_url)
assert not deployment
def test_deployment_info(self):
self.response.content = '{}'
righteous.deployment_info('/deployment/ref')
self.request.assert_called_once_with(
'/deployment/ref.js', prepend_api_base=False)
def test_create_deployment(self):
self.response.status_code = 201
self.response.headers['location'] = '/deployment/new_ref'
nickname = 'devops'
description = 'devops deployment'
create_data = {
'deployment[nickname]': nickname,
'deployment[description]': description,
}
expected = urlencode(create_data)
success, location = righteous.create_deployment(nickname, description)
self.request.assert_called_once_with(
'/deployments', method='POST', body=expected)
assert success
self.assertEqual(location, '/deployment/new_ref')
def test_delete_deployment(self):
self.response.content = '{}'
assert righteous.delete_deployment('/deployment/ref')
self.request.assert_called_once_with(
'/deployment/ref', method='DELETE', prepend_api_base=False)
def test_duplicate_deployment(self):
self.response.status_code = 201
self.response.headers['location'] = '/deployment/new_ref'
success, location = righteous.duplicate_deployment('/deployment/ref')
assert success
self.request.assert_any_call(
'/deployment/ref/duplicate', method='POST', prepend_api_base=False)
self.assertEqual(location, '/deployment/new_ref')
|
[
"michaeljoseph+github@gmail.com"
] |
michaeljoseph+github@gmail.com
|
da6990b212765548549d6a7ed409b29dfd3ff68a
|
758ca5e2bf50016fbac7022ac5f9036aa8aa099b
|
/LeetCodeWeb.py
|
3359b7e6c9cd5b4f92bd6298419aa98886ca70f5
|
[] |
no_license
|
zhantong/leetcode-web
|
04f17901e4bf5a6065e35dd126dd7bbcc8b1128f
|
3f79f5463e77ed7eab8b808a7004eea8c29fc35e
|
refs/heads/master
| 2021-01-02T22:54:54.797228
| 2017-10-19T02:00:48
| 2017-10-19T02:00:48
| 99,420,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,913
|
py
|
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import g
import os.path
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
import sqlite3
app = Flask(__name__)
ROOT = os.path.realpath(os.path.dirname(__file__))
DATABASE = 'leetcode.db'
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.route('/')
def hello_world():
return redirect('/problems')
@app.route('/problems')
def show_problem_list():
problem_list = get_problem_list()
return render_template('problems_summary.html', problem_list=problem_list)
@app.route('/problems/<slug>')
def show_problem(slug):
    """Render one problem page: description HTML (if on disk) plus solution code.

    NOTE(review): fetchone() returns None for an unknown slug, which would make
    the unpacking below raise TypeError -- consider returning a 404 instead.
    """
    c = get_db().cursor()
    c.execute('SELECT id,title FROM problem WHERE slug=?', (slug,))
    id, title = c.fetchone()
    # Descriptions are pre-rendered HTML files named "NNN. Title.html".
    description_file_name = str(id).zfill(3) + '. ' + title + '.html'
    file_path = os.path.join(ROOT, 'descriptions', description_file_name)
    if os.path.exists(file_path):
        with open(file_path, 'r', encoding='utf-8') as f:
            description = f.read()
    else:
        description = '收费题目'  # placeholder shown when no local description exists
    codes = get_codes(('python', 'java', 'c++'), id, title)
    title = str(id) + '. ' + title
    # PJAX requests only need the inner fragment, not the full page shell.
    if 'X-PJAX' in request.headers:
        return render_template('problem_description.html', description=description, codes=codes, title=title,
                               id=id)
    return render_template('problem.html', description=description, codes=codes,
                           problem_list=get_problem_list(), title=title, id=id)
@app.teardown_appcontext
def close_connection(exception):
    """Close the request's SQLite connection, if one was opened by get_db()."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
def get_codes(code_types, id, title):
    """Collect (language label, highlighted HTML) pairs for this problem.

    Languages whose solution file does not exist on disk are skipped silently.
    """
    code_infos = {
        'java': ('Java', 'java'),
        'python': ('Python', 'py'),
        'c++': ('C++', 'cpp')
    }
    problem_dir = os.path.join(ROOT, 'submissions', str(id).zfill(3) + '. ' + title)
    codes = []
    for code_type in code_types:
        label, extension = code_infos[code_type]
        file_path = os.path.join(problem_dir, label, 'Solution.' + extension)
        if not os.path.exists(file_path):
            continue
        with open(file_path, 'r', encoding='utf-8') as f:
            source = f.read()
        highlighted = highlight(source, get_lexer_by_name(code_type), HtmlFormatter())
        codes.append((label, highlighted))
    return codes
def get_problem_list():
    """Build the sidebar entries: one dict (id, url, name) per problem, by id."""
    cursor = get_db().cursor()
    rows = cursor.execute('SELECT id,title,slug FROM problem ORDER BY id')
    return [
        {
            'id': pid,
            'url': '/problems/' + slug,
            'name': str(pid).zfill(3) + '. ' + title
        }
        for pid, title, slug in rows
    ]
if __name__ == '__main__':
    # Development entry point: run Flask's built-in server.
    app.run()
|
[
"zhantong1994@163.com"
] |
zhantong1994@163.com
|
330058010818406687c80f7723e26b445b282e69
|
5be2fc94724cc05d2dc449e0f5b40d9fb07edd51
|
/tests/test_biosample.py
|
4e618528f17766b407935e78b014a86d7a17a3b8
|
[
"MIT"
] |
permissive
|
LucaCappelletti94/encodeproject
|
b84614683c8652f812f2c01b0002903d849a080b
|
a2bcae8cfbb505a978ecea95c3a007f65625c57a
|
refs/heads/master
| 2022-05-07T13:16:58.774258
| 2022-04-27T07:51:22
| 2022-04-27T07:51:22
| 216,822,791
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from encodeproject import biosample, biosamples
def test_biosample():
    """Smoke-test fetching a single biosample by accession.

    The boolean second argument presumably toggles caching/downloading --
    confirm against the encodeproject API documentation.
    """
    biosample("ENCSR000EDP")
    biosample("ENCSR000EDP", False)
def test_biosamples():
    """Smoke-test the batched fetch of several biosample accessions."""
    biosamples(["ENCFF454HMH", "ENCFF663AYS"])
    biosamples(["ENCSR000EDP"], False)
|
[
"cappelletti.luca94@gmail.com"
] |
cappelletti.luca94@gmail.com
|
14cc45de89528b42640f58cba86eb2f58860bbcc
|
1879e4df9cff25bc0c32ff63aedc859301062f9d
|
/0x05-personal_data/encrypt_password.py
|
088ba68a96806e4bfba46db604229b5f920df220
|
[] |
no_license
|
rakiasomai/holbertonschool-web_back_end
|
0f9d36160c9762df0826adcac66b009d1076043b
|
f5aeeda56def93fe13d901dd52217b0dbd4124e9
|
refs/heads/master
| 2023-02-28T10:02:54.929275
| 2021-02-06T22:17:04
| 2021-02-06T22:17:04
| 305,420,230
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
#!/usr/bin/env python3
''' Personal data '''
import bcrypt
def hash_password(password: str) -> bytes:
    """Hash *password* with bcrypt, using a freshly generated random salt."""
    raw = password.encode('utf-8')
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(raw, salt)
def is_valid(hashed_password: bytes, password: str) -> bool:
    """Return True when *password* matches the bcrypt hash *hashed_password*."""
    candidate = password.encode('utf-8')
    return bcrypt.checkpw(candidate, hashed_password)
|
[
"somai.rakia@hotmail.fr"
] |
somai.rakia@hotmail.fr
|
5f62efd77cda877b0f315654e66fcb575dcf38a5
|
b21180985c994c19e850ef51d5d87c6bf595dc21
|
/wechat/queryexp.py
|
efc683b5018ed5bac565cde68dd6455b49f93e69
|
[] |
no_license
|
hldai/labelwc
|
c74d3af98576acd514f9136db663ca4cbd95708f
|
38c969c61f240e49d5475be716c6b159b57220cd
|
refs/heads/master
| 2020-12-02T22:18:06.991302
| 2017-08-13T13:04:44
| 2017-08-13T13:04:44
| 96,111,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,942
|
py
|
from utils import load_names_file
def load_acronym_to_name(acronym_name_file, exclude_strs):
    """Load an acronym -> full-name mapping from a tab-separated file.

    Each line is "acronym<TAB>name<TAB>count"; acronyms found in
    *exclude_strs* are skipped.  (Python 2 module: lines are decoded
    from UTF-8 bytes.)
    """
    acr_name_dict = dict()
    # 'with' guarantees the file is closed even if a malformed line raises.
    with open(acronym_name_file, 'r') as f:
        for line in f:
            line = line.strip().decode('utf-8')
            acr, name, _ = line.split('\t')
            if exclude_strs and acr in exclude_strs:
                continue
            acr_name_dict[acr] = name
    return acr_name_dict
def load_name_to_acronym(acronym_name_file, abbrev_exclude_strs):
    """Load a full-name -> acronym mapping, keeping the highest-count acronym.

    Lines are "acronym<TAB>name<TAB>count"; names listed in
    *abbrev_exclude_strs* are skipped.  (Python 2 module: lines are decoded
    from UTF-8 bytes.)
    """
    name_acr_cnt_dict = dict()
    # 'with' guarantees the file is closed even if a malformed line raises.
    with open(acronym_name_file, 'r') as f:
        for line in f:
            line = line.strip().decode('utf-8')
            acr, name, cnt = line.split('\t')
            if name in abbrev_exclude_strs:
                continue
            cnt = int(cnt)
            tup = name_acr_cnt_dict.get(name, None)
            # Keep only the acronym seen with the highest count for this name.
            if not tup or tup[1] < cnt:
                name_acr_cnt_dict[name] = (acr, cnt)
    # .items() instead of the Python-2-only .iteritems(): same iteration in
    # Python 2, and the module stays importable under Python 3.
    name_acr_dict = dict()
    for name, (acr, cnt) in name_acr_cnt_dict.items():
        name_acr_dict[name] = acr
    return name_acr_dict
def expand_word(word, acr_name_dict):
    """Greedily replace the longest acronym substrings of *word*.

    Scans left to right; at each position the longest prefix found in
    *acr_name_dict* (with a truthy expansion) is replaced, otherwise the
    single character is copied through unchanged.
    """
    expanded = []
    left = 0
    while left < len(word):
        right = len(word)
        match = None
        # Shrink the candidate substring until a known acronym is found.
        while right > left:
            match = acr_name_dict.get(word[left:right], None)
            if match:
                break
            right -= 1
        if right > left:
            expanded.append(match)
            left = right
        else:
            expanded.append(word[left])
            left = right + 1
    return ''.join(expanded)
class QueryExpansion:
    """Expands or abbreviates query strings via acronym/full-name tables.

    NOTE: this is Python 2 code (print statement, str.decode); do not run
    under Python 3 without porting.
    """

    def __init__(self, acronym_name_file, extra_acronym_name_file, expand_exclude_strs_file,
                 abbrev_exclude_strs_file, cn_seg_app):
        # Strings that must never be expanded.
        self.expand_exclude_strs = load_names_file(expand_exclude_strs_file)
        self.acr_name_dict = load_acronym_to_name(acronym_name_file, self.expand_exclude_strs)
        # Strings that must never be abbreviated.
        self.abbrev_exclude_strs = load_names_file(abbrev_exclude_strs_file)
        self.name_acr_dict = load_name_to_acronym(acronym_name_file, self.abbrev_exclude_strs)
        self.__load_extra_acronym_name_file(extra_acronym_name_file)
        # Word segmentation service used to tokenize query strings.
        self.seg_app = cn_seg_app

    def __load_extra_acronym_name_file(self, filename):
        """Merge manually curated acronym/name pairs into both mappings."""
        f = open(filename)
        for line in f:
            acr, name = line.strip().decode('utf-8').split('\t')
            self.acr_name_dict[acr] = name
            self.name_acr_dict[name] = acr
        f.close()

    def __expand_name_words_ob(self, name_words):
        """Older expansion variant (appears superseded by __expand_name_words).

        Only protects exclude-strings at the multi-word level, then expands
        each remaining word separately; contains a leftover debug print.
        """
        name_exp = ''
        lw = len(name_words)
        l = 0
        while l < lw:
            # Find the longest run of words that is an exclude-string.
            r = lw
            cur_str = ''
            while r > l:
                cur_str = ''.join(name_words[l:r])
                if cur_str in self.expand_exclude_strs:
                    break
                r -= 1
            if r > l:
                name_exp += cur_str
                l = r
            else:
                name_exp += expand_word(name_words[l], self.acr_name_dict)
                print name_words[l], name_exp
                l += 1
        return name_exp

    def __expand_name_words(self, name_words):
        """Expand acronyms in *name_words*, preferring the longest word run."""
        name_exp = ''
        lw = len(name_words)
        l = 0
        while l < lw:
            r = lw
            flg = True
            while r > l:
                cur_str = ''.join(name_words[l:r])
                # Exclude-strings are copied through untouched.
                if cur_str in self.expand_exclude_strs:
                    name_exp += cur_str
                    l = r
                    flg = False
                    break
                str_exp = self.acr_name_dict.get(cur_str, '')
                if str_exp:
                    name_exp += str_exp
                    l = r
                    flg = False
                    break
                r -= 1
            if flg:
                # No multi-word match: expand acronyms inside the single word.
                name_exp += expand_word(name_words[l], self.acr_name_dict)
                # print name_words[l], name_exp
                l += 1
        return name_exp

    def __abbrev_name_words(self, name_words):
        """Replace the longest runs of words that form a known full name."""
        new_name = ''
        wlen = len(name_words)
        l = 0
        while l < wlen:
            r = wlen
            flg = False
            while r > l:
                cur_str = ''.join(name_words[l:r])
                str_acr = self.name_acr_dict.get(cur_str, '')
                if str_acr:
                    new_name += str_acr
                    l = r
                    flg = True
                    break
                r -= 1
            if not flg:
                new_name += name_words[l]
                l += 1
        return new_name

    def query_expansion_words(self, name_words):
        """Return non-empty expansion and abbreviation candidates for words."""
        name_expand = self.__expand_name_words(name_words)
        name_abbrev = self.__abbrev_name_words(name_words)
        exp_names = []
        if name_expand:
            exp_names.append(name_expand)
        if name_abbrev:
            exp_names.append(name_abbrev)
        return exp_names

    def query_expansion(self, name_str):
        """Segment *name_str* and return variants that actually differ from it."""
        name_words = self.seg_app.segment(name_str).split(' ')
        name_expand = self.__expand_name_words(name_words)
        name_abbrev = self.__abbrev_name_words(name_words)
        exp_cands = [name_expand, name_abbrev]
        exp_names = list()
        for name in exp_cands:
            # Equal length (ignoring spaces) means nothing was rewritten.
            if len(name) == len(name_str) - name_str.count(' '):
                continue
            if name != name_str:
                exp_names.append(name)
        return exp_names

    def expand_name(self, name_str):
        """Return the expanded form of *name_str*, or '' when unchanged."""
        words = self.seg_app.segment(name_str).split(' ')
        new_name = self.__expand_name_words(words)
        if new_name != name_str:
            return new_name
        return ''

    def abbrev_name(self, name_str):
        """Return the abbreviated form of *name_str*, or '' when unchanged."""
        words = self.seg_app.segment(name_str).split(' ')
        new_name = self.__abbrev_name_words(words)
        # Shrinking by exactly one character while a space was present just
        # reflects the removed space, not a real abbreviation.
        if len(new_name) == len(name_str) - 1 and ' ' in name_str:
            return ''
        if new_name != name_str:
            return new_name
        return ''
|
[
"hldai@outlook.com"
] |
hldai@outlook.com
|
b7d65448e1c658d3cc0b42437060aee5c8c46e72
|
ca002961fa07883ff79ea67713bbc79e0ac79d28
|
/plugins/brains/BBWander.py
|
5c642987580df24602062aadb1efb8cb65ea2809
|
[] |
no_license
|
mindgitrwx/pyrobot3
|
e51f8f1bac01a2509f2d89668102770053c16f56
|
45216c0c11f5efaaa4042916b2fe8eaac00fc4a7
|
refs/heads/master
| 2020-03-23T19:28:44.395949
| 2018-10-03T22:06:42
| 2018-10-03T22:06:42
| 141,980,775
| 0
| 3
| null | 2018-09-14T11:20:00
| 2018-07-23T07:53:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,185
|
py
|
# A Behavior-based control system
from pyrobot.brain.fuzzy import *
from pyrobot.brain.behaviors import *
import math, time
class Avoid (Behavior):
    """Fuzzy obstacle-avoidance behavior: cruise when clear, turn away when close."""

    def setup(self):  # called when created
        """Initialize the update counter and its timestamp."""
        self.lasttime = time.time()
        self.count = 0

    def direction(self, dir):
        """Return a rotation away from the side the obstacle angle came from."""
        if dir < 0.0:
            return 0.9
        else:
            return -0.9

    def update(self):
        # Reset the bookkeeping counter every 50 updates.
        if self.count == 50:
            currtime = time.time()
            self.count = 0
            self.lasttime = time.time()
        else:
            self.count += 1
        # Closest front-range reading and the angle it was seen at.
        close_dist, angle = min( [(s.distance(), s.angle(unit="radians")) for s in self.robot.range["front-all"]])
        max_sensitive = self.robot.range.getMaxvalue() * 0.8
        # Fuzzy rules: stop/turn away when too close, cruise straight otherwise.
        self.IF(Fuzzy(0.1, max_sensitive) << close_dist, 'translate', 0.0, "TooClose")
        self.IF(Fuzzy(0.1, max_sensitive) >> close_dist, 'translate', 0.3, "Ok")
        self.IF(Fuzzy(0.1, max_sensitive) << close_dist, 'rotate', self.direction(angle), "TooClose")
        self.IF(Fuzzy(0.1, max_sensitive) >> close_dist, 'rotate', 0.0, "Ok")
class TurnAround(State):
    """State that rotates in place until the front sensors are clear."""

    def update(self):
        # Keep turning while anything in front is closer than 1 unit.
        if min([s.distance() for s in self.robot.range["front-all"]]) < 1.0:
            self.move(0, .2)
        else:
            self.goto("state1")
class state1 (State):
    """Wandering state driven by the fuzzy Avoid behavior."""

    def setup(self):
        # Weight the Avoid behavior's translate/rotate outputs.
        self.add(Avoid(1, {'translate': .3, 'rotate': .3}))
        print(("initialized state", self.name))

    def update(self):
        # Hand control to TurnAround once an obstacle gets close in front.
        if min([s.distance() for s in self.robot.range["front-all"]]) < 1:
            self.goto("TurnAround")
def INIT(engine):  # passes in robot, if you need it
    """Factory called by pyrobot: build the behavior-based brain for *engine*."""
    brain = BehaviorBasedBrain({'translate' : engine.robot.translate, \
                                'rotate' : engine.robot.rotate, \
                                'update' : engine.robot.update }, engine)
    # add the two states (inactive until activated):
    brain.add(state1()) # non active
    brain.add(TurnAround()) # non active
    # activate the wandering state:
    brain.activate('state1') # could have made it active in constructor
    return brain
|
[
"hybridkernal@gmail.com"
] |
hybridkernal@gmail.com
|
b0eba99c0ca25ed04ea431a7bee9a18f308d4931
|
646cadb1c72ef4a060343baf2fcbe271958b6878
|
/tigerjython/TJExamples/10-Ef/Eff4d.py
|
a11bfecbf166ccc406e98f9264dc1a5edaf3fec4
|
[] |
no_license
|
tigerjython/tjinstall
|
bd75cf8e4ae27b639a13865ef1ec5710391a2938
|
aab61519b5299c2ab4f423c6fc5d8ea7c7860a99
|
refs/heads/master
| 2021-01-17T08:53:50.386905
| 2018-01-12T06:56:28
| 2018-01-12T06:56:28
| 40,659,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,151
|
py
|
from gamegrid import *
# Station positions on the game grid (column, row).
locations = {
    'Althaus':Location(2, 0),
    'Bellevue':Location(0, 1),
    'City':Location(1, 3),
    'Dom':Location(4, 2),
    'Enge':Location(5, 0),
    'Friedhof':Location(3, 4)}
# Direct connections between stations (adjacency lists, symmetric).
neighbours = {
    'Althaus':['Bellevue', 'Dom', 'Enge'],
    'Bellevue':['Althaus', 'City', 'Dom'],
    'City':['Bellevue', 'Dom', 'Friedhof'],
    'Dom':['Althaus', 'Bellevue', 'City', 'Enge', 'Friedhof'],
    'Enge':['Althaus', 'Dom'],
    'Friedhof':['Althaus', 'City', 'Dom']}
# Edge weights, keyed with the alphabetically smaller station name first.
distances = {('Althaus', 'Bellevue'):5, ('Althaus', 'Dom'):9,
             ('Althaus', 'Enge'):6, ('Althaus', 'Friedhof'):15,
             ('Bellevue', 'City'):3, ('Bellevue', 'Dom'):13,
             ('City', 'Dom'):4, ('City', 'Friedhof'):3,
             ('Dom', 'Enge'):2, ('Dom', 'Friedhof'):12}
def getNeighbourDistance(station1, station2):
    """Weight of the edge between two directly connected stations.

    The distances table stores each pair once, alphabetically ordered,
    so normalize the key before the lookup.
    """
    key = (station1, station2) if station1 < station2 else (station2, station1)
    return distances[key]
def totalDistance(li):
    """Sum of the edge weights along the station sequence *li* (0 if < 2 stops)."""
    return sum(getNeighbourDistance(a, b) for a, b in zip(li, li[1:]))
def drawGraph():
    """Draw stations, their connections, and edge weights on the background."""
    getBg().clear()
    getBg().setPaintColor(Color.blue)
    for station in locations:
        location = locations[station]
        getBg().fillCircle(toPoint(location), 10)
        startPoint = toPoint(location)
        getBg().drawText(station, startPoint)
        for s in neighbours[station]:
            drawConnection(station, s)
            # distances keys store the alphabetically smaller name first.
            if s < station:
                distance = distances[(s, station)]
            else:
                distance = distances[(station, s)]
            endPoint = toPoint(locations[s])
            # Label the edge with its weight at the midpoint of the line.
            getBg().drawText(str(distance),
                             getDividingPoint(startPoint, endPoint, 0.5))
    refresh()
def drawConnection(startStation, endStation):
    """Draw a straight line between two stations on the background."""
    startPoint = toPoint(locations[startStation])
    endPoint = toPoint(locations[endStation])
    getBg().drawLine(startPoint, endPoint)
def search(station):
    """Depth-first enumeration of all acyclic paths from *station* to targetStation.

    Mutates the module-level 'visited' path and records the best path found so
    far in trackToTarget / trackLength.
    """
    global trackToTarget, trackLength
    visited.append(station) # station marked as visited
    # Check for solution
    if station == targetStation:
        currentDistance = totalDistance(visited)
        if currentDistance < trackLength:
            trackLength = currentDistance
            trackToTarget = visited[:]   # copy: 'visited' keeps changing below
    for s in neighbours[station]:
        if s not in visited: # if all are visited, recursion returns
            search(s) # recursive call
    visited.pop() # station may be visited by another path
def getStation(location):
    """Reverse lookup: name of the station at *location*, or None if none."""
    for name, loc in locations.items():
        if loc == location:
            return name
    return None  # no station at this grid location
def init():
    """Reset the search state and redraw the pristine graph."""
    global visited, trackToTarget, trackLength
    visited = []
    trackToTarget = []
    trackLength = 1000   # sentinel "infinity"; any real track is shorter
    drawGraph()
def pressEvent(e):
    """Mouse handler: first click picks the start station, second the target."""
    global isStart, startStation, targetStation
    mouseLoc = toLocationInGrid(e.getX(), e.getY())
    mouseStation = getStation(mouseLoc)
    if mouseStation == None:
        return  # click was not on a station
    if isStart:
        isStart = False
        init()
        setTitle("Klicke auf Zielstation")
        startStation = mouseStation
        getBg().setPaintColor(Color.red)
        getBg().fillCircle(toPoint(mouseLoc), 10)
    else:
        isStart = True
        setTitle("Noch einmal? Klicke auf Startstation")
        targetStation = mouseStation
        getBg().setPaintColor(Color.green)
        getBg().fillCircle(toPoint(mouseLoc), 10)
        # Exhaustive search for the shortest track, then report and draw it.
        search(startStation)
        setStatusText("Kürzester Weg von " + startStation + " nach "
                      + targetStation + ": " + str(trackToTarget) + " Länge = "
                      + str(trackLength))
        for i in range(len(trackToTarget) - 1):
            s1 = trackToTarget[i]
            s2 = trackToTarget[i + 1]
            getBg().setPaintColor(Color.black)
            getBg().setLineWidth(3)
            drawConnection(s1, s2)
        getBg().setLineWidth(1)
        refresh()
# Script setup: build the board, register the mouse handler, draw the graph.
isStart = True
makeGameGrid(7, 5, 100, None, "sprites/city.png", False,
             mousePressed = pressEvent)
setTitle("City Guide. Klicke auf Startstation")
addStatusBar(30)
show()
init()
|
[
"support@tigerjython.com"
] |
support@tigerjython.com
|
773fa456f16adc76fdbca0568bf8feb723dfad1b
|
2d4af29250dca8c72b74e190e74d92f1467120a0
|
/TaobaoSdk/Request/TaohuaChildcatesGetRequest.py
|
a624463f6f4d5bb1765b77cb318501d6f0daeeac
|
[] |
no_license
|
maimiaolmc/TaobaoOpenPythonSDK
|
2c671be93c40cf487c0d7d644479ba7e1043004c
|
d349aa8ed6229ce6d76a09f279a0896a0f8075b3
|
refs/heads/master
| 2020-04-06T03:52:46.585927
| 2014-06-09T08:58:27
| 2014-06-09T08:58:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 通过类目ID获取它的类目列表
# @author wuliang@maimiaotech.com
# @date 2012-07-03 10:25:14
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
    # Directory containing this file, with symlinks in the file path resolved.
    return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))

# Make the SDK package root importable when this module is used directly.
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
    sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">通过类目ID获取它的类目列表</SPAN>
# <UL>
# </UL>
class TaohuaChildcatesGetRequest(object):
    """Request object for the taobao.taohua.childcates.get API: fetch the
    child categories of a given category id.

    Attributes:
        method:    API method name understood by the TOP gateway (str).
        timestamp: Request timestamp, epoch seconds; defaults to "now" (int).
        cate_id:   Optional parent category id whose children are queried
                   (Number, defaults to None).
    """

    def __init__(self):
        # Name the base class explicitly: super(self.__class__, self) causes
        # infinite recursion as soon as this class is subclassed.
        super(TaohuaChildcatesGetRequest, self).__init__()
        # API endpoint name.
        self.method = "taobao.taohua.childcates.get"
        # Timestamp used when sending the request; callers may overwrite it.
        self.timestamp = int(time.time())
        # Parent category id (optional request parameter).
        self.cate_id = None
|
[
"liyangmin@maimiaotech.com"
] |
liyangmin@maimiaotech.com
|
44745815bf70dfefbc566356404d6d02776e8a77
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03254/s956844324.py
|
07d913466375174a9e3f7d1410eaa5709318f863
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
# Greedily subtract the sorted values A[i] from x, counting how many fit.
N,x = map(int, input().split())
A = sorted(map(int, input().split()))
s = 0
for i in range(N):
    x -= A[i]
    if x<0:
        break
    else:
        s += 1
# If anything is left over (x > 0), the final count is reduced by one.
print(s if x<=0 else s-1)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
21bd5066ba2a212591f1557923296b35eda07ae0
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_072/ch25_2019_08_21_19_49_43_725038.py
|
4ad02d8f6560a27c1b43320c99a7c2c44a6ef538
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# Read the distance travelled (km); float so fractional distances work.
a=float(input('Qual será a distância percorrida ? '))
def preco_passagem(a):
    """Ticket price for a distance of *a* km (cheaper rate beyond 200 km).

    NOTE(review): the >200 branch uses (a - 100), which makes the price jump
    at a == 200 -- confirm against the exercise statement.
    """
    if a <= 200:
        return a * 0.5
    return 100 + (a - 100) * 0.45
# Print the fare with two decimal places.
print('{0:.2f}'.format(preco_passagem(a)))
|
[
"you@example.com"
] |
you@example.com
|
43c46f3842293ca95fcc91f1dcb7bdd6100621cd
|
f0937d9fb9108cdd69c5c477a782965bb1f25da5
|
/first/settings.py
|
5922c4ca8b47a4245264bfa0f0f1e6fe1814266e
|
[] |
no_license
|
SimeonYS/first
|
64218a5c2113cebfc1e1aec3f2808dcefcc30342
|
986e7bbbe5635685ce6795ee9f1459ce5d5a8ef5
|
refs/heads/main
| 2023-03-29T17:29:57.300975
| 2021-03-29T07:58:32
| 2021-03-29T07:58:32
| 352,561,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
# Scrapy project settings for the "first" crawler.
BOT_NAME = 'first'
SPIDER_MODULES = ['first.spiders']
NEWSPIDER_MODULE = 'first.spiders'
FEED_EXPORT_ENCODING = 'utf-8'
LOG_LEVEL = 'ERROR'  # keep the console quiet; only real errors
DOWNLOAD_DELAY = 0   # no throttling between requests
# Present a desktop-Chrome user agent to target sites.
USER_AGENT="Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36"
ROBOTSTXT_OBEY = True
# Route scraped items through the project pipeline.
ITEM_PIPELINES = {
    'first.pipelines.FirstPipeline': 300,
}
|
[
"simeon.simeonov@ADPVT.com"
] |
simeon.simeonov@ADPVT.com
|
c5cf3f5dddb8cb510c9b6acf954b3ddde35e9e2e
|
8506f0a22ef4edf03627951ced530b921ff4d383
|
/tools/sumolib/output/convert/gpx.py
|
0ec2127c1bf7a9f35b0a8fba39d2c071c8999ca0
|
[] |
no_license
|
deepak728/Traffic-Optimization-
|
fb0ac074fa601e524eb0d79defc7e8b84ab03138
|
85bc54de2e318f36bdcc5bb6f05badde0fb35ffe
|
refs/heads/master
| 2020-03-29T23:29:36.740048
| 2018-11-12T09:19:17
| 2018-11-12T09:19:17
| 150,475,374
| 1
| 1
| null | 2018-11-12T09:19:19
| 2018-09-26T18:57:35
|
Java
|
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
"""
@file gpx.py
@author Jakob Erdmann
@author Laura Bieker
@date 2014-02-13
@version $Id: gpx.py 18096 2015-03-17 09:50:59Z behrisch $
This module includes functions for converting SUMO's fcd-output into
GPX format (http://en.wikipedia.org/wiki/GPS_eXchange_Format)
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2014 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from collections import defaultdict
def fcd2gpx(inpFCD, outSTRM, ignored):
    """Convert SUMO fcd-output to a GPX document written to *outSTRM*.

    inpFCD:  iterable of timesteps, each with .time and .vehicle (objects
             carrying .id, .x (longitude) and .y (latitude)).
    outSTRM: writable text stream receiving the GPX output.
    ignored: unused; kept for interface compatibility with the other
             fcd converters.
    """
    tracks = defaultdict(list)
    # Group all positions by vehicle id, keeping the timestamps.
    for timestep in inpFCD:
        for v in timestep.vehicle:
            tracks[v.id].append((timestep.time, v.x, v.y))
    outSTRM.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    outSTRM.write('<gpx version="1.0">\n')
    # .items() instead of the Python-2-only .iteritems(): identical iteration
    # under Python 2, and the function also works under Python 3.
    for vehicle, trackpoints in tracks.items():
        outSTRM.write(" <trk><name>%s</name><trkseg>\n" % vehicle)
        for timestamp, lon, lat in trackpoints:
            outSTRM.write(' <trkpt lon="%s" lat="%s"><time>%s</time></trkpt>\n' % (
                lon, lat, timestamp))
        outSTRM.write(" </trkseg></trk>\n")
    outSTRM.write('</gpx>\n')
|
[
"deepak711998@gmail.com"
] |
deepak711998@gmail.com
|
7e0f20a3411dc570ed92600197a47eda29d7e3fc
|
b5ffa0109ee980406550b7f9a4f5c7587f10a759
|
/sklearn库.py
|
c597a056daae863e773ae3d33e4f1db9b08556b2
|
[] |
no_license
|
SuneastChen/np_pd_sklearn
|
07fd99f383cfaf117e6dff7beb12b240957cbbe0
|
2ff777772c5a0db1e21635796351919c049dc680
|
refs/heads/master
| 2020-03-07T22:38:27.311708
| 2018-04-02T13:24:03
| 2018-04-02T13:24:03
| 127,759,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,832
|
py
|
# _*_ coding:utf-8 _*_
# !/usr/bin/python
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# --- k-nearest-neighbours on the iris dataset -------------------------------
iris = datasets.load_iris()  # bundled dataset, dict-like; 'data' and 'target' keys
iris_X = iris.data  # feature matrix (2-D array)
iris_y = iris.target  # label vector (1-D array)
print(iris_X[:3, :])  # peek at the first three rows
print(iris_y)  # peek at the labels
# Split into train/test sets (30% held out; rows are shuffled).
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)
print(y_train)  # training labels come back shuffled
# Fit a k-nearest-neighbours classifier.
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)  # train
print(knn.predict(X_test))  # predictions on the held-out set
print(y_test)  # ground truth, for visual comparison

from sklearn.linear_model import LinearRegression

# --- generic model workflow on the Boston housing data ----------------------
loaded_data = datasets.load_boston()  # house-price dataset
data_X = loaded_data.data
data_y = loaded_data.target
model = LinearRegression()  # ordinary least-squares regression
model.fit(data_X, data_y)  # train
print(model.predict(data_X[:4, :]))  # predict on the first four rows
print(data_y[:4])
print(model.coef_)  # learned weight per input feature
print(model.intercept_)  # bias term
print(model.get_params())  # constructor parameters of the model
# {'copy_X': True, 'fit_intercept': True, 'n_jobs': 1, 'normalize': False}
print(model.score(data_X, data_y))  # R^2 score of the fit on the training data

import matplotlib.pyplot as plt

# --- synthetic linear regression data ---------------------------------------
X, y = datasets.make_regression(n_samples=100, n_features=1, n_targets=1, noise=10)
print(X[:5, :])
plt.scatter(X, y)
plt.show()

from sklearn import preprocessing

# --- feature scaling ---------------------------------------------------------
a = np.array([[10, 2.7, 3.6],
              [-100, 5, -2],
              [120, 20, 40]])
print(a)
print(preprocessing.scale(a))  # standardize each column to a comparable range

from sklearn.datasets.samples_generator import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_samples=300, n_features=2, n_redundant=0, n_informative=2,
                           random_state=22, n_clusters_per_class=1, scale=100)  # generate data
X = preprocessing.scale(X)  # shrink the coordinate range before fitting
# plt.scatter(X[:, 0], X[:, 1], c=y)
# plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
model = SVC()  # support-vector classifier (regularized, resists overfitting)
model.fit(X_train, y_train)
print(model.score(X_test, y_test))  # ~0.944 after scaling; feature ranges directly affect accuracy

# --- cross-validation over several train/test splits -------------------------
from sklearn.model_selection import cross_val_score
iris = datasets.load_iris()  # reload the iris dataset
iris_X = iris.data  # feature matrix
iris_y = iris.target  # label vector
# Split into train/test sets.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)
knn = KNeighborsClassifier(n_neighbors=5)  # k-NN using the 5 nearest points
# Single-split variant:
# knn.fit(X_train, y_train)  # train
# print(knn.score(X_test, y_test))  # score of a single split
scores = cross_val_score(knn, X, y, cv=5, scoring='accuracy')  # 5-fold cross-validation
print(scores)  # one accuracy per fold (1-D array)
print(scores.mean())

# Pick the best hyper-parameter: which neighbour count scores highest.
k_range = range(1, 31)  # candidate parameter values
k_scores = []
for k in k_range:  # different model types could be compared the same way
    knn = KNeighborsClassifier(n_neighbors=k)  # current k under test
    # scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')  # for classification
    loss = -cross_val_score(knn, X, y, cv=10, scoring='neg_mean_squared_error')  # for regression; negate the sign
    k_scores.append(loss.mean())  # one mean loss per tested k
# print(k_scores)
plt.plot(k_range, k_scores)
plt.xlabel('n_neighbors=k')
plt.ylabel('accuracy')
plt.show()
# Original author's conclusion: n_neighbors=10 is best; larger values over-fit.
# How to visualize over-fitting:
'''
from sklearn.model_selection import learning_curve
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
y = digits.target
train_sizes, train_loss, test_loss = learning_curve(
    SVC(gamma=0.001), X, y, cv=5, scoring='neg_mean_squared_error', train_sizes=[i/10 for i in range(1, 11)]
)  # 多组测试的方法,传入训练数量的百分比点
# print(train_sizes)  # 得到每个时间段训练的数量,组成的一维数组
# print(train_loss)  # 得到相应的二维数组,列数=分组数,行数=时间段的个数
# print(test_loss)  # 得到相应的二维数组,列数=分组数,行数=时间段的个数
train_loss_mean = -np.mean(train_loss, axis=1)  # 在表格右侧求平均,增加列,行不变,即axis=1
test_loss_mean = -np.mean(test_loss, axis=1)
plt.plot(train_sizes, train_loss_mean, 'o-', color='r', label='Training')
plt.plot(train_sizes, test_loss_mean, 'o-', color='g', label='Testing')
plt.xlabel('train_sizes')
plt.ylabel('loss')
plt.show()  # 若将SVC模型的gamma参数改为0.01,便会产生过拟合
'''
# How to search for the best model parameter:
'''
from sklearn.model_selection import validation_curve
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
y = digits.target
param_range = np.logspace(-6, -2.3, 5)  # 新参数
train_loss, test_loss = validation_curve(
    SVC(), X, y, param_name='gamma', param_range=param_range,
    cv=10, scoring='neg_mean_squared_error')  # 返回值无train_sizes,参数无train_sizes,新增了gamma参数
train_loss_mean = -np.mean(train_loss, axis=1)  # 在表格右侧求平均,增加列,行不变,即axis=1
test_loss_mean = -np.mean(test_loss, axis=1)
plt.plot(param_range, train_loss_mean, 'o-', color='r', label='Training')
plt.plot(param_range, test_loss_mean, 'o-', color='g', label='Testing')
plt.xlabel('gamma')
plt.ylabel('loss')
plt.show()  # 根据图像可直观地看出,最优参数gamma=0.0005左右
'''
# --- exporting / importing a trained model -----------------------------------
from sklearn import svm
iris = datasets.load_iris()
X, y = iris.data, iris.target
model = SVC()
model.fit(X,y)
# Option 1: serialize with the pickle module.
import pickle
with open('model.pkl', 'wb')as f:
    pickle.dump(model, f)
with open('model.pkl', 'rb')as f:
    model2 = pickle.load(f)
print(model2.predict(X[0:3]))  # test the reloaded model on the first 3 rows
# Option 2: joblib (more efficient for models holding large arrays).
from sklearn.externals import joblib
joblib.dump(model, 'model_joblib.pkl')  # save the model
model3 = joblib.load('model_joblib.pkl')
print(model3.predict(X[0:6]))
|
[
"1050521852@qq.com"
] |
1050521852@qq.com
|
727b5f688d0d70414334ccda20dfd1f147a25259
|
b604d6e2b1f206e6df660da2be2add78ec22941a
|
/resources/ros_kinetic/src/ros/rosbuild/bin/rosgcov_summarize
|
9c19df610036bbb08442ed9a949fe1a44b505a54
|
[] |
no_license
|
fqez/common
|
7b521773d81e2e687f6ae482f595ca3d19515e39
|
f423fec07f39da9cb38f91dc4f3f1cd51c1a3130
|
refs/heads/master
| 2020-05-21T23:59:17.035384
| 2017-03-14T11:46:57
| 2017-03-14T11:46:57
| 62,873,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,635
|
#!/usr/bin/env python3
import sys
import fileinput
import re
import os.path

USAGE = "USAGE: rosgcov_summarize <package_dir> <rosgcov_file>"
if len(sys.argv) != 3:
    print(USAGE)
    sys.exit(-1)
pkg = sys.argv[1]
fname = sys.argv[2]
# A missing rosgcov file means the package produced no coverage data at all.
if not os.path.exists(fname):
    print('[rosgcov] %s : %.2f%% (no coverage results)' % (os.path.split(pkg)[1],0.0))
    sys.exit(0)
# gcov annotated-source formats: "   12:" executed line, "#####:" never
# executed, "branch N taken M" / "branch N never executed" for branches.
re_hit = re.compile('^ *[0-9]*:.*')
re_miss = re.compile('^ *#####:.*')
re_branch_hit = re.compile('^branch *[0-9] *taken [0-9]*.*')
re_branch_miss = re.compile('^branch *[0-9] *never executed.*')
# Each line of the rosgcov file is "<dir> <source-file>"; join the two parts.
files = []
finput = fileinput.input(fname)
for l in finput:
    ls = l.strip().split(' ')
    f = os.path.join(ls[0],os.path.split(ls[1])[1])
    files.append(f.strip())
# Package-wide accumulators.
total = 0
hits = 0
misses = 0
branch_total = 0
branch_hits = 0
branch_misses = 0
print('-------------------------------------------------------')
print('Coverage summary: ')
print('-------------------------------------------------------')
for f in files:
    # Strip the common package prefix to get a short display name.
    prefix = os.path.commonprefix([pkg, f])
    display_name = f[len(prefix):]
    if display_name[0] == '/':
        display_name = display_name[1:]
    print(' ' + display_name + ': ')
    gcov_fname = f + '.gcov'
    if not os.path.exists(gcov_fname):
        print('WARNING: no coverage results for %s' % (display_name))
        continue
    gcovf = fileinput.input(gcov_fname)
    # Per-file counters.
    local_total = 0
    local_hits = 0
    local_misses = 0
    local_branch_total = 0
    local_branch_hits = 0
    local_branch_misses = 0
    for s in gcovf:
        if re_hit.match(s):
            local_hits += 1
            local_total += 1
        elif re_miss.match(s):
            local_misses += 1
            local_total += 1
        # Branch records are counted independently of line records.
        if re_branch_hit.match(s):
            local_branch_hits += 1
            local_branch_total += 1
        elif re_branch_miss.match(s):
            local_branch_misses += 1
            local_branch_total += 1
    # max(..., 1) guards the division for files with no countable lines.
    print(' line: %.2f%% (%d / %d)' % ((100.0 * local_hits / max(local_total,1)), local_hits, local_total))
    hits += local_hits
    misses += local_misses
    total += local_total
    print(' branch: %.2f%% (%d / %d)' % ((100.0 * local_branch_hits / max(local_branch_total,1)), local_branch_hits, local_branch_total))
    branch_hits += local_branch_hits
    branch_misses += local_branch_misses
    branch_total += local_branch_total
print('-------------------------------------------------------')
print('[rosgcov] %s : %.2f%% (%d / %d)' % (os.path.split(pkg)[1],(100.0 * hits / max(total,1)), hits, total))
print('[rosgcov] %s : branch %.2f%% (%d / %d)' % (os.path.split(pkg)[1],(100.0 * branch_hits / max(branch_total,1)), branch_hits, branch_total))
print('-------------------------------------------------------')
[
"f.perez475@gmail.com"
] |
f.perez475@gmail.com
|
|
fa4e4448ac09a8ca4502f4e8591d83ef40112882
|
fc2447b91cbee82e74e939092ec1903678f3217a
|
/PythonPractice/算法图解/Dijkstra's algorithm.py
|
0cd528a8c91f11657af1906538a31b531f16e4a9
|
[] |
no_license
|
yglj/learngit
|
0eac654e7c49f2ede064b720e6ee621a702193b4
|
74fb4b93d5726c735b64829cafc99878d8082121
|
refs/heads/master
| 2022-12-24T10:01:56.705046
| 2019-05-27T21:04:08
| 2019-05-27T21:04:08
| 146,157,116
| 0
| 1
| null | 2022-12-12T07:01:25
| 2018-08-26T06:28:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
"""
狄克斯特拉算法
每条边上的关联数字称为权重
带权重的图叫加权图
寻找加权图的最短路径
只是用于有向无环图
"""
graph = {} # 加权图
costs = {} # 开销
parents = {} # 父节点
# 图的各顶点的邻居及边的权重
graph['start'] = {}
graph['start']['a'] = 6
graph['start']['b'] = 2
# print(graph['start'].keys())
graph['a'] = {}
graph['a']['fin'] = 1
graph['b'] = {}
graph['b']['a'] = 3
graph['b']['fin'] = 5
graph['fin'] = {}
infinity = float('inf') # 无穷大
costs['a'] = 6
costs['b'] = 2
costs['fin'] = infinity
parents['a'] = 'start'
parents['b'] = 'start'
parents['fin'] = None # 开始没有到达fin的路径
processed = []
"""
1.只要还有要处理的节点
2.获取离起点最近的节点
3.更新其邻居的开销
4.如果有邻居的开销被更新,同时更新其父节点
5.将该节点标记为处理过
"""
def find_lowest_cost_node(costs):
    """Return the unprocessed node with the smallest known cost (None if done)."""
    best_node = None
    best_cost = float('inf')
    for candidate, candidate_cost in costs.items():
        # Strict '<' keeps the first of equally cheap candidates.
        if candidate not in processed and candidate_cost < best_cost:
            best_cost = candidate_cost
            best_node = candidate
    return best_node
def main():
node = find_lowest_cost_node(costs)
while node is not None:
cost = costs[node]
neighbors = graph[node]
for n in neighbors.keys():
new_cost = cost + neighbors[n]
if costs[n] > new_cost:
costs[n] = new_cost
parents[n] = node
processed.append(node)
node = find_lowest_cost_node(costs)
if __name__ == '__main__':
main()
# print(parents)
# print(costs)
# print(graph)
processed.insert(0, 'start')
path = '->'.join(processed)
print(path)
|
[
"2365952530@qq.com"
] |
2365952530@qq.com
|
4aa3c05bab82dea4ae678dfc7c1ea442168008e2
|
414a58c691ff7b434034086433644870f8ac5c99
|
/tests/test_geom.py
|
b1de7a128110d8a3d34fee1bc3c1dbf3d7148c62
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
WZBSocialScienceCenter/pdftabextract
|
08328197681ca03b764ea2df410851c06e0a92b7
|
7b86a9098b5d397f984b1cbc6716d55860e34ef8
|
refs/heads/master
| 2022-08-02T16:43:42.187628
| 2022-06-24T09:51:22
| 2022-06-24T09:51:22
| 62,884,666
| 2,239
| 401
|
Apache-2.0
| 2022-06-24T09:51:23
| 2016-07-08T11:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 7,946
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 13 09:50:51 2017
@author: mkonrad
"""
import math
import pytest
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from pdftabextract.geom import (pt, ptdist, vecangle, vecrotate, overlap, lineintersect,
rect, rectcenter, rectarea, rectintersect,
normalize_angle, normalize_angle_halfcircle,
project_polarcoord_lines)
FMIN = np.finfo(np.float32).min
FMAX = np.finfo(np.float32).max
def test_pt():
x = 0
y = 1
pt0 = pt(x, y)
assert type(pt0) is np.ndarray
assert pt0.dtype == np.float
assert pt0[0] == x
assert pt0[1] == y
pt1 = pt(x, y, np.int)
assert pt1.dtype == np.int
assert pt1[0] == x
assert pt1[1] == y
def test_ptdist():
p1 = pt(0, 0)
p2 = pt(1, 0)
p3 = pt(1, 1)
assert ptdist(p1, p1) == 0
assert ptdist(p1, p2) == 1
assert ptdist(p2, p1) == ptdist(p1, p2)
assert ptdist(p1, p3) == math.sqrt(2)
def test_vecangle():
v1 = pt(1, 0)
v2 = pt(2, 0)
v3 = pt(1, 1)
v4 = pt(0, 1)
v5 = pt(0, -1)
assert np.isnan(vecangle(pt(0, 0), v1)) # pt(0, 0) is vec of no length
assert vecangle(v1, v2) == 0
assert round(vecangle(v1, v3), 4) == round(math.radians(45), 4)
assert vecangle(v2, v4) == vecangle(v1, v4) == math.radians(90)
assert vecangle(v2, v5) == math.radians(90) # always the smaller angle
@given(st.floats(min_value=FMIN, max_value=FMAX),
st.floats(min_value=FMIN, max_value=FMAX),
st.floats(min_value=FMIN, max_value=FMAX),
st.floats(min_value=FMIN, max_value=FMAX))
def test_vecangle_2(x1, y1, x2, y2):
v0 = pt(0, 0)
v1 = pt(x1, y1)
v2 = pt(x2, y2)
try:
alpha = vecangle(v1, v2)
except ValueError: # math domain error in some edge cases?
return
if np.allclose(v1, v0) or np.allclose(v2, v0):
assert np.isnan(alpha)
else:
assert 0 <= alpha <= np.pi
def test_vecrotate():
assert np.array_equal(vecrotate(pt(0, 0), 0.123), pt(0, 0))
assert np.allclose(vecrotate(pt(1, 0), math.radians(90)), pt(0, 1))
assert np.allclose(vecrotate(pt(1, 0), math.radians(90), about=pt(1, 1)), pt(2, 1))
def test_overlap():
assert overlap(0, 1, 0, 1) is True
assert overlap(0, 0, 1, 1) is False
assert overlap(0, 10, 5, 15) is True
assert overlap(-10, 10, -20, -10) is True
assert overlap(-9, 10, -20, -10) is False
def test_lineintersect():
# first with check_in_segm = True
X = lineintersect(pt(0, 0), pt(0, 0), pt(0, 0), pt(0, 0)) # coincident I
assert sum(np.isnan(X)) == len(X)
X = lineintersect(pt(0, 0), pt(0, 1), pt(0, 0), pt(0, 1)) # coincident II
assert sum(np.isnan(X)) == len(X)
assert lineintersect(pt(0, 0), pt(0, 1), pt(1, 0), pt(1, 1)) is None # parallel, non coincident
assert lineintersect(pt(0, 0), pt(0, 1), pt(1, 1), pt(2, 2)) is None # non-parellel, no intersection
assert lineintersect(pt(0, 0), pt(2, 2), pt(0, 5), pt(5, 0)) is None # non-parellel, no intersection II
assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(0, 1), pt(2, 2)), pt(0, 1)) # intersection - touch
assert np.array_equal(lineintersect(pt(0, 0), pt(2, 2), pt(0, 2), pt(2, 0)), pt(1, 1)) # intersection
# now with check_in_segm = False
X = lineintersect(pt(0, 0), pt(0, 0), pt(0, 0), pt(0, 0), False) # coincident I
assert sum(np.isnan(X)) == len(X)
X = lineintersect(pt(0, 0), pt(0, 1), pt(0, 0), pt(0, 1), False) # coincident II
assert sum(np.isnan(X)) == len(X)
X = lineintersect(pt(0, 0), pt(1, 1), pt(2, 2), pt(3, 3), False) # coincident III
assert sum(np.isnan(X)) == len(X)
assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(1, 1), pt(2, 2), False), pt(0, 0)) # intersection (out of segments)
assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(0, 1), pt(2, 2), False), pt(0, 1)) # intersection - touch
assert np.array_equal(lineintersect(pt(0, 0), pt(2, 2), pt(0, 2), pt(2, 0), False), pt(1, 1)) # intersection
def test_rect():
with pytest.raises(ValueError):
rect(pt(0, 0), pt(1, 1, dtype=np.int)) # dtypes do not match
with pytest.raises(ValueError):
rect(pt(0, 0), pt(0, 0)) # doesn't form rect
with pytest.raises(ValueError):
rect(pt(1, 1), pt(0, 0)) # doesn't form rect
with pytest.raises(ValueError):
rect(pt(0, 0), pt(1, 0)) # doesn't form rect
a = pt(0, 0)
b = pt(1, 1)
r = rect(a, b)
assert r.dtype == a.dtype == b.dtype
assert np.array_equal(r[0], a)
assert np.array_equal(r[1], b)
a = pt(-3, -1)
b = pt(8, 1.2)
r = rect(a, b)
assert r.dtype == a.dtype == b.dtype
assert np.array_equal(r[0], a)
assert np.array_equal(r[1], b)
def test_rectcenter():
a = pt(0, 0)
b = pt(1, 1)
r = rect(a, b)
center = rectcenter(r)
assert type(center) is np.ndarray
assert np.array_equal(center, pt(0.5, 0.5))
a = pt(-3, -1)
b = pt(2, 5)
r = rect(a, b)
assert np.array_equal(rectcenter(r), pt(-0.5, 2))
def test_rectarea():
a = pt(0, 0)
b = pt(1, 1)
r = rect(a, b)
assert rectarea(r) == 1
a = pt(-3, -1)
b = pt(2, 5)
r = rect(a, b)
assert rectarea(r) == 30
def test_rectintersect():
a = rect(pt(0, 0), pt(1, 1))
b = rect(pt(-3, -1), pt(2, 5))
assert rectintersect(a, a) == rectarea(a)
assert rectintersect(b, b) == rectarea(b)
assert rectintersect(a, a, norm_intersect_area='a') == 1
assert rectintersect(a, a, norm_intersect_area='b') == 1
with pytest.raises(ValueError):
rectintersect(a, a, norm_intersect_area='c')
# complete intersect
assert rectintersect(a, b) == rectarea(a)
assert rectintersect(b, a) == rectarea(a)
assert rectintersect(a, b, norm_intersect_area='a') == 1
assert rectintersect(b, a, norm_intersect_area='b') == 1
assert rectintersect(b, a, norm_intersect_area='a') < 1
assert rectintersect(a, b, norm_intersect_area='b') < 1
# partial intersect
a = rect(pt(0, 0), pt(1, 1))
b = rect(pt(0.5, 0.5), pt(1.5, 1.5))
assert rectintersect(a, b) == 0.25
assert rectintersect(a, b, norm_intersect_area='a') == 0.25
assert rectintersect(a, b, norm_intersect_area='b') == 0.25
b = rect(pt(0.75, 0.5), pt(1.5, 1.5))
assert rectintersect(a, b) == 0.125
# touch
a = rect(pt(0, 0), pt(1, 1))
b = rect(pt(1, 1), pt(1.5, 1.5))
assert rectintersect(a, b) == 0
# no intersection
a = rect(pt(0, 0), pt(1, 1))
b = rect(pt(1.1, 1.1), pt(1.5, 1.5))
assert rectintersect(a, b) is None
def test_normalize_angle():
for i in range(-10, 10):
theta = i * np.pi
norm = normalize_angle(theta)
assert 0 <= norm < 2 * np.pi
assert norm / np.pi == i % 2
def test_normalize_angle_halfcircle():
for i in range(-10, 10):
theta = 0.5 * i * np.pi
norm = normalize_angle_halfcircle(theta)
assert 0 <= norm < np.pi
assert norm / np.pi * 2 == i % 2
@given(
st.lists(st.lists(st.floats(allow_nan=False, allow_infinity=False), min_size=2, max_size=2)),
st.integers(),
st.integers()
)
def test_project_polarcoord_lines(hough_lines, img_w, img_h):
if img_w <= 0 or img_h <= 0:
with pytest.raises(ValueError):
project_polarcoord_lines(hough_lines, img_w, img_h)
return
else:
res = project_polarcoord_lines(hough_lines, img_w, img_h)
assert type(res) is list
assert len(res) == len(hough_lines)
for pts in res:
assert len(pts) == 2
assert type(pts[0]) == type(pts[1]) == np.ndarray
assert len(pts[0]) == len(pts[1]) == 2
|
[
"markus.konrad@wzb.eu"
] |
markus.konrad@wzb.eu
|
4b3ea08a26e0a92132a0a700b7e8ff04bd9e13fb
|
0420b28aa59330fb0d9548f636b1460668163887
|
/accounts/migrations/0005_alter_userprofile_profile_picture.py
|
591939011f4877d881bd9c3396ddd91668e6bf0a
|
[] |
no_license
|
akhmadakhmedov/modamag
|
30cc3ea335b7fe8fbc234149b11d2df11b627281
|
0459f27230027fab51cbaae2a594ffde52a64d04
|
refs/heads/main
| 2023-08-11T01:48:58.979894
| 2021-10-12T11:18:08
| 2021-10-12T11:18:08
| 391,133,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Generated by Django 3.2.5 on 2021-08-16 09:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_userprofile'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='profile_picture',
field=models.ImageField(blank=True, upload_to='images/users/'),
),
]
|
[
"ahmedov.thy@gmail.com"
] |
ahmedov.thy@gmail.com
|
1c41b31c2095067d219200c34429fe81d65f2c1a
|
96c1f13473cf224113185902edd4c9c01091e106
|
/tests/torchlie_tests/functional/test_se3.py
|
c3af91c3b45ba611167ac0d61031d6cf9bfbf0f1
|
[
"MIT"
] |
permissive
|
facebookresearch/theseus
|
f1e488eb5a25f5ba74a6995911bee958b5da4cf3
|
240e1206329d42fedd40399684d6e17e455c6645
|
refs/heads/main
| 2023-08-11T07:33:12.328520
| 2023-08-02T12:58:01
| 2023-08-02T12:58:01
| 429,570,359
| 1,410
| 105
|
MIT
| 2023-08-01T14:30:01
| 2021-11-18T20:28:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,067
|
py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence, Union
import pytest
import torch
import torchlie.functional.se3_impl as se3_impl
from torchlie.functional import SE3
from .common import (
BATCH_SIZES_TO_TEST,
TEST_EPS,
check_binary_op_broadcasting,
check_left_project_broadcasting,
check_lie_group_function,
check_jacrev_binary,
check_jacrev_unary,
run_test_op,
)
@pytest.mark.parametrize(
"op_name",
[
"exp",
"log",
"adjoint",
"inverse",
"hat",
"compose",
"transform",
"untransform",
"lift",
"project",
"left_act",
"left_project",
"normalize",
],
)
@pytest.mark.parametrize("batch_size", BATCH_SIZES_TO_TEST)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_op(op_name, batch_size, dtype):
rng = torch.Generator()
rng.manual_seed(0)
run_test_op(op_name, batch_size, dtype, rng, 6, (3, 4), se3_impl)
@pytest.mark.parametrize("batch_size", BATCH_SIZES_TO_TEST)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_vee(batch_size: Union[int, Sequence[int]], dtype: torch.dtype):
if isinstance(batch_size, int):
batch_size = (batch_size,)
rng = torch.Generator()
rng.manual_seed(0)
tangent_vector = torch.rand(*batch_size, 6, dtype=dtype, generator=rng)
matrix = se3_impl._hat_autograd_fn(tangent_vector)
# check analytic backward for the operator
check_lie_group_function(se3_impl, "vee", TEST_EPS, (matrix,))
# check the correctness of hat and vee
actual_tangent_vector = se3_impl._vee_autograd_fn(matrix)
torch.testing.assert_close(
actual_tangent_vector, tangent_vector, atol=TEST_EPS, rtol=TEST_EPS
)
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("name", ["exp", "inv"])
def test_jacrev_unary(batch_size, name):
check_jacrev_unary(SE3, 6, batch_size, name)
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("name", ["compose", "transform", "untransform"])
def test_jacrev_binary(batch_size, name):
if not hasattr(torch, "vmap"):
return
check_jacrev_binary(SE3, batch_size, name)
@pytest.mark.parametrize("name", ["compose", "transform", "untransform"])
def test_binary_op_broadcasting(name):
rng = torch.Generator()
rng.manual_seed(0)
batch_sizes = [(1,), (2,), (1, 2), (2, 1), (2, 2), (2, 2, 2), tuple()]
for bs1 in batch_sizes:
for bs2 in batch_sizes:
check_binary_op_broadcasting(
SE3, name, (3, 4), bs1, bs2, torch.float64, rng
)
def test_left_project_broadcasting():
rng = torch.Generator()
rng.manual_seed(0)
batch_sizes = [tuple(), (1, 2), (1, 1, 2), (2, 1), (2, 2), (2, 2, 2)]
check_left_project_broadcasting(SE3, batch_sizes, [0, 1, 2], (3, 4), rng)
|
[
"noreply@github.com"
] |
facebookresearch.noreply@github.com
|
908780fe69c1ca758295ca0f25b531c70571438f
|
67d8173a716da10a7350213d98938aae9f2115ce
|
/ProgrammingCourses/CS61A/lab/lab09/tests/substitute.py
|
c599160851d680f435682a58dd191c6b5377599d
|
[] |
no_license
|
jxie0755/Learning_Python
|
94490d41bdf93acf8396f843328e38b6da310b0f
|
143422321cbc3715ca08f6c3af8f960a55887ced
|
refs/heads/master
| 2021-11-02T22:47:35.790239
| 2021-09-26T04:26:23
| 2021-09-26T04:26:23
| 101,445,132
| 0
| 2
| null | 2019-02-19T15:48:44
| 2017-08-25T22:00:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
test = {
"name": "substitute",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
scm> (substitute "(c a b) "b 'l)
(c a l)
scm> (substitute "(f e a r s) "f 'b)
(b e a r s)
scm> (substitute "(g (o) o (o)) "o 'r)
(g (r) r (r))
""",
"hidden": False,
"locked": False
},
{
"code": r"""
scm> (substitute '((lead guitar) (bass guitar) (rhythm guitar) drums)
.... "guitar "axe)
((lead axe) (bass axe) (rhythm axe) drums)
scm> (substitute "(romeo romeo wherefore art thou romeo) "romeo 'paris)
(paris paris wherefore art thou paris)
scm> (substitute "((to be) or not (to (be))) "be 'eat)
((to eat) or not (to (eat)))
scm> (substitute "(a b (c) d e) "foo 'bar)
(a b (c) d e)
""",
"hidden": False,
"locked": False
}
],
"scored": True,
"setup": r"""
scm> (load 'lab09)
scm> (load 'lab09_extra)
""",
"teardown": "",
"type": "scheme"
}
]
}
|
[
"30805062+jxie0755@users.noreply.github.com"
] |
30805062+jxie0755@users.noreply.github.com
|
400c7d1dfbd9b32067d5a361e8a800aaea5f8be9
|
771c1e2011a85a287c766b1a3d299ced2e6f799f
|
/src/electionguard/ballot_compact.py
|
96e4d2be29a39ca2eb31f736305027dd3da57e10
|
[
"MIT"
] |
permissive
|
microsoft/electionguard-python
|
f50f64a473a8d77984a2faf4aa8db40cebb5c201
|
b3ddc2a732f6c5f078a3afbe05b00d632a2ff5e0
|
refs/heads/main
| 2023-08-03T12:44:35.322716
| 2022-10-28T12:47:18
| 2022-10-28T12:47:18
| 246,392,956
| 143
| 117
|
MIT
| 2023-08-02T00:24:32
| 2020-03-10T19:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 5,601
|
py
|
from dataclasses import dataclass
from typing import Dict, List
from .ballot import (
CiphertextBallot,
SubmittedBallot,
PlaintextBallot,
PlaintextBallotContest,
PlaintextBallotSelection,
make_ciphertext_submitted_ballot,
)
from .ballot_box import BallotBoxState
from .election import CiphertextElectionContext
from .election_object_base import sequence_order_sort
from .encrypt import encrypt_ballot_contests
from .group import ElementModQ
from .manifest import (
ContestDescriptionWithPlaceholders,
InternalManifest,
)
from .utils import get_optional
YES_VOTE = 1
NO_VOTE = 0
@dataclass
class CompactPlaintextBallot:
"""A compact plaintext representation of ballot minimized for data size"""
object_id: str
style_id: str
selections: List[bool]
write_ins: Dict[int, str]
@dataclass
class CompactSubmittedBallot:
"""A compact submitted ballot minimized for data size"""
compact_plaintext_ballot: CompactPlaintextBallot
timestamp: int
ballot_nonce: ElementModQ
code_seed: ElementModQ
code: ElementModQ
ballot_box_state: BallotBoxState
def compress_plaintext_ballot(ballot: PlaintextBallot) -> CompactPlaintextBallot:
"""Compress a plaintext ballot into a compact plaintext ballot"""
selections = _get_compact_selections(ballot)
extended_data = _get_compact_write_ins(ballot)
return CompactPlaintextBallot(
ballot.object_id, ballot.style_id, selections, extended_data
)
def compress_submitted_ballot(
ballot: SubmittedBallot,
plaintext_ballot: PlaintextBallot,
ballot_nonce: ElementModQ,
) -> CompactSubmittedBallot:
"""Compress a submitted ballot into a compact submitted ballot"""
return CompactSubmittedBallot(
compress_plaintext_ballot(plaintext_ballot),
ballot.timestamp,
ballot_nonce,
ballot.code_seed,
ballot.code,
ballot.state,
)
def expand_compact_submitted_ballot(
compact_ballot: CompactSubmittedBallot,
internal_manifest: InternalManifest,
context: CiphertextElectionContext,
) -> SubmittedBallot:
"""
Expand a compact submitted ballot using context and
the election manifest into a submitted ballot
"""
# Expand ballot and encrypt & hash contests
plaintext_ballot = expand_compact_plaintext_ballot(
compact_ballot.compact_plaintext_ballot, internal_manifest
)
nonce_seed = CiphertextBallot.nonce_seed(
internal_manifest.manifest_hash,
compact_ballot.compact_plaintext_ballot.object_id,
compact_ballot.ballot_nonce,
)
contests = get_optional(
encrypt_ballot_contests(
plaintext_ballot, internal_manifest, context, nonce_seed
)
)
return make_ciphertext_submitted_ballot(
plaintext_ballot.object_id,
plaintext_ballot.style_id,
internal_manifest.manifest_hash,
compact_ballot.code_seed,
contests,
compact_ballot.code,
compact_ballot.timestamp,
compact_ballot.ballot_box_state,
)
def expand_compact_plaintext_ballot(
compact_ballot: CompactPlaintextBallot, internal_manifest: InternalManifest
) -> PlaintextBallot:
"""Expand a compact plaintext ballot into the original plaintext ballot"""
return PlaintextBallot(
compact_ballot.object_id,
compact_ballot.style_id,
_get_plaintext_contests(compact_ballot, internal_manifest),
)
def _get_compact_selections(ballot: PlaintextBallot) -> List[bool]:
selections = []
for contest in ballot.contests:
for selection in contest.ballot_selections:
selections.append(selection.vote == YES_VOTE)
return selections
def _get_compact_write_ins(ballot: PlaintextBallot) -> Dict[int, str]:
write_ins = {}
index = 0
for contest in ballot.contests:
for selection in contest.ballot_selections:
index += 1
if selection.write_in:
write_ins[index] = selection.write_in
return write_ins
def _get_plaintext_contests(
compact_ballot: CompactPlaintextBallot, internal_manifest: InternalManifest
) -> List[PlaintextBallotContest]:
"""Get ballot contests from compact plaintext ballot"""
index = 0
ballot_style_contests = _get_ballot_style_contests(
compact_ballot.style_id, internal_manifest
)
contests: List[PlaintextBallotContest] = []
for manifest_contest in sequence_order_sort(internal_manifest.contests):
contest_in_style = (
ballot_style_contests.get(manifest_contest.object_id) is not None
)
# Iterate through selections. If contest not in style, mark placeholder
selections: List[PlaintextBallotSelection] = []
for selection in sequence_order_sort(manifest_contest.ballot_selections):
selections.append(
PlaintextBallotSelection(
selection.object_id,
YES_VOTE if compact_ballot.selections[index] else NO_VOTE,
not contest_in_style,
compact_ballot.write_ins.get(index),
)
)
index += 1
contests.append(PlaintextBallotContest(manifest_contest.object_id, selections))
return contests
def _get_ballot_style_contests(
ballot_style_id: str, internal_manifest: InternalManifest
) -> Dict[str, ContestDescriptionWithPlaceholders]:
ballot_style_contests = internal_manifest.get_contests_for(ballot_style_id)
return {contest.object_id: contest for contest in ballot_style_contests}
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
3dc2bb12966bffd471380690c04c8efd0a9a13b7
|
caedff6019e47035eadaaad5a588022e05d92104
|
/Christmas2016/question/migrations/0001_initial.py
|
763d1090fa8d71921230ce550cd9738236392d82
|
[] |
no_license
|
JMorris1575/christmas16
|
ff767add9321bfe82ee70477f75a957504dc5288
|
1b06bf8febb94a699226b0b9d951cb14bbe59d50
|
refs/heads/master
| 2021-01-13T09:33:57.721350
| 2016-12-28T13:12:44
| 2016-12-28T13:12:44
| 72,059,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-09 02:39
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import model_mixins
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.TextField()),
],
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='question.Question')),
],
bases=(models.Model, model_mixins.AuthorMixin),
),
]
|
[
"FrJamesMorris@gmail.com"
] |
FrJamesMorris@gmail.com
|
628d46dc69e58fab2b00e0b3f44ef0d2fcd88ea1
|
5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4
|
/interview_bits/level_1/01_mathematics/05_number_encoding/01_rearrange-array.py
|
f60f9290190be8d9160ecf9353f276b41e9c32b3
|
[] |
no_license
|
salvador-dali/algorithms_general
|
04950bd823fc354adc58a4f23b7d2f3d39664798
|
aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d
|
refs/heads/master
| 2020-12-14T06:24:10.466601
| 2016-07-17T06:00:17
| 2016-07-17T06:00:17
| 47,397,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
# https://www.interviewbit.com/problems/rearrange-array/
def rearrange(arr):
n = len(arr)
for i in xrange(n):
arr[i] += n * (arr[arr[i]] % n)
for i in xrange(n):
arr[i] /= n
return arr
|
[
"dmytro@knowlabs.com"
] |
dmytro@knowlabs.com
|
077e06806c57829b1f5cc54d139833314ac0bffe
|
308953409e1a3b828ac49b7301c1e751cbf762cf
|
/suite_EETc 12/tst_Open_Change_Values_Import_No/test.py
|
fec88bb5a37e70b505750a61bac908c5b0993dd9
|
[] |
no_license
|
asthagaur1/danfoss-automation
|
4dcc7d8f000917b67e4d6f46ff862a525ddcbc5e
|
213a99d3375889cd0e0c801421a50e9fe6085879
|
refs/heads/main
| 2023-03-31T23:26:56.956107
| 2021-04-01T08:52:37
| 2021-04-01T08:52:37
| 353,627,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
def main():
excel = r"C:\gitworkspace\KoolProg-TestAutomation\Master_Functions\Test_Automation\SourceCode\suite_EETc 12\shared\testdata\Open_Change_Values_Import_No.xls";
#Mapping with Global scripts for Function library and key action.
source(findFile("scripts", "Functions.py"))
source(findFile("scripts", "Actions.py"))
# source(findFile("scripts", "object_id.py"))
keyAction(excel)
|
[
"asthagaur@danfoss.com"
] |
asthagaur@danfoss.com
|
9e78bb7a62c7ff5743be037816b12a9c2316c086
|
82fdb2f3baeb4f08799d93c4be8d8c829f092415
|
/tests/test_policies.py
|
52b3f8e8e75389b6127427521a6f3c7145b58814
|
[
"Apache-2.0"
] |
permissive
|
velamurip/rasa_core
|
915f815772e2b596f837f0e1af511e829cc28e3e
|
f3dbb70d0bb748628ab238eded17a8f5e09279e2
|
refs/heads/master
| 2021-05-16T04:22:04.310610
| 2017-10-05T09:53:22
| 2017-10-05T09:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,838
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pytest
from rasa_core.channels import UserMessage
from rasa_core.domain import TemplateDomain
from rasa_core.featurizers import BinaryFeaturizer
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.policies.scoring_policy import ScoringPolicy
from rasa_core.trackers import DialogueStateTracker
from rasa_core.training_utils import extract_training_data_from_file, \
extract_stories_from_file
def train_data(max_history, domain):
return extract_training_data_from_file(
"data/dsl_stories/stories_defaultdomain.md",
domain=domain, max_history=max_history, remove_duplicates=True,
featurizer=BinaryFeaturizer())
# We are going to use class style testing here since unfortunately pytest
# doesn't support using fixtures as arguments to its own parameterize yet
# (hence, we can't train a policy, declare it as a fixture and use the different
# fixtures of the different policies for the functional tests). Therefore, we
# are going to reverse this and train the policy within a class and collect the
# tests in a base class.
class PolicyTestCollection(object):
"""Tests every policy needs to fulfill.
Each policy can declare further tests on its own."""
max_history = 3 # this is the amount of history we test on
def create_policy(self):
raise NotImplementedError
@pytest.fixture(scope="module")
def trained_policy(self):
default_domain = TemplateDomain.load("examples/default_domain.yml")
policy = self.create_policy()
X, y = train_data(self.max_history, default_domain)
policy.max_history = self.max_history
policy.featurizer = BinaryFeaturizer()
policy.train(X, y, default_domain)
return policy
def test_persist_and_load(self, trained_policy, default_domain, tmpdir):
trained_policy.persist(tmpdir.strpath)
loaded = trained_policy.__class__.load(tmpdir.strpath,
trained_policy.featurizer,
trained_policy.max_history)
stories = extract_stories_from_file(
"data/dsl_stories/stories_defaultdomain.md", default_domain)
for story in stories:
tracker = DialogueStateTracker("default", default_domain.slots)
dialogue = story.as_dialogue("default", default_domain)
tracker.update_from_dialogue(dialogue)
predicted_probabilities = loaded.predict_action_probabilities(
tracker, default_domain)
actual_probabilities = trained_policy.predict_action_probabilities(
tracker, default_domain)
assert predicted_probabilities == actual_probabilities
def test_prediction_on_empty_tracker(self, trained_policy, default_domain):
tracker = DialogueStateTracker(UserMessage.DEFAULT_SENDER,
default_domain.slots,
default_domain.topics,
default_domain.default_topic)
probabilities = trained_policy.predict_action_probabilities(
tracker, default_domain)
assert len(probabilities) == default_domain.num_actions
assert max(probabilities) <= 1.0
assert min(probabilities) >= 0.0
def test_persist_and_load_empty_policy(self, tmpdir):
empty_policy = self.create_policy()
empty_policy.persist(tmpdir.strpath)
loaded = empty_policy.__class__.load(tmpdir.strpath, BinaryFeaturizer(),
empty_policy.max_history)
assert loaded is not None
class TestKerasPolicy(PolicyTestCollection):
@pytest.fixture(scope="module")
def create_policy(self):
p = KerasPolicy()
return p
class TestScoringPolicy(PolicyTestCollection):
@pytest.fixture(scope="module")
def create_policy(self):
p = ScoringPolicy()
return p
class TestMemoizationPolicy(PolicyTestCollection):
@pytest.fixture(scope="module")
def create_policy(self):
p = MemoizationPolicy()
return p
def test_memorise(self, trained_policy, default_domain):
X, y = train_data(self.max_history, default_domain)
trained_policy.train(X, y, default_domain)
for ii in range(X.shape[0]):
assert trained_policy.recall(X[ii, :, :], default_domain) == y[ii]
random_feature = np.random.randn(default_domain.num_features)
assert trained_policy.recall(random_feature, default_domain) is None
|
[
"tom.bocklisch@scalableminds.com"
] |
tom.bocklisch@scalableminds.com
|
9db26fb7dad810ee471a57378bf7b950550c9a78
|
e1a2c6ed4a4b93b4697974e3b0a32a4d67daa6f6
|
/venv/Lib/site-packages/pybrain3/rl/environments/ode/instances/ccrl.py
|
f868c93f0e79f79e82bdefa752c7d5da13efb64f
|
[
"MIT"
] |
permissive
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
cdf0f23a58617e17d6b938e3a9df17daae8585e4
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
refs/heads/master
| 2021-09-11T01:39:26.228392
| 2018-04-05T14:33:39
| 2018-04-05T14:33:39
| 117,153,454
| 0
| 0
|
MIT
| 2018-03-27T05:20:37
| 2018-01-11T21:05:33
|
Python
|
UTF-8
|
Python
| false
| false
| 6,146
|
py
|
__author__ = 'Frank Sehnke, sehnke@in.tum.de'
from pybrain3.rl.environments.ode import ODEEnvironment, sensors, actuators
import imp
import xode #@UnresolvedImport
import ode #@UnresolvedImport
import sys
from scipy import array, asarray
class CCRLEnvironment(ODEEnvironment):
def __init__(self, xodeFile="ccrlGlas.xode", renderer=True, realtime=False, ip="127.0.0.1", port="21590", buf='16384'):
ODEEnvironment.__init__(self, renderer, realtime, ip, port, buf)
# load model file
self.pert = asarray([1.0, 0.0, 0.0])
self.loadXODE(imp.find_module('pybrain')[1] + "/rl/environments/ode/models/" + xodeFile)
# standard sensors and actuators
self.addSensor(sensors.JointSensor())
self.addSensor(sensors.JointVelocitySensor())
self.addActuator(actuators.JointActuator())
#set act- and obsLength, the min/max angles and the relative max touques of the joints
self.actLen = self.indim
self.obsLen = len(self.getSensors())
#ArmLeft, ArmRight, Hip, PevelLeft, PevelRight, TibiaLeft, TibiaRight, KneeLeft, KneeRight, FootLeft, FootRight
self.tourqueList = array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.8, 0.8, 0.8, 0.5, 0.5, 0.1],)
#self.tourqueList=array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],)
self.cHighList = array([0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.9],)
self.cLowList = array([-1.0, -1.0, -1.0, -1.5, -1.0, -1.0, -1.0, -0.7, -1.0, 0.0, -1.0, -1.5, -1.0, -1.0, -1.0, 0.0],)
self.stepsPerAction = 1
def step(self):
# Detect collisions and create contact joints
self.tableSum = 0
self.glasSum = 0
ODEEnvironment.step(self)
def _near_callback(self, args, geom1, geom2):
"""Callback function for the collide() method.
This function checks if the given geoms do collide and
creates contact joints if they do."""
# only check parse list, if objects have name
if geom1.name != None and geom2.name != None:
# Preliminary checking, only collide with certain objects
for p in self.passpairs:
g1 = False
g2 = False
for x in p:
g1 = g1 or (geom1.name.find(x) != -1)
g2 = g2 or (geom2.name.find(x) != -1)
if g1 and g2:
return()
# Check if the objects do collide
contacts = ode.collide(geom1, geom2)
tmpStr = geom2.name[:-2]
handStr = geom1.name[:-1]
if geom1.name == 'plate' and tmpStr != 'objectP':
self.tableSum += len(contacts)
if tmpStr == 'objectP' and handStr == 'pressLeft':
if len(contacts) > 0: self.glasSum += 1
tmpStr = geom1.name[:-2]
handStr = geom2.name[:-1]
if geom2.name == 'plate' and tmpStr != 'objectP':
self.tableSum += len(contacts)
if tmpStr == 'objectP' and handStr == 'pressLeft':
if len(contacts) > 0: self.glasSum += 1
# Create contact joints
world, contactgroup = args
for c in contacts:
p = c.getContactGeomParams()
# parameters from Niko Wolf
c.setBounce(0.2)
c.setBounceVel(0.05) #Set the minimum incoming velocity necessary for bounce
c.setSoftERP(0.6) #Set the contact normal "softness" parameter
c.setSoftCFM(0.00005) #Set the contact normal "softness" parameter
c.setSlip1(0.02) #Set the coefficient of force-dependent-slip (FDS) for friction direction 1
c.setSlip2(0.02) #Set the coefficient of force-dependent-slip (FDS) for friction direction 2
c.setMu(self.FricMu) #Set the Coulomb friction coefficient
j = ode.ContactJoint(world, contactgroup, c)
j.name = None
j.attach(geom1.getBody(), geom2.getBody())
def loadXODE(self, filename, reload=False):
""" loads an XODE file (xml format) and parses it. """
f = file(filename)
self._currentXODEfile = filename
p = xode.parser.Parser()
self.root = p.parseFile(f)
f.close()
try:
# filter all xode "world" objects from root, take only the first one
world = [x for x in self.root.getChildren() if isinstance(x, xode.parser.World)][0]
except IndexError:
# malicious format, no world tag found
print("no <world> tag found in " + filename + ". quitting.")
sys.exit()
self.world = world.getODEObject()
self._setWorldParameters()
try:
# filter all xode "space" objects from world, take only the first one
space = [x for x in world.getChildren() if isinstance(x, xode.parser.Space)][0]
except IndexError:
# malicious format, no space tag found
print("no <space> tag found in " + filename + ". quitting.")
sys.exit()
self.space = space.getODEObject()
# load bodies and geoms for painting
self.body_geom = []
self._parseBodies(self.root)
for (body, _) in self.body_geom:
if hasattr(body, 'name'):
tmpStr = body.name[:-2]
if tmpStr == "objectP":
body.setPosition(body.getPosition() + self.pert)
if self.verbosity > 0:
print("-------[body/mass list]-----")
for (body, _) in self.body_geom:
try:
print(body.name, body.getMass())
except AttributeError:
print("<Nobody>")
# now parse the additional parameters at the end of the xode file
self.loadConfig(filename, reload)
def reset(self):
    """Reset the simulation and restore the default perturbation vector."""
    ODEEnvironment.reset(self)
    # Perturbation applied to "objectP*" bodies when the XODE file is loaded.
    self.pert = asarray([1.0, 0.0, 0.0])
# Smoke-test entry point: step the environment forever, resetting it
# every 1000 simulation steps.
if __name__ == '__main__' :
    w = CCRLEnvironment()
    while True:
        w.step()
        if w.stepCounter == 1000: w.reset()
|
[
"shatserka@gmail.com"
] |
shatserka@gmail.com
|
25ec5c3a23fdcbb3fe68b62fb26e6466e9c81f4a
|
94c7440e7f1d2fdbe4a1e26b9c75a94e49c14eb4
|
/leetcode/371.py
|
9db89c099bace2c01ca91a5174d2047ab78a610c
|
[
"Apache-2.0"
] |
permissive
|
windniw/just-for-fun
|
7ddea4f75cf3466a400b46efe36e57f6f7847c48
|
44e1ff60f8cfaf47e4d88988ee67808f0ecfe828
|
refs/heads/master
| 2022-08-18T09:29:57.944846
| 2022-07-25T16:04:47
| 2022-07-25T16:04:47
| 204,949,602
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
"""
link: https://leetcode.com/problems/sum-of-two-integers
problem: 不用 + / - 号,求 integer 类型的 a + b
solution: 由于python没有左移整形溢出这道题难度直线上升。
a + b
== 不进位 (a + b) + 进位 (a + b) << 1
== a ^ b + (a & b) << 1
持续迭代到 (a & b) << 1 为0,即不进位时, 结果为当时的 a ^ b
"""
class Solution:
    def getSum(self, a: int, b: int) -> int:
        """Return a + b without using the + or - operators.

        Carry-propagation via bitwise ops: a + b == (a ^ b) + ((a & b) << 1),
        iterated until the carry term is zero. Because Python integers are
        arbitrary precision, intermediate values are masked to 32 bits and
        the result is sign-extended back at the end.
        """
        max_uint = 0xffffffff  # low 32 bits
        # Largest signed 32-bit value. The original used 0x7fffffff - 1,
        # which mis-classified 2147483647 as negative.
        max_int = 0x7fffffff
        while a:
            add = (a & b) << 1        # carry bits, shifted into position
            b = (a ^ b) & max_uint    # sum without carries, kept to 32 bits
            a = add & max_uint        # carries become the next addend
        # b <= max_int: non-negative; otherwise undo the two's-complement mask.
        return b if b <= max_int else ~(b ^ max_uint)
|
[
"windniw36@gmail.com"
] |
windniw36@gmail.com
|
50f8e6b88bff07c4af0e52bfa551b372a8c93bc8
|
a35b2842ff707c9adf70e178ba8cb7a128e6f0fa
|
/brl_gym/scripts/crosswalk_vel/generate_initial_conditions.py
|
a9c7a1bb7d4cb52e8276db48814c90777f6661e9
|
[
"BSD-3-Clause"
] |
permissive
|
gilwoolee/brl_gym
|
7717366a09c7ff96a8fbc02688febe6d559e333a
|
9c0784e9928f12d2ee0528c79a533202d3afb640
|
refs/heads/master
| 2022-11-26T15:08:56.730225
| 2020-08-02T05:08:28
| 2020-08-02T05:08:28
| 198,884,614
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
from brl_gym.envs.crosswalk_vel import CrossWalkVelEnv
import numpy as np
# Sample one initial condition (goals, pedestrians, car pose) from the
# crosswalk environment and print a roslaunch command per pedestrian.
env = CrossWalkVelEnv()
env.reset()
goals = env.goals
peds = env.pedestrians
pose = env.pose
ped_speeds = env.pedestrian_speeds
print("Car 37, 38, 35")
print("Peds :\n", np.around(peds,1))
print("Ped speeds:\n", np.around(ped_speeds,2))
print("Goals :\n", np.around(goals,1))
print("Pose :\n", np.around(pose,1))
print("Angle :\n", np.around(np.rad2deg(pose[2]),2))
for ps, goal in zip(ped_speeds, goals):
    # Nudge goals off the lateral walls (3.5 -> 3.2, 0.0 -> 0.3) so the
    # controller does not drive a pedestrian into the boundary.
    if goal[0] == 3.5:
        goal[0] = 3.2
    if goal[0] == 0.0:
        goal[0] = 0.3
    print("roslaunch mushr_control runner_script.launch car_name:=$CAR_NAME wait_for_signal:=false desired_speed:={:.2f} desired_x:={:.2f} desired_y:={:.2f} local:=false".format(ps, goal[0], goal[1]))
|
[
"gilwoo301@gmail.com"
] |
gilwoo301@gmail.com
|
d0a92881174f016830e5b146ca97ba5a68b65627
|
2aa4c7c94866e7a958e4787dd4487aa7c1eb8d61
|
/applications/MappingApplication/tests/test_mapper_mpi_tests.py
|
17fa528cbfa65adc8d0f6521adde262131b8852b
|
[
"BSD-3-Clause"
] |
permissive
|
PFEM/Kratos
|
b48df91e6ef5a00edf125e6f5aa398505c9c2b96
|
796c8572e9fe3875562d77370fc60beeacca0eeb
|
refs/heads/master
| 2021-10-16T04:33:47.591467
| 2019-02-04T14:22:06
| 2019-02-04T14:22:06
| 106,919,267
| 1
| 0
| null | 2017-10-14T10:34:43
| 2017-10-14T10:34:43
| null |
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import KratosMultiphysics
from KratosMultiphysics.mpi import mpi
import KratosMultiphysics.MetisApplication
import KratosMultiphysics.TrilinosApplication
import KratosMultiphysics.MappingApplication as KratosMapping
import KratosMultiphysics.KratosUnittest as KratosUnittest
from base_mapper_tests import BaseMapperTests
from trilinos_import_model_part_utility import TrilinosImportModelPartUtility
class MapperMPITests(BaseMapperTests, KratosUnittest.TestCase):
    """MPI variant of the mapper tests.

    Overrides model-part import to read Metis-partitioned mdpa files
    through Trilinos and builds MPI mappers via the MapperFactory.
    """

    @classmethod
    def _ImportModelPart(cls):
        # PARTITION_INDEX is required on every node for distributed runs.
        cls.model_part_origin.AddNodalSolutionStepVariable(
            KratosMultiphysics.PARTITION_INDEX)
        cls.model_part_destination.AddNodalSolutionStepVariable(
            KratosMultiphysics.PARTITION_INDEX)
        origin_settings = KratosMultiphysics.Parameters("""{
            "model_import_settings": {
                "input_type": "mdpa",
                "input_filename": \"""" + cls.input_file_origin + """\",
                "partition_in_memory" : true
            },
            "echo_level" : 0
        }""")
        # Destination uses identical settings except for the input file.
        destination_settings = origin_settings.Clone()
        destination_settings["model_import_settings"]["input_filename"].SetString(
            cls.input_file_destination)
        model_part_import_util_origin = TrilinosImportModelPartUtility(
            cls.model_part_origin, origin_settings)
        model_part_import_util_destination = TrilinosImportModelPartUtility(
            cls.model_part_destination, destination_settings)
        model_part_import_util_origin.ImportModelPart()
        model_part_import_util_destination.ImportModelPart()
        # Communicators must be created after import for distributed data.
        model_part_import_util_origin.CreateCommunicators()
        model_part_import_util_destination.CreateCommunicators()

    def _CreateMapper(self, mapper_settings):
        # MPI counterpart of the serial factory call in the base tests.
        return KratosMapping.MapperFactory.CreateMPIMapper(
            self.model_part_origin,
            self.model_part_destination,
            mapper_settings)
if __name__ == '__main__':
KratosUnittest.main()
|
[
"philipp.bucher@tum.de"
] |
philipp.bucher@tum.de
|
224d192a356f25f72640dd130596fa1cc7f853c8
|
fb1fd30098fd4dd7f11e614fbcd19bda5e0414bd
|
/randNum.py
|
32dc0504c2cbabfba7c0c7b3ba6838a1d01a160a
|
[] |
no_license
|
kali-lg/python
|
6ceb452ae7fd611bb6b6b99a4be4404f3fd6b2de
|
0363dba3e224ee2044dbe3216289c0245df9c5c0
|
refs/heads/master
| 2021-01-10T09:37:58.103674
| 2016-03-07T13:09:57
| 2016-03-07T13:09:57
| 53,310,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
#!/usr/bin/python
import random
num = random.randint(0, 100)
while True:
try:
guess = int(raw_input("Please Enter number 1~100:\n"))
except ValueError, e:
print "Please Enter correct number, your number is wrong type."
continue
if guess > num:
print "Guess Bigger:", guess
elif guess < num:
print "Gusee Smaller:", guess
else:
print "Guess OK, Game Over:"
break
print "\n"
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
bf799d87050ee17a2efe9205421a451ddbc5bbb3
|
f0b741f24ccf8bfe9bd1950425d83b6291d21b10
|
/components/google-cloud/google_cloud_pipeline_components/container/v1/bigquery/ml_reconstruction_loss/launcher.py
|
b0671efb65d1838f7599a10b484a9e7483666bb0
|
[
"Apache-2.0"
] |
permissive
|
kubeflow/pipelines
|
e678342b8a325559dec0a6e1e484c525fdcc8ce8
|
3fb199658f68e7debf4906d9ce32a9a307e39243
|
refs/heads/master
| 2023-09-04T11:54:56.449867
| 2023-09-01T19:07:33
| 2023-09-01T19:12:27
| 133,100,880
| 3,434
| 1,675
|
Apache-2.0
| 2023-09-14T20:19:06
| 2018-05-12T00:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,115
|
py
|
# Copyright 2022 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCP launcher for Bigquery jobs based on the AI Platform SDK."""
import argparse
import logging
import sys
from google_cloud_pipeline_components.container.v1.bigquery.ml_reconstruction_loss import remote_runner
from google_cloud_pipeline_components.container.v1.gcp_launcher.utils import parser_util
def _parse_args(args):
    """Parse command line arguments.

    Starts from the shared GCP-launcher defaults (parser_util) and adds
    the flags specific to the BigQuery ML reconstruction-loss job.
    Returns the parsed arguments as a plain dict.
    """
    parser, parsed_args = parser_util.parse_default_args(args)
    # Parse the conditionally required arguments
    parser.add_argument(
        '--executor_input',
        dest='executor_input',
        type=str,
        # executor_input is only needed for components that emit output artifacts.
        required=True,
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--job_configuration_query_override',
        dest='job_configuration_query_override',
        type=str,
        required=True,
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--model_name',
        dest='model_name',
        type=str,
        required=True,
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--table_name',
        dest='table_name',
        type=str,
        # table_name is only needed for BigQuery tvf model job component.
        required=False,
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--query_statement',
        dest='query_statement',
        type=str,
        # query_statement is only needed for BigQuery predict model job component.
        required=False,
        default=argparse.SUPPRESS,
    )
    # parse_known_args tolerates extra flags injected by the pipeline runtime;
    # argparse.SUPPRESS keeps absent optionals out of the resulting dict.
    parsed_args, _ = parser.parse_known_args(args)
    return vars(parsed_args)
def main(argv):
    """Main entry.

    Expected input args are as follows:
    Project - Required. The project of which the resource will be launched.
    Region - Required. The region of which the resource will be launched.
    Type - Required. GCP launcher is a single container. This Enum will
        specify which resource to be launched.
    Request payload - Required. The full serialized json of the resource spec.
        Note this can contain the Pipeline Placeholders.
    gcp_resources - placeholder output for returning job_id.

    Args:
        argv: A list of system arguments.

    Raises:
        ValueError: If the --type flag is not BigqueryMLReconstructionLossJob,
            i.e. this launcher was invoked for the wrong component.
    """
    parsed_args = _parse_args(argv)
    job_type = parsed_args['type']
    # This container only serves one job type; fail fast on anything else.
    if job_type != 'BigqueryMLReconstructionLossJob':
        raise ValueError('Incorrect job type: ' + job_type)
    logging.info('Job started for type: ' + job_type)
    # All parsed flags map 1:1 onto the remote runner's keyword arguments.
    remote_runner.bigquery_ml_reconstruction_loss_job(**parsed_args)
if __name__ == '__main__':
main(sys.argv[1:])
|
[
"nobody@google.com"
] |
nobody@google.com
|
3ec2e2dd3b709a107fda00833615406e4642a963
|
1bb42bac177fb4e979faa441363c27cb636a43aa
|
/dual_encoder/model_utils.py
|
691253213276f6be9ac1bd05a51079a61df3c007
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
google-research/federated
|
a6040e80fa0fbf533e0d665c66a9bc549d208b3d
|
329e60fa56b87f691303638ceb9dfa1fc5083953
|
refs/heads/master
| 2023-08-28T13:10:10.885505
| 2023-08-22T23:06:08
| 2023-08-22T23:06:40
| 295,559,343
| 595
| 187
|
Apache-2.0
| 2022-05-12T08:42:53
| 2020-09-14T23:09:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,775
|
py
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dual encoder model."""
from typing import Callable, Optional
import tensorflow as tf
NormalizationFnType = Optional[Callable[[tf.Tensor], tf.Tensor]]
l2_normalize_fn = lambda x: tf.math.l2_normalize(x, axis=-1)
@tf.function
def get_predicted_embeddings(y_pred, y_true, normalization_fn=l2_normalize_fn):
  """Helper for retrieving optionally normalized embeddings from y_pred.

  Args:
    y_pred: dual encoder model output. If the model outputs embeddings, `y_pred`
      is concatenate(context_embedding, full vocab label embeddings) with shape
      [batch_size + label_embedding_vocab_size, final_embedding_dim]. If the
      model outputs similarities, `y_pred` is the similarity matrix with shape
      [batch_size, label_embedding_vocab_size] between context and full vocab
      label embeddings.
    y_true: the true labels with shape [batch_size, 1].
    normalization_fn: The normalization function to be applied to both context
      and label embeddings, or None to skip normalization.

  Returns:
    Optionally normalized context and label embeddings.
  """
  # y_true is only used to recover the batch size that splits y_pred into
  # its context (first batch_size rows) and label (rest) halves.
  batch_size = tf.shape(y_true)[0]
  context_embedding, label_embedding = y_pred[:batch_size], y_pred[batch_size:]
  # Optionally apply normalization_fn to both context and label embeddings,
  # so downstream dot products become cosine similarities.
  if normalization_fn is not None:
    context_embedding = normalization_fn(context_embedding)
    label_embedding = normalization_fn(label_embedding)
  return context_embedding, label_embedding
@tf.function
def get_embeddings_and_similarities(y_pred,
                                    y_true,
                                    expect_embeddings=True,
                                    normalization_fn=l2_normalize_fn):
  """Retrieving the context and label embeddings and the similarities between them.

  Args:
    y_pred: Dual encoder model output. When expect_embeddings is true, `y_pred`
      is concatenate(context_embedding, full vocab label embeddings) with shape
      [batch_size + label_embedding_vocab_size, final_embedding_dim]. When
      `expect_embeddings` is False, `y_pred` is the similarity matrix with shape
      [batch_size, label_embedding_vocab_size] between context and full vocab
      label embeddings.
    y_true: The true labels with shape [batch_size, 1].
    expect_embeddings: If `expect_embeddings` is True, `y_pred` is the context
      and label embeddings. Otherwise, the y_pred is the batch or global
      similarities.
    normalization_fn: The normalization function to be applied to both context
      and label embeddings.

  Returns:
    The optionally normalized context and label embeddings as well as the
    similarities between them. The context and label embeddings are `None` if
    `expect_embeddings` is False.
  """
  if expect_embeddings:
    context_embedding, label_embedding = (
        get_predicted_embeddings(y_pred, y_true, normalization_fn))
    # similarities[i][j] is the dot product of the ith context embedding and
    # the jth label embedding in a batch.
    similarities = tf.matmul(
        context_embedding, label_embedding, transpose_b=True)
  else:
    # y_pred already holds the similarity matrix; no embeddings available.
    context_embedding = label_embedding = None
    similarities = y_pred
  return context_embedding, label_embedding, similarities
class Similarities(tf.keras.layers.Layer):
  """Keras layer for computing similarities over context/label embeddings.

  Takes in context embeddings within a batch and label embeddings to computes a
  similarities matrix where similarities[i][j] is the dot product similarity
  between context embedding i and label embedding j.

  If label embeddings are those within the same batch, this function computes
  the batch similarity.

  If label embeddings are those for the full vocabulary, this function computes
  the global similarity.

  Optionally apply normalization to the embeddings, computing cosine similarity
  instead of dot product.
  """

  def __init__(self,
               normalization_fn: NormalizationFnType = l2_normalize_fn,
               **kwargs):
    super().__init__(**kwargs)
    self.normalization_fn = normalization_fn

  def call(self, inputs):
    """Computes the similarity matrix for a (context, label) embedding pair."""
    if len(inputs) != 2:
      raise ValueError(
          'Exactly two inputs must be provided, context embeddings and label '
          'embeddings, but %d inputs were provided.' % len(inputs))
    context_embedding, label_embedding = inputs
    # Optionally apply normalization to both context and label embeddings,
    # computing the cosine similarity rather than the dot product.
    if self.normalization_fn is not None:
      context_embedding = self.normalization_fn(context_embedding)
      label_embedding = self.normalization_fn(label_embedding)
    # similarities[i][j] is the dot product of the ith context embedding and
    # the jth label embedding in a batch.
    similarities = tf.matmul(
        context_embedding, label_embedding, transpose_b=True)
    return similarities

  def get_config(self):
    # Serialize the normalization function so the layer can be re-created
    # from its config. NOTE(review): a raw callable here is only
    # deserializable if registered with Keras — confirm usage.
    config = super().get_config()
    config.update({
        'normalization_fn': self.normalization_fn,
    })
    return config
NORMALIZATION_FN_MAP = {
'none': None,
'l2_normalize': l2_normalize_fn,
}
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
445617559274b877d9caaaded1e30307947b51ec
|
ac9e79b04eadb95497b99c30444d952e6068f18f
|
/dockermap/map/policy/__init__.py
|
45f1833b73ce0c9c2dd07d072c74315426a84c27
|
[
"MIT"
] |
permissive
|
vijayshan/docker-map
|
ff58f5c8aba15b8d157478a6614c6d6681de1e61
|
a222c92947cbc22aef727c12f39fb93b0b192bc7
|
refs/heads/master
| 2021-01-17T03:16:31.693681
| 2015-09-14T08:20:55
| 2015-09-14T08:20:55
| 42,375,505
| 1
| 0
| null | 2015-09-12T22:31:07
| 2015-09-12T22:31:07
| null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .actions import ACTION_ATTACHED_FLAG, ACTION_DEPENDENCY_FLAG
from .simple import SimplePolicy
from .resume import ResumeUpdatePolicy
|
[
"matthias@erll.de"
] |
matthias@erll.de
|
65891c8750b9d10b031594b8b35080a55aaa6663
|
36409b78394002e5d6e9228ca256fd4654b38f80
|
/random walk/src/BD_LERW.py
|
225bf177733ba635a79943248f53c2381ba65975
|
[] |
no_license
|
xiaogang00/Mining-in-Social-Networks
|
fa383494fd56124096c60317af2b30373c0d4aac
|
87ab6f29ae148170d03470987299c7ea812d1dab
|
refs/heads/master
| 2020-12-02T16:22:59.938930
| 2017-08-30T01:58:33
| 2017-08-30T01:58:33
| 96,543,382
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,050
|
py
|
#!/usr/bin/python
#
# An implementation of a Bidirectional Loop Erased Random Walk (LERW)
# from a cylinder with reflecting boundaries on the left
# and open boundaries on the right.
# PNG output of a single trajectory.
# Habib Rehmann and Gunnar Pruessner
#
import random
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
seed = 10  # random seed
Length = 200  # length of the cylinder
Circ = 200  # circumference of cylinder
x = 0  # x coordinate of starting location
# y coordinate of starting location. Origin is at centre of square
y = Circ / 2
# The walk starts at x = 0 (reflecting left edge) with y centred on the
# cylinder. NOTE: Circ / 2 is integer division on Python 2 only — this
# script assumes Python 2, since y is later used as an array index.
s = 0  # Step number.
realizations = 8  # number of independent walks to generate
trajectory = []  # List of the (x, y) lattice sites visited, in order.
# (Length x Circ) 2D array counting visits to each site
lattice = np.zeros((Length, Circ), dtype=int)
random.seed(seed)
# Plot config: figure aspect ratio matches the cylinder's unrolled surface.
dpi = 300
fig, ax = plt.subplots()
fig.set_size_inches(3, Circ * 3. / Length)
ax.set_xlim(0, Length - 1)
ax.set_ylim(0, Circ - 1)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def plot(LERW, c='g', Length = Length, Circ = Circ):
    """Mask boundary points of a walk (in place) and draw it.

    Points on the cylinder boundary are replaced by (nan, nan) so
    matplotlib breaks the line there instead of drawing spurious
    wrap-around segments across the periodic edge.

    Args:
        LERW: list of (x, y) tuples; modified in place.
        c: matplotlib line color.
        Length, Circ: lattice dimensions (default to the module globals).
    """
    for pos in range(len(LERW)):
        x, y = LERW[pos]
        # Fixed: the original tested `(y == Circ)` twice and incremented
        # `pos` inside the for-loop (a no-op, since range() controls pos).
        if (x == Length) or (x == 0) or (y == Circ) or (y == 0):
            LERW[pos] = (np.nan, np.nan)
    plt.plot(*zip(*LERW), color=c, linewidth=0.2)
# Generate `realizations` random walks on the cylinder and draw the
# loop-erased version of each, traversed in both directions.
for i in range(realizations):
    s = 0
    x = 0  # x coordinate of starting location
    y = Circ / 2  # y coordinate of starting location (Python 2 int division)
    # lattice counts how often each site was visited by the raw walk
    lattice = np.zeros((Length, Circ), dtype=int)
    trajectory = []
    while True:
        s += 1
        # Pick one of the four nearest-neighbour moves with two coin flips.
        if (bool(random.getrandbits(1))):
            if (bool(random.getrandbits(1))):
                x += 1
            else:
                x -= 1
        else:
            if (bool(random.getrandbits(1))):
                y += 1
            else:
                y -= 1
        if (x >= Length):
            # Open boundary on the right: the walk escapes and we stop.
            break
        elif (x < 0):
            # Reflecting boundary on the left.
            x = 0
        # Periodic boundary around the circumference.
        if (y >= Circ):
            y -= Circ
        elif (y < 0):
            y += Circ
        lattice[x][y] += 1
        trajectory.append((x, y))
    x0, y0, pos = None, None, 0
    # Loop erasure (traversal from left to right): anchor the first revisited
    # site, then delete the loop when that site is seen for the last time.
    LERW_LeftRight = deepcopy(trajectory)
    lcpy = deepcopy(lattice)
    x0, y0 = None, None
    pos = 0
    while pos < len(LERW_LeftRight):
        x, y = LERW_LeftRight[pos]
        # NOTE(review): `not x0` is also true when x0 == 0, so an anchor in
        # column 0 is treated like "no anchor" — confirm this is intended.
        if lcpy[x][y] > 1 and (not x0):
            x0, y0 = x, y
            pos0 = pos
        elif (x == x0) and (y == y0) and (lcpy[x][y] == 1):
            del LERW_LeftRight[pos0:pos]
            x0, y0 = None, None
            pos = pos0
        lcpy[x][y] -= 1
        pos += 1
    plot(LERW_LeftRight)
    # Loop erasure (traversal from right to left)
    LERW_RightLeft = deepcopy(trajectory[::-1])
    lcpy = deepcopy(lattice)
    x0, y0 = None, None
    pos = 0
    while pos < len(LERW_RightLeft):
        x, y = LERW_RightLeft[pos]
        if lcpy[x][y] > 1 and (not x0):
            x0, y0 = x, y
            pos0 = pos
        elif (x == x0) and (y == y0) and (lcpy[x][y] == 1):
            del LERW_RightLeft[pos0:pos]
            x0, y0 = None, None
            pos = pos0
        lcpy[x][y] -= 1
        pos += 1
    plot(LERW_RightLeft, 'r')
# Save both trajectories next to this script as <script name>.png
plt.savefig(__file__[:-3]+".png", bbox_inches="tight", dpi=dpi)
|
[
"872310734@qq.com"
] |
872310734@qq.com
|
579d13c29895c97ff77f3425bac31cb6d6070857
|
1e6e3bb707920fdb01ebca23eaf81097c558d918
|
/tests/system/action/test_internal_actions.py
|
cc7ffd01313c71744500855191c73bb153e2160b
|
[
"MIT"
] |
permissive
|
OpenSlides/openslides-backend
|
cbd24589f82a6f29bde02611610511870bb6abbf
|
d8511f5138db4cc5fe4fa35e2a0200f766bd49c5
|
refs/heads/main
| 2023-08-23T11:54:25.064070
| 2023-08-22T11:15:45
| 2023-08-22T11:15:45
| 231,757,840
| 6
| 22
|
MIT
| 2023-09-14T16:23:41
| 2020-01-04T12:17:38
|
Python
|
UTF-8
|
Python
| false
| false
| 7,783
|
py
|
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Optional
from openslides_backend.http.views.action_view import (
INTERNAL_AUTHORIZATION_HEADER,
ActionView,
)
from openslides_backend.http.views.base_view import RouteFunction
from openslides_backend.shared.env import DEV_PASSWORD
from openslides_backend.shared.util import ONE_ORGANIZATION_FQID
from tests.system.util import disable_dev_mode, get_route_path
from tests.util import Response
from .base import BaseActionTestCase
from .util import get_internal_auth_header
class BaseInternalRequestTest(BaseActionTestCase):
    """
    Provides the ability to use the anonymous client to call an internal route.
    """

    # Route under test; concrete subclasses assign an ActionView route.
    route: RouteFunction

    def call_internal_route(
        self,
        payload: Any,
        internal_auth_password: Optional[str] = DEV_PASSWORD,
    ) -> Response:
        """POST the payload to the internal route via the anonymous client.

        Passing internal_auth_password=None omits the auth header entirely,
        which lets tests exercise the 401 path.
        """
        if internal_auth_password is None:
            headers = {}
        else:
            headers = get_internal_auth_header(internal_auth_password)
        return self.anon_client.post(
            get_route_path(self.route),
            json=payload,
            headers=headers,
        )
class BaseInternalPasswordTest(BaseInternalRequestTest):
    """
    Sets up a server-side password for internal requests.
    """

    # Fixed password written to a temp file that the server reads via
    # INTERNAL_AUTH_PASSWORD_FILE.
    internal_auth_password: str = "Q2^$2J9QXimW6lDPoGj4"

    def setUp(self) -> None:
        super().setUp()
        # Back the password with a real file, as the server expects a path.
        self.secret_file = NamedTemporaryFile()
        self.secret_file.write(self.internal_auth_password.encode("ascii"))
        self.secret_file.seek(0)
        self.app.env.vars["INTERNAL_AUTH_PASSWORD_FILE"] = self.secret_file.name

    def tearDown(self) -> None:
        super().tearDown()
        # Unset the env var and delete the temp file so other tests run clean.
        self.app.env.vars["INTERNAL_AUTH_PASSWORD_FILE"] = ""
        self.secret_file.close()
class BaseInternalActionTest(BaseInternalRequestTest):
    """
    Targets the internal action route and provides a helper to send a
    single action request to it.
    """

    route: RouteFunction = ActionView.internal_action_route

    def internal_request(
        self,
        action: str,
        data: Dict[str, Any],
        internal_auth_password: Optional[str] = DEV_PASSWORD,
    ) -> Response:
        """Send one action with a single data payload to the internal route."""
        return super().call_internal_route(
            [{"action": action, "data": [data]}], internal_auth_password
        )
class TestInternalActionsDev(BaseInternalActionTest):
    """
    Uses the anonymous client to call the internal action route. This should
    skip all permission checks, so the requests still succeed.

    Just rudimentary tests that the actions generally succeed since if that's
    the case, everything should be handled analogously to the external case,
    which is already tested sufficiently in the special test cases for the
    actions.

    Hint: This test assumes that OPENSLIDES_DEVELOPMENT is truthy.
    """

    def test_internal_user_create(self) -> None:
        response = self.internal_request("user.create", {"username": "test"})
        self.assert_status_code(response, 200)
        self.assert_model_exists("user/2", {"username": "test"})

    def test_internal_user_update(self) -> None:
        response = self.internal_request("user.update", {"id": 1, "username": "test"})
        self.assert_status_code(response, 200)
        self.assert_model_exists("user/1", {"username": "test"})

    def test_internal_user_delete(self) -> None:
        response = self.internal_request("user.delete", {"id": 1})
        self.assert_status_code(response, 200)
        self.assert_model_deleted("user/1")

    def test_internal_user_set_password(self) -> None:
        response = self.internal_request(
            "user.set_password", {"id": 1, "password": "new_password"}
        )
        self.assert_status_code(response, 200)
        model = self.get_model("user/1")
        # The stored password is hashed; compare via the auth service.
        assert self.auth.is_equals("new_password", model["password"])

    def test_internal_organization_initial_import(self) -> None:
        # Stack-internal action: reachable here only through the internal route.
        self.datastore.truncate_db()
        response = self.internal_request("organization.initial_import", {"data": {}})
        self.assert_status_code(response, 200)
        self.assert_model_exists(ONE_ORGANIZATION_FQID)
        self.assert_model_exists("user/1", {"username": "superadmin"})

    def test_internal_mismatching_passwords(self) -> None:
        response = self.internal_request(
            "user.create", {"username": "test"}, "wrong_pw"
        )
        self.assert_status_code(response, 401)
        self.assert_model_not_exists("user/2")

    def test_internal_no_password_in_request(self) -> None:
        # None -> no auth header at all (see call_internal_route).
        response = self.internal_request("user.create", {"username": "test"}, None)
        self.assert_status_code(response, 401)
        self.assert_model_not_exists("user/2")

    def test_internal_wrong_password_in_request(self) -> None:
        response = self.internal_request("user.create", {"username": "test"}, "wrong")
        self.assert_status_code(response, 401)
        self.assert_model_not_exists("user/2")

    def test_internal_execute_stack_internal_via_public_route(self) -> None:
        # Stack-internal actions must be invisible on the public route.
        self.datastore.truncate_db()
        response = self.request(
            "organization.initial_import", {"data": {}}, internal=False
        )
        self.assert_status_code(response, 400)
        self.assertEqual(
            response.json.get("message"),
            "Action organization.initial_import does not exist.",
        )
        self.assert_model_not_exists("organization/1")

    def test_internal_wrongly_encoded_password(self) -> None:
        # Header value is not valid base64-encoded credentials -> 400.
        response = self.anon_client.post(
            get_route_path(self.route),
            json=[{"action": "user.create", "data": [{"username": "test"}]}],
            headers={INTERNAL_AUTHORIZATION_HEADER: "openslides"},
        )
        self.assert_status_code(response, 400)
        self.assert_model_not_exists("user/2")
@disable_dev_mode
class TestInternalActionsProd(BaseInternalActionTest):
    """
    The same as the TestInternalActionsDev class but in prod mode.
    """

    def test_internal_no_password_on_server(self) -> None:
        # In prod with no INTERNAL_AUTH_PASSWORD_FILE configured, the server
        # cannot validate any password and must answer with a server error.
        response = self.internal_request(
            "user.create", {"username": "test"}, "some password"
        )
        self.assert_status_code(response, 500)
        self.assert_model_not_exists("user/2")
@disable_dev_mode
class TestInternalActionsProdWithPasswordFile(
    BaseInternalActionTest, BaseInternalPasswordTest
):
    """
    Same as TestInternalActionsProd but with a server-side password set.
    """

    def test_internal_wrong_password(self) -> None:
        response = self.internal_request("user.create", {"username": "test"}, "wrong")
        self.assert_status_code(response, 401)
        self.assert_model_not_exists("user/2")

    def test_internal_execute_public_action(self) -> None:
        # Public actions are allowed through the internal route when the
        # correct server-side password is supplied.
        response = self.internal_request(
            "user.create", {"username": "test"}, self.internal_auth_password
        )
        self.assert_status_code(response, 200)
        self.assert_model_exists("user/2")

    def test_internal_execute_stack_internal_action(self) -> None:
        self.datastore.truncate_db()
        response = self.internal_request(
            "organization.initial_import", {"data": {}}, self.internal_auth_password
        )
        self.assert_status_code(response, 200)
        self.assert_model_exists(ONE_ORGANIZATION_FQID)

    def test_internal_execute_backend_internal_action(self) -> None:
        # Backend-internal actions stay hidden even on the internal route.
        response = self.internal_request(
            "option.create",
            {"meeting_id": 1, "text": "test"},
            self.internal_auth_password,
        )
        self.assert_status_code(response, 400)
        self.assertEqual(
            response.json.get("message"), "Action option.create does not exist."
        )
        self.assert_model_not_exists("option/1")
|
[
"noreply@github.com"
] |
OpenSlides.noreply@github.com
|
e0401fc292b6f962226021e0e3f636419bf5068e
|
958b6de6be5fb8bce876373cec29677259c6ceb3
|
/hypergan/train_hooks/experimental/weight_penalty_train_hook.py
|
7a672346f3853a2b0d4a457e359240de7e35efd9
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
RandomStudio/HyperGAN
|
712679b9121ad414d2f91205a82370d54a930120
|
586cefe69805f5ffa8dcb11aaf346f6b3dcf3ac9
|
refs/heads/master
| 2020-06-22T22:43:04.884557
| 2019-07-23T12:17:58
| 2019-07-23T12:17:58
| 198,420,256
| 0
| 0
| null | 2019-07-23T11:52:00
| 2019-07-23T11:51:59
| null |
UTF-8
|
Python
| false
| false
| 2,923
|
py
|
#From https://gist.github.com/EndingCredits/b5f35e84df10d46cfa716178d9c862a3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
import tensorflow as tf
import hyperchamber as hc
import numpy as np
import inspect
from operator import itemgetter
from hypergan.train_hooks.base_train_hook import BaseTrainHook
class WeightPenaltyTrainHook(BaseTrainHook):
  """Train hook adding weight-penalty terms to the discriminator loss.

  Depending on config, builds an L2NN penalty (penalizes singular values
  of each weight matrix above 1) and/or an orthogonality penalty
  (distance of W from an orthogonal matrix), and exposes them via
  losses(). NOTE(review): the `memory_size` and `top_k` parameters are
  accepted but never used — confirm whether they are vestigial.
  """

  def __init__(self, gan=None, config=None, trainer=None, name="WeightPenaltyTrainHook", memory_size=2, top_k=1):
    super().__init__(config=config, gan=gan, trainer=trainer, name=name)
    d_losses = []
    weights = self.gan.weights()
    if config.only_d:
      # NOTE(review): references self.discriminator — presumably provided
      # by the base hook / gan; confirm it exists when only_d is set.
      weights = self.discriminator.weights()
    if config.l2nn_penalty:
      l2nn_penalties = []
      if len(weights) > 0:
        for w in weights:
          # Flatten each weight tensor to a 2-D matrix before forming
          # the Gram matrices W^T W and W W^T.
          w = tf.reshape(w, [-1, self.ops.shape(w)[-1]])
          wt = tf.transpose(w)
          wtw = tf.matmul(wt,w)
          wwt = tf.matmul(w,wt)
          def _l(m):
            # Column-sum bound on the Gram matrix: penalize the amount by
            # which the largest absolute column sum exceeds 1.
            m = tf.abs(m)
            m = tf.reduce_sum(m, axis=0,keep_dims=True)
            m = tf.maximum(m-1, 0)
            m = tf.reduce_max(m, axis=1,keep_dims=True)
            return m
          # Use whichever Gram orientation yields the smaller penalty.
          l2nn_penalties.append(tf.minimum(_l(wtw), _l(wwt)))
      print('l2nn_penalty', self.config.l2nn_penalty, l2nn_penalties)
      l2nn_penalty = self.config.l2nn_penalty * tf.add_n(l2nn_penalties)
      self.add_metric('l2nn_penalty', self.gan.ops.squash(l2nn_penalty))
      d_losses.append(l2nn_penalty)
    if config.ortho_penalty:
      penalties = []
      for w in self.gan.weights():
        print("PENALTY", w)
        w = tf.reshape(w, [-1, self.ops.shape(w)[-1]])
        wt = tf.transpose(w)
        wtw = tf.matmul(wt,w)
        wwt = tf.matmul(w,wt)
        # W (W^T W) == W iff W^T W is the identity, i.e. W is orthogonal.
        mwtw = tf.matmul(w, wtw)
        mwwt = tf.matmul(wt, wwt)
        def _l(w,m):
          l = tf.reduce_mean(tf.abs(w - m))
          l = self.ops.squash(l)
          return l
        penalties.append(tf.minimum(_l(w, mwtw), _l(wt, mwwt)))
      penalty = self.config.ortho_penalty * tf.add_n(penalties)
      self.add_metric('ortho_penalty', self.gan.ops.squash(penalty))
      print("PENALTY", penalty)
      # Broadcast the scalar penalty across the batch dimension.
      penalty = tf.reshape(penalty, [1,1])
      penalty = tf.tile(penalty, [self.gan.batch_size(), 1])
      d_losses.append(penalty)
    self.loss = self.ops.squash(d_losses)

  def losses(self):
    # Same penalty is applied to both generator and discriminator losses.
    return [self.loss, self.loss]

  def after_step(self, step, feed_dict):
    # No per-step work needed; penalties are part of the loss graph.
    pass

  def before_step(self, step, feed_dict):
    pass
|
[
"mikkel@255bits.com"
] |
mikkel@255bits.com
|
bb7645b996dd70bb11bceb7fa31190757f205a92
|
141d1fb160fcfb4294d4b0572216033218da702d
|
/exec -l /bin/zsh/google-cloud-sdk/lib/surface/composer/environments/run.py
|
b81165e938f3ff95fea3676709e9be6e342bacc4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
sudocams/tech-club
|
1f2d74c4aedde18853c2b4b729ff3ca5908e76a5
|
c8540954b11a6fd838427e959e38965a084b2a4c
|
refs/heads/master
| 2021-07-15T03:04:40.397799
| 2020-12-01T20:05:55
| 2020-12-01T20:05:55
| 245,985,795
| 0
| 1
| null | 2021-04-30T21:04:39
| 2020-03-09T08:51:41
|
Python
|
UTF-8
|
Python
| false
| false
| 7,255
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to run an Airflow CLI sub-command in an environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from googlecloudsdk.api_lib.composer import environments_util as environments_api_util
from googlecloudsdk.api_lib.composer import util as api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import resource_args
from googlecloudsdk.command_lib.composer import util as command_util
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
WORKER_POD_SUBSTR = 'worker'
WORKER_CONTAINER = 'airflow-worker'
DEPRECATION_WARNING = ('Because Cloud Composer manages the Airflow metadata '
'database for your environment, support for the Airflow '
'`{}` subcommand is being deprecated. '
'To avoid issues related to Airflow metadata, we '
'recommend that you do not use this subcommand unless '
'you understand the outcome.')
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Run(base.Command):
  # NOTE: this docstring is also the user-facing gcloud help text; do not
  # edit it casually.
  """Run an Airflow sub-command remotely in a Cloud Composer environment.
  Executes an Airflow CLI sub-command remotely in an environment. If the
  sub-command takes flags, separate the environment name from the sub-command
  and its flags with ``--''. This command waits for the sub-command to
  complete; its exit code will match the sub-command's exit code.
  ## EXAMPLES
  The following command:
  {command} myenv trigger_dag -- some_dag --run_id=foo
  is equivalent to running the following command from a shell inside the
  *my-environment* environment:
  airflow trigger_dag some_dag --run_id=foo
  """

  @staticmethod
  def Args(parser):
    # Positional environment resource; the Airflow subcommand is restricted
    # to a whitelist, and everything after it is passed through verbatim
    # via argparse.REMAINDER.
    resource_args.AddEnvironmentResourceArg(
        parser, 'in which to run an Airflow command')
    parser.add_argument(
        'subcommand',
        metavar='SUBCOMMAND',
        choices=command_util.SUBCOMMAND_WHITELIST,
        help=('The Airflow CLI subcommand to run. Available subcommands '
              'include: {} (see https://airflow.apache.org/cli.html for more '
              'info). Note that delete_dag is available from Airflow 1.10.1, '
              'and list_dag_runs, next_execution are available from Airflow '
              '1.10.2.').format(', '.join(command_util.SUBCOMMAND_WHITELIST)))
    parser.add_argument(
        'cmd_args',
        metavar='CMD_ARGS',
        nargs=argparse.REMAINDER,
        help='Command line arguments to the subcommand.',
        example='{command} myenv trigger_dag -- some_dag --run_id=foo')

  def BypassConfirmationPrompt(self, args):
    """Bypasses confirmations with "yes" responses.
    Prevents certain Airflow CLI subcommands from presenting a confirmation
    prompting (which can hang the gcloud CLI). When necessary, bypass
    confirmations with a "yes" response.
    Args:
      args: argparse.Namespace, An object that contains the values for the
        arguments specified in the .Args() method.
    """
    prompting_subcommands = ['delete_dag']
    # Only append '--yes' if the user did not already pass -y/--yes.
    if args.subcommand in prompting_subcommands and set(
        args.cmd_args).isdisjoint({'-y', '--yes'}):
      args.cmd_args.append('--yes')

  def DeprecationWarningPrompt(self, args):
    """Asks the user to confirm deprecated subcommands; aborts on "no"."""
    response = True
    if args.subcommand in command_util.SUBCOMMAND_DEPRECATION:
      # cancel_on_no=True raises and ends the command if the user declines.
      response = console_io.PromptContinue(
          message=DEPRECATION_WARNING.format(args.subcommand),
          default=False, cancel_on_no=True)
    return response

  def ConvertKubectlError(self, error, env_obj):
    # GA behavior: pass the kubectl error through unchanged. Subclasses
    # (e.g. RunBeta) may enrich the error using env_obj.
    del env_obj  # Unused argument.
    return error

  def Run(self, args):
    self.DeprecationWarningPrompt(args)
    running_state = (
        api_util.GetMessagesModule(release_track=self.ReleaseTrack())
        .Environment.StateValueValuesEnum.RUNNING)
    env_ref = args.CONCEPTS.environment.Parse()
    env_obj = environments_api_util.Get(
        env_ref, release_track=self.ReleaseTrack())
    # The subcommand executes on a worker pod, so the environment must be up.
    if env_obj.state != running_state:
      raise command_util.Error(
          'Cannot execute subcommand for environment in state {}. '
          'Must be RUNNING.'.format(env_obj.state))
    cluster_id = env_obj.config.gkeCluster
    cluster_location_id = command_util.ExtractGkeClusterLocationId(env_obj)
    # Point kubectl at the environment's GKE cluster only for the duration
    # of this command.
    with command_util.TemporaryKubeconfig(cluster_location_id, cluster_id):
      try:
        kubectl_ns = command_util.FetchKubectlNamespace(
            env_obj.config.softwareConfig.imageVersion)
        pod = command_util.GetGkePod(
            pod_substr=WORKER_POD_SUBSTR, kubectl_namespace=kubectl_ns)
        log.status.Print(
            'Executing within the following kubectl namespace: {}'.format(
                kubectl_ns))
        self.BypassConfirmationPrompt(args)
        kubectl_args = [
            'exec', pod, '-tic', WORKER_CONTAINER, 'airflow', args.subcommand
        ]
        if args.cmd_args:
          # Add '--' to the argument list so kubectl won't eat the command args.
          kubectl_args.extend(['--'] + args.cmd_args)
        command_util.RunKubectlCommand(
            command_util.AddKubectlNamespace(kubectl_ns, kubectl_args),
            out_func=log.status.Print)
      except command_util.KubectlError as e:
        # Give subclasses a chance to attach environment-specific hints.
        raise self.ConvertKubectlError(e, env_obj)
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class RunBeta(Run):
  # NOTE: this docstring is also the user-facing gcloud help text.
  """Run an Airflow sub-command remotely in a Cloud Composer environment.
  Executes an Airflow CLI sub-command remotely in an environment. If the
  sub-command takes flags, separate the environment name from the sub-command
  and its flags with ``--''. This command waits for the sub-command to
  complete; its exit code will match the sub-command's exit code.
  ## EXAMPLES
  The following command:
  {command} myenv trigger_dag -- some_dag --run_id=foo
  is equivalent to running the following command from a shell inside the
  *my-environment* environment:
  airflow trigger_dag some_dag --run_id=foo
  """

  def ConvertKubectlError(self, error, env_obj):
    """Adds a private-IP troubleshooting hint to kubectl errors.

    kubectl commonly fails against private-IP environments when the
    caller's machine has no network path to the cluster, so the error is
    wrapped with a pointer to the relevant documentation.
    """
    is_private = (
        env_obj.config.privateEnvironmentConfig and
        env_obj.config.privateEnvironmentConfig.enablePrivateEnvironment)
    if is_private:
      return command_util.Error(
          str(error) +
          ' Make sure you have followed https://cloud.google.com/composer/docs/how-to/accessing/airflow-cli#running_commands_on_a_private_ip_environment '
          'to enable access to your private Cloud Composer environment from '
          'your machine.')
    # Non-private environments: return the original error untouched.
    return error
|
[
"yogocamlus@gmail.com"
] |
yogocamlus@gmail.com
|
1750d92d1dc355447d3f4c59c6a8905eb0f2bb15
|
23a1faa037ddaf34a7b5db8ae10ff8fa1bb79b94
|
/TCS_Practice/TCS_CodeVita_Problems/Constellation.py
|
e77bcc038db671a3100235e6c5e1bd94fd310097
|
[] |
no_license
|
Pyk017/Competetive-Programming
|
e57d2fe1e26eeeca49777d79ad0cbac3ab22fe63
|
aaa689f9e208bc80e05a24b31aa652048858de22
|
refs/heads/master
| 2023-04-27T09:37:16.432258
| 2023-04-22T08:01:18
| 2023-04-22T08:01:18
| 231,229,696
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
'''
Three characters { #, *, . } represents a constellation of stars and galaxies in space. Each galaxy is demarcated by # characters. There can be one or many stars in a given galaxy. Stars can only be in shape of vowels { A, E, I, O, U } . A collection of * in the shape of the vowels is a star. A star is contained in a 3x3 block. Stars cannot be overlapping. The dot(.) character denotes empty space.
Given 3xN matrix comprising of { #, *, . } character, find the galaxy and stars within them.
Note: Please pay attention to how vowel A is denoted in a 3x3 block in the examples section below.
Constraints
3 <= N <= 10^5
Input
Input consists of single integer N denoting number of columns.
Output
Output contains vowels (stars) in order of their occurrence within the given galaxy. Galaxy itself is represented by # character.
Example 1
Input
18
* . * # * * * # * * * # * * * . * .
* . * # * . * # . * . # * * * * * *
* * * # * * * # * * * # * * * * . *
Output
U#O#I#EA
Explanation
As it can be seen that the stars make the image of the alphabets U, O, I, E and A respectively.
Example 2
Input
12
* . * # . * * * # . * .
* . * # . . * . # * * *
* * * # . * * * # * . *
Output
U#I#A
Explanation
As it can be seen that the stars make the image of the alphabet U, I and A.
Possible solution:
Input:
12
* . * # . * * * # . * .
* . * # . . * . # * * *
* * * # . * * * # * . *
'''
# 3x3 star shapes (rows top-to-bottom) for each vowel, per the problem
# statement's examples.
_VOWELS = {
    ('.*.', '***', '*.*'): 'A',
    ('***', '***', '***'): 'E',
    ('***', '.*.', '***'): 'I',
    ('***', '*.*', '***'): 'O',
    ('*.*', '*.*', '***'): 'U',
}


def decode_constellation(grid):
    """Decode a 3xN grid of '#', '*', '.' cells into the galaxy string.

    Args:
        grid: three equal-length sequences of single-character cells.

    Returns:
        str: vowels in order of occurrence, with '#' marking galaxy borders.

    Raises:
        KeyError: if a 3x3 star block does not match any vowel shape.
    """
    width = len(grid[0])
    out = []
    col = 0
    # A while loop is required so star columns can actually be skipped;
    # the original `for i in range(n)` with `i = i + 2` never advanced.
    while col < width:
        column = (grid[0][col], grid[1][col], grid[2][col])
        if column == ('#', '#', '#'):
            out.append('#')
            col += 1
        elif column == ('.', '.', '.'):
            # Empty space between stars or at a galaxy edge.
            col += 1
        else:
            # A star occupies the 3x3 block starting at this column; no
            # vowel shape has an all-dot leftmost column, so this anchor
            # is always the block's first column.
            block = tuple(''.join(grid[row][col:col + 3]) for row in range(3))
            out.append(_VOWELS[block])
            col += 3
    return ''.join(out)


if __name__ == '__main__':
    # Bugs fixed vs. the original script: cells were parsed with int()
    # although they are '#'/'*'/'.' characters (ValueError), an undefined
    # index `j` was used (NameError), and the star-skipping logic was a
    # no-op (see decode_constellation).
    int(input())  # N; the actual width is taken from the row length.
    rows = [input().split() for _ in range(3)]
    print(decode_constellation(rows))
|
[
"prakharkumar506978@gmail.com"
] |
prakharkumar506978@gmail.com
|
f85f6f39aa12d9bd44917d0f830d724ec3d6f956
|
c42f7f7a8421103cc3ca8ee44673704f7eea22b1
|
/src/utils/routes.py
|
01fe170c5eaa9ad4e3f0cf77beb0b1f34279b976
|
[
"MIT"
] |
permissive
|
styleolder/fp-server
|
fe585fe73014eb0421b25d5579191d24276df250
|
ae405e7c37a919bd73be567e3e098e7fe5524097
|
refs/heads/master
| 2020-03-21T12:05:36.250998
| 2018-06-24T13:07:51
| 2018-06-24T13:07:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,770
|
py
|
# -*- coding:utf-8 -*-
"""
http uri 路由装饰器
"""
from utils import log as logger
class route(object):
    """URI routing decorator for HTTP request handlers.

    @route('/some/path')
    class SomeRequestHandler(RequestHandler):
        pass

    @route('/some/path', name='other')
    class SomeOtherRequestHandler(RequestHandler):
        pass

    my_routes = route.make_routes(['api'])
    """

    # Class-level registry of {'uri', 'name', 'handler'} dicts, shared by
    # all instances and filled as handler classes are decorated.
    _routes = []

    def __init__(self, uri, name=None):
        """Create the decorator.

        Args:
            uri: URI to register; URI regular expressions are supported.
            name: optional alias for the URI. Defaults to the URI with
                '/' replaced by '-'.
        """
        self.uri = uri
        if not name:
            name = '-'.join(uri.split('/'))
        self.name = name

    def __call__(self, _handler):
        """Register the decorated handler class and return it unchanged."""
        for item in self._routes:
            # A duplicate URI is an error; a duplicate alias only a warning.
            if item.get('uri') == self.uri:
                logger.error('uri already exists! uri:', self.uri, 'name:', self.name, 'handler:', _handler, caller=self)
            if item.get('name') == self.name:
                logger.warn('name already exists! uri:', self.uri, 'name:', self.name, 'handler:', _handler, caller=self)
        self._routes.append({'uri': self.uri, 'name': self.name, 'handler': _handler})
        return _handler

    @classmethod
    def make_routes(cls, dirs):
        """Import handler modules and return all registered routes.

        Args:
            dirs: list of importable module paths; importing each module
                runs its @route decorators, which populate cls._routes as
                a side effect.

        Returns:
            list of (uri, handler) tuples in registration order.
        """
        import importlib  # local import keeps the module's dependency list unchanged
        for module_path in dirs:
            # Replaces the original exec('import %s' % ...), which built
            # and executed code from a string and shadowed builtin `dir`.
            importlib.import_module(module_path)
        routes = []
        for handler_dic in cls._routes:
            logger.info('register uri:', handler_dic['uri'], 'handler:', handler_dic.get('handler'), caller=cls)
            routes.append((handler_dic.get('uri'), handler_dic.get('handler')))
        return routes
|
[
"valesail7@gmail.com"
] |
valesail7@gmail.com
|
82d322d9d2a24a4f17977671c69823b4c05dcae3
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/207_3.py
|
a0c0add9e9275d90f0930004027fe8138ec29417
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204
| 2022-05-09T14:05:50
| 2022-05-09T14:05:50
| 209,430,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
class Solution(object):
    def canFinish(self, numCourses, prerequisites):
        """Return True if all courses can be finished, i.e. the
        prerequisite graph contains no cycle (LeetCode 207).

        Uses Kahn's topological sort with a queue of zero-indegree
        courses: O(V + E), replacing the original's repeated full scan
        of the indegree list, which was O(V^2).

        :type numCourses: int
        :type prerequisites: List[List[int]]  # [course, prerequisite] pairs
        :rtype: bool
        """
        from collections import deque  # O(1) pops from the left

        # Adjacency list: prerequisite -> courses it unlocks.
        graph = [[] for _ in range(numCourses)]
        indegree = [0] * numCourses
        for course, prereq in prerequisites:
            graph[prereq].append(course)
            indegree[course] += 1

        # Start with courses that have no prerequisites; taking a course
        # may unlock further courses.
        ready = deque(c for c in range(numCourses) if indegree[c] == 0)
        taken = 0
        while ready:
            course = ready.popleft()
            taken += 1
            for unlocked in graph[course]:
                indegree[unlocked] -= 1
                if indegree[unlocked] == 0:
                    ready.append(unlocked)

        # All courses were ordered iff there is no cycle.
        return taken == numCourses
# Smoke tests for Solution.canFinish.
a = Solution()
# Course 1 depends only on course 0: finishable.
presp = [[1, 0]]
num = 2
print(a.canFinish(num, presp))  # expected: True
# Courses 0 and 1 depend on each other (a cycle): not finishable.
nums = 2
psp = [[1, 0], [0, 1]]
# Fixed: the original passed `num` here, leaving `nums` unused (it only
# worked because both happened to equal 2).
print(a.canFinish(nums, psp))  # expected: False
|
[
"1533441387@qq.com"
] |
1533441387@qq.com
|
3e67c476deabc53331ccd7582f0feff94455d632
|
31741f4807f857675f9304088b689af9b043e7b1
|
/chp10/django_ecommerce/contact/views.py
|
8160a48a0c26f4c0a9a9857aa9771927481e3ab1
|
[] |
no_license
|
ujrc/Realpython_book3
|
c487ff0569f90b0e21c2c51cf951d6aad4755541
|
aaff8db074b8dd33d6c7305ac0a94c2ef161c847
|
refs/heads/master
| 2021-01-10T02:02:11.247279
| 2016-01-11T17:06:59
| 2016-01-11T17:06:59
| 48,060,189
| 0
| 0
| null | 2015-12-31T16:48:52
| 2015-12-15T18:03:47
|
Python
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from .forms import ContactView
from django.contrib import messages
def contact(request):
    """Render the site contact form and persist valid submissions.

    On a valid POST the message is saved, a confirmation flash message
    is queued, and the user is redirected to the site root. Otherwise
    the (possibly bound, with validation errors) form is re-rendered.
    """
    if request.method == 'POST':
        form = ContactView(request.POST)
        if form.is_valid():
            submission = form.save(commit=False)
            submission.save()
            messages.add_message(
                request, messages.INFO, 'Your message has been sent. Thank you.'
            )
            return HttpResponseRedirect('/')
        # Invalid POST: fall through and re-render the bound form.
    else:
        form = ContactView()
    template = loader.get_template('contact/contact.html')
    context = RequestContext(request, {'form': form, })
    return HttpResponse(template.render(context))
|
[
"uwjearc@yahoo.com"
] |
uwjearc@yahoo.com
|
337d900284082e21087ff98ddb9d2bb64e6b8248
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_banked.py
|
08d044b1996c25d485b9094eb3beb1112231d788
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#calss header
class _BANKED():
def __init__(self,):
self.name = "BANKED"
self.definitions = bank
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['bank']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.