blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43b33abced12630b4b24f937759544ab20ef8370
|
7f7bf9a5827d1441f18f568fc75ed5bf0159ca6c
|
/14_Yandex_Final_Tasks/A/A.py
|
d4258510d4f596ba4eaf12fd680b41e62c69c28d
|
[] |
no_license
|
KorsakovPV/yandex_contest
|
08bcff4eaf38d46a8348ac3abbb5f496857fe8e4
|
f67917ef710f5b138142b11ec4e6e4678b23e408
|
refs/heads/master
| 2023-01-06T13:04:07.955570
| 2020-10-24T20:22:41
| 2020-10-24T20:22:41
| 290,097,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,166
|
py
|
"""
A. Большое число
Вечером ребята решили поиграть в игру "Большое число".
Даны числа. Нужно определить, какое самое большое число можно из них составить.
Формат ввода
В первой строке записано n - количество чисел. Оно не превосходит 100.
Во второй строке через пробел записаны n неотрицательных чисел, каждое из
которых не превосходит 1000.
Формат вывода
Нужно вывести самое большое число, которое можно из них составить.
"""
# 34601917
class solution_key(str):
    """Sort key for the "largest number" ordering.

    A string x ranks before y (x < y) exactly when the concatenation
    x + y spells a larger digit string than y + x.
    """

    def __lt__(self, other):
        ahead = self + other
        behind = other + self
        return ahead > behind
class Solution:
    """Assembles the largest number obtainable by concatenating digit strings."""

    def largest_number(self, nums):
        """Sort *nums* in place under the concatenation order, then join.

        Note: mutates the caller's list, matching the original contract.
        """
        nums.sort(key=solution_key)
        result = ''.join(nums)
        return result
def main(input_file):
    """Parse raw contest input (count on line 1, numbers on line 2) and
    return the largest concatenated number as a string."""
    lines = input_file.rstrip().split('\n')
    numbers = lines[1].rstrip().split()
    return Solution().largest_number(numbers)
def test(num):
    """Print largest_number results for successive 3-way splits of *num*.

    For each prefix width from 1 up to (but excluding) len(num) // 2 the
    digit string is cut into [prefix, middle, suffix] and fed to the solver;
    output goes to stdout only.
    """
    for width in range(1, len(num) // 2):
        pieces = [num[:width], num[width:-width], num[-width:]]
        sol = Solution()
        print(pieces, sol.largest_number(pieces))
if __name__ == '__main__':
    # Contest harness: read the task input, write the answer, then run the
    # ad-hoc checks below (they require input1.txt..input3.txt to exist
    # next to the script).
    with open('input.txt') as f:
        input_file = f.read()
    answer = main(input_file)
    with open('output.txt', 'w') as f:
        f.write(answer)
    # Smoke-tests over synthetic digit strings (print only, no asserts).
    test('123456789')
    test('998877665544332211')
    test('11111111112')
    # Regression checks against known answers; note main() is evaluated a
    # second time just to build the failure message.
    with open('input1.txt') as f:
        input_file = f.read()
    assert main(input_file) == '56215', 'input1.txt error\n' + str(
        main(input_file))
    with open('input2.txt') as f:
        input_file = f.read()
    assert main(input_file) == '78321', 'input2.txt error\n' + str(
        main(input_file))
    with open('input3.txt') as f:
        input_file = f.read()
    assert main(input_file) == '542210', 'input3.txt error\n' + str(
        main(input_file))
|
[
"pavelkpv@gmail.com"
] |
pavelkpv@gmail.com
|
db312e4b277fb2bb1bcd2efcb4f3457c0f939a0b
|
460f981dfe1a05f14d2a4cdc6cc71e9ad798b785
|
/3/amd64/envs/navigator/lib/python3.6/site-packages/osgeo/_osr.py
|
35c7ae045de0cf8a2f9d8473b59af53c353048cc
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-mit-old-style",
"dtoa",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Zlib",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"Intel",
"Python-2.0"
] |
permissive
|
DFO-Ocean-Navigator/navigator-toolchain
|
d8c7351b477e66d674b50da54ec6ddc0f3a325ee
|
930d26886fdf8591b51da9d53e2aca743bf128ba
|
refs/heads/master
| 2022-11-05T18:57:30.938372
| 2021-04-22T02:02:45
| 2021-04-22T02:02:45
| 234,445,230
| 0
| 1
|
BSD-3-Clause
| 2022-10-25T06:46:23
| 2020-01-17T01:26:49
|
C++
|
UTF-8
|
Python
| false
| false
| 309
|
py
|
def __bootstrap__():
    """Load the compiled ``_osr`` extension module in place of this stub.

    Generated loader shim: resolves the bundled shared object inside the
    installed package and loads it under this module's name.
    """
    global __bootstrap__, __loader__, __file__
    # NOTE(review): `imp` is deprecated (removed in Python 3.12); this stub
    # targets Python 3.6 (see the .so filename) where imp.load_dynamic exists.
    import sys, pkg_resources, imp
    __file__ = pkg_resources.resource_filename(__name__, '_osr.cpython-36m-x86_64-linux-gnu.so')
    # Drop the stub names so the dynamically loaded module fully replaces
    # this one in sys.modules.
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"dwayne.hart@gmail.com"
] |
dwayne.hart@gmail.com
|
a5d2f2afc771f9616114150718cd4b7d835be5b4
|
30150c7f6ed7a10ac50eee3f40101bc3165ebf9e
|
/src/coghq/DistributedMazeAI.py
|
29fd883b37c9512dac79c0b2b4c58979121052c7
|
[] |
no_license
|
toontown-restoration-project/toontown
|
c2ad0d552cb9d5d3232ae6941e28f00c11ca3aa8
|
9bef6d9f823b2c12a176b33518eaa51ddbe3fd2f
|
refs/heads/master
| 2022-12-23T19:46:16.697036
| 2020-10-02T20:17:09
| 2020-10-02T20:17:09
| 300,672,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,244
|
py
|
from otp.level import DistributedEntityAI
from . import DistributedBarrelBaseAI
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import globalClockDelta
from direct.task import Task
class DistributedMazeAI(DistributedEntityAI.DistributedEntityAI):
    """AI-side maze minigame entity: times the game, tracks which toons
    finish, rewards finishers, and applies damage on hazard contact."""

    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMazeAI')

    def __init__(self, level, entId):
        """Create the maze."""
        DistributedEntityAI.DistributedEntityAI.__init__(
            self, level, entId)
        # doId of the containing room (exposed to clients via getRoomDoId).
        self.roomDoId = level.doId
        # Defaults; GameDuration is recomputed in setGameStart.
        self.GameDuration = 60.0
        self.DamageOnFailure = 20
        # avIds of toons that have completed the maze, in finish order.
        self.finishedList = []

    def delete(self):
        """Delete ourself and cleanup tasks."""
        self.removeAllTasks()
        DistributedEntityAI.DistributedEntityAI.delete(self)

    def announceGenerate(self):
        """Load fields dependent on required fields."""
        DistributedEntityAI.DistributedEntityAI.announceGenerate(self)
        self.mazeEndTimeTaskName = self.uniqueName('mazeEndTime')

    def getRoomDoId(self):
        """Return the doId of the room that contains us."""
        return self.roomDoId

    def setClientTriggered(self):
        """A player entered us, start the moles."""
        # Only the first client trigger starts the clock; later ones no-op.
        if not hasattr(self, 'gameStartTime'):
            self.gameStartTime = globalClock.getRealTime()
            self.b_setGameStart(globalClockDelta.localToNetworkTime(
                self.gameStartTime))

    def b_setGameStart(self, timestamp):
        # send the distributed message first, so that any network msgs
        # sent by the subclass upon start of the game will arrive
        # after the game start msg
        self.d_setGameStart(timestamp)
        self.setGameStart(timestamp)

    def d_setGameStart(self, timestamp):
        # Broadcast the start timestamp to clients.
        self.notify.debug("BASE: Sending setGameStart")
        self.sendUpdate("setGameStart", [timestamp])

    def setGameStart(self, timestamp):
        """
        This method gets called when all avatars are ready
        Inheritors should call this plus the code to start the game
        """
        self.notify.debug("BASE: setGameStart")
        # NOTE(review): self.numSections is not assigned in this class --
        # presumably provided by the level/entity spec; confirm.
        self.GameDuration = 35.0 + (self.numSections * 15.0)
        self.prepareForGameStartOrRestart()

    def prepareForGameStartOrRestart(self):
        """Zero out needed fields on a start or restart of the mole field."""
        # Schedule the end-of-game handler for when time runs out.
        self.doMethodLater(self.GameDuration, self.gameEndingTimeHit, self.mazeEndTimeTaskName)

    def setFinishedMaze(self):
        # Client RPC: the sender claims to have reached the maze exit.
        senderId = self.air.getAvatarIdFromSender()
        if senderId not in self.finishedList:
            toon = simbase.air.doId2do.get(senderId)
            if toon:
                # First finisher gets a full toon-up, the rest a small one.
                if len(self.finishedList) < 1:
                    toon.toonUp(200.0)
                else:
                    toon.toonUp(20.0)
            # lastToon == 1 when this finisher completes the set of present avatars.
            lastToon = 0
            if hasattr(self, 'level'):
                numToons = len(self.level.presentAvIds)
                if numToons == (len(self.finishedList) + 1):
                    lastToon = 1
            self.sendUpdate("toonFinished", [senderId, len(self.finishedList), lastToon])
            #print("toonFinished sent")
            self.finishedList.append(senderId)

    def gameEndingTimeHit(self, task):
        """Handle the game hitting its ending time."""
        # NOTE(review): getLevelDoId is not defined in this class; assumed
        # to come from the DistributedEntityAI base -- confirm.
        roomId = self.getLevelDoId()
        room = simbase.air.doId2do.get(roomId)
        if room:
            playerIds = room.presentAvIds
            # Mark everyone still in the maze as finished, then end the game.
            for avId in playerIds:
                av = simbase.air.doId2do.get(avId)
                if av and (avId not in self.finishedList):
                    self.finishedList.append(avId)
            self.sendUpdate("setGameOver", [])

    def damageMe(self):
        # Client RPC: damage the sender for touching a maze hazard; only
        # honored when the sender is actually present in our room.
        senderId = self.air.getAvatarIdFromSender()
        av = simbase.air.doId2do.get(senderId)
        roomId = self.getLevelDoId()
        room = simbase.air.doId2do.get(roomId)
        if room:
            playerIds = room.presentAvIds
            if av and (senderId in playerIds):
                av.takeDamage(self.DamageOnFailure, quietly=0)
                room.sendUpdate('forceOuch',[self.DamageOnFailure])
|
[
"brianlach72@gmail.com"
] |
brianlach72@gmail.com
|
e71df24b87f48a17c62c90b1f716500fac38224f
|
5c4136623d2ffea23d9e4d25e4f930f06cb627f3
|
/tapis_cli/commands/taccapis/v2/jobs/helpers/error.py
|
0457ab4a363f6ab7a9217d436d081e16e837c4c8
|
[
"BSD-3-Clause"
] |
permissive
|
shwetagopaul92/tapis-cli-ng
|
3ca8ee0127fca435151a2f2e6dbdb26ab501d470
|
6f424b8352c0d034d4f5547fac21d5c8dd097a7f
|
refs/heads/master
| 2020-12-13T09:31:33.334743
| 2020-01-08T19:14:51
| 2020-01-08T19:14:51
| 234,377,360
| 0
| 0
|
BSD-3-Clause
| 2020-01-16T17:45:14
| 2020-01-16T17:45:13
| null |
UTF-8
|
Python
| false
| false
| 2,221
|
py
|
"""Exceptions and error handlers
"""
import logging
import re
import os
import time
from agavepy.agave import AgaveError
from attrdict import AttrDict
from requests.exceptions import HTTPError
# Public surface of this helpers module (re-exports the two upstream
# exception types alongside the local ones and the two helpers).
__all__ = [
    'AgaveError', 'HTTPError', 'HTTPNotFoundError', 'TapisOperationFailed',
    'ImportNotCompleteError', 'OutputFileExistsError', 'read_tapis_http_error',
    'handle_http_error'
]
class OutputFileExistsError(IOError):
    """IOError signaling that an output file already exists (no raise
    sites visible in this module; raised by callers elsewhere)."""
    pass
class TapisOperationFailed(AgaveError):
    """Generic failure of a Tapis API operation."""
    pass
class HTTPNotFoundError(HTTPError):
    """HTTPError specialization for 404 responses (raised by
    handle_http_error below)."""
    pass
class ImportNotCompleteError(HTTPNotFoundError):
    """404 variant signaling (per its name) that an import has not
    completed yet; no raise sites visible in this module."""
    pass
def read_tapis_http_error(http_error_object):
    """Return a one-line, human-readable summary of a Tapis HTTP error.

    Extracts the status code and reason from the exception's response, and
    -- when the response body is JSON -- the service-provided ``message``,
    ``status`` and ``version`` fields, falling back to the raw response
    text otherwise.

    :param http_error_object: an exception carrying a ``response`` attribute
        (e.g. ``requests.exceptions.HTTPError``)
    :return: formatted summary string
    """
    h = http_error_object

    # Extract the HTTP response code; 418 marks "we have no idea what
    # happened" when the response object is unusable.
    code = -1
    try:
        code = h.response.status_code
        assert isinstance(code, int)
    except Exception:
        code = 418

    # Extract the HTTP reason phrase, if any.
    reason = 'UNKNOWN ERROR'
    try:
        reason = h.response.reason
    except Exception:
        pass

    # Tapis APIs will give JSON responses if the target web service is at all
    # capable of fulfilling the request. Therefore, try first to extract fields
    # from the JSON response, then fall back to returning the plain text from
    # the response.
    # (Fix: original default message read "Unexpected encountered by the
    # web service" -- missing the word "error".)
    err_msg = 'Unexpected error encountered by the web service'
    status_msg = 'error'
    version_msg = 'unknown'
    try:
        j = h.response.json()
        if 'message' in j:
            err_msg = j['message']
        if 'status' in j:
            status_msg = j['status']
        if 'version' in j:
            version_msg = j['version']
    except Exception:
        err_msg = h.response.text

    httperror = 'HTTPError - {} {}; message: {}; status: {}; version: {}; response.content: {}'
    return httperror.format(code, reason, err_msg, status_msg, version_msg,
                            h.response.content)
def handle_http_error(httperror):
    """Re-raise *httperror* as a more specific, decorated exception.

    404 responses become :class:`HTTPNotFoundError`; anything else is raised
    as :class:`TapisOperationFailed` carrying the human-readable summary.

    Fix: the original did ``raise decorated_http_error`` on a *string*,
    which itself raises ``TypeError: exceptions must derive from
    BaseException`` at runtime -- the summary is now wrapped in an
    exception type defined in this module.

    :param httperror: exception with a ``response`` attribute
    :raises HTTPNotFoundError: when the response status is 404
    :raises TapisOperationFailed: for every other HTTP error
    """
    decorated_http_error = read_tapis_http_error(httperror)
    if httperror.response.status_code == 404:
        raise HTTPNotFoundError(httperror)
    raise TapisOperationFailed(decorated_http_error)
|
[
"vaughn@tacc.utexas.edu"
] |
vaughn@tacc.utexas.edu
|
3ce314bb3d0814413a90400f26fbcd5b9b62d98b
|
177338a720f904f63926da055364cc0e2c0a850c
|
/spark/pyspark(by Leaderman git)/1.5.1/examples/app/spark_app_read_data_from_rcfile.py
|
b07906362ca93ab5697ccd402ee00a50a9c8df24
|
[
"Apache-2.0"
] |
permissive
|
xuefenga616/mygit
|
60ef7bf7201603e13d4621cf7a39dea8ec92e0b7
|
be3b8003fcc900ce7ca6616a9ddebb0edcbc1407
|
refs/heads/master
| 2020-09-13T11:50:55.448041
| 2017-08-27T10:59:00
| 2017-08-27T10:59:00
| 67,042,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
# Python 2 script (note the print statement): read an RCFile from HDFS via
# Spark's hadoopFile API and dump every (key, value) row to stdout.
from pyspark import SparkConf, SparkContext

conf = SparkConf().setAppName("spark_app_read_data_from_rcfile")
sc = SparkContext(conf=conf)

# The custom valueConverter translates BytesRefArrayWritable rows into plain
# object arrays on the Python side; key/value classes are the Hadoop/Hive
# writables for RCFile input.
rowRDD = sc.hadoopFile(path="hdfs://dip.cdh5.dev:8020/user/yurun/rcfile",
                       inputFormatClass="org.apache.hadoop.hive.ql.io.RCFileInputFormat",
                       keyClass="org.apache.hadoop.io.LongWritable",
                       valueClass="org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable",
                       valueConverter="com.sina.dip.spark.converter.BytesRefArrayWritableToObjectArrayConverter")

# collect() pulls the entire RDD to the driver -- fine for small test data only.
pairs = rowRDD.collect()

for pair in pairs:
    print pair[0], pair[1]

sc.stop()
|
[
"xuefeng_11@qq.com"
] |
xuefeng_11@qq.com
|
f9de43be1783e9e199bc07fac2b6171e9cd208b6
|
888a39e5e75f0e8311bbef63749685acf67fe715
|
/clubs/migrations/0004_auto_20150313_0759.py
|
dcccaf36853555bf052545438947b8beb7c4d180
|
[] |
no_license
|
mozilla/teach-api
|
94820e4ef9d51b1735b046a2a76cfbb9a37321da
|
35a38df7addb8ca37e014705fe056755c3c1564f
|
refs/heads/master
| 2023-09-01T09:00:37.995896
| 2016-12-15T19:42:23
| 2016-12-15T19:42:23
| 32,082,559
| 3
| 12
| null | 2019-03-29T05:11:24
| 2015-03-12T14:50:19
|
Python
|
UTF-8
|
Python
| false
| false
| 994
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Replace Club.creator with an owner ForeignKey and make
    Club.longitude optional."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('clubs', '0003_auto_20150313_0020'),
    ]

    operations = [
        # Drop the old creator field before introducing its replacement.
        migrations.RemoveField(
            model_name='club',
            name='creator',
        ),
        # default=1 backfills existing rows with user pk 1; the default is
        # not kept on the model afterwards (preserve_default=False).
        migrations.AddField(
            model_name='club',
            name='owner',
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL, help_text=b'The user who owns the Club and can change it.'),
            preserve_default=False,
        ),
        # Allow longitude to be blank/null so it can be geocoded later.
        migrations.AlterField(
            model_name='club',
            name='longitude',
            field=models.FloatField(help_text=b'Longitude of the club. Leave blank to automatically determine.', null=True, blank=True),
            preserve_default=True,
        ),
    ]
|
[
"varmaa@gmail.com"
] |
varmaa@gmail.com
|
d091faee1b1bd72dac81cd9817bad618455a9452
|
a3978d2c5367b116e77c73637d6c04f6cb8bc8a8
|
/1807-1/07day/14-登录.py
|
1bbd9e9ee574d863a8484430290440535cae5329
|
[] |
no_license
|
liuchenghao2000/1807
|
9acb1abd0035f93113cd7c1bda5b2b30f511a4d7
|
cdbec3aae134b6f5ed0208fb5f8ee3e2a874a4ae
|
refs/heads/master
| 2020-03-27T07:17:25.369702
| 2018-09-06T01:46:28
| 2018-09-06T01:46:28
| 139,957,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# Minimal interactive login check against hard-coded credentials:
# the account is compared as an int, the password as a string.
account = 123456
password = 'abc'

# NOTE(review): int() raises ValueError on non-numeric account input.
a = int(input('请输入账号:'))   # prompt: "enter account number"
p = input('请输入密码:')        # prompt: "enter password"

if a == account and p == password:
    print('登陆成功')    # "login successful"
elif a != account:
    print('账号不对')    # "wrong account" (reported first when both are wrong)
elif p != password:
    print('密码错误')    # "wrong password"
|
[
"1773719461@qq.com"
] |
1773719461@qq.com
|
115dec5e597d87a852c7cd0e1e0fb2a005e0196e
|
54c5ddf4d427c52447a983b8e26409fdb4de6c11
|
/src/architectures/jet_transforms/nmp/fixed_nmp/fixed_nmp.py
|
b844083ab5732d9827823ede20402583841bff3a
|
[
"BSD-3-Clause"
] |
permissive
|
XintianHan/jets
|
eda3b0f4f1256b57669cc59664d22b38d3999bc1
|
2ee178eaa7f6629d25ca87ee1096e5ea5d15bfd3
|
refs/heads/master
| 2021-04-18T21:35:03.263843
| 2018-03-22T21:32:35
| 2018-03-22T21:32:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#from .adjacency import construct_physics_based_adjacency_matrix
#from ..stacked_nmp.attention_pooling import construct_pooling_layer
from ..message_passing import MP_LAYERS
#from ..message_passing.adjacency import construct_adjacency_matrix_layer
from ..adjacency import construct_adjacency
from .....architectures.readout import READOUTS
from .....architectures.embedding import EMBEDDINGS
from .....monitors import Histogram
from .....monitors import Collect
from .....monitors import BatchMatrixMonitor
class FixedNMP(nn.Module):
    """Neural message-passing model with a fixed (non-learned-pooling)
    structure.

    Pipeline: embed per-node features, run ``iters`` message-passing layers
    over an adjacency computed from the inputs, then apply a readout.
    """

    def __init__(self,
                 features=None,     # input feature dimension per node
                 hidden=None,       # hidden dimension used throughout
                 iters=None,        # number of message-passing iterations
                 readout=None,      # key into the READOUTS registry
                 matrix=None,       # adjacency-construction spec (forwarded)
                 emb_init=None,     # embedding layer count (cast to int below)
                 mp_layer=None,     # key into the MP_LAYERS registry
                 **kwargs
                 ):
        super().__init__()
        self.iters = iters
        # Forward only the embedding-relevant kwargs; KeyError here means a
        # required kwarg ('act'/'wn') was not supplied by the caller.
        emb_kwargs = {x: kwargs[x] for x in ['act', 'wn']}
        self.embedding = EMBEDDINGS['n'](dim_in=features, dim_out=hidden, n_layers=int(emb_init), **emb_kwargs)
        # Message-passing layers additionally need 'update' and 'message'.
        mp_kwargs = {x: kwargs[x] for x in ['act', 'wn', 'update', 'message']}
        MPLayer = MP_LAYERS[mp_layer]
        self.mp_layers = nn.ModuleList([MPLayer(hidden=hidden, **mp_kwargs) for _ in range(iters)])
        Readout = READOUTS[readout]
        self.readout = Readout(hidden, hidden)
        self.adjacency_matrix = construct_adjacency(matrix=matrix, dim_in=features, dim_out=hidden, **kwargs)

    def forward(self, jets, mask=None, **kwargs):
        # NOTE(review): jets is presumably (batch, nodes, features) and mask
        # flags valid nodes -- confirm against the adjacency/MP layer code.
        h = self.embedding(jets)
        dij = self.adjacency_matrix(jets, mask=mask, **kwargs)
        # The same dij is reused across all message-passing iterations.
        for mp in self.mp_layers:
            h = mp(h=h, mask=mask, dij=dij, **kwargs)
        out = self.readout(h)
        return out
|
[
"isaachenrion@gmail.com"
] |
isaachenrion@gmail.com
|
619650f861eaee6a6fc6e373f4559eec6ede90b5
|
5963c12367490ffc01c9905c028d1d5480078dec
|
/homeassistant/components/rachio/webhooks.py
|
94c79a1504f06214aa4c2cdbc3d1318956b902b8
|
[
"Apache-2.0"
] |
permissive
|
BenWoodford/home-assistant
|
eb03f73165d11935e8d6a9756272014267d7d66a
|
2fee32fce03bc49e86cf2e7b741a15621a97cce5
|
refs/heads/dev
| 2023-03-05T06:13:30.354545
| 2021-07-18T09:51:53
| 2021-07-18T09:51:53
| 117,122,037
| 11
| 6
|
Apache-2.0
| 2023-02-22T06:16:51
| 2018-01-11T16:10:19
|
Python
|
UTF-8
|
Python
| false
| false
| 4,360
|
py
|
"""Webhooks used by rachio."""
from aiohttp import web
from homeassistant.const import URL_API
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_CLOUDHOOK_URL,
CONF_WEBHOOK_ID,
DOMAIN,
KEY_EXTERNAL_ID,
KEY_TYPE,
SIGNAL_RACHIO_CONTROLLER_UPDATE,
SIGNAL_RACHIO_RAIN_DELAY_UPDATE,
SIGNAL_RACHIO_RAIN_SENSOR_UPDATE,
SIGNAL_RACHIO_SCHEDULE_UPDATE,
SIGNAL_RACHIO_ZONE_UPDATE,
)
# Device webhook values
TYPE_CONTROLLER_STATUS = "DEVICE_STATUS"
SUBTYPE_OFFLINE = "OFFLINE"
SUBTYPE_ONLINE = "ONLINE"
SUBTYPE_OFFLINE_NOTIFICATION = "OFFLINE_NOTIFICATION"
SUBTYPE_COLD_REBOOT = "COLD_REBOOT"
SUBTYPE_SLEEP_MODE_ON = "SLEEP_MODE_ON"
SUBTYPE_SLEEP_MODE_OFF = "SLEEP_MODE_OFF"
SUBTYPE_BROWNOUT_VALVE = "BROWNOUT_VALVE"

# Rain delay values
TYPE_RAIN_DELAY_STATUS = "RAIN_DELAY"
SUBTYPE_RAIN_DELAY_ON = "RAIN_DELAY_ON"
SUBTYPE_RAIN_DELAY_OFF = "RAIN_DELAY_OFF"

# Rain sensor values
TYPE_RAIN_SENSOR_STATUS = "RAIN_SENSOR_DETECTION"
SUBTYPE_RAIN_SENSOR_DETECTION_ON = "RAIN_SENSOR_DETECTION_ON"
SUBTYPE_RAIN_SENSOR_DETECTION_OFF = "RAIN_SENSOR_DETECTION_OFF"

# Schedule webhook values
TYPE_SCHEDULE_STATUS = "SCHEDULE_STATUS"
SUBTYPE_SCHEDULE_STARTED = "SCHEDULE_STARTED"
SUBTYPE_SCHEDULE_STOPPED = "SCHEDULE_STOPPED"
SUBTYPE_SCHEDULE_COMPLETED = "SCHEDULE_COMPLETED"
SUBTYPE_WEATHER_NO_SKIP = "WEATHER_INTELLIGENCE_NO_SKIP"
SUBTYPE_WEATHER_SKIP = "WEATHER_INTELLIGENCE_SKIP"
SUBTYPE_WEATHER_CLIMATE_SKIP = "WEATHER_INTELLIGENCE_CLIMATE_SKIP"
SUBTYPE_WEATHER_FREEZE = "WEATHER_INTELLIGENCE_FREEZE"

# Zone webhook values
TYPE_ZONE_STATUS = "ZONE_STATUS"
SUBTYPE_ZONE_STARTED = "ZONE_STARTED"
SUBTYPE_ZONE_STOPPED = "ZONE_STOPPED"
SUBTYPE_ZONE_COMPLETED = "ZONE_COMPLETED"
SUBTYPE_ZONE_CYCLING = "ZONE_CYCLING"
SUBTYPE_ZONE_CYCLING_COMPLETED = "ZONE_CYCLING_COMPLETED"
SUBTYPE_ZONE_PAUSED = "ZONE_PAUSED"

# Webhook callbacks
# Event types of interest (consumed elsewhere; not referenced in this chunk).
LISTEN_EVENT_TYPES = [
    "DEVICE_STATUS_EVENT",
    "ZONE_STATUS_EVENT",
    "RAIN_DELAY_EVENT",
    "RAIN_SENSOR_DETECTION_EVENT",
    "SCHEDULE_STATUS_EVENT",
]
WEBHOOK_CONST_ID = "homeassistant.rachio:"
WEBHOOK_PATH = URL_API + DOMAIN

# Incoming event type -> dispatcher signal used to fan updates out to entities.
SIGNAL_MAP = {
    TYPE_CONTROLLER_STATUS: SIGNAL_RACHIO_CONTROLLER_UPDATE,
    TYPE_RAIN_DELAY_STATUS: SIGNAL_RACHIO_RAIN_DELAY_UPDATE,
    TYPE_RAIN_SENSOR_STATUS: SIGNAL_RACHIO_RAIN_SENSOR_UPDATE,
    TYPE_SCHEDULE_STATUS: SIGNAL_RACHIO_SCHEDULE_UPDATE,
    TYPE_ZONE_STATUS: SIGNAL_RACHIO_ZONE_UPDATE,
}
@callback
def async_register_webhook(hass, webhook_id, entry_id):
    """Register the Rachio webhook handler with Home Assistant."""

    async def _async_handle_rachio_webhook(hass, webhook_id, request):
        """Validate and dispatch one webhook payload from the Rachio cloud."""
        payload = await request.json()

        # Reject payloads whose external-id token does not match the webhook
        # auth stored for this config entry.
        try:
            claimed = payload.get(KEY_EXTERNAL_ID, "").split(":")[1]
            assert claimed == hass.data[DOMAIN][entry_id].rachio.webhook_auth
        except (AssertionError, IndexError):
            return web.Response(status=web.HTTPForbidden.status_code)

        event_type = payload[KEY_TYPE]
        if event_type in SIGNAL_MAP:
            async_dispatcher_send(hass, SIGNAL_MAP[event_type], payload)

        return web.Response(status=web.HTTPNoContent.status_code)

    hass.components.webhook.async_register(
        DOMAIN, "Rachio", webhook_id, _async_handle_rachio_webhook
    )
async def async_get_or_create_registered_webhook_id_and_url(hass, entry):
    """Return (webhook_id, webhook_url), minting and persisting them on the
    config entry the first time they are needed."""
    config = entry.data.copy()
    updated_config = False
    webhook_url = None

    webhook_id = config.get(CONF_WEBHOOK_ID)
    if not webhook_id:
        # First run: generate a webhook id and remember it in the entry data.
        webhook_id = hass.components.webhook.async_generate_id()
        config[CONF_WEBHOOK_ID] = webhook_id
        updated_config = True

    if hass.components.cloud.async_active_subscription():
        # Cloud subscription active: prefer a cloudhook URL, creating and
        # caching it on first use.
        cloudhook_url = config.get(CONF_CLOUDHOOK_URL)
        if not cloudhook_url:
            cloudhook_url = await hass.components.cloud.async_create_cloudhook(
                webhook_id
            )
            config[CONF_CLOUDHOOK_URL] = cloudhook_url
            updated_config = True
        webhook_url = cloudhook_url

    if not webhook_url:
        # No cloudhook: fall back to a locally generated webhook URL.
        webhook_url = hass.components.webhook.async_generate_url(webhook_id)

    # Persist only when something actually changed.
    if updated_config:
        hass.config_entries.async_update_entry(entry, data=config)

    return webhook_id, webhook_url
|
[
"noreply@github.com"
] |
BenWoodford.noreply@github.com
|
6abfb37f56100c04c5a8cbe2d7015c80c253cf6e
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/optimized_5276.py
|
f96515b50804aa1965f08969e7801c0d9cc5cbeb
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,580
|
py
|
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    # Older Chimera builds expose marker-set creation via the dialog object.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set
marker_sets = {}
surf_sets = {}

# (marker-set name, (x, y, z) position, (r, g, b) color); one marker per set.
# Collapsed from the generated per-marker if-blocks: same names, positions,
# colors and creation order as the original generated output.
_MARKER_SPECS = [
    ("Cog1_Anch", (454.295, 440.981, 300.65), (0, 0, 1)),
    ("Cog2_GFPN", (355.946, 424.19, 500.869), (1, 0.5, 0)),
    ("Cog2_GFPC", (412.644, 593.559, 557.081), (1, 0.5, 0)),
    ("Cog2_Anch", (623.79, 589.94, 378.073), (1, 0.5, 0)),
    ("Cog3_GFPN", (362.824, 473.038, 475.95), (1, 0.87, 0)),
    ("Cog3_GFPC", (354.356, 461.616, 487.857), (1, 0.87, 0)),
    ("Cog3_Anch", (512.882, 307.961, 572.428), (1, 0.87, 0)),
    ("Cog4_GFPN", (574.303, 418.992, 435.075), (0.97, 0.51, 0.75)),
    ("Cog4_GFPC", (595.04, 300.665, 517.279), (0.97, 0.51, 0.75)),
    ("Cog4_Anch", (538.74, 324.189, 563.835), (0.97, 0.51, 0.75)),
    ("Cog5_GFPN", (436.049, 499.418, 452.124), (0.39, 0.31, 0.14)),
    ("Cog5_GFPC", (349.642, 471.117, 512.95), (0.39, 0.31, 0.14)),
    ("Cog5_Anch", (536.204, 554.528, 366.226), (0.39, 0.31, 0.14)),
    ("Cog6_GFPN", (401.287, 486.686, 510.966), (0.6, 0.31, 0.64)),
    ("Cog6_GFPC", (409.575, 412.839, 422.354), (0.6, 0.31, 0.64)),
    ("Cog6_Anch", (450.761, 413.68, 636.658), (0.6, 0.31, 0.64)),
    ("Cog7_GFPN", (399.115, 444.379, 445.665), (0.89, 0.1, 0.1)),
    ("Cog7_GFPC", (640.451, 524.817, 544.187), (0.89, 0.1, 0.1)),
    ("Cog7_Anch", (529.327, 534.872, 611.099), (0.89, 0.1, 0.1)),
    ("Cog8_GFPC", (400.713, 509.226, 479.628), (0.3, 0.69, 0.29)),
    ("Cog8_Anch", (497.055, 607.308, 396.047), (0.3, 0.69, 0.29)),
]
_MARKER_RADIUS = 21.9005  # uniform radius used by every marker

for _name, _pos, _color in _MARKER_SPECS:
    # Mirrors the generated pattern: create the set on first sight, then
    # place a single marker into it.
    if _name not in marker_sets:
        s = new_marker_set(_name)
        marker_sets[_name] = s
    s = marker_sets[_name]
    mark = s.place_marker(_pos, _color, _MARKER_RADIUS)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
e98489d6b8cb40edbfccddb2db4baba39162bb6f
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2018_03_01/aio/operations/_metric_alerts_status_operations.py
|
1576cd774f361745d159f2234176c397ca099b72
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 7,849
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._metric_alerts_status_operations import build_list_by_name_request, build_list_request
# Generic return type and the signature of the optional per-call `cls`
# response hook (receives the pipeline response, deserialized body, and
# response headers dict).
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MetricAlertsStatusOperations:
    """Async operations for reading metric alert rule status.

    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~$(python-base-namespace).v2018_03_01.aio.MonitorManagementClient`'s
        :attr:`metric_alerts_status` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Positional args (client, config, serializer, deserializer) are
        # injected in that order by the generated client; keyword args are
        # the fallback path.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace_async
    async def list(
        self, resource_group_name: str, rule_name: str, **kwargs: Any
    ) -> _models.MetricAlertStatusCollection:
        """Retrieve an alert rule status.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param rule_name: The name of the rule. Required.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricAlertStatusCollection or the result of cls(response)
        :rtype: ~$(python-base-namespace).v2018_03_01.models.MetricAlertStatusCollection
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP status -> exception mapping; callers may extend/override it
        # via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # API version may be overridden per call; defaults to this module's.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-03-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.MetricAlertStatusCollection]

        request = build_list_request(
            resource_group_name=resource_group_name,
            rule_name=rule_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("MetricAlertStatusCollection", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}/status"}  # type: ignore

    @distributed_trace_async
    async def list_by_name(
        self, resource_group_name: str, rule_name: str, status_name: str, **kwargs: Any
    ) -> _models.MetricAlertStatusCollection:
        """Retrieve an alert rule status.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param rule_name: The name of the rule. Required.
        :type rule_name: str
        :param status_name: The name of the status. Required.
        :type status_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricAlertStatusCollection or the result of cls(response)
        :rtype: ~$(python-base-namespace).v2018_03_01.models.MetricAlertStatusCollection
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Same flow as list(), with the status name added to the route.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-03-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.MetricAlertStatusCollection]

        request = build_list_by_name_request(
            resource_group_name=resource_group_name,
            rule_name=rule_name,
            status_name=status_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list_by_name.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("MetricAlertStatusCollection", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list_by_name.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}/status/{statusName}"}  # type: ignore
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
b70ee8da1219dd9283206acf0b078fbbe721eef6
|
558157826e0d25ff8f8e1e3a99a602832b49cfae
|
/model_0/mtl_model.py
|
78ab9c906397f549608234f6815d6fb3659eb38a
|
[] |
no_license
|
xljhtq/Transfer-learning-
|
3ed5d6b8663cc852193f5b5935e7932732158910
|
44e83b30753dd92dbc5d4fae2d85aa8aaef2c82c
|
refs/heads/master
| 2020-03-22T21:45:00.927073
| 2018-08-08T06:56:24
| 2018-08-08T06:56:24
| 140,711,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,166
|
py
|
# encoding=utf8
import tensorflow as tf
from base_model import *
TASK_NUM = 2
class MTLModel():
def __init__(self,
max_len=25,
filter_sizes=1,
num_filters=1,
num_hidden=1,
word_vocab=None,
l2_reg_lambda=0.0,
learning_rate=1,
adv=True):
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
self.input_left_0 = tf.placeholder(tf.int32, [None, max_len], name="input_left_0")
self.input_right_0 = tf.placeholder(tf.int32, [None, max_len], name="input_right_0")
self.input_y_0 = tf.placeholder(tf.int32, [None, 2], name="input_y_0")
self.input_task_0 = tf.placeholder(tf.int32, name="input_task_0")
print("input_left", self.input_left_0.name)
print("input_right", self.input_right_0.name)
print("dropout_keep_prob", self.dropout_keep_prob.name)
print ("input_y", self.input_y_0.name)
print ("input_task", self.input_task_0.name)
self.input_left_1 = tf.placeholder(tf.int32, [None, max_len], name="input_left_1")
self.input_right_1 = tf.placeholder(tf.int32, [None, max_len], name="input_right_1")
self.input_y_1 = tf.placeholder(tf.int32, [None, 2], name="input_y_1")
self.input_task_1 = tf.placeholder(tf.int32, name="input_task_1")
self.adv = adv
self.word_vocab = word_vocab
self.filter_sizes = filter_sizes
self.num_filters = num_filters
self.max_len = max_len
self.num_hidden = num_hidden
self.l2_reg_lambda = l2_reg_lambda
self.learning_rate = learning_rate
wordInitial = tf.constant(word_vocab.word_vecs)
self.word_embed = tf.get_variable("word_embedding",
trainable=False,
initializer=wordInitial,
dtype=tf.float32)
self.shared_conv = ConvLayer(layer_name='conv_shared',
filter_sizes=self.filter_sizes,
num_filters=self.num_filters)
self.shared_linear = LinearLayer('linear_shared', TASK_NUM, True)
self.tensors = []
with tf.name_scope("task_0"):
self.build_task_graph(task_label=self.input_task_0,
labels=self.input_y_0,
sentence_0=self.input_left_0,
sentence_1=self.input_right_0)
with tf.name_scope("task_1"):
self.build_task_graph(task_label=self.input_task_1,
labels=self.input_y_1,
sentence_0=self.input_left_1,
sentence_1=self.input_right_1)
def build_task_graph(self,
task_label,
labels,
sentence_0,
sentence_1):
sentence_0 = tf.nn.embedding_lookup(self.word_embed, sentence_0)
sentence_1 = tf.nn.embedding_lookup(self.word_embed, sentence_1)
sentence_0 = tf.nn.dropout(sentence_0, self.dropout_keep_prob)
sentence_1 = tf.nn.dropout(sentence_1, self.dropout_keep_prob)
######## layer
conv_layer = ConvLayer(layer_name='conv_task',
filter_sizes=self.filter_sizes,
num_filters=self.num_filters)
########
conv_out_0 = conv_layer(sentence_0)
conv_out_0 = max_pool(conv_outs=conv_out_0,
max_len=self.max_len,
num_filters=self.num_filters)
conv_out_1 = conv_layer(sentence_1)
conv_out_1 = max_pool(conv_outs=conv_out_1,
max_len=self.max_len,
num_filters=self.num_filters)
task_output = tf.concat(axis=1, values=[conv_out_0, conv_out_1], name='task_output')
shared_out_0 = self.shared_conv(sentence_0)
shared_out_0 = max_pool(conv_outs=shared_out_0,
max_len=self.max_len,
num_filters=self.num_filters)
shared_out_1 = self.shared_conv(sentence_1)
shared_out_1 = max_pool(conv_outs=shared_out_1,
max_len=self.max_len,
num_filters=self.num_filters)
shared_output = tf.concat(axis=1, values=[shared_out_0, shared_out_1], name='shared_output')
if self.adv:
feature = tf.concat([task_output, shared_output], axis=1)
else:
feature = task_output
feature = tf.nn.dropout(feature, self.dropout_keep_prob)
# Map the features to 2 classes
linear = LinearLayer('linear', 2, True)
logits, loss_l2 = linear(feature)
logits_prob = tf.nn.softmax(logits, name='prob')
print ("logits_prob: ",logits_prob.name)
xentropy = tf.nn.softmax_cross_entropy_with_logits(
labels=labels,
logits=logits)
loss_ce = tf.reduce_mean(xentropy)
loss_adv, loss_adv_l2 = self.adversarial_loss(shared_output, task_label, labels)
loss_diff = self.diff_loss(shared_output, task_output)
if self.adv:
print ("Adv is True")
loss = loss_ce + 0.05 * loss_adv + self.l2_reg_lambda * (loss_l2 + loss_adv_l2) + 0.01 * loss_diff
else:
print ("Adv is False")
loss = loss_ce + self.l2_reg_lambda * loss_l2
pred = tf.argmax(logits, axis=1)
labels_2 = tf.argmax(labels, axis=1)
acc = tf.cast(tf.equal(pred, labels_2), tf.float32)
acc = tf.reduce_mean(acc)
self.tensors.append((acc, loss, loss_adv, loss_ce))
def adversarial_loss(self, feature, task_label, y_label):
'''make the task classifier cannot reliably predict the task based on
the shared feature
'''
# input = tf.stop_gradient(input)
feature = flip_gradient(feature) ## let adv_loss increasing
feature = tf.nn.dropout(feature, self.dropout_keep_prob)
# Map the features to TASK_NUM classes
logits, loss_l2 = self.shared_linear(feature)
label = tf.reshape(tf.one_hot(task_label, 2, axis=-1),
shape=[1, 2])
medium = tf.slice(y_label, begin=[0, 0], size=[-1, 1])
label = tf.matmul(tf.fill(tf.shape(medium), 1.0), label)
loss_adv = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits))
return loss_adv, loss_l2
def diff_loss(self, shared_feat, task_feat):
'''Orthogonality Constraints from https://github.com/tensorflow/models,
in directory research/domain_adaptation
'''
task_feat -= tf.reduce_mean(task_feat, 0) # 按列求得平均
shared_feat -= tf.reduce_mean(shared_feat, 0)
task_feat = tf.nn.l2_normalize(task_feat, 1) # 按行归一化
shared_feat = tf.nn.l2_normalize(shared_feat, 1)
correlation_matrix = tf.matmul(task_feat, shared_feat, transpose_a=True)
cost = tf.reduce_mean(tf.square(correlation_matrix)) * 0.01
cost = tf.where(cost > 0, cost, 0, name='value')
assert_op = tf.Assert(tf.is_finite(cost), [cost])
with tf.control_dependencies([assert_op]):
loss_diff = tf.identity(cost)
return loss_diff
# def build_train_op(self):
# self.train_ops = []
# for _, loss, _ in self.tensors:
# global_step = tf.Variable(0, name="global_step", trainable=False)
# train_op = optimize(loss,
# global_step,
# self.learning_rate)
# self.train_ops.append([train_op, global_step])
def build_train_op(self):
self.train_ops = []
for _, loss, _, _ in self.tensors:
train_op = optimize(loss, self.learning_rate)
self.train_ops.append(train_op)
|
[
"you@example.com"
] |
you@example.com
|
16928356a06c1cffc5c619e3e67d56b0eaf4414c
|
18825807a4cf373f00419e46ac70566d17115e9e
|
/top_interview_questions/easy/linked_list/merge_two_sorted_lists.py
|
029cef4d6de1b99c91d8a393ac56ff86aff1ced9
|
[] |
no_license
|
StefanRankovic/leetcode
|
51154d7297b4674c62e481c6c13016097207b4d0
|
bbed81b50acaef025186648c61110dbf65e5f6cb
|
refs/heads/master
| 2023-02-20T06:16:02.913457
| 2021-01-24T09:42:50
| 2021-01-24T09:42:50
| 266,200,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
head = current = ListNode()
while l1 and l2:
if l1.val <= l2.val:
current.next = l1
l1 = l1.next
else:
current.next = l2
l2 = l2.next
current = current.next
if l1:
current.next = l1
else:
current.next = l2
return head.next
|
[
"stefan.rankovic.89@gmail.com"
] |
stefan.rankovic.89@gmail.com
|
624993d2163e8fe840c9d4e7291e4d17025c0920
|
e47afb9557d5143da9397faa9d63003ec7b84b0d
|
/4_5_List comprehension and generator expressions/Main.py
|
0f3bb1163ed1bf5569b3eda82ee2c2ab4e9b0f01
|
[] |
no_license
|
Pavlo-Olshansky/Python_learns_1
|
feb12d6a258d98162844efec465343b7cfc14788
|
98d278198dca833887e456f09934a4b4c336bf40
|
refs/heads/master
| 2021-01-19T13:29:47.174180
| 2019-12-16T08:16:35
| 2019-12-16T08:16:35
| 82,393,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
xyz = [i for i in range(5)]
print(xyz)
xyz = (i for i in range(5)) # generator - it's in your memory now
print(xyz)
for i in xyz:
print(i)
# Generator's faster creating, but for working with them - it's longer
print('-------- 5 ----------\n')
input_list = [1, 5, 7, 10, 3, 20, 25, 12, 5]
def div_by_five(num):
if num % 5 == 0:
return True
else:
return False
xyz = (i for i in input_list if div_by_five(i)) # We sabing a memory, but it's slower
[print(i) for i in xyz]
xyz = [i for i in input_list if div_by_five(i)] # We don't sabing a memory, but it's faster
[print(i, end=' ') for i in xyz]
[[print(i, ii) for ii in range(3)] for i in range(3)]
board = ([(i+1, ii+1) for ii in range(555)] for i in range(5)) # Generator with 2 loops - must iterate
[print(i) for i in board] #like a board
# [[print(ii) for ii in i] for i in board] # in line
|
[
"pavlo.olshansky@gmail.com"
] |
pavlo.olshansky@gmail.com
|
e1480a36e4bd0529d505b4b9880353a5e348cd05
|
818c7b09d7e264c130a849f86dabaa3d43656870
|
/pycharm学习-基础篇/python模块/1.1时间模块.py
|
4938bf771a3a97e2f539239d9f336f6ae9b1c548
|
[] |
no_license
|
rishinkaku/python_course
|
b89d51b38f64248e074ba66230353b1b335475ab
|
6a2f2c175c863776a0e1125a8359a8ea97b95456
|
refs/heads/master
| 2020-03-28T11:50:20.231058
| 2018-08-29T13:01:59
| 2018-08-29T13:01:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,584
|
py
|
# 时间戮
import time
print(time.time()) # 通常时间表示从1970年1月1日00:00:00开始以秒为单位,返回的是一个float型数据
print(type(time.time())) # 通常时间表示从1970年1月1日00:00:00开始以秒为单位,返回的是一个float型数据
print(time.clock()) # 函数以浮点数计算的秒数返回当前的CPU时间,用来衡量不同程序的耗时
'''
符号 简述
%a 本地简化的星期的名称
%A 本地完整的星期的名称
%b 本地简化的月份的名称
%B 本地完整的月份的名称
%c 本地相应的日期的表示和时间表示
%d 月内的一天(0-31)
%H 24小时(0-23)
%I 12小时(0-12)
%J 年内的一天(1-365)
%m 月份(1-12)
%M 分钟(00-59)
%s 秒(00-59)
%y 两位数的年份表示(00-99)
%Y 四位数的年份表示(00-9999)
'''
'''
元组(struct time):共有9个属性
索引 属性 值
0 tm_year(年) 2018
1 tm_mon(月) 1-12
2 tm_mday(日) 1-31
3 tm_hour(时) 0-23
4 tm_min(分钟) 0-59
5 tm_sec(秒) 0-61 !
6 tm_wday(星期) 0-6 !
7 tm_yday(时) 1-366 #一年中的第几天
8 tm_isdst(是否是夏令时)默认值为-1,结果1表示是,0表示否
'''
# 1.UTC(世界协调时):格林威治天文时间,世界的标准时间 中国: UTC+8
# 1.localtime:获取当前时间的struct_time形式
print(time.localtime()) # 以元组为标准得到9个属性 #例如:time.struct_time(tm_year=2018, tm_mon=5, tm_mday=30, tm_hour=13, tm_min=44, tm_sec=27, tm_wday=2, tm_yday=150, tm_isdst=0)
# 2.获取当天的时间
import datetime
print(datetime.datetime.now()) # 当天详细时间
print(datetime.date.today()) # 当天时间
# step2:获取昨天的日期
def getTesterday():
today = datetime.date.today() # 获取当天时间
oneday = datetime.timedelta(days=1) # ays=1,表示时间跨度为1天
yesterday = today - oneday
print(yesterday)
return yesterday
yesterday = getTesterday()
print("昨天的时间为:", yesterday)
# 3.转换时间和日期的格式
import time
import datetime
def strTodaytime(datestr,format): # datestr为对应时间
return datetime.datetime.strptime(datestr,format) # datetime.datetime.strptime函数
print(time.strftime("%y-%m-%d ",time.localtime())) # time.strftime表示格式转换,"%Y-%m-%d %H:%M:%S"格式见上面详细说明 !!! 18-05-30
print(time.strftime("%y-%m-%d %H:%M:%S", time.localtime())) # time.strftime表示格式转换,"%Y-%m-%d %H:%M:%S"格式见上面详细说明 !!! 18-05-30 14:13:36
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) # time.strftime表示格式转换,"%Y-%m-%d %H:%M:%S"格式见上面详细说明 !!! 2018-05-30 14:13:36
print(strTodaytime("2014-2-16", "%Y-%m-%d")) # 结果为2014-02-16 00:00:00
print('\n')
# 4.获取日历相关的信息
import calendar
# 获取个月的日历,返回字符串的类型
cal = calendar.month(2015, 12) # 获取2015年12月的日历
print(cal)
# 获取一年的日历
cal = calendar.calendar(2018)
print(cal)
# 设置日历的第一天
calendar.setfirstweekday(calendar.SUNDAY) # 以星期天为第一列输出日历,当然默认是以星期一为第一天
cal = calendar.month(2015, 12)
print(cal)
# step2: 得到日历的HTML格式,当然这个一般是用不了的
cal = calendar.HTMLCalendar(calendar.MONDAY)
print(cal.formatmonth(2015, 12))
|
[
"1677913969@qq.com"
] |
1677913969@qq.com
|
368bcb43fae4cd5bd1b4080cb962bf41bf443ccd
|
ecb6b752523a126ef17895854b18e02df41c4cfe
|
/api_restful/user/api.py
|
419c2cc52c3f7985a8cb407d107b4cbb59b9a520
|
[
"MIT"
] |
permissive
|
zhanghe06/bearing_project
|
cd6a1b2ba509392da37e5797a3619454ca464276
|
25729aa7a8a5b38906e60b370609b15e8911ecdd
|
refs/heads/master
| 2023-05-27T17:23:22.561045
| 2023-05-23T09:26:07
| 2023-05-23T09:39:14
| 126,219,603
| 2
| 5
|
MIT
| 2022-12-08T03:11:27
| 2018-03-21T17:54:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,193
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: api.py
@time: 2020-02-28 21:26
"""
import datetime
from api_restful.databases.bearing import db_bearing
from app_common.maps.status_delete import STATUS_DEL_OK
from api_restful.models.model_bearing import User
from app_common.libs.mysql_orm_op import DbInstance
db_instance = DbInstance(db_bearing)
def get_user_row_by_id(user_id):
"""
通过 id 获取信息
:param user_id:
:return: None/object
"""
return db_instance.get_row_by_id(User, user_id)
def get_user_row(*args, **kwargs):
"""
获取信息
:param args:
:param kwargs:
:return: None/object
"""
return db_instance.get_row(User, *args, **kwargs)
def get_user_rows(*args, **kwargs):
"""
获取列表
:param args:
:param kwargs:
:return:
"""
return db_instance.get_rows(User, *args, **kwargs)
def get_user_limit_rows_by_last_id(last_pk, limit_num, *args, **kwargs):
"""
通过最后一个主键 id 获取最新信息列表
:param last_pk:
:param limit_num:
:param args:
:param kwargs:
:return:
"""
return db_instance.get_limit_rows_by_last_id(User, last_pk, limit_num, *args, **kwargs)
def add_user(user_data):
"""
添加信息
:param user_data:
:return: None/Value of user.id
:except:
"""
return db_instance.add(User, user_data)
def edit_user(user_id, user_data):
"""
修改信息
:param user_id:
:param user_data:
:return: Number of affected rows (Example: 0/1)
:except:
"""
return db_instance.edit(User, user_id, user_data)
def delete_user(user_id, force=False):
"""
删除信息
:param user_id:
:param force:
:return: Number of affected rows (Example: 0/1)
:except:
"""
if force:
return db_instance.delete(User, user_id)
else:
data = {
'status_delete': STATUS_DEL_OK,
'delete_time': datetime.datetime.now()
}
if isinstance(user_id, list):
return db_instance.update_rows(User, data, User.id.in_(user_id))
else:
return db_instance.edit(User, user_id, data)
def get_user_pagination(page=1, size=10, *args, **kwargs):
"""
获取列表(分页)
Usage:
items: 信息列表
has_next: 如果本页之后还有超过一个分页,则返回True
has_prev: 如果本页之前还有超过一个分页,则返回True
next_num: 返回下一页的页码
prev_num: 返回上一页的页码
iter_pages(): 页码列表
iter_pages(left_edge=2, left_current=2, right_current=5, right_edge=2) 页码列表默认参数
:param page:
:param size:
:param args:
:param kwargs:
:return:
"""
rows = db_instance.get_pagination(User, page, size, *args, **kwargs)
return rows
def delete_user_table():
"""
清空表
:return:
"""
return db_instance.delete_table(User)
def count_user(*args, **kwargs):
"""
计数
:param args:
:param kwargs:
:return:
"""
return db_instance.count(User, *args, **kwargs)
|
[
"zhang_he06@163.com"
] |
zhang_he06@163.com
|
cb4beb5018f9938b6d550942065e3d19aee69687
|
8fa162cddb2046cb47f3a06c72743ed67685d03a
|
/dvc/dependency/__init__.py
|
50222f02926bcad6b80fa0a1227b00b7b0ee84f1
|
[
"Apache-2.0"
] |
permissive
|
franekp/dvc
|
be9c123f03b77daa39781bd7e62fa25b9fae449f
|
e380a4a8586da643bf4e0d2281b13aee0d5e5207
|
refs/heads/master
| 2020-03-19T18:35:47.416381
| 2018-06-10T14:35:49
| 2018-06-10T14:35:49
| 136,816,230
| 0
| 0
|
Apache-2.0
| 2018-06-10T14:32:53
| 2018-06-10T14:32:52
| null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
import schema
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from dvc.exceptions import DvcException
from dvc.config import Config
from dvc.dependency.base import DependencyBase
from dvc.dependency.s3 import DependencyS3
from dvc.dependency.gs import DependencyGS
from dvc.dependency.local import DependencyLOCAL
from dvc.dependency.hdfs import DependencyHDFS
from dvc.remote import Remote
from dvc.remote.local import RemoteLOCAL
from dvc.remote.s3 import RemoteS3
from dvc.remote.gs import RemoteGS
from dvc.remote.ssh import RemoteSSH
from dvc.remote.hdfs import RemoteHDFS
DEPS = [DependencyHDFS, DependencyS3, DependencyGS, DependencyLOCAL]
DEP_MAP = {'': DependencyLOCAL,
's3': DependencyS3,
'gs': DependencyGS,
'hdfs': DependencyHDFS,}
SCHEMA = {
DependencyBase.PARAM_PATH: str,
schema.Optional(RemoteLOCAL.PARAM_MD5): schema.Or(str, None),
schema.Optional(RemoteS3.PARAM_ETAG): schema.Or(str, None),
schema.Optional(RemoteHDFS.PARAM_CHECKSUM): schema.Or(str, None),
}
def _get(stage, p, info):
parsed = urlparse(p)
if parsed.scheme == 'remote':
sect = stage.project.config._config[Config.SECTION_REMOTE_FMT.format(parsed.netloc)]
remote = Remote(stage.project, sect)
return DEP_MAP[remote.scheme](stage, p, info, remote=remote)
for d in DEPS:
if d.supported(p):
return d(stage, p, info)
raise DvcException('Dependency \'{}\' is not supported'.format(p))
def loadd_from(stage, d_list):
ret = []
for d in d_list:
p = d.pop(DependencyBase.PARAM_PATH)
ret.append(_get(stage, p, d))
return ret
def loads_from(stage, s_list):
ret = []
for s in s_list:
ret.append(_get(stage, s, {}))
return ret
|
[
"kupruser@gmail.com"
] |
kupruser@gmail.com
|
ada4e614cc5df9247d515e08a82694f458afec8b
|
4d0cf37d15fda02881c5b8dcdb1f0076bb31c778
|
/lifelike/utils.py
|
333c2bc8c859c626173cd738f634a22aa3c49bef
|
[
"MIT"
] |
permissive
|
sudoentropy/lifelike
|
b53d70e666d94fdd266ea3567aa9291baf42efdf
|
2af3a9d25e777f67648f772f805ee66d6982bab0
|
refs/heads/master
| 2023-01-19T05:28:35.152335
| 2020-11-15T02:48:30
| 2020-11-15T02:48:30
| 290,666,687
| 0
| 0
|
MIT
| 2020-08-27T03:33:52
| 2020-08-27T03:33:51
| null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
from functools import wraps
import cloudpickle as pickle
def must_be_compiled_first(function):
@wraps(function)
def f(self, *args, **kwargs):
if not self.is_compiled:
raise ValueError(
"You must run .compile first on the model before using this method."
)
return function(self, *args, **kwargs)
return f
def dump(model, filepath):
"""filepath is a string"""
with open(filepath, "wb") as f:
pickle.dump(model, f)
def load(filepath):
"""filepath is a string"""
with open(filepath, "rb") as f:
return pickle.load(f)
|
[
"cam.davidson.pilon@gmail.com"
] |
cam.davidson.pilon@gmail.com
|
256e9f2acc7001d59650a9633bf384d5377f1b05
|
66b82b6b95dd4eb85f037ed8c70a9dc0cccc16e4
|
/app/view_models/book.py
|
91e719be3070dc400a9b4ce0fd4a227e56128885
|
[] |
no_license
|
jpch89/fishbook
|
6c359b5efec88cbb414b72504a9131f924fe5f60
|
97db589046b9d59107033fa0ba9c071e63b6fcc0
|
refs/heads/master
| 2022-12-10T17:39:45.835372
| 2019-03-07T08:53:13
| 2019-03-07T08:53:13
| 144,078,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,148
|
py
|
# -*- coding: utf-8 -*-
# @Author: jpch89
# @Time: 2018/8/14 22:44
class BookViewModel:
def __init__(self, book):
self.title = book['title']
self.publisher = book['publisher']
self.author = '、'.join(book['author'])
self.image = book['image']
self.price = book['price']
self.summary = book['summary']
self.isbn = book['isbn']
self.pages = book['pages']
self.pubdate = book['pubdate']
self.binding = book['binding']
@property
def intro(self):
intros = filter(lambda x: True if x else False,
[self.author, self.publisher, self.price])
return ' / '.join(intros)
class BookCollection:
def __init__(self):
self.total = 0
self.books = []
self.keyword = ''
def fill(self, yushu_book, keyword):
self.total = yushu_book.total
self.keyword = keyword
self.books = [BookViewModel(book) for book in yushu_book.books]
class _BookViewModel:
@classmethod
def package_single(cls, data, keyword):
returned = {
'books': [],
'total': 0,
'keyword': keyword
}
if data:
returned['total'] = 1
returned['books'] = [cls.__cut_book_data(data)]
return returned
@classmethod
def package_collection(cls, data, keyword):
returned = {
'books': [],
'total': 0,
'keyword': keyword
}
if data:
returned['total'] = data['total']
returned['books'] = [cls.__cut_book_data(book) for book in data['books']]
return returned
@classmethod
def __cut_book_data(cls, data):
book = {
'title': data['title'],
'publisher': data['publisher'],
'pages': data['pages'] or '',
'author': '、'.join(data['author']),
'price': data['price'],
'summary': data['summary'] or '',
'image': data['image']
}
return book
|
[
"jpch89@outlook.com"
] |
jpch89@outlook.com
|
ea316e15a1e1e0ff59fbe6cb5228abfa0b24ffb3
|
1e013dc5f0de0f61e27f2867557803a01c01f4da
|
/Language/python/grammer/decorator/6_func_impl.py
|
63f6ff403a1a6df4d392df63c3735e914233c4a4
|
[] |
no_license
|
chengyi818/kata
|
a2941ce8675c6e7a47169a0eae4c757d3f6f5bf9
|
a7cb7ad499037bcc168aaa0eaba857b33c04ef14
|
refs/heads/master
| 2023-04-10T18:39:09.518433
| 2023-01-08T15:22:12
| 2023-01-08T15:22:12
| 53,040,540
| 1
| 0
| null | 2023-03-25T00:46:51
| 2016-03-03T10:06:58
|
C++
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
#!/usr/bin/env python3
# Author: ChengYi
# Mail: chengyi818@foxmail.cn
# created time: Thu 21 Sep 2017 10:29:02 AM CST
def make_bold(func):
print('Initialize')
def wrapper():
print('Call')
return '<b>{}</b>'.format(func())
return wrapper
def get_content():
return "hello world"
@make_bold
def get_content2():
return "hello world"
if __name__ == "__main__":
get_content = make_bold(get_content)
print(type(get_content))
print(get_content())
print(get_content2())
print(type(get_content2))
|
[
"chengyi818@foxmail.com"
] |
chengyi818@foxmail.com
|
16529f3d07c226ff0580848d78c9cdaa4cb386d7
|
c5222000bf859009850452ec7bcf1533f7bf59f7
|
/color_picker.py
|
441acc9eaccbd29186422b5dc395c9f72cf5debc
|
[] |
no_license
|
nss-day-cohort-25/07-06-orientation-classes
|
38b016995c0a6273268d07104c0db2124383592a
|
9ed6046550e7b5254f8237efab47a057b74c94de
|
refs/heads/master
| 2020-03-22T14:52:01.360486
| 2018-07-09T17:31:10
| 2018-07-09T17:31:10
| 140,211,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
class ColorPicker():
def __init__(self, primary_color, **kwargs):
self.primary_color = primary_color
for key, value in kwargs.items():
setattr(self, key + "_color", value)
def get_colors(self):
return {k: v for k, v in self.__dict__.items() if 'color' in k}
|
[
"joeshepmedia@gmail.com"
] |
joeshepmedia@gmail.com
|
775999792fcec355ac984e6f1f97f26fbf4e854b
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/2038.0_Remove_Colored_Pieces_if_Both_Neighbors_are_the_Same_Color.py
|
aa42c48cf130143120865f6675418d6da1b8c5b4
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,461
|
py
|
'''
T: O(N)
S: O(1)
执行用时:112 ms, 在所有 Python3 提交中击败了98.98% 的用户
内存消耗:15.6 MB, 在所有 Python3 提交中击败了28.57% 的用户
通过测试用例:83 / 83
'''
class Solution:
def winnerOfGame(self, colors: str) -> bool:
a, b, pre = 0, 0, None
part_a = part_b = 0
for ch in colors:
if ch == pre:
if ch == 'A':
part_a += 1
else:
part_b += 1
else:
if ch == 'A':
if part_b >= 3:
b += part_b - 2
pre = 'A'
part_a = 1
else:
if part_a >= 3:
a += part_a - 2
pre = 'B'
part_b = 1
# do not forget last part!!!
if pre == 'A' and part_a >= 3:
a += part_a - 2
if pre == 'B' and part_b >= 3:
b += part_b - 2
return a > b
'''
T: O(N)
S: O(1)
执行用时:220 ms, 在所有 Python3 提交中击败了52.04% 的用户
内存消耗:15.5 MB, 在所有 Python3 提交中击败了55.10% 的用户
通过测试用例:83 / 83
'''
class Solution:
def winnerOfGame(self, colors: str) -> bool:
a = b = 0
n = len(colors)
for i in range(n - 2): # n-3 +2
# i, i + 1, i + 2
part = colors[i: i + 3]
if part == "AAA":
a += 1
elif part == "BBB":
b += 1
return a > b
'''
执行用时:240 ms, 在所有 Python3 提交中击败了47.96% 的用户
内存消耗:15.5 MB, 在所有 Python3 提交中击败了45.92% 的用户
通过测试用例:83 / 83
'''
class Solution:
def winnerOfGame(self, colors: str) -> bool:
a = b = 0
n, c = len(colors), colors
for i in range(n - 2): # n-3 +2
# i, i + 1, i + 2
if c[i] == 'A' and c[i + 1] == 'A' and c[i + 2] == 'A':
a += 1
elif c[i] == 'B' and c[i + 1] == 'B' and c[i + 2] == 'B':
b += 1
return a > b
'''
DP, T: O(N), S: O(1)
执行用时:336 ms, 在所有 Python3 提交中击败了23.47% 的用户
内存消耗:15.6 MB, 在所有 Python3 提交中击败了36.74% 的用户
通过测试用例:83 / 83
'''
class Solution:
def winnerOfGame(self, colors: str) -> bool:
# a b
freq = [0, 0]
move = [0, 0]
c = 'C'
for ch in colors:
if ch != c:
c = ch
freq[ord(ch) - ord('A')] = 1
else:
freq[ord(ch) - ord('A')] += 1
if freq[ord(ch) - ord('A')] >= 3:
move[ord(ch) - ord('A')] += 1
return move[0] > move[1]
'''
DP, T: O(N), S: O(1)
执行用时:164 ms, 在所有 Python3 提交中击败了78.57% 的用户
内存消耗:15.6 MB, 在所有 Python3 提交中击败了31.63% 的用户
通过测试用例:83 / 83
'''
class Solution:
def winnerOfGame(self, colors: str) -> bool:
# a b
move = [0, 0]
c, cnt = 'C', 0
for ch in colors:
if ch != c:
c = ch
cnt = 1
else:
cnt += 1
if cnt >= 3:
move[ord(ch) - ord('A')] += 1
return move[0] > move[1]
|
[
"laoxing201314@outlook.com"
] |
laoxing201314@outlook.com
|
bf375adb961dbcf915e1628a1b202e385aba4351
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4422/codes/1585_842.py
|
db14692d52f1f0a69e0aaedd361b0687c76c6431
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# Teste seu codigo aos poucos.
# Nao teste tudo no final, pois fica mais dificil de identificar erros.
# Nao se intimide com as mensagens de erro. Elas ajudam a corrigir seu codigo.
x = int(input("Digite um numero: "))
print(x)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
1a4bf86142afa60772834ed02506ed40060db509
|
0b9e884be78ecc22a44a94e2c1cabefd637b9ed0
|
/Python_Talk/mpt-master/ch14/live_price.py
|
04c8d8d261bcbee41454ee1c2b57b439f4d9d005
|
[] |
no_license
|
marcovnyc/penguin-code
|
6ba3faa5f21186918e2d08f5a0fcacebb2697e56
|
a0c1f91219ff74a8bb8e9fd3375b03b667056b54
|
refs/heads/master
| 2021-12-22T15:04:26.002512
| 2021-12-16T04:01:40
| 2021-12-16T04:01:40
| 7,264,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
from yahoo_fin import stock_info as si
# Start an infinite loop
while True:
# Obtain ticker symbol from you
ticker = input("What's the ticker symbol of the stock you are looking for?\n")
# If you want to stop, type in "done"
if ticker == "done":
break
# Otherwise, type in a stock ticker symbol
else:
# Obtain live stock price from Yahoo
price = si.get_live_price(ticker)
# Print out the stock price
print(f"The stock price for {ticker} is {price}.")
|
[
"penguin@penguin.com"
] |
penguin@penguin.com
|
d6dff94c7a3c17100e31444270570019c51afe06
|
611ffe1973f843626000e2320c1d425cdf0995ca
|
/lib/modules/synthesis_decoder.py
|
cddd9d53ae22e5c5bcb03badb09275f99bd209cc
|
[] |
no_license
|
kapitsa2811/Text2Scene
|
f5b1ff803329a68f9035420481db651e5fb0cf54
|
f471771cb8e6f8e1e27296f2c535b84fa27a79ed
|
refs/heads/master
| 2020-06-11T03:07:09.853489
| 2019-06-19T05:19:53
| 2019-06-19T05:19:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,853
|
py
|
#!/usr/bin/env python
import math, cv2
import numpy as np
import torch
import torch.nn as nn
from torchvision import models
from modules.separable_convolution import same_padding_size
# from modules.bilinear_downsample import BilinearDownsample
import torch.nn.functional as F
from modules.separable_convolution import separable_conv2d
class SynthesisDecoder(nn.Module):
def __init__(self, config):
super(SynthesisDecoder, self).__init__()
self.cfg = config
h, w = config.output_image_size
if config.use_color_volume:
in_channels = 3 * config.output_vocab_size
else:
in_channels = config.output_vocab_size + 4
self.block6 = nn.Sequential(self.make_layers(512 + in_channels, [512, 512], config.use_normalization, [h//64, w//64]))
self.block5 = nn.Sequential(self.make_layers(512 + 512 + in_channels, [512, 512], config.use_normalization, [h//32, w//32]))
self.block4 = nn.Sequential(self.make_layers(512 + 512 + in_channels, [512, 512], config.use_normalization, [h//16, w//16]))
self.block3 = nn.Sequential(self.make_layers(512 + 256 + in_channels, [512, 512], config.use_normalization, [h//8, w//8]))
self.block2 = nn.Sequential(self.make_layers(512 + 256 + in_channels, [512, 512], config.use_normalization, [h//4, w//4]))
self.block1 = nn.Sequential(self.make_layers(512 + 256 + in_channels, [256, 256], config.use_normalization, [h//2, w//2]))
self.block0 = nn.Sequential(self.make_layers(256 + in_channels, [256, 256], config.use_normalization, [h, w]))
# self.block_up = nn.Sequential(self.make_layers(256 + in_channels, [128, 128], config.use_normalization, [2*h, 2*w]))
if self.cfg.use_separable_convolution:
conv2d = separable_conv2d
else:
conv2d = nn.Conv2d
self.imaging = conv2d(256, 3, 1)
self.labeling = conv2d(256, config.output_vocab_size, 1)
def make_layers(self, inplace, places, use_layer_norm=False, resolution=None):
if self.cfg.use_separable_convolution:
conv2d = separable_conv2d
else:
conv2d = nn.Conv2d
layers = []
in_channels = inplace
for v in places:
if use_layer_norm:
current_conv2d = conv2d(in_channels, v, kernel_size=3, padding=1, bias=False)
current_lnorm = nn.LayerNorm([v, resolution[0], resolution[1]])
layers.extend([current_conv2d, current_lnorm])
else:
current_conv2d = conv2d(in_channels, v, kernel_size=3, padding=1, bias=True)
layers.append(current_conv2d)
layers.append(nn.LeakyReLU(0.2, inplace=True))
in_channels = v
return nn.Sequential(*layers)
def forward(self, inputs):
    """Decode multi-scale encoder features into an image and label logits.

    Args:
        inputs: tuple (xx, x1, ..., x6) where ``xx`` is the full-resolution
            conditioning tensor and x1..x6 are feature maps from fine (1/2)
            to coarse (1/64) resolution.
            NOTE(review): shapes are inferred from the channel arithmetic in
            __init__ — confirm against the matching encoder.

    Returns:
        (image, label): RGB image rescaled to [0, 255] and per-pixel logits.
    """
    h, w = self.cfg.output_image_size
    xx, x1, x2, x3, x4, x5, x6 = inputs
    # Coarsest scale (1/64): fuse the downsampled conditioning tensor with x6.
    xx_d6 = F.interpolate(xx, size=[h//64, w//64], mode='bilinear', align_corners=True)
    x = torch.cat([xx_d6, x6], dim=1)
    x = self.block6(x)
    # Each stage below: upsample x2, re-inject the conditioning tensor and the
    # encoder skip feature at that scale, then refine with the matching block.
    x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    xx_d5 = F.interpolate(xx, size=[h//32, w//32], mode='bilinear', align_corners=True)
    x = torch.cat([xx_d5, x5, x], dim=1)
    x = self.block5(x)
    x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    xx_d4 = F.interpolate(xx, size=[h//16, w//16], mode='bilinear', align_corners=True)
    x = torch.cat([xx_d4, x4, x], dim=1)
    x = self.block4(x)
    x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    xx_d3 = F.interpolate(xx, size=[h//8, w//8], mode='bilinear', align_corners=True)
    x = torch.cat([xx_d3, x3, x], dim=1)
    x = self.block3(x)
    x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    xx_d2 = F.interpolate(xx, size=[h//4, w//4], mode='bilinear', align_corners=True)
    x = torch.cat([xx_d2, x2, x], dim=1)
    x = self.block2(x)
    x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    xx_d1 = F.interpolate(xx, size=[h//2, w//2], mode='bilinear', align_corners=True)
    x = torch.cat([xx_d1, x1, x], dim=1)
    x = self.block1(x)
    x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    # x = torch.cat([x0, x], dim=1)
    # x = self.block0(x)
    # x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    # Full resolution: xx is already at (h, w), so no resampling is needed.
    xx_d0 = xx #F.interpolate(xx, size=[h, w], mode='bilinear', align_corners=True)
    x = torch.cat([xx_d0, x], dim=1)
    x = self.block0(x)
    image = self.imaging(x)
    label = self.labeling(x)
    # Map the image head's output from [-1, 1] to [0, 255].
    image = 255.0*(image+1.0)/2.0
    return image, label
|
[
"fuwen.tan@gmail.com"
] |
fuwen.tan@gmail.com
|
25189b66364fe5afb9c41481a00735828b4ffd74
|
14f455693213cae4506a01b7d0591e542c38de79
|
/config/munin/hookbox
|
95845775bd8e21f8cfa9f7bb60dcb248538dd6b1
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Cvalladares/Newsblur_Instrumented
|
f0b14d063759973330f202108a7eed3a29bcc033
|
4d6ee6aa9713879b1e2550ea5f2dbd819c73af12
|
refs/heads/master
| 2022-12-29T15:19:29.726455
| 2019-09-03T17:09:04
| 2019-09-03T17:09:04
| 206,130,022
| 0
| 0
|
MIT
| 2022-12-10T06:00:26
| 2019-09-03T17:07:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,641
|
#!/srv/newsblur/venv/newsblur/bin/python
import os
import json
import urllib
import urllib2
from vendor.munin import MuninPlugin
class HookboxPlugin(MuninPlugin):
    """Munin plugin reporting the subscriber count of each Hookbox channel.

    Configuration comes from the environment: HOOKBOX_CHANNELS (comma
    separated channel names), HOOKBOX_URL (REST endpoint) and
    HOOKBOX_SECRET (API secret).
    """

    title = 'hookbox'
    args = "--base 1000"
    vlabel = "Y"
    # Fix: user-visible munin description was misspelled ("Subscibed").
    info = "Subscribed users"
    scale = False

    def get_channels(self):
        # Channel names to monitor, from a comma-separated env var.
        return os.environ.get('HOOKBOX_CHANNELS', '').split(',')

    def get_url(self):
        # Base URL of the Hookbox REST API.
        return os.environ.get('HOOKBOX_URL', 'http://localhost:8001/rest')

    def get_secret(self):
        # Shared secret required by the Hookbox REST API.
        return os.environ.get('HOOKBOX_SECRET', '')

    @property
    def fields(self):
        """One munin GAUGE field per monitored channel."""
        return (
            (channel, dict(
                label=channel,
                info="%s - users" % channel,
                type="GAUGE",
            ))
            for channel in self.get_channels()
        )

    def get_channel_info(self, channel_name):
        """Fetch one channel's metadata from the REST API as parsed JSON."""
        values = {
            'channel_name': channel_name,
            'secret': self.get_secret(),
        }
        req = urllib2.Request("%s/get_channel_info?%s" % (self.get_url(), urllib.urlencode(values)))
        resp = urllib2.urlopen(req)
        return json.loads(resp.read())

    def get_subscribers(self, channel_name):
        """Subscriber count for a channel, or 'U' (munin 'unknown') on failure."""
        try:
            return len(self.get_channel_info(channel_name)[1]['subscribers'])
        # Fix: replaced the legacy `except X, e` comma syntax with the plain
        # form (the bound exception was unused); valid on Python 2.6+ and 3.
        except (urllib2.URLError, KeyError):
            return 'U'

    def execute(self):
        """Return {channel_name: subscriber_count_or_'U'} for all channels."""
        return dict(
            (channel_name, self.get_subscribers(channel_name))
            for channel_name in self.get_channels()
        )
if __name__ == "__main__":
HookboxPlugin().run()
|
[
"Cvalladares4837@gmail.com"
] |
Cvalladares4837@gmail.com
|
|
870388e6f5642063a0b73de0282f49f939814e28
|
aedd3aeadfb13eda4489d26ee3d9762598878936
|
/leetcode/111. 二叉树的最小深度.py
|
70ee52bca1092dfa4ae3db932f4be11bf0763e60
|
[] |
no_license
|
AnJian2020/Leetcode
|
657e8225c4d395e8764ef7c672d435bda40584c7
|
cded97a52c422f98b55f2b3527a054d23541d5a4
|
refs/heads/master
| 2023-03-26T16:25:36.136647
| 2021-03-26T07:04:10
| 2021-03-26T07:04:10
| 283,940,538
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x        # payload
        self.left = None    # left subtree (None = absent)
        self.right = None   # right subtree (None = absent)


class Solution:
    def minDepth(self, root: TreeNode) -> int:
        """Number of nodes on the shortest root-to-leaf path (0 for an empty tree)."""
        if root is None:
            return 0
        # Recurse only into children that exist: a missing child must not
        # contribute a depth of 0 to the minimum.
        child_depths = [
            self.minDepth(child)
            for child in (root.left, root.right)
            if child is not None
        ]
        if not child_depths:
            return 1  # leaf
        return min(child_depths) + 1
|
[
"xuhao2018@foxmail.com"
] |
xuhao2018@foxmail.com
|
ef350d4222192afd3cb54baab0613d0cc93f46a1
|
d9eef8dd3489682c8db41f2311e3058d1f369780
|
/.history/abel-network-files/metis_transf_20180709120220.py
|
efc72c43810042d5e2eddbbadbc20d2d9643e947
|
[] |
no_license
|
McKenzie-Lamb/Gerrymandering
|
93fe4a49fe39a0b307ed341e46ba8620ea1225be
|
b7a7c4129d6b0fcd760ba8952de51eafa701eac3
|
refs/heads/master
| 2021-01-25T06:06:43.824339
| 2018-10-16T14:27:01
| 2018-10-16T14:27:01
| 93,526,515
| 0
| 0
| null | 2018-07-12T19:07:35
| 2017-06-06T14:17:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
# Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, using graph-tool instead of networkx

import graph_tool.all as gt
import metis

from pathlib import Path

# Paths
main_folder = Path("abel-network-files/")
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")

# Load the previously created graph and create a vertex label property map.
graph = gt.load_graph(str(data_folder / "tmp_graph100.gt"))
name = graph.new_vertex_property('string')

# Build the METIS adjacency list plus per-node weights (population counts).
adjlist = []
nodew = []
for i in graph.vertices():
    neighbors = tuple([j for j in i.all_neighbors()])
    adjlist.append(neighbors)
    nodew.append(graph.vp.data[i]['PERSONS'])

# Partition the graph into 4 parts, balancing the node weights.
metis_graph = metis.adjlist_to_metis(adjlist, nodew=nodew)
objval, parts = metis.part_graph(metis_graph, nparts=4)

# Bug fix: original read `for i range(len(parts)):` (missing `in`), which is
# a SyntaxError. Label each vertex with its assigned partition id.
for i, part in enumerate(parts):
    name[graph.vertex(i)] = part

# Bug fix: graph-tool's drawing function is `graph_draw`; `gt.draw_graph`
# does not exist.
gt.graph_draw(graph, vertex_text=name)
|
[
"gonzaleza@ripon.edu"
] |
gonzaleza@ripon.edu
|
fe487c41d5826fb6bcb3c75532dc7c84986de7df
|
0dc37ab83fc6603770ba60050fc3d46534b3ef65
|
/backend/shah_rukh_2_dev_16118/settings.py
|
f652824a018a14d25dffd321a0319273f3cde280
|
[] |
no_license
|
crowdbotics-apps/shah-rukh-2-dev-16118
|
c668cdbf67780b1c0715013725c49edad4c84e11
|
1d39dd000cee1dd28ff33a0c8837d7443bf667e6
|
refs/heads/master
| 2023-01-19T00:29:50.251660
| 2020-12-03T09:26:54
| 2020-12-03T09:26:54
| 317,791,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,035
|
py
|
"""
Django settings for shah_rukh_2_dev_16118 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shah_rukh_2_dev_16118.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shah_rukh_2_dev_16118.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
e6a1599936b8a69e0e220ace641024745c01d0ab
|
85a1d1c79c152903e48ac29f2cd066c21e825485
|
/lcbru_events/views/home.py
|
19631feef3331bd6e46cf96532475d975774e3c0
|
[
"MIT"
] |
permissive
|
LCBRU/lcbru-events
|
2fd02f196ec9cbf31f76ec2f2dff2e7c0ea7f4d3
|
97245fbc711fc1876643d23df635d82752509040
|
refs/heads/master
| 2021-06-06T19:42:43.750497
| 2016-10-27T10:40:12
| 2016-10-27T10:40:12
| 43,355,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
from flask import render_template
from lcbru_events import app


@app.route('/')
def index():
    """Render the landing page of the LCBRU events site."""
    return render_template('index.html')
|
[
"rabramley@gmail.com"
] |
rabramley@gmail.com
|
76e85db8873a136388ecef88893460abfada2771
|
a1b795c17832c1ec4a4a942b7e35c1f9d283b4db
|
/examples/timeline.py
|
a2075a79236a537b8ab42e32a2b7811b7b32b626
|
[
"MIT"
] |
permissive
|
ammunk/cassiopeia
|
016f1a79bcdb4758953625a5953ca19f3b79ccf4
|
4e77919f53a130b3f16977c76cc5f54f3868d8e5
|
refs/heads/master
| 2020-04-12T23:19:13.401624
| 2018-12-19T15:51:21
| 2018-12-19T15:51:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
import cassiopeia as cass
from cassiopeia import Summoner


def print_newest_match(name: str, account: int, id: int, region: str):
    """Print the id and timeline frame interval of the summoner's newest match."""
    summoner = Summoner(name=name, account=account, id=id, region=region)
    match_history = summoner.match_history
    # Match history is ordered newest-first, so index 0 is the latest match.
    match = match_history[0]
    print('Match ID:', match.id)
    print(match.timeline.frame_interval)


if __name__ == "__main__":
    print_newest_match(name="Kalturi", account=34718348, id=21359666, region="NA")
|
[
"jjmaldonis@gmail.com"
] |
jjmaldonis@gmail.com
|
d69062af5e4d244aba9c8bd7406683449558f838
|
66f17204c2bc83c9edbde10eda1797752a9e4e37
|
/download_data.py
|
7df33ed7924489c3e153d839d2ea478aec8cbbee
|
[
"MIT"
] |
permissive
|
lukassnoek/ICON2017
|
a698df9dc583408c8ea9893bc65b18fa1397224e
|
4797357978577317c65d8aed06d49aa842a6e653
|
refs/heads/master
| 2023-08-08T02:07:42.649391
| 2023-08-01T18:50:43
| 2023-08-01T18:50:43
| 95,113,866
| 23
| 6
| null | 2017-08-06T11:44:17
| 2017-06-22T12:35:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
""" This script downloads the data for the ICON2017 MVPA workshop
from Surfdrive (a data storage repository/drive from the
Dutch institute for IT in science/academia) using cURL,
which should be cross-platform. """
from __future__ import print_function
import subprocess
import os
import zipfile
import os.path as op
import platform
cmd = "where" if platform.system() == "Windows" else "which"
with open(os.devnull, 'w') as devnull:
res = subprocess.call([cmd, 'curl'], stdout=devnull)
if res != 0:
raise OSError("The program 'curl' was not found on your computer! "
"Either install it or download the data from surfdrive "
" (link on website)")
this_dir = op.dirname(op.realpath(__file__))
dst_dir = op.join(this_dir, 'data')
if not op.isdir(dst_dir):
os.makedirs(dst_dir)
data_file = 'https://surfdrive.surf.nl/files/index.php/s/Iv5tNOAMZTJ0WiS/download'
dst_file = op.join(dst_dir, 'data.zip')
if not op.isfile(dst_file):
print("Downloading the data ...\n")
cmd = "curl -o %s %s" % (dst_file, data_file)
return_code = subprocess.call(cmd, shell=True)
print("\nDone!")
print("Unzipping ...", end='')
zip_ref = zipfile.ZipFile(dst_file, 'r')
zip_ref.extractall(dst_dir)
zip_ref.close()
print(" done!")
os.remove(dst_file)
else:
print("Data is already downloaded and located at %s" % dst_file)
|
[
"lukassnoek@gmail.com"
] |
lukassnoek@gmail.com
|
1610566f544070fb05ac4fa174c60cccc4d70b9d
|
ae6189642a07fd789f51caadb924328a54919cac
|
/100-problems/review/minimum-spanning-tree/65-finals.py
|
e4f3b830d0c3a9a5dadb40a5102023f83f0676f9
|
[] |
no_license
|
d-matsui/atcorder
|
201e32403653b2fdf0d42188faf095eb8b793b86
|
22ec1af8206827e10a986cb24cf12acc52ab1d6a
|
refs/heads/master
| 2020-09-27T23:15:27.281877
| 2020-09-01T13:24:34
| 2020-09-01T13:24:34
| 226,632,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
#!/usr/bin/env python3
from pprint import pprint
from collections import deque, defaultdict
import itertools
import math
import sys

sys.setrecursionlimit(10 ** 6)
INF = float('inf')

# Input: N vertices, M weighted edges, K = desired number of components.
N, M, K = map(int, input().split())
edges = []
for _ in range(M):
    u, v, w = map(int, input().split())
    edges.append([u-1, v-1, w])  # convert to 0-based vertex ids


def unite(u, v):
    # Union by size; roots store the negated component size in `parents`.
    root_u = root(u)
    root_v = root(v)
    if root_u == root_v:
        return
    # assume size(root_u) >= size(root_v)
    if size(root_u) < size(root_v):
        root_u, root_v = root_v, root_u
    parents[root_u] -= size(root_v)
    parents[root_v] = root_u


def has_same_root(u, v):
    # True when u and v already belong to the same component.
    return root(u) == root(v)


def root(v):
    # Find with path compression; a negative entry marks a root.
    if parents[v] < 0:
        return v
    parents[v] = root(parents[v])
    return parents[v]


def size(v):
    # Size of v's component (roots store -size).
    return -parents[root(v)]


# Kruskal's algorithm
# Greedily take edges in increasing cost order while never forming a cycle.
# Whether adding an edge would form a cycle is decided with Union-Find.
parents = [-1] * N
res = 0
count = 0
for u, v, w in sorted(edges, key=lambda e: e[2]):
    if count == N - K:
        # Accepting N - K edges leaves exactly K connected components.
        break
    if has_same_root(u, v):
        continue
    res += w
    count += 1
    unite(u, v)
print(res)
|
[
"mti.daiki@gmail.com"
] |
mti.daiki@gmail.com
|
004c6fef77001f731bfe2d9f676ea0d9f9ab8385
|
2eae961147a9627a2b9c8449fa61cb7292ad4f6a
|
/test/test_post_business_exchange_rates_business_exchange_rate.py
|
22b1621a24d2d74a9197a5506af05ada3ce07a5b
|
[] |
no_license
|
kgr-eureka/SageOneSDK
|
5a57cc6f62ffc571620ec67c79757dcd4e6feca7
|
798e240eb8f4a5718013ab74ec9a0f9f9054399a
|
refs/heads/master
| 2021-02-10T04:04:19.202332
| 2020-03-02T11:11:04
| 2020-03-02T11:11:04
| 244,350,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
# coding: utf-8
"""
Sage Business Cloud Accounting - Accounts
Documentation of the Sage Business Cloud Accounting API. # noqa: E501
The version of the OpenAPI document: 3.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.post_business_exchange_rates_business_exchange_rate import PostBusinessExchangeRatesBusinessExchangeRate # noqa: E501
from openapi_client.rest import ApiException
class TestPostBusinessExchangeRatesBusinessExchangeRate(unittest.TestCase):
    """PostBusinessExchangeRatesBusinessExchangeRate unit test stubs"""

    def setUp(self):
        # No fixtures needed for these construction-only tests.
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test PostBusinessExchangeRatesBusinessExchangeRate
        include_option is a boolean, when False only required
        params are included, when True both required and
        optional params are included """
        # model = openapi_client.models.post_business_exchange_rates_business_exchange_rate.PostBusinessExchangeRatesBusinessExchangeRate()  # noqa: E501
        if include_optional :
            return PostBusinessExchangeRatesBusinessExchangeRate(
                currency_id = '0',
                rate = 1.337,
                inverse_rate = 1.337,
                base_currency_id = '0'
            )
        else :
            return PostBusinessExchangeRatesBusinessExchangeRate(
                currency_id = '0',
                rate = 1.337,
            )

    def testPostBusinessExchangeRatesBusinessExchangeRate(self):
        """Test PostBusinessExchangeRatesBusinessExchangeRate"""
        # Constructing the model with and without optionals must not raise.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"kevin.gray@eurekasolutions.co.uk"
] |
kevin.gray@eurekasolutions.co.uk
|
08ac7056e18731742d5bd1cc2e2715f239731609
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2351/60636/244985.py
|
6927ce260bfd2b8f832d5b9567ff1180a363ec44
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
# Reads an n-node graph given as n-1 edges, then for every node i computes the
# size of the largest connected component left after removing node i, and
# prints all (1-based) nodes that minimise that size.
n=int(input())
sources=[]
for i in range(n-1):
    # Each line: one edge as two whitespace-separated node ids.
    x=input().split(" ")
    source=[]
    for a in x:
        source.append(int(a))
    sources.append(source)
number=[]
for i in range(1,n+1):
    # Drop every edge incident to node i, then measure the components left.
    delete=sources.copy()
    for y in sources:
        if(i in y):
            delete.pop(delete.index(y))
    all_length=[]
    while(len(delete)!=0):
        # Grow one component starting from the first remaining edge.
        all=[]
        all.append(delete[0][0])
        all.append(delete[0][1])
        delete.pop(0)
        if(len(delete)==1):
            all_length.append(1)
            break
        else:
            # Single expansion pass: absorb every edge touching the nodes
            # collected so far (shadows the outer loop variable `i`, which is
            # re-assigned on the next outer iteration).
            save=delete.copy()
            for i in delete:
                if((i[0] in all)|(i[1] in all)):
                    if(not i[0] in all):
                        all.append(i[0])
                    elif(not i[1] in all):
                        all.append(i[1])
                    save.pop(save.index(i))
            delete=save
        # NOTE(review): the indentation of the next line is ambiguous in the
        # original source; it is placed inside the while-loop so each
        # extracted component's size is recorded — confirm against test data.
        all_length.append(len(all))
    number.append(max(all_length))
res=""
for i in range(len(number)):
    if(number[i]==min(number)):
        res=res+str(i+1)+" "
print(res,end="")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
7bfd897fff357e2cc168ed67c397530fca7c3742
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/choropleth/colorbar/_titleside.py
|
3de81a3fa5674ea7244e6816a53d2b3a7e0e4379
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
import _plotly_utils.basevalidators


class TitlesideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `choropleth.colorbar.titleside` attribute.

    Accepts exactly one of 'right', 'top' or 'bottom'.
    """

    def __init__(
        self,
        plotly_name='titleside',
        parent_name='choropleth.colorbar',
        **kwargs
    ):
        super(TitlesideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Edits to this attribute only require a colorbar redraw.
            edit_type='colorbars',
            role='style',
            values=['right', 'top', 'bottom'],
            **kwargs
        )
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
c2c73c9559c6f08d54ca4743125ef3d87c63a6f0
|
a6ca86b1dce35a9a59405b29e1f97e13bf1c7fc1
|
/tests/src/TestSuites/FunctionalTestSuite/Run_SchoolInfraReport.py
|
66f303df061a96feb40930453863ac3e89c44158
|
[
"MIT"
] |
permissive
|
htvenkatesh/cQube
|
44a0bfd9a281b07992f17c2d885392e62b3d0a83
|
c6e91d58fef5084b8b5962f313cd49f0400d7bfa
|
refs/heads/master
| 2022-12-04T02:40:54.425582
| 2020-08-21T07:21:47
| 2020-08-21T07:21:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
import time
from get_dir import pwd
from CRC import crc_report_functional_testing
from SI.MAP import School_Map_functional_testing
from SI.Report import School_report_functional_testing
import unittest
from HTMLTestRunner import HTMLTestRunner
from reuse_func import GetData
class MyTestSuite(unittest.TestCase):
    """Functional test driver for the cQube School Infrastructure report.

    Logs into the application once, then (only when the report page has
    data) runs the School Infra report test cases and appends the results
    to the shared HTML report.
    """

    @classmethod
    def setUpClass(self):
        # One shared browser session (and logger) for the whole suite.
        self.data = GetData()
        self.logger = self.data.get_functional_log("schoolinfrareport")
        self.driver = self.data.get_driver()
        self.data.open_cqube_appln(self.driver)
        self.data.login_cqube(self.driver)

    def test_Issue(self):
        self.data.navigate_to_school_infrastructure()
        time.sleep(2)  # allow the report page to load
        self.errMsg = self.data.get_data_status()
        if self.errMsg.text == 'No data found':
            # Nothing to test against; skip the nested suite entirely.
            print("No data in the school infrastructure report page")
        else:
            self.logger.info("school infra report execution started")
            functional_test = unittest.TestSuite()
            functional_test.addTests([
                # file name .class name
                unittest.defaultTestLoader.loadTestsFromTestCase(School_report_functional_testing.cQube_SI_Report)
            ])
            p= pwd()
            # Append this run's results to the shared functional HTML report.
            outfile = open(p.get_functional_report_path(), "a")
            runner1 = HTMLTestRunner.HTMLTestRunner(
                stream=outfile,
                title='School Infra Report Functional Test Report',
                verbosity=1,
            )
            runner1.run(functional_test)
            outfile.close()
            self.logger.info("school infra report execution ended")

    @classmethod
    def tearDownClass(self):
        self.driver.close()
if __name__ == '__main__':
unittest.main()
|
[
"laxmikanth.vattamvar@tibilsolutions.com"
] |
laxmikanth.vattamvar@tibilsolutions.com
|
c81d93a49136768a898af81f73879a9dc37451d7
|
f2ec1298c00d813c7e973cac22184ea8f54eb60c
|
/MxShop/apps/goods/migrations/0009_auto_20190526_1746.py
|
3161358484d5d2ccc8825db304559f7006c8be63
|
[] |
no_license
|
llf-1996/mx_drf
|
fcfaa028630eeb02be91af5e30fb2a200037400c
|
f4878c0d9857e7af7277d10cc32da5d9c522de0c
|
refs/heads/master
| 2020-06-05T06:51:49.629343
| 2019-06-17T13:20:29
| 2019-06-17T13:20:29
| 192,350,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-26 17:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the table backing GoodsCategoryBrand to `goods_goodsbrand`."""

    dependencies = [
        ('goods', '0008_auto_20170826_1201'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='goodscategorybrand',
            table='goods_goodsbrand',
        ),
    ]
|
[
"2367746876@qq.com"
] |
2367746876@qq.com
|
32eb948acc27bdfecd863a46de5660de37ba0f63
|
29da2ca6def1270be13a3096685a8e5d82828dff
|
/CIM14/IEC61970/Wires/RatioVariationCurve.py
|
e9599a428e11145a2c9f6036061938d3a0e6d732
|
[
"MIT"
] |
permissive
|
rimbendhaou/PyCIM
|
75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3
|
d578bb0bf1af344342bd23344385ed9c06c2d0ee
|
refs/heads/master
| 2022-04-28T01:16:12.673867
| 2020-04-16T02:19:09
| 2020-04-16T02:19:09
| 256,085,381
| 0
| 0
|
MIT
| 2020-04-16T02:15:20
| 2020-04-16T02:08:14
| null |
UTF-8
|
Python
| false
| false
| 2,481
|
py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.Curve import Curve
class RatioVariationCurve(Curve):
    """A Ratio Variation Curve describes the change in tap ratio in relationship to tap step changes. The tap step is represented using the xValue and the ratio using y1value.
    """

    def __init__(self, RatioTapChanger=None, *args, **kw_args):
        """Initialises a new 'RatioVariationCurve' instance.

        @param RatioTapChanger: A RatioVariationCurve defines tap ratio changes for a RatioTapChanger.
        """
        self._RatioTapChanger = None
        # Assign through the property so both sides of the association link up.
        self.RatioTapChanger = RatioTapChanger

        super(RatioVariationCurve, self).__init__(*args, **kw_args)

    # CIM metadata consumed by the (generated) serialisation machinery.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["RatioTapChanger"]
    _many_refs = []

    def getRatioTapChanger(self):
        """A RatioVariationCurve defines tap ratio changes for a RatioTapChanger.
        """
        return self._RatioTapChanger

    def setRatioTapChanger(self, value):
        # Maintain the one-to-one association in both directions: detach from
        # the previous tap changer, then attach to the new one.
        if self._RatioTapChanger is not None:
            self._RatioTapChanger._RatioVariationCurve = None

        self._RatioTapChanger = value
        if self._RatioTapChanger is not None:
            # NOTE(review): clearing the public attribute before writing the
            # private back-reference mirrors generated detach-then-attach
            # code — confirm against other CIM association setters.
            self._RatioTapChanger.RatioVariationCurve = None
            self._RatioTapChanger._RatioVariationCurve = self

    RatioTapChanger = property(getRatioTapChanger, setRatioTapChanger)
|
[
"rwl@thinker.cable.virginmedia.net"
] |
rwl@thinker.cable.virginmedia.net
|
33e488832c4747e6af26ad44f680379afbfb7ddf
|
3280f0c0c41e157bff2d4ad420c1b871402d6f95
|
/utils/cmfish/cmfish/middlewares.py
|
4dd368481be51d12184948c37537d4409716b44e
|
[] |
no_license
|
sui84/pytest
|
5efae6843556c541bcffed1ff4c4a38d598bca85
|
696f05ec40adb78e9a4bd7fab1eef6d324cbb9bb
|
refs/heads/master
| 2021-01-21T15:00:28.604917
| 2019-08-31T05:22:10
| 2019-08-31T05:22:10
| 95,368,315
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class CmfishSpiderMiddleware(object):
    """Default (pass-through) Scrapy spider middleware for the cmfish project.

    Every hook here keeps Scrapy's stock behaviour: inputs are accepted
    unchanged and outputs are forwarded as-is.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and subscribe to spider_opened."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Accept every response entering the spider (None = continue processing)."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward each Request/dict/Item produced by the spider unchanged."""
        for produced in result:
            yield produced

    def process_spider_exception(self, response, exception, spider):
        """Hook for spider exceptions; no special handling (falls through)."""
        pass

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests unchanged (no response attached here)."""
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        # Log which spider this middleware instance is serving.
        spider.logger.info('Spider opened: %s' % spider.name)
|
[
"sui84@126.com"
] |
sui84@126.com
|
d0eda4330f1ac653701045a4fc49b166fbca0fca
|
d6cf604d393a22fc5e071a0d045a4fadcaf128a6
|
/Enterprise/Marine/2020/2020_D.py
|
7e514768e74cacdb1d7271f0361b50aa5e7e7e78
|
[] |
no_license
|
shikixyx/AtCoder
|
bb400dfafd3745c95720b9009881e07bf6b3c2b6
|
7e402fa82a96bc69ce04b9b7884cb9a9069568c7
|
refs/heads/master
| 2021-08-03T21:06:45.224547
| 2021-07-24T11:58:02
| 2021-07-24T11:58:02
| 229,020,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
import sys
import numpy as np
from collections import defaultdict
sys.setrecursionlimit(10 ** 7)
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
# 愚直解
# TLE
N = int(readline())
VW = [0] + [list(map(int, readline().split())) for _ in range(N)]
Q = int(readline())
ul = [list(map(int, readline().split())) for _ in range(Q)]
ul_s = sorted(ul, reverse=True)
cache = [None] * (N + 1)
S = 10 ** 5 + 10
TABLE = defaultdict(int)
def solve(q, l):
q = q[::-1]
mi = -1
for i in range(len(q)):
x = q[i]
if type(cache[x]) is np.ndarray:
mi = i
else:
break
x = q[mi]
dp = cache[x] if mi != -1 else np.zeros(S, dtype=np.int64)
q = q[mi + 1 :] if mi != -1 else q
ans = calc(q, l, dp)
return ans
def calc(q, l, dp):
    """0/1-knapsack over the items at nodes ``q``, starting from row ``dp``.

    dp is indexed by weight budget; returns the best value within capacity
    ``l`` and caches the row for every node processed along the way.
    """
    for u in q:
        v, w = VW[u]
        # "Take item u": shift dp right by its weight w and add its value v.
        tmp = np.zeros(S, dtype=np.int64)
        tmp[w:] = dp[:-w] + v
        dp = np.maximum(dp, tmp)
        # cache[u] = np.copy(dp)
        # NOTE(review): cached rows share memory with dp; safe only because
        # dp is rebound (never mutated in place) on each iteration.
        cache[u] = dp
    return max(dp[: l + 1])
ans = []
for u, l in ul_s:
    # Build the path from node u up to the root (node 1); the tree is a
    # binary heap layout, so the parent of v is v // 2.
    q = []
    v = u
    while v != 1:
        q.append(v)
        v //= 2
    q.append(1)
    t = solve(q, l)
    TABLE[(u, l)] = t
# Emit the answers in the original query order.
for u, l in ul:
    ans.append(TABLE[(u, l)])
print("\n".join(map(str, ans)))
|
[
"shiki.49.313@gmail.com"
] |
shiki.49.313@gmail.com
|
0346e52eb44f0345bff729be90cefcd608a480f3
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/src/transformers/models/groupvit/__init__.py
|
d0de4a00bd15005fe974f7240b9bc6c940f5b789
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazily-declared public API: maps each submodule to the names it exports.
# Only the configuration entries are unconditional.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# Register the PyTorch model symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# Likewise for the TensorFlow implementations.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

# Static type checkers (and IDEs) see real, eager imports; at runtime the
# module object is replaced by a _LazyModule that imports submodules on
# first attribute access.
if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
[
"noreply@github.com"
] |
huggingface.noreply@github.com
|
0f7692a5b74a0bcdc450d391708db3d19733b7c8
|
0db97db08743783019efe022190f409d22ff95bd
|
/aliyun/api/rest/Rds20140815DescribeSQLLogReportListRequest.py
|
ade9beff607028d9b0aa607aa956e0d2735bff32
|
[
"Apache-2.0"
] |
permissive
|
snowyxx/aliyun-python-demo
|
8052e2a165f1b869affe632dda484d6ca203bd9b
|
ed40887ddff440b85b77f9b2a1fcda11cca55c8b
|
refs/heads/master
| 2021-01-10T03:37:31.657793
| 2016-01-21T02:03:14
| 2016-01-21T02:03:14
| 49,921,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
'''
Created by auto_sdk on 2015.08.07
'''
from aliyun.api.base import RestApi
class Rds20140815DescribeSQLLogReportListRequest(RestApi):
    """Request object for the RDS DescribeSQLLogReportList API (2014-08-15)."""

    def __init__(self, domain='rds.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; callers assign values before sending.
        for param in ('DBInstanceId', 'EndTime', 'Page',
                      'PageNumbers', 'StartTime'):
            setattr(self, param, None)

    def getapiname(self):
        """Return the fully qualified API name expected by the gateway."""
        return 'rds.aliyuncs.com.DescribeSQLLogReportList.2014-08-15'
|
[
"snowyxx@126.com"
] |
snowyxx@126.com
|
dfdef595c1fdf2280519df6b76a1384d3bfc50b8
|
61747f324eaa757f3365fd7bf5ddd53ea0db47d1
|
/casepro/cases/migrations/0041_populate_partner_users.py
|
e14bef94e0e4ce2732c88f32738b188cfc2b350d
|
[
"BSD-3-Clause"
] |
permissive
|
BlueRidgeLabs/casepro
|
f8b0eefa8f961dd2fdb5da26a48b619ebc1f8c12
|
8ef509326f3dfa80bb44beae00b60cc6c4ac7a24
|
refs/heads/master
| 2022-01-24T09:01:18.881548
| 2017-12-05T18:46:05
| 2017-12-05T18:49:42
| 113,502,588
| 0
| 0
| null | 2017-12-07T21:57:37
| 2017-12-07T21:57:37
| null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from django.db import migrations, models
def populate_partner_users(apps, schema_editor):
    """Data migration: add each profile's user to its partner's ``users`` M2M.

    Only users that still hold editor or viewer permissions in the partner's
    org are added; anyone else is reported on stdout and skipped.
    """
    # Use the historical model, as required inside migrations.
    User = apps.get_model('auth', 'User')
    for user in User.objects.exclude(profile__partner=None):
        partner = user.profile.partner
        org = partner.org
        if user in org.editors.all() or user in org.viewers.all():
            partner.users.add(user)
        else:
            print("User %s no longer has permissions in org %s to be a partner user" % (user.email, org.name))
class Migration(migrations.Migration):
    # Data-only migration: runs populate_partner_users after the
    # partner.users field was added in 0040.

    dependencies = [
        ('cases', '0040_partner_users'),
    ]

    operations = [
        migrations.RunPython(populate_partner_users)
    ]
|
[
"rowanseymour@gmail.com"
] |
rowanseymour@gmail.com
|
5c7616aab880b8627ecf408e28c5d0fd92c361c3
|
c54f5a7cf6de3ed02d2e02cf867470ea48bd9258
|
/pyobjc/PyOpenGL-2.0.2.01/src/shadow/GL.3DFX.tbuffer.0001.py
|
e9fc25dbf782313f2fe2acf197b631a13a69c4d1
|
[] |
no_license
|
orestis/pyobjc
|
01ad0e731fbbe0413c2f5ac2f3e91016749146c6
|
c30bf50ba29cb562d530e71a9d6c3d8ad75aa230
|
refs/heads/master
| 2021-01-22T06:54:35.401551
| 2009-09-01T09:24:47
| 2009-09-01T09:24:47
| 16,895
| 8
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
# This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _tbuffer
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Attribute setter shared by SWIG proxy classes (machine-generated,
    # Python 2 era).  With static=1, only attributes the class already
    # declares may be assigned; unknown names raise AttributeError.
    if (name == "this"):
        # Assigning another proxy to "this" stores the underlying C pointer
        # and transfers the ownership flag instead of the wrapper object.
        if isinstance(value, class_type):
            self.__dict__[name] = value.this
            if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
            del value.thisown
            return
    # Prefer an explicit SWIG-generated setter when one exists.
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name) or (name == "thisown"):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: always allows adding new attributes.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Look up a SWIG-generated getter for the attribute; no fallback.
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    # Python 2 raise syntax; this generated file targets Python 2 only.
    raise AttributeError,name
import types
# Ancient-Python compatibility shim: detect new-style class support.
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types


# Re-export metadata and the wrapped 3DFX t-buffer entry points from the
# compiled extension module.
__version__ = _tbuffer.__version__
__date__ = _tbuffer.__date__
__api_version__ = _tbuffer.__api_version__
__author__ = _tbuffer.__author__
__doc__ = _tbuffer.__doc__

glTbufferMask3DFX = _tbuffer.glTbufferMask3DFX
glInitTbuffer3DFX = _tbuffer.glInitTbuffer3DFX
__info = _tbuffer.__info
|
[
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] |
ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25
|
7cd1a27a27d0669662e639931e57b15c76d48382
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_roy999_main.py
|
d064a2f9d473ab5a4c2b76814eef2cbb9857e32e
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 388
|
py
|
def process(input_file, out):
    """Read a Code Jam input stream and write one "Case #i: ..." line per case."""
    case_count = int(input_file.readline())
    for case_no in range(1, case_count + 1):
        word = input_file.readline().strip()
        out.write("Case #%i: %s\n" % (case_no, solve(word)))
def solve(s):
    """Build the lexicographically largest word (CJ 2016 R1A "The Last Word").

    Scan left to right: a letter >= the current front letter goes to the
    front, otherwise it is appended to the back.
    """
    word = s[0]
    for ch in s[1:]:
        if ch < word[0]:
            word = word + ch
        else:
            word = ch + word
    return word
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
801203ff33777b339565f62d514f14301dc94b25
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/qumulo/azext_qumulo/aaz/latest/qumulo/__cmd_group.py
|
fabbbf233a1f1fca33e7e3fe0128b75b847faa29
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 594
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command_group(
    "qumulo",
)
class __CMDGroup(AAZCommandGroup):
    """Manage qumulo
    """
    # Container group for `az qumulo ...` subcommands; the docstring above is
    # surfaced as the group's CLI help text, so it mirrors the generator spec.
    pass
__all__ = ["__CMDGroup"]
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
12cdaf829a0bb0cf9396af90e035ecbb241bf6a3
|
cf19e3a857d488ca449e515f641c686c7409fa87
|
/C01-Python-Basics/13-C01P02/Solution01/solution.py
|
e603c40480ffc9c2f8355ab07cb8e5d1095d5c40
|
[
"MIT"
] |
permissive
|
CreeperBeatz/Python-101-Forever
|
c2133962c0bd50e09f58df3908c8c52234363b7b
|
5b3e8706bec84104712d96419210a1e266b4d518
|
refs/heads/master
| 2023-06-07T07:53:23.796406
| 2021-07-05T14:19:33
| 2021-07-05T14:19:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# Video - https://youtu.be/PmW8NfctNpk
def sum_of_digits(n):
    """Return the sum of the decimal digits of ``n`` (sign is ignored)."""
    return sum(int(digit) for digit in str(abs(n)))
# Ad-hoc test table: (input, expected digit sum).
tests = [
    (1325132435356, 43),
    (123, 6),
    (6, 6),
    (-10, 1)
]

# Prints True for each passing case.
for n, expected in tests:
    result = sum_of_digits(n)
    print(result == expected)
|
[
"radorado@hacksoft.io"
] |
radorado@hacksoft.io
|
0c5769fd45ee518599738a13853fe563e251f03f
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/typer-cli-python/source_code_step_3/rptodo/cli.py
|
89531fcd5bf31d59d49aa457dba1661b7d555049
|
[
"MIT"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
"""This module provides the RP To-Do CLI."""
from pathlib import Path
from typing import Optional
import typer
from rptodo import ERRORS, __app_name__, __version__, config, database
app = typer.Typer()
@app.command()
def init(
    db_path: str = typer.Option(
        str(database.DEFAULT_DB_FILE_PATH),
        "--db-path",
        "-db",
        prompt="to-do database location?",
    ),
) -> None:
    """Initialize the to-do database."""
    # Record the chosen database path in the app's config file first;
    # ERRORS maps error codes to human-readable messages.
    app_init_error = config.init_app(db_path)
    if app_init_error:
        typer.secho(
            f'Creating config file failed with "{ERRORS[app_init_error]}"',
            fg=typer.colors.RED,
        )
        raise typer.Exit(1)
    # Then create the (empty) to-do database file itself.
    db_init_error = database.init_database(Path(db_path))
    if db_init_error:
        typer.secho(
            f'Creating database failed with "{ERRORS[db_init_error]}"',
            fg=typer.colors.RED,
        )
        raise typer.Exit(1)
    else:
        typer.secho(f"The to-do database is {db_path}", fg=typer.colors.GREEN)
def _version_callback(value: bool) -> None:
    # Eager --version callback: print the version and stop option parsing.
    if value:
        typer.echo(f"{__app_name__} v{__version__}")
        raise typer.Exit()
@app.callback()
def main(
    version: Optional[bool] = typer.Option(
        None,
        "--version",
        "-v",
        help="Show the application's version and exit.",
        callback=_version_callback,
        # Eager: handled before any command, so `--version` works alone.
        is_eager=True,
    )
) -> None:
    # Top-level callback exists only to host the global --version option.
    return
|
[
"lpozor78@gmail.com"
] |
lpozor78@gmail.com
|
5f91e1a1b7cb3ff8938287fb6fb8a0b3e3c9cd66
|
b4e4399f6d18ee83760604fc67c90d3f5eac52dd
|
/10 Days of Statistics/Day4.BinomialDistributionI.py
|
165bbc7384022eb86ad2468c05a50783bcab3f4c
|
[] |
no_license
|
angelvv/HackerRankSolution
|
88415c3ace68ddc10c76ae8df43ab5193aa921d4
|
8b2c323507f9a1826b4156aeab94815f41b6fc84
|
refs/heads/master
| 2021-07-17T20:51:50.758364
| 2020-05-26T17:25:05
| 2020-05-26T17:25:05
| 167,896,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
boy,girl = list(map(float, input().split())) # or =[float(x) for x in input().split()]
pBoy = boy/(boy+girl)
# hard code calculation
p0Boy = (1-pBoy)**6
p1Boy = 6 * pBoy * (1-pBoy)**5
p2Boy = 6*5/2 * (pBoy**2) * ((1-pBoy)**4)
pAtLeast3Boy = 1 - p0Boy - p1Boy - p2Boy
"""#Alternatively
def factorial(n):
return 1 if n==0 else n * factorial(n-1)
def nChoosek(n, k):
return factorial(n) / (factorial(k) * factorial(n - k))
def binomial(n, k, p):
return nChoosek(n, k) * (p**k) * ((1 - p)**(n - k))
pAtLeast3Boy = 1 - binomial(6, 0, pBoy) - binomial(6, 1, pBoy) - binomial(6, 2, pBoy)
"""
print(round(pAtLeast3Boy,3))
|
[
"angel.huang90@gmail.com"
] |
angel.huang90@gmail.com
|
70b366de4381df49308e12f0fad8a1925a18c1a7
|
b6bb53780f3d186ccbc6a900a977a766b6ac4ffb
|
/doc/src/slides/src/solver.py
|
3783db7b97e638d13cef6d46ee58fd929429c1bc
|
[
"BSD-3-Clause"
] |
permissive
|
wolf9s/doconce
|
1fa91766cad77dd16debade99e48954cfc7b6dee
|
0c7fecb267502a74cdeb7d90100cd2bdc0701cc1
|
refs/heads/master
| 2021-01-17T15:44:51.703555
| 2015-09-28T07:59:41
| 2015-09-28T07:59:41
| 43,336,393
| 1
| 0
| null | 2015-09-29T01:02:48
| 2015-09-29T01:02:48
| null |
UTF-8
|
Python
| false
| false
| 4,512
|
py
|
from numpy import *
from matplotlib.pyplot import *
import sys
def solver(I, a, T, dt, theta):
    """Solve u' = -a*u, u(0) = I, on (0, T] with the theta-rule, step dt.

    theta = 0 gives Forward Euler, 1 Backward Euler, 0.5 Crank-Nicolson.
    Returns (u, t): solution values and the time mesh.
    """
    dt = float(dt)                   # guard against integer division
    num_steps = int(round(T / dt))   # number of time intervals
    T = num_steps * dt               # adjust T to fit the time step
    t = linspace(0, T, num_steps + 1)
    u = zeros(num_steps + 1)
    u[0] = I
    # Every step multiplies by the same amplification factor, so hoist it.
    factor = (1 - (1 - theta) * a * dt) / (1 + theta * dt * a)
    for n in range(num_steps):
        u[n + 1] = factor * u[n]
    return u, t
def exact_solution(t, I, a):
    """Exact solution u(t) = I*exp(-a*t) of u' = -a*u with u(0) = I."""
    return I * exp(-a * t)
def explore(I, a, T, dt, theta=0.5, makeplot=True):
    """
    Run a case with the solver, compute error measure,
    and plot the numerical and exact solutions (if makeplot=True).
    Returns E, the dt-weighted discrete L2 norm of the error.
    """
    u, t = solver(I, a, T, dt, theta)   # Numerical solution
    u_e = exact_solution(t, I, a)
    e = u_e - u
    # Error norm: sqrt(dt * sum(e^2)).
    E = sqrt(dt*sum(e**2))
    if makeplot:
        figure()                        # create new plot
        t_e = linspace(0, T, 1001)      # very fine mesh for u_e
        u_e = exact_solution(t_e, I, a)
        plot(t, u, 'r--o')              # red dashes w/circles
        plot(t_e, u_e, 'b-')            # blue line for u_e
        legend(['numerical', 'exact'])
        xlabel('t')
        ylabel('u')
        title('Method: theta-rule, theta=%g, dt=%g' % (theta, dt))
        # File name encodes the scheme (FE/BE/CN) and the step size.
        theta2name = {0: 'FE', 1: 'BE', 0.5: 'CN'}
        savefig('%s_%g.png' % (theta2name[theta], dt))
        show()
    return E
def define_command_line_options():
    """Build and return the argparse parser for the decay-solver CLI."""
    import argparse
    p = argparse.ArgumentParser()
    p.add_argument('--I', '--initial_condition', type=float, default=1.0,
                   help='initial condition, u(0)', metavar='I')
    p.add_argument('--a', type=float, default=1.0,
                   help='coefficient in ODE', metavar='a')
    p.add_argument('--T', '--stop_time', type=float, default=1.0,
                   help='end time of simulation', metavar='T')
    # Boolean flag: presence on the command line turns plotting on.
    p.add_argument('--makeplot', action='store_true',
                   help='display plot or not')
    # One or more time steps, collected into args.dt_values.
    p.add_argument('--dt', '--time_step_values', type=float, default=[1.0],
                   help='time step values', metavar='dt', nargs='+',
                   dest='dt_values')
    return p
def read_command_line(use_argparse=True):
    """Parse CLI arguments via argparse or raw sys.argv (Python 2 syntax)."""
    if use_argparse:
        parser = define_command_line_options()
        args = parser.parse_args()
        print 'I={}, a={}, makeplot={}, dt_values={}'.format(
            args.I, args.a, args.makeplot, args.dt_values)
        return args.I, args.a, args.T, args.makeplot, args.dt_values
    else:
        # Positional fallback: I a T on/off dt1 dt2 ...
        if len(sys.argv) < 6:
            print 'Usage: %s I a on/off dt1 dt2 dt3 ...' % \
                  sys.argv[0]; sys.exit(1)
        I = float(sys.argv[1])
        a = float(sys.argv[2])
        T = float(sys.argv[3])
        makeplot = sys.argv[4] in ('on', 'True')
        dt_values = [float(arg) for arg in sys.argv[5:]]
        return I, a, T, makeplot, dt_values
def main():
    """Run all three theta schemes over the given dt values and report
    pairwise convergence rates.  Returns {theta: [rates]} (Python 2 print)."""
    I, a, T, makeplot, dt_values = read_command_line()
    r = {}
    for theta in 0, 0.5, 1:
        E_values = []
        for dt in dt_values:
            E = explore(I, a, T, dt, theta, makeplot=False)
            E_values.append(E)
        # Compute convergence rates:
        # r_i = ln(E_{i-1}/E_i) / ln(dt_{i-1}/dt_i)
        m = len(dt_values)
        r[theta] = [log(E_values[i-1]/E_values[i])/
                    log(dt_values[i-1]/dt_values[i])
                    for i in range(1, m, 1)]
    for theta in r:
        print '\nPairwise convergence rates for theta=%g:' % theta
        print ' '.join(['%.2f' % r_ for r_ in r[theta]])
    return r
def verify_convergence_rate():
    """Check observed convergence rates against theory (FE/BE: 1, CN: 2)."""
    rates = main()
    tol = 0.1
    expected_rates = {0: 1, 1: 1, 0.5: 2}
    # The final (finest-mesh) rate must be within tol of the expected order.
    return all(abs(expected_rates[theta] - rates[theta][-1]) <= tol
               for theta in rates)
if __name__ == '__main__':
    # "verify_rates" mode: check convergence orders instead of just running.
    if 'verify_rates' in sys.argv:
        sys.argv.remove('verify_rates')
        # Rates need at least two dt values, so --dt is mandatory here.
        if not '--dt' in sys.argv:
            print 'Must assign several dt values through the --dt option'
            sys.exit(1)  # abort
        if verify_convergence_rate():
            pass
        else:
            print 'Bug in the implementation!'
    else:
        # Perform simulations
        main()
|
[
"hpl@simula.no"
] |
hpl@simula.no
|
624ad33267ce9d85d2f3e6466b07a72980e4d01d
|
c8cee25ecb60ca3e6ce5e24c37db57f82f9858f6
|
/Vision Artificial/Emparejamiento.py
|
272fabf738a5fc92fc5a35e7e1cfddf092898c0c
|
[] |
no_license
|
mecomontes/Python
|
a0b4a0b69ae33ad3623e908731710563392d1615
|
daba4247cca90c43a979e3e3f292cd7b8951b3d0
|
refs/heads/master
| 2023-05-30T05:24:41.999196
| 2020-03-23T02:30:09
| 2020-03-23T02:30:09
| 249,317,310
| 1
| 0
| null | 2023-05-22T22:42:36
| 2020-03-23T02:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,283
|
py
|
""" Emparejamiento de plantillas
Teoría
El emparejamiento de plantillas (o template matching en inglés) es un método para buscar y encontrar la ubicación de una imagen de
plantilla en una imagen más grande. OpenCV viene con la función cv2.matchTemplate() para este propósito. Esta función, simplemente,
desliza la imagen de la plantilla sobre la imagen de entrada (como en la convolución 2D) y en cada punto compara la plantilla con
la porción correspondiente de la imagen de entrada. En OpenCV están implementados varios métodos de comparación. La función
devuelve una imagen en escala de grises, donde cada píxel indica cuánto coincide el entorno de ese píxel con la plantilla.
Si la imagen de entrada es de tamaño (WxH) y la imagen de la plantilla es de tamaño (wxh), la imagen de salida tendrá un tamaño de
(W-w + 1, H-h + 1). Una vez que obtenga el resultado, puede usar la función cv2.minMaxLoc() para encontrar dónde está el valor
máximo / mínimo. El valor máximo/ mínimo corresponde a la esquina superior izquierda del rectángulo con ancho w y alto h. Ese
rectángulo será la región de la imagen de entrada que mejor coincide con la plantilla.
Nota: Si está utilizando cv2.TM_SQDIFF como método de comparación, el valor mínimo dará la mejor coincidencia.
Emparejamiento de plantillas en OpenCV
A continuación se comparará el desempeño de diferentes métodos de emparejamiento de la función cv2.matchTemplate(), para encontrar
la cara de un hombre entre los granos de café:
A continuación se muestra el código que hace esto:"""
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Tutorial: compare all six cv2.matchTemplate() methods on the same pair.
img = cv2.imread('cafe.jpeg',0)
img2 = img.copy()
template = cv2.imread('template.png',0)
w, h = template.shape[::-1]  # template size (width, height)

# All the 6 methods for comparison in a list
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
           'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']

for meth in methods:
    img = img2.copy()
    # NOTE(review): eval() on this hard-coded, trusted list is safe, but
    # getattr(cv2, name) would be preferable if the list were user-supplied.
    method = eval(meth)

    # Apply template matching.
    res = cv2.matchTemplate(img,template,method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # For TM_SQDIFF / TM_SQDIFF_NORMED the best match is the MINIMUM.
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # Draw the best-match rectangle on the input image.
    cv2.rectangle(img,top_left, bottom_right, 255, 10)

    plt.subplot(121),plt.imshow(res,cmap = 'gray')
    plt.title('Resultado del emparejamiento'), plt.xticks([]), plt.yticks([])
    plt.subplot(122),plt.imshow(img,cmap = 'gray')
    plt.title('Punto detectado'), plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)

    plt.show()
"""En este caso se observa que los seis métodos dan resultados similares. Sin embargo, esto puede variar dependiendo de la imagen
y la plantilla en particular. Nótese que en los cuatro primeros gráficos a la izquierda el punto de máxima coincidencia es blanco
(correspondiente con un máximo) mientras que, con los últimos dos métodos el punto de máxima coincidencia es negro (correspondiente
con un mínimo)
Emparejamiento de plantillas con múltiples objetos
En la sección anterior, buscamos en la imagen la cara de un hombre, que aparece solo una vez en la imagen. Supongamos que está
buscando un objeto que tiene múltiples ocurrencias, cv2.minMaxLoc() no le dará todas las ubicaciones. En ese caso, fijaremos un
valor umbral por encima (o por debajo,dependiendo del método que usemos) del cual se asumirá que el objeto en la plantilla coincide
con el objeto en la imagen. A continuación un ejemplo, en el que se muestra una captura de pantalla del famoso juego Mario.
Utilizaremos el método explicado para encontrar todas las monedas."""
# Multiple-object matching: find every coin in a Mario screenshot.
img_rgb = cv2.imread('mario.jpeg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('moneda.jpeg',0)
w, h = template.shape[::-1]

res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
# Keep every location whose normalized correlation is at least the threshold.
umbral = 0.8
loc = np.where( res >= umbral)
# np.where gives (rows, cols); reverse to (x, y) and draw a red box per hit.
for pt in zip(*loc[::-1]):
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)

cv2.imwrite('res.png',img_rgb)
|
[
"1574@holbertonschool.com"
] |
1574@holbertonschool.com
|
13576721467bab9704a406a6cae793fe3350b13e
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/Exercise_for_Job/华为牛客网/华为.py
|
e16a1edaa5afa95c959bd857bca6a52cf8e8b8f3
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742
| 2019-11-13T08:31:57
| 2019-11-13T08:31:57
| 191,085,178
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
# Huawei OJ: split each input string into 8-character chunks, zero-pad the
# final chunk to length 8, sort the chunks, and print them space-separated.

data = input().split()  # data[0] is the token count; data[1:] are the strings
str_data = []


def constr(s):
    """Right-pad ``s`` with '0' characters to a length of exactly 8."""
    # Bug fix: the original ignored its parameter (named `str`, shadowing the
    # builtin) and read the loop variable `i` from module scope instead — it
    # only worked because it was always called as constr(i).
    return s + '0' * (8 - len(s))


for i in data[1:]:
    if len(i) <= 8:
        str_data.append(constr(i))
    else:
        # Longer strings are cut into full 8-char chunks first; the
        # remainder (possibly empty) is padded like a short string.
        while len(i) > 8:
            str_data.append(i[0:8])
            i = i[8:]
        str_data.append(constr(i))

# NOTE(review): sorts only by the FIRST character (stable sort preserves the
# original relative order for ties) — behavior kept from the original.
sort_data = sorted(str_data, key=lambda x: x[0])

# Join with single spaces; the original emits a trailing space, kept here.
str_out = ''
for ele in sort_data:
    str_out = str_out + ele + ' '
print(str_out)
|
[
"864773190@qq.com"
] |
864773190@qq.com
|
ebb69f7ee17d733cf4f31b161a6e6a4d3c547dc5
|
23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9
|
/app/login/model/manager.py
|
5528fcd8f4ce6716526ba111e2d8e93647621f0f
|
[] |
no_license
|
Cuick/traversing
|
210fcfb1c780037de59343fffeb4fa4d3f2eae32
|
c78982580af7f63c8bff4dcb37005b7f7c682b5b
|
refs/heads/master
| 2021-01-10T17:38:37.899460
| 2016-11-18T06:06:55
| 2016-11-18T06:06:55
| 55,397,540
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
# -*- coding:utf-8 -*-
"""
created by sphinx on 18/9/14.
"""
import os
import json
import time
account_cache = {}
class ServerManager(object):
def __init__(self):
self._servers = {}
self._is_static = False
if os.path.exists('server_list.json'):
sl = json.load(open('server_list.json'))
self._is_static = True
for _ in sl:
self._servers[_['name']] = _
print 'static server list:', self._servers
def sync_server(self, name, ip, port, status, no):
if not self._is_static:
server = dict(name=name, ip=ip, port=port, status=status, no=no)
for k, v in self._servers.items():
if v.get('name') == name:
del self._servers[k]
self._servers[time.time()] = server
return True
self._servers[time.time()] = server
def get_server(self):
if not self._is_static:
for t in self._servers.keys():
if time.time() - t > 180:
del self._servers[t]
return self._servers.values()
server_manager = ServerManager()
|
[
"zxzxck@163.com"
] |
zxzxck@163.com
|
63cbc8726893fed727dd884f838445cd7a2fd8e0
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/s3api_read_1/bucket-cor_get.py
|
906435f85b7feabef6ef4ae7f3baf051c9a51d1e
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import read_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-cors.html
if __name__ == '__main__':
    """
    delete-bucket-cors : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/delete-bucket-cors.html
    put-bucket-cors : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/put-bucket-cors.html
    """
    # Help text shown when prompting for the single required parameter.
    parameter_display_string = """
    # bucket : The bucket name for which to get the cors configuration.
    """
    add_option_dict = {}

    #######################################################################
    # setting option use
    # ex: add_option_dict["setting_matching_parameter"] = "--owners"
    # ex: add_option_dict["setting_key"] = "owner_id"
    #######################################################################
    # single parameter
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    #######################################################################
    # parameter display string
    add_option_dict["parameter_display_string"] = parameter_display_string

    # Delegate to the shared one-parameter runner: prompts for "bucket" and
    # invokes `aws s3api get-bucket-cors`.
    read_one_parameter("s3api", "get-bucket-cors", "bucket", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
b34c147b369988436ae350599d980ba016babbb4
|
dd65b9bc9475a6cc58817fd45c078e5a6abae241
|
/Tensorflow/car/web-tf2/gcf-packs/tensorflow2.0/source/tensorflow/_api/v2/compat/v1/test/__init__.py
|
bb58509c61e8cafbbf2c339d984a1ae2bc2e3996
|
[] |
no_license
|
jumbokh/gcp_class
|
5b68192ab4ad091362d89ad667c64443b3b095bb
|
0a8e2663bfb5b01ce20146da178fa0c9bd7c6625
|
refs/heads/master
| 2021-10-22T09:22:04.634899
| 2021-10-21T12:46:10
| 2021-10-21T12:46:10
| 228,617,096
| 8
| 7
| null | 2021-08-25T15:55:30
| 2019-12-17T12:58:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,438
|
py
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Testing.
See the [Testing](https://tensorflow.org/api_guides/python/test) guide.
Note: `tf.test.mock` is an alias to the python `mock` or `unittest.mock`
depending on the python version.
"""
from __future__ import print_function as _print_function
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import assert_equal_graph_def_v1 as assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available
from tensorflow.python.ops.gradient_checker import compute_gradient
from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.platform.benchmark import TensorFlowBenchmark as Benchmark
from tensorflow.python.platform.benchmark import benchmark_config
from tensorflow.python.platform.googletest import StubOutForTesting
from tensorflow.python.platform.googletest import mock
from tensorflow.python.platform.test import get_temp_dir
from tensorflow.python.platform.test import is_built_with_cuda
from tensorflow.python.platform.test import main
from tensorflow.python.platform.test import test_src_dir_path
del _print_function
|
[
"jumbokh@gmail.com"
] |
jumbokh@gmail.com
|
bf244d1b65a6a4561dbd18468710c9bbcc05e83c
|
3529ecaa44a53172094ba13498097057c8972723
|
/Questiondir/655.print-binary-tree/655.print-binary-tree_112661405.py
|
c0358586c049dfc21db112211b47cf96f501e3f7
|
[] |
no_license
|
cczhong11/Leetcode-contest-code-downloader
|
0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6
|
db64a67869aae4f0e55e78b65a7e04f5bc2e671c
|
refs/heads/master
| 2021-09-07T15:36:38.892742
| 2018-02-25T04:15:17
| 2018-02-25T04:15:17
| 118,612,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 655 "Print Binary Tree": render a tree into a 2-D string grid.

    A tree of height h produces h rows of 2^h - 1 columns; each node's value
    is centered within the column span allotted to its subtree.
    """

    def ht(self, root):
        """Return the height (number of levels) of the tree; 0 for None."""
        if root is None:
            return 0
        return 1 + max(self.ht(root.left), self.ht(root.right))

    def helper(self, root, level, start, end):
        """Place root.val at the middle of columns [start, end] of this level,
        then recurse into the left and right halves."""
        if root is None:
            return
        # Bug fix: use floor division; plain '/' yields a float under
        # Python 3, which raises TypeError when used as a list index.
        mid = (end + start) // 2
        self.res[level][mid] = str(root.val)
        self.helper(root.left, level + 1, start, mid - 1)
        self.helper(root.right, level + 1, mid + 1, end)

    def printTree(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[str]]
        """
        h = self.ht(root)
        # Grid width for height h is 2^h - 1.
        w = 1
        for i in range(1, h):
            w = (w * 2 + 1)
        self.res = [['' for _ in range(w)] for _ in range(h)]
        self.helper(root, 0, 0, w - 1)
        return self.res
|
[
"tczhong24@gmail.com"
] |
tczhong24@gmail.com
|
901fc421da3ad2c486a8789dd60bf1740f10dabf
|
8da76aabcf9cfea3478f56037edbb5fa1513140b
|
/tallsmall/production/tallsmall/account/.svn/text-base/models.py.svn-base
|
0b5425bbac404be079a32c07f10089f307f8dc28
|
[] |
no_license
|
mikanyman/.virtualenvs-legacy
|
039479f31f2ca9f9a3d3544d8837429ddd0a7492
|
5486128b5b3b7ddb9ec81d43e3bb601a23b4025a
|
refs/heads/master
| 2020-12-31T07:10:07.018881
| 2017-02-01T02:16:55
| 2017-02-01T02:16:55
| 80,566,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,390
|
"""
#http://www.turnkeylinux.org/blog/django-profile
#http://docs.django-userena.org/en/latest/installation.html
from django.contrib.auth.models import User
from userena.models import UserenaLanguageBaseProfile
from django.db import models
#class UserProfile(models.Model):
class UserProfile(UserenaLanguageBaseProfile):
user = models.ForeignKey(User, unique=True)
url = models.URLField("Website", blank=True)
company = models.CharField(max_length=50, blank=True)
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
"""
# from django-userena demo_project
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from userena.models import UserenaLanguageBaseProfile
import datetime
class Profile(UserenaLanguageBaseProfile):
    """ Default profile """
    GENDER_CHOICES = (
        (1, _('Male')),
        (2, _('Female')),
    )
    user = models.OneToOneField(User,
                                unique=True,
                                verbose_name=_('user'),
                                related_name='profile')
    gender = models.PositiveSmallIntegerField(_('gender'),
                                              choices=GENDER_CHOICES,
                                              blank=True,
                                              null=True)
    # NOTE(review): URLField(verify_exists=...) was deprecated in Django 1.4
    # and removed in 1.5 — this model targets an older Django release.
    website = models.URLField(_('website'), blank=True, verify_exists=True)
    location = models.CharField(_('location'), max_length=255, blank=True)
    birth_date = models.DateField(_('birth date'), blank=True, null=True)
    about_me = models.TextField(_('about me'), blank=True)

    @property
    def age(self):
        """Age in full years, or False when birth_date is unset."""
        if not self.birth_date: return False
        else:
            today = datetime.date.today()
            # Raised when birth date is February 29 and the current year is not a
            # leap year.
            try:
                birthday = self.birth_date.replace(year=today.year)
            except ValueError:
                # Shift Feb 29 to a nearby valid day in non-leap years.
                day = today.day - 1 if today.day != 1 else today.day + 2
                birthday = self.birth_date.replace(year=today.year, day=day)
            # Subtract one year if this year's birthday is still ahead.
            if birthday > today: return today.year - self.birth_date.year - 1
            else: return today.year - self.birth_date.year
|
[
"mika.nyman@synapse-computing.com"
] |
mika.nyman@synapse-computing.com
|
|
e08a02cbe98519c266ea2415228cd8b1314f6563
|
7cd2c3868d83be96f2699eeed4f6f4ae9dbf3a35
|
/programmers/DFSorBFS/타겟넘버.py
|
3e62a03c143dbc24cf8dad5323f0f1826310106e
|
[] |
no_license
|
Kimyechan/codingTestPractice
|
4189e97f8543b9afc87374539acb5d1cecf40ce6
|
c9d3878eb0d47fab22151fc0d39eef1dfd2210b5
|
refs/heads/master
| 2023-06-09T14:34:55.984710
| 2021-06-27T18:30:45
| 2021-06-27T18:30:45
| 282,819,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
import sys
import copy
sys.setrecursionlimit(100000)
operatorList = []
# def dfs(operators, count):
# if len(operators) == count:
# operatorList.append(copy.deepcopy(operators))
# return
#
# for x in [1, -1]:
# operators.append(x)
# dfs(operators, count)
# operators.pop()
#
#
# def solution(numbers, target):
# answer = 0
# count = len(numbers)
#
# operators = []
#
# dfs(operators, count)
# for operation in operatorList:
# result = 0
# for i in range(len(operation)):
# result += operation[i] * numbers[i]
# if result == target:
# answer += 1
#
# return answer
answer = 0  # module-level accumulator shared with dfs()


def dfs(numbers, target, sum, index):
    """Recursively try +numbers[index] and -numbers[index].

    Increments the global ``answer`` each time a fully-signed sequence
    sums to ``target``.  (``sum`` shadows the builtin; kept for API
    compatibility with existing callers.)
    """
    global answer
    if index == len(numbers):
        if sum == target:
            answer += 1
        return
    dfs(numbers, target, sum + numbers[index], index + 1)
    dfs(numbers, target, sum - numbers[index], index + 1)


def solution(numbers, target):
    """Return the number of ways to sign the numbers (+/-) to reach target.

    BUG FIX: reset the global counter first — previously a second call to
    solution() accumulated on top of the previous call's result.
    """
    global answer
    answer = 0
    dfs(numbers, target, 0, 0)
    return answer


print(solution([1, 1, 1, 1, 1], 3))
|
[
"vlvkcjswo7@naver.com"
] |
vlvkcjswo7@naver.com
|
0f054b6e23fbaf3c471d841c202f72d1f2244345
|
62c523b000e43b41bcb2bc96259f2e0136e8548f
|
/src/data/data.py
|
703cef216a5a30549d909773faa9f1b5d06e41ca
|
[] |
no_license
|
OlofHarrysson/pytorch-foundation
|
d7e0bd6d4bdf52bcf65d0c6e370e8e78ee0219cd
|
6632e5260302669c458dde28be47c03fed052e53
|
refs/heads/master
| 2021-10-07T22:46:05.674105
| 2021-01-27T19:20:22
| 2021-01-27T19:20:22
| 212,659,773
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
from torchvision import datasets
from torch.utils.data import DataLoader
from collections import namedtuple
from anyfig import get_config
from ..transforms import get_train_transforms, get_val_transforms
from ..utils.meta_utils import get_project_root
def setup_dataloaders():
    """Build the train and validation loaders, bundled in a named tuple."""
    Loaders = namedtuple('Dataloaders', ['train', 'val'])
    train_loader = setup_trainloader()
    val_loader = setup_valloader()
    return Loaders(train=train_loader, val=val_loader)
def setup_trainloader():
    """Build the shuffled CIFAR-10 training DataLoader (config-driven sizes)."""
    transforms = get_train_transforms()
    dataset_dir = get_project_root() / 'datasets'
    dataset = MyCifar10(dataset_dir, transforms, train=True)
    return DataLoader(dataset,
                      batch_size=get_config().batch_size,
                      num_workers=get_config().num_workers,
                      shuffle=True)
def setup_valloader():
    """Build the CIFAR-10 validation DataLoader (deterministic order, no shuffle)."""
    transforms = get_val_transforms()
    dataset_dir = get_project_root() / 'datasets'
    dataset = MyCifar10(dataset_dir, transforms, train=False)
    return DataLoader(dataset,
                      batch_size=get_config().batch_size,
                      num_workers=get_config().num_workers)
class MyCifar10(datasets.CIFAR10):
    """CIFAR-10 that applies a caller-supplied transform at access time.

    The base dataset is constructed without built-in transforms (downloading
    if necessary); ``self.transforms`` is applied manually per item.
    """

    def __init__(self, path, transforms, train=True):
        super().__init__(path, train, download=True)
        self.transforms = transforms

    def __getitem__(self, index):
        # Apply our transform to the image the base class returns; labels pass through.
        im, label = super().__getitem__(index)
        return self.transforms(im), label
|
[
"harrysson.olof@gmail.com"
] |
harrysson.olof@gmail.com
|
51b475ef145277d90a9da2e2b9fd07047a6f501b
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/classification/LResNet100E-IR/prof.py
|
7c2e0f01a90ddcd74354f146f728d0b9b6cc1964
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,277
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import argparse
from tqdm import tqdm
import torch
from torch import nn
from torch import optim
import apex
from apex import amp
from model import Backbone, Arcface
from utils import separate_bn_paras
def get_data(args):
    """Create one random batch of fake 112x112 RGB images and binary labels."""
    batch = args.batch
    images = torch.rand((batch, 3, 112, 112), dtype=torch.float32)
    labels = torch.randint(2, (batch,), dtype=torch.long)
    return images, labels
class NewModel(nn.Module):
    """Backbone (100-layer, 'ir_se' mode) plus Arcface head in one module."""

    def __init__(self):
        super(NewModel, self).__init__()
        self.backbone = Backbone(num_layers=100, drop_ratio=0.6, mode='ir_se')
        self.head = Arcface(embedding_size=512, classnum=85742)

    def forward(self, images, labels):
        # The Arcface head consumes the labels together with the embeddings.
        embeddings = self.backbone(images)
        thetas = self.head(embeddings, labels)
        return thetas
def prepare_args():
    """Parse the profiling CLI flags (-device, -amp, -batch).

    BUG FIX: ``-amp`` was declared ``type=str`` with ``default=True``, so any
    command-line value — including "False" — produced a truthy string.  A
    small string-to-bool converter keeps the default behaviour while making
    "-amp False" actually disable amp.
    """
    def _str2bool(value):
        # argparse only applies type= to command-line strings, never to the default.
        if isinstance(value, bool):
            return value
        return value.strip().lower() not in ('false', '0', 'no', 'off', '')

    parser = argparse.ArgumentParser(description='get prof')
    parser.add_argument("-device", help="device", default='cuda:0', type=str)
    parser.add_argument("-amp", help="use amp", default=True, type=_str2bool)
    parser.add_argument("-batch", help="batch size", default=256, type=int)
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # 640.982ms
    args = prepare_args()
    device = torch.device(args.device)
    # Bind this process to the requested NPU or CUDA device.
    if 'npu' in args.device:
        torch.npu.set_device(device)
    else:
        torch.cuda.set_device(device)
    # model
    model = NewModel()
    model = model.to(device)
    print('model head create over ')
    # optimizer: BN parameters get no weight decay; everything else (plus the
    # Arcface kernel) uses 5e-4.
    paras_only_bn, paras_wo_bn = separate_bn_paras(model.backbone)
    if 'npu' in args.device and args.amp:
        # Fused SGD is required for combine_grad amp on NPU.
        optimizer = apex.optimizers.NpuFusedSGD([
            {'params': paras_wo_bn + [model.head.kernel], 'weight_decay': 5e-4},
            {'params': paras_only_bn}
        ], lr=0.001, momentum=0.9)
    else:
        optimizer = optim.SGD([
            {'params': paras_wo_bn + [model.head.kernel], 'weight_decay': 5e-4},
            {'params': paras_only_bn}
        ], lr=0.001, momentum=0.9)
    print('optimizer create over')
    # loss function
    loss_func = nn.CrossEntropyLoss().to(device)
    # amp setting (combine_grad is an NPU-only apex extension)
    if 'npu' in args.device and args.amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=128.0, combine_grad=True)
    elif 'cuda' in args.device and args.amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=128.0)
    print('start warm up train')
    # warm up train: 5 throwaway steps so the profile below excludes one-time costs
    for _ in tqdm(range(5)):
        imgs, labels = get_data(args)
        imgs = imgs.to(device)
        labels = labels.to(device)
        thetas = model(imgs, labels)
        loss = loss_func(thetas, labels)
        optimizer.zero_grad()
        if args.amp:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
    print('start get prof')
    # get prof: profile exactly one full training step
    if "npu" in args.device:
        k_v = {'use_npu': True}
    else:
        k_v = {'use_cuda': True}
    with torch.autograd.profiler.profile(**k_v) as prof:
        imgs, labels = get_data(args)
        imgs = imgs.to(device)
        labels = labels.to(device)
        thetas = model(imgs, labels)
        loss = loss_func(thetas, labels)
        optimizer.zero_grad()
        if args.amp:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
    # print(prof.key_averages().table(sort_by="self_cpu_time_total"))
    prof.export_chrome_trace("output.prof")  # "output.prof" is the output file path
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
f676137f1b3df9dd44c03457f8de846a6d8ac76e
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/4187084/snippet.py
|
47fc8edb16f6917914298e0cefd3eb24d7a13d52
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,729
|
py
|
# Name: EXIFmover.py
# Author: Brian Klug (@nerdtalker / brian@brianklug.org)
# Purpose:
# Move Files into directory based on EXIF data make and model
# Designed to un-clusterfuck the Dropbox camera upload directory which is a mess of every
# JPEG and PNG ever if you use it like I do on a bunch of phones, and thus totally unwieldy
# and full of images sorted by date or else nothing sometimes, dropbox seems nondeterminstic
# Moves files into /[Image Make]+[Image Model]/ eg /Camera Uploads/LGE Nexus 4/
# Creates directory if it doesn't exist, moves into that directory if it exists
# Files without EXIF get moved into /nomake nomodel (EG screenshots / nonsense) except exifmover/exif.py
# This is experimental and one-way in a destructive sense, I take no responsibility
# if this absolutely destroys your directory structure for some reason
# I STRONGLY recommend making a copy of Camera Uploads, then running this on the copy, first
# Requires EXIF-PY to be installed and importable
# EXIF-PY can be obtained from https://github.com/ianare/exif-py
# Previous implementation used EXIF.py standalone, updated to work with installable version
# Run simply (eg from ipython "run exifmover.py" inside "Camera Upload")
# Tested on OS 10.8.2 and Python 2.7.3 EPD
# Tested on Windows XP and Python 2.7.3 EPD
# Tested on Ubuntu 11.10
# NOTE(review): Python 2 source (print statements) — do not run under Python 3.
try:
    import exifread
except:
    # NOTE(review): bare except, and execution continues with exifread unbound;
    # process_file below would then raise NameError.
    print "exifread was not found in the same directory as exifmover.py"

import os
import time

start_time=time.time()
path = os.getcwd()
dirList=os.listdir(path)
# Files never moved (the tool itself, OS metadata files).
excludedfiles = ["EXIF.py","EXIFmover.py","exifmover.py","thumbs.db",".DS_Store","EXIF.pyc"]
for fname in dirList:
    if os.path.isfile(fname):
        if fname not in excludedfiles:
            print "File name is " + fname
            f = open(fname)
            try:
                tags = exifread.process_file(f)
            except:
                # NOTE(review): on failure, `tags` keeps the PREVIOUS file's value
                # (or is unbound on the first file) — the next lookups may misfile.
                print "Couldn't read tag on " + fname
            # Missing EXIF fields fall back to "nomake nomodel".
            try:
                make = tags['Image Make'].printable
            except: make = 'nomake'
            try:
                model = tags['Image Model'].printable
            except: model = 'nomodel'
            src = path + "/" + fname
            #print "source is " + src
            # Destination directory is "<Make> <Model>" under the current path.
            dst = path + "/" + make + " " + model + "/"
            #print "destination is " + dst
            if os.path.isdir(dst) == False:
                os.mkdir(dst)
                #print "made" + dst
            destination = dst+fname
            # Close before rename so Windows allows the move.
            f.close()
            try:
                os.rename(src,destination)
            except:
                print "Oh noes. That didn't work for some reason"

print 'Done. Execution took {:0.3f} seconds'.format((time.time() - start_time))
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
6cc8562d0421b4e61baf344ba161216a74b334cb
|
e802ed36cbfb55b87654b8aa7932ae2fc2ae7d43
|
/u05ps02q03.py
|
46947989451ecf5e42a777c1666b562aa1a090d4
|
[] |
no_license
|
maryammouse/cs101
|
29f493ab421117fb9bc038da4de7c5bdc29ca7ac
|
6c15855c7cdc24972a0ff370d417c5de8278ce9c
|
refs/heads/master
| 2016-09-01T22:34:16.016963
| 2014-05-26T21:54:49
| 2014-05-26T21:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,251
|
py
|
# Write a procedure, rotate which takes as its input a string of lower case
# letters, a-z, and spaces, and an integer n, and returns the string constructed
# by shifting each of the letters n steps, and leaving the spaces unchanged.
# Note that 'a' follows 'z'. You can use an additional procedure if you
# choose to as long as rotate returns the correct string.
# Note that n can be positive, negative or zero.
# I love being able to use code I've defined already! Love not
# starting from scratch :)
def shift_n_letters(letter, n):
    """Shift a lower-case letter n positions, wrapping around the alphabet.

    Any character below 'a' (spaces, per the exercise) is returned as a
    space, matching the original behaviour.  GENERALIZATION: the modulo
    handles any integer n; the original branch arithmetic produced
    non-letters for |n| >= 26.
    """
    if ord(letter) < 97:  # not a lower-case letter; the exercise sends spaces here
        return ' '
    return chr(97 + (ord(letter) - 97 + n) % 26)
def rotate(word, n):
    """Rotate every letter of word by n positions; spaces pass through unchanged."""
    return ''.join(shift_n_letters(ch, n) for ch in word)
# Demo calls (Python 2 print statements); expected outputs in the comments.
print 'coralee' + 'sings'
print rotate ('sarah', 13)
#>>> 'fnenu'
print rotate('fnenu',13)
#>>> 'sarah'
print rotate('dave',5)
#>>>'ifaj'
print rotate('ifaj',-5)
#>>>'dave'
print rotate(("zw pfli tfuv nfibj tfiivtkcp pfl jyflcu "
              "sv rscv kf ivru kyzj"),-17)
#>>> ???
|
[
"stellanova@Maryams-MacBook-Air.local"
] |
stellanova@Maryams-MacBook-Air.local
|
fbee9da62d0a7ff55bff70fe12034d97b7805070
|
55ceefc747e19cdf853e329dba06723a44a42623
|
/_CodeTopics/LeetCode_contest/weekly/weekly2021/248-[smallweek]/248_1.py
|
0810c9fe1f89962d294e01c337639653707396fc
|
[] |
no_license
|
BIAOXYZ/variousCodes
|
6c04f3e257dbf87cbe73c98c72aaa384fc033690
|
ee59b82125f100970c842d5e1245287c484d6649
|
refs/heads/master
| 2023-09-04T10:01:31.998311
| 2023-08-26T19:44:39
| 2023-08-26T19:44:39
| 152,967,312
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
class Solution(object):
    def buildArray(self, nums):
        """
        Build ans where ans[i] = nums[nums[i]].

        :type nums: List[int]
        :rtype: List[int]
        """
        # nums is a permutation of 0..n-1, so nums[value] is always in range.
        return [nums[value] for value in nums]
"""
https://leetcode-cn.com/submissions/detail/192094213/
134 / 134 个通过测试用例
状态:通过
执行用时: 32 ms
内存消耗: 13.1 MB
"""
|
[
"noreply@github.com"
] |
BIAOXYZ.noreply@github.com
|
71b966c7a8456e8f3cde13f79f2955b28d5a2c91
|
d9b5fc6e35e56e182fe1bfe9bafd2562a5d9cf33
|
/bluefly/areadetector_sim.py
|
9e8a4cac9d449257fc4dc3b75dcff4ded04a82b3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
untzag/bluefly
|
6406ef19b573c05a52bcd6cc53c27c5db1ca5cdf
|
5f461998a3f629a5f07e8733ab937a0302fa92f6
|
refs/heads/master
| 2022-12-16T13:52:30.621420
| 2020-09-17T11:12:06
| 2020-09-17T11:12:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,981
|
py
|
import asyncio
import h5py
import numpy as np
from bluesky.run_engine import get_bluesky_event_loop
from bluefly.areadetector import DetectorDriver, HDFWriter
from bluefly.motor import MotorDevice
from bluefly.simprovider import SimProvider
def make_gaussian_blob(width: int, height: int) -> np.ndarray:
    """Return a (height, width) Gaussian blob with values in 0..1, peaked at the centre."""
    xs = np.linspace(-1, 1, width)
    ys = np.linspace(-1, 1, height)
    grid_x, grid_y = np.meshgrid(xs, ys)
    distance = np.sqrt(grid_x * grid_x + grid_y * grid_y)
    return np.exp(-(distance ** 2))
def interesting_pattern(x: float, y: float) -> float:
    """This function is interesting in x and y in range -10..10, returning
    a float value in range 0..1
    """
    ripples = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)
    return 0.5 + ripples / 2
DATA_PATH = "/entry/data/data"
UID_PATH = "/entry/uid"
SUM_PATH = "/entry/sum"
def sim_detector_logic(
    p: SimProvider,
    driver: DetectorDriver,
    hdf: HDFWriter,
    x: MotorDevice,
    y: MotorDevice,
    width: int = 320,
    height: int = 240,
):
    """Wire simulated detector behaviour onto the provider's start/stop/flush hooks.

    Registers async callbacks on ``p`` that emulate an areaDetector driver:
    opening an SWMR HDF5 file, writing one frame per acquisition whose
    intensity depends on the x/y motor positions, and flushing/closing.
    """
    stopping = asyncio.Event(loop=get_bluesky_event_loop())
    # The detector image we will modify for each image (0..255 range)
    blob = make_gaussian_blob(width, height) * 255
    # Shared between the closures below; set by do_hdf_start.
    hdf_file = None
    p.set_value(driver.array_size_x, width)
    p.set_value(driver.array_size_y, height)

    @p.on_call(hdf.start)
    async def do_hdf_start():
        # File path comes from the writer's template, e.g. "%s/%s.h5".
        file_path = p.get_value(hdf.file_template) % (
            p.get_value(hdf.file_path),
            p.get_value(hdf.file_name),
        )
        nonlocal hdf_file
        hdf_file = h5py.File(file_path, "w", libver="latest")
        # Data written in a big stack, growing in that dimension
        hdf_file.create_dataset(
            DATA_PATH,
            dtype=np.uint8,
            shape=(1, height, width),
            maxshape=(None, height, width),
        )
        for path, dtype in {UID_PATH: np.int32, SUM_PATH: np.float64}.items():
            # Areadetector attribute datasets have the same dimensionality as the data
            hdf_file.create_dataset(
                path, dtype=dtype, shape=(1, 1, 1), maxshape=(None, 1, 1), fillvalue=-1
            )
        # SWMR lets readers follow the file while we keep writing.
        hdf_file.swmr_mode = True

    @p.on_call(driver.start)
    async def do_driver_start():
        stopping.clear()
        # areaDetector drivers start from array_counter + 1
        offset = p.get_value(driver.array_counter) + 1
        exposure = p.get_value(driver.acquire_time)
        period = p.get_value(driver.acquire_period)
        for i in range(p.get_value(driver.num_images)):
            try:
                # See if we got told to stop; doubles as the per-frame delay.
                await asyncio.wait_for(stopping.wait(), period)
            except asyncio.TimeoutError:
                # Carry on
                pass
            else:
                # Stop now
                break
            uid = i + offset
            # Resize the datasets so they fit
            for path in (DATA_PATH, SUM_PATH, UID_PATH):
                ds = hdf_file[path]
                expand_to = tuple(max(*z) for z in zip((uid + 1, 1, 1), ds.shape))
                ds.resize(expand_to)
            # Frame brightness follows the motors (see interesting_pattern),
            # scaled by the exposure duty cycle.
            intensity = interesting_pattern(
                p.get_value(x.motor.readback), p.get_value(y.motor.readback)
            )
            detector_data = (blob * intensity * exposure / period).astype(np.uint8)
            hdf_file[DATA_PATH][uid] = detector_data
            hdf_file[UID_PATH][uid] = uid
            hdf_file[SUM_PATH][uid] = np.sum(detector_data)
            p.set_value(hdf.array_counter, p.get_value(hdf.array_counter) + 1)

    @p.on_call(hdf.flush_now)
    async def do_hdf_flush():
        # Note that UID comes last so anyone monitoring knows the data is there
        for path in (DATA_PATH, SUM_PATH, UID_PATH):
            hdf_file[path].flush()

    @p.on_call(hdf.stop)
    async def do_hdf_close():
        hdf_file.close()

    @p.on_call(driver.stop)
    async def do_driver_stop():
        # Wakes the wait in do_driver_start, ending the acquisition loop.
        stopping.set()
|
[
"tom.cobb@diamond.ac.uk"
] |
tom.cobb@diamond.ac.uk
|
35be7f5c28be793d70e91298fc6b79f1a31dec25
|
64cf985225d14e1954ed91e5a261a465a44b0cc5
|
/mirror/mirror/settings.py
|
225f4f100303392553e2236166673a8b2600eb68
|
[] |
no_license
|
pinewoods/back-to-game
|
dee3c76e85186c86c6feaa0bd56635c1e460e6f0
|
824734bbd2e235886e3cb8e30d587949896a0d7e
|
refs/heads/master
| 2021-01-21T12:26:50.102778
| 2015-05-13T14:13:37
| 2015-05-13T14:13:37
| 34,489,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,808
|
py
|
"""
Django settings for mirror project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6!0h@jqz0cuc80p*tahm!q4j7kc=^zl8*)j!n*yh^@!w!(j==y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'sync_control',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mirror.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mirror.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',),
}
|
[
"vido@usp.br"
] |
vido@usp.br
|
92d63691e4e9d10e1eea6a25f16402a6962731df
|
c61a28aba19f7cdf9a5127e8a782bf115c265e70
|
/apps/recruitpro/recruitpro/recruitpro/doctype/territory/test_territory.py
|
8009c125ad483c92f2292298cee52bee7615b914
|
[
"MIT"
] |
permissive
|
sharmilaviji/RecruitPRO-NEW
|
fa72c8fc00f469a41798b1047c11dcc470fbc495
|
dcfaedebe56b45acd6ddcab7e24c939b853a2c8c
|
refs/heads/master
| 2021-05-26T12:14:12.611154
| 2020-04-27T04:40:50
| 2020-04-27T04:40:50
| 254,125,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, teampro and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestTerritory(unittest.TestCase):
    # Placeholder test case for the Territory doctype; no assertions yet.
    pass
|
[
"sharmiviji1997@gmail.com"
] |
sharmiviji1997@gmail.com
|
43e980b35ac84de26ba65cce540a4d5a8ca11f20
|
6d2cf861c46230de97d5244b7915057419f8125d
|
/sdk/cognitiveservices/azure-cognitiveservices-language-textanalytics/samples/async_samples/sample_recognize_entities_async.py
|
6111e4ec8f8009664718b89662fd6c86bef2616e
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
pombredanne/azure-sdk-for-python
|
075bfef712c014445bacdef4dd05aacd82673dcc
|
ebd73c3fc22dcf17be2a903f32bdd95d9090f283
|
refs/heads/master
| 2020-10-01T15:56:00.475346
| 2019-12-11T21:31:24
| 2019-12-11T21:31:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,464
|
py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_entities_async.py
DESCRIPTION:
This sample demonstrates how to recognize named entities in a batch of documents.
USAGE:
python sample_recognize_entities_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your cognitive services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your text analytics subscription key
OUTPUT:
Document text: Microsoft was founded by Bill Gates and Paul Allen.
Entity: Microsoft Type: Organization Confidence Score: 1.0
Entity: Bill Gates Type: Person Confidence Score: 1.0
Entity: Paul Allen Type: Person Confidence Score: 1.0
Document text: I had a wonderful trip to Seattle last week.
Entity: Seattle Type: Location Confidence Score: 0.806
Entity: last week Type: DateTime Confidence Score: 0.8
Document text: I visited the Space Needle 2 times.
Entity: Space Needle Type: Organization Confidence Score: 0.922
Entity: 2 Type: Quantity Confidence Score: 0.8
"""
import os
import asyncio
class RecognizeEntitiesSampleAsync(object):
    """Async samples for the Text Analytics recognize-entities operation."""

    # Credentials come from the environment; both must be set before running.
    endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
    key = os.getenv("AZURE_TEXT_ANALYTICS_KEY")

    async def recognize_entities_async(self):
        """Recognize entities in a plain-string batch and print each entity."""
        # [START batch_recognize_entities_async]
        from azure.cognitiveservices.language.textanalytics.aio import TextAnalyticsClient
        text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint, credential=self.key)
        documents = [
            "Microsoft was founded by Bill Gates and Paul Allen.",
            "I had a wonderful trip to Seattle last week.",
            "I visited the Space Needle 2 times.",
        ]

        async with text_analytics_client:
            result = await text_analytics_client.recognize_entities(documents)

        docs = [doc for doc in result if not doc.is_error]

        # NOTE(review): if any document errors out, `docs` and `documents`
        # indexes drift apart and the printed text may not match — verify.
        for idx, doc in enumerate(docs):
            print("\nDocument text: {}".format(documents[idx]))
            for entity in doc.entities:
                print("Entity: \t", entity.text, "\tType: \t", entity.type,
                      "\tConfidence Score: \t", round(entity.score, 3))
        # [END batch_recognize_entities_async]

    async def alternative_scenario_recognize_entities_async(self):
        """This sample demonstrates how to retrieve batch statistics, the
        model version used, and the raw response returned from the service.

        It additionally shows an alternative way to pass in the input documents
        using a list[TextDocumentInput] and supplying your own IDs and language hints along
        with the text.
        """
        from azure.cognitiveservices.language.textanalytics.aio import TextAnalyticsClient
        text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint, credential=self.key)

        documents = [
            {"id": "0", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
            {"id": "1", "language": "de", "text": "I had a wonderful trip to Seattle last week."},
            {"id": "2", "language": "es", "text": "I visited the Space Needle 2 times."},
        ]

        extras = []

        def callback(resp):
            # Collect the service metadata surfaced through the response hook.
            extras.append(resp.statistics)
            extras.append(resp.model_version)
            extras.append(resp.raw_response)

        async with text_analytics_client:
            result = await text_analytics_client.recognize_entities(
                documents,
                show_stats=True,
                model_version="latest",
                response_hook=callback
            )
async def main():
    """Run both recognize-entities demos one after the other."""
    demo = RecognizeEntitiesSampleAsync()
    await demo.recognize_entities_async()
    await demo.alternative_scenario_recognize_entities_async()
if __name__ == '__main__':
    # asyncio.run() creates, runs and closes the event loop itself;
    # get_event_loop()/run_until_complete for this purpose is deprecated.
    asyncio.run(main())
|
[
"noreply@github.com"
] |
pombredanne.noreply@github.com
|
7c13f5d302cee4d4a134bad26d56721e3d3d4450
|
fb2a07bbf368076b83e31639c4152799fcccfdcd
|
/siteEtu/accountCreator.py
|
bac6f408ceb7f1c017bd05300e040e40f5634235
|
[] |
no_license
|
Trymal/TrombinoscopeCorrec
|
ae696473593c6d01b2533765c037e436f84fea98
|
cebcfffa7df5a45d60da125b11523f933c0341e2
|
refs/heads/master
| 2022-07-08T08:21:07.441738
| 2020-05-10T15:51:48
| 2020-05-10T15:51:48
| 257,204,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
import random
import hashlib
from setup import *
def saveAccount(account):
    """Append one pre-formatted CSV account line to the accounts file."""
    with open("./comptes.csv", "a") as out:
        out.write(account)
nb_comptes = 150

for nb in range(nb_comptes):
    prenom = random.choice(PRENOMS)
    nom = random.choice(NOMS)
    filiere = random.choice(FILIERES)
    groupe = random.choice(GROUPES[filiere])
    mail = (prenom + "." + nom + "@gmail.com").lower()
    # Password is first letter of the surname + first name, lower-cased.
    mdp = (nom[0] + prenom).lower().encode()
    # BUG FIX: the original reused ONE sha256 object and called update() in the
    # loop, so each stored digest covered all previous passwords concatenated,
    # making every hash after the first unverifiable.  Hash independently.
    digest = hashlib.sha256(mdp).hexdigest()
    compte = "{};{};{};{};{};{};{}\n".format(nom, prenom, mail, filiere, groupe, digest,
                                             DIR_PP)
    saveAccount(compte)
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
df11f48a42b4597d6517b5f0ba783ccce171f5c3
|
382df78024f588acea08039a0b0a9e24f297b6a3
|
/python/pandas/ewma.py
|
bee90fab01dfaa6c378c98d0f5f613642413a4a8
|
[] |
no_license
|
id774/sandbox
|
c365e013654790bfa3cda137b0a64d009866d19b
|
aef67399893988628e0a18d53e71e2038992b158
|
refs/heads/master
| 2023-08-03T05:04:20.111543
| 2023-07-31T14:01:55
| 2023-07-31T14:01:55
| 863,038
| 4
| 1
| null | 2020-03-05T06:18:03
| 2010-08-26T01:05:11
|
TeX
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# BUG FIX: pd.stats.moments.ewma was removed from pandas (deprecated 0.18,
# gone by 0.23); Series.ewm(...).mean() is the supported equivalent.
x = list(range(1, 50)) + list(range(50, 0, -1))
ma = pd.Series(x).ewm(span=15).mean()

plt.plot(x, linewidth=1.0)
plt.plot(ma, linewidth=1.0)
# BUG FIX: save BEFORE show() — show() can consume/clear the current figure,
# so the original savefig wrote a blank image.
plt.savefig("image.png")
plt.show()
|
[
"idnanashi@gmail.com"
] |
idnanashi@gmail.com
|
0836dc0604cbb303c5dd9456b4ff69711997d47a
|
a4c04117685c3d28dd60bdfc45654cb2c935f746
|
/rasterio_example.py
|
e5c89ffdf395eb38ede6fd20f40df78b4fb1d5d6
|
[] |
no_license
|
DKnapp64/General_Python_Codes
|
1ca40779bb381d526d61c5d5fedcc76ae797c590
|
8d4669c82c17455640a0a3123f92760cd65cc26a
|
refs/heads/main
| 2023-02-28T05:55:46.018482
| 2021-02-01T21:55:16
| 2021-02-01T21:55:16
| 335,077,354
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
import rasterio
import numpy
with rasterio.drivers():
    with rasterio.open('baikal_subset.tif') as src:
        # Read all five bands; only the first two are used below.
        b1, b2, b3, b4, b5 = src.read()
        profile = src.profile
        profile.update(
            dtype=rasterio.float64,
            count=1,
            compress='lzw')
        # BUG FIX: cast to float before dividing — on integer bands
        # (b1 - b2) / (b1 + b2) truncates (and can overflow) otherwise.
        # Also dropped the dead `ndvi = numpy.zeros(...)` that was
        # immediately overwritten.
        b1 = b1.astype(numpy.float64)
        b2 = b2.astype(numpy.float64)
        ndvi = (b1 - b2) / (b1 + b2)
        with rasterio.open('ndvi_python.tif', 'w', **profile) as dst:
            dst.write(ndvi.astype(rasterio.float64), 1)
|
[
"dknapp4@asu.edu"
] |
dknapp4@asu.edu
|
710b13be4f032fd0628dfcd05eec3fa390d50805
|
9b0f102588acdc125cf8d4dfa1e51dffe12e1f2f
|
/apps/trade/urls.py
|
e272b659438321b9cf37e506f4956b8d547cecd9
|
[] |
no_license
|
aurthurm/sagetrader
|
d6e8fc3df5847acc05d134e7a39797ca6258d4ef
|
97ca91b4f460fdf83837244e1dc5517fc0d74850
|
refs/heads/master
| 2023-01-24T23:23:59.129638
| 2020-07-21T09:19:46
| 2020-07-21T09:19:46
| 165,684,233
| 1
| 0
| null | 2023-01-03T16:11:42
| 2019-01-14T15:23:52
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
from django.urls import path, include
from .views import *
app_name = 'trade'  # URL namespace: reverse with 'trade:<name>'

urlpatterns = [
    path('dashboard/', Dashboard.as_view(), name='dashboard'),
    path('statistics/member', Statistics.as_view(), name='member-statistics'),
    # Plan / portfolio maintenance
    path('trading-plan/update/', UpdatePlan.as_view(), name='update-plan'),
    path('trading-portfolio/update/', UpdatePortfolio.as_view(), name='update-portfolio'),
    path('trading-portfolio/remove/', UpdatePortfolioRemove.as_view(), name='update-portfolio-remove'),
    # Listings
    path('mine/', TradeList.as_view(), name='my-trades'),
    path('strategies/mine/', StrategiesList.as_view(), name='my-strategies'),
    # Creation
    path('place/', PlaceTrade.as_view(), name='place-trade'),
    path('strategy/add/', StrategyCreate.as_view(), name='add-strategy'),
    # Per-trade detail and attachments
    path('<int:trade_id>/detail', TradeDetail.as_view(), name='trade-detail'),
    path('<int:trade_id>/followup', AddFollowUp.as_view(), name='trade-followup'),
    path('<int:trade_id>/charts/', AddChart.as_view(), name='add-chart'),
]
|
[
"aurthurmusendame@gmail.com"
] |
aurthurmusendame@gmail.com
|
7c91cb81835b975d6fc34bfe38d28812aecc2704
|
b0cdbad299f6174bfdb0fba173dbcf3889b82209
|
/Object Oriented Programming/oops/class_11.py
|
a4d0fb9c8a988a9e9eb3c05db8bf43193528c9e4
|
[] |
no_license
|
deesaw/PythonD-06
|
a33e676f1e0cfc13b4ea645c8b60547b198239ac
|
3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa
|
refs/heads/master
| 2023-03-18T08:24:42.030935
| 2021-03-02T14:15:09
| 2021-03-02T14:15:09
| 343,797,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
#usage of super
# parent class
class Deer:
    """Parent class for the super() demo below."""

    def __init__(self):
        print("Deer is in forest")

    def whoisThis(self):
        # Overridden by Stag; still reachable from the child via super().
        print("Parent - Deer")

    def jump(self):
        print("Deer is jumping")
# child class
class Stag(Deer):
    """Child class that overrides whoisThis() yet can still call the parent's."""

    def __init__(self):
        # Intentionally does not chain to Deer.__init__, matching the demo.
        print("Stag is ready")

    def whoisThis(self):
        print("Child - Stag")

    def run(self):
        print("Runs faster")

    def parentwhoisThis(self):
        # Explicit two-argument form; behaves exactly like super().whoisThis().
        super(Stag, self).whoisThis()
# Demo: overriding, inherited methods, and super() delegation.
bucky = Stag()
bucky.whoisThis()
bucky.jump()
bucky.run()
bucky.parentwhoisThis()
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
b055b4c4e168f77967e6a90390a200084d7360a1
|
265b451de88d2cbc7cb7af24e42c78b128cd80ee
|
/SQL/group-by-time.py
|
e6bf6985cece32effe0706b8b63c7b57e2c4140a
|
[] |
no_license
|
ryanchang1005/Django-lab
|
c967f0bf01fc1bc5727194bb036e6e378cfe075c
|
9186c07177d6f563a8b8dcd00464ac741f736ce9
|
refs/heads/master
| 2023-06-30T03:44:37.749216
| 2021-08-03T03:06:01
| 2021-08-03T03:06:01
| 288,667,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
"""
Group by minute
Input:
6 2020-12-09 09:29:04.215836
7 2020-12-09 09:29:18.682882
8 2020-12-09 09:29:19.831128
9 2020-12-09 09:29:34.918914
10 2020-12-09 09:29:37.036617
11 2020-12-09 09:29:38.672620
12 2020-12-09 09:29:40.311120
13 2020-12-09 09:29:40.820016
14 2020-12-09 09:29:41.537559
15 2020-12-09 09:29:53.676690
16 2020-12-09 09:30:02.336606
17 2020-12-09 09:30:03.815859
18 2020-12-09 09:30:05.412835
19 2020-12-09 09:30:15.673348
20 2020-12-09 09:34:50.976693
21 2020-12-09 09:34:53.490987
Output:
[
{'log_minute': '17:29', 'log_count': 10},
{'log_minute': '17:30', 'log_count': 4},
{'log_minute': '17:34', 'log_count': 2}
]
"""
def format(x):
    # Zero-pad to two digits (e.g. 9 -> "09") for HH:MM display.
    # NOTE(review): this shadows the built-in format(); consider renaming.
    return '%02d' % x
from datetime import datetime
from django.db.models import Count
from django.db.models.functions import Trunc
# Truncate each log's creation timestamp to the minute, then count rows per minute.
qs = Log.objects.all().annotate(log_minute=Trunc('created', 'minute')).values('log_minute').annotate(log_count=Count('id'))
data = []
for it in qs:
    data.append({
        'log_minute': format(it['log_minute'].hour) + ':' + format(it['log_minute'].minute),
        'log_count': it['log_count'],
    })
# Sort by the "HH:MM" string; zero padding keeps the ordering chronological within a day.
data.sort(key=lambda it: it['log_minute'])
|
[
"ryan@maideax.com"
] |
ryan@maideax.com
|
96ce5236d2509a84730721428f0aa0e3a53f1054
|
2d54ab7a1e829f89b554d6abc27527fdb38539ff
|
/run.py
|
ec5c92c704cc0c8aa94e6d0680f4482f76dfc1e3
|
[] |
no_license
|
zhtjtcz/Software-Backend
|
1c3c73d8863d0d0df9cdfa08e4900f878127ed6c
|
ca865f1fe75493098050b236634f776f7b97d04d
|
refs/heads/main
| 2023-06-07T06:28:05.345830
| 2021-06-17T16:30:47
| 2021-06-17T16:30:47
| 367,622,524
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
import os
# Apply any pending model changes before starting the server.
os.system("python manage.py makemigrations")
os.system("python manage.py migrate")
# nohup + '&' detaches the dev server so this script returns immediately.
# NOTE(review): binding 0.0.0.0 exposes the dev server on all interfaces — confirm intended.
os.system("nohup python manage.py runserver 0.0.0.0:8000 & \n")
print("The backend is running!")
|
[
"18377221@buaa.edu.cn"
] |
18377221@buaa.edu.cn
|
956d2b8dc4154b4476b17b8c513d88086fefd4be
|
b47c136e077f5100478338280495193a8ab81801
|
/Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/register_unarystruct.py
|
42858d8bc61cd90d8330727506be4c495ca3014f
|
[
"Apache-2.0"
] |
permissive
|
IanSMoyes/SpiderPi
|
22cd8747cc389f674cc8d95f32b4d86f9b7b2d8e
|
cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1
|
refs/heads/master
| 2023-03-20T22:30:23.362137
| 2021-03-12T17:37:33
| 2021-03-12T17:37:33
| 339,555,949
| 16
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
from board import SCL, SDA
from busio import I2C
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_struct import UnaryStruct
DEVICE_ADDRESS = 0x74 # device address of PCA9685 board
A_DEVICE_REGISTER_1 = 0x00 # Configuration register on the is31fl3731 board
A_DEVICE_REGISTER_2 = 0x03 # Auto Play Control Register 2 on the is31fl3731 board
class DeviceControl: # pylint: disable-msg=too-few-public-methods
    """Exposes two device registers as plain attributes via UnaryStruct descriptors."""
    def __init__(self, i2c):
        self.i2c_device = i2c # self.i2c_device required by UnaryStruct class
    # Each descriptor reads/writes one unsigned byte ("<B") at the given
    # register address through self.i2c_device.
    register1 = UnaryStruct(A_DEVICE_REGISTER_1, "<B") # 8-bit number
    register2 = UnaryStruct(A_DEVICE_REGISTER_2, "<B") # 8-bit number
# Set up the I2C bus and attach the device at DEVICE_ADDRESS.
comm_port = I2C(SCL, SDA)
device = I2CDevice(comm_port, DEVICE_ADDRESS)
registers = DeviceControl(device)
# set the bits in the device
registers.register1 = 1 << 3 | 2
registers.register2 = 32
# display the device values for the bits
print("register 1: {}; register 2: {}".format(registers.register1, registers.register2))
# toggle the bits
registers.register1 = 2 << 3 | 5
registers.register2 = 60
# display the device values for the bits
print("register 1: {}; register 2: {}".format(registers.register1, registers.register2))
|
[
"ians.moyes@gmail.com"
] |
ians.moyes@gmail.com
|
43fb565c520ceba642840b9e2be90020c32f9bab
|
00ef8e1eb57b73427508b20aadf0266da6b1f900
|
/rlf/rl/loggers/sanity_checker.py
|
b27b9192a2eb8ad7ad2c58c8f3ceb49e8e0e9079
|
[
"MIT"
] |
permissive
|
amy12xx/rl-toolkit
|
f4643935cc8afd960356bfeae74c233d2596dea9
|
8254df8346752ea0226ae2064cc1eabc839567b0
|
refs/heads/master
| 2023-08-14T00:56:52.270642
| 2021-09-28T15:59:32
| 2021-09-28T15:59:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
from collections import defaultdict
import torch.nn as nn
def print_tensor(x):
    """Render a short preview of *x*: the first five entries of its first row."""
    if len(x.shape) <= 1:
        # 0-D / 1-D tensors: just show the leading elements.
        return str(x[:5])
    # Collapse all leading dims, keep the last one, and preview row 0.
    return str(x.view(-1, x.shape[-1])[0, :5])
class SanityChecker:
    """Debugging aid: optionally dumps keyword values per call site, counts
    hits per key, and aborts the program once ``stop_key`` has been seen
    ``stop_iters`` times."""

    def __init__(self, should_check, is_verbose, stop_key, stop_iters):
        self.is_verbose = is_verbose
        self.should_check = should_check
        # Per-key hit counter; unseen keys start at zero.
        self.log_key_calls = defaultdict(int)
        self.stop_key = stop_key
        self.stop_iters = stop_iters

    def check(self, log_key, **kwargs):
        """Dump *kwargs* when verbose, count the hit, and stop when the
        stop-key budget is exhausted."""
        if not self.should_check:
            return
        if self.is_verbose:
            print('---')
            for name, value in kwargs.items():
                print(self.get_str(name, value))
        self.log_key_calls[log_key] += 1
        if self.log_key_calls[self.stop_key] >= self.stop_iters:
            # Deliberate hard stop so a debugging run ends deterministically.
            raise ValueError('Sanity stopped. Program done.')

    def check_rnd_state(self, key):
        """Print one freshly initialized Linear weight so RNG divergence
        between runs becomes visible."""
        if not self.should_check:
            return
        weight = nn.Linear(3, 2).weight
        print(f"{key}:Rnd", weight.view(-1).detach()[0].item())

    def get_str(self, k, v, indent=""):
        """Recursively format ``k: v``; dicts nest one level of indent and
        nn.Modules show a few sampled parameter tensors."""
        s = f"{indent}{k}: "
        if isinstance(v, dict):
            for sub_key, sub_val in v.items():
                s += "\n" + self.get_str(sub_key, sub_val, " ")
        elif isinstance(v, nn.Module):
            params = list(v.parameters())
            for spot in (0, -1, -5, 3):
                s += f"\n{indent} {spot}:" + print_tensor(params[spot])
        else:
            s += f"{v}"
        return s
# Module-level singleton; installed via one of the set_sanity_checker*() helpers.
sanity_checker = None
def get_sanity_checker():
    """Return the installed checker; a checker must have been set first.

    NOTE(review): this assert is stripped under ``python -O``.
    """
    global sanity_checker
    assert sanity_checker is not None
    return sanity_checker
def set_sanity_checker(args):
    """Install the module-level SanityChecker from parsed CLI args.

    ``args.sanity_cmd`` has the form ``"<stop_key>:<stop_iters>"``; an empty
    string means no stop key, and an empty iteration count defaults to 1.
    """
    global sanity_checker
    spec = args.sanity_cmd
    if len(spec) == 0:
        spec = ':'
    stop_key, stop_iters = spec.split(':')
    stop_iters = 1 if stop_iters == '' else int(stop_iters)
    sanity_checker = SanityChecker(args.sanity, args.sanity_verbose, stop_key,
                                   stop_iters)
def set_sanity_checker_simple():
    """Install an always-on, verbose checker that effectively never stops."""
    global sanity_checker
    sanity_checker = SanityChecker(True, True, "", 1000000000)
def check(*args, **kwargs):
    """Module-level shortcut for the singleton's check()."""
    get_sanity_checker().check(*args, **kwargs)
def c(v):
    """Quick one-value dump under the 'tmp' key."""
    get_sanity_checker().check("tmp", v=v)
def check_rand_state(key=""):
    """Module-level shortcut for the singleton's check_rnd_state()."""
    get_sanity_checker().check_rnd_state(key)
|
[
"me@andrewszot.com"
] |
me@andrewszot.com
|
49a249a7ab9728cf12bc9b8176cd9f2b40792e1f
|
553af2a9b110d54c0c2d972726f69ad9578f772f
|
/k2_domain/tests.py
|
63942882cb5753b4c1bdf78693ee9e5f8bc64b29
|
[] |
no_license
|
simonemmott/k2
|
31ca9aca661e4a070ec3dfd6f6533abcc84ed883
|
f455727c3f25dd2ad428c9c2936f05d94a62f843
|
refs/heads/master
| 2022-12-16T23:10:48.687149
| 2019-06-24T22:48:24
| 2019-06-24T22:48:24
| 192,113,010
| 0
| 0
| null | 2022-12-08T05:15:52
| 2019-06-15T18:47:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,428
|
py
|
from django.test import TestCase
from jinja2 import Environment
from k2.jinja2 import environment
from jinja2 import PackageLoader
import json
from k2_util import templates
from k2.settings import BASE_DIR
from posix import lstat
jinja2_env = environment(loader=PackageLoader('k2_domain', 'jinja2'))
def test_domain():
    """Build a stub domain object mimicking the ORM shape the templates expect."""
    class Object(object):
        pass

    class List(object):
        # Mimics a Django-style manager: .all() returns the backing list.
        def __init__(self, lst):
            self.lst = lst

        def all(self):
            return self.lst

    domain = Object()
    domain.name = 'DOMAIN_NAME'
    stub_models = []
    for number, label in ((1, 'MODEL_1'), (2, 'MODEL_2')):
        model = Object()
        model.id = number
        model.name = label
        stub_models.append(model)
    domain.models = List(stub_models)
    return domain
# Create your tests here.
class Jinja2Tests(TestCase):
    """Unit tests for the package-local Jinja2 environment."""

    def test_template_from_string(self):
        """A template compiled from a literal renders its context."""
        template = jinja2_env.from_string('Hello {{target}}!')
        output = template.render(target='World')
        self.assertEqual('Hello World!', output)

    def test_list_from_string(self):
        """Templates can loop over the stub domain's models."""
        template = jinja2_env.from_string('[{% for model in domain.models.all() %}{{model.name}}.py,{% endfor %}]')
        # Strip the surrounding brackets plus the trailing comma, then split.
        lst = template.render(domain=test_domain())[1:-2].split(',')
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual('MODEL_1.py', lst[0])
        self.assertEqual('MODEL_2.py', lst[1])
class IndexTests(TestCase):
    """Tests for templates.index() over the k2_domain template tree."""

    def test_indexes(self):
        """index() maps generated file names to template paths (with model query args)."""
        index = templates.index(jinja2_env, BASE_DIR, 'k2_domain', 'k2_domain', domain=test_domain())
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(1, len(index))
        self.assertTrue('DOMAIN_NAME' in index.keys())
        self.assertEqual('k2_domain/domain.name', index.get('DOMAIN_NAME'))
        index = templates.index(jinja2_env, BASE_DIR, 'k2_domain', 'k2_domain/domain.name/models', domain=test_domain())
        self.assertEqual(3, len(index))
        self.assertTrue('__init__.py' in index.keys())
        self.assertEqual('k2_domain/domain.name/models/__init__.py', index.get('__init__.py'))
        self.assertTrue('MODEL_1.py' in index.keys())
        self.assertEqual('k2_domain/domain.name/models/model.py&model=1', index.get('MODEL_1.py'))
        self.assertTrue('MODEL_2.py' in index.keys())
        self.assertEqual('k2_domain/domain.name/models/model.py&model=2', index.get('MODEL_2.py'))
|
[
"simon.emmott@yahoo.co.uk"
] |
simon.emmott@yahoo.co.uk
|
eab2e39b65e58dce27b14af21523d7b9932dfbcf
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/cellblender/bng/bng_operators.py
|
a10ca18c55f2aa90e867e4bc68e9c7d6f33991d3
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
import bpy
import os
import subprocess
from cellblender import cellblender_properties, cellblender_operators
#from . import net
# We use per module class registration/unregistration
filePath = ''
def register():
    """Register this module's classes with Blender."""
    bpy.utils.register_module(__name__)
def unregister():
    """Remove this module's classes from Blender."""
    bpy.utils.unregister_module(__name__)
def cleanup(filePath):
    """Placeholder for cleaning up generated files; currently a no-op."""
    pass
def execute_bionetgen(filepath,context):
    """Run BNG2.pl on *filepath*, writing output next to this module.

    Uses the user-configured BioNetGen path when it is valid; otherwise walks
    the directory tree around the .bngl file looking for BNG2.pl, moving one
    parent directory upward per iteration (at most 20 levels).
    """
    mcell = context.scene.mcell
    if mcell.cellblender_preferences.bionetgen_location_valid:
        bngpath = mcell.cellblender_preferences.bionetgen_location
        print ("\nBioNetGen exe found: " + bngpath)
        destpath = os.path.dirname(__file__)
        exe_bng = " ".join([bngpath, "--outdir", destpath, filepath]) # create command string for BNG execution
        print("*** Starting BioNetGen execution ***")
        print(" Command: " + exe_bng )
        #os.system(exe_bng) # execute BNG
        # subprocess.call with an argument list avoids shell quoting issues.
        subprocess.call([bngpath,"--outdir",destpath,filepath])
    else:
        # Perform the search as done before
        from os.path import exists
        filebasename = os.path.basename(filepath)
        filedirpath = os.path.dirname(filepath) # dir of the bngl script file
        check_dir = filedirpath;
        n = 0
        while(n!=20): # iterative search for BNG exe file (starts from the dir containing the bngl script file)
            bng_dir = check_dir # current dir (+ any unchecked child dir) to be checked
            checked = {} # list of dirs for which search is complete
            i = 0
            for (dirpath, dirname, filename) in os.walk(bng_dir): # Search over the current and previously unchecked child dirs
                if (i == 0):
                    check_dir = os.path.dirname(dirpath) # mark the parent dir for next search (after current and child dirs are done)
                    i = 1
                if dirpath in checked: # escape any child dir if already been checked
                    continue
                bngpath = os.path.join(dirpath,"BNG2.pl") # tentative path for the BNG exe. file
                print ( "Searching for " + bngpath )
                if os.path.exists(bngpath): # if BNG exe.file found, proceed for BNG execution
                    print ("\nBioNetGen exe found: " + bngpath)
                    destpath = os.path.dirname(__file__)
                    exe_bng = " ".join([bngpath, "--outdir", destpath, filepath]) # create command string for BNG execution
                    print("*** Started BioNetGen execution ***")
                    #os.system(exe_bng) # execute BNG
                    subprocess.call([bngpath,"--outdir",destpath,filepath])
                    return{'FINISHED'}
                checked.update({dirpath:True}) # store checked directory in the list
            n +=1
        # NOTE(review): the loop only exits with n == 20 (success returns
        # earlier), so this condition is always true on fall-through.
        if (n==20): # too many iterations; BNG not found, stop further search
            print ("Error running BioNetGen. BNG2.pl not found....")
    return{'FINISHED'}
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
d40c9fe210c8f3cad3a2f5d0c8912bb687d1a187
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/subscription/v20191001preview/__init__.py
|
809b67527e985eb66ca6611d911e43258c96a031
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_subscription_alias import *
from .subscription_alias import *
from ._inputs import *
from . import outputs
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
9e2cf686b700b1a54492e9adc5d477ec83e3fbad
|
eec698a1cb1b76a5f00ca51726bb7387104dbe89
|
/backend/app/models/category.py
|
a5d236d50fa781ec1431eabd8c1822f4bc571eab
|
[] |
no_license
|
MSurfer20/recommender-portal
|
9450153e94f89785038918c29c227556df3636b4
|
02924c0d813af4ddb29041ccad34e57963e57168
|
refs/heads/master
| 2022-10-28T11:10:48.265123
| 2020-06-09T20:36:18
| 2020-06-09T20:36:18
| 267,325,013
| 1
| 0
| null | 2020-06-09T13:43:08
| 2020-05-27T13:20:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
from enum import Enum
from pydantic import AnyUrl, Field
from typing import List, Optional
from bson.objectid import ObjectId
from .base import Base, ObjectID, MusicEnum, VideoEnum, CategoryEnum
class Category(Base):
    """Category definition; the id doubles as the category enum value."""
    # Aliased to "_id" so it maps onto the MongoDB document id field.
    id: CategoryEnum = Field(None, alias="_id")
class ItemBase(Base):
    """Base fields for any item"""
    # Aliased to "_id" so it maps onto the MongoDB document id field.
    id: ObjectID = Field(None, alias="_id")
    flags: List[str] = list()
    # presumably excludes the item from listings when True — verify with consumers
    hidden: bool = False
    title: str
    url: AnyUrl
    year_release: int
    genres: List[str] = list()
class Show(ItemBase):
    """Shows category definition"""
    seasons: int
    episode_length: int
    season_length: int
    # Optional VideoEnum: the streaming platform, if any.
    streaming: Optional[VideoEnum]
class Anime(Show):
    """Anime category defintion — currently identical to Show."""
    # NOTE update if any other fields required
    pass
class Movie(ItemBase):
    """Movie category definition"""
    language: str
    director: str
    # Optional VideoEnum: the streaming platform, if any.
    streaming: Optional[VideoEnum]
class Music(ItemBase):
    """Music category definiton"""
    artist: str
    # Optional because singles may not belong to an album.
    album: Optional[str]
    # Optional MusicEnum: the streaming platform, if any.
    streaming: Optional[MusicEnum]
class Book(ItemBase):
    """Book category definition."""
    author: str
|
[
"pkrockstar7@gmail.com"
] |
pkrockstar7@gmail.com
|
468e1bd89d23c215a903bc44ce45617e25a3f400
|
c3787b68c6276a2dd10008f609e6091ecbcca0b0
|
/two_pointers/triplets_smaller_sum.py
|
138fbcefec62ae14e4e1f8c54378519a6183ac46
|
[
"MIT"
] |
permissive
|
cosmos-sajal/ds_algo
|
45afc8470c2b32fc56041bfbca8acb886cb70864
|
d01912b8c42d8465660cee04e7648731c9acbaff
|
refs/heads/master
| 2022-11-06T15:44:25.030664
| 2020-06-20T16:59:56
| 2020-06-20T16:59:56
| 263,012,260
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
# https://www.educative.io/courses/grokking-the-coding-interview/mElknO5OKBO
def triplet_with_smaller_sum(arr, target):
    """Count triplets (i < j < k) whose values sum to less than *target*.

    Sorts *arr* in place (same side effect as before), then anchors each
    element and closes in with two pointers: whenever the pair sum is small
    enough, every index between the pointers also completes a valid triplet.
    """
    arr.sort()
    count = 0
    for anchor in range(len(arr) - 1):
        lo, hi = anchor + 1, len(arr) - 1
        remaining = target - arr[anchor]
        while lo < hi:
            if arr[lo] + arr[hi] < remaining:
                # arr[lo] pairs with every element from lo+1 up to hi.
                count += hi - lo
                lo += 1
            else:
                hi -= 1
    return count
def main():
    """Smoke-test the counter; expected output: 2 then 4."""
    print(triplet_with_smaller_sum([-1, 0, 2, 3], 3))
    print(triplet_with_smaller_sum([-1, 4, 2, 1, 3], 5))
main()
|
[
"sajal.4591@gmail.com"
] |
sajal.4591@gmail.com
|
9274b3b36010b8ebad3d07e1d6dd9345b598c5df
|
5cf0842f3c066b2dbbea703bfff9e90b69905937
|
/neurodsp/tests/test_utils_outliers.py
|
09058cbc0b8443cc26506d563be3579b51a623c6
|
[
"MIT"
] |
permissive
|
srcole/neurodsp
|
9e880c990587e928e3cc550fef53f5bb1d112bfa
|
6b500d967a2ca63b62d07ab345e021f15e53be6a
|
refs/heads/master
| 2021-05-23T05:48:06.916165
| 2019-06-14T03:44:06
| 2019-06-14T03:44:06
| 94,925,277
| 1
| 0
| null | 2017-08-30T18:30:22
| 2017-06-20T18:58:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
"""Tests for outlier related utility functions."""
import numpy as np
from numpy.testing import assert_equal
from neurodsp.utils.outliers import *
###################################################################################################
###################################################################################################
def test_remove_nans():
    """remove_nans should strip edge NaNs and return a matching boolean mask."""
    # np.nan (lowercase): the np.NaN alias was removed in NumPy 2.0.
    # Test with equal # of NaNs on either edge
    arr = np.array([np.nan, np.nan, 1, 2, 3, np.nan, np.nan])
    arr_no_nans, arr_nans = remove_nans(arr)
    assert_equal(arr_no_nans, np.array([1, 2, 3]))
    assert_equal(arr_nans, np.array([True, True, False, False, False, True, True]))
    # Test with different # of NaNs on either edge
    arr = np.array([np.nan, np.nan, 1, 2, 3, 4, np.nan])
    arr_no_nans, arr_nans = remove_nans(arr)
    assert_equal(arr_no_nans, np.array([1, 2, 3, 4]))
    assert_equal(arr_nans, np.array([True, True, False, False, False, False, True]))
def test_restore_nans():
    """restore_nans should re-insert NaNs wherever the mask is True."""
    arr_no_nans = np.array([1, 2, 3])
    arr_nans = np.array([True, True, False, False, False, True])
    arr_restored = restore_nans(arr_no_nans, arr_nans)
    # np.nan (lowercase): the np.NaN alias was removed in NumPy 2.0.
    assert_equal(arr_restored, np.array([np.nan, np.nan, 1, 2, 3, np.nan]))
def test_discard_outliers():
    # TODO: placeholder — discard_outliers has no test coverage yet.
    pass
|
[
"tdonoghue@ucsd.edu"
] |
tdonoghue@ucsd.edu
|
019daba148c86709b395b3909db8a86a6c9a1bd3
|
bda7a0576e17fe417175680b5698635b876c8091
|
/users/migrations/0002_auto_20200219_1614.py
|
b420d734a989b370f84149c8b0aed3a478f4ff1b
|
[
"Apache-2.0"
] |
permissive
|
Robotix-NITRR/RobotixWeb2021
|
56bb66667e5bd106930138f7ed69afeee609fe59
|
7f66a0dd5c54c44e6a128d8139d6bfd1135580f9
|
refs/heads/master
| 2023-04-04T09:59:53.961189
| 2021-04-12T12:07:25
| 2021-04-12T12:07:25
| 334,345,468
| 0
| 2
|
Apache-2.0
| 2021-01-30T08:26:24
| 2021-01-30T06:31:56
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
# Generated by Django 2.1.7 on 2020-02-19 10:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds ``currency`` and ``is_active`` to UserProfile."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='currency',
            # default=1000 backfills existing rows with the starting amount.
            field=models.IntegerField(default=1000),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='is_active',
            field=models.BooleanField(default=True),
        ),
    ]
|
[
"nikhil0223@gmail.com"
] |
nikhil0223@gmail.com
|
d65d42379061c239c3e54ddd63c11dffd5c86e91
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-3/9e31bd9e2d6cba783883eb4905b19fcbe0ca5534-<backend_pyqt4_internal_check>-bug.py
|
7487ccb89335c1e72c0ba32a5347a266fdafb3a0
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
def backend_pyqt4_internal_check(self):
    """Probe for a usable PyQt4 install and report its Qt/PyQt versions.

    Raises CheckFailed when PyQt4 is missing or lacks the version attributes.
    """
    try:
        from PyQt4 import QtCore
    except ImportError:
        raise CheckFailed('PyQt4 not found')
    try:
        qt_version = QtCore.QT_VERSION
        pyqt_version_str = QtCore.QT_VERSION_STR
    except AttributeError:
        raise CheckFailed('PyQt4 not correctly imported')
    else:
        # Success path: format both versions into one status string.
        return ('Qt: %s, PyQt: %s' % (self.convert_qt_version(qt_version), pyqt_version_str))
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
407652123c34cc2bfd537335f227c65cefbb2c11
|
f1af5236a561c07d1def8da8716be333e2fb896c
|
/test/Service/test_pack_unpack.py
|
35fbd6df707d360ab26b285c2acb982ad03f3d84
|
[
"Apache-2.0"
] |
permissive
|
chryswoods/acquire
|
1a9dbbfa0f82d761a314fd34b77c0ad982ff60e6
|
fe4c9cb2b90374b386d5ea38e514faa96661701a
|
refs/heads/devel
| 2022-12-22T08:29:52.155771
| 2022-01-11T11:34:02
| 2022-01-11T11:34:02
| 156,683,200
| 21
| 5
|
Apache-2.0
| 2022-12-17T12:47:35
| 2018-11-08T09:35:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,995
|
py
|
import pytest
from Acquire.Crypto import PrivateKey, get_private_key
from Acquire.Service import pack_arguments, unpack_arguments
from Acquire.Service import pack_return_value, unpack_return_value
from Acquire.Service import create_return_value
from Acquire.ObjectStore import string_to_bytes, bytes_to_string
import random
import json
def _bar():
    """Always raises, giving tests an exception with a known message."""
    raise PermissionError("Test Traceback")
def _foo():
    """Indirection so the raised error carries a two-frame traceback."""
    _bar()
def test_pack_unpack_args_returnvals():
    """Round-trip arguments and return values through pack/unpack, both in
    the clear and encrypted/signed with the test key pair, and check that a
    raised exception survives the round trip."""
    privkey = get_private_key("testing")
    pubkey = privkey.public_key()
    args = {"message": "Hello, this is a message",
            "status": 0,
            "long": [random.random() for _ in range(2)]}
    func = "test_function"
    # 1) Plain pack -> manual encrypt/decrypt -> unpack preserves the payload.
    packed = pack_arguments(function=func, args=args)
    crypted = pubkey.encrypt(packed)
    uncrypted = privkey.decrypt(crypted)
    (f, unpacked, keys) = unpack_arguments(args=uncrypted)
    print(keys)
    assert(args == unpacked)
    assert(f == func)
    # 2) Packing with keys produces an encrypted envelope we can inspect.
    packed = pack_arguments(function=func, args=args,
                            key=pubkey, response_key=pubkey,
                            public_cert=pubkey)
    data = json.loads(packed.decode("utf-8"))
    assert(data["encrypted"])
    assert(data["fingerprint"] == privkey.fingerprint())
    payload = privkey.decrypt(string_to_bytes(data["data"]))
    payload = json.loads(payload)
    assert(payload["sign_with_service_key"] == privkey.fingerprint())
    assert(payload["encryption_public_key"] == bytes_to_string(pubkey.bytes()))
    assert(payload["payload"] == args)
    (f, unpacked, keys) = unpack_arguments(function=func, args=packed,
                                           key=privkey)
    # 3) Return values: pack signed+encrypted, verify signature, unpack.
    message = {"message": "OK"}
    return_value = create_return_value(message)
    packed_result = pack_return_value(function=func,
                                      payload=return_value, key=keys,
                                      private_cert=privkey)
    result = json.loads(packed_result.decode("utf-8"))
    assert(result["fingerprint"] == privkey.fingerprint())
    assert(result["encrypted"])
    data = string_to_bytes(result["data"])
    sig = string_to_bytes(result["signature"])
    pubkey.verify(signature=sig, message=data)
    data = json.loads(privkey.decrypt(data))
    assert(data["payload"]["return"] == message)
    result = unpack_return_value(return_value=packed_result,
                                 key=privkey, public_cert=pubkey)
    assert(result == message)
    # 4) A packed exception is re-raised on unpack.
    try:
        return_value = create_return_value(_foo())
    except Exception as e:
        return_value = create_return_value(e)
    packed_result = pack_return_value(function=func,
                                      payload=return_value, key=keys,
                                      private_cert=privkey)
    with pytest.raises(PermissionError):
        result = unpack_return_value(function=func, return_value=packed_result,
                                     key=privkey, public_cert=pubkey)
|
[
"chryswoods@gmail.com"
] |
chryswoods@gmail.com
|
7014ef7a1f5107c645c6b794df58809717cbbcd7
|
2d47e5f7f358429ac46ed6bf61e8b8d68581dfeb
|
/cart/views.py
|
3899a4bae7bb3e9e967e15dcfab285467cdcbbb8
|
[] |
no_license
|
hossamelneily/Freeshop
|
7e24bbf6eb3668a121e9d6903347e6405295d589
|
48b1650164bad5f9691b11dc7f2377c800bd8240
|
refs/heads/master
| 2021-11-21T20:11:23.306946
| 2019-10-28T13:16:42
| 2019-10-28T13:16:42
| 144,614,064
| 1
| 2
| null | 2021-09-08T00:07:47
| 2018-08-13T17:46:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 5,513
|
py
|
from django.shortcuts import render,redirect
from django.contrib.auth import authenticate , login , get_user_model
from django.conf import settings
# Create your views here.
from cart.models import cart
from products.models import product
from products.models import product
from orders.models import orders
from ecommerce.forms import login_page,GuestForm
from billing.models import billing
from Address.models import Address
from Guest.models import Guest
from django.urls import reverse
from Address.forms import AdressForm,UsePrevAdd
from django.http import JsonResponse
import json
user=get_user_model()
def cart_view_API(request):
    """Return the session's cart contents (name/price/url/id per product) as JSON."""
    cart_obj = cart.objects.get_or_create(request)
    # NOTE(review): this local shadows the imported `product` model.
    product = cart_obj.products.all()
    product_list=[]
    for x in product:
        product_list.append({"name":x.Name,"price":x.price,"url":x.get_absolute_url(),"id":x.id})
    return JsonResponse({ "product": product_list,
                          "cart_total": cart_obj.total,
                          "cart_subtotal":cart_obj.subtotal
                          })
def cart_view(request):
    """Render the cart page for the session's cart."""
    # Cart lookup/creation lives on the cart manager (see cart.models).
    cart_obj=cart.objects.get_or_create(request)
    return render(request,"carts/home.html",{"cart_obj":cart_obj})
def cart_update(request):
    """Toggle a product in/out of the cart; answers JSON for AJAX callers."""
    prod_obj=product.objects.get_by_id(id=request.POST.get('product_id'))
    cart_obj = cart.objects.get_or_create(request)
    if prod_obj in cart_obj.products.all():
        cart_obj.products.remove(prod_obj)
        added=False
    else:
        cart_obj.products.add(prod_obj)
        added=True
    # Keep the session's item counter in sync for the navbar badge.
    request.session['cart_items']=cart_obj.products.count()
    if request.is_ajax(): # will return json format
        print("Ajax is working ")
        json_data={
            "added":added,
            "cart_items_count":request.session.get("cart_items",0)
        }
        return JsonResponse(json_data)
    return redirect("cart:show") # need to modify this return
def checkout_view(request):
    """Drive the checkout flow: gather billing profile, order, address forms
    and saved addresses, then on POST attempt to charge and finalize the order."""
    cart_obj = cart.objects.get_or_create(request)
    order_obj=None
    Address_qs=None
    prev_form_shipping = None
    prev_form_billing = None
    Shipping_Address_qs = None
    Billing_Address_qs = None
    loginform = login_page(request.POST or None)
    guestform = GuestForm(request.POST or None)
    adressForm = AdressForm(request.POST or None)
    has_card = None
    # change =None
    billing_profile= billing.objects.get_or_new(request)
    if billing_profile is not None:
        order_obj, order_created = orders.objects.get_or_new(billing_profile, cart_obj)
        order_obj.Associate_orders_to_Addresses(request)
        if request.user.is_authenticated:
            # Offer previously used addresses only to logged-in users.
            Address_qs=Address.objects.filter(billing=billing_profile)
            Shipping_Address_qs = Address_qs.filter(Address_Type='shipping').values('id','Address_line_1','State','Postal_Code','city').distinct()
            #values('id','Address_line_1','State','Postal_Code','city') --> i have added the id beacuse we need to save the id of the address
            #but if the addresses all have the same values, this query will return the same addresses even we make ditinct beacuse their ids are different
            #so the user have to add different address to return different ones,
            if Shipping_Address_qs:
                prev_form_shipping = UsePrevAdd(request.POST or None,initial={'Address_Type':'shipping'})
            Billing_Address_qs = Address_qs.filter(Address_Type='billing').values('id','Address_line_1','State','Postal_Code','city').distinct()
            if Billing_Address_qs:
                prev_form_billing = UsePrevAdd(request.POST or None,initial={'Address_Type':'billing'})
        # if 'change' in request.build_absolute_uri():
        #     print('changes')
        #     change =True
        #     return redirect("cart:checkout")
        has_card=billing_profile.has_card # get the active cards
    if request.method=="POST" and not request.is_ajax(): #came from checkout() function when user entered all addresses and payement method, last step
        is_done = order_obj.check_orders()
        if is_done:
            did_charge = billing_profile.process_charge(order_obj=order_obj)
            if did_charge:
                order_obj.mark_paid()
                # Drop the cart from the session once paid.
                del request.session['cart_id']
                request.session['cart_items'] = 0
                # if not billing_profile.user: #not means false or None # is None means None only this for guest user
                #     billing_profile.set_card_inactive
                return redirect("cart:success")
        return redirect("cart:checkout")
    context={
        "cart_obj":cart_obj,
        "object": order_obj,
        "billing_profile":billing_profile,
        "loginpage":loginform,
        "guestform":guestform,
        "addressform":adressForm,
        'prev_form_shipping':prev_form_shipping,
        'prev_form_billing': prev_form_billing,
        "Address_qs":Address_qs,
        'Shipping_Address_qs':Shipping_Address_qs,
        'Billing_Address_qs':Billing_Address_qs,
        "has_card":has_card,
        # 'change':change,
        "public_key":getattr(settings,'STRIPE_PUB_Key','pk_test_UmKYvEdkBYpow9jUa9gloSTC')
    }
    return render(request,"carts/checkout.html",context)
def checkout_done(request):
    """Render the post-checkout success page."""
    return render(request, "carts/success.html", {})
|
[
"hossamelneily@gmail.com"
] |
hossamelneily@gmail.com
|
20683b9d7eaaf6988c7944329dd9cec29ca2947e
|
bab737891a602e8afc8b6a132ace3f05c37999e4
|
/blog/migrations/0009_auto_20170821_1442.py
|
d499a002ce2474ef9b550b762290ec0d1950cb34
|
[] |
no_license
|
lianchonghui/django-blog
|
34ddf308d15adf633b10676f835f27dd94a457f0
|
53f395f1d2ad2e4cea1fe38b99db705bb7fb352e
|
refs/heads/master
| 2021-09-09T15:33:40.171955
| 2018-03-17T14:24:05
| 2018-03-17T14:24:05
| 118,338,599
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,203
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-08-21 06:42
from __future__ import unicode_literals
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import blog
class Migration(migrations.Migration):
    """Auto-generated: adds verbose_name labels across Category/Post/Tag fields."""

    dependencies = [
        ('blog', '0008_auto_20170430_1033'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='cover',
            field=models.ImageField(blank=True, upload_to='covers/categories/%Y/%m/%d/', verbose_name='cover'),
        ),
        migrations.AlterField(
            model_name='category',
            name='cover_caption',
            field=models.CharField(blank=True, max_length=255, verbose_name='cover caption'),
        ),
        migrations.AlterField(
            model_name='category',
            name='created',
            field=models.DateTimeField(auto_now_add=True, verbose_name='creation time'),
        ),
        migrations.AlterField(
            model_name='category',
            name='creator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='creator'),
        ),
        migrations.AlterField(
            model_name='category',
            name='description',
            field=models.TextField(blank=True, verbose_name='description'),
        ),
        migrations.AlterField(
            model_name='category',
            name='genre',
            field=models.PositiveSmallIntegerField(choices=[(1, 'collection'), (2, 'tutorial')], default=1, verbose_name='genre'),
        ),
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(max_length=100, verbose_name='name'),
        ),
        migrations.AlterField(
            model_name='category',
            name='resource',
            field=models.URLField(blank=True, verbose_name='resource'),
        ),
        migrations.AlterField(
            model_name='category',
            name='slug',
            field=models.SlugField(unique=True, verbose_name='slug'),
        ),
        migrations.AlterField(
            model_name='category',
            name='status',
            field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'ongoing'), (2, 'finished')], null=True, verbose_name='status'),
        ),
        migrations.AlterField(
            model_name='category',
            name='title',
            field=models.CharField(blank=True, max_length=255, verbose_name='title'),
        ),
        migrations.AlterField(
            model_name='post',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='author'),
        ),
        migrations.AlterField(
            model_name='post',
            name='body',
            field=models.TextField(verbose_name='body'),
        ),
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='category'),
        ),
        migrations.AlterField(
            model_name='post',
            name='cover',
            field=models.ImageField(blank=True, upload_to=blog.models.post_cover_path, verbose_name='cover'),
        ),
        migrations.AlterField(
            model_name='post',
            name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='creation time'),
        ),
        migrations.AlterField(
            model_name='post',
            name='excerpt',
            field=models.CharField(blank=True, max_length=255, verbose_name='excerpt'),
        ),
        migrations.AlterField(
            model_name='post',
            name='modified_time',
            field=models.DateTimeField(auto_now=True, verbose_name='modification time'),
        ),
        migrations.AlterField(
            model_name='post',
            name='pub_date',
            field=models.DateTimeField(blank=True, null=True, verbose_name='publication time'),
        ),
        migrations.AlterField(
            model_name='post',
            name='status',
            field=models.PositiveSmallIntegerField(choices=[(1, 'published'), (2, 'draft'), (3, 'hidden')], default=2, verbose_name='status'),
        ),
        migrations.AlterField(
            model_name='post',
            name='tags',
            field=models.ManyToManyField(blank=True, to='blog.Tag', verbose_name='tags'),
        ),
        migrations.AlterField(
            model_name='post',
            name='title',
            field=models.CharField(max_length=255, verbose_name='title'),
        ),
        migrations.AlterField(
            model_name='post',
            name='views',
            field=models.PositiveIntegerField(default=0, editable=False, verbose_name='views'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='name',
            field=models.CharField(max_length=100, verbose_name='name'),
        ),
    ]
|
[
"lianchonghui@foxmail.com"
] |
lianchonghui@foxmail.com
|
90bb6f2d6386a5ad0b37e43f530eb0556d348aa8
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/network/netvisor/pn_ospfarea.py
|
34739d451da56d21430f472be5da741f0e3d2cbd
|
[
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 6,375
|
py
|
#!/usr/bin/python
""" PN-CLI vrouter-ospf-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_ospfarea
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove ospf area to/from a vrouter.
description:
- Execute vrouter-ospf-add, vrouter-ospf-remove command.
- This command adds/removes Open Shortest Path First(OSPF) area to/from
a virtual router(vRouter) service.
options:
pn_cliusername:
description:
- Login username.
required: true
pn_clipassword:
description:
- Login password.
required: true
pn_cliswitch:
description:
- Target switch(es) to run the CLI on.
required: False
state:
description:
- State the action to perform. Use 'present' to add ospf-area, 'absent'
to remove ospf-area and 'update' to modify ospf-area.
required: true
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: true
pn_ospf_area:
description:
- Specify the OSPF area number.
required: true
pn_stub_type:
description:
- Specify the OSPF stub type.
choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary']
pn_prefix_listin:
description:
- OSPF prefix list for filtering incoming packets.
pn_prefix_listout:
description:
- OSPF prefix list for filtering outgoing packets.
pn_quiet:
description:
- Enable/disable system information.
required: false
default: true
"""
EXAMPLES = """
- name: "Add OSPF area to vrouter"
pn_ospfarea:
state: present
pn_cliusername: admin
pn_clipassword: admin
pn_ospf_area: 1.0.0.0
pn_stub_type: stub
- name: "Remove OSPF from vrouter"
pn_ospf:
state: absent
pn_cliusername: admin
pn_clipassword: admin
pn_vrouter_name: name-string
pn_ospf_area: 1.0.0.0
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the ospf command.
returned: always
type: list
stderr:
description: The set of error responses from the ospf command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
def get_command_from_state(state):
    """Map a module *state* to the matching vRouter OSPF-area CLI command.

    :param state: One of 'present', 'absent' or 'update'.
    :return: The CLI command name, or ``None`` for an unrecognised state.
    """
    # Dispatch table instead of an if-chain; .get() yields None for an
    # unknown state, matching the original fall-through behaviour.
    state_to_command = {
        'present': 'vrouter-ospf-area-add',
        'absent': 'vrouter-ospf-area-remove',
        'update': 'vrouter-ospf-area-modify',
    }
    return state_to_command.get(state)
def main():
    """ This section is for arguments parsing """
    # Declare the module's interface; Ansible validates and coerces the
    # incoming task arguments against this spec.
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=True, type='str'),
            pn_clipassword=dict(required=True, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_ospf_area=dict(required=True, type='str'),
            pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa',
                                                   'stub-no-summary',
                                                   'nssa-no-summary']),
            pn_prefix_listin=dict(type='str'),
            pn_prefix_listout=dict(type='str'),
            pn_quiet=dict(type='bool', default='True')
        )
    )
    # Accessing the arguments
    cliusername = module.params['pn_cliusername']
    clipassword = module.params['pn_clipassword']
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    ospf_area = module.params['pn_ospf_area']
    stub_type = module.params['pn_stub_type']
    prefix_listin = module.params['pn_prefix_listin']
    prefix_listout = module.params['pn_prefix_listout']
    quiet = module.params['pn_quiet']
    # Translate the requested state into the concrete CLI sub-command.
    command = get_command_from_state(state)
    # Building the CLI command string
    cli = '/usr/bin/cli'
    if quiet is True:
        cli += ' --quiet '
    # NOTE(review): the password is embedded in the command line; it may be
    # visible in the process list on the target host.
    cli += ' --user %s:%s ' % (cliusername, clipassword)
    if cliswitch:
        if cliswitch == 'local':
            cli += ' switch-local '
        else:
            cli += ' switch ' + cliswitch
    cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area)
    # Optional parameters are appended only when the caller supplied them.
    if stub_type:
        cli += ' stub-type ' + stub_type
    if prefix_listin:
        cli += ' prefix-list-in ' + prefix_listin
    if prefix_listout:
        cli += ' prefix-list-out ' + prefix_listout
    # Run the CLI command
    ospfcommand = shlex.split(cli)
    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(ospfcommand)
    # Response in JSON format
    # A non-zero exit status is reported with stderr and changed=False;
    # success reports stdout and changed=True.
    if result != 0:
        module.exit_json(
            command=cli,
            stderr=err.rstrip("\r\n"),
            changed=False
        )
    else:
        module.exit_json(
            command=cli,
            stdout=out.rstrip("\r\n"),
            changed=True
        )
# AnsibleModule boilerplate
# NOTE(review): legacy Ansible convention imports module_utils at the bottom
# of the file; modern modules import it at the top.
from ansible.module_utils.basic import AnsibleModule

if __name__ == '__main__':
    main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
52c0c13d7052ae235e036a5de1bc4bb4f2f89261
|
a5a3ccf9feae3f5aa475153d92447f8f3ba8c013
|
/data/kcbot.py
|
b5e78e6669e62e23f0901827fbc01946a7740baa
|
[] |
no_license
|
waldenven/tsdata
|
2a309d976ce2994c7cca8ad91e6287cb8180b8cf
|
a4229b7978f4c14ffc2201ea38a1a44e68dec130
|
refs/heads/master
| 2020-06-13T22:42:45.334001
| 2012-07-03T03:42:01
| 2012-07-03T03:42:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,369
|
py
|
import urllib, re
from datetime import datetime
from pandas import DataFrame, Series, read_csv
from BeautifulSoup import BeautifulSoup
from tsdata.data.basedata import basedata
def all_daily_urls():
    """Scrape kcbt.com for the links to the daily wheat-price files.

    Returns the href of each <td width="33%" nowrap> cell's anchor on the
    daily wheat price listing page.

    Fixes: removed the unused `ts` table lookup, and the HTTP handle is now
    closed even if read() raises.
    """
    url = 'http://www.kcbt.com/daily_wheat_price.asp'
    f = urllib.urlopen(url)
    try:
        txt = f.read()
    finally:
        f.close()
    soup = BeautifulSoup(txt)
    # NOTE(review): assumes each matching cell wraps exactly one <a> tag.
    tds = soup.findAll('td', width='33%', nowrap='nowrap')
    return map(lambda x: x.a['href'], tds)
def one_date(url):
    """Fetch one daily KCBT price CSV and return it as a cleaned DataFrame.

    Headers are lower-cased, incomplete rows dropped, 'date' parsed to
    datetime.date, price columns coerced to float, and an 'LYY' contract
    code (futures month letter + 2-digit year) is added.
    """
    handle = urllib.urlopen(url)
    frame = read_csv(handle)
    frame = frame.rename(columns=lambda c: c.strip().lower()).dropna()
    frame['date'] = [datetime.strptime(v.strip(), '%m/%d/%Y').date()
                     for v in frame['date']]
    for col in frame.columns:
        if col in ('exch', 'comid'):
            frame[col] = [v.strip() for v in frame[col]]
        elif col != 'date':
            frame[col] = [float(v) for v in frame[col]]
    # Futures month letter indexed by month number, plus 2-digit year.
    frame['LYY'] = ['FGHJKMNQUVXZ'[int(m - 1)] + '%02d' % y
                    for m, y in zip(frame['month'], frame['year'])]
    return frame
class kcbotfuts(basedata):
    """KCBT wheat futures data source plugged into the `basedata` framework.

    Symbols look like 'f_KW_<LYY>[@column]'; only the 'kw' commodity prefix
    is handled (see handles()).
    """
    archivename = 'kcbot.pickle'      # pickle archive used by the base class
    tag = 'f'                         # symbol prefix marking futures data
    chunktype = 'DAY'                 # daily data granularity
    earliest = datetime(2011,11,16)   # first date fetched from kcbt.com
    _cache = None
    _changed = False
    _updated = False
    # Price columns are scaled by 0.01 "to match CBOT" (original comment).
    _scaling = { k:0.01 for k in [ 'previous', 'open', 'high', 'low', 'close', 'settle' ] } # to match CBOT

    def handles(self, symbol):
        # True for symbols shaped '<tag>_kw...' (case-insensitive check).
        l = len(self.tag) + 1 + 2
        return symbol[:l].lower() == self.tag + '_' + 'kw'

    def parsesymbol(self, symbol):
        """Split 'f_<commod>_<month>[@column]' into a column + row filter."""
        synre = re.compile('%s_([^@_]*)_([^@]*)@(.*)' % self.tag )
        synrenoat = re.compile('%s_([^@_]*)_([^@]*)' % self.tag )
        m = synre.match( symbol )
        if m:
            commod = m.group(1)
            month = m.group(2)
            tag = m.group(3)
        else:
            # No '@column' suffix: default to the settlement price column.
            m = synrenoat.match( symbol )
            commod = m.group(1)
            month = m.group(2)
            tag = 'settle'
        commod = commod.upper()
        return { 'column' : tag, 'filter' : { 'comid' : commod, 'LYY' : month } }

    def _updateday(self, din):
        # Fetch and parse the CSV for one calendar day from kcbt.com.
        df = DataFrame()  # NOTE(review): unused; one_date() builds the frame
        url = 'http://www.kcbt.com/download/kcprccsv/kcprccsv_%4d%02d%02d.csv' % (din.year, din.month, din.day)
        return one_date(url)
|
[
"none@none.com"
] |
none@none.com
|
9f73be10103f40ed4d479780397977619122dcb9
|
9947d1e328a3262a35a61385dc537c3dc557ab7d
|
/pythonnet/day3_PM/day3/recv_file.py
|
db7d06884dc73ce18d8d0d17c294d3419bf2c549
|
[] |
no_license
|
nuass/lzh
|
d0a7c74a3295523d1fe15eeaa73997fc04469f06
|
3cb1cf1e448b88ade226d113a7da4eab7bbb5c09
|
refs/heads/master
| 2021-02-06T06:10:32.772831
| 2019-06-10T08:54:49
| 2019-06-10T08:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
from socket import *
# Accept a single TCP connection on 127.0.0.1:8888 and save everything the
# peer sends into 'leg.jpg'.
# Fix: sockets and the output file are now closed even if recv/write raises.
HOST, PORT = '127.0.0.1', 8888
BACKLOG = 3
CHUNK_SIZE = 1024

srv = socket()
srv.bind((HOST, PORT))
srv.listen(BACKLOG)
try:
    conn, addr = srv.accept()
    print("Connect from", addr)
    try:
        # Stream the payload to disk until the peer closes the connection.
        with open('leg.jpg', 'wb') as out:
            while True:
                data = conn.recv(CHUNK_SIZE)
                if not data:
                    break
                out.write(data)
    finally:
        conn.close()
finally:
    srv.close()
|
[
"1581627402@qq.com"
] |
1581627402@qq.com
|
3bacc8c066e42bc4d4187216f852f631b9e01070
|
e21ab91cf22bf8359831e974d49e1fd90b69197a
|
/tests/conftest.py
|
3208b58757a89871d4e97393c3db78b2a76d169c
|
[
"BSD-3-Clause"
] |
permissive
|
creamofclubs/odin
|
9be21013a7d707aee978e83bc2cb9ef70f81befa
|
deb00b6af56d319a3e11f71dbee7d217c8713fae
|
refs/heads/master
| 2023-08-12T04:06:34.791245
| 2021-09-06T03:11:55
| 2021-09-06T03:11:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
import os
import sys
import datetime

# Make the package under ../src importable without installing it.
HERE = os.path.abspath(os.path.dirname(__file__))
SRC = os.path.normpath(os.path.join(HERE, "..", "src"))
sys.path.insert(0, SRC)

# Imported *after* the path tweak so the in-tree copy of `odin` is used.
import odin.datetimeutil

# Shared fixture constants for the test suite.
ARE_YOU_EXPERIENCED = datetime.date(1967, 5, 12)
# Fixed-offset zone, presumably UTC-6 ("Mountain War Time") — semantics
# come from odin.datetimeutil.FixedTimezone; confirm there.
MWT = odin.datetimeutil.FixedTimezone(-6, "Mountain War Time")
# Aware datetime: 1945-07-16 05:29:45 in MWT (looks like the Trinity test
# instant — TODO confirm intent).
BOOM = datetime.datetime(1945, 7, 16, 5, 29, 45, 0, MWT)
|
[
"tim@savage.company"
] |
tim@savage.company
|
28248168d3c9bf357c416aec52b917287ae926cf
|
158a327c3cbff52a607d4ec668c658f0d8d8eea8
|
/DP/55. Jump Game.py
|
13c37440c5ef1d510135f5ac8ff8bfe1773f858b
|
[] |
no_license
|
smartwell/leet_niuke
|
f11cbf826f9b2b358b614a63200e93aef47054a2
|
eb84aaa1248074dceda831f9385d68a24941fa04
|
refs/heads/master
| 2020-06-24T16:37:33.073534
| 2019-07-25T13:19:49
| 2019-07-25T13:19:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
########################45##########################
def Jump(nums):
    """Return the minimum number of jumps needed to reach the last index.

    Greedy BFS-style range expansion: each round covers all indices
    reachable with one more jump, returning as soon as a round can reach
    the end. Assumes the end is reachable.
    """
    n = len(nums)
    lo, hi = 0, 0          # window of indices reachable with `jumps` jumps
    jumps = 0
    while hi < n - 1:
        jumps += 1
        furthest = hi + 1
        for idx in range(lo, hi + 1):
            reach = idx + nums[idx]
            if reach >= n - 1:
                return jumps
            if reach > furthest:
                furthest = reach
        # Next window starts right after the old one.
        lo, hi = hi + 1, furthest
    return jumps
def Jump2(nums):
    """Minimum jump count via a single forward sweep (safe with zeros)."""
    steps = 0
    edge = 0    # furthest index reachable with `steps` jumps
    reach = 0   # furthest index reachable with `steps` + 1 jumps
    for idx, hop in enumerate(nums):
        if idx > edge:
            # Crossed the current frontier: another jump is required.
            steps += 1
            edge = reach
        if idx + hop > reach:
            reach = idx + hop
    return steps
##############################55###############################
def canJump(nums):
    """Return True if the last index is reachable from index 0 (forward scan)."""
    reachable = 0
    for idx, hop in enumerate(nums):
        if idx > reachable:
            # Gap before this index: it can never be reached.
            return False
        if idx + hop > reachable:
            reachable = idx + hop
    return True
def canJump2(nums):
    """Return True if the last index is reachable, scanning right-to-left.

    `goal` is the leftmost index currently known to reach the end; the
    answer is whether it walks all the way back to index 0.

    Fix: `goal` must be initialised to the last *index* (len(nums) - 1),
    not the *value* stored there — the original only worked because the
    first loop iteration (i = n, i + nums[n] >= nums[n]) happened to
    overwrite the wrong initial value.
    """
    n = len(nums) - 1
    goal = n
    for i in range(n, -1, -1):
        if i + nums[i] >= goal:
            goal = i
    return not goal
if __name__ == '__main__':
    # Ad-hoc manual check; `nums2` is defined but never exercised.
    nums = [0,2,3]
    nums2 = [3,2,1,0,4]
    print(canJump(nums))
|
[
"w5802022@gmail.com"
] |
w5802022@gmail.com
|
c7e9e9554266187a384876586f55b405a1471944
|
9fd934751ef1d332b792d204df0757ed91f36ef6
|
/route66_project/route66/views.py
|
b3e6cee4b0f257ed94d9ee9aff83c33beaa4838e
|
[] |
no_license
|
cs-fullstack-2019-spring/django-intro2-routes-cw-cgarciapieto
|
e55fa7a7b4c9ef317b9c6e3db1a8ba8eaea7602c
|
f232f3125fce93c03e687940e10b22280ba1d0d0
|
refs/heads/master
| 2020-04-23T23:57:22.501082
| 2019-02-20T05:04:38
| 2019-02-20T05:04:38
| 171,550,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
# http response function
def goods(request):
    """Plain-text view for the goods route."""
    body = "Here you go, POKEMON FOREVER"
    return HttpResponse(body)
def joy(request):
    """Plain-text view for the song route."""
    body = "This song drives me nuts"
    return HttpResponse(body)
def index(request):
    """Landing view pointing at the other two routes."""
    body = "the goods or song of death"
    return HttpResponse(body)
# Module-level string served by the `response` view (defined before use
# for readability; lookup happens at call time either way).
challenge = "I heard you"


def response(request):
    """Return the module-level challenge string as the response body."""
    return HttpResponse(challenge)
|
[
"cgarciapieto@gmail.com"
] |
cgarciapieto@gmail.com
|
189c925a75f59f6b8ae9d4cfeb0648a69fcff9a5
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/388.py
|
94106f5f14c7292d2212da973bc6701dbe2060af
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
def solve(s, k):
    """Minimum number of size-k flips to turn every pancake '+',
    or "IMPOSSIBLE" when no sequence of flips works.

    Greedy from the right: the rightmost '-' must be the right edge of
    some flip, so flip exactly there and repeat.
    """
    cakes = list(s)
    size = int(k)
    flips = 0
    while cakes:
        top = cakes.pop()
        if top == '+':
            continue
        # A flip must end at this position; the other size-1 cakes must exist.
        if len(cakes) < size - 1:
            return "IMPOSSIBLE"
        for off in range(1, size):
            cakes[-off] = '-' if cakes[-off] == '+' else '+'
        flips += 1
    return flips
# Code Jam driver: first line is the case count, then one "pancakes k" pair
# per line; output uses the required "Case #i: answer" format.
for i in range(1, int(input()) + 1):
    print("Case #", i, ": ", solve(*input().split()), sep='')
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
6498e8c2aa931f3faecf9b2f1027d5f19f53e176
|
d24a6e0be809ae3af8bc8daa6dacfc1789d38a84
|
/other_contests/PAST2019/I.py
|
1fe31e281c0bf9947e3edece5bda7e352ee8a741
|
[] |
no_license
|
k-harada/AtCoder
|
5d8004ce41c5fc6ad6ef90480ef847eaddeea179
|
02b0a6c92a05c6858b87cb22623ce877c1039f8f
|
refs/heads/master
| 2023-08-21T18:55:53.644331
| 2023-08-05T14:21:25
| 2023-08-05T14:21:25
| 184,904,794
| 9
| 0
| null | 2023-05-22T16:29:18
| 2019-05-04T14:24:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
def solve(n, m, s_list, c_list):
    """Minimum total cost of choosing shops so that all n items are covered.

    Bitmask set-cover DP: dp[mask] is the cheapest cost of a subset of the
    shops processed so far whose item sets OR up to `mask`. Each shop's
    string marks its items with 'Y'. Returns -1 when full coverage is
    impossible.
    """
    full = (1 << n) - 1
    INF = sum(c_list) + 1          # safe upper bound acting as infinity
    dp = [INF] * (1 << n)
    dp[0] = 0
    for s, cost in zip(s_list, c_list):
        # Bit p corresponds to the p-th character from the right.
        shop_mask = 0
        for p, ch in enumerate(reversed(s)):
            if ch == "Y":
                shop_mask |= 1 << p
        # 0/1 choice per shop: update a copy so each shop is used at most once.
        nxt = dp[:]
        for cur in range(1 << n):
            cand = dp[cur] + cost
            if cand < nxt[cur | shop_mask]:
                nxt[cur | shop_mask] = cand
        dp = nxt
    return -1 if dp[full] == INF else dp[full]
def main():
    """Read N M and M lines of "<items> <cost>", then print the minimum cost."""
    n, m = map(int, input().split())
    s_list = [""] * m
    c_list = [0] * m
    for i in range(m):
        s, c = input().split()
        s_list[i] = s
        c_list[i] = int(c)
    res = solve(n, m, s_list, c_list)
    print(res)
def test():
    # Sample cases from the problem statement; raises AssertionError on
    # regression before main() ever reads stdin.
    assert solve(3, 4, ["YYY", "YYN", "YNY", "NYY"], [100, 20, 10, 25]) == 30
    assert solve(5, 4, ["YNNNN", "NYNNN", "NNYNN", "NNNYN"], [10, 10, 10, 10]) == -1
if __name__ == "__main__":
    # Self-check against the samples first, then solve the real input.
    test()
    main()
|
[
"cashfeg@gmail.com"
] |
cashfeg@gmail.com
|
5fcafa9dc665d308fac15b10dc9dda4e4a8b26f0
|
45b64f620e474ac6d6b2c04fbad2730f67a62b8e
|
/Varsity-Final-Project-by-Django-master/.history/project/quiz/models_20210424121724.py
|
e9feec73e98c6b749012c04f069e8911025079ec
|
[] |
no_license
|
ashimmitra/Final-Project
|
99de00b691960e25b1ad05c2c680015a439277e0
|
a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003
|
refs/heads/master
| 2023-04-11T06:12:35.123255
| 2021-04-26T15:41:52
| 2021-04-26T15:41:52
| 361,796,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,603
|
py
|
from django.db import models
# Create your models here.
class Quiz(models.Model):
    """Multiple-choice question: text, four options and the correct answer.

    NOTE(review): the seven other models in this file repeat this exact
    schema; an abstract base model would remove the duplication (new
    migrations would be needed, so it is flagged rather than changed).
    """
    question = models.CharField(max_length = 500)
    option1 = models.CharField(max_length = 20)
    option2 = models.CharField(max_length = 20)
    option3 = models.CharField(max_length = 20)
    option4 = models.CharField(max_length = 20)
    answer = models.CharField(max_length = 20)
class Bangla(models.Model):
    """Question bank for the Bangla category (schema identical to Quiz)."""
    question = models.CharField(max_length = 500)
    option1 = models.CharField(max_length = 20)
    option2 = models.CharField(max_length = 20)
    option3 = models.CharField(max_length = 20)
    option4 = models.CharField(max_length = 20)
    answer = models.CharField(max_length = 20)
class Math(models.Model):
    """Question bank for the Math category (schema identical to Quiz)."""
    question = models.CharField(max_length = 500)
    option1 = models.CharField(max_length = 20)
    option2 = models.CharField(max_length = 20)
    option3 = models.CharField(max_length = 20)
    option4 = models.CharField(max_length = 20)
    answer = models.CharField(max_length = 20)
class Science(models.Model):
    """Question bank for the Science category (schema identical to Quiz)."""
    question = models.CharField(max_length = 500)
    option1 = models.CharField(max_length = 20)
    option2 = models.CharField(max_length = 20)
    option3 = models.CharField(max_length = 20)
    option4 = models.CharField(max_length = 20)
    answer = models.CharField(max_length = 20)
class GK(models.Model):
    """Question bank for general knowledge (schema identical to Quiz)."""
    question = models.CharField(max_length = 500)
    option1 = models.CharField(max_length = 20)
    option2 = models.CharField(max_length = 20)
    option3 = models.CharField(max_length = 20)
    option4 = models.CharField(max_length = 20)
    answer = models.CharField(max_length = 20)
class Mat(models.Model):
    """Second Math-style question bank — purpose vs `Math` unclear from here;
    confirm against the views that query it. Schema identical to Quiz."""
    question = models.CharField(max_length = 500)
    option1 = models.CharField(max_length = 20)
    option2 = models.CharField(max_length = 20)
    option3 = models.CharField(max_length = 20)
    option4 = models.CharField(max_length = 20)
    answer = models.CharField(max_length = 20)
class Sci(models.Model):
    """Second Science-style question bank — purpose vs `Science` unclear from
    here; confirm against the views that query it. Schema identical to Quiz."""
    question = models.CharField(max_length = 500)
    option1 = models.CharField(max_length = 20)
    option2 = models.CharField(max_length = 20)
    option3 = models.CharField(max_length = 20)
    option4 = models.CharField(max_length = 20)
    answer = models.CharField(max_length = 20)
class GNK(models.Model):
    """Second GK-style question bank — purpose vs `GK` unclear from here;
    confirm against the views that query it. Schema identical to Quiz."""
    question = models.CharField(max_length = 500)
    option1 = models.CharField(max_length = 20)
    option2 = models.CharField(max_length = 20)
    option3 = models.CharField(max_length = 20)
    option4 = models.CharField(max_length = 20)
    answer = models.CharField(max_length = 20)
|
[
"34328617+ashimmitra@users.noreply.github.com"
] |
34328617+ashimmitra@users.noreply.github.com
|
1d1d26c8fc8487564347ded0561a2639a59ac73e
|
2fc1e3382ae1024b008004ef6302914b492f51e1
|
/cos/models/const.py
|
ce15e48b226aeb7293242c88f466838c589fd636
|
[] |
no_license
|
tclh123/COS
|
abf475b32cb45e6f099508675a8138dd2211e963
|
7e4843fbfe67f7e795eccffc3b48270b152ba438
|
refs/heads/master
| 2016-09-15T21:43:55.956184
| 2014-01-04T17:46:50
| 2014-01-04T17:46:50
| 15,521,178
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# coding=utf-8
# Shared constants: permission bit flags, order status codes and payment
# method codes.

# Permission bits — combine with bitwise OR.
PERM_BASIC = 0b001
PERM_ADMIN = 0b010
PERM_DELIVERY = 0b100

# Order life-cycle status codes, in chronological order (translated notes
# below, one per status).
ORDER_STATUS_SUBMIT = 1
ORDER_STATUS_PAY = 2
ORDER_STATUS_WAIT = 3
ORDER_STATUS_DELIVERY = 4
ORDER_STATUS_OVER = 5
# 'waiting for the buyer to complete and submit the order'
# 'waiting for the buyer to pay'
# 'waiting for the restaurant to process the order'
# 'waiting for the courier to deliver'
# 'order finished'

# Payment method codes; the keys are the user-facing (Chinese) labels:
# cash on delivery (1), pay from wages (2), online payment (3).
PAYMENT = {
    u'货到付款': 1,
    u'工资支付': 2,
    u'网上支付': 3,
}
|
[
"tclh123@gmail.com"
] |
tclh123@gmail.com
|
eea2161b70cf5a3ecfebef0a04cfa64e7e059291
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_corrective.py
|
fc4041522f6dce91bdc98602bc6f91a6cf0a525b
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
#calss header
class _CORRECTIVE():
def __init__(self,):
self.name = "CORRECTIVE"
self.definitions = [u'intended to improve a situation: ', u'used to refer to something that is intended to cure a medical condition: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9d42ae4909b6084829161c508e2a65ffa42c2b2a
|
a9e3b2e87563acf39ce74c5e57aa4ad5c13404bf
|
/cartridge/shop/admin.py
|
2cae83122ba67a49a11d3d4dce0a7b0f2428ceaf
|
[
"BSD-3-Clause"
] |
permissive
|
CDC/cartridge
|
40cf531f2912637f9bde261baf6b52e94aed388f
|
b9229617bbf054f1958bf5f2fadaf523d5d72522
|
refs/heads/master
| 2021-01-20T23:37:26.759210
| 2011-12-19T23:30:15
| 2011-12-19T23:30:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,718
|
py
|
from copy import deepcopy
from django.contrib import admin
from django.db.models import ImageField
from django.utils.translation import ugettext_lazy as _
from mezzanine.core.admin import DisplayableAdmin, TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
from cartridge.shop.fields import MoneyField
from cartridge.shop.forms import ProductAdminForm, ProductVariationAdminForm
from cartridge.shop.forms import ProductVariationAdminFormset
from cartridge.shop.forms import DiscountAdminForm, ImageWidget, MoneyWidget
from cartridge.shop.models import Category, Product, ProductImage
from cartridge.shop.models import ProductVariation, ProductOption, Order
from cartridge.shop.models import OrderItem, Sale, DiscountCode
# Lists of field names.
option_fields = [f.name for f in ProductVariation.option_fields()]
# Order model fields grouped by name prefix, reused for admin fieldsets
# and search configuration below.
billing_fields = [f.name for f in Order._meta.fields
                  if f.name.startswith("billing_detail")]
shipping_fields = [f.name for f in Order._meta.fields
                   if f.name.startswith("shipping_detail")]

# Extend the stock Page admin fieldsets: insert the content editor and
# append a collapsed "Product filters" section.
category_fieldsets = deepcopy(PageAdmin.fieldsets)
category_fieldsets[0][1]["fields"][3:3] = ["content"] # , "products"]
category_fieldsets += ((_("Product filters"), {
    "fields": ("options", "sale", ("price_min", "price_max"), "combined"),
    "classes": ("collapse-closed",)},),)
class CategoryAdmin(PageAdmin):
    """Admin for shop categories: page admin plus image widget and the
    horizontal picker for product-filter options."""
    fieldsets = category_fieldsets
    formfield_overrides = {ImageField: {"widget": ImageWidget}}
    filter_horizontal = ("options",) # "products", )
class ProductVariationAdmin(admin.TabularInline):
    """Inline editor for a product's existing variations (no blank extras;
    money-formatted price fields; custom form/formset for validation)."""
    verbose_name_plural = _("Current variations")
    model = ProductVariation
    fields = ("sku", "default", "num_in_stock", "unit_price", "sale_price",
        "sale_from", "sale_to", "image")
    extra = 0
    formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
    form = ProductVariationAdminForm
    formset = ProductVariationAdminFormset
class ProductImageAdmin(TabularDynamicInlineAdmin):
    """Dynamic inline for product images with thumbnail upload widget."""
    model = ProductImage
    formfield_overrides = {ImageField: {"widget": ImageWidget}}
# Build the Product admin fieldsets from the Displayable defaults, append
# the related/upsell products section, and insert the variation-creation
# option fields near the top.
product_fieldsets = deepcopy(DisplayableAdmin.fieldsets)
product_fieldsets[0][1]["fields"].extend(["available", "categories",
    "content"])
product_fieldsets = list(product_fieldsets)
product_fieldsets.append((_("Other products"),
    {"classes": ("collapse-closed",), "fields": ("related_products",
    "upsell_products")}))
product_fieldsets.insert(1, (_("Create new variations"),
    {"classes": ("create-variations",), "fields": option_fields}))
class ProductAdmin(DisplayableAdmin):
    """Product admin with inline images/variations; variation creation and
    default-variation bookkeeping happen in the save hooks below."""
    list_display = ("admin_thumb", "title", "status", "available",
        "admin_link")
    list_display_links = ("admin_thumb", "title")
    list_editable = ("status", "available")
    list_filter = ("status", "available", "categories")
    filter_horizontal = ("categories", "related_products", "upsell_products")
    search_fields = ("title", "content", "categories__title",
        "variations__sku")
    inlines = (ProductImageAdmin, ProductVariationAdmin)
    form = ProductAdminForm
    fieldsets = product_fieldsets

    def save_model(self, request, obj, form, change):
        """
        Store the product object for creating variations in save_formset.
        """
        super(ProductAdmin, self).save_model(request, obj, form, change)
        # Stashed on self so save_formset (called next) can reach it.
        self._product = obj

    def save_formset(self, request, form, formset, change):
        """
        Create variations for selected options if they don't exist, manage the
        default empty variation creating it if no variations exist or removing
        it if multiple variations exist, and copy the pricing and image fields
        from the default variation to the product.
        """
        super(ProductAdmin, self).save_formset(request, form, formset, change)
        # Only act on the variations formset, not the images formset.
        if isinstance(formset, ProductVariationAdminFormset):
            # Option values ticked in the "Create new variations" section.
            options = dict([(f, request.POST.getlist(f)) for f in option_fields
                            if request.POST.getlist(f)])
            self._product.variations.create_from_options(options)
            self._product.variations.manage_empty()
            self._product.copy_default_variation()
class ProductOptionAdmin(admin.ModelAdmin):
    """Admin for option values (e.g. size/colour), grouped by option type."""
    ordering = ("type", "name")
    list_display = ("type", "name")
    list_display_links = ("type",)
    list_editable = ("name",)
    list_filter = ("type",)
    search_fields = ("type", "name")
    radio_fields = {"type": admin.HORIZONTAL}
class OrderItemInline(admin.TabularInline):
    """Read/edit the line items of an order inline, with money widgets."""
    verbose_name_plural = _("Items")
    model = OrderItem
    extra = 0
    formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
class OrderAdmin(admin.ModelAdmin):
    """Order admin: billing/shipping detail groups, inline items, and
    status editing straight from the change list."""
    ordering = ("status", "-id")
    list_display = ("id", "billing_name", "total", "time", "status",
        "transaction_id", "invoice")
    list_editable = ("status",)
    list_filter = ("status", "time")
    list_display_links = ("id", "billing_name",)
    # Search covers ids plus every billing/shipping detail field.
    search_fields = (["id", "status", "transaction_id"] +
                     billing_fields + shipping_fields)
    date_hierarchy = "time"
    radio_fields = {"status": admin.HORIZONTAL}
    inlines = (OrderItemInline,)
    formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
    fieldsets = (
        (_("Billing details"), {"fields": (tuple(billing_fields),)}),
        (_("Shipping details"), {"fields": (tuple(shipping_fields),)}),
        (None, {"fields": ("additional_instructions", ("shipping_total",
            "shipping_type"), ("discount_total", "discount_code"),
            "item_total", ("total", "status"), "transaction_id")}),
    )
class SaleAdmin(admin.ModelAdmin):
    """Admin for time-boxed sales applying one of three discount types
    (deduct / percent / exact) to products or whole categories."""
    list_display = ("title", "active", "discount_deduct", "discount_percent",
        "discount_exact", "valid_from", "valid_to")
    list_editable = ("active", "discount_deduct", "discount_percent",
        "discount_exact", "valid_from", "valid_to")
    filter_horizontal = ("categories", "products")
    formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
    form = DiscountAdminForm
    fieldsets = (
        (None, {"fields": ("title", "active")}),
        (_("Apply to product and/or products in categories"),
            {"fields": ("products", "categories")}),
        (_("Reduce unit price by"),
            {"fields": (("discount_deduct", "discount_percent",
            "discount_exact"),)}),
        (_("Sale period"), {"fields": (("valid_from", "valid_to"),)}),
    )
class DiscountCodeAdmin(admin.ModelAdmin):
    """Admin for checkout discount codes: code string, discount amount or
    percentage, minimum purchase, free shipping, and validity window."""
    list_display = ("title", "active", "code", "discount_deduct",
        "discount_percent", "min_purchase", "free_shipping", "valid_from",
        "valid_to")
    list_editable = ("active", "code", "discount_deduct", "discount_percent",
        "min_purchase", "free_shipping", "valid_from", "valid_to")
    filter_horizontal = ("categories", "products")
    formfield_overrides = {MoneyField: {"widget": MoneyWidget}}
    form = DiscountAdminForm
    fieldsets = (
        (None, {"fields": ("title", "active", "code")}),
        (_("Apply to product and/or products in categories"),
            {"fields": ("products", "categories")}),
        (_("Reduce unit price by"),
            {"fields": (("discount_deduct", "discount_percent"),)}),
        (None, {"fields": (("min_purchase", "free_shipping"),)}),
        (_("Valid for"), {"fields": (("valid_from", "valid_to"),)}),
    )
# Register every model with its admin class on the default admin site.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductOption, ProductOptionAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(Sale, SaleAdmin)
admin.site.register(DiscountCode, DiscountCodeAdmin)
|
[
"steve@jupo.org"
] |
steve@jupo.org
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.