blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
65a3424eb666d1b95d64e170a548eba3ee06e8ea
|
1bd3076902117867ec048210905195ba2aaaaa6b
|
/exercise/leetcode/python_src/by2017_Sep/Leet081.py
|
031e578b7c55dcfc5b24e1ee98786a96056655d6
|
[] |
no_license
|
SS4G/AlgorithmTraining
|
d75987929f1f86cd5735bc146e86b76c7747a1ab
|
7a1c3aba65f338f6e11afd2864dabd2b26142b6c
|
refs/heads/master
| 2021-01-17T20:54:31.120884
| 2020-06-03T15:04:10
| 2020-06-03T15:04:10
| 84,150,587
| 2
| 0
| null | 2017-10-19T11:50:38
| 2017-03-07T03:33:04
|
Python
|
UTF-8
|
Python
| false
| false
| 196
|
py
|
class Solution(object):
    def search(self, nums, target):
        """Return True when *target* occurs in *nums*, else False.

        :type nums: List[int]
        :type target: int
        :rtype: bool
        """
        # A direct membership test is O(n) worst case; the original built an
        # intermediate set — also O(n), but with an extra O(n) allocation for
        # a single lookup.
        return target in nums
|
[
"ziheng_song@126.com"
] |
ziheng_song@126.com
|
4c05e2be63b355c4b1e20763ff2b62cee65c0990
|
2ef5b78a1a750ee33d86f36bba176796163e3933
|
/demo5/forms.py
|
4f222179f56bc614355a243b4e1381dc87a68970
|
[] |
no_license
|
LIZEJU/flask-demo
|
08f8419757dc4902239b89b3df9ea71ce918ad26
|
6ae201e3cc078b7a3fd18fb6d114b0b83f1b4b41
|
refs/heads/master
| 2020-09-27T05:58:09.742198
| 2020-01-30T11:04:41
| 2020-01-30T11:04:41
| 226,445,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
#encoding:utf-8
# 引入form基类
from flask_wtf import Form
# 引入form元素父类
from wtforms import StringField , PasswordField ,FileField
# 引入form验证父类
from wtforms.validators import DataRequired,Length
from wtforms.validators import InputRequired
from flask_wtf.file import FileRequired , FileAllowed
from flask_wtf.form import FlaskForm
# 登录表单类,继承form类
class BaseLogin(Form):
    """Login form (flask_wtf Form): username and password fields.

    Both fields are required and must be 6-16 characters; the validation
    messages and placeholders shown to users are in Chinese.
    """
    # Username field: required, length 6-16.
    name = StringField('name',validators=[DataRequired(message='用户名不能为空'),Length(6,16,message='长度位于6-16之间')],render_kw={'placeholder':'输入用户名'})
    # Password field: same validation rules as the username.
    password = PasswordField('password',validators=[DataRequired(message='密码不能为空'),Length(6,16,message='长度位于6-16之间')],render_kw={'placeholder':'输入密码'})
from wtforms import Form, FileField, StringField
from wtforms.validators import InputRequired
from flask_wtf.file import FileRequired, FileAllowed
class UploadForm(Form):
    """Upload form with one file field restricted to jpg/png/gif.

    NOTE(review): the second ``from wtforms import Form`` earlier in this
    file rebinds ``Form``, so this class extends wtforms' Form rather than
    flask_wtf's — confirm that is intentional.
    """
    file = FileField(validators=[FileRequired(), # a file must be uploaded
                                 FileAllowed(['jpg', 'png', 'gif']) # only these image extensions are accepted
                                 ])
|
[
"m18611694189@163.com"
] |
m18611694189@163.com
|
2f6d03026c09a7ce173a67c3e1969dc363abb6b6
|
e6bc1f55371786dad70313eb468a3ccf6000edaf
|
/Extras/matrix/Correct/s030.py
|
c58235fbf2b318bd0371e500ca863583fa7ce813
|
[] |
no_license
|
prateksha/Source-Code-Similarity-Measurement
|
9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0
|
fb371b837917794d260a219a1ca09c46a5b15962
|
refs/heads/master
| 2023-01-04T07:49:25.138827
| 2020-10-25T14:43:57
| 2020-10-25T14:43:57
| 285,744,963
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
#!/usr/bin/python3
# MatrixError
# Your code - begin
class MatrixError(Exception):
    """Raised when a Matrix is malformed or operand dimensions mismatch."""
    # Your code - end
class Matrix:
    """A 2-D integer/float matrix backed by nested lists.

    The constructor validates its argument (non-empty list of non-empty,
    equal-length list rows) and stores a deep copy so later mutation of
    the caller's list cannot corrupt the matrix. All operations return
    new Matrix objects and raise MatrixError on invalid input.
    """

    def __init__(self, m):
        """Validate *m* and store a deep copy; raise MatrixError if invalid."""
        # check if m is empty
        if len(m) == 0:
            raise MatrixError("List Empty")
        # check if all rows are non-empty lists
        for row in m:
            if not isinstance(row, list) or len(row) == 0:
                raise MatrixError("Rows must be non-empty lists")
        # check if all rows are of the same length
        width = len(m[0])
        if any(len(row) != width for row in m):
            raise MatrixError("Rows must all have the same length")
        # create matrix attribute using deep copy
        self._matrix = Matrix.deep_copy(m)

    def matrix(self):
        """Return the matrix as a deep-copied nested list."""
        return Matrix.deep_copy(self._matrix)

    def dimensions(self):
        """Return (number of rows, number of columns) as a tuple."""
        return (len(self._matrix), len(self._matrix[0]))

    def add(self, m):
        """Return a new Matrix holding the element-wise sum self + m."""
        if self.dimensions() != m.dimensions():
            raise MatrixError("Dimension mismatch in add")
        other = m.matrix()
        return Matrix([[a + b for a, b in zip(row, orow)]
                       for row, orow in zip(self._matrix, other)])

    def multiply(self, m):
        """Return the matrix product self * m as a new Matrix."""
        rows, cols = self.dimensions()
        mrows, mcols = m.dimensions()
        if cols != mrows:
            raise MatrixError("Dimension mismatch in multiply")
        other = m.matrix()
        return Matrix([[sum(self._matrix[i][k] * other[k][j]
                            for k in range(cols))
                        for j in range(mcols)]
                       for i in range(rows)])

    def transpose(self):
        """Return a new Matrix that is the transpose of self."""
        return Matrix([list(col) for col in zip(*self._matrix)])

    @staticmethod
    def deep_copy(m):
        """Return a row-by-row copy of the nested list *m*."""
        return [list(row) for row in m]

    def __str__(self):
        return str(self.matrix())
if __name__ == "__main__":
    # Smoke test of the Matrix API.
    m1 = Matrix([[1, 2, 3], [3, 4, 5]])
    m2 = Matrix([[10, 20, 30], [30, 40, 50]])
    print("sum1 = ", str(m1.add(m2)))
    print("sum2 = ", str(m2.add(m1)))
    # NOTE(review): m1 and m2 are both 2x3, so this product is
    # dimensionally invalid and should fail once multiply is implemented.
    print("product1 = ", m1.multiply(m2))
|
[
"pratekshau@gmail.com"
] |
pratekshau@gmail.com
|
f59576e84f7ea29af8679e9e3cc8f1dd93b936f5
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/minWindow_20200618160003.py
|
227eef656edcd857940c47b680c4c4aa56bf8fce
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
# for this question we have two pointers
# left pointer
# right pointer
# we move the right pointer and maintain the position of the left pointer
# when we find the word we move the left pointer
# store that word its shortest form
# we keep moving the right pointer
# sz,azjskfzts
def minWindow(str1, str2):
    """Return the smallest substring of *str2* containing every distinct
    character of *str1*; return '' when no such window exists.

    Classic two-pointer sliding window (the approach sketched in the
    comments above): extend the right edge until the window covers all
    required characters, then contract the left edge while it still
    does, recording the shortest valid window seen.
    """
    need = set(str1)
    if not need or not str2:
        return ''
    missing = set(need)      # required characters not yet inside the window
    counts = {}              # per required character, count inside the window
    best = ''
    left = 0
    for right, ch in enumerate(str2):
        if ch in need:
            counts[ch] = counts.get(ch, 0) + 1
            missing.discard(ch)
        # Window [left, right] is valid while nothing is missing; shrink it.
        while not missing:
            if not best or right - left + 1 < len(best):
                best = str2[left:right + 1]
            drop = str2[left]
            if drop in need:
                counts[drop] -= 1
                if counts[drop] == 0:
                    del counts[drop]
                    missing.add(drop)
            left += 1
    return best


if __name__ == "__main__":
    print(minWindow("sz", "azjskfzts"))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
b96ac1eb8024992f0b224485416d49af4bfde378
|
8da91c26d423bacbeee1163ac7e969904c7e4338
|
/pyvisdk/do/host_virtual_nic_spec.py
|
c946f61307dcc38838e49156a3cc38c9e730a3f0
|
[] |
no_license
|
pexip/os-python-infi-pyvisdk
|
5d8f3a3858cdd61fb76485574e74ae525cdc7e25
|
1aadea0afbc306d09f6ecb9af0e683dbbf961d20
|
refs/heads/master
| 2023-08-28T02:40:28.789786
| 2020-07-16T04:00:53
| 2020-07-16T04:00:53
| 10,032,240
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostVirtualNicSpec(vim, *args, **kwargs):
    '''This data object type describes the VirtualNic configuration containing both
    the configured properties on a VirtualNic and identification information.'''
    # Create the SOAP factory object for this vim25 type, then copy the
    # caller's positional/keyword values onto it as attributes.
    obj = vim.client.factory.create('{urn:vim25}HostVirtualNicSpec')
    # do some validation checking...
    # NOTE(review): len(args) + len(kwargs) can never be < 0, so this
    # IndexError is dead code (the generator emits it this way because the
    # required list below is empty). Do not "fix" by hand — the file header
    # says it is automatically generated.
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))
    required = [ ]
    optional = [ 'distributedVirtualPort', 'ip', 'mac', 'mtu', 'portgroup', 'tsoEnabled',
        'dynamicProperty', 'dynamicType' ]
    # Positional args map onto property names in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)
    # Keyword args must name a known property; anything else is rejected.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
    return obj
|
[
"jmb@pexip.com"
] |
jmb@pexip.com
|
d4b95ac7eb2b29dd8d737aed9ed3f2ef6766e308
|
67c957b74e22bb191b9941cd753642516f32f27d
|
/name_uniq.py
|
bec11f03d47fcfd69317da3b4e58ddeaa4ccd673
|
[] |
no_license
|
chenchiyuan/jobeasy
|
a1ddc567901234b96d69658791f280bdfca43215
|
0a9cb465e1f8b4068069330b58f418890bde407b
|
refs/heads/master
| 2016-09-05T20:58:11.679848
| 2014-04-08T07:18:27
| 2014-04-08T07:18:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,301
|
py
|
# -*- coding: utf-8 -*-
# __author__ = chenchiyuan
# 通过POI名来判重, 必须内网跑
from __future__ import division, unicode_literals, print_function
import requests
import json
import base64
class ExcelHelper(object):
    """Minimal .xls helper built on xlwt (write) and xlrd (read)."""

    @classmethod
    def write(cls, path, data, encoding="utf-8"):
        """Write *data* (an iterable of rows) into 'sheet1' of *path*."""
        import xlwt
        workbook = xlwt.Workbook(encoding)
        worksheet = workbook.add_sheet('sheet1')
        for i, line in enumerate(data):
            for j, text in enumerate(line):
                worksheet.write(i, j, label=text)
        workbook.save(path)

    @classmethod
    def read(cls, path):
        """Yield (column-0 value, stripped column-1 value) for each row of
        the first sheet, starting at row 1 (row 0 is treated as header)."""
        import xlrd
        workbook = xlrd.open_workbook(path)
        sheet = workbook.sheets()[0]
        for row in range(1, sheet.nrows):
            yield sheet.cell(row, 0).value, sheet.cell(row, 1).value.strip()
def get_data(url):
    """Fetch *url* with the Host header pinned to Baidu's place service
    and return the raw response body bytes."""
    response = requests.get(url, headers={"host": "place.map.baidu.com"})
    return response.content
def name_to_bid(name):
    """Resolve a POI *name* to its Baidu primary_uid.

    First queries with the hard-coded city code 131; when the response
    carries no primary_uid, retries scoped to the city code found in the
    first response. On final failure, prints the failing URL and raises
    a bare Exception.
    """
    url = "http://api.map.baidu.com/?qt=s&wd=%s&rn=10&ie=utf-8&oue=1&res=api&c=131" % name
    data = json.loads(get_data(url))
    try:
        result = data['content'][0]['primary_uid']
    except Exception:
        # No direct hit: extract the city code and retry with it.
        try:
            hot_city = data['content'][0]['code']
        except:
            print(url)
            raise Exception()
        url = "http://api.map.baidu.com/?qt=s&wd=%s&rn=10&ie=utf-8&oue=1&res=api&c=%s" % (name, hot_city)
        data = json.loads(get_data(url))
        try:
            result = data['content'][0]['primary_uid']
        except:
            print(url)
            raise Exception()
    return result
def call_curl(url):
    """Shell out to curl (Host header pinned to place.map.baidu.com) and
    return its stdout as bytes."""
    import subprocess
    process = subprocess.Popen(
        ["curl", "--header", 'Host: place.map.baidu.com', url],
        stdout=subprocess.PIPE)
    stdout, _err = process.communicate()
    return stdout
def get_poi(x, y):
    """Convert (x, y) through Baidu's coordinate-conversion API and return
    the decoded (x, y) pair; the API returns both values base64-encoded.

    NOTE(review): base64.decodestring is the Python-2-era name (removed in
    Python 3.9) — this module targets Python 2 (see the except syntax used
    elsewhere in this file).
    """
    url = "http://api.map.baidu.com/ag/coord/convert?from=5&to=2&x=%s&y=%s" % (x, y)
    json_data = json.loads(get_data(url))
    return base64.decodestring(json_data['x']), base64.decodestring(json_data['y'])
def gen_info(bid):
    """Fetch POI details for *bid* from the internal place service and
    return them as a plain dict (decoded point coordinates included)."""
    url = "http://cq01-map-place00.cq01.baidu.com:8881/1/di/0/get.json?qt=13&nc=1&uid_list=%s" % bid
    payload = json.loads(call_curl(url))
    detail = payload['data'][bid]
    point_x, point_y = get_poi(detail['point_x'], detail['point_y'])
    return {
        "name": detail['name'],
        "address": detail['address'],
        "phone": detail['phone'],
        "point_x": point_x,
        "point_y": point_y,
        "city_name": detail['city_name']
    }
def parse_name(name):
    """Resolve a POI *name* to its bid, then fetch and return its details."""
    return gen_info(name_to_bid(name))
def parse_names(path, names):
    """Resolve each (data_id, name) pair via parse_name and write all rows
    to the Excel file at *path*.

    Rows that fail to resolve (network error, no match) fall back to a
    row with empty detail columns instead of aborting the whole batch.
    """
    data = []
    for data_id, name in names:
        try:
            info = parse_name(name)
            line = [data_id, info['name'], info['city_name'], info['address'], info['phone'], info['point_y'], info['point_x'], ]
        # Fixed: ``except Exception, err`` is Python-2-only syntax (and the
        # binding was unused); a bare except-clause works on both 2 and 3.
        except Exception:
            # Best-effort: keep id/name so output row count matches input.
            line = [data_id, name, "", "", "", "", ""]
        data.append(line)
    ExcelHelper.write(path, data)
if __name__ == "__main__":
    from_path = "/Users/shadow/Desktop/imax.xlsx"
    # NOTE(review): the leading double slash is harmless on POSIX but looks
    # like a typo — confirm "/Users/..." was intended.
    to_path = "//Users/shadow/Desktop/result.xlsx"
    names = ExcelHelper.read(from_path)
    parse_names(to_path, names)
|
[
"chenchiyuan03@gmail.com"
] |
chenchiyuan03@gmail.com
|
31404902b27fdb152d6280d2618358c91f58c56e
|
6b791247919f7de90c8402abcca64b32edd7a29b
|
/lib/coginvasion/gags/Geyser.py
|
49fb02d48689e8123aa8b4780d597b0a706f7552
|
[
"Apache-2.0"
] |
permissive
|
theclashingfritz/Cog-Invasion-Online-Dump
|
a9bce15c9f37b6776cecd80b309f3c9ec5b1ec36
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
refs/heads/master
| 2021-01-04T06:44:04.295001
| 2020-02-14T05:23:01
| 2020-02-14T05:23:01
| 240,434,213
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,238
|
py
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.gags.Geyser
from lib.coginvasion.gags.SquirtGag import SquirtGag
from lib.coginvasion.gags.ChargeUpGag import ChargeUpGag
from lib.coginvasion.gags import GagGlobals
from lib.coginvasion.globals import CIGlobals
from direct.interval.IntervalGlobal import Sequence, Wait, Func, LerpScaleInterval
from direct.interval.IntervalGlobal import ActorInterval, LerpPosInterval, Parallel
from direct.interval.IntervalGlobal import SoundInterval
from panda3d.core import Point3
class Geyser(SquirtGag, ChargeUpGag):
    """Geyser gag (decompiled source): a chargeable squirt gag that spawns
    a geyser prop under each targeted cog and flings the cog upward."""

    def __init__(self):
        SquirtGag.__init__(self, CIGlobals.Geyser, GagGlobals.getProp(5, 'geyser'), 105, GagGlobals.GEYSER_HIT_SFX, None, None, None, None, None, None, 1, 1)
        ChargeUpGag.__init__(self, 24, 10, 50, 0.5, maxCogs=4)
        self.setImage('phase_3.5/maps/geyser.png')
        # Live [water, mound] model pairs currently in the scene.
        self.entities = []
        self.timeout = 3.0
        return

    def start(self):
        """Start both parent behaviors (squirt plus charge-up tracking)."""
        SquirtGag.start(self)
        ChargeUpGag.start(self, self.avatar)

    def unEquip(self):
        SquirtGag.unEquip(self)
        ChargeUpGag.unEquip(self)

    def buildGeyser(self):
        """Load the geyser model twice and strip each copy down: one keeps
        only the water spout, the other only the mound. Returns the
        [water, mound] pair and records it in self.entities."""

        def clearNodes(entity, paths):
            # Remove every collected node path (py2: xrange).
            for i in xrange(paths.getNumPaths()):
                paths[i].removeNode()

        geyserWater = loader.loadModel(self.model)
        waterRemoveSet = geyserWater.findAllMatches('**/hole')
        waterRemoveSet.addPathsFrom(geyserWater.findAllMatches('**/shadow'))
        clearNodes(geyserWater, waterRemoveSet)
        geyserMound = loader.loadModel(self.model)
        moundRemoveSet = geyserMound.findAllMatches('**/Splash*')
        moundRemoveSet.addPathsFrom(geyserMound.findAllMatches('**/spout'))
        clearNodes(geyserMound, moundRemoveSet)
        entitySet = [
            geyserWater, geyserMound]
        self.entities.append(entitySet)
        return entitySet

    def removeEntity(self, entity):
        """Drop *entity* from the live-entities list."""
        for iEntity in self.entities:
            if iEntity == entity:
                self.entities.remove(iEntity)

    def onActivate(self, ignore, cog):
        self.startEntity(self.buildGeyser(), cog)

    def startEntity(self, entity, cog):
        """Play the full geyser sequence for one cog: show the props,
        scale the water up and away, and launch the cog into the air."""
        geyserHold = 1.5
        scaleUpPoint = Point3(1.8, 1.8, 1.8)
        geyserWater = entity[0]
        geyserMound = entity[1]

        def showEntity(entity, cog):
            # Parent the prop into the scene at the cog's position.
            entity.reparentTo(render)
            entity.setPos(cog.getPos())

        def __getGeyserTrack():
            # Props appear, water scales up, holds, scales back down; both
            # props and the entity record are then cleaned up.
            track = Sequence(Func(showEntity, geyserMound, cog), Func(showEntity, geyserWater, cog), LerpScaleInterval(geyserWater, 1.0, scaleUpPoint, startScale=GagGlobals.PNT3NEAR0), Wait(0.5 * geyserHold), LerpScaleInterval(geyserWater, 0.5, GagGlobals.PNT3NEAR0, startScale=scaleUpPoint), LerpScaleInterval(geyserMound, 0.5, GagGlobals.PNT3NEAR0), Func(geyserWater.removeNode), Func(geyserMound.removeNode), Func(self.removeEntity, entity))
            return track

        def __getCogTrack():

            def handleHit():
                # Only the local avatar reports the hit to the server.
                if self.isLocal():
                    cog.sendUpdate('hitByGag', [self.getID()])

            startPos = cog.getPos(render)
            cogFloat = Point3(0, 0, 14)
            cogEndPos = Point3(startPos[0] + cogFloat[0], startPos[1] + cogFloat[1], startPos[2] + cogFloat[2])
            # Flail animation frame depends on the suit body type.
            suitType = cog.suitPlan.getSuitType()
            if suitType == 'A':
                startFlailFrame = 16
                endFlailFrame = 16
            else:
                startFlailFrame = 15
                endFlailFrame = 15
            track = Sequence()
            track.append(Func(cog.d_disableMovement))
            track.append(Wait(0.5))
            slipIval = Sequence(ActorInterval(cog, 'slip-backward', playRate=0.5, startFrame=0, endFrame=startFlailFrame - 1), Func(cog.pingpong, 'slip-backward', fromFrame=startFlailFrame, toFrame=endFlailFrame), Wait(0.5), Parallel(ActorInterval(cog, 'slip-backward', playRate=1.0, startFrame=endFlailFrame), Func(cog.startRay), Func(handleHit)))
            slipUp = LerpPosInterval(cog, 1.1, cogEndPos, startPos=startPos, fluid=1)
            slipDn = LerpPosInterval(cog, 0.6, startPos, startPos=cogEndPos, fluid=1)
            geyserMotion = Sequence(slipUp, slipDn)
            track.append(Parallel(slipIval, geyserMotion))
            # NOTE(review): movement is re-enabled only when this hit would
            # finish the cog — condition looks inverted; confirm intent.
            if cog.getHealth() - self.getDamage() <= 0:
                track.append(Func(cog.d_enableMovement))
            return track

        if entity and cog:
            track = Sequence()
            track.append(Parallel(SoundInterval(self.hitSfx, node=self.avatar), Parallel(__getGeyserTrack(), __getCogTrack())))
            track.start()

    def release(self):
        """Fire the charged gag: notify the server, then start a geyser on
        every selected cog that is still alive."""
        ChargeUpGag.release(self)
        self.reset()
        if self.isLocal():
            base.localAvatar.sendUpdate('usedGag', [self.id])
        cogs = ChargeUpGag.getSelectedCogs(self)
        for cog in cogs:
            if cog.getHealth() > 0:
                geyser = self.buildGeyser()
                self.startEntity(geyser, cog)
                self.avatar.d_trapActivate(self.getID(), self.avatar.doId, 0, cog.doId)
|
[
"theclashingfritz@users.noreply.github.com"
] |
theclashingfritz@users.noreply.github.com
|
0c708aa7731e0845288916df910173d7240ce581
|
4560d7e3aa3be65ffaf102e780b44d6fab51cfd7
|
/fastapi_pagination/bases.py
|
c2726bfa5e3f5b2dd7437896047d9906fa145ce4
|
[
"MIT"
] |
permissive
|
mathbeal/fastapi-pagination
|
55235bcfb72feebdd0bad7e6bc7fcd3ba028e0bd
|
485acf9862316d4ca58657fa6896a9469e419387
|
refs/heads/main
| 2023-03-17T22:45:19.649532
| 2021-03-01T08:07:59
| 2021-03-01T08:07:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Generic, Sequence, Type, TypeVar
from pydantic.generics import GenericModel
from typing_extensions import Protocol
if TYPE_CHECKING:
from .params import LimitOffsetPaginationParams # pragma no cover
T = TypeVar("T")
C = TypeVar("C")
class AbstractParams(Protocol):
    """Structural interface for pagination parameter objects that can be
    converted to limit/offset form."""
    @abstractmethod
    def to_limit_offset(self) -> LimitOffsetPaginationParams:
        pass  # pragma: no cover
class AbstractPage(GenericModel, Generic[T], ABC):
    """Base generic page model; concrete pages build themselves from the
    fetched items, the total count, and the pagination params."""
    @classmethod
    @abstractmethod
    def create(cls: Type[C], items: Sequence[T], total: int, params: AbstractParams) -> C:
        pass # pragma: no cover

    class Config:
        # pydantic: permit non-pydantic item types (e.g. ORM objects).
        arbitrary_types_allowed = True
__all__ = [
"AbstractPage",
"AbstractParams",
]
|
[
"1998uriyyo@gmail.com"
] |
1998uriyyo@gmail.com
|
d36bbdba4d86a8634ff9481655aa289e125af4a7
|
1e013dc5f0de0f61e27f2867557803a01c01f4da
|
/Language/python/module/shutit/login_server.py
|
c64484bac2ae2c39af84814e4875ba008058b876
|
[] |
no_license
|
chengyi818/kata
|
a2941ce8675c6e7a47169a0eae4c757d3f6f5bf9
|
a7cb7ad499037bcc168aaa0eaba857b33c04ef14
|
refs/heads/master
| 2023-04-10T18:39:09.518433
| 2023-01-08T15:22:12
| 2023-01-08T15:22:12
| 53,040,540
| 1
| 0
| null | 2023-03-25T00:46:51
| 2016-03-03T10:06:58
|
C++
|
UTF-8
|
Python
| false
| false
| 314
|
py
|
#!/usr/bin/env python3
# Author: ChengYi
# Mail: chengyi818@foxmail.cn
# created time: Fri 30 Jun 2017 09:45:45 AM CST
# Demo of the shutit automation library: open a local bash session,
# ssh into a remote host, run one command, and log out.
import shutit
session = shutit.create_session('bash')
# NOTE(review): host/user/password are hard-coded placeholders; replace
# them before running.
session.login('ssh you@example.com', user='you',
              password="password")
session.send('hostname', echo=True)  # echo=True prints the remote output
session.logout()
|
[
"chengyi818@foxmail.com"
] |
chengyi818@foxmail.com
|
8aac425c473f78bdd1e3a956e3e20aea659288ae
|
e17e40dbb6ed8caaac5c23de29071b403637f5ae
|
/transformers_keras/tokenizers/space_tokenizer_test.py
|
d14ad108c617d1d66aa0f29a709712be005d2ca3
|
[
"Apache-2.0"
] |
permissive
|
Linessiex/transformers-keras
|
cb739075c8daab39d52dc6cd6bafe5e45f8259be
|
0bb576db356f575390815dc64840b78b8ecf6227
|
refs/heads/master
| 2020-11-25T05:58:09.448200
| 2019-09-23T09:13:59
| 2019-09-23T09:13:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,893
|
py
|
import os
import unittest
import tensorflow as tf
from transformers_keras.tokenizers.space_tokenizer import SpaceTokenizer
class SpaceTokenizerTest(unittest.TestCase):
    """Smoke tests for SpaceTokenizer.

    NOTE(review): these tests only print results (no assertions) and
    depend on files under ./testdata; testBuildFromVocab implicitly
    depends on testSaveVocabFile having written vocab.test.txt.
    """

    def buildTokenizer(self):
        # Helper: build a tokenizer from the bundled corpus file.
        tokenizer = SpaceTokenizer()
        corpus = ['train.tgt.txt']
        corpus = [os.path.join('testdata', f) for f in corpus]
        tokenizer.build_from_corpus(corpus)
        return tokenizer

    def testTokenize(self):
        tokenizer = self.buildTokenizer()
        print(tokenizer.token2id_dict)
        print(tokenizer.id2token_dict)
        print(tokenizer.vocab_size)

    def testConvertTokens2Ids(self):
        tokenizer = self.buildTokenizer()
        print('token2 id dict: ', tokenizer.token2id_dict)
        words = tf.constant(['I', 'am', 'a', 'developer'])
        v = tokenizer.encode(words)
        print(v)

    def testConvertIds2Tokens(self):
        tokenizer = self.buildTokenizer()
        print('id2token dict: ', tokenizer.id2token_dict)
        ids = tf.constant([1, 0, 2, 3, 4], dtype=tf.dtypes.int64)
        v = tokenizer.decode(ids)
        print(v)

    def testSaveVocabFile(self):
        tokenizer = self.buildTokenizer()
        tokenizer.save_to_vocab('testdata/vocab.test.txt')

    def testBuildFromVocab(self):
        # Round-trip: rebuild the tokenizer from the saved vocab file and
        # run encode/decode again.
        print('============start build from vocab=============')
        tokenizer = SpaceTokenizer()
        tokenizer.build_from_vocab('testdata/vocab.test.txt')
        print('token2id dict: ', tokenizer.token2id_dict)
        print('id2token dict: ', tokenizer.id2token_dict)
        words = tf.constant(['I', 'am', 'a', 'developer'])
        v0 = tokenizer.encode(words)
        print(v0)
        ids = tf.constant([1, 0, 2, 3, 4], dtype=tf.dtypes.int64)
        v1 = tokenizer.decode(ids)
        print(v1)
        print('============end build from vocab=============')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
[
"zhouyang.luo@gmail.com"
] |
zhouyang.luo@gmail.com
|
3742ae3ea8651e93bf19112552eca41e07d13d17
|
211fdc0564d9b003a82bb880d2e422dac85a5752
|
/correcciones/segunda_parte/examen_2/ExamenII-12-11499/ExamenII-12-11499/conjunto.py
|
3618d6877e24566f9f12339ab0226e071bff4c12
|
[] |
no_license
|
dvdalilue/ci2692_ene_mar_2017
|
45fa1833b4b3b49a1e5be33e58f01cb23bb2d6aa
|
1690e6429c2c5ec167d505642d3344b249257475
|
refs/heads/master
| 2021-01-20T01:32:43.668461
| 2017-04-24T22:50:01
| 2017-04-24T22:50:01
| 89,292,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
"""
Implementacion de el TAD Conjunto, con una estructura conjunto que representa
una lista enlazada simple, que va a contener elementos de tipo entero
Autor: Orlando Chaparro Carnet: 12-11499
Ultima modificacion:
Jueves, 30 de marzo de 2017 - Semana 12
"""
class Elemento:
    """A singly-linked-list node holding one integer of the set."""

    def __init__(self, e, n):
        """Create a node with payload *e* and successor node *n*."""
        self.next = n
        self.elemento = e
class Conjunto:
    """TAD Conjunto (set of integers) backed by a singly linked list.

    Fixes over the original template: ``agregar`` referenced an undefined
    name (``elemento``) and never advanced ``tail``, so at most two nodes
    ever linked; ``pertenece`` and ``interseccion`` had missing colons;
    ``interseccion`` only compared elements at matching positions and
    never returned its result in the non-empty branch; ``Mostrar``
    printed the node object instead of the stored element.
    """

    def crearConjunto(self):
        """Initialise an empty set (head, tail and element count).

        Kept as an explicit method (not __init__) to preserve the
        original TAD interface used by callers.
        """
        self.head = None
        self.tail = None
        self.cantidadElem = 0

    def agregar(self, e):
        """Append the integer *e* to the set's underlying list."""
        nuevo = Elemento(e, None)
        if self.cantidadElem == 0:
            self.head = nuevo
            self.tail = nuevo
        else:
            # Link after the current tail and advance it (the original
            # forgot to move ``tail``, silently dropping later elements).
            self.tail.next = nuevo
            self.tail = nuevo
        self.cantidadElem += 1

    def pertenece(self, e):
        """Return True when *e* is in the set, False otherwise."""
        if self.cantidadElem == 0:
            print("No existen elementos en el conjunto finito")
            return False
        aux = self.head
        while aux is not None and aux.elemento != e:
            aux = aux.next
        return aux is not None

    def union(self, conjunto):
        """Return a new Conjunto with the elements of self then *conjunto*."""
        ConjuntoFinal = Conjunto()
        ConjuntoFinal.crearConjunto()
        aux = self.head
        while aux is not None:
            ConjuntoFinal.agregar(aux.elemento)
            aux = aux.next
        aux = conjunto.head
        while aux is not None:
            ConjuntoFinal.agregar(aux.elemento)
            aux = aux.next
        return ConjuntoFinal

    def interseccion(self, conjunto):
        """Return a new Conjunto with the elements present in both sets."""
        ConjuntoFinal = Conjunto()
        ConjuntoFinal.crearConjunto()
        # Either side empty -> empty intersection (the original appended a
        # literal 0, which is a valid element value, not an empty marker).
        if self.cantidadElem == 0 or conjunto.cantidadElem == 0:
            return ConjuntoFinal
        aux = self.head
        while aux is not None:
            # Proper membership test instead of position-aligned compare.
            if conjunto.pertenece(aux.elemento):
                ConjuntoFinal.agregar(aux.elemento)
            aux = aux.next
        return ConjuntoFinal

    def Mostrar(self):
        """Print each element of the set, one per line."""
        aux = self.head
        while aux is not None:
            print(aux.elemento)
            aux = aux.next
|
[
"dvdalilue@gmail.com"
] |
dvdalilue@gmail.com
|
4ebb914c4cd0607646f94e6605538d7f8cdd6278
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/notifications/__init__.py
|
a892dc177947a1b63dc28a0a33fdf5cc5a9ed245
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720
| 2023-08-30T10:52:13
| 2023-08-30T10:52:13
| 175,209,828
| 13
| 14
|
MIT
| 2021-04-29T12:30:31
| 2019-03-12T12:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 558
|
py
|
from bitmovin_api_sdk.notifications.notifications_api import NotificationsApi
from bitmovin_api_sdk.notifications.webhooks.webhooks_api import WebhooksApi
from bitmovin_api_sdk.notifications.states.states_api import StatesApi
from bitmovin_api_sdk.notifications.emails.emails_api import EmailsApi
from bitmovin_api_sdk.notifications.notification_list_query_params import NotificationListQueryParams
from bitmovin_api_sdk.notifications.notification_state_entry_list_by_notification_id_query_params import NotificationStateEntryListByNotificationIdQueryParams
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
6e1d6cd2a8bfea5fab3e963a27b3b7ae78265dc3
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/cosmosdb/aaz/latest/cosmosdb/postgres/configuration/server/__cmd_group.py
|
d961af574f0abaf5e29fdf103bfbd24bcdb8d564
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 694
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# Generated command-group shim: registers the CLI path and carries no
# logic of its own (subcommands attach themselves to this group).
@register_command_group(
    "cosmosdb postgres configuration server",
    is_preview=True,
)
class __CMDGroup(AAZCommandGroup):
    """Manage Azure Cosmos DB for PostgreSQL server configurations.
    """
    pass
__all__ = ["__CMDGroup"]
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
e564ecc4fbbc90fe1f74ddf416902f8e65510058
|
de265eba4074121d53295550fb901016df9f5556
|
/django_service/Sample/management/commands/custom_createsuperuser.py
|
8ff37a94427784c22a3759a0bb6ad61912f559d0
|
[] |
no_license
|
1shikawa/django-service-ecs
|
d0ef169746c1baaf5eca3bb10d338b89969a10b8
|
dca521ebbeb812f6692cb77aedd727029d22e39c
|
refs/heads/master
| 2023-05-01T22:01:18.249844
| 2019-12-20T07:24:12
| 2019-12-20T07:24:12
| 183,669,797
| 0
| 0
| null | 2023-04-21T20:38:30
| 2019-04-26T17:33:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
from django.contrib.auth.management.commands import createsuperuser
from django.core.management import CommandError
class Command(createsuperuser.Command):
    help = 'Create a superuser with a password non-interactively'

    def add_arguments(self, parser):
        """Extend the stock createsuperuser arguments with --password."""
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--password', dest='password', default=None,
            help='Specifies the password for the superuser.',
        )

    def handle(self, *args, **options):
        """Create the superuser non-interactively if no user with the
        given email already exists.

        NOTE(review): the error message mentions --username, but only
        email and password are validated here and ``username`` is read
        but never used — confirm whether it should be required.
        """
        options.setdefault('interactive', False)
        username = options.get('username')
        email = options.get('email')
        password = options.get('password')
        database = options.get('database')
        if not (email and password):
            raise CommandError('--username, --email and --password are required options')
        user_data = {
            'email': email,
            'password': password,
        }
        # Idempotent: only create when no account with this email exists.
        exists = self.UserModel._default_manager.db_manager(database).filter(email=email).exists()
        if not exists:
            self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)
|
[
"ishikawa.toru@gmail.com"
] |
ishikawa.toru@gmail.com
|
d97dcb6e0ca0834d47adde8295161d6df7c1effb
|
7a88fc18f30d5dd3ac935877d4d9268a56c296be
|
/di_website/publications/migrations/0007_auto_20190924_1349.py
|
1907659178d26314ad37e2f4f1ba61a1d7f5b83d
|
[] |
no_license
|
devinit/DIwebsite-redesign
|
745a480b7ba0feffa34dc664548ee4c5a7b4d470
|
9ec46823c67cdd4f35be255896bf30d8f6362666
|
refs/heads/develop
| 2023-08-30T04:06:20.951203
| 2023-08-07T12:06:07
| 2023-08-07T12:06:07
| 184,287,370
| 1
| 0
| null | 2023-08-28T14:34:57
| 2019-04-30T15:29:25
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
# Generated by Django 2.2.2 on 2019-09-24 13:49
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: add published_date to shortpublicationpage and
    align the same field's definition on the legacy/standard pages."""

    dependencies = [
        ('publications', '0006_auto_20190919_1544'),
    ]

    operations = [
        migrations.AddField(
            model_name='shortpublicationpage',
            name='published_date',
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='This date will be used for display and ordering'),
        ),
        migrations.AlterField(
            model_name='legacypublicationpage',
            name='published_date',
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='This date will be used for display and ordering'),
        ),
        migrations.AlterField(
            model_name='publicationpage',
            name='published_date',
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='This date will be used for display and ordering'),
        ),
    ]
|
[
"alex.k.miller@gmail.com"
] |
alex.k.miller@gmail.com
|
26a9092923ba18920ccdad2427001c1ad995f102
|
aee1878ba3e31a36c805025c662ab55a45003898
|
/model_zoo/preprocess.py
|
2ab21774d485dedb6137ca9f7b4dea35f9469a68
|
[
"MIT"
] |
permissive
|
boluoyu/ModelZoo
|
4a40c1215200ddcf5e96554bf2474593cfe60f1e
|
e8906d5c5195c1f6ebdc46e69fa8cd0439317c60
|
refs/heads/master
| 2020-06-15T17:29:18.108553
| 2019-02-08T16:20:07
| 2019-02-08T16:20:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from sklearn.preprocessing import StandardScaler
def standardize(fit_data, extra_data=None):
    """Standardize data to zero mean and unit variance.

    :param fit_data: data used both to fit the scaler and to transform
    :param extra_data: optional extra data transformed with the scaler
        fitted on *fit_data* (e.g. a test split scaled like the train split)
    :return: the transformed fit_data; when *extra_data* is given, a tuple
        (transformed fit_data, transformed extra_data)
    """
    s = StandardScaler()
    s.fit(fit_data)
    fit_data = s.transform(fit_data)
    # PEP 8 idiom fix: "is not None" instead of "not ... is None".
    if extra_data is not None:
        extra_data = s.transform(extra_data)
        return fit_data, extra_data
    return fit_data
|
[
"cqc@cuiqingcai.com"
] |
cqc@cuiqingcai.com
|
311e80aa2ff395e8c37abb76ae904d71bc65e325
|
8ef8e6818c977c26d937d09b46be0d748022ea09
|
/cv/super_resolution/ttsr/pytorch/model/SearchTransfer.py
|
2b5783ca0517680d2052d57305968c26bdac5edf
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Deep-Spark/DeepSparkHub
|
eb5996607e63ccd2c706789f64b3cc0070e7f8ef
|
9d643e88946fc4a24f2d4d073c08b05ea693f4c5
|
refs/heads/master
| 2023-09-01T11:26:49.648759
| 2023-08-25T01:50:18
| 2023-08-25T01:50:18
| 534,133,249
| 7
| 6
|
Apache-2.0
| 2023-03-28T02:54:59
| 2022-09-08T09:07:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class SearchTransfer(nn.Module):
    """TTSR search-and-transfer: for every low-res lv3 patch, find the most
    similar reference patch (cosine similarity over unfolded 3x3 patches)
    and transfer the corresponding reference features at three scales."""

    def __init__(self):
        super(SearchTransfer, self).__init__()

    def bis(self, input, dim, index):
        # batch index select
        # input: [N, ?, ?, ...]
        # dim: scalar > 0
        # index: [N, idx]
        # Expand *index* so it matches input's shape on every dim except
        # *dim*, then gather the selected slices along *dim*.
        views = [input.size(0)] + [1 if i!=dim else -1 for i in range(1, len(input.size()))]
        expanse = list(input.size())
        expanse[0] = -1
        expanse[dim] = -1
        index = index.view(views).expand(expanse)
        return torch.gather(input, dim, index)

    def forward(self, lrsr_lv3, refsr_lv3, ref_lv1, ref_lv2, ref_lv3):
        """Return (S, T_lv3, T_lv2, T_lv1): the per-position relevance map
        and the transferred reference features at each level.

        Assumes lv2/lv1 are 2x/4x the spatial size of lv3 (see the 6x6
        stride-2 and 12x12 stride-4 unfolds) — TODO confirm with callers.
        """
        ### search
        # Unfold both feature maps into 3x3 patch columns and L2-normalize,
        # so the bmm below computes cosine similarity per patch pair.
        lrsr_lv3_unfold = F.unfold(lrsr_lv3, kernel_size=(3, 3), padding=1)
        refsr_lv3_unfold = F.unfold(refsr_lv3, kernel_size=(3, 3), padding=1)
        refsr_lv3_unfold = refsr_lv3_unfold.permute(0, 2, 1)
        refsr_lv3_unfold = F.normalize(refsr_lv3_unfold, dim=2) # [N, Hr*Wr, C*k*k]
        lrsr_lv3_unfold = F.normalize(lrsr_lv3_unfold, dim=1) # [N, C*k*k, H*W]
        R_lv3 = torch.bmm(refsr_lv3_unfold, lrsr_lv3_unfold) #[N, Hr*Wr, H*W]
        # Hard attention: best-matching reference patch per LR position.
        R_lv3_star, R_lv3_star_arg = torch.max(R_lv3, dim=1) #[N, H*W]
        ### transfer
        # Unfold the reference features with scale-matched kernels/strides
        # so the three levels share one patch index space.
        ref_lv3_unfold = F.unfold(ref_lv3, kernel_size=(3, 3), padding=1)
        ref_lv2_unfold = F.unfold(ref_lv2, kernel_size=(6, 6), padding=2, stride=2)
        ref_lv1_unfold = F.unfold(ref_lv1, kernel_size=(12, 12), padding=4, stride=4)
        T_lv3_unfold = self.bis(ref_lv3_unfold, 2, R_lv3_star_arg)
        T_lv2_unfold = self.bis(ref_lv2_unfold, 2, R_lv3_star_arg)
        T_lv1_unfold = self.bis(ref_lv1_unfold, 2, R_lv3_star_arg)
        # Fold back to feature maps; /(3*3) averages overlapping patches.
        T_lv3 = F.fold(T_lv3_unfold, output_size=lrsr_lv3.size()[-2:], kernel_size=(3,3), padding=1) / (3.*3.)
        T_lv2 = F.fold(T_lv2_unfold, output_size=(lrsr_lv3.size(2)*2, lrsr_lv3.size(3)*2), kernel_size=(6,6), padding=2, stride=2) / (3.*3.)
        T_lv1 = F.fold(T_lv1_unfold, output_size=(lrsr_lv3.size(2)*4, lrsr_lv3.size(3)*4), kernel_size=(12,12), padding=4, stride=4) / (3.*3.)
        # Soft relevance map reshaped to [N, 1, H, W].
        S = R_lv3_star.view(R_lv3_star.size(0), 1, lrsr_lv3.size(2), lrsr_lv3.size(3))
        return S, T_lv3, T_lv2, T_lv1
|
[
"jia.guo@iluvatar.ai"
] |
jia.guo@iluvatar.ai
|
c91819f9ce17f0642a4a7e15dc0d0cc7c34ed6c1
|
fccc9acd62447941a49313c01fcf324cd07e832a
|
/exe115/sistema.py
|
358cfb5e914db10289609b7d41eb882cee96b3dc
|
[] |
no_license
|
paulovictor1997/Python
|
fba884ea19ed996c6f884f3fcd3d49c5a34cfd3d
|
671d381673796919a19582bed9d0ee70ec5a8bea
|
refs/heads/master
| 2023-04-29T18:01:55.451258
| 2021-05-19T15:12:09
| 2021-05-19T15:12:09
| 354,154,332
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
from exe115.lib.interface import *
from exe115.lib.arquivo import *
from time import sleep
arq = 'doc.txt'
if not arquivoExiste(arq):
criarArquivo(arq)
while True:
resposta = menu(['Ver pessoas cadastradas','Cadastrar','SAIR'])
if resposta == 1:
#Aqui vai listar o conteúdo do arquivo
lerArquivo(arq)
elif resposta == 2:
#Cadastrar uma nova pessoa
cabeçalho('Novo Cadastro')
nome = str(input('Nome : '))
idade = leiaInt('Idade : ')
cadastrar(arq, nome, idade)
elif resposta == 3:
cabeçalho('\033[34mSaindo...Volte sempre !\033[m')
break
else:
print('\033[31mErro... Digite novamente !\033[m')
sleep(1)
|
[
"paulovictornunes97@gmail.com"
] |
paulovictornunes97@gmail.com
|
a9fa385bdf2356beab9851c83ca610512a6d532c
|
cb3bce599e657188c30366adb0af3007ff9b8f96
|
/src/network/ex29-1.py
|
eeab288fc33fad3fc9b2440e19e26bdfdd2f8b06
|
[] |
no_license
|
skk4/python_study
|
534339e6c378d686c29af6d81429c472fca19d6d
|
4bdd2a50f4bdfd28fdb89a881cb2ebb9eac26987
|
refs/heads/master
| 2021-01-01T04:36:52.037184
| 2017-12-08T01:04:27
| 2017-12-08T01:04:27
| 97,207,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
'''
Created on 2017.7.18
@author: Administrator
'''
import socket
'''
a_tuple = [('1', ('2', '22', '222'), '3'),('4',('5', '55', '555'), '6'), ('1', ('7', '77', '777'), '3')]
print [x[1][1] for x in a_tuple]
'''
localip = socket.gethostname()
print localip
fdqdn = socket.getfqdn(localip)
print fdqdn
result = socket.getaddrinfo(localip, None, 0, socket.SOCK_STREAM)
ips = [x[4][0] for x in result]
new_ips = ', '.join(ips)
print new_ips
|
[
"skk_4@163.com"
] |
skk_4@163.com
|
250187df27f88d6af5ce301cfd96f7a72e3f38cd
|
b48764e6684ffbd73b0043dc889c013860642e8d
|
/1학기/퍼스널컬러.py
|
8691df840217d0d309f14ff4a0e152e708a3cc16
|
[] |
no_license
|
tanghee/Programming-Python-
|
c6d32a1e49d5c95c8359aeb8775cb52cc665167a
|
eb402357ad31638d867042e76af507bc6c67a0b4
|
refs/heads/master
| 2022-03-27T07:27:18.888660
| 2019-12-10T02:06:41
| 2019-12-10T02:06:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,778
|
py
|
#2416임태희
#18개의 질문을 통해 자가진단을 진행합니다
#예를 들어, Q1의 답이 0이라면 Q4의 질문으로 답이 1이라면 Q2의 질문으로 넘어가는 방식입니다.
#함수 하나씩 질문을 넣어서 해당 질문의 답에 따라서 다음 함수를 호출해주는 방식으로 진행합니다.
print("FIND YOUR COLOR!")
print("♡쉽고 빠른 자가진단으로 나만의 컬러를 찾아보세요♡\n")
#함수 q1() ~ q18()과 spring(), summer(), autumn(), winter()를 생성해 줍니다.
#자가진단 결과를 spring(), summer(), autumn(), winter() 함수를 호출해서 알려준다.
#봄웜톤
def spring():
print("당신은 ☆봄웜톤☆ 입니다!")
print("이 타입의 사람들은 생기발랄하고 젊은 느낌을 줍니다.")
print("피부색 >> 복숭아빛의 밝고 노란빛의 투명한 피부를 가지고 있습니다.")
print("머리색 >> 대체로 눈동자색과 비슷한 밝은 갈색으로 윤기나고 찰랑찰랑한 머릿결을 가지고 있습니다.")
print("눈동자색 >> 밝은갈색으로 빛이 나고 맑아 보입니다.")
print("대표연예인 >> 수지, 설리, 아이유가 있습니다.\n")
#여름쿨톤
def summer():
print("당신은 ☆여름쿨톤☆ 입니다!")
print("이 타입의 사람들은 우아하고 여성스러운 느낌을 줍니다.")
print("피부색 >> 핑크빛이 도는 혈색이 좋은 피부를 가지고 있습니다.")
print("머리색 >> 약간 부시시한 회갈색 머리카락을 가지고 있습니다.")
print("눈동자색 >> 차분하고 부드러운 갈색의 눈동자를 가지고 있습니다.")
print("대표연예인 >> 손예진, 김하늘이 있습니다.\n")
#가을웜톤
def autumn():
print("당신은 ☆가을웜톤☆ 입니다!")
print("이 타입의 사람들은 어른스럽고 차분한 이미지를 가지고 있습니다.")
print("피부색 >> 노르스름하며 윤기가 없고, 얼굴의 혈색이 없는 편입니다.")
print("머리색 >> 윤기가 없는 짙은갈색입니다.")
print("눈동자색 >> 짙고 깊이감있는 짙은 황갈색 계열입니다.")
print("대표연예인 >> 이효리, 박정아, 탕웨이가 있습니다.\n")
#겨울쿨톤
def winter():
print("당신은 ☆겨울쿨톤☆ 입니다!")
print("이 타입의 사람들은 심플하면서 모던한 스타일로 도회적입니다.")
print("피부색 >> 희고 푸른빛을 지니고 있어 차갑고 창백해 보입니다.")
print("머리색 >> 푸른빛이 도는 짙은갈색이나 검은색입니다.")
print("눈동자색 >> 검은색이나 짙은회갈색입니다.")
print("대표연예인 >> 김혜수, 선우선이 있습니다.\n")
def q1():
answer = input(">>당신의 피부톤은 어떻습니까? (0:하얀색이다, 1:검은색이다) : ")
if(answer == "0"):
q4()
elif(answer == "1"):
q2()
else:
q1()
def q2():
answer = input(">>당신의 눈동자색은 무엇입니까? (0:검은색, 1:짙은갈색, 2:밝은갈색) : ")
if(answer == "0"):
q5()
elif(answer == "1"):
q5()
elif(answer == "2"):
q3()
else:
q2()
def q3():
answer = input(">>당신과 잘 어울리는 아이섀도우 계열은 무엇입니까? (0:회색계열, 1:갈색계열) : ")
if(answer == "0"):
q5()
elif(answer == "1"):
q11()
else:
q3()
def q4():
answer = input(">>당신의 눈 인상은 어떻습니까? (0:강한편, 1:부드러운편) : ")
if(answer == "0"):
q5()
elif(answer == "1"):
q7()
else:
q4()
def q5():
answer = input(">>당신에게 어울리는 분홍색은 무엇입니까? (0:핫핑크, 1:코랄핑크) : ")
if(answer == "0"):
q10()
elif(answer == "1"):
q8()
else:
q5()
def q6():
answer = input(">>연분홍색이나 연노란색처럼 포근하고 사랑스러운 색이 잘 어울리나요? (0:어울립니다, 1:어울리지 않습니다) : ")
if(answer == "0"):
q17()
elif(answer == "1"):
q14()
else:
q6()
def q7():
answer = input(">>화장하지 않은 얼굴에 검은색 옷을 입으면 어떻습니까? (0:이목구비가 뚜렷하게 보입니다 1:얼굴색이 안 좋아보입니다) : ")
if(answer == "0"):
q10()
elif(answer == "1"):
q5()
else:
q7()
def q8():
answer = input(">>당신에게 잘 어울리는 액세서리는 무엇입니까? (0:골드제품, 1:실버제품) : ")
if(answer == "0"):
q6()
elif(answer == "1"):
q9()
else:
q8()
def q9():
answer = input(">>황토색, 겨자색, 이끼색, 적갈색처럼 차분하고 고상한 색이 잘 어울리나요? (0:어울립니다, 1:어울리지 않습니다) : ")
if(answer == "0"):
q15()
elif(answer == "1"):
q6()
else:
q9()
def q10():
answer = input(">>당신의 첫인상은 어떻습니까? (0:강한인상, 1:부드러운인상, 2:평범한인상) : ")
if(answer == "0"):
q13()
elif(answer == "1"):
q11()
elif(answer == "2"):
q8()
else:
q10()
def q11():
answer = input(">>햇볕에 노출되면 피부가 어떻게 되나요? (0:잘 탑니다, 1:잘 타지 않습니다, 2:보기에 해당하지 않습니다) : ")
if(answer == "0"):
q9()
elif(answer == "1"):
q8()
elif(answer == "2"):
q12()
else:
q11()
def q12():
answer = input(">>상대방이 보는 당신의 이미지는 어떻습니까? (0:친근감 있고 부드러운 이미지, 1:강하고 차가운 이미지) : ")
if(answer == "0"):
q17()
elif(answer == "1"):
q14()
else:
q12()
def q13():
answer = input(">>당신과 잘 어울리는 색은 무엇입니까? (0:선명한 원색, 1:부드러운 파스텔색) : ")
if(answer == "0"):
q14()
elif(answer == "1"):
q8()
else:
q13()
def q14():
answer = input(">>당신의 얼굴 가까이에 대보았을 때 가장 잘 어울리는 꽃은 무엇입니까? (0:붉은빛의 장미, 1:핑크빛의 튤립) : ")
if(answer == "0"):
q18()
elif(answer == "1"):
q17()
else:
q14()
def q15():
answer = input(">>당신의 머리색은 무엇입니까? (0:진한갈색, 1:진한검은색, 2:밝은갈색, 3:부드러운검은색) : ")
if(answer == "0"):
q18()
elif(answer == "1"):
q18()
elif(answer == "2"):
q14()
elif(answer == "3"):
q14()
else:
q15()
def q16():
answer = input(">>당신의 얼굴은 어려보이는 편입니까? (0:그렇습니다, 1:그렇지 않습니다) : ")
if(answer == "0"):
print("\n")
spring()
elif(answer == "1"):
print("\n")
autumn()
else:
q16()
def q17():
answer = input(">>당신에게 잘 어울리는 니트색상은 무엇입니까? (0:노란기가 있는 따뜻한색, 1:푸른기가 있는 차가운색) : ")
if(answer == "0"):
q16()
elif(answer == "1"):
print("\n")
summer()
else:
q17()
def q18():
answer = input(">>당신이 어두운색 정장을 입는다면 어울리는 색상은 무엇입니까? (0:검은색계열, 1:회색계열, 2:어두운갈색계열) : ")
if(answer == "0"):
print("\n")
winter()
elif(answer == "1"):
print("\n")
winter()
elif(answer == "2"):
print("\n")
autumn()
else:
q18()
#q1()함수 호출로 인해서 프로그램이 실행됩니다.
q1()
|
[
"s2018w37@e-mirim.hs.kr"
] |
s2018w37@e-mirim.hs.kr
|
760e6204b7ba9ca5639eb67bc4f8cc5c7db2f082
|
9e419006675f6991480f350017798a4b3e0ccbd8
|
/borrow/borrow/wsgi.py
|
a341999c24e547fbe6d5387a7a8e21b63835555e
|
[
"MIT"
] |
permissive
|
chairco/django-tutorial-borrow
|
263ccb7305f29d4bcc3139bd65605df70ec30498
|
28f747e115feabdcbd96d15fbc73f1c5d22236cd
|
refs/heads/master
| 2021-01-17T19:51:56.967984
| 2016-07-12T04:41:17
| 2016-07-12T04:41:17
| 63,126,576
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
"""
WSGI config for borrow project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "borrow.settings")
application = Cling(get_wsgi_application())
#application = get_wsgi_application()
|
[
"chairco@gmail.com"
] |
chairco@gmail.com
|
57e1c158e17a6f159aaf900eef82d7f9d995f7ef
|
10b3d1ce02eaa4908dc16ca378ddfb1955b2d625
|
/MV3D_TF_release/lib/utils/construct_voxel.py
|
456ee612b2788efdc122a5a6282577d0c3396a2d
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
ZiningWang/Sparse_Pooling
|
7281aa0d974849eac8c48faa5ba08519b091ef6e
|
f46882832d0e2fed5ab4a0af15cead44fd3c6faa
|
refs/heads/master
| 2023-05-26T08:47:16.232822
| 2023-05-20T08:39:11
| 2023-05-20T08:39:11
| 141,640,800
| 56
| 21
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,772
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import time
import os,sys
#add library to the system path
lib_path = os.path.abspath(os.path.join('lib'))
sys.path.append(lib_path)
from utils.transform import calib_to_P,clip3DwithinImage,projectToImage,lidar_to_camera
from utils.config_voxels import cfg
side_range = (cfg.Y_MIN, cfg.Y_MAX-0.01)
fwd_range = (cfg.X_MIN, cfg.X_MAX-0.01)
height_range = (cfg.Z_MIN, cfg.Z_MAX-0.01) #
res = cfg.VOXEL_X_SIZE
zres = cfg.VOXEL_Z_SIZE
NUM_VOXEL_FEATURES = 7
MAX_NUM_POINTS=cfg.VOXEL_POINT_COUNT
'''
def lidar_bv_append(scan,calib,img_size,in_camera_frame=False):
#return the additional data for MV3D_img
if not(in_camera_frame):
P = calib_to_P(calib)
indices = clip3DwithinImage(scan[:,0:3].transpose(),P,img_size)
scan = scan[indices,:]
else:
P = calib_to_P(calib,from_camera=True)
bv_index,scan_filtered,bv_size = point_in_bv_indexes(scan)
N = bv_index.shape[0]
img_points = projectToImage(scan_filtered,P)
img_index = np.round(img_points).astype(int)
img_index = np.vstack((img_index,np.zeros((1,N))))
return {'bv_index':bv_index,'img_index':img_index,'bv_size':bv_size,'img_size':img_size}
'''
def point_cloud_2_top_sparse(points,
res=res,
zres=zres,
side_range=side_range, # left-most to right-most
fwd_range=fwd_range, # back-most to forward-most
height_range=height_range, # bottom-most to upper-most
top_count = None,
to_camera_frame = False,
points_in_cam = False,
calib=None,
img_size = [0,0],
augmentation=False,
img_index2=None
):
""" Creates an birds eye view representation of the point cloud data for MV3D.
WZN: NOTE to get maximum speed, should feed all LIDARs to the function because we wisely initialize the grid
"""
#t0 = time.time()
if to_camera_frame:
indices = clip3DwithinImage(points[:,0:3].transpose(),P,img_size)
points = points[indices,:]
img_index2 = img_index2[:,indices]
points[:,0:3] = lidar_to_camera(points[:,0:3].transpose(),calib)
points_in_cam = True
if points_in_cam:
points = points[:,[2,0,1,3]] #forward, side, height
#x_points = points[:, 1]
#y_points = points[:, 2]
#z_points = points[:, 0]
else:
assert False, 'Wrong, cannot process LIDAR coordinate points'
points[:,1] = -points[:1]
#x_points = points[:, 0]
#y_points = -points[:, 1]
#z_points = points[:, 2]
# INITIALIZE EMPTY ARRAY - of the dimensions we want
x_max = int((side_range[1] - side_range[0]) / res)
y_max = int((fwd_range[1] - fwd_range[0]) / res)
z_max = int((height_range[1] - height_range[0]) / zres)
voxel_full_size = np.array([ z_max+1, x_max+1, y_max+1])
'''
if top_count is None:
top_count = np.zeros([y_max+1, x_max+1, z_max+1],dtype=int)-1
else:
assert x_max==(top_count.shape[1]-1) and y_max==(top_count.shape[0]-1), 'shape mismatch of top_count, %d vs. %d and %d vs. %d'%(x_max,top_count.shape[1]-1,y_max,top_count.shape[0]-1)
'''
f_filt = np.logical_and(
(points[:, 0] > fwd_range[0]), (points[:, 0] < fwd_range[1]))
s_filt = np.logical_and(
(points[:, 1] > side_range[0]), (points[:, 1] < side_range[1]))
z_filt = np.logical_and(
(points[:, 2] > height_range[0]), (points[:, 2] < height_range[1]))
filter = np.logical_and(np.logical_and(f_filt, s_filt),z_filt)
#print np.sum(f_filt),np.sum(s_filt),np.sum(z_filt)
points_filt = points[filter,:] #fwd,side,height
img_index2 = img_index2[:,filter]
xyz_points = points_filt[:, 0:3]
xyz_img = np.zeros_like(xyz_points,dtype=int)
#points_filt = points_filt.tolist()
#reflectance = points_filt[:,3]
#print 'init time: ', time.time()-t0
#t0 = time.time()
counter_all = 0
counter_voxels = 0
# CONVERT TO PIXEL POSITION VALUES - Based on resolution
# SHIFT PIXELS TO HAVE MINIMUM BE (0,0)
# floor & ceil used to prevent anything being rounded to below 0 after
xyz_img[:,0] = (((xyz_points[:,1]-side_range[0]) / res).astype(np.int32)) # x axis is -y in LIDAR
xyz_img[:,1] = (((xyz_points[:,0]-fwd_range[0]) / res).astype(np.int32)) # y axis is -x in LIDAR
xyz_img[:,2] = (((xyz_points[:,2]-height_range[0]) / zres).astype(np.int32))
#print xyz_img.shape
unique_xyz,indices_inv = np.unique(xyz_img,axis=0,return_inverse=True,return_counts=False)
counter_voxels = unique_xyz.shape[0]
top_sparse = np.zeros([counter_voxels,MAX_NUM_POINTS,NUM_VOXEL_FEATURES])
#WZN: the first colum is always 0 which indicates the batch number!!! IMPORTANT
indices_and_count_sparse = np.zeros([counter_voxels,5],dtype=int)#.tolist()
indices_and_count_sparse[:,1:4] = unique_xyz[:,[2,0,1]] # voxel shape is 1x10(updpw)x200(side)x240(fwd) for network
#indices_and_count_sparse = np.array([[0]*4]*counter_voxels)
#print indices_and_count_sparse.shape
filt_indices = []
for j in range(xyz_img.shape[0]):
sparse_index = indices_inv[j]
num_points = indices_and_count_sparse[sparse_index,-1]
if num_points<MAX_NUM_POINTS:
top_sparse[sparse_index,num_points,0:4] = points_filt[j]
indices_and_count_sparse[sparse_index,-1] += 1
filt_indices.append(j)
top_sparse[:,:,4:7] = top_sparse[:,:,0:3]-np.expand_dims(np.sum(top_sparse[:,:,0:3],axis=1)/indices_and_count_sparse[:,4:5],1)
# so for corrdinates, it is [y_img(from z_cam), x_img(from x_cam), z_img(from y_cam)], but for feature it is [z_cam(x_lidar),x_cam(-y_lidar),y_cam(z_lidar)]
voxel_dict = {'feature_buffer': top_sparse,
'coordinate_buffer': indices_and_count_sparse[:,0:4],
'number_buffer': indices_and_count_sparse[:,-1]}
#construct image indexes
if points_in_cam:
P = calib_to_P(calib,from_camera=True)
else:
assert False, 'Wrong, cannot process LIDAR coordinate points'
img_index2 = img_index2[:,filt_indices]
N = img_index2.shape[1]
img_index = np.vstack((img_index2,np.zeros((1,N)).astype(int)))
bv_index = xyz_img[filt_indices,:][:,[1,0]]
M_val = 1.0/(indices_and_count_sparse[indices_inv[filt_indices],-1])
return voxel_dict,voxel_full_size,img_index,bv_index,M_val
|
[
"kiwoo.shin@berkeley.edu"
] |
kiwoo.shin@berkeley.edu
|
bbf8704f9a342da4a6242959c75449f67e72b2f1
|
9450d31f41d59f238d4db6b10ac4819cfce8a32b
|
/run_game.py
|
4a5cabddccc61a400caf445609b916ed03c2f6ab
|
[
"BSD-3-Clause"
] |
permissive
|
Python-Repository-Hub/pyweek24
|
4ed4959cecd0ac55e2fa68756cbdf15aa1f246af
|
284dc9c1a152fca8e39cf9d637f089ab772b3afd
|
refs/heads/master
| 2022-06-15T02:51:14.652778
| 2018-02-13T19:23:21
| 2018-02-13T19:23:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
import sys
if sys.version_info < (3, 6):
sys.exit(
"This game requires Python 3.6 or later."
)
import os
from pathlib import Path
dist = Path(__file__).parent.resolve()
src = str(dist / 'src')
sys.path.insert(0, src)
os.chdir(src)
try:
import main
except ImportError:
import traceback
traceback.print_exc()
req = dist / 'requirements.txt'
sys.exit(
"""
Please ensure you have the following packages installed:
%s
You can run 'pip install -r requirements.txt' to install these (currently this
will require a compiler to be configured).
You will also require AVBin from
https://avbin.github.io/AVbin/Download.html
""" % req.read_text()
)
|
[
"mauve@mauveweb.co.uk"
] |
mauve@mauveweb.co.uk
|
bba69b0ee5109c3789eb94c4c13b6199daacbf77
|
a138092a4fd0bd46e21fade96fea5dfba7742e20
|
/scratches/dicts.py
|
028103db5eaeb53cb84611165d1ed1e15a955fb8
|
[] |
no_license
|
DuaneNielsen/CartPoleQ
|
979c00ca15bbac0719ba9197bedffe829f322561
|
202b82cf2b04aaa63965277c326413c62f188ed2
|
refs/heads/master
| 2020-03-25T07:27:22.651681
| 2018-09-07T01:40:35
| 2018-09-07T01:40:35
| 143,562,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
dict = {}
def increment():
if 'key' not in dict:
dict['key'] = 1
else:
dict['key'] += 1
print(dict['key'])
increment()
increment()
|
[
"duane.nielsen.rocks@gmail.com"
] |
duane.nielsen.rocks@gmail.com
|
044e3599fa084cf436a5af3b58daa4d5d4aafefc
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Flask/FlaskIntroduction/env/lib/python3.6/site-packages/jinja2/idtracking.py
|
e507483177b9aae92710ac72877a042a2f540cb0
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:d866c34b322f180ac40462e4a2f2e4a847e6631996b047fc737419c0ce2e36cc
size 9197
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
ee32622758a3d1e38bdf665d2426e7b04d9517db
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/HyLkfdagDGc99ZhbF_2.py
|
5cbf4778f6f92984c2090e629a1ebc045c4aaa40
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
"""
Create a function that takes a number `n` (integer greater than zero) as an
argument, and returns `2` if `n` is odd and `8` if `n` is even.
You can only use the following arithmetic operators: addition of numbers `+`,
subtraction of numbers `-`, multiplication of number `*`, division of number
`/`, and exponentiation `**`.
You are not allowed to use any other methods in this challenge (i.e. no if
statements, comparison operators, etc).
### Examples
f(1) ➞ 2
f(2) ➞ 8
f(3) ➞ 2
### Notes
N/A
"""
def f(n):
return 5 + 3 * (-1) ** ((n / 2 - n // 2) * 2)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
1ed343ca0e6cbe3de6a727240f40e690f376f493
|
cf89f6250926d993df642d1204e82f73529500e9
|
/app/backend/views.py
|
2a6d11060a3e88b1cfa6089bf7e89809881ae2e8
|
[] |
no_license
|
gurnitha/django-login-logout
|
ce05911608122fa7dc560c5c230f2ed7a8ba95e0
|
3be89a432e2e9f9299ba670fce319f96e0af0cfa
|
refs/heads/main
| 2023-04-15T11:34:28.639319
| 2021-05-02T04:33:00
| 2021-05-02T04:33:00
| 363,554,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,552
|
py
|
# app/backend/views.py
# from django.shortcuts import render
# from django.contrib.auth import authenticate, login, logout
# from django.contrib import messages
# from django.http import HttpResponseRedirect
# from django.urls import reverse
# # Create your views here.
# def homePageAdmin(request):
# return render(request, 'backend/home.html')
# #adminLogin
# def adminLogin(request):
# # Get the input (username and password) from the login form
# username=request.POST.get('username')
# password=request.POST.get('password')
# # Authenticate user input(credentials)
# user=authenticate(
# request=request,username=username,password=password)
# # If user exist in the database
# if user is not None:
# login(request=request, user=user)
# return HttpResponseRedirect(reverse('home_admin'))
# # If user is not exist in the database
# else:
# messages.error(request, 'Invalid login detail!')
# return HttpResponseRedirect(reverse('login_page'))
# def adminLoginProcess(request):
# return render(request, 'backend/login_process.html')
# START FROM ZERO AGAIN
# app/dashboard/views.py
from django.shortcuts import render
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
'''import login_required module to add conditionl to user login'''
from django.contrib.auth.decorators import login_required
# Create your views here.
'''Only loggeg in user can access the admin dashboard'''
@login_required(login_url='/admin/')
def adminHome(request):
messages.success(request, 'Logged in successfully!')
return render(request, 'backend/home.html')
def adminLogin(request):
return render(request, 'backend/login.html')
def adminLoginProcess(request):
return render(request, 'backend/login_process.html')
def adminLoginProcess(request):
# Get input from the login form
username=request.POST.get('username')
password=request.POST.get('password')
# Authenticate user credentials
user=authenticate(
request=request,
username=username,
password=password)
# If user exist
if user is not None:
login(request=request, user=user)
return HttpResponseRedirect(reverse('admin_home'))
# If user not exist
else:
messages.error(
request, 'Login error! Invalid login detail!')
return HttpResponseRedirect(reverse('admin_login'))
def adminLogoutProcess(request):
logout(request)
messages.success(request, 'Logged out successfully!')
return HttpResponseRedirect(reverse('admin_login'))
|
[
"ingafter60@outlook.com"
] |
ingafter60@outlook.com
|
641d1c9fb4ce72e07ab9d93104d0a361c98c83e6
|
d71978ac89d21de391174c4a6f96edc38142b51f
|
/src/front/form.py
|
2ca88661ab5304c968d1f95b72f6182ceb74a0ee
|
[] |
no_license
|
mzakany23/django-calendar-app
|
4d5fd4f4f5e835f0b81904ea5040f43f2f7e4cb0
|
1cedb92214e9649bb1c9ebcd64e78a66b1d5232b
|
refs/heads/master
| 2016-09-05T23:39:40.026293
| 2015-03-27T00:21:47
| 2015-03-27T00:21:47
| 32,084,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from django import forms
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(
attrs={
'placeholder' : 'Username',
'class' : 'form-control',
'id' : 'inputEmail',
}))
password = forms.CharField(widget=forms.PasswordInput(
attrs={
'placeholder' : 'Password',
'class' : 'form-control',
'id' : 'inputPassword',
}))
|
[
"mzakany@gmail.com"
] |
mzakany@gmail.com
|
e775477bbdf1b10bfe37482cb0347cb2b9f64e68
|
776cf3b0f5865c8639692e1256abb5ad493c9f92
|
/__old_stuff/pga/pga_no_sort/ga.py
|
44c16031213aaa30fc75f982ad3221ebed52c595
|
[] |
no_license
|
ralphbean/ms-thesis
|
90afb1d5729d83f1910d8dec2e6d4c65d0304bc0
|
3fea08aa069d735fb7048afbab37bb429800fb48
|
refs/heads/master
| 2021-01-19T11:28:14.382925
| 2012-01-25T15:24:54
| 2012-01-25T15:24:54
| 3,265,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,793
|
py
|
#!/usr/bin/python
from time import time
from random import random, randint
import shelve, sys, gc
import crossover as cvr
import mutation as mut
import metrics as mtr
import selection as sel
# A constant:
first = False
num_trials = 3
max_gens = 3000
# Print things to stdout and log things that need logging
def IO_update(ID, generation, pop, max_gens):
print "\rL:", mtr.fitness(pop[0]), "a:", pop[0]['amplitude'],
print "%", 100.0 * float(generation)/(max_gens-1),
print " First:", first,
sys.stdout.flush()
# Print stuff to stdout:
# print ID
# print "generation: ", generation
# print " best:", mtr.fitness(pop[0]), "a:", pop[0]['amplitude']
# print " secn:", mtr.fitness(pop[1]), "a:", pop[1]['amplitude']
# print " wrst:", mtr.fitness(pop[-1]), "a:", pop[-1]['amplitude']
# Log stuff to file with shelve
d = shelve.open("dat/" + str(ID) + "." + str(generation) + ".pop" )
d['pop'] = pop
d.close()
def initialize_pop():
# Some initialization constants:
lower_size = 2
upper_size = 50
num = 100
pop = []
for j in range(num):
print "\rInitializing Population %", 100*float(j)/(num-1),
sys.stdout.flush()
org = { 'org':
[[random()*2-1 for i in range(randint(lower_size, upper_size))],
[0,0,0]],
'amplitude' : random() * 0.1 + 0.05 }
org['fitness'] = mtr.fitness(org)
pop.append(org)
print " Done."
return pop
def handle_args():
if len(sys.argv) != 5:
print "Usage:"
print " ga.py <comparator> <crossover> <mutation> <selection>"
sys.exit(1)
cmp_fnc = mtr.fn_list[int(sys.argv[1])]
c_over_op = cvr.fn_list[int(sys.argv[2])]
select_op = sel.fn_list[int(sys.argv[4])]
return cmp_fnc, c_over_op, select_op
def do_experiment(cmp_fnc, c_over_op, select_op, trial, force=False):
ID = str(cmp_fnc)+"."+str(c_over_op)+"."+str(select_op)+"."+str(trial)
cmp_fnc = mtr.fn_list[cmp_fnc]
c_over_op = cvr.fn_list[c_over_op]
select_op = sel.fn_list[select_op]
print "ID:", ID,
print str(cmp_fnc)[10:-15],str(c_over_op)[10:-15],str(select_op)[10:-15]
pop = None
generation = 0
while ( generation < max_gens ):
# First check to see if this experiment is already done...
d = shelve.open("dat/"+ID+"." + str(generation) + ".pop")
if 'pop' in d:
prog = 100.0 * float(generation)/(max_gens-1)
print "\rAlready computed. Skipping ahead. %",prog," f:",first,
sys.stdout.flush() # Update our percentage ticker.
generation = generation + 1 # Advance the generation counter.
pop = d['pop'] # Load that population into memory.
d.close()
continue
d.close()
# Initialize our population if we haven't already
if not pop:
pop = initialize_pop()
# Otherwise we need to compute!
pop.sort(lambda x,y : mtr.comparator(x,y, cmp_fnc) ) # Eval and sort
IO_update(ID, generation, pop, max_gens) # Spit out status
pop = select_op(pop, c_over_op, cmp_fnc) # Breed
generation = generation + 1 # Tick
# Forcibly revaluate the fitness of the hero.
try:
del pop[0]['fitness']
except KeyError:
pass
print " Done."
def combinations():
combins = []
for i in range(len(mtr.fn_list)):
for j in range(len(cvr.fn_list)):
for k in range(len(sel.fn_list)):
for l in range(num_trials):
combins.append([i,j,k,l])
if first:
combins = combins[:len(combins)/2]
else:
combins = combins[len(combins)/2:]
print "Total number of combinations: ", len(combins)
return combins
if __name__ == '__main__':
times = []
combins = combinations()
for i in range(len(combins)):
cmp_fnc, c_over_op, select_op, trial = combins[i]
start = time()
results = do_experiment(cmp_fnc, c_over_op, select_op, trial)
times.append(time() - start)
print "Trial:", times[-1]/(60**2), "(h).",
avg = sum(times)/(60**2 * len(times))
print "Average:", avg, "(h). GC:", gc.get_count()
p_done = 100*float(i+1)/(len(combins))
h_elap = sum(times)/(60**2)
print "%",p_done,"done with entire experiment.", h_elap, "(h) elapsed."
h_left = h_elap*(100-p_done)/p_done
print "Expect to be done in", h_left, "(h)."
print
# Get the function pointers from the arg list
#cmp_fnc, c_over_op, select_op = handle_args()
#do_experiment(cmp_fnc, c_over_op, select_op, 0)
|
[
"ralph.bean@gmail.com"
] |
ralph.bean@gmail.com
|
3986965c2d0a784332a7d222363a1d17a565161e
|
9abe914e718155f3a560915c56a55996155159fb
|
/orders/migrations/0001_initial.py
|
bc9759f5039dba1b8d785e8084892eb818553505
|
[] |
no_license
|
Chaoslecion123/Tienda-Django
|
07c8e2abe8cf659a4fce910c2b8fc858d9276e3b
|
d06e0c789bab69472d0931b2322e7da4f2eaa3bd
|
refs/heads/master
| 2022-03-28T18:07:38.083362
| 2019-12-11T20:01:59
| 2019-12-11T20:01:59
| 227,241,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
# Generated by Django 3.0 on 2019-12-06 23:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import orders.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('carts', '0002_cart_products'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[(orders.models.OrderStatus['CREATED'], 'CREATED'), (orders.models.OrderStatus['PAYED'], 'PAYED'), (orders.models.OrderStatus['COMPLETED'], 'COMPLETED'), (orders.models.OrderStatus['CANCELED'], 'CANCELED')], default=orders.models.OrderStatus['CREATED'], max_length=50)),
('shopping_total', models.DecimalField(decimal_places=2, default=5, max_digits=8)),
('total', models.DecimalField(decimal_places=2, default=0, max_digits=8)),
('created_at', models.DateTimeField(auto_now_add=True)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='carts.Cart')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"chaoslecion71@gmail.com"
] |
chaoslecion71@gmail.com
|
2fa1ed31a07f2af23528b5c43403571d54f6cc0a
|
00c9124de185eead1d28dc190e1e125e5b4ceb26
|
/Pyto Mac/PyObjC/Quartz/QuartzFilters/_metadata.py
|
e2ec80683f20af0307984bc22985baf5d9fa4c51
|
[
"MIT"
] |
permissive
|
cclauss/Pyto
|
028b9e9c3c2fefa815a4d6831e25f5ebae5b14f1
|
1c4ccc47e3a91e996bf6ec38c527d244de2cf7ed
|
refs/heads/master
| 2020-04-23T22:09:21.151598
| 2019-03-02T00:44:20
| 2019-03-02T00:44:20
| 171,491,840
| 0
| 0
|
MIT
| 2019-02-19T14:49:25
| 2019-02-19T14:49:25
| null |
UTF-8
|
Python
| false
| false
| 1,987
|
py
|
# This file is generated by objective.metadata
#
# Last update: Tue Jun 26 07:59:02 2018
import objc, sys
if sys.maxsize > 2 ** 32:
def sel32or64(a, b): return b
else:
def sel32or64(a, b): return a
if sys.byteorder == 'little':
def littleOrBig(a, b): return a
else:
def littleOrBig(a, b): return b
misc = {
}
constants = '''$globalUpdateOK@Z$kQuartzFilterApplicationDomain$kQuartzFilterManagerDidAddFilterNotification$kQuartzFilterManagerDidModifyFilterNotification$kQuartzFilterManagerDidRemoveFilterNotification$kQuartzFilterManagerDidSelectFilterNotification$kQuartzFilterPDFWorkflowDomain$kQuartzFilterPrintingDomain$'''
enums = '''$$'''
misc.update({})
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(b'NSObject', b'quartzFilterManager:didAddFilter:', {'retval': {'type': b'v'}, 'arguments': {2: {'type': b'@'}, 3: {'type': b'@'}}})
r(b'NSObject', b'quartzFilterManager:didModifyFilter:', {'retval': {'type': b'v'}, 'arguments': {2: {'type': b'@'}, 3: {'type': b'@'}}})
r(b'NSObject', b'quartzFilterManager:didRemoveFilter:', {'retval': {'type': b'v'}, 'arguments': {2: {'type': b'@'}, 3: {'type': b'@'}}})
r(b'NSObject', b'quartzFilterManager:didSelectFilter:', {'retval': {'type': b'v'}, 'arguments': {2: {'type': b'@'}, 3: {'type': b'@'}}})
r(b'QuartzFilter', b'applyToContext:', {'retval': {'type': b'Z'}})
r(b'QuartzFilterManager', b'selectFilter:', {'retval': {'type': b'Z'}})
finally:
objc._updatingMetadata(False)
protocols={'QuartzFilterManagerDelegate': objc.informal_protocol('QuartzFilterManagerDelegate', [objc.selector(None, b'quartzFilterManager:didSelectFilter:', b'v@:@@', isRequired=False), objc.selector(None, b'quartzFilterManager:didAddFilter:', b'v@:@@', isRequired=False), objc.selector(None, b'quartzFilterManager:didModifyFilter:', b'v@:@@', isRequired=False), objc.selector(None, b'quartzFilterManager:didRemoveFilter:', b'v@:@@', isRequired=False)])}
expressions = {}
# END OF FILE
|
[
"adrilabbelol@gmail.com"
] |
adrilabbelol@gmail.com
|
0229304f52f6ac1a1a145202bfd50b7ebcab2f13
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03353/s439050403.py
|
217d9912ac078b47e9ed9f82f7964e6a159318bd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
S = input()
K = int(input())
Ans = []
while len(Ans) < K:
for i in range(26):
if chr(97+i) in S and chr(97+i) not in Ans:
f = chr(97 + i)
Ans.append(f)
break
for i, s in enumerate(S):
if s == f:
for j in range(2, 2 + 10):
if i + j > len(S):
break
if S[i:i+j] not in Ans:
Ans.append(S[i:i+j])
Ans.sort()
print(Ans[K-1])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f1a7a3948b8324c5db96b653272eb3ff8269f357
|
7d93e4f8a9475ada3edd770263ccb8cd98a9e73d
|
/tonetutor_webapi/settings.py
|
4578eab178976962e18525b426db619d3143ad69
|
[] |
no_license
|
JivanAmara/tonetutor-webapi
|
78a7c03fe69aa392db508358040369ecf0973050
|
adf23af0dd12e5dd967695621146cd67ac5a416c
|
refs/heads/master
| 2021-04-30T04:46:46.336909
| 2018-02-20T00:17:56
| 2018-02-20T00:22:32
| 121,543,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,145
|
py
|
"""
Django settings for tonetutor_webapi project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# This is the host to pass tone check requests through to.
AUDIO_PROTOCOL = os.environ.get('AUDIO_PROTOCOL', 'http://')
AUDIO_HOST = os.environ.get('AUDIO_HOST', 'www.mandarintt.com')
AUDIO_PATH = os.environ.get('AUDIO_PATH', '/audio/')
LOG_FILEPATH = '/var/log/tonetutor_webapi.log'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'PAGE_SIZE': 10
}
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'Not really the secret key.')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', False)
ALLOWED_HOSTS = ['api.mandarintt.com', 'test-api.mandarintt.com', 'www.mandarintt.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webapi',
'rest_framework',
'rest_framework.authtoken',
'syllable_samples',
'tonerecorder',
'hanzi_basics',
'corsheaders',
'usermgmt',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'tonetutor_webapi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tonetutor_webapi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DBPASS = os.environ.get('DB_PASS')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'webvdc', # Or path to database file if using sqlite3.
'USER': 'webvdc', # Not used with sqlite3.
'PASSWORD': DBPASS, # Not used with sqlite3.
'HOST': 'database-host', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/tonetutor_webapi-static'
MEDIA_URL = '/'
# This should be a volume mapped to the shared media root on host system
MEDIA_ROOT = '/mnt/data-volume/tonetutor-media/'
# Subdirectory of MEDIA_ROOT where attempt audio gets stored.
SYLLABLE_AUDIO_DIR = 'audio-files'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': LOG_FILEPATH,
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
|
[
"Development@JivanAmara.net"
] |
Development@JivanAmara.net
|
6bd19bea2e70c55542a03dfa7a782c7d2dfd879c
|
2e8db6ce133756ebe9998a48f98072e2133af792
|
/users/admin.py
|
c74af4fc5bb95c0218144cc7205abe0d7dd7c842
|
[] |
no_license
|
KadogoKenya/JWT_TEST
|
b76f015e65e3950af4b35eebde165700fb8fed03
|
35aa2f9c5b11fa827fbc8565530be597c1f4c2d5
|
refs/heads/master
| 2023-03-26T20:55:19.301952
| 2021-03-25T07:56:52
| 2021-03-25T07:56:52
| 349,028,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,094
|
py
|
from django.contrib import admin
from django import forms
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
# Register your models here.
# admin.site.register(UserManager,UserAdmin)
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from users.models import User
# from customauth.models import MyUser
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email', 'username')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('email', 'password', 'username', 'is_active', 'is_admin')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class UserAdmin(BaseUserAdmin):
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'username', 'is_admin')
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('username',)}),
('Permissions', {'fields': ('is_admin',)}),
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'username', 'password1', 'password2')}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
admin.site.register(User,UserAdmin)
admin.site.unregister(Group)
|
[
"you@example.com"
] |
you@example.com
|
1897bf0e769aee4e932c3b17a1669983468986ba
|
b501a5eae1018c1c26caa96793c6ee17865ebb2d
|
/Networking/socket/socket_multicast_receiver.py
|
0d53559902dd07fb86eb6ef43f80a3e62f3943dd
|
[] |
no_license
|
jincurry/standard_Library_Learn
|
12b02f9e86d31ca574bb6863aefc95d63cc558fc
|
6c7197f12747456e0f1f3efd09667682a2d1a567
|
refs/heads/master
| 2022-10-26T07:28:36.545847
| 2018-05-04T12:54:50
| 2018-05-04T12:54:50
| 125,447,397
| 0
| 1
| null | 2022-10-02T17:21:50
| 2018-03-16T01:32:50
|
Python
|
UTF-8
|
Python
| false
| false
| 638
|
py
|
import socket
import struct
import sys
multicast_group = '224.3.29.71'
server_address = ('', 10000)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(server_address)
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(
socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
mreq
)
while True:
print('\n waiting to receive message')
data, address = sock.recvfrom(1024)
print('received {} bytes from {}'.format(
len(data), address
))
print(data)
print('sending acknowledgement to ', address)
sock.sendto(b'ack', address)
|
[
"jintao422516@gmail.com"
] |
jintao422516@gmail.com
|
dd1bb84b3f09f1d5438cfa643cf57a0b86ad6d4d
|
c658b7eed69edfb1a7610694fe7b8e60a5005b7c
|
/test/functional/test_framework/blocktools.py
|
9c2958f076ad5d7002953c87f035d1bf308c0885
|
[
"MIT"
] |
permissive
|
wolfoxonly/coc
|
0864a6dce2c36d703d93e9b2fb201f599d6db4bd
|
ff8584518c6979db412aec82e6528a4e37077da2
|
refs/heads/master
| 2021-01-24T16:52:14.665824
| 2018-04-28T10:00:42
| 2018-04-28T10:00:42
| 123,215,964
| 0
| 0
|
MIT
| 2018-02-28T02:15:20
| 2018-02-28T02:15:20
| null |
UTF-8
|
Python
| false
| false
| 3,934
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Crowncoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from .mininode import *
from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
# According to BIP141, blocks with witness rules active must commit to the
# hash of all in-block transactions including witness.
def add_witness_commitment(block, nonce=0):
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(int(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey = None):
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(height)), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50 * COIN
halvings = int(height/150) # regtest
coinbaseoutput.nValue >>= halvings
if (pubkey != None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
# Create a transaction.
# If the scriptPubKey is not specified, make it anyone-can-spend.
def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
def get_legacy_sigopcount_block(block, fAccurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, fAccurate)
return count
def get_legacy_sigopcount_tx(tx, fAccurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(fAccurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
|
[
"415313577@qq.com"
] |
415313577@qq.com
|
210f9371a43aa3890b7486dc0f209676644979f8
|
87bae60470bbe5316d7da8bc4a8709e33b40e2b5
|
/whatsnew/south_migrations/0004_auto__del_field_whatsnew_released__add_field_whatsnew_enabled__chg_fie.py
|
b1dc20a303abe6f65e0843cb1077e8a5bf63afa1
|
[] |
no_license
|
saxix/django-whatsnew
|
c11f0d5fa87e5e1c5c7648e8162bd39c64e69302
|
68b33e5e2599a858e00eda53e1c13a503e1b3856
|
refs/heads/develop
| 2021-01-19T12:39:41.876635
| 2015-01-28T16:18:29
| 2015-01-28T16:18:29
| 18,416,313
| 0
| 2
| null | 2015-01-28T16:18:30
| 2014-04-03T20:00:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'WhatsNew.released'
db.delete_column(u'whatsnew_whatsnew', 'released')
# Adding field 'WhatsNew.enabled'
db.add_column(u'whatsnew_whatsnew', 'enabled',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Changing field 'WhatsNew.version'
db.alter_column(u'whatsnew_whatsnew', 'version', self.gf('whatsnew.fields.VersionField')(max_length=50))
def backwards(self, orm):
# Adding field 'WhatsNew.released'
db.add_column(u'whatsnew_whatsnew', 'released',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'WhatsNew.enabled'
db.delete_column(u'whatsnew_whatsnew', 'enabled')
models = {
u'whatsnew.whatsnew': {
'Meta': {'object_name': 'WhatsNew'},
'content': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('whatsnew.fields.VersionField', [], {'max_length': '50'})
}
}
complete_apps = ['whatsnew']
|
[
"s.apostolico@gmail.com"
] |
s.apostolico@gmail.com
|
1248dbd0c2b9a530f886af3cabee8148160d28b7
|
c972024b36470ea42a01075cc3dc5df2ab2defcc
|
/mysite/radpress/urls.py
|
e576bc6e9464e86f98204a0e8b8649c90615b75f
|
[] |
no_license
|
davidrae/abacus-direct
|
1c55bed4639716080b77c03359d981fdd3363027
|
dc09e2345a01ec36f6a8e2adf1dba12f11cb55ad
|
refs/heads/master
| 2016-08-11T12:49:15.407345
| 2015-11-03T15:50:16
| 2015-11-03T15:50:16
| 44,256,109
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,579
|
py
|
from django.conf.urls import patterns, url
from radpress.views import (
ArticleArchiveView, ArticleDetailView, ArticleListView, PreviewView,
PageDetailView, SearchView, ZenModeView, ZenModeUpdateView, TagListView,
GenericTagListView, index, users, reports, alerts, museums, privacy,
solutions, wan, carpark, contact)
from radpress.feeds import ArticleFeed
urlpatterns = patterns(
'radpress.views',
#url(r'^$',
# view=TagListView.as_view(),
# name='radpress-article-list'),
url(r'^$', 'index', name='radpress_home'),
url(r'^tags/$',
view=ArticleArchiveView.as_view(),
name='radpress-article-tags'),
url(r'^archives/$',
view=GenericTagListView.as_view(),
name='radpress-article-archive'),
url(r'^detail/(?P<slug>[-\w]+)/$',
view=ArticleDetailView.as_view(),
name='radpress-article-detail'),
url(r'^p/(?P<slug>[-\w]+)/$',
view=PageDetailView.as_view(),
name='radpress-page-detail'),
url(r'^preview/$',
view=PreviewView.as_view(),
name='radpress-preview'),
url(r'^search/$',
view=SearchView.as_view(),
name='radpress-search'),
url(r'^zen/$',
view=ZenModeView.as_view(),
name='radpress-zen-mode'),
url(r'zen/(?P<pk>\d+)/$',
view=ZenModeUpdateView.as_view(),
name='radpress-zen-mode-update'),
url(r'^rss/$',
view=ArticleFeed(),
name='radpress-rss'),
url(r'^rss/(?P<tags>[-/\w]+)/$',
view=ArticleFeed(),
name='radpress-rss'),
#url(r'^$',
#view=ArticleListView.as_view(),
#name='radpress-article-list'),
url(r'^$',
'index',
name='radpress-home'),
#url(r'^detail/features/$',
# view=ArticleDetailView.as_view(),
# name='radpress-features'),
url(r'^about/$',
'about',
name='radpress-about-us'),
url(r'^contact/$',
'contact',
name='radpress-contact'),
# Features pages
url(r'features/$',
'features',
name='radpress-features'),
url(r'^features/users/$',
'users',
name='radpress-users'),
url(r'^features/live/$',
'live',
name='radpress-live'),
url(r'^features/reports/$',
'reports',
name='radpress-reports'),
url(r'^features/auto/$',
'auto',
name='radpress-auto'),
url(r'^features/alerts/$',
'alerts',
name='radpress-alerts'),
# Solutions
url(r'^solutions/$',
'solutions',
name='radpress-solutions'),
url(r'^solutions/wan$',
'wan',
name='radpress-wan'),
url(r'^solutions/cloud$',
'cloud',
name='radpress-cloud'),
url(r'^solutions/smart-devices$',
'smart',
name='radpress-smart'),
# Industries
url(r'^indsutries/$',
'industries',
name='radpress-industries'),
url(r'^industries/retail/$',
'retail',
name='radpress-retail'),
url(r'^industries/museums/$',
'museums',
name='radpress-museum'),
url(r'^industries/carparks/$',
'carpark',
name='radpress-carpark'),
url(r'^airport/$',
'airports',
name='radpress-airport'),
# Footer links
url(r'^privacy-policy/$',
'privacy',
name='radpress-privacy'),
url(r'^site-map/$',
'sitemap',
name='radpress-sitemap'),
url(r'^terms-of-use/$',
'terms',
name='radpress-terms'),
)
|
[
"david.rae@vmsuk.com"
] |
david.rae@vmsuk.com
|
a704ca8f1db8d26dc296c050b61bbfdec012c64f
|
03ff28a6004ba92f3b04c88bebabb503bed0ad0c
|
/main/migrations/0016_remove_assistantprofile_bio.py
|
092e6aec88360f9c741c9f29399cd654c36c95ea
|
[
"MIT"
] |
permissive
|
mzazakeith/Therapy101
|
ebf8ff945f04dc04c7d05fb5cc9f923bca861c88
|
be00dd988c6b636f52b57638e70c89da3acbf1a3
|
refs/heads/master
| 2023-01-08T09:12:13.882424
| 2018-10-01T15:26:25
| 2018-10-01T15:26:25
| 144,596,894
| 0
| 0
|
MIT
| 2023-01-04T10:58:09
| 2018-08-13T15:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-24 06:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0015_assistantprofile_location'),
]
operations = [
migrations.RemoveField(
model_name='assistantprofile',
name='bio',
),
]
|
[
"mzazakeith@gmail.com"
] |
mzazakeith@gmail.com
|
d8503c77ebac2cd8fe3ea0088bc0ea6508e354d6
|
b9a1dfcde3851847531b031b99df8bf96edc72be
|
/0x0F-python-object_relational_mapping/101-relationship_states_cities_list.py
|
7c723017eb5f8ce8a9f2b5ae496e97a47d0db301
|
[] |
no_license
|
JackWanaCode/holbertonschool-higher_level_programming
|
c304eba4039dc188d9f2383ae93791be786360b9
|
8c2b5a612aad968f7dcb7bbfdb8a1791650dce8f
|
refs/heads/master
| 2020-03-28T11:19:50.763159
| 2019-02-22T07:40:56
| 2019-02-22T07:40:56
| 148,202,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
#!/usr/bin/python3
"""Start link class to table in database
"""
import sys
from relationship_state import Base, State
from relationship_city import City
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker, relationship, backref
if __name__ == "__main__":
engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.
format(sys.argv[1], sys.argv[2], sys.argv[3]),
pool_pre_ping=True)
Base.metadata.create_all(engine, checkfirst=True)
Session = sessionmaker(bind=engine)
session = Session()
flag = 0
st_id_remember = 0
for st in (session.query(State).order_by(State.id).all()):
print("{}: {}".format(st.id, st.name))
for ct in st.cities:
print("\t{}: {}".format(ct.id, ct.name))
session.commit()
session.close()
|
[
"kazucuong@yahoo.com"
] |
kazucuong@yahoo.com
|
cf9ba6f2882fafb0adabf0b721a0fceacb2e24c3
|
99b6faa1e31b9b18755e90070e24787632cd4776
|
/apps/postcrash/models.py
|
04e994a4e29da3b868fa2f4acc4dd2ea173ff84f
|
[] |
no_license
|
taliasman/kitsune
|
d6743ef9e5b26951a87638a963e7429abf1d0327
|
f8085205eef143011adb4c52d1f183da06c1c58e
|
refs/heads/master
| 2021-05-28T19:50:40.670060
| 2013-03-11T13:55:15
| 2013-03-11T13:55:15
| 8,706,741
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
from django.db import models
from sumo.models import ModelBase
from wiki.models import Document
class Signature(ModelBase):
signature = models.CharField(max_length=255, db_index=True, unique=True)
document = models.ForeignKey(Document)
def __unicode__(self):
return u'<%s> %s' % (self.signature, self.document.title)
def get_absolute_url(self):
doc = self.document.get_absolute_url().lstrip('/')
_, _, url = doc.partition('/')
return u'/' + url
|
[
"james@mozilla.com"
] |
james@mozilla.com
|
bb16decea6b3859bd8afa70e6ca5507d66be55e0
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2844/60891/272575.py
|
29c0553ab2f29823241003b7ebce0900d08eee6b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
n_t = [int(i) for i in input().split()]
n = n_t[0]
t = n_t[1]
a = [int(i) for i in input().split()]
max_n = []
for i in range(n):
remain = t
count = 0
j = i
while j < n and remain >= a[j]:
remain -= a[j]
count += 1
j += 1
max_n.append(count)
print(max(max_n))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
ec400f0e82a29ba599263f5b9cbb120c712dff95
|
b4ce39af031a93354ade80d4206c26992159d7c7
|
/Tutorials/Binary Search/Binary Search Function and whileloop.py
|
02a4f2dcb91829a7b678557a1157cc2ec7b8f2a9
|
[] |
no_license
|
Bibin22/pythonpgms
|
4e19c7c62bc9c892db3fd8298c806f9fdfb86832
|
e297d5a99db2f1c57e7fc94724af78138057439d
|
refs/heads/master
| 2023-06-15T00:51:14.074564
| 2021-07-12T17:44:47
| 2021-07-12T17:44:47
| 315,982,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
def search(list, n):
l = 0
u = len(list)-1
while l <= u:
mid = (l + u) // 2
if list[mid] == n:
return True
else:
if list[mid] < n:
l = mid+1
else:
u = mid-1
return False
list = [1, 2, 4, 5, 6, 7, 8, 9,10]
n = int(input("entera number"))
if search(list, n):
print("item found ")
else:
print("item not found")
|
[
"bibinjoy82@gmail.com"
] |
bibinjoy82@gmail.com
|
90c29bd4121c5b2955daef0224a72300c97b7d67
|
6c82cb2e9bab9931c973433e2e384061e1405fc5
|
/app/models/customer.py
|
50630e0b62fb5881d174d7094ad36b8febeb566b
|
[] |
no_license
|
M0r13n/bully-backend
|
7153b27552ff2ef25c9ffdf63c55600f3fddcd7b
|
e9443e10f39a819012d612cd0cd075fb8d75bee2
|
refs/heads/master
| 2023-03-09T13:38:16.198239
| 2021-02-21T12:39:01
| 2021-02-21T12:39:01
| 291,683,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
from datetime import datetime
from sqlalchemy.orm import relationship
from app.extensions import db
class Customer(db.Model):
__tablename__ = "customer"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
first_name = db.Column(db.String(255), nullable=False, unique=False, index=True)
last_name = db.Column(db.String(255), nullable=False, unique=False, index=True)
street = db.Column(db.String(255), nullable=False, unique=False, index=True)
zip_code = db.Column(db.String(10), nullable=False, unique=False, index=True)
city = db.Column(db.String(255), nullable=False, unique=False, index=True)
tel = db.Column(db.String(64), nullable=True, unique=False, index=True)
email = db.Column(db.String(255), nullable=False, unique=False, index=True)
registered_on = db.Column(db.DateTime, nullable=False, default=datetime.now)
reservations = relationship("Reservation", back_populates="customer")
|
[
"31622033+M0r13n@users.noreply.github.com"
] |
31622033+M0r13n@users.noreply.github.com
|
e89963810a075e160abc281ff7078690ec605237
|
eba3e4a3935d6422d1ed85aaf69337f5ba15fc74
|
/sqlalchemy-migrate/test/versioning/test_template.py
|
72217ac895daee6f61801609b45e5c2471a8ca45
|
[] |
no_license
|
arianepaola/tg2jython
|
2ae74250ca43b021323ef0951a9763712c2eb3d6
|
971b9c3eb8ca941d1797bb4b458f275bdca5a2cb
|
refs/heads/master
| 2021-01-21T12:07:48.815690
| 2009-03-27T02:38:11
| 2009-03-27T02:38:11
| 160,242
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
from test import fixture
from migrate.versioning.repository import *
import os
class TestPathed(fixture.Base):
def test_templates(self):
"""We can find the path to all repository templates"""
path = str(template)
self.assert_(os.path.exists(path))
def test_repository(self):
"""We can find the path to the default repository"""
path = template.get_repository()
self.assert_(os.path.exists(path))
def test_script(self):
"""We can find the path to the default migration script"""
path = template.get_script()
self.assert_(os.path.exists(path))
|
[
"ariane@venus.(none)"
] |
ariane@venus.(none)
|
a9ba472f2edf7d86bd6237c5e0a8b230fd58eeb7
|
704da68062145c0e1d016256bbe86f2286c6d149
|
/tests/test_models/test_user.py
|
e14bb600f7e57395687239482e89920eacc3d550
|
[] |
no_license
|
TMcMac/AirBnB_clone_old
|
bf39a4cb80fc39390d2af03861938954c24dc742
|
5130e5b08ca9d301600ea963f126d4e124b305cd
|
refs/heads/master
| 2023-01-03T15:09:53.609497
| 2020-10-29T20:04:41
| 2020-10-29T20:04:41
| 274,945,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,533
|
py
|
#!/usr/bin/python3
"""Unit test for User class"""
import unittest
from models.base_model import BaseModel
from models.user import User
class TestBaseModel(unittest.TestCase):
"""Unit test for User class"""
@classmethod
def setUp(cls):
print('SetupClass')
@classmethod
def tearDown(cls):
print('TearDownClass')
def setUp(self):
"""Unit test setup"""
print('setUp')
self.u1 = User()
self.u2 = User()
def tearDown(self):
"""Unit test tear down"""
del self.u1
del self.u2
def test_init(self):
"""Test for init method"""
print("testing init...")
self.assertIsNotNone(self.u1)
self.assertIsInstance(self.u1, BaseModel)
self.assertIs(type(self.u1), User)
def test_uuid(self):
"""Test for uuid attribute"""
print("testing uuid...")
self.assertTrue(hasattr(self.u1, "id"))
self.assertNotEqual(self.u1.id, self.u2.id)
self.assertIsInstance(self.u1.id, str)
def test_email(self):
"""Test for email attribute"""
print("testing email...")
self.assertTrue(hasattr(self.u1, "email"))
self.assertEqual(self.u1.email, "")
self.assertIsInstance(self.u1.email, str)
def test_first_name(self):
"""Test for first_name attribute"""
print("testing first_name...")
self.assertTrue(hasattr(self.u1, "first_name"))
self.assertEqual(self.u1.first_name, "")
self.assertIsInstance(self.u1.first_name, str)
def test_last_name(self):
"""Test for last_name attribute"""
print("testing last_name...")
self.assertTrue(hasattr(self.u1, "last_name"))
self.assertEqual(self.u1.last_name, "")
self.assertIsInstance(self.u1.last_name, str)
def test_str(self):
"""Test for __str__ method"""
print("testing __str__method...")
result = len(self.u1.__str__())
self.assertTrue(result, 172)
def test_save(self):
"""Test for save method"""
print("testing save method...")
prechange = self.u1.updated_at
self.u1.save()
postchange = self.u1.updated_at
self.assertNotEqual(prechange, postchange)
def test_created_at(self):
"""Test for created at time"""
print("Testing the created at time attr")
self.assertTrue(hasattr(self.u1, "created_at"))
def test_updated_at(self):
"""Test for the updated at time attr"""
print("Testing the updated at time attr")
prechange = self.u1.updated_at
self.u1.save()
postchange = self.u1.updated_at
self.assertNotEqual(prechange, postchange)
def test_kwargs(self):
"""Test for kwargs"""
print("Testing for kwargs")
self.u1.name = "Holberton"
self.u1.my_number = 89
u1_json = self.u1.to_dict()
u2 = User(**u1_json)
self.assertEqual(self.u1.id, u2.id)
self.assertEqual(self.u1.created_at, u2.created_at)
self.assertEqual(self.u1.updated_at, u2.updated_at)
self.assertEqual(self.u1.name, u2.name)
self.assertEqual(self.u1.my_number, u2.my_number)
def test_module_docstring(self):
"""Test for existence of module docstring"""
print("testing module docstring...")
result = len(__import__('models.user').__doc__)
self.assertTrue(result > 0, True)
def test_class_docstring(self):
"""User Class Docstring Test"""
print("test_class_docstring")
result = len(User.__doc__)
self.assertTrue(result > 0, True)
def test_init_docstring(self):
"""User init Docstring Test"""
print("test_init_docstring")
result = len(self.__init__.__doc__)
self.assertTrue(result > 0, True)
def test__str__docstring(self):
"""User __str__ Docstring Test"""
print("testing __str__ docstring...")
result = len(User.__str__.__doc__)
self.assertTrue(result > 0, True)
def test_save_docstring(self):
"""User save method Docstring Test"""
print("testing save docstring...")
result = len(User.save.__doc__)
self.assertTrue(result > 0, True)
def test_to_dict_docstring(self):
"""User to_dict Docstring Test"""
print("testing to_dict docstring...")
result = len(User.to_dict.__doc__)
self.assertTrue(result > 0, True)
if __name__ == "__main__":
unittest.main()
|
[
"vietnamtran@gmail.com"
] |
vietnamtran@gmail.com
|
a48f5d7a47297a3004b4066f232a6791173b237c
|
7caa438706a423dd9779a81f8345fcf1ec11e921
|
/NXT-Python/pyglet-1.2.4/tests/window/WINDOW_SET_ICON.py
|
bdeac31fc404c7ca4aa58750eff8f810ef925d2f
|
[
"BSD-3-Clause"
] |
permissive
|
tamarinvs19/python-learning
|
5dd2582f5dc504e19a53e9176677adc5170778b0
|
1e514ad7ca8f3d2e2f785b11b0be4d57696dc1e9
|
refs/heads/master
| 2021-07-15T13:23:24.238594
| 2021-07-08T07:07:21
| 2021-07-08T07:07:21
| 120,604,826
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
#!/usr/bin/env python
'''Test that window icon can be set.
Expected behaviour:
One window will be opened. It will have an icon depicting a yellow
"A".
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: WINDOW_SET_MOUSE_CURSOR.py 717 2007-03-03 07:04:10Z Alex.Holkner $'
import unittest
from pyglet.gl import *
from pyglet import image
from pyglet import window
from pyglet.window import key
from os.path import join, dirname
icon_file = join(dirname(__file__), 'icon1.png')
class WINDOW_SET_ICON(unittest.TestCase):
    # Manual/visual test: opens a window, sets its icon from icon1.png,
    # and spins the event loop until the user closes the window.
    def test_set_icon(self):
        """Open a 200x200 window, set its icon, and pump events until exit."""
        self.width, self.height = 200, 200
        self.w = w = window.Window(self.width, self.height)
        w.set_icon(image.load(icon_file))
        glClearColor(1, 1, 1, 1)
        # Drive the loop manually so the window stays responsive until the
        # user closes it (which sets w.has_exit).
        while not w.has_exit:
            glClear(GL_COLOR_BUFFER_BIT)
            w.flip()
            w.dispatch_events()
        w.close()
if __name__ == '__main__':
unittest.main()
|
[
"slavabarsuk@ya.ru"
] |
slavabarsuk@ya.ru
|
97743cb1f373612199548c0a7a0999c2a8642e77
|
c30e2b2e1b7876af01afc11e70b9bde66ebc6d6a
|
/conftest.py
|
9c646ac4834f556a9c633eecf8706d0f05ad49ad
|
[] |
no_license
|
Jumas-Cola/stepik_selenium_test_project
|
7aad125be2623520c6bfe93b34438ffd12de4303
|
8bc920a25ff83883dc6dd653f3dec7ea25350f27
|
refs/heads/master
| 2020-06-11T10:49:45.344813
| 2019-06-28T20:04:08
| 2019-06-28T20:04:08
| 193,936,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def pytest_addoption(parser):
    # Register a --language CLI option (default "en-gb") so tests can
    # request a browser localised to a specific language.
    parser.addoption('--language', action='store', default='en-gb',
                     help='Choose language of browser.\
                           For example: --language="es"')
@pytest.fixture
def browser(request):
    """Yield a Chrome WebDriver whose UI language follows --language."""
    lang = request.config.getoption("language")
    options = Options()
    # Chrome reads the preferred locale from the intl.accept_languages pref.
    options.add_experimental_option('prefs', {'intl.accept_languages': lang})
    browser = webdriver.Chrome(options=options)
    yield browser
    # Teardown: runs after the test that used the fixture finishes.
    browser.quit()
|
[
"kbbyfl91@gmail.com"
] |
kbbyfl91@gmail.com
|
a8ad2ad4050925e02f37ff0841869c81a863dd0c
|
14d8418ca5990217be67aee89fdaa310db03fbba
|
/models/collector_pagination_response.py
|
f8002358f1839b81d410471e69eb2cd75df52a25
|
[
"Apache-2.0"
] |
permissive
|
sachanta/lm-sdk-python
|
3a16457bd2d5b880a0d238a88a9d1d5b8d9675f0
|
e476d415c7279457f79b5d032a73d950af2fe96b
|
refs/heads/master
| 2023-08-03T08:39:42.842790
| 2021-09-13T07:20:56
| 2021-09-13T07:20:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,155
|
py
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. Note: For Python SDKs, the REQUEST parameters can contain camelCase or an underscore. However, the RESPONSE parameters will always contain an underscore. For example, the REQUEST parameter can be testLocation or test_location. The RESPONSE parameter will be test_location. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.collector_base import CollectorBase # noqa: F401,E501
class CollectorPaginationResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared type; to_dict() uses this to recurse.
    swagger_types = {
        'total': 'int',
        'search_id': 'str',
        'items': 'list[CollectorBase]'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'total': 'total',
        'search_id': 'searchId',
        'items': 'items'
    }

    def __init__(self, total=None, search_id=None, items=None):  # noqa: E501
        """CollectorPaginationResponse - a model defined in Swagger"""  # noqa: E501
        self._total = None
        self._search_id = None
        self._items = None
        self.discriminator = None
        # Only assign through the property setters when a value was given,
        # so unset fields stay None.
        if total is not None:
            self.total = total
        if search_id is not None:
            self.search_id = search_id
        if items is not None:
            self.items = items

    @property
    def total(self):
        """Gets the total of this CollectorPaginationResponse.  # noqa: E501

        :return: The total of this CollectorPaginationResponse.  # noqa: E501
        :rtype: int
        """
        return self._total

    @total.setter
    def total(self, total):
        """Sets the total of this CollectorPaginationResponse.

        :param total: The total of this CollectorPaginationResponse.  # noqa: E501
        :type: int
        """
        self._total = total

    @property
    def search_id(self):
        """Gets the search_id of this CollectorPaginationResponse.  # noqa: E501

        :return: The search_id of this CollectorPaginationResponse.  # noqa: E501
        :rtype: str
        """
        return self._search_id

    @search_id.setter
    def search_id(self, search_id):
        """Sets the search_id of this CollectorPaginationResponse.

        :param search_id: The search_id of this CollectorPaginationResponse.  # noqa: E501
        :type: str
        """
        self._search_id = search_id

    @property
    def items(self):
        """Gets the items of this CollectorPaginationResponse.  # noqa: E501

        :return: The items of this CollectorPaginationResponse.  # noqa: E501
        :rtype: list[CollectorBase]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this CollectorPaginationResponse.

        :param items: The items of this CollectorPaginationResponse.  # noqa: E501
        :type: list[CollectorBase]
        """
        self._items = items

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursing into nested models
        # (anything exposing to_dict), lists of models, and dict values.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # NOTE(review): generated boilerplate — unreachable here because the
        # class does not subclass dict; if it ever did, ``self.items()`` would
        # hit the ``items`` property instead of dict.items — confirm before
        # relying on this branch.
        if issubclass(CollectorPaginationResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CollectorPaginationResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"bamboo@build01.us-west-1.logicmonitor.net"
] |
bamboo@build01.us-west-1.logicmonitor.net
|
4fd18aa08ea26bb71cfe0f3e50af1b3194892ff2
|
902dea88ec336851f2c325d44a0dd0eaf411fb87
|
/day1/strings/19.py
|
0e66b0d530e704999c4e711edd826992dae3e877
|
[] |
no_license
|
shobhit-nigam/tsip_pydoge
|
34d8e03d0744c2eff7615ae94bd6998739ce2bfd
|
bff5b24e1e93b5b41dfcb913cee280d1ee53bbf5
|
refs/heads/main
| 2023-07-19T05:21:35.459518
| 2021-09-06T12:59:22
| 2021-09-06T12:59:22
| 399,728,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
# Quote-style demo: a double-quoted literal and an escaped single-quoted
# literal hold exactly the same text.
vara = "I'am Manish"   # apostrophe is fine inside double quotes
varb = 'I\'am Manish'  # inside single quotes it must be escaped
for label, text in (("vara =", vara), ("varb =", varb)):
    print(label, text)
# Mismatched quotes are a SyntaxError:
# varc = 'pushpa"
|
[
"noreply@github.com"
] |
shobhit-nigam.noreply@github.com
|
7eabf17dbaccbfaf4b782657965ea1bc1aaebdf7
|
c317f99691f549b393562db200b1e9504ce11f95
|
/algorithms_learn/what_can_be_computed/src/convertSatTo3Sat.py
|
4ae664cf883f08c293e4acd4fed2ec99d750493a
|
[
"CC-BY-4.0"
] |
permissive
|
RRisto/learning
|
5349f9d3466150dbec0f4b287c13333b02845b11
|
618648f63a09bf946a50e896de8aed0f68b5144a
|
refs/heads/master
| 2023-09-01T00:47:23.664697
| 2023-08-30T17:56:48
| 2023-08-30T17:56:48
| 102,286,332
| 15
| 24
| null | 2023-07-06T21:22:48
| 2017-09-03T18:42:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,474
|
py
|
# SISO program convertSatTo3Sat.py
# Convert an instance of SAT into an
# equivalent instance of 3-SAT.
# inString: an instance of SAT, formatted as described in the textbook
# and sat.py.
# returns: an instance of 3SAT in the same string format.
# Example:
# >>> convertSatTo3Sat('(x1 OR x2 OR NOT x3 OR NOT x4)')
# '(d1 OR x1 OR x2) AND (NOT d1 OR NOT x3 OR NOT x4)'
import utils; from utils import rf
import sat
def convertSatTo3Sat(inString):
    """Convert a SAT instance (string form) into an equivalent 3-SAT string.

    Long clauses (more than three literals) are repeatedly split via
    splitClause() until every clause has at most three literals.
    """
    cnfFormula = sat.readSat(inString)
    allVariables = sat.getVariablesAsSet(cnfFormula)
    # Repeatedly sweep through the clauses looking for "long" clauses
    # (i.e., clauses with more than three literals). We favor
    # simplicity and readability over efficiency here. Every time a
    # long clause is found, it is removed, split, and replaced in the
    # list by two new, shorter clauses. The new clauses are inserted
    # at the point where the previous long clause was removed. Then
    # we go back to the start of the entire list of clauses and start
    # looking for long clauses again. This ends up being quadratic or
    # worse, whereas near-linear is possible. But the approach is
    # simple to understand and the clauses remain in a logical order.
    done = False
    while not done:
        done = True
        for clauseID in range(len(cnfFormula)):
            clause = cnfFormula[clauseID]
            if len(clause) > 3:
                done = False
                (newClause1, newClause2) = splitClause(clause, allVariables)
                # Replace the long clause in place with its two halves so
                # clause order stays stable; then restart the sweep.
                cnfFormula.pop(clauseID)
                cnfFormula.insert(clauseID, newClause1)
                cnfFormula.insert(clauseID+1, newClause2)
                break
    return sat.writeSat(cnfFormula)
def splitClause(clause, allVariables):
    """Split a long clause into two shorter clauses joined by a dummy variable.

    Args:
        clause (dict mapping str to int): key is a variable, value is +1
            for a positive literal, -1 for negative, 0 for both.
        allVariables (set of str): all variable names in use; a fresh
            dummy name is added to it as a side effect.

    Returns:
        (clause, clause): the first clause holds all but the last two
        (sorted) literals plus the dummy positively; the second holds the
        last two literals plus the dummy negatively.  This preserves
        satisfiability of the original clause.
    """
    assert len(clause) > 3
    dummyVariable = addDummyVariable(allVariables)
    # Sorting is not required for correctness, but makes the output
    # deterministic and readable.
    orderedVars = sorted(clause.keys())
    firstPart = {var: clause[var] for var in orderedVars[:-2]}
    secondPart = {var: clause[var] for var in orderedVars[-2:]}
    # The dummy appears positively in one half and negatively in the other.
    firstPart[dummyVariable] = +1
    secondPart[dummyVariable] = -1
    return (firstPart, secondPart)
def addDummyVariable(allVariables):
    """Create, add, and return a new dummy variable name.

    Scans d1, d2, d3, ... for the first name not already present in
    allVariables, adds it to the set, and returns it.  Linear in the
    number of existing dummy variables; of course we could do better
    than that if desired.

    Args:
        allVariables (set of str): all variable names in use; the chosen
            dummy name is added to this set as a side effect.

    Returns:
        str: the new dummy variable name.
    """
    # The original used a ``done`` flag that was never set to True; a
    # plain while-True loop with an early return expresses the same
    # search honestly.  (The duplicated comment block that repeated this
    # docstring above the function has been folded into it.)
    i = 1
    while True:
        dummyName = 'd' + str(i)
        if dummyName not in allVariables:
            allVariables.add(dummyName)
            return dummyName
        i += 1
def testAddDummyVariable():
    """Unit test: dummy names d1..d5 are created in order and recorded."""
    formulaStr = '(x1 OR x2 OR NOT x3 OR NOT x4 OR x5) AND (NOT x1 OR NOT x2 OR x3 OR x4) AND (x4 OR NOT x5)'
    cnfFormula = sat.readSat(formulaStr)
    allVariables = sat.getVariablesAsSet(cnfFormula)
    numVars = len(allVariables)
    for i in range(5):
        dummyName = addDummyVariable(allVariables)
        utils.tprint(dummyName, allVariables)
        # Each call must add exactly one new name d<i+1> to the set.
        varName = 'd'+str(i+1)
        assert varName in allVariables
        assert len(allVariables) == numVars + i+1
def testSplitClause():
    """Unit test: splitting a 5-literal clause yields the documented halves."""
    formulaStr = '(x1 OR x2 OR NOT x3 OR NOT x4 OR x5) AND (NOT x1 OR NOT x2 OR x3 OR x4) AND (x4 OR NOT x5)'
    cnfFormula = sat.readSat(formulaStr)
    allVariables = sat.getVariablesAsSet(cnfFormula)
    result = splitClause(cnfFormula[0], allVariables)
    # d1 is the freshly created dummy: +1 in the first half, -1 in the second.
    solution = ({'x1': 1, 'd1': 1, 'x3': -1, 'x2': 1}, {'d1': -1, 'x5': 1, 'x4': -1})
    utils.tprint('before split:', cnfFormula[0], '\nafter split:', result)
    assert result==solution
def testConvertSatTo3Sat():
    """End-to-end test: known SAT strings convert to the expected 3-SAT."""
    s0 = '(x1 OR x2 OR NOT x3 OR NOT x4 OR x5) AND (NOT x1 OR NOT x2 OR x3 OR x4) AND (x4 OR NOT x5)'
    s0soln = '(d1 OR d2 OR x1) AND (NOT d2 OR x2 OR NOT x3) AND (NOT d1 OR NOT x4 OR x5) AND (d3 OR NOT x1 OR NOT x2) AND (NOT d3 OR x3 OR x4) AND (x4 OR NOT x5)'
    # Edge cases: empty formula, single literal, short clauses that need
    # no splitting.
    s1 = ''
    s1soln = ''
    s2 = 'x1'
    s2soln = '(x1)'
    s3 = 'x1 AND NOT x2'
    s3soln = '(x1) AND (NOT x2)'
    s4 = 'x1 OR NOT x2'
    s4soln = '(x1 OR NOT x2)'
    testvals = [
        (s0, s0soln),
        (s1, s1soln),
        (s2, s2soln),
        (s3, s3soln),
        (s4, s4soln),
    ]
    for (inString, soln) in testvals:
        utils.tprint('**', inString, '**')
        converted = convertSatTo3Sat(inString)
        utils.tprint(converted, '\n\n')
        assert converted == soln
|
[
"ristohinno@gmail.com"
] |
ristohinno@gmail.com
|
a6e0a619fb8ccad6c68753739f72ef7217d9a4a8
|
95689182691599b2e74ca33b36d2828a01ec5889
|
/proyectos_de_ley/pdl/migrations/0002_proyecto_legislatura.py
|
f73ae0753f3b06383b38193a5fcf041fd0a05384
|
[
"MIT"
] |
permissive
|
proyectosdeley/proyectos_de_ley
|
2392c6f3fdefc88d355f37e615ddb5ddc70c5321
|
aed3f09dd2e41711bdcb27aec66a1a0d7896bb35
|
refs/heads/master
| 2021-07-14T12:33:33.793325
| 2020-07-26T19:44:53
| 2020-07-26T19:44:53
| 23,754,905
| 13
| 10
|
MIT
| 2020-07-26T19:44:54
| 2014-09-07T07:32:53
|
Python
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-02 20:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``legislatura`` to Proyecto, backfilling existing rows with 2011."""

    dependencies = [
        ('pdl', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='proyecto',
            name='legislatura',
            # ``max_length`` is not a valid IntegerField option — Django
            # ignores it and reports fields.W122 in system checks — so it
            # is dropped; the generated schema is identical.
            field=models.IntegerField(default=2011),
            # default=2011 was a one-off value for existing rows only.
            preserve_default=False,
        ),
    ]
|
[
"mycalesis@gmail.com"
] |
mycalesis@gmail.com
|
941083e324bb5e8f93f3c8d1192cd5dacd22c422
|
75566ef3423c72fe9e73075dfe29df172b65a28c
|
/src/scraper/database.py
|
f2d4c88d42deffe9bfa9459831dc37dae9483bca
|
[] |
no_license
|
bicsi/cpaggregator
|
7020663305eeff8690e92da64fc796926b12fe31
|
b6459fe33c19dc2020b29470e457f0666b0ff212
|
refs/heads/master
| 2022-12-12T10:24:38.756844
| 2020-11-11T13:53:30
| 2020-11-11T13:53:30
| 153,731,264
| 2
| 1
| null | 2022-12-08T01:15:28
| 2018-10-19T05:33:06
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
import os
from pprint import pprint
from pymongo import MongoClient, ReplaceOne
from pymongo.errors import BulkWriteError
from core.logging import log
def __insert_many_silent(coll, iterable, unique_fields):
    """Bulk-upsert every element of ``iterable`` into collection ``coll``.

    Each element is matched against an existing document on
    ``unique_fields`` and replaced (or inserted) via ReplaceOne upserts.
    Duplicate-key write errors (code 11000) are ignored; any other write
    error is logged along with the payload and re-raised.

    Returns the number of newly inserted documents.
    """
    requests = []
    for elem in iterable:
        # Identity filter for the upsert: the element's unique fields.
        find_dict = {field: elem[field] for field in unique_fields}
        requests.append(ReplaceOne(find_dict, elem, upsert=True))
    try:
        result = coll.bulk_write(requests)
        return result.inserted_count
    except BulkWriteError as bwe:
        for err in bwe.details['writeErrors']:
            # 11000 = MongoDB duplicate-key error; anything else is fatal.
            if err['code'] != 11000:
                log.error(bwe.details)
                log.error(pprint(iterable))
                raise
        return bwe.details['nInserted']
def get_db():
    """Return a MongoDB database handle.

    With PRODUCTION set, host/port/name/credentials come from MONGODB_*
    environment variables; otherwise a default local client and the
    'competitive' database are used.
    """
    if os.environ.get('PRODUCTION'):
        connection = MongoClient(os.environ.get('MONGODB_HOST'), int(os.environ.get('MONGODB_PORT')))
        db = connection[os.environ.get('MONGODB_NAME')]
        # NOTE(review): Database.authenticate was removed in PyMongo 4.x;
        # confirm the pinned pymongo version still provides it.
        db.authenticate(os.environ.get('MONGODB_USER'), os.environ.get('MONGODB_PASS'))
        return db
    return MongoClient()['competitive']
def insert_report(db, report_id, created_at, report):
    """Store one generated report document in the ``reports`` collection."""
    coll = db["reports"]
    # Collection.insert() is deprecated and was removed in modern PyMongo;
    # insert_one is the supported single-document equivalent (and matches
    # the modern bulk_write API used elsewhere in this module).
    coll.insert_one({
        'report_id': report_id,
        'created_at': created_at,
        'report': report,
    })
def insert_submissions(db, submissions):
    """Upsert submissions, unique per (judge, submission, author); return insert count."""
    return __insert_many_silent(
        coll=db["submissions"],
        iterable=submissions,
        unique_fields=['judge_id', 'submission_id', 'author_id'])
def insert_handles(db, handles):
    """Upsert user handles, unique per (judge, handle); return insert count."""
    return __insert_many_silent(
        coll=db["handles"],
        iterable=handles,
        unique_fields=['judge_id', 'handle'])
def find_submissions(db, date_range=None, **query_dict):
    """Return a cursor over submissions matching ``query_dict``.

    When ``date_range`` is a (start, end) pair, results are further
    restricted to submissions whose ``submitted_on`` lies in that
    inclusive range.
    """
    if date_range is not None:
        range_start, range_end = date_range
        query_dict['submitted_on'] = {'$gte': range_start, '$lte': range_end}
    return db["submissions"].find(query_dict)
def insert_tasks(db, tasks):
    """Upsert tasks, unique per (judge, task); return insert count."""
    return __insert_many_silent(
        coll=db["tasks"],
        iterable=tasks,
        unique_fields=['judge_id', 'task_id'])
|
[
"lucianbicsi@gmail.com"
] |
lucianbicsi@gmail.com
|
2f2f37808d18c375de2161f33f361a7206bf124d
|
29d09c634ffdd8cab13631d62bc6e3ad00df49bf
|
/Algorithm/swexpert/1249_보급로건설.py
|
0f9f48542b53f0796bb44539d57d7850a9539998
|
[] |
no_license
|
kim-taewoo/TIL_PUBLIC
|
f1d32c3b4f46344c1c99f02e95cc6d2a888a0374
|
ae86b542f8b1805b5dd103576d6538e3b1f5b9f4
|
refs/heads/master
| 2021-09-12T04:22:52.219301
| 2021-08-28T16:14:11
| 2021-08-28T16:14:11
| 237,408,159
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
from collections import deque

# 4-neighbour offsets: up, right, down, left.
dr = (-1, 0, 1, 0)
dc = (0, 1, 0, -1)

T = int(input())
for t in range(1, T+1):
    n = int(input())
    board = [list(map(int, list(input()))) for _ in range(n)]
    # chk[r][c] = cheapest repair cost found so far to reach cell (r, c).
    chk = [[21470000 for _ in range(n)] for __ in range(n)]
    chk[0][0] = 0
    # BUG FIX: deque((0, 0)) builds a deque of two ints (0 and 0), which
    # crashes on ``r, c = q.popleft()``.  The queue must start with the
    # single coordinate tuple (0, 0).
    q = deque([(0, 0)])
    # SPFA-style relaxation: re-enqueue a cell whenever a cheaper path
    # to it is found.
    while q:
        r, c = q.popleft()
        cost = chk[r][c]
        for d in range(4):
            nr, nc = r + dr[d], c + dc[d]
            if 0 <= nr < n and 0 <= nc < n:
                ncost = cost + board[nr][nc]
                if ncost < chk[nr][nc]:
                    chk[nr][nc] = ncost
                    q.append((nr, nc))
    print("#{} {}".format(t, chk[n-1][n-1]))
|
[
"acoustic0419@gmail.com"
] |
acoustic0419@gmail.com
|
51b36a04caec338e1885e9ea1791c5c6c0d3e2af
|
d8d1a9b2bec5b2679129c246cf58acc836e4355b
|
/pytest_resource_path/absolute_path_factory.py
|
117b373b80071080391d19d48136aa6e5f7f39ef
|
[
"MIT"
] |
permissive
|
yukihiko-shinoda/pytest-resource-path
|
0ac0d612887f453b793ec114b65eb9613817b5cc
|
bc56c4b5f2c8f3138baeac7f145717f6a70af7b6
|
refs/heads/master
| 2023-04-15T21:05:18.643053
| 2021-05-01T04:27:12
| 2021-05-01T04:27:12
| 261,375,368
| 10
| 0
|
MIT
| 2021-05-01T02:45:39
| 2020-05-05T06:08:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
"""Implements creating process for absolute path to argument of constructor."""
from pathlib import Path
from types import FunctionType, MethodType
from typing import Union
from pytest_resource_path.exceptions import LogicError
from pytest_resource_path.path_factory import PathFactory
__all__ = ["AbsolutePathFactory"]
class AbsolutePathFactory:
    """Implements creating process for absolute path to argument of constructor."""

    def __init__(self, path_target: Path):
        # Name of the path component to locate (e.g. the tests directory).
        self.path_target = path_target

    def create_by_function(self, item: Union[MethodType, FunctionType]) -> Path:
        """Resolve the function's defining file, then delegate to create_by_path."""
        path = PathFactory.create_absolute_path_by_function(item)
        return self.create_by_path(path)

    def create_by_path(self, path: Path) -> Path:
        """Creates absolute path to path_target.

        Scans every component of ``path`` and remembers the position of the
        LAST component equal to ``path_target``; returns ``path`` truncated
        so that the matched component is its final part.
        """
        index = None
        index_tests = None
        string_path_tests = str(self.path_target)
        # After the loop, ``index`` is the index of the last part and
        # ``index_tests`` the index of the last match (None if absent).
        for index, part in enumerate(path.parts):
            if part == string_path_tests:
                index_tests = index
        if index is None or index_tests is None:
            raise LogicError(  # pragma: no cover
                "Unexpected path.\n"
                "path = " + str(path) + ",\n"
                "string_path_tests = " + string_path_tests + ",\n"
                "index_tests, " + str(index_tests) + ",\n"
                "index = " + str(index)
            )
        # parents[k] strips k+1 trailing parts, so this removes everything
        # after the matched component.
        return path.parents[index - index_tests - 1]
|
[
"yuk.hik.future@gmail.com"
] |
yuk.hik.future@gmail.com
|
b6250476b579a0d0ee5585b0f82e06ea882db68d
|
de64154c4a968ab8c04390938edc300f2b52f129
|
/tests/lldb/runtest.py
|
71ee019c66d0c9c37232d6e655065a37bdde5e49
|
[
"Apache-2.0"
] |
permissive
|
curliph/NyuziProcessor
|
7364a83a52b3f1d461c908a9ff88ee222be08c25
|
2d7cc748a8388a5be4c28d3cb34786bc9f0b801a
|
refs/heads/master
| 2020-04-10T15:16:51.874141
| 2018-12-09T22:40:55
| 2018-12-09T22:40:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,708
|
py
|
#!/usr/bin/env python3
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import sys
sys.path.insert(0, '..')
import test_harness
class EmulatorProcess(object):
    """
    This spawns the emulator process and LLDB in MI (machine interface) mode.
    It allows communication with LLDB with it via stdin and stdout. It has the
    __enter__ and __exit__ methods allowing it to be used in the 'with'
    construct so it will automatically be torn down when the test is done.
    """

    def __init__(self, hexfile):
        self.hexfile = hexfile
        self.elf_file = os.path.splitext(hexfile)[0] + '.elf'
        self.output = None
        self.emulator_proc = None
        self.lldb_proc = None
        self.outstr = None   # LLDB's stdin (commands are written here)
        self.instr = None    # LLDB's stdout (responses are read from here)

    def __enter__(self):
        """Start the emulator in GDB-server mode, then start lldb-mi."""
        emulator_args = [
            test_harness.EMULATOR_PATH,
            '-m',
            'gdb',
            '-v',
            self.hexfile
        ]

        # Suppress emulator output unless the harness runs in debug mode.
        if test_harness.DEBUG:
            self.output = None
        else:
            self.output = open(os.devnull, 'w')

        self.emulator_proc = subprocess.Popen(emulator_args, stdout=self.output,
                                              stderr=subprocess.STDOUT)

        lldb_args = [
            test_harness.COMPILER_BIN + 'lldb-mi'
        ]

        # XXX race condition: the emulator needs to be ready before
        # lldb tries to connect to it.
        try:
            self.lldb_proc = subprocess.Popen(lldb_args, stdout=subprocess.PIPE,
                                              stdin=subprocess.PIPE)
            self.outstr = self.lldb_proc.stdin
            self.instr = self.lldb_proc.stdout
        except:
            # Bare except is deliberate: always kill the emulator we just
            # spawned, then re-raise whatever went wrong.
            test_harness.kill_gently(self.emulator_proc)
            raise

        return self

    def __exit__(self, *unused):
        # Tear down both child processes regardless of test outcome.
        test_harness.kill_gently(self.emulator_proc)
        test_harness.kill_gently(self.lldb_proc)

    def send_command(self, cmd):
        """Write one command to LLDB and return its full response."""
        if test_harness.DEBUG:
            print('LLDB send: ' + cmd)

        self.outstr.write(str.encode(cmd + '\n'))
        self.outstr.flush()
        return self.wait_response()

    def wait_response(self):
        """Read LLDB output one byte at a time until the MI '^done' marker."""
        response = ''
        while True:
            response += self.instr.read(1).decode('utf-8')
            if response.endswith('^done'):
                break

        if test_harness.DEBUG:
            print('LLDB recv: ' + response)

        return response

    def wait_stop(self):
        """Consume LLDB output line by line until a '*stopped' event arrives."""
        current_line = ''
        while True:
            inchar = self.instr.read(1).decode('utf-8')
            current_line += inchar
            if inchar == '\n':
                if test_harness.DEBUG:
                    print('LLDB recv: ' + current_line[:-1])

                if current_line.startswith('*stopped'):
                    break

                current_line = ''
FRAME_RE = re.compile(
    'frame #[0-9]+:( 0x[0-9a-f]+)? [a-zA-Z_\\.0-9]+`(?P<function>[a-zA-Z_0-9][a-zA-Z_0-9]+)')
AT_RE = re.compile(' at (?P<filename>[a-z_A-Z][a-z\\._A-Z]+):(?P<line>[0-9]+)')


def parse_stack_crawl(response):
    """Extract (function, filename, line) triples from an LLDB-MI backtrace.

    The MI protocol escapes newlines as the literal two-character
    sequence backslash-n, hence the split on '\\n'.  Frames without
    source information yield ('', 0) for filename/line.
    """
    frames = []
    for raw_line in response.split('\\n'):
        frame = FRAME_RE.search(raw_line)
        if not frame:
            continue
        location = AT_RE.search(raw_line)
        if location is None:
            frames.append((frame.group('function'), '', 0))
        else:
            frames.append((frame.group('function'),
                           location.group('filename'),
                           int(location.group('line'))))
    return frames
@test_harness.test(['emulator'])
def lldb(*unused):
    """This mainly validates that LLDB is reading symbols correctly.

    Builds test_program.c unoptimized with debug info, connects LLDB to
    the emulated GDB server, then checks breakpoints, the backtrace, and
    variable values in two stack frames.
    """
    hexfile = test_harness.build_program(
        ['test_program.c'], opt_level='-O0', cflags=['-g'])
    with EmulatorProcess(hexfile) as conn:
        conn.send_command('file "' + test_harness.WORK_DIR + '/program.elf"')
        conn.send_command('gdb-remote 8000\n')
        response = conn.send_command(
            'breakpoint set --file test_program.c --line 27')
        if 'Breakpoint 1: where = program.elf`func2 + 100 at test_program.c:27' not in response:
            raise test_harness.TestException(
                'breakpoint: did not find expected value ' + response)

        # Continue to the breakpoint inside func2.
        conn.send_command('c')
        conn.wait_stop()

        expected_stack = [
            ('func2', 'test_program.c', 27),
            ('func1', 'test_program.c', 35),
            ('main', 'test_program.c', 41),
            ('do_main', '', 0)
        ]

        response = conn.send_command('bt')
        crawl = parse_stack_crawl(response)
        if crawl != expected_stack:
            raise test_harness.TestException(
                'stack crawl mismatch ' + str(crawl))

        response = conn.send_command('print value')
        if '= 67' not in response:
            raise test_harness.TestException(
                'print value: Did not find expected value ' + response)

        response = conn.send_command('print result')
        if '= 128' not in response:
            raise test_harness.TestException(
                'print result: Did not find expected value ' + response)

        # Up to previous frame
        conn.send_command('frame select --relative=1')

        response = conn.send_command('print a')
        if '= 12' not in response:
            raise test_harness.TestException(
                'print a: Did not find expected value ' + response)

        response = conn.send_command('print b')
        if '= 67' not in response:
            raise test_harness.TestException(
                'print b: Did not find expected value ' + response)

        conn.send_command('step')
        conn.wait_stop()

        response = conn.send_command('print result')
        if '= 64' not in response:
            # BUG FIX: this failure message previously said 'print b:'
            # although the check is for 'print result'.
            raise test_harness.TestException(
                'print result: Did not find expected value ' + response)
test_harness.execute_tests()
|
[
"jeffbush001@gmail.com"
] |
jeffbush001@gmail.com
|
1f40a950463b7987030dd8e8eb8b668506c8e112
|
920f0fbb7064f2017ff62da372eaf79ddcc9035b
|
/lc_ladder/Basic_Algo/data-struture/Kth_Largest_Element.py
|
263f9c80c8d59cb0954144b77ec4601158323480
|
[] |
no_license
|
JenZhen/LC
|
b29a1c45d8c905680c7b4ad0017516b3dca80cc4
|
85219de95e41551fce5af816b66643495fe51e01
|
refs/heads/master
| 2021-06-03T10:03:02.901376
| 2020-08-05T19:44:48
| 2020-08-05T19:44:48
| 104,683,578
| 3
| 1
| null | 2020-08-05T19:44:50
| 2017-09-24T23:30:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,106
|
py
|
#!/usr/bin/python
# http://www.lintcode.com/en/problem/kth-largest-element/
# Example
# In array [9,3,2,4,8], the 3rd largest element is 4.
# In array [1,2,3,4,5], the 1st largest element is 5, 2nd largest element is 4, 3rd largest element is 3 and etc.
"""
Algo: QuickSelect
D.S.: Heap, Array
Solution1:
Heap
Find largest number -> max heap
- In python, heapq is min heap. To convert to maxHeap, negate value when push, negate when pop
- In c++, priority queue is max heap.
std::priority_queue<int, std::vector<int>, mycomparison> myPQ
Takes type, container, comparator. use comparator to define min/max/customized heap
Find the Kth element
- option 1: iterate k times
- use fixed size heap
Solution2:
QuickSelect 经典模板, 适用于有相同数字的情况
Time: O(N) - average, O(N ^ 2) - worst case
Space: O(1)
[4, 1, 3, 2] - N = 4; K = 2
第K(2)大 --> 第3小 --> idx = 2 (N - K)
第K(2)小 --> 第3大 --> idx = 1 (K - 1)
Corner cases:
"""
class Solution1:
    """Kth-largest via a max-heap (heapq min-heap over negated values)."""

    # @param k & A a integer and an array
    # @return ans a integer
    def kthLargestElement(self, k, A):
        """Return the kth largest element of A, or None for missing/zero/negative k."""
        if A is None or k is None or k == 0:
            return None
        from heapq import heappush, heappop
        if k < 0:
            # Matches the original behavior: the pop loop never ran for
            # negative k, so the result stayed None.
            return None
        # heapq is a min-heap; pushing negated values simulates a max-heap.
        heap = []
        for value in A:
            heappush(heap, -value)
        # Discard the k-1 largest values; the next pop is the answer.
        for _ in range(k - 1):
            heappop(heap)
        return -heappop(heap)
class Solution2:
    """Kth-largest via quickselect (Lomuto partition, rightmost pivot).

    Average O(N) time, O(N^2) worst case, O(1) extra space.
    NOTE(review): ``List`` requires ``from typing import List`` — confirm
    the original module imported it.
    """
    def findKthLargest(self, nums: List[int], k: int) -> int:
        """Kth largest == element at sorted index len(nums) - k."""
        return self.quick_select(nums, 0, len(nums) - 1, len(nums) - k)

    def quick_select(self, nums, l, r, target_idx):
        """Return the value that would sit at target_idx if nums[l:r+1] were sorted."""
        if l == r:
            return nums[l]
        # find the rightmost element to pivot, could be a random one within l, r
        pivot_idx = self.partition(nums, l, r)
        if target_idx == pivot_idx:
            return nums[target_idx]
        elif target_idx < pivot_idx:
            # when recursing, remember to step past the pivot (pivot_idx +/- 1)
            return self.quick_select(nums, l, pivot_idx - 1, target_idx)
        else:
            return self.quick_select(nums, pivot_idx + 1, r, target_idx)

    def partition(self, nums, l, r):
        """Lomuto partition on nums[l..r] with nums[r] as pivot; returns its final index."""
        i = l - 1  # i + 1 is the slot where the next value <= pivot goes
        pivot_value = nums[r]
        for j in range(l, r):
            if nums[j] <= pivot_value:
                i += 1
                nums[i], nums[j] = nums[j], nums[i]
        # finally, move the pivot into place and return its index
        i += 1
        nums[i], nums[r] = nums[r], nums[i]
        return i

    # parition can be
    def partition2(self, nums, l, r):
        """Equivalent partition: including j == r moves the pivot in the same sweep."""
        i = l - 1  # i + 1 will be next number less than pivot value
        pivot_value = nums[r]
        for j in range(l, r + 1):
            if nums[j] <= pivot_value:
                i += 1
                nums[i], nums[j] = nums[j], nums[i]
        return i
# Test Cases
if __name__ == "__main__":
|
[
"jenzhen.nyc89@yahoo.com"
] |
jenzhen.nyc89@yahoo.com
|
b4ca34cc14fe2f16d01b462a5870c0073ce4817b
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/TmcGroupAddRequest.py
|
0cfb70b6b6c82a626717aa0186205a08ca854def
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570
| 2016-03-04T09:48:24
| 2016-03-04T09:48:24
| 45,093,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:32
'''
from top.api.base import RestApi
class TmcGroupAddRequest(RestApi):
    """Request object for the taobao.tmc.group.add TOP API call."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters, populated by the caller before sending.
        self.group_name = None
        self.nicks = None

    def getapiname(self):
        return 'taobao.tmc.group.add'
|
[
"yangwenjin@T4F-MBP-17.local"
] |
yangwenjin@T4F-MBP-17.local
|
023a448e65f13fa1aa110f94679a9a53c6e6c840
|
3506d8c9a8391be52d24cff54f27537a92a7228c
|
/HackerRank/Strings/Palindrome_Index.py
|
e01f253f9a8217c583a0c47373b035d3b04a1d7b
|
[] |
no_license
|
saumya-singh/CodeLab
|
04ef2c61c516c417c03c6a510e8b5e6e498fbe5d
|
9371f0d6bd45e5592dae25b50f0d04ba45ae67cf
|
refs/heads/master
| 2021-09-12T05:01:17.491312
| 2018-04-14T19:48:40
| 2018-04-14T19:48:40
| 81,596,628
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
#!/bin/python3
#https://www.hackerrank.com/challenges/palindrome-index/problem
import sys
def palindromeIndex(s):
    """Return an index whose removal makes s a palindrome, or -1 if s
    already is one.

    Fixes two defects in the original: `index` could be referenced before
    assignment (UnboundLocalError) when neither two-character probe
    matched, and the probes compared only two characters, which can
    report an index whose removal does NOT yield a palindrome.  Here the
    surviving substring is verified in full.  O(n) time, O(1) space.
    """
    def _is_palindrome(lo, hi):
        # True when s[lo..hi] (inclusive) reads the same both ways.
        while lo < hi:
            if s[lo] != s[hi]:
                return False
            lo += 1
            hi -= 1
        return True

    i, j = 0, len(s) - 1
    while i < j:
        if s[i] != s[j]:
            # Exactly one of the two mismatched ends must go; if skipping
            # s[i] works, report i, otherwise j (the problem guarantees a
            # solution exists at this point).
            if _is_palindrome(i + 1, j):
                return i
            return j
        i += 1
        j -= 1
    return -1
# Driver: read the number of queries, then one string per line; print the
# removable index (or -1) for each query on its own line.
q = int(input().strip())
answer = []
for i in range(q):
    s = input().strip()
    result = palindromeIndex(s)
    answer.append(result)
for i in answer:
    print(i)
|
[
"saumya.singh0993@gmail.com"
] |
saumya.singh0993@gmail.com
|
6b1a3c6752fb987270983195e5489c049ddefa3c
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/file/formats/xar/XARUtil.pyi
|
cc17580bacec6487f3d70bd74261a9a2bf5e6add
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
pyi
|
import ghidra.app.util.bin
import ghidra.program.model.listing
import java.lang
class XARUtil(object):
    """Type stub (.pyi) for Ghidra's XARUtil: static helpers that detect
    the XAR archive format from either a raw byte provider or a loaded
    program.  Non-isXAR members are the standard java.lang.Object surface.
    """
    def __init__(self): ...
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    def hashCode(self) -> int: ...
    @overload
    @staticmethod
    def isXAR(__a0: ghidra.app.util.bin.ByteProvider) -> bool: ...
    @overload
    @staticmethod
    def isXAR(__a0: ghidra.program.model.listing.Program) -> bool: ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
98c7acd6912e8bc5c371f591667dc2220b9071e9
|
f10230c09860f1e01dcef56daab30a9200d7eb60
|
/common/admin.py
|
170b802e29c547aedb5673fa213fe83ff9dd05ac
|
[] |
no_license
|
DentiQ/test_web
|
08280ff506439e9bd1565fb502a9780b3c79707d
|
b7bceab1edd670a8cf60ecf882a466c26e68fb63
|
refs/heads/master
| 2023-01-23T14:43:17.420477
| 2020-12-01T13:54:18
| 2020-12-01T13:54:18
| 316,973,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _
|
[
"dentiq0414@gmail.com"
] |
dentiq0414@gmail.com
|
4c49ddf6a3224bc642267e24b285e2a9da8767ed
|
6b6e20004b46165595f35b5789e7426d5289ea48
|
/endpoints/appr/__init__.py
|
c998d8a958ab8159696191c64e114b0ac4e6902c
|
[
"Apache-2.0"
] |
permissive
|
anwarchk/quay
|
2a83d0ab65aff6a1120fbf3a45dd72f42211633b
|
23c5120790c619174e7d36784ca5aab7f4eece5c
|
refs/heads/master
| 2020-09-12T18:53:21.093606
| 2019-11-15T19:29:02
| 2019-11-15T19:29:02
| 222,517,145
| 0
| 0
|
Apache-2.0
| 2019-11-18T18:32:35
| 2019-11-18T18:32:35
| null |
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
import logging
from functools import wraps
from cnr.exception import Forbidden
from flask import Blueprint
from app import metric_queue
from auth.permissions import (AdministerRepositoryPermission, ReadRepositoryPermission,
ModifyRepositoryPermission)
from endpoints.appr.decorators import require_repo_permission
from util.metrics.metricqueue import time_blueprint
appr_bp = Blueprint('appr', __name__)
time_blueprint(appr_bp, metric_queue)
logger = logging.getLogger(__name__)
def _raise_method(repository, scopes):
    # Raise-callback handed to require_repo_permission below: translates a
    # failed permission check into a CNR Forbidden error that carries the
    # repository name and the scopes that were requested.
    raise Forbidden("Unauthorized access for: %s" % repository,
                    {"package": repository, "scopes": scopes})
def _get_reponame_kwargs(*args, **kwargs):
return [kwargs['namespace'], kwargs['package_name']]
# Permission decorators for the AppR endpoints: each wraps a view function,
# resolves the repository from the view kwargs, and raises Forbidden (via
# _raise_method) when the caller lacks the corresponding permission.
require_app_repo_read = require_repo_permission(ReadRepositoryPermission,
                                                scopes=['pull'],
                                                allow_public=True,  # public repos are readable by anyone
                                                raise_method=_raise_method,
                                                get_reponame_method=_get_reponame_kwargs)
require_app_repo_write = require_repo_permission(ModifyRepositoryPermission,
                                                 scopes=['pull', 'push'],
                                                 raise_method=_raise_method,
                                                 get_reponame_method=_get_reponame_kwargs)
require_app_repo_admin = require_repo_permission(AdministerRepositoryPermission,
                                                 scopes=['pull', 'push'],
                                                 raise_method=_raise_method,
                                                 get_reponame_method=_get_reponame_kwargs)
|
[
"jimmy.zelinskie+git@gmail.com"
] |
jimmy.zelinskie+git@gmail.com
|
b692f39e14180b18642560210b11e1e6cb9edac3
|
9a355bd36f089a829eb965bb6d725534443a4f15
|
/nlp-LDA/main.py
|
b292767164afc103ab14fa3ecb0bb9377d7e224f
|
[] |
no_license
|
yflfly/nlp_tools
|
bf1c5745c37116b14fba417d41271c716ee9a487
|
95ddeaad18ee94cc19b715e38c9af2cda7cbe213
|
refs/heads/master
| 2021-06-21T10:17:57.713605
| 2021-03-07T13:00:36
| 2021-03-07T13:00:36
| 176,850,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,472
|
py
|
# coding:utf-8
from gensim import corpora, models
import jieba.posseg as jp
import jieba
# 基于LDA主题模型的关键词提取算法实现
# 简单文本处理
def get_text(text):
    """POS-tag and filter each document in *text* (a list of strings).

    Keeps only words whose jieba POS tag is in `flags` and which are not
    stopwords; returns one token list per input document.

    Bug fix: the original ignored its parameter and iterated the
    module-level global `texts` (shadowing the parameter inside the
    loop), so it only worked by accident when called as get_text(texts).
    """
    flags = ('n', 'nr', 'ns', 'nt', 'eng', 'v', 'd')  # POS tags to keep
    stopwords = ('的', '就', '是', '用', '还', '在', '上', '作为')
    words_list = []
    for doc in text:
        words = [w.word for w in jp.cut(doc) if w.flag in flags and w.word not in stopwords]
        words_list.append(words)
    return words_list
# 生成LDA模型
def LDA_model(words_list):
    """Train a 2-topic gensim LDA model from pre-tokenized documents."""
    # Dictionary() assigns each distinct token an integer id and collects
    # occurrence statistics.
    dictionary = corpora.Dictionary(words_list)
    print(dictionary)
    print('打印查看每个单词的id:')
    print(dictionary.token2id)  # id assigned to each token
    # doc2bow() turns each document into a bag of words: one sparse vector
    # of (token id, frequency) tuples per document.
    corpus = [dictionary.doc2bow(words) for words in words_list]
    print('输出每个文档的向量:')
    print(corpus)  # one vector per document
    # num_topics -- number of topics to extract.
    # id2word   -- maps token ids back to strings for display.
    # passes    -- corpus sweeps; more passes fit better but cost time.
    lda_model = models.ldamodel.LdaModel(corpus=corpus, num_topics=2, id2word=dictionary, passes=10)
    return lda_model
if __name__ == "__main__":
    # Six short demo documents: three about the OPPO K3 phone, three about cars.
    texts = ['作为千元机中为数不多拥有真全面屏的手机,OPPO K3一经推出,就簇拥不少粉丝', \
             '很多人在冲着这块屏幕购买了OPPO K3之后,发现原来K3的过人之处不止是在屏幕上', \
             'OPPO K3的消费者对这部手机总体还是十分满意的', \
             '吉利博越PRO在7月3日全新吉客智能生态系统GKUI19发布会上正式亮相', \
             '今年上海车展,长安CS75 PLUS首次亮相', \
             '普通版车型采用的是双边共双出式排气布局;运动版本车型采用双边共四出的排气布局']
    # Tokenize the documents.
    words_list = get_text(texts)
    print('分词后的文本:')
    print(words_list)
    # Train the LDA model on the tokenized corpus.
    lda_model = LDA_model(words_list)
    # print_topics: every topic with its top-5 weighted words.
    topic_words = lda_model.print_topics(num_topics=2, num_words=5)
    print('打印所有主题,每个主题显示5个词:')
    print(topic_words)
    # show_topic: (word, weight) pairs for topic 0.
    words_list = lda_model.show_topic(0, 5)
    print('输出该主题的的词及其词的权重:')
    print(words_list)
''''
运行显示如下所示:
Building prefix dict from the default dictionary ...
Dumping model to file cache C:\\Users\\think\AppData\Local\Temp\jieba.cache
Loading model cost 8.649 seconds.
Prefix dict has been built successfully.
分词后的文本:
[['拥有', '真', '全面', '手机', 'OPPO', 'K3', '一经', '推出', '簇拥', '不少', '粉丝'], ['人', '屏幕', '购买', 'OPPO', 'K3', '发现', '原来', 'K3', '不止', '屏幕'], ['OPPO', 'K3', '消费者', '部手机', '总体'], ['吉利', '博越', 'PRO', '全新', '吉客', '智能', 'GKUI19', '发布会', '亮相'], ['上海', '长安', 'CS75', 'PLUS', '亮相'], ['版', '车型', '采用', '双边', '共', '出式', '排气', '布局', '版本', '车型', '采用', '双边', '共', '排气', '布局']]
Dictionary(42 unique tokens: ['K3', 'OPPO', '一经', '不少', '全面']...)
打印查看每个单词的id:
{'K3': 0, 'OPPO': 1, '一经': 2, '不少': 3, '全面': 4, '手机': 5, '拥有': 6, '推出': 7, '真': 8, '簇拥': 9, '粉丝': 10, '不止': 11, '人': 12, '原来': 13, '发现': 14, '屏幕': 15, '购买': 16, '总体': 17, '消费者': 18, '部手机': 19, 'GKUI19': 20, 'PRO': 21, '亮相': 22, '全新': 23, '博越': 24, '发布会': 25, '吉利': 26, '吉客': 27, '智能': 28, 'CS75': 29, 'PLUS': 30, '上海': 31, '长安': 32, '共': 33, '出式': 34, '双边': 35, '布局': 36, '排气': 37, '版': 38, '版本': 39, '车型': 40, '采用': 41}
输出每个文档的向量:
[[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)], [(0, 2), (1, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 2), (16, 1)], [(0, 1), (1, 1), (17, 1), (18, 1), (19, 1)], [(20, 1), (21, 1), (22, 1), (23, 1), (24, 1), (25, 1), (26, 1), (27, 1), (28, 1)], [(22, 1), (29, 1), (30, 1), (31, 1), (32, 1)], [(33, 2), (34, 1), (35, 2), (36, 2), (37, 2), (38, 1), (39, 1), (40, 2), (41, 2)]]
打印所有主题,每个主题显示5个词:
[(0, '0.056*"K3" + 0.055*"屏幕" + 0.055*"亮相" + 0.034*"OPPO" + 0.033*"全新"'), (1, '0.048*"采用" + 0.048*"双边" + 0.048*"车型" + 0.048*"排气" + 0.048*"布局"')]
输出该主题的的词及其词的权重:
[('K3', 0.0563279), ('屏幕', 0.05533156), ('亮相', 0.055258993), ('OPPO', 0.033577614), ('全新', 0.033189856)]
'''
|
[
"yangfengling@inttech.cn"
] |
yangfengling@inttech.cn
|
594840a1230e989a0903678c4308812b815ff3e6
|
66f037cc0bf8683a814eb610d06edd3667f962e0
|
/escpos/tests/test_epson_genericescpos.py
|
baf3acf4f890528e24d17c60035b042ca09644d7
|
[
"Apache-2.0"
] |
permissive
|
cemsbr/pyescpos
|
6118e7fcf4b5e85b94639be42cfb6fe87f084ba9
|
58ebc1b544458803c4235f3fa80e8fa376b18ec2
|
refs/heads/master
| 2020-12-08T07:20:24.977694
| 2019-12-30T00:33:08
| 2019-12-30T00:33:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,428
|
py
|
# -*- coding: utf-8 -*-
#
# escpos/tests/test_epson_genericescpos.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from escpos.impl.epson import GenericESCPOS
from escpos import feature
@pytest.fixture(scope='module')
def printer():
    # FakeDevice is presumably attached to the pytest namespace by the
    # suite's conftest plugin — it stands in for a real printer connection.
    return GenericESCPOS(pytest.FakeDevice())
def test_has_model_attr(printer):
    # Every printer implementation must expose a `model` attribute.
    assert hasattr(printer, 'model')
def test_has_feature_attribute(printer):
    # Capability flags are exposed through a FeatureAttributes wrapper.
    assert hasattr(printer, 'feature')
    assert isinstance(printer.feature, feature.FeatureAttributes)
def test_feature_attribute_columns(printer):
    # Default column counts for the generic ESC/POS profile.
    # NOTE(review): hasattr() is called with feature.COLUMNS as the name —
    # confirm that constant holds the attribute-name string.
    assert hasattr(printer.feature, feature.COLUMNS)
    assert printer.feature.columns.normal == 48
    assert printer.feature.columns.expanded == 24
    assert printer.feature.columns.condensed == 64
|
[
"daniel@base4.com.br"
] |
daniel@base4.com.br
|
be5306049fed1701ff84c2b725f19d30dc048bd9
|
a36501f44a09ca03dd1167e1d7965f782e159097
|
/admin/views/celery.py
|
0045559263cc795693771411bf703a74286ad244
|
[
"Apache-2.0"
] |
permissive
|
ssfdust/full-stack-flask-smorest
|
9429a2cdcaa3ff3538875cc74cff802765678d4b
|
4f866b2264e224389c99bbbdb4521f4b0799b2a3
|
refs/heads/master
| 2023-08-05T08:48:03.474042
| 2023-05-07T01:08:20
| 2023-05-07T01:08:20
| 205,528,296
| 39
| 10
|
Apache-2.0
| 2023-08-31T00:18:42
| 2019-08-31T10:12:25
|
Python
|
UTF-8
|
Python
| false
| false
| 3,026
|
py
|
# Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
celery的管理模块
调度任务管理
任务状态管理
"""
from ..formaters import json_formatter, line_formatter
from .bases import AuthMongView
from ..forms import TaskSelect2Field
class CeleryScheduleView(AuthMongView):
    """Admin view for celery beat schedules.

    Creates periodic tasks stored via MongoBeat.  A schedule is either a
    Crontab or an Interval — exactly one of the two may be used per task.
    """
    from app.extensions.mongobeat.models import PeriodicTask

    can_create = True
    can_edit = True
    can_delete = True
    can_view_details = True
    extra_js = ["/static/js/pages/celerybeat.js"]  # extends the edit form
    column_list = ["name", "task", "enabled", "schedule", "last_run_at"]
    column_labels = {"schedule": "周期"}
    column_editable_list = ["enabled", "run_immediately"]
    column_default_sort = []
    column_filters = ["name"]
    can_view_details = True  # NOTE(review): duplicate of the assignment above
    form_overrides = {"task": TaskSelect2Field}

    def _scheduleinfo(view, context, model, name):
        """Render the schedule portion of the model's str() form."""
        return str(model).split(":")[1]

    column_formatters = {"schedule": _scheduleinfo}
class CeleryTaskView(AuthMongView):
    """Admin view over celery task results (read/delete only)."""

    can_create = False
    can_edit = False
    can_delete = True
    can_view_details = True
    details_modal = False
    column_default_sort = [("time_start", True)]
    column_filters = ["time_start", "date_done"]
    # Verbose/internal fields hidden from the list view.
    column_exclude_list = [
        "id",
        "delivery_info",
        "result",
        "children",
        "args",
        "acknowledged",
        "traceback",
        "kwargs",
        "parent_id",
        "type",
    ]
    column_formatters = {
        "delivery_info": json_formatter,
        "result": json_formatter,
        "traceback": line_formatter,
    }

    def get_list(
        self,
        page,
        sort_column,
        sort_desc,
        search,
        filters,
        execute=True,
        page_size=None,
    ):
        """Fetch a page of task results, marking every unread row as read."""
        count, query = super().get_list(
            page=page,
            sort_column=sort_column,
            sort_desc=sort_desc,
            search=search,
            filters=filters,
            execute=execute,
            page_size=page_size,
        )
        # Side effect: persist checked=True on any row the admin now sees.
        for item in query:
            if item.checked is False:
                item.checked = True
                item.save()
        return count, query
|
[
"ssfdust@gmail.com"
] |
ssfdust@gmail.com
|
757c83c09d9a63c7e07a8ce62f3e008a7d1d516b
|
1515be3015ad988278d5a095416c0a0066a02757
|
/src/users/models/microsoftgraphentity.py
|
b0f43ecc2a37e01c3384617b3fbb889435ca08b5
|
[
"MIT"
] |
permissive
|
peombwa/Sample-Graph-Python-Client
|
2ad494cc5b5fe026edd6ed7fee8cac2dd96aaa60
|
3396f531fbe6bb40a740767c4e31aee95a3b932e
|
refs/heads/master
| 2020-12-29T09:50:38.941350
| 2020-02-05T22:45:28
| 2020-02-05T22:45:28
| 238,561,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Microsoftgraphentity(Model):
    """entity.

    Base Microsoft Graph entity carrying only the identifier.

    :param id: entity identifier
    :type id: str
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, id=None):
        super(Microsoftgraphentity, self).__init__()
        self.id = id
|
[
"peombwa@microsoft.com"
] |
peombwa@microsoft.com
|
088740864d07a9ca84e89627a1bab44538e4be33
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_II_003_20180618134857.py
|
2745c76be92765bcf40268fd8a5d34aac92e1b25
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424
| 2018-07-04T17:21:13
| 2018-07-04T17:21:13
| 139,749,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
from random import randint

# Nearly-solved puzzle #1; blank cells are single-space strings, so the
# grid mixes int and str values.
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2]
]
# Sudoku 2 almost solved
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']
def printSudoku():
    """Render the module-level `sudoku1` grid with 3x3 box separators."""
    i = 0
    while i < 10:
        if i == 0:
            # Column header plus the top border.
            print(" 1 2 3 4 5 6 7 8 9")
            print(" -------------------------")
        elif i == 3 or i == 6 or i == 9:
            # Horizontal separator between 3-row bands (and bottom border).
            print(" -------------------------")
        spaceBar = "|"
        if i < 9:
            # {2} row number, {1} separators, {0[k]} the nine cell values.
            print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1))
        i = i + 1
while True:  # main game loop: re-print the board until the puzzle is solved
    printSudoku()
    print("Input 3 numbers in format a b c, np. 4 5 8")
    print(" a - row number")
    print(" b - column number ")
    print(" c - value")
    # vprint(" r - reset chart to start\n ")
    x = input("Input a b c: ")
    print("")
    numbers = " 0123456789"  # characters allowed in the "a b c" entry format
    # Reject anything that is not exactly "<digit> <digit> <digit>".
    if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
            str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
        if x == "r":  # reset
            print(" Function reset() will be ready in Next Week")
        else:
            print("Error - wrong number format \n ")
        continue
    # NOTE(review): stores the digit as a *string*, so the win check below
    # would mix str and int when summing rows.
    sudoku1[int(x[0])-1][int(x[2])-1] = x[4]
    try:
        # NOTE(review): row1..row9 are never defined in this variant (they
        # are commented out above), so this raises NameError — which the
        # TypeError handler below does not catch.  The win check is broken.
        if sum(row1) == 45 and sum(row2) == 45 and sum(row3) == 45 and sum(row4) == 45 and sum(
                row5) == 45 and sum(row6) == 45 and sum(row7) == 45 and sum(row8) == 45 and sum(row9) == 45:
            print("YOU WIN")
            break
    except TypeError:
        print()
'''
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
'''
|
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
b114f3d71ebdae2a74750a2f0d56ad7bd8da3155
|
27f6c33ad3f0240e64aad809d4bd57af3ecda498
|
/Day06/8_Q10.py
|
0ddb90dfa64678823f9b0d3c8f5988ce857c4809
|
[] |
no_license
|
bigdata202005/PythonProject
|
26ce3c0ed3e47cd727606455e6ca95561907dbe4
|
4e0377fdb86db294483fb7a347429bf299e44ce5
|
refs/heads/main
| 2023-01-08T01:48:22.271143
| 2020-11-06T05:15:22
| 2020-11-06T05:15:22
| 310,498,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 703
|
py
|
"""
Q10 사칙연산 계산기
다음과 같이 동작하는 클래스 Calculator를 작성하시오.
cal1 = Calculator([1,2,3,4,5])
cal1.sum() # 합계
15
cal1.avg() # 평균
3.0
cal2 = Calculator([6,7,8,9,10])
cal2.sum() # 합계
40
cal2.avg() # 평균
8.0
"""
class Calculator:
    """Tiny arithmetic helper over a list of numbers.

    Note: sum() and avg() print their results instead of returning them,
    matching the exercise transcript in the module docstring.
    """

    def __init__(self, data_list):
        self.data_list = data_list

    def sum(self):
        """Print the total of the stored numbers."""
        total = 0
        for value in self.data_list:
            total += value
        print(total)

    def avg(self):
        """Print the mean of the stored numbers, rounded to one decimal."""
        mean = sum(self.data_list) / len(self.data_list)
        print(round(mean, 1))
if __name__ == '__main__':
    # Quick demo matching the transcript in the module docstring.
    cal1 = Calculator([1, 2, 3, 4, 5])
    cal1.sum()  # total -> 15
    cal1.avg()  # mean -> 3.0
    cal2 = Calculator([6, 7, 8, 9, 10])
    cal2.sum()  # total -> 40
    cal2.avg()  # mean -> 8.0
|
[
"bigdata202005@gmail.com"
] |
bigdata202005@gmail.com
|
96e20d4131f5317dff89c74611053c3b8918cdf8
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/GLES2/NV/conservative_raster_pre_snap.py
|
df58883753676dd6158183e136d4f6c4516af278
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974
| 2021-01-22T10:51:14
| 2021-01-22T10:51:14
| 307,847,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
'''OpenGL extension NV.conservative_raster_pre_snap
This module customises the behaviour of the
OpenGL.raw.GLES2.NV.conservative_raster_pre_snap to provide a more
Python-friendly API
Overview (from the spec)
NV_conservative_raster_pre_snap_triangles provides a new mode to achieve
rasterization of triangles that is conservative w.r.t the triangle at
infinite precision i.e. before it is snapped to the sub-pixel grid. This
extension provides a new mode that expands this functionality to lines and
points.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/conservative_raster_pre_snap.txt
'''
from OpenGL.raw.GLES2.NV.conservative_raster_pre_snap import _EXTENSION_NAME
def glInitConservativeRasterPreSnapNV():
    '''Return boolean indicating whether this extension is available'''
    # Deferred import keeps module import cheap when the extension is unused.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
[
"rudnik49@gmail.com"
] |
rudnik49@gmail.com
|
a4080a7404a99836de352ed8a0d32120f99c2fdc
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/6YN2ww3B4cQZ6rTmN_4.py
|
065864678599624588b72aab946bb9938cea33ba
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
"""
Write a function that returns `True` if a year is a leap, otherwise it returns
`False`.
A year is a "leap year" if it lasts 366 days, instead of 365 in a typical
year. That extra day is added to the end of the shortest month, creating
February 29.
A leap year occurs every four years, and will take place if the year is a
multiple of four. The exception to this is a year at the beginning of a
century (for example, 1900 or 2000), where the year must be divisible by 400
to be a leap year.
Look at the examples, and if you need help, look at the resources panel.
### Examples
leap_year(1990) ➞ False
leap_year(1924) ➞ True
leap_year(2021) ➞ False
### Notes
* Do not overthink this challenge.
* You can solve the problem with a few lines of code.
"""
def leapYear(year):
    """Return True when *year* is a Gregorian leap year.

    A year is a leap year when divisible by 4, except century years
    (divisible by 100), which are leap only when divisible by 400 — as
    the module docstring specifies.  The original tested only year % 4
    and therefore misclassified e.g. 1900 and 2100.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
a7a3c574f559605b827426b88383a43711079bb1
|
3b50605ffe45c412ee33de1ad0cadce2c5a25ca2
|
/python/paddle/fluid/tests/unittests/test_fleet_base.py
|
fe24c8838ec6c2ac41dde0d8f7ac9911509942b0
|
[
"Apache-2.0"
] |
permissive
|
Superjomn/Paddle
|
f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1
|
7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188
|
refs/heads/develop
| 2023-02-04T20:27:54.244843
| 2023-01-26T15:31:14
| 2023-01-26T15:31:14
| 66,896,049
| 4
| 1
|
Apache-2.0
| 2023-04-14T02:29:52
| 2016-08-30T01:45:54
|
C++
|
UTF-8
|
Python
| false
| false
| 7,727
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
import paddle.fluid as fluid
class TestFleetBase(unittest.TestCase):
    """Smoke tests for the fleet module's role/endpoint query API.

    Each test initializes fleet with a PaddleCloudRoleMaker configured via
    the environment variables set in setUp (one trainer, two pservers).
    """

    def setUp(self):
        # Single local trainer; two parameter-server endpoints.
        os.environ["POD_IP"] = "127.0.0.1"
        os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36000"
        os.environ["PADDLE_TRAINERS_NUM"] = "2"
        os.environ[
            "PADDLE_PSERVERS_IP_PORT_LIST"
        ] = "127.0.0.1:36001,127.0.0.2:36002"

    def test_init(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)

    def test_is_first_worker(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        if fleet.is_first_worker():
            print("test fleet first worker done.")

    def test_worker_index(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print(fleet.worker_index())

    def test_worker_num(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print(fleet.worker_num())

    def test_is_worker(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        if fleet.is_worker():
            print("test fleet is worker")

    def test_worker_endpoints(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        self.assertEqual(
            "127.0.0.1:36000", fleet.worker_endpoints(to_string=True)
        )
        self.assertEqual(["127.0.0.1:36000"], fleet.worker_endpoints())

    def test_server_num(self):
        # Switch this process's role to parameter server.
        os.environ["TRAINING_ROLE"] = "PSERVER"
        os.environ["PADDLE_PORT"] = "36001"
        os.environ["POD_IP"] = "127.0.0.1"
        role = role_maker.PaddleCloudRoleMaker()
        fleet.init(role)
        os.environ["PADDLE_TRAINERS_NUM"] = "2"
        self.assertEqual(2, fleet.server_num())

    def test_server_index(self):
        os.environ["TRAINING_ROLE"] = "PSERVER"
        os.environ["PADDLE_PORT"] = "36001"
        os.environ["POD_IP"] = "127.0.0.1"
        role = role_maker.PaddleCloudRoleMaker()
        fleet.init(role)
        self.assertEqual(0, fleet.server_index())

    def test_server_endpoints(self):
        os.environ["TRAINING_ROLE"] = "PSERVER"
        os.environ["PADDLE_PORT"] = "36001"
        os.environ["POD_IP"] = "127.0.0.1"
        role = role_maker.PaddleCloudRoleMaker()
        fleet.init(role)
        if fleet.is_server():
            self.assertEqual(
                "127.0.0.1:36001,127.0.0.2:36002",
                fleet.server_endpoints(to_string=True),
            )
            self.assertEqual(
                ["127.0.0.1:36001", "127.0.0.2:36002"], fleet.server_endpoints()
            )

    def test_is_server(self):
        os.environ["TRAINING_ROLE"] = "PSERVER"
        os.environ["PADDLE_PORT"] = "36001"
        os.environ["POD_IP"] = "127.0.0.1"
        role = role_maker.PaddleCloudRoleMaker()
        fleet.init(role)
        self.assertTrue(fleet.is_server())

    def test_util(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        self.assertIsNotNone(fleet.util)

    def test_barrier_worker(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        if fleet.is_worker():
            fleet.barrier_worker()

    def test_init_worker(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        # init_worker() is expected to fail in this bare setup.
        with self.assertRaises(ValueError):
            if fleet.is_worker():
                fleet.init_worker()

    def test_stop_worker(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        # stop_worker() is expected to fail in this bare setup.
        with self.assertRaises(ValueError):
            if fleet.is_worker():
                fleet.stop_worker()

    def test_distributed_optimizer(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        optimizer = paddle.optimizer.SGD(learning_rate=0.001)
        optimizer = fleet.distributed_optimizer(optimizer)

    def test_exception(self):
        import paddle.distributed.fleet as fleet

        # init_worker without prior fleet.init must raise.
        self.assertRaises(Exception, fleet.init_worker)
class TestFleetDygraph(unittest.TestCase):
    """Interface smoke test for fleet's dygraph (imperative) API surface."""

    def setUp(self):
        # Two-trainer endpoint list; this process is trainer 0.
        os.environ[
            "PADDLE_TRAINER_ENDPOINTS"
        ] = "127.0.0.1:36213,127.0.0.1:36214"
        os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213"
        os.environ["PADDLE_TRAINERS_NUM"] = "2"
        os.environ["PADDLE_TRAINER_ID"] = "0"

    def test_dygraph_method(self):
        paddle.disable_static()
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = fluid.dygraph.to_variable(value)
        layer = paddle.nn.Linear(13, 5)
        adam = paddle.optimizer.Adam(
            learning_rate=0.01, parameters=layer.parameters()
        )
        # remove init cause this UT cannot launch distributed task
        adam = fleet.distributed_optimizer(adam)
        try:
            dp_layer = fleet.distributed_model(layer)
        except Exception as e:
            # This is just for testing the interface,
            # and will not actually be called. Therefore,
            # use "try-except" to avoid errors.
            lr = 0.001
            adam.set_lr(lr)
            cur_lr = adam.get_lr()
            assert lr == cur_lr
            state_dict = adam.state_dict()
            adam.set_state_dict(state_dict)
            final_strategy = fleet._final_strategy()
class TestFleetBaseSingleError(unittest.TestCase):
    """Checks fleet.init's non-distributed (single-process) error path."""

    def setUp(self):
        # Simulate a plain `python` launch: no trainer endpoints configured.
        os.environ.pop("PADDLE_TRAINER_ENDPOINTS")

    def gen_data(self):
        # Random minibatch for the toy classification graph below.
        return {
            "x": np.random.random(size=(128, 32)).astype('float32'),
            "y": np.random.randint(2, size=(128, 1)).astype('int64'),
        }

    def test_single_run_collective_minimize(self):
        def test_single_error():
            # Build a toy classifier and call fleet.init collectively.
            input_x = paddle.static.data(
                name="x", shape=[-1, 32], dtype='float32'
            )
            input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
            fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
            prediction = paddle.static.nn.fc(
                x=fc_1, size=2, activation='softmax'
            )
            cost = paddle.nn.functional.cross_entropy(
                input=prediction,
                label=input_y,
                reduction='none',
                use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
            fleet.init(is_collective=True)

        # in non_distributed mode(use `python` to launch), raise error if has multi cards
        if (
            fluid.core.is_compiled_with_cuda()
            and fluid.core.get_cuda_device_count() > 1
        ):
            self.assertRaises(ValueError, test_single_error)
        else:
            test_single_error()
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
Superjomn.noreply@github.com
|
6d35ff8ff3c6737289e4c00e8be3360f52f6ca99
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02773/s910963404.py
|
d617638ace116c73c96cf5a65311b85b85f81d23
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
import sys

# Count occurrences of each input line, then print every line that attains
# the maximum count, in lexicographic order.
N = int(input())
# NOTE(review): rebinds the builtin `input` to the faster readline.
input = sys.stdin.readline
lis = {}
for i in range(N):
    s = input().rstrip()
    if s in lis:
        lis[s] += 1
    else:
        lis[s] = 1
# (name, count) pairs sorted by count, descending.
lis2 = sorted(lis.items(), key=lambda x:x[1], reverse=True)
x = next(iter(lis2))  # first pair — one of the most frequent entries
keys = []
for i in lis2:
    if i[1] == x[1]:
        keys.append(i[0])
ans = sorted(keys)
for i in ans:
    print(i)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
253aa95b971032804048120ff1cfdb28608f6cba
|
16e266cf50a712ed29a4097e34504aac0281e6cb
|
/Functions/venv/lib/python3.6/site-packages/BaseExtensions/Logging.py
|
99776019d50ef0f45a7210eaff3874df85494d98
|
[] |
no_license
|
felix-ogutu/PYTHON-PROJECTS
|
9dd4fdcfff6957830587b64c5da3b5c3ade3a27e
|
8c1297dbda495078509d06a46f47dc7ee60b6d4e
|
refs/heads/master
| 2023-06-05T04:41:36.727376
| 2021-06-25T20:36:52
| 2021-06-25T20:36:52
| 380,348,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,704
|
py
|
import logging
import logging.config
import os
import sys
import tarfile
from logging.handlers import RotatingFileHandler
from typing import *
__all__ = [
'logging',
'LoggingManager',
'Error_Handler',
'Info_Handler',
'Console_Error', 'Console_Debug', 'Console_Info',
'InfoFilter', 'ErrorFilter', 'DebugFilter', 'PngImagePlugin_Filter',
'LogPaths', 'Formats'
]
class Formats(object):
    """Holds the three log-format strings used by the handlers below:
    INFO is terse, SIMPLE adds location/thread context on one header line,
    DETAILED is the multi-line layout used for error logs.
    """
    def __init__(self,
                 info="""%(name)s ---- %(message)s""",
                 simple="""[ %(levelname)-10s ] [ %(module)s.%(funcName)s @ Line#: %(lineno)d ] [ %(processName)s.%(threadName)s ]
    %(message)s
""",
                 detailed="""
[ %(levelname)-10s %(asctime)s ]
[ FILE: "%(pathname)s" : %(module)s.%(funcName)s @ Line # %(lineno)d ]
[ Process ID: %(process)-7d | %(processName)s ]
[ Thread ID: %(thread)-7d | %(threadName)s ]
MESSAGE: %(message)s
"""):
        self.INFO: Final[str] = info
        self.SIMPLE: Final[str] = simple
        self.DETAILED: Final[str] = detailed
class LogPaths(object):
    """Registry of log-file paths for an application's named processes.

    Each process gets two files under root_path: '<proc>.log' and
    '<proc>_errors.log'.  The paths are also exposed as instance
    attributes (paths.<proc>, paths.<proc>_errors).
    """
    def __init__(self, *processes: str, app_name: str, root_path: str, max_logs: int = 5, max_log_size: int = 10240):
        self.MAX_LOGS: Final[int] = max_logs          # rotated backups kept per file
        self.MAX_LOG_SIZE: Final[int] = max_log_size  # rotation threshold (bytes)
        self.APP_NAME: Final[str] = app_name
        self._root_path: Final[str] = root_path
        self.__log_paths__ = { }
        for proc in set(processes):
            self.__log_paths__[proc] = os.path.join(root_path, self.GetFileName(proc))
            self.__log_paths__[self.GetErrorName(proc)] = os.path.join(root_path, self.GetErrorFileName(proc))
        self.__dict__.update(self.__log_paths__)

    @property
    def logs(self) -> List[str]:
        """Every managed path plus its rotated backups ('<path>.1' .. '<path>.N').

        Bug fix: the original yielded bare dict *keys* for i == 0 and
        extension-less '<key>.N' names otherwise, so archiving/deletion
        missed the real files.  RotatingFileHandler names backups by
        appending '.N' to the full filename, which is what we emit here.
        """
        return [path if i == 0 else '{}.{}'.format(path, i)
                for path in self.__log_paths__.values()
                for i in range(self.MAX_LOGS + 1)]

    def Zip_Log_Files(self, path: str):
        """Bundle all existing current and rotated logs into a .tar.gz at *path*."""
        with tarfile.open(path, "w:gz") as tar:
            for file in self.logs:
                # Skip backups that have not been created yet; tar.add
                # would otherwise raise FileNotFoundError.
                if os.path.isfile(file):
                    tar.add(file, arcname=os.path.basename(file))

    def Delete_Log_Files(self):
        """Remove every managed log file that currently exists."""
        for file in self.logs:
            if os.path.isfile(file): os.remove(file)

    @staticmethod
    def GetFileName(base: str): return f'{base}.log'
    @staticmethod
    def GetErrorFileName(base: str): return f'{base}_errors.log'
    @staticmethod
    def GetErrorName(base: str): return f'{base}_errors'
class DebugFilter(logging.Filter):
    """Logging filter that admits DEBUG records and nothing else."""
    _allowed = (logging.DEBUG,)

    def filter(self, rec):
        # Accept the record iff its numeric level is in the whitelist tuple.
        return any(rec.levelno == permitted for permitted in self._allowed)
class InfoFilter(logging.Filter):
    """Logging filter that admits INFO records and nothing else."""
    _allowed = (logging.INFO,)

    def filter(self, rec):
        # Scan the whitelist tuple kept on the class; behaviour matches a
        # plain membership test.
        for permitted in self._allowed:
            if rec.levelno == permitted:
                return True
        return False
class ErrorFilter(logging.Filter):
    """Admit WARNING, ERROR and CRITICAL records (FATAL is an alias of CRITICAL)."""
    _allowed = (logging.WARNING, logging.ERROR, logging.CRITICAL, logging.FATAL)

    def filter(self, rec):
        # Linear scan over the whitelist; equivalent to tuple membership.
        for permitted in self._allowed:
            if rec.levelno == permitted:
                return True
        return False
class PngImagePlugin_Filter(logging.Filter):
    """Drop every record emitted from PIL's noisy ``PngImagePlugin`` module."""

    def filter(self, record: logging.LogRecord) -> bool:
        # Single comparison replaces the original if/return pair.
        return record.module != 'PngImagePlugin'
class Error_Handler(RotatingFileHandler):
    """Size-rotating file handler for high-severity records (DETAILED format).

    Rotation size and backup count come from the LogPaths configuration.
    """
    _allowed = (logging.WARNING, logging.ERROR, logging.CRITICAL, logging.FATAL)
    def __init__(self, *, file, path: LogPaths, fmt: Formats, **kwargs):
        super().__init__(filename=file, maxBytes=path.MAX_LOG_SIZE, backupCount=path.MAX_LOGS, **kwargs)
        self.formatter = logging.Formatter(fmt=fmt.DETAILED)
        # NOTE(review): level ERROR drops WARNING records even though the
        # attached ErrorFilter would admit them -- confirm this is intended.
        self.setLevel(logging.ERROR)
        self.addFilter(ErrorFilter())
class Info_Handler(logging.FileHandler):
    """Plain file handler for informational records, truncating the file on open.

    NOTE(review): ``_allowed`` lists DEBUG and INFO, but the attached
    InfoFilter passes INFO only, so DEBUG records never reach this file --
    confirm whether DEBUG output was intended here.
    """
    _allowed = (logging.DEBUG, logging.INFO)
    def __init__(self, *, file, mode='w', fmt: Formats, **kwargs):
        super().__init__(filename=file, mode=mode, **kwargs)
        self.formatter = logging.Formatter(fmt=fmt.INFO)
        self.setLevel(logging.DEBUG)
        self.addFilter(InfoFilter())
class Console_Debug(logging.StreamHandler):
    """Stdout handler that emits DEBUG records only, using the INFO format."""
    # NOTE(review): annotated but never assigned in the code visible here.
    _allowed: tuple
    def __init__(self, *, fmt: Formats, stream=sys.stdout):
        super().__init__(stream)
        self.setLevel(logging.DEBUG)
        self.formatter = logging.Formatter(fmt=fmt.INFO)
        # DebugFilter narrows output to DEBUG despite the permissive level.
        self.addFilter(DebugFilter())
class Console_Info(logging.StreamHandler):
    """Stdout handler that emits INFO records only, using the INFO format."""
    # NOTE(review): annotated but never assigned in the code visible here.
    _allowed: tuple
    def __init__(self, *, fmt: Formats, stream=sys.stdout):
        super().__init__(stream)
        self.setLevel(logging.DEBUG)
        self.formatter = logging.Formatter(fmt=fmt.INFO)
        # InfoFilter narrows output to INFO despite the DEBUG level.
        self.addFilter(InfoFilter())
class Console_Error(logging.StreamHandler):
    """Stderr handler for WARNING-and-above records, using the DETAILED format."""
    # NOTE(review): annotated but never assigned in the code visible here.
    _allowed: tuple
    # def __init__(self, *, allowed=(logging.WARNING, logging.ERROR, logging.CRITICAL, logging.FATAL), stream=sys.stderr):
    def __init__(self, *, fmt: Formats, stream=sys.stderr):
        super().__init__(stream)
        # self._allowed = allowed
        self.setLevel(logging.WARNING)
        self.formatter = logging.Formatter(fmt=fmt.DETAILED)
        self.addFilter(ErrorFilter())
class InstanceError(Exception): pass  # only referenced from commented-out code in CreateLogger below
class LoggingManager(object):
    """Builds per-class loggers (file + console handlers) under one app logger.

    ``mapper`` maps a type to the base name used for its log files; loggers
    created via :meth:`CreateLogger` are children of the application logger.
    """
    mapper: Dict[Type, str]

    def __init__(self, *types: Type, mapper: Dict[Type, str] = None, paths: LogPaths, fmt: Formats = Formats()):
        """
        :param types: types auto-mapped to their ``__name__`` when ``mapper`` is not given.
        :param mapper: explicit type -> log-file base-name mapping (takes precedence).
        :param paths: pre-built LogPaths describing where the log files live.
        :param fmt: format strings shared by every handler this manager creates.
        """
        self.fmt = fmt
        self.paths = paths
        if not isinstance(mapper, dict):
            mapper = { item: item.__name__ for item in set(types) }
        self.mapper = mapper
        # Configure the root logger once, then drop its default handlers so
        # only the handlers attached in CreateLogger() emit records.
        logging.basicConfig(format=fmt.DETAILED, level=logging.DEBUG)
        self._root_logger = logging.getLogger()
        self._root_logger.handlers.clear()
        self.app_logger = logging.getLogger(self.paths.APP_NAME)
        # PIL's PngImagePlugin logger is extremely chatty at DEBUG level.
        logging.getLogger("PIL.PngImagePlugin").disabled = True

    def CreateLogger(self, source, *, debug: bool = __debug__) -> logging.Logger:
        """Create and wire up a child logger for ``source``.

        ``source`` must be an *instance* of one of the mapped types.

        :raises ValueError: when ``source`` matches none of the mapped types.
        """
        # BUG FIX: the previous implementation raised ValueError from an
        # ``else`` branch inside the loop, i.e. as soon as the FIRST mapper
        # entry failed the isinstance test -- so only one registered type
        # could ever be resolved.  Every entry is now tried, and the error
        # is raised only after all of them fail.
        for key, value in self.mapper.items():
            if isinstance(source, key):
                logger = self.app_logger.getChild(source.__class__.__name__)
                logger.addHandler(Info_Handler(file=self.paths.__log_paths__[value], fmt=self.fmt))
                logger.addHandler(Error_Handler(file=self.paths.__log_paths__[LogPaths.GetErrorName(value)], fmt=self.fmt, path=self.paths))
                logger.addHandler(Console_Error(fmt=self.fmt))
                logger.addHandler(Console_Debug(fmt=self.fmt))
                logger.addHandler(Console_Info(fmt=self.fmt))
                logger.addFilter(PngImagePlugin_Filter())
                logger.setLevel(logging.DEBUG if debug else logging.ERROR)
                return logger
        raise ValueError('source is not identified')

    @classmethod
    def FromTypes(cls, *types: Type, app_name: str, root_path: str):
        """Alternate constructor: derive the mapper and LogPaths from ``types``."""
        mapper = { item: item.__name__ for item in types }
        return cls(mapper=mapper,
                   paths=LogPaths(*mapper.values(), app_name=app_name, root_path=root_path))
if __name__ == '__main__':
from PythonDebugTools import *
class Test(object): pass
class Other(object): pass
m = LoggingManager.FromTypes(Test, Other, app_name='app', root_path='.')
PrettyPrint(m.paths.Test)
PrettyPrint(m.paths.Test_errors)
PrettyPrint(m.paths.Other)
PrettyPrint(m.paths.Other_errors)
PrettyPrint('m.paths.logs', m.paths.logs)
Print(m.CreateLogger(Test(), debug=True))
Print(m.CreateLogger(Other(), debug=True))
try: m.CreateLogger(Other, debug=True)
except Exception as e: print_exception(e)
|
[
"you@example.com"
] |
you@example.com
|
4411eff5cb2a890b6f57eef93094fb70a0a66faa
|
297efd4afeb46c0b56d9a975d76665caef213acc
|
/src/multiplicity/migrations/0026_auto_20181208_0740.py
|
82eca20d3e465894fae450ae2a44b7598e792bfd
|
[
"MIT"
] |
permissive
|
metabolism-of-cities/metabolism-of-cities-platform-v3
|
67716c3daae86a0fe527c18aef26ce29e069cbcc
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
refs/heads/master
| 2022-12-06T22:56:22.207853
| 2020-08-25T09:53:51
| 2020-08-25T09:53:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
# Generated by Django 2.1.3 on 2018-12-08 07:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: redeclares referencespacetype.process
    # as a nullable FK to staf.Process, limited to processes that have a slug.

    dependencies = [
        ('multiplicity', '0025_auto_20181208_0719'),
    ]

    operations = [
        migrations.AlterField(
            model_name='referencespacetype',
            name='process',
            field=models.ForeignKey(blank=True, limit_choices_to={'slug__isnull': False}, null=True, on_delete=django.db.models.deletion.CASCADE, to='staf.Process'),
        ),
    ]
|
[
"paul@penguinprotocols.com"
] |
paul@penguinprotocols.com
|
a80b25b51691207c62aa5e0268abfc49f4b48640
|
61ef327bd1d5ff6db7595221db6823c947dab42b
|
/FlatData/ScenarioScriptGroup1ExcelTable.py
|
691ecbb8b440d3883daab477c5ae12ef5f5095cb
|
[] |
no_license
|
Aikenfell/Blue-Archive---Asset-Downloader
|
88e419686a80b20b57a10a3033c23c80f86d6bf9
|
92f93ffbdb81a47cef58c61ec82092234eae8eec
|
refs/heads/main
| 2023-09-06T03:56:50.998141
| 2021-11-19T12:41:58
| 2021-11-19T12:41:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ScenarioScriptGroup1ExcelTable(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ScenarioScriptGroup1ExcelTable()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsScenarioScriptGroup1ExcelTable(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# ScenarioScriptGroup1ExcelTable
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ScenarioScriptGroup1ExcelTable
def DataList(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from FlatData.ScenarioScriptGroup1Excel import ScenarioScriptGroup1Excel
obj = ScenarioScriptGroup1Excel()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ScenarioScriptGroup1ExcelTable
def DataListLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ScenarioScriptGroup1ExcelTable
def DataListIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
def Start(builder): builder.StartObject(1)
def ScenarioScriptGroup1ExcelTableStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddDataList(builder, DataList): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(DataList), 0)
def ScenarioScriptGroup1ExcelTableAddDataList(builder, DataList):
"""This method is deprecated. Please switch to AddDataList."""
return AddDataList(builder, DataList)
def StartDataListVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ScenarioScriptGroup1ExcelTableStartDataListVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartDataListVector(builder, numElems)
def End(builder): return builder.EndObject()
def ScenarioScriptGroup1ExcelTableEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
|
[
"rkolbe96@gmail.com"
] |
rkolbe96@gmail.com
|
2226d8e21d5a65eb4b18bd6bf4a8b2b930d5c84f
|
929a816fc299959d0f8eb0dd51d064be2abd6b78
|
/LeetCode/easy - Array/1304. Find N Unique Integers Sum up to Zero/.ipynb_checkpoints/solution-checkpoint.py
|
038be3063eb5258cd1e9f8bdc9d7066cfb184c3e
|
[
"MIT"
] |
permissive
|
vincent507cpu/Comprehensive-Algorithm-Solution
|
27940da7bc0343921930a2eafbd649da93a5395d
|
04e01e49622457f09af2e1133954f043c0c92cb9
|
refs/heads/master
| 2023-07-20T07:12:15.590313
| 2021-08-23T23:42:17
| 2021-08-23T23:42:17
| 258,644,691
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
class Solution:
    """LeetCode 1304: return ``n`` unique integers whose sum is zero."""

    # my solution
    def sumZero(self, n: int) -> List[int]:
        """Build the answer as 1..n-1 plus the negation of their total.

        The positives are pairwise distinct and the balancing term is <= 0,
        so it can never collide with them.

        Fixes an edge-case bug: the previous ``range(n - 1)`` started at 0,
        so n == 2 produced [0, 0] -- not unique.
        """
        positives = list(range(1, n))
        return positives + [-sum(positives)]
|
[
"vincent507cpu@gmail.com"
] |
vincent507cpu@gmail.com
|
5032205c24261703355ad0399ea9138603b23d16
|
3a9f2b3d79cf214704829427ee280f4b49dca70a
|
/saigon/rat/RuckusAutoTest/tests/ap/CB_AP_Download_Image.py
|
9e2f694d41a392e7f7a8b96829e4fd450cb80c4b
|
[] |
no_license
|
jichunwei/MyGitHub-1
|
ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791
|
f826fc89a030c6c4e08052d2d43af0b1b4b410e3
|
refs/heads/master
| 2021-01-21T10:19:22.900905
| 2016-08-20T03:34:52
| 2016-08-20T03:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,890
|
py
|
# Copyright (C) 2010 Ruckus Wireless, Inc. All rights reserved.
# Please make sure the following module docstring is accurate since it will be used in report generation.
"""Description:
Prerequisite (Assumptions about the state of the testbed/DUT):
Required components:
Test parameters:
Result type: PASS/FAIL
Results: PASS
FAIL otherwise
Messages:
- If PASS,
- If FAIL, prints out the reason for failure
Test procedure:
1. Config:
-
2. Test:
-
3. Cleanup:
-
How is it tested: (to be completed by test code developer)
"""
import logging
import os
import re
from RuckusAutoTest.models import Test
from contrib.download import image_resolver as imgres
class CB_AP_Download_Image(Test):
required_components = []
parameter_description = {'ap_build_stream':'build stream of Active Point',
'ap_bno':'build no of Active Point',
}
def config(self, conf):
self._init_test_params(conf)
self._retrive_carrier_bag()
def test(self):
self._download_ap_image()
if self.errmsg:
return self.returnResult('FAIL', self.errmsg)
else:
self._update_carrier_bag()
return self.returnResult('PASS', self.passmsg)
def cleanup(self):
pass
def _init_test_params(self, conf):
self.conf = {}
self.conf.update(conf)
self.errmsg = ''
self.passmsg = ''
def _retrive_carrier_bag(self):
if self.carrierbag.has_key('ap_fw_upgrade_cfg'):
self.conf['ap_fw_upgrade_cfg'] = self.carrierbag['ap_fw_upgrade_cfg']
def _update_carrier_bag(self):
self.carrierbag['image_file_path'] = self.image_file_path
def _download_ap_image(self):
'''
Download ap image from yanming server.
'''
try:
logging.info('Get the image information build server, build stream and build no')
if self.conf.has_key('ap_fw_upgrade_cfg'):
ap_fw_upgrade_cfg = self.conf['ap_fw_upgrade_cfg']
model = 'mf2211'
if self.conf.has_key('model'):
model = self.conf['model']
up_flag = True
if self.conf.has_key('up_flag'):
up_flag = self.conf['up_flag']
all_models_up_cfg = ap_fw_upgrade_cfg['up_cfg']
build_server = ap_fw_upgrade_cfg['build_server']
if all_models_up_cfg.has_key(model):
model_up_cfg = all_models_up_cfg[model]
if up_flag:
ap_build_stream = model_up_cfg['target_build_stream']
ap_bno = int(model_up_cfg['target_bno'])
else:
ap_build_stream = model_up_cfg['baseline_build_stream']
ap_bno = int(model_up_cfg['baseline_bno'])
else:
model_up_cfg = {}
self.errmsg = 'No upgrade config for specified model %s' % (model,)
else:
ap_build_stream = self.conf['ap_build_stream']
ap_bno = self.conf['ap_bno']
if self.conf.has_key('build_server'):
build_server = self.conf['build_server']
else:
build_server = None
self.image_file_path = self._download_image(build_server, ap_build_stream, ap_bno)
logging.info('Firmware config: %s' % self.image_file_path)
self.passmsg = "Download and get image files for %s: location[%s], Build stream[%s], Build no[%s]" % \
(model, os.getcwd(), ap_build_stream, ap_bno)
except Exception, ex:
self.errmsg = ex.message
def _escape(self, file_path):
expr = "[/|\\^\\\\]"
return re.sub(expr, "\\\\", file_path)
def _download_image(self, build_server, build_stream, fw_bno):
'''
Download ap image from build server based on build stream and build no,
and save as <Build stream>.<Build no>.tar.gz
'''
chk_name_list = ["%s.%s.tar.gz" % (build_stream, fw_bno), #MM2225_mainline.85.tar.gz
"%s.%s.bl7" % (build_stream, fw_bno), #MM2225_mainline.85.bl7
]
exist, file_name = self._chk_img_file_local(chk_name_list)
if not exist:
logging.info('Download image from server: [%s:%s:%s]' % (build_server, build_stream, fw_bno))
if build_server:
fname = imgres.download_build(build_stream, fw_bno, build_server)
else:
fname = imgres.download_build(build_stream, fw_bno)
else:
logging.info('Image exist in local computer: %s' % (file_name))
fname = file_name
fw_tar_filename = self._escape(os.path.realpath(fname))
#filetype='(\d+\.){1,5}Bl7$' #'.+\.Bl7$',
#fw_img_full_path = imgres.get_image(fw_tar_filename, filetype = filetype)
#fw_img_filename = fw_img_full_path.split("/")[-1]
return fw_tar_filename
def _chk_img_file_local(self, chk_name_list):
result = False
file_name = ''
for chk_name in chk_name_list:
if os.path.exists(chk_name):
file_name = chk_name
result = True
break
return result, file_name
|
[
"tan@xx.com"
] |
tan@xx.com
|
0cdbc657dc62827a0cc554f6fa825a3bca944a6d
|
bfe6c95fa8a2aae3c3998bd59555583fed72900a
|
/findLadders.py
|
bbee45ac5eb94f388fe2af8dfaa11679d7959a31
|
[] |
no_license
|
zzz136454872/leetcode
|
f9534016388a1ba010599f4771c08a55748694b2
|
b5ea6c21bff317884bdb3d7e873aa159b8c30215
|
refs/heads/master
| 2023-09-01T17:26:57.624117
| 2023-08-29T03:18:56
| 2023-08-29T03:18:56
| 240,464,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,952
|
py
|
from collections import deque
from typing import List
# Not sure which LeetCode variant this first solution targets
class Solution1:
def findLadders(self, beginWord: str, endWord: str,
wordList: List[str]) -> int:
wordList.append(beginWord)
if endWord not in wordList:
return []
dic = dict()
levelLog = {word: 5000 for word in wordList}
before = {word: [] for word in wordList}
for word in wordList:
for i in range(len(word)):
key = word[:i] + '*' + word[i + 1:]
if key in dic.keys():
dic[key].append(word)
else:
dic[key] = [word]
queue = [(beginWord, 1)]
levelLog[beginWord] = 1
stopLevel = 5000
while len(queue) > 0:
item = queue[0]
del queue[0]
level = item[1] + 1
if level > stopLevel:
break
for i in range(len(item[0])):
key = item[0][:i] + '*' + item[0][i + 1:]
for neighbor in dic[key]:
if levelLog[neighbor] == 5000:
levelLog[neighbor] = level
before[neighbor].append(item[0])
if neighbor == endWord:
stopLevel = level
queue.append((neighbor, level))
elif levelLog[neighbor] == level and item[0] not in before[
neighbor]:
before[neighbor].append(item[0])
def getAll(word):
if word == beginWord:
return [[beginWord]]
out = []
for b in before[word]:
out += [path + [word] for path in getAll(b)]
return out
return getAll(endWord)
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# sl=Solution()
# print(sl.findLadders(beginWord,endWord,wordList))
class Solution:
    def findLadders(self, beginWord: str, endWord: str,
                    wordList: List[str]) -> List[str]:
        """BFS word ladder: return ONE shortest transformation path from
        beginWord to endWord (inclusive), or [] when none exists.
        Two words are adjacent when they differ in exactly one letter.
        """
        if endWord not in wordList:
            return []
        if beginWord not in wordList:
            wordList.append(beginWord)
        # Wildcard index: "h*t" -> every word matching that pattern.
        dic = dict()
        for word in wordList:
            for i in range(len(word)):
                key = word[:i] + '*' + word[i + 1:]
                if key in dic.keys():
                    dic[key].append(word)
                else:
                    dic[key] = [word]
        # before[w] = predecessor of w on the BFS tree ("" = unvisited).
        before = {k: "" for k in wordList}
        queue = deque([(beginWord, 0)])
        level = 0
        find = False
        while len(queue) > 0:
            word, level = queue.popleft()
            if level > 5000:  # safety valve against pathological inputs
                return []
            for i in range(len(word)):
                key = word[:i] + '*' + word[i + 1:]
                for neighbor in dic[key]:
                    if neighbor == word:
                        continue
                    # First visit wins: record the predecessor exactly once.
                    if before[neighbor] == "":
                        before[neighbor] = word
                    else:
                        continue
                    if neighbor == endWord:
                        find = True
                        break
                    queue.append((neighbor, level + 1))
                if find:
                    break
            if find:
                break
        if not find:
            return []
        # Walk the predecessor chain back from endWord, then reverse it.
        res = []
        tmp = endWord
        while tmp != beginWord:
            res.append(tmp)
            tmp = before[tmp]
        res.append(tmp)
        return res[::-1]
beginWord = "hit"
endWord = "cog"
wordList = ["hot", "dot", "dog", "lot", "log", "cog"]
beginWord = "hit"
endWord = "cog"
wordList = ["hot", "dot", "dog", "lot", "log"]
beginWord = "hot"
endWord = "dog"
wordList = ["hot", "dog"]
print(Solution().findLadders(beginWord, endWord, wordList))
|
[
"zzz136454872@163.com"
] |
zzz136454872@163.com
|
f654f52e05b95498dbdf1c51af76c90d6f91fb31
|
55c24645dd63a1c41037dcfb9fb45bc7bcdea4be
|
/venv/lib/python3.7/site-packages/jwt/__init__.py
|
1e4d63b69dfbe1e69c8b867f879f3f5f7a0ff108
|
[] |
no_license
|
abdullah-nawaz/flask-boilerplate
|
7c42801a21ee3e6a647cc8a7d92e0285f8e86cad
|
01bc7fe1140e8ec613de4a38546a07ddfbdbd254
|
refs/heads/master
| 2022-12-02T05:06:08.297759
| 2020-06-24T21:36:32
| 2020-06-24T21:36:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
# -*- coding: utf-8 -*-
# flake8: noqa
"""
JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
__title__ = "pyjwt"
__version__ = "1.7.1"
__author__ = "José Padilla"
__license__ = "MIT"
__copyright__ = "Copyright 2015-2018 José Padilla"
from .api_jwt import (
encode,
decode,
register_algorithm,
unregister_algorithm,
get_unverified_header,
PyJWT,
)
from .api_jws import PyJWS
from .exceptions import (
InvalidTokenError,
DecodeError,
InvalidAlgorithmError,
InvalidAudienceError,
ExpiredSignatureError,
ImmatureSignatureError,
InvalidIssuedAtError,
InvalidIssuerError,
ExpiredSignature,
InvalidAudience,
InvalidIssuer,
MissingRequiredClaimError,
InvalidSignatureError,
PyJWTError,
)
|
[
"muhammadabdullah@wanclouds.net"
] |
muhammadabdullah@wanclouds.net
|
91f2400b1ac12ac6674de7b95b057475eb95b9df
|
242e68a7c15e6ced652734d1d0e3e88e1074bb39
|
/climetlab/plotting/drivers/magics/__init__.py
|
00d73dd43bf969323001fd0d6e94f9d13a59f7af
|
[
"Apache-2.0"
] |
permissive
|
mchantry/climetlab
|
e6edf596882560ad0b23572b24ac9e5cd9325891
|
8d655b4ac121a69e7244efe109c04d5e110cdf9e
|
refs/heads/main
| 2023-07-22T01:16:52.859802
| 2021-07-22T09:24:00
| 2021-07-22T09:24:00
| 379,984,648
| 0
| 0
|
Apache-2.0
| 2021-06-24T16:16:38
| 2021-06-24T16:16:38
| null |
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
from collections import defaultdict
import yaml
from climetlab.decorators import locked
MAGICS_KEYS = None
MAGICS_DEF = None
MAGICS_PARAMS = None
_inited = False
@locked
def init():
    """Lazily load ``magics.yaml`` and build the lookup tables (guarded by @locked).

    Populates the module-level MAGICS_KEYS (param name -> set of actions),
    MAGICS_PARAMS (action -> {param name: definition}) and MAGICS_DEF (raw
    YAML mapping).  Subsequent calls are no-ops.
    """
    global _inited, MAGICS_KEYS, MAGICS_DEF, MAGICS_PARAMS
    if not _inited:
        MAGICS_KEYS = defaultdict(set)
        MAGICS_PARAMS = defaultdict(dict)
        # magics.yaml lives next to this module.
        with open(os.path.join(os.path.dirname(__file__), "magics.yaml")) as f:
            MAGICS_DEF = yaml.load(f, Loader=yaml.SafeLoader)
        for action, params in MAGICS_DEF.items():
            for param in params:
                name = param["name"]
                MAGICS_KEYS[name].add(action)
                MAGICS_PARAMS[action][name] = param
        _inited = True
def magics_keys_to_actions():
init()
return MAGICS_KEYS
def magics_keys_definitions():
init()
return MAGICS_DEF
def magics_keys_parameters(name):
init()
return MAGICS_PARAMS[name]
|
[
"baudouin.raoult@ecmwf.int"
] |
baudouin.raoult@ecmwf.int
|
4aba6eb3f7de44f562856e8a5171082f34e66878
|
26e2c68f929ecc8bb5c20c6b8cd200b66d99def5
|
/DbConnect/test.py
|
e842e9eeb91a54098255018397b5627a2650cf58
|
[] |
no_license
|
kirigaikabuto/DjangoLessonsPart
|
ad19c1da0d1da27830c6fdf1b07353632bbc097d
|
4442518ae1f0a8641e066c9a63ff4e55e04d5fe5
|
refs/heads/master
| 2022-11-28T10:29:54.428001
| 2020-08-03T09:26:42
| 2020-08-03T09:26:42
| 273,497,052
| 0
| 0
| null | 2020-08-03T09:26:43
| 2020-06-19T13:11:15
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
# 1) Show products
# 2) Create a product
# 3) Delete a product
# 1
# ...
import psycopg2
def _open_connection():
    """Create a new connection to the local ``crm`` database."""
    return psycopg2.connect(
        host="localhost",
        port="5432",
        user="kirito",
        password="passanya",
        dbname="crm"
    )


def change(sql):
    """Execute a data-modifying statement and commit it.

    Fix: the connection was previously never closed (leaked on every call);
    the duplicated connection boilerplate is now shared via _open_connection.
    """
    connection = _open_connection()
    try:
        cursor = connection.cursor()
        cursor.execute(sql)
        connection.commit()
    finally:
        connection.close()


def select(sql):
    """Execute a query and return all fetched rows.

    Fix: the connection was previously never closed (leaked on every call).
    """
    connection = _open_connection()
    try:
        cursor = connection.cursor()
        cursor.execute(sql)
        data = cursor.fetchall()
        return data
    finally:
        connection.close()
|
[
"ytleugazy@dar.kz"
] |
ytleugazy@dar.kz
|
9f69bbac15e78ca0cdd3215052def11f8adb988e
|
1a3eb334e9578e23f63e17b4ee8e51d69405d29f
|
/cluster_analysis/interogate_clusters_for_gene_of_interest.py
|
6e995babf1c21610b93935f749d52b041519d792
|
[] |
no_license
|
peterthorpe5/public_scripts
|
6f0ab79c7a748dbd183ee7173576f7bcf25d7f54
|
a3c64198aad3709a5c4d969f48ae0af11fdc25db
|
refs/heads/master
| 2023-02-08T02:34:18.109091
| 2023-01-25T13:04:23
| 2023-01-25T13:04:23
| 43,360,640
| 35
| 23
| null | 2016-09-15T10:01:11
| 2015-09-29T10:21:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,271
|
py
|
#!/usr/bin/env python
# Code to iterogate clustering with a list
#
# (c) The James Hutton Institute 2016-2017
# Author: Peter Thorpe
import os
from sys import stdin,argv
import sys
from optparse import OptionParser
from collections import Counter
import collections
if "-v" in sys.argv or "--version" in sys.argv:
print("0.01 - get the clusters from a list of seq of interest")
sys.exit(os.system(cmd))
def parse_clusters(clusters):
"""funct to return list of cluserts"""
with open(clusters) as handle:
return handle.read().split("\n")
def get_set_of_interest(infile):
"""funtcion to load in a list of gene names"""
with open(infile) as handle:
data = handle.read().split()
outset = set([])
for entry in data:
outset.add(entry.rstrip())
return outset
##################################################################
usage = """Use as follows:
$ python unique_comprisons.py -i list_of_gene -c cluster_file -a allfile.wanted -o outfile
"""
parser = OptionParser(usage=usage)
parser.add_option("-i","--wanted", dest="infile",
default=None,
help="infile with the names of interest",
metavar="FILE")
parser.add_option("-c","--clusters", dest="clusters",
default="Orthofinder_OrthologousGroups_final.txt",
help="clusters file",
metavar="FILE")
parser.add_option("-a","--all", dest="all_file",
default="all_unique_v1.0.txt",
help="all unique gene names file",
metavar="FILE")
parser.add_option("-o", "--out", dest="out", default="result.out",
help="output filenames")
(options, args) = parser.parse_args()
infile = options.infile
clusters = options.clusters
all_file = options.all_file
out = options.out
################################################################
if __name__ == '__main__':
if not os.path.isfile(clusters):
print("sorry cannot find you %s file" % clusters)
os._exit(0)
if not os.path.isfile(clusters):
print("sorry cannot find you %s infile" % infile)
os._exit(0)
working_dir = os.getcwd()
dest_dir = os.path.join(working_dir, 'results')
try:
os.makedirs(dest_dir)
except OSError:
print("folder already exists, I will write over what is in there!!")
cluster_data = parse_clusters(clusters)
wanted = get_set_of_interest(infile)
all_unique = get_set_of_interest(all_file)
# track the interesting clusters so we dont get repeats
clusters_of_interest = set([])
outfile_path = os.path.join(working_dir, 'results', out)
f_out = open(outfile_path, "w")
allowed = ['Mpe', 'Mca', 'Api','Rpa', 'Dno']
print "starting wanted list = %d " % len(wanted)
# parser through the cluster file
wanted_tracked_count = 0
total_unique_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
Total_elements_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
Total_elements_matching_wanted_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
for line in cluster_data:
line = line.rstrip()
unique_counter_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
species_counter = Counter({'Mpe':0, 'Mca':0, 'Api': 0,
'Rpa':0, 'Dno':0})
cluster_elements = line.split()
# each entry separetly
for gene in cluster_elements:
gene = gene.rstrip()
# only if the cluster contains a wanted gene
prefix = gene[:3]
Total_elements_counter[prefix] += 1
if gene in wanted:
# check to see if we have seen this line before
prefix = gene[:3]
Total_elements_matching_wanted_counter[prefix] += 1
if not line in clusters_of_interest:
clusters_of_interest.add(line.rstrip())
# count through th cluster again to see what speices are there
for gene in cluster_elements:
gene = gene.rstrip()
prefix = gene[:3]
if prefix in allowed:
# double check only allowed species are counted
species_counter[prefix] += 1
if gene in all_unique:
unique_counter_counter[prefix] += 1
total_unique_counter[prefix] += 1
if gene in wanted:
wanted_tracked_count = wanted_tracked_count + 1
#print len(line.split())
#print species_counter
#print unique_counter_counter
extra = "Cluster size = \t"
species_counter_od = collections.OrderedDict(sorted(species_counter.items()))
species_counter_od = collections.OrderedDict(sorted(unique_counter_counter.items()))
out_formatted = "%s%d\t\tSPECIES: %s\t\tUNIQUE:\t%s\n" % (extra,
len(line.split()),
species_counter_od,
species_counter_od)
f_out.write(out_formatted)
print "total found = %d" % wanted_tracked_count
print "total_unique_counter = ", collections.OrderedDict(sorted(total_unique_counter.items()))
print "Total_elements_counter = ", collections.OrderedDict(sorted(Total_elements_counter.items()))
print "Total_elements_matching_wanted_counter = ", collections.OrderedDict(sorted(Total_elements_matching_wanted_counter.items()))
f_out.close()
f_out.close()
|
[
"peter.thorpe@hutton.ac.uk"
] |
peter.thorpe@hutton.ac.uk
|
88b9865413cebcdc91c4fcd00f30340d0b864197
|
b284d59bdf2c01977eb6d80795e2c75cb95b9b2c
|
/config/wsgi.py
|
77f49bd2bbd2fcc7f583bd1617f7e6d091ef359a
|
[
"MIT"
] |
permissive
|
CoutinhoElias/danibraz
|
58d27cb30661d06091196cc487a9d902f4f8dac9
|
b21f3ce3477ded74c901fa377a5b2ac5e68faf36
|
refs/heads/master
| 2021-01-20T02:12:30.096953
| 2018-04-01T15:52:40
| 2018-04-01T15:52:40
| 89,386,992
| 0
| 1
|
MIT
| 2017-12-01T16:52:47
| 2017-04-25T17:14:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
"""
WSGI config for Dani Braz project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# danibraz directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'danibraz'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[
"coutinho.elias@gmail.com"
] |
coutinho.elias@gmail.com
|
0ba95fac59a79e49da28fd1831d5d6b61331dffe
|
5460b47bcf525348b7b615ce67a674b2de787915
|
/working with relational databases in python/Pandas for more complex querying.py
|
34050cdb305cc08e47f2117bb7b700f027857426
|
[] |
no_license
|
AnkitaDeshmukh/Importing-data-in-Python-Part-1
|
79c42677dd9887f7c9b57689634be09412a80fe9
|
3606a0510fc06c009bb30c1553e912bc6a5717a0
|
refs/heads/master
| 2020-03-27T11:44:50.665912
| 2018-08-29T00:41:41
| 2018-08-29T00:41:41
| 146,505,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
#Using the function create_engine(), create an engine for the SQLite database Chinook.sqlite and assign it to the variable engine.
#Use the pandas function read_sql_query() to assign to the variable df the DataFrame of results from the following query:
#select all records from the Employee table where the EmployeeId is greater than or equal to 6 and ordered by BirthDate
#(make sure to use WHERE and ORDER BY in this precise order).
# Import packages
from sqlalchemy import create_engine
import pandas as pd
# Create engine: engine
engine = create_engine('sqlite:///Chinook.sqlite')
# Execute query and store records in DataFrame: df
df = pd.read_sql_query(
'SELECT * FROM Employee WHERE EmployeeId >= 6 ORDER BY BirthDate', engine)
# Print head of DataFrame
print(df.head())
|
[
"noreply@github.com"
] |
AnkitaDeshmukh.noreply@github.com
|
0a9ffca0fe6c9a6b6a984c02b4947634976dfac3
|
18f8a1c7122c0b320f17ea31192439779a8c63e8
|
/zoom/fill.py
|
f181b9ee1ee496933103ca4f07a4d176277e13db
|
[
"MIT"
] |
permissive
|
RyanLainchbury/zoom
|
d49afa8d3506fca2c6e426707bd60ba640420a45
|
684a16f4fe3cea3d26f2d520c743a871ca84ecc5
|
refs/heads/master
| 2020-12-25T19:03:12.881247
| 2017-06-09T07:29:27
| 2017-06-09T07:29:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,985
|
py
|
"""
fills templates
"""
import re
parts_re = (
r"""(\w+)\s*=\s*"([^"]*)"|"""
r"""(\w+)\s*=\s*'([^']*)'|"""
r"""(\w+)\s*=\s*([^\s]+)\s*|"""
r""""([^"]*)"|"""
r"""("")|"""
r"""(\w+)"""
)
tag_parts = re.compile(parts_re)
pattern_tpl = r'%s([a-z0-9_]+)\s*(.*?)%s'
patterns = {}
def _fill(tag_start, tag_end, text, callback):
    """do the actual work of filling in tags

    Scans ``text`` for tags of the form ``<start>name key=value ...<end>``,
    calls ``callback(name, *args, **keywords)`` for each and substitutes the
    result.  Anything inside HTML comments (``<!-- ... -->``) is left alone.
    A callback result of None leaves the tag unchanged.

    >>> def filler(name, *args, **kwargs):
    ...     if name == 'name':
    ...         return 'Joe'

    >>> _fill('<dz:', '>', 'Hello <dz:name>!', filler)
    'Hello Joe!'
    """

    def replace_tag(match):
        """replace a tag"""
        # The tag name is the first capture group; the remainder of the tag
        # body (between the delimiters) holds the parameters.
        name = match.groups(1)[0].lower()
        rest = match.group(0)[len(name)+len(tag_start):-len(tag_end)]
        parts = tag_parts.findall(rest)
        # Groups (a,b)/(c,d)/(e,f) of parts_re are key=value pairs with
        # double quotes, single quotes and no quotes respectively; groups
        # g/h/i are bare positional tokens.
        keywords = dict(
            a and (a, b) or c and (c, d) or e and (e, f)
            for (a, b, c, d, e, f, g, h, i) in parts
            if a or c or e
        )
        args = [
            h or i or g or ""
            for (_, _, _, _, _, _, g, h, i) in parts
            if h or i or g
        ]
        result = callback(name, *args, **keywords)
        # None means "leave the original tag text in place".
        if result is None:
            result = match.group(0)
        return str(result)

    # Compile (and cache in the module-level dict) the tag-matching regex
    # for this delimiter pair.
    tags = (tag_start, tag_end)
    if tags not in patterns:
        patterns[tags] = re.compile(
            pattern_tpl % (tag_start, tag_end),
            re.IGNORECASE
        )
    innerre = patterns[tags]

    # Substitute tags only in the stretches between HTML comments, keeping
    # the comments themselves verbatim.
    result = []
    lastindex = 0
    for outermatch in re.finditer("<!--.*?-->", text):
        text_between = text[lastindex:outermatch.start()]
        new_text = innerre.sub(replace_tag, text_between)
        result.append(new_text)
        lastindex = outermatch.end()
        result.append(outermatch.group())
    text_after = text[lastindex:]
    result.append(innerre.sub(replace_tag, text_after))
    return ''.join(x for x in result)
def fill(text, callback):
    """fill a tag in the double handlebars style

    Fills ``{{name args}}`` tags first and then, on the result, legacy
    ``<dz:name args>`` tags.

    >>> def filler(name, *args, **kwargs):
    ...     if name == 'name':
    ...         name = kwargs.get('language')=='french' and 'Jacques' or 'James'
    ...         if 'upper' in args:
    ...             return name.upper()
    ...         elif 'lower' in args:
    ...             return name.lower()
    ...         else:
    ...             return name
    >>> fill('Hello {{name}}!', filler)
    'Hello James!'
    >>> fill('Hello {{name language=\"french\"}}!', filler)
    'Hello Jacques!'
    >>> fill('Hello {{name upper}}!', filler)
    'Hello JAMES!'
    >>> fill('Hello {{name lower language=\"french\"}}!', filler)
    'Hello jacques!'
    >>> fill('Hello {{name lower language=french}}!', filler)
    'Hello jacques!'
    >>> fill('Hello {{name}}!', lambda a: None )
    'Hello {{name}}!'
    >>>
    """
    handlebars_filled = _fill('{{', '}}', text, callback)
    return dzfill(handlebars_filled, callback)
def dzfill(text, callback):
    """fill a tag in the <dz: style

    Thin wrapper around _fill() fixing the delimiters to '<dz:' and '>'.
    """
    return _fill('<dz:', '>', text, callback)
|
[
"herb@dynamic-solutions.com"
] |
herb@dynamic-solutions.com
|
fbaf9cc4e178d2fadabfc4ac0a9acb6617193252
|
1a6cbe035adb81fea66615323a836327d06f9e72
|
/year2020/d8.py
|
a9a412e59e1d2f3574d82a8926248a6aa6dee291
|
[] |
no_license
|
ecurtin2/advent-of-code
|
a2607d857408d722b07d4cfc66855edcd019cda7
|
216db926c5bab9bf1ec3cac2aa912c1a2ff70d6c
|
refs/heads/main
| 2022-12-15T10:06:51.202608
| 2022-12-14T17:28:15
| 2022-12-14T17:28:15
| 160,612,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Tuple
class OpCode(Enum):
    """The three instructions of the AoC 2020 day-8 handheld machine."""
    acc = "acc"  # add operand to the accumulator, advance one instruction
    jmp = "jmp"  # jump relative by the operand
    nop = "nop"  # do nothing, advance one instruction
@dataclass
class Emu:
    """Tiny interpreter for acc/jmp/nop programs (AoC 2020 day 8)."""

    program: List[Tuple[OpCode, int]]
    acc: int = 0
    pointer: int = 0
    history: List[int] = field(default_factory=list)

    @staticmethod
    def from_iterable(iterable):
        # Each line looks like "jmp -3": an opcode word and a signed offset.
        parsed = []
        for line in iterable:
            opcode, operand = line.split()
            parsed.append((OpCode[opcode], int(operand)))
        return Emu(parsed)

    def with_sub(self, idx: int, code: OpCode, value: int) -> "Emu":
        """Return a fresh Emu whose instruction at *idx* is replaced."""
        patched = list(self.program)
        patched[idx] = (code, value)
        return Emu(patched)

    def execute(self) -> Tuple[int, bool]:
        """Run until termination or a repeated instruction.

        Returns (accumulator, True) on normal termination and
        (accumulator, False) when an infinite loop is detected.
        """
        while True:
            if self.pointer in self.history:
                return self.acc, False  # about to execute a line twice: loop
            if self.pointer >= len(self.program):
                return self.acc, True   # ran off the end: terminated
            self.history.append(self.pointer)
            opcode, operand = self.program[self.pointer]
            self._execute_code(opcode, operand)

    def _execute_code(self, code: OpCode, value: int):
        # Apply one instruction to the machine state.
        if code == OpCode.acc:
            self.acc += value
            self.pointer += 1
        elif code == OpCode.jmp:
            self.pointer += value
        elif code == OpCode.nop:
            self.pointer += 1
def part1(inp: List[str]) -> int:
    """Accumulator value at the moment the program would loop forever."""
    final_acc, _terminated = Emu.from_iterable(inp).execute()
    return final_acc
def part2(inp: List[str]) -> int:
    """Accumulator after swapping the single jmp/nop that lets the program halt."""
    original = Emu.from_iterable(inp)
    # Each swappable opcode and its replacement; acc instructions are left alone.
    swaps = {OpCode.nop: OpCode.jmp, OpCode.jmp: OpCode.nop}
    for index, (opcode, operand) in enumerate(original.program):
        if opcode not in swaps:
            continue
        candidate = original.with_sub(index, swaps[opcode], operand)
        accumulator, halted = candidate.execute()
        if halted:
            return accumulator
def test_part1():
    # Worked example from the puzzle statement: the accumulator is 5 just
    # before any instruction is executed a second time.
    inp = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6"""
    assert part1(inp.splitlines()) == 5
def test_part2():
    # Same sample program: swapping the final jmp to nop lets it terminate
    # with the accumulator at 8.
    inp = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6"""
    assert part2(inp.splitlines()) == 8
|
[
"ecurtin2@illinois.edu"
] |
ecurtin2@illinois.edu
|
25b825d27e27e80bf0bf8ad227c956a257a4aeaf
|
c7a1c1ae40e9d95dfb92251dcfbf3c5010e6ba81
|
/unicorn-hat-cpu-status-indicator.py
|
76dbd1127ffdb1b660f19b2e71e99fbcaea3b998
|
[] |
no_license
|
pranavlathigara/Raspberry-Pi-DIY-Projects
|
efd18e2e5b9b8369bb1a5f5418782480cf9bc729
|
0c14c316898d4d06015912ac4a8cb7b71a3980c0
|
refs/heads/master
| 2021-04-06T09:14:28.088223
| 2018-02-19T00:15:22
| 2018-02-19T00:15:22
| 124,649,553
| 1
| 2
| null | 2018-03-10T11:30:59
| 2018-03-10T11:30:59
| null |
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
#!/usr/bin/env python
# https://forums.pimoroni.com/t/unicorn-hat-cpu-status-indicator/6150
# Display overall CPU usage on a Pimoroni Unicorn HAT as a colour gradient.
import unicornhat as uh

try:
    import psutil
except ImportError:
    # BUG FIX: the original message had the newline escape backwards ("n\I").
    exit("This script requires psutil.\nInstall with: sudo pip install psutil")

# Set the brightness of the UnicornHAT - 1.0 is blindingly bright!
uh.brightness(0.5)

# Ten-step green -> red gradient (create your own via
# http://www.perbang.dk/rgbgradient/), one entry per decade of CPU usage.
# A lookup table fixes the gaps in the original if/elif chain, where
# boundary values such as 10, 11, 20, 21, ... 90 fell through to solid red.
GRADIENT = (
    (0, 255, 0),    # 0-9 %   green
    (56, 255, 0),   # 10-19 %
    (113, 255, 0),  # 20-29 % lime
    (170, 255, 0),  # 30-39 %
    (226, 255, 0),  # 40-49 % yellow
    (255, 226, 0),  # 50-59 %
    (255, 170, 0),  # 60-69 % orange
    (255, 113, 0),  # 70-79 %
    (255, 56, 0),   # 80-89 %
    (255, 0, 0),    # 90-100 % red
)

# Poll CPU usage once a second and repaint the whole HAT.
while True:
    cpu = int(psutil.cpu_percent(interval=1))
    # print cpu  # Uncomment to show CPU usage in the terminal
    red, green, blue = GRADIENT[min(cpu // 10, len(GRADIENT) - 1)]
    uh.set_all(red, green, blue)
    uh.show()
|
[
"tdamdouni@users.noreply.github.com"
] |
tdamdouni@users.noreply.github.com
|
1ad5e73bdc6bb2f1b25482eb24098fb40a19d746
|
4bfc3c184e736bb68dccbb6d5657f11c950df002
|
/tests/operators/dynamic_shape/test_dynamic_SIMD_v1.py
|
c742eb60dd2fa29b664aa45796b2afbb2b3fbc98
|
[
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
laekov/akg
|
159aa64ef6135222b5af784c408731275dfa9bdb
|
5316b8cb2340bbf71bdc724dc9d81513a67b3104
|
refs/heads/master
| 2022-12-01T04:09:03.548063
| 2020-08-19T08:38:57
| 2020-08-19T08:41:28
| 288,678,192
| 0
| 0
|
Apache-2.0
| 2020-08-19T08:41:30
| 2020-08-19T08:36:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,094
|
py
|
import akg
import akg.topi as topi
import akg.tvm as tvm
from gen_random import random_gaussian
from akg.utils import kernel_exec as utils
from akg import platform as cce
import numpy as np
import pdb
dtype = "float16"
mapKey = {"add":"binary", "sub":"binary","div":"binary","mul":"binary","min":"binary","max":"binary",
"abs": "single", "exp": "single", "log": "single", "sqrt": "single",
"adds": "single", "muls": "single"}
insn = "adds"
insnType = mapKey[insn]
def gen_data(shape, dtype):
    """Generate two random inputs and the numpy reference result for *insn*.

    Returns (ma, mb, expect); mb is only meaningful for binary ops, and for
    an unrecognised insn the expected result defaults to ma unchanged.
    """
    support_list = {"float16": np.float16, "float32": np.float32}
    ma = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
    mb = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
    expect = ma
    if insn == "add":
        expect = ma + mb
    elif insn == "sub":
        expect = ma - mb
    elif insn == "mul":
        expect = ma * mb
    elif insn == "div":
        expect = ma / mb
    elif insn == "max":
        # BUG FIX: np.max(ma, mb) treats mb as the *axis* argument; the
        # elementwise maximum of two arrays is np.maximum (same for min).
        expect = np.maximum(ma, mb)
    elif insn == "min":
        expect = np.minimum(ma, mb)
    elif insn == "abs":
        expect = np.abs(ma)
    elif insn == "exp":
        expect = np.exp(ma)
    elif insn == "log":
        expect = np.log(ma)
    elif insn == "sqrt":
        expect = np.sqrt(ma)
    elif insn == "adds":
        expect = ma + 2
    elif insn == "muls":
        expect = ma * 2
    return ma, mb, expect
def gen_kernel():
    """Compile the CCE kernel for the configured *insn*/*dtype*.

    Also prints the generated source and writes it to "aaaa_code.cce"
    via save_cce().  Returns the compiled module.
    """
    kernel_name = "dynamic_1d_" + insn + "_" + dtype
    # Disable most schedule transforms so the generated code stays simple;
    # enable_dynamic turns on dynamic-shape compilation.
    attrs = {}
    attrs['enable_multicore'] = False
    attrs['enable_post_poly_loop_partition'] = False
    attrs['enable_unroll_loop'] = False
    attrs['enable_fix_loop_extent'] = False
    attrs['enable_double_buffer'] = False
    attrs['enable_dynamic'] = True
    attrs['dim'] = "0 0 1024 1"
    mod = my_dsl(dtype, kernel_name, attrs)
    source_code = mod.imported_modules[0].get_source()
    print(source_code)
    save_cce(source_code)
    return mod
def my_dsl(dtype, kernel_name, attrs):
    """Build a 1-D dynamic-shape compute for *insn* and compile it with akg.

    Binary ops take placeholders A and B; single-input ops only A.
    Returns the compiled CCE module.
    """
    m = tvm.var("M")
    A = tvm.placeholder((m,), name="A", dtype=dtype)
    B = tvm.placeholder((m,), name="B", dtype=dtype)
    if insn == "add":
        C = topi.add(A, B)
    elif insn == "sub":
        C = topi.subtract(A, B)
    elif insn == "mul":
        C = topi.multiply(A, B)
    elif insn == "div":
        C = topi.divide(A, B)
    elif insn == "max":
        C = topi.maximum(A, B)
    elif insn == "min":
        C = topi.minimum(A, B)
    elif insn == "abs":
        C = tvm.compute(A.shape, lambda *index: tvm.abs(A(*index)), name='C')
    elif insn == "exp":
        C = topi.exp(A)
    elif insn == "log":
        C = topi.log(A)
    elif insn == "sqrt":
        # BUG FIX: the original overwrote the sqrt compute with topi.log(A)
        # and then repeated an unreachable duplicate "sqrt" branch.
        C = topi.sqrt(A)
    elif insn == "adds":
        C = A + tvm.const(2, dtype)
    elif insn == "muls":
        C = A * tvm.const(2, dtype)
    # C = tvm.compute((m, ), lambda i: A[i] + B[i], name="C")
    s = tvm.create_schedule([C.op])
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        if insnType == "binary":
            mod = akg.build(s, [A, B, C], "cce", name=kernel_name, attrs = attrs, polyhedral=True)
        else:
            mod = akg.build(s, [A, C], "cce", name=kernel_name, attrs = attrs, polyhedral=True)
    return mod
def save_cce(code):
    """Write the generated CCE source *code* to the fixed debug file."""
    with open("aaaa_code.cce", "w") as outfile:
        outfile.write(code)
def test_dsl(shape):
    """Compile the kernel for *insn*, run it on random data and print results.

    NOTE(review): cpr_res_is / cpr_res_all are computed but never asserted
    or printed, so this "test" cannot fail on a numerical mismatch.
    """
    print("\n\n\nshape:", shape, "\n\n")
    mod = gen_kernel()
    ma, mb, expect = gen_data(shape, dtype)
    output = np.full(expect.shape, 0, dtype=dtype)
    # Binary ops launch with (A, B, out); single-input ops with (A, out).
    if insnType == "binary":
        output = utils.mod_launch(mod, (ma, mb, output))
    else:
        output = utils.mod_launch(mod, (ma, output))
    rtol = atol = 1e-04
    cpr_res_is = np.isclose(output, expect, rtol, atol, equal_nan=False)
    cpr_res_all = np.allclose(output, expect, rtol, atol, equal_nan=False)
    print("\noutput:", output)
    print("\nexpect:", expect)
if __name__ == "__main__":
test_dsl((30000,))
#test_dsl((1999,))
# test_dsl((2001,))
|
[
"ckey.chengbin@huawei.com"
] |
ckey.chengbin@huawei.com
|
d0daf7126ac49ceaf9fe4fba467bdcc38254018b
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddPlaylistItemsRequest.py
|
6eb4fbed96c893b1ed0d077e712f2afb6901d726
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class AddPlaylistItemsRequest(RpcRequest):
    """RPC request wrapper for the live-streaming "AddPlaylistItems" API
    (version 2016-11-01).

    Generated-style client code: each API parameter gets a trivial
    getter/setter that reads from / writes into the request's query
    parameters.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'live', '2016-11-01', 'AddPlaylistItems','live')
        self.set_method('POST')
        # Route through per-region endpoint tables when they are present.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ProgramItems(self): # String
        return self.get_query_params().get('ProgramItems')

    def set_ProgramItems(self, ProgramItems):  # String
        self.add_query_param('ProgramItems', ProgramItems)

    def get_ProgramId(self): # String
        return self.get_query_params().get('ProgramId')

    def set_ProgramId(self, ProgramId):  # String
        self.add_query_param('ProgramId', ProgramId)

    def get_CasterId(self): # String
        return self.get_query_params().get('CasterId')

    def set_CasterId(self, CasterId):  # String
        self.add_query_param('CasterId', CasterId)

    def get_OwnerId(self): # Long
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):  # Long
        self.add_query_param('OwnerId', OwnerId)

    def get_ProgramConfig(self): # String
        return self.get_query_params().get('ProgramConfig')

    def set_ProgramConfig(self, ProgramConfig):  # String
        self.add_query_param('ProgramConfig', ProgramConfig)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
9022cbf868abe8040133bf2c58b6759ba2bb8d2d
|
fe70774ff6898c5bdb0c941b4f335de576abfdb6
|
/flopy/modflow/mfsip.py
|
157d25044903290c48a1a97ca47478f9397ad1af
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
robinthibaut/flopy
|
35af468415d1ba6e1de119a7cb335381304fada9
|
22ef330bcfb9259fc23735d6b174d27804b624a0
|
refs/heads/develop
| 2023-06-30T21:43:24.101593
| 2023-06-13T19:46:03
| 2023-06-13T19:46:03
| 255,560,877
| 0
| 0
|
BSD-3-Clause
| 2022-10-10T12:23:38
| 2020-04-14T09:05:42
| null |
UTF-8
|
Python
| false
| false
| 7,748
|
py
|
"""
mfsip module. Contains the ModflowSip class. Note that the user can access
the ModflowSip class as `flopy.modflow.ModflowSip`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<https://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/sip.html>`_.
"""
from ..pakbase import Package
class ModflowSip(Package):
    """
    MODFLOW Strongly Implicit Procedure Package Class.
    Parameters
    ----------
    model : model object
        The model object (of type :class:flopy.modflow.mf.Modflow) to which
        this package will be added.
    mxiter : integer
        The maximum number of times through the iteration loop in one time
        step in an attempt to solve the system of finite-difference equations.
        (default is 200)
    nparm : integer
        The number of iteration variables to be used.
        Five variables are generally sufficient. (default is 5)
    accl : float
        The acceleration variable, which must be greater than zero
        and is generally equal to one. If a zero is entered,
        it is changed to one. (default is 1)
    hclose : float > 0
        The head change criterion for convergence. When the maximum absolute
        value of head change from all nodes during an iteration is less than
        or equal to hclose, iteration stops. (default is 1e-5)
    ipcalc : 0 or 1
        A flag indicating where the seed for calculating iteration variables
        will come from. 0 is the seed entered by the user will be used.
        1 is the seed will be calculated at the start of the simulation from
        problem variables. (default is 0)
    wseed : float > 0
        The seed for calculating iteration variables. wseed is always read,
        but is used only if ipcalc is equal to zero. (default is 0)
    iprsip : integer > 0
        the printout interval for sip. iprsip, if equal to zero, is changed
        to 999. The maximum head change (positive or negative) is printed for
        each iteration of a time step whenever the time step is an even
        multiple of iprsip. This printout also occurs at the end of each
        stress period regardless of the value of iprsip. (default is 0)
    extension : string
        Filename extension (default is 'sip')
    unitnumber : int
        File unit number (default is None).
    filenames : str or list of str
        Filenames to use for the package. If filenames=None the package name
        will be created using the model name and package extension. If a
        single string is passed the package will be set to the string.
        Default is None.
    Attributes
    ----------
    Methods
    -------
    See Also
    --------
    Notes
    -----
    Examples
    --------
    >>> import flopy
    >>> ml = flopy.modflow.Modflow()
    >>> sip = flopy.modflow.ModflowSip(ml, mxiter=100, hclose=0.0001)
    """
    def __init__(
        self,
        model,
        mxiter=200,
        nparm=5,
        accl=1,
        hclose=1e-5,
        ipcalc=1,
        wseed=0,
        iprsip=0,
        extension="sip",
        unitnumber=None,
        filenames=None,
    ):
        # set default unit number if one is not specified
        if unitnumber is None:
            unitnumber = ModflowSip._defaultunit()
        # call base package constructor
        super().__init__(
            model,
            extension=extension,
            name=self._ftype(),
            unit_number=unitnumber,
            filenames=self._prepare_filenames(filenames),
        )
        # check if a valid model version has been specified
        # (SIP is not supported by MODFLOW-USG)
        if model.version == "mfusg":
            raise Exception(
                f"Error: cannot use {self.name} package "
                f"with model version {model.version}"
            )
        self._generate_heading()
        self.url = "sip.html"
        # store solver settings; see class docstring for meanings
        self.mxiter = mxiter
        self.nparm = nparm
        self.accl = accl
        self.hclose = hclose
        self.ipcalc = ipcalc
        self.wseed = wseed
        self.iprsip = iprsip
        self.parent.add_package(self)
    def write_file(self):
        """
        Write the package file.
        Returns
        -------
        None
        """
        # Open file for writing
        f = open(self.fn_path, "w")
        f.write(f"{self.heading}\n")
        # Free format writes space-delimited records; fixed format writes
        # 10-character fields as classic MODFLOW readers expect.
        ifrfm = self.parent.get_ifrefm()
        if ifrfm:
            # dataset 1: max iterations, number of iteration variables
            f.write(f"{self.mxiter} {self.nparm}\n")
            # dataset 2: acceleration, closure criterion, seed flag,
            # seed value, print interval
            f.write(
                f"{self.accl} {self.hclose} {self.ipcalc} {self.wseed} {self.iprsip}\n"
            )
        else:
            f.write(f"{self.mxiter:10d}{self.nparm:10d}\n")
            f.write(
                "{:10.3f}{:10.3g}{:10d}{:10.3f}{:10d}\n".format(
                    self.accl,
                    self.hclose,
                    self.ipcalc,
                    self.wseed,
                    self.iprsip,
                )
            )
        f.close()
    @classmethod
    def load(cls, f, model, ext_unit_dict=None):
        """
        Load an existing package.
        Parameters
        ----------
        f : filename or file handle
            File to load.
        model : model object
            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
            which this package will be added.
        ext_unit_dict : dictionary, optional
            If the arrays in the file are specified using EXTERNAL,
            or older style array control records, then `f` should be a file
            handle.  In this case ext_unit_dict is required, which can be
            constructed using the function
            :class:`flopy.utils.mfreadnam.parsenamefile`.
        Returns
        -------
        sip : ModflowSip object
        Examples
        --------
        >>> import flopy
        >>> m = flopy.modflow.Modflow()
        >>> sip = flopy.modflow.ModflowSip.load('test.sip', m)
        """
        if model.verbose:
            print("loading sip package file...")
        # accept either a path or an already-open file handle
        openfile = not hasattr(f, "read")
        if openfile:
            filename = f
            f = open(filename, "r")
        # dataset 0 -- header (skip leading comment lines)
        while True:
            line = f.readline()
            if line[0] != "#":
                break
        ifrfm = model.get_ifrefm()
        # dataset 1 (free format: whitespace-split; fixed: 10-char columns)
        if ifrfm:
            t = line.strip().split()
            mxiter = int(t[0])
            nparm = int(t[1])
        else:
            mxiter = int(line[0:10].strip())
            nparm = int(line[10:20].strip())
        # dataset 2
        line = f.readline()
        if ifrfm:
            t = line.strip().split()
            accl = float(t[0])
            hclose = float(t[1])
            ipcalc = int(t[2])
            wseed = float(t[3])
            iprsip = int(t[4])
        else:
            accl = float(line[0:10].strip())
            hclose = float(line[10:20].strip())
            ipcalc = int(line[20:30].strip())
            wseed = float(line[30:40].strip())
            iprsip = int(line[40:50].strip())
        if openfile:
            f.close()
        # set package unit number from the name-file info if available
        unitnumber = None
        filenames = [None]
        if ext_unit_dict is not None:
            unitnumber, filenames[0] = model.get_ext_dict_attr(
                ext_unit_dict, filetype=ModflowSip._ftype()
            )
        return cls(
            model,
            mxiter=mxiter,
            nparm=nparm,
            accl=accl,
            hclose=hclose,
            ipcalc=ipcalc,
            wseed=wseed,
            iprsip=iprsip,
            unitnumber=unitnumber,
            filenames=filenames,
        )
    @staticmethod
    def _ftype():
        # file type string used in the MODFLOW name file
        return "SIP"
    @staticmethod
    def _defaultunit():
        # default FORTRAN unit number for the SIP package
        return 25
|
[
"noreply@github.com"
] |
robinthibaut.noreply@github.com
|
4d8c07a70b0749f8547c953b05392c9470433b4b
|
f53ceb369fe3ed0e57004510d5836abbac7ce2e1
|
/src/embeddingdb/web/wsgi.py
|
23375a9b9580bc64f7264ca7c79562d6fd97ef3b
|
[
"MIT"
] |
permissive
|
aarek-eng/embeddingdb
|
e3b21a1b8c3ed54a10fdce647e51c1bd560d117c
|
e6c67e92e540c4315045a0b4de5b31490331c177
|
refs/heads/master
| 2022-01-10T17:38:38.526503
| 2019-06-26T21:37:52
| 2019-06-26T21:37:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
# -*- coding: utf-8 -*-
"""A WSGI formulation of the web application.
Also allows the web application to be run with ``python -m embeddingdb.web.wsgi``.
"""
from embeddingdb.web.app import get_app
# The WSGI application object; servers look this attribute up by name.
app = get_app()

if __name__ == '__main__':
    # ``python -m embeddingdb.web.wsgi`` runs the application's own server.
    app.run()
|
[
"cthoyt@gmail.com"
] |
cthoyt@gmail.com
|
89aa9e1612eed3d6d31f549bbee33b18d54bdb6f
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/c18d50d76b304b2ba3289047cce00533.py
|
f94f1b404b123b82294be4a70207d24c3db05ca6
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
import re
import sys
import nltk
import string

# Load the sentence tokenizer once at import time; if the punkt model is
# missing, offer nltk's interactive downloader and retry.
try:
    SENT_DETECTOR = nltk.data.load("tokenizers/punkt/english.pickle")
    ## nltk data must be installed for nltk.data.load to work
except LookupError:
    if nltk.download():
        SENT_DETECTOR = nltk.data.load("tokenizers/punkt/english.pickle")
    else:
        # BUG FIX: sys.exit was called here without `sys` being imported.
        sys.exit("nltk download did not successfully complete")
def hey(s):
    """Return Bob's reply to *s*.

    Yelling gets "Woah, chill out!", a question gets "Sure.", silence gets
    "Fine. Be that way!", anything else gets "Whatever.".

    NOTE: this module targets Python 2 (`basestring` below).
    """
    ## if not a string, attempt to cast to a string
    if not isinstance(s, basestring):
        try:
            s = str(s)
        except:
            # unconvertible input falls through to the default reply
            return "Whatever."
    if is_yell(s):
        return "Woah, chill out!"
    elif is_question(s):
        return "Sure."
    elif is_silence(s):
        return "Fine. Be that way!"
    else:
        return "Whatever."
def is_yell(s):
    """True if *s* is shouted: uppercasing is a no-op but lowercasing is not.

    Uses str methods instead of the deprecated string-module functions
    (string.upper/string.lower were removed in Python 3); behaviour is
    unchanged.  Strings with no letters are never yells.
    """
    return s == s.upper() != s.lower()
def is_question(s):
    """True if any sentence of *s* ends with '?' preceded by non-whitespace."""
    ## check for sentence ending with a non white
    ## space character followed by a question mark
    return any([re.search("\S\?$", x) for x in SENT_DETECTOR.tokenize(s)])
def is_silence(s):
    """True when *s* contains nothing but whitespace (or is empty)."""
    return s.strip() == ""
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
f8ce4049fa17b2f77c746a564018e80505b3ad57
|
44a6e88da453a2e368b014e403843b0c955f21f4
|
/utils/test/test_cmpdirs.py
|
a6803725d5400b4ac3619b4f18d0768c8355442c
|
[
"Artistic-2.0"
] |
permissive
|
golharam/genomics
|
a26b1f9366203ec059cc2e49281909bfc16e6ab4
|
ca0c7c239b0f04353e2f2fa897db9c24a1211596
|
refs/heads/master
| 2020-08-06T10:28:21.604129
| 2019-09-27T07:51:41
| 2019-09-27T07:51:41
| 212,943,378
| 0
| 0
|
Artistic-2.0
| 2019-10-05T04:25:24
| 2019-10-05T04:25:23
| null |
UTF-8
|
Python
| false
| false
| 6,468
|
py
|
#######################################################################
# Tests for cmpdirs.py
#######################################################################
import unittest
import os
import tempfile
import shutil
from bcftbx.Md5sum import Md5Checker
from bcftbx.test.mock_data import TestUtils,ExampleDirLanguages
from cmpdirs import yield_filepairs
from cmpdirs import cmp_filepair
from cmpdirs import cmp_dirs
class TestYieldFilepairs(unittest.TestCase):
    """Tests for yield_filepairs()."""

    def setUp(self):
        # Create example directory structure which
        # includes files and links
        self.d = ExampleDirLanguages()
        self.d.create_directory()

    def tearDown(self):
        # Delete example directory structure
        self.d.delete_directory()

    def test_yield_filepairs(self):
        """yield_filepairs returns correct set of files and links
        """
        # Get all files, links and directories in the example directory
        expected = self.d.filelist(include_links=True,include_dirs=True)
        # Remove any (non-link) directories from the expected list.
        # BUG FIX: filter() returns an iterator on Python 3, which has no
        # .remove() method; materialize the filtered result as a list.
        expected = [x for x in expected
                    if os.path.islink(x) or not os.path.isdir(x)]
        print("Expected = %s" % expected)
        # Get all file pairs from the example dir and a
        # dummy target directory name
        for pair in yield_filepairs(self.d.dirn,'/dummy/dir'):
            p1,p2 = pair
            self.assertTrue(p1 in expected,"%s not expected" % p1)
            # Remove from the list
            expected.remove(p1)
            # Check target file is as expected
            p2_expected = os.path.join('/dummy/dir',
                                       os.path.relpath(p1,self.d.dirn))
            # BUG FIX: the failure message previously printed the arguments
            # in the wrong order (actual labelled as expected).
            self.assertEqual(p2,p2_expected,
                             "Expected '%s', got '%s'" % (p2_expected,p2))
        # List should be empty at the end
        self.assertEqual(len(expected),0,
                         "Some paths not returned: %s" % expected)
class TestCmpFilepair(unittest.TestCase):
    """Exercise cmp_filepair() for every pairing of files and symlinks."""

    def setUp(self):
        # Scratch directory holding the fixture files for a single test.
        self.wd = TestUtils.make_dir()

    def tearDown(self):
        # Discard the scratch directory and everything inside it.
        TestUtils.remove_dir(self.wd)

    def test_cmp_filepair_identical_files(self):
        """cmp_filepair matches identical files
        """
        src = TestUtils.make_file('test_file1',"Lorum ipsum",basedir=self.wd)
        dst = TestUtils.make_file('test_file2',"Lorum ipsum",basedir=self.wd)
        outcome = cmp_filepair((src,dst))
        self.assertEqual(outcome.status,Md5Checker.MD5_OK)

    def test_cmp_filepair_different_files(self):
        """cmp_filepair flags mismatch between differing files
        """
        src = TestUtils.make_file('test_file1',"Lorum ipsum",basedir=self.wd)
        dst = TestUtils.make_file('test_file2',"lorum ipsum",basedir=self.wd)
        outcome = cmp_filepair((src,dst))
        self.assertEqual(outcome.status,Md5Checker.MD5_FAILED)

    def test_cmp_filepair_identical_links(self):
        """cmp_filepair matches identical links
        """
        src = TestUtils.make_sym_link('/dummy/file',link_name='test_link1',basedir=self.wd)
        dst = TestUtils.make_sym_link('/dummy/file',link_name='test_link2',basedir=self.wd)
        outcome = cmp_filepair((src,dst))
        self.assertEqual(outcome.status,Md5Checker.LINKS_SAME)

    def test_cmp_filepair_different_links(self):
        """cmp_filepair flags mismatch between differing links
        """
        src = TestUtils.make_sym_link('/dummy/file1',link_name='test_link1',basedir=self.wd)
        dst = TestUtils.make_sym_link('/dummy/file2',link_name='test_link2',basedir=self.wd)
        outcome = cmp_filepair((src,dst))
        self.assertEqual(outcome.status,Md5Checker.LINKS_DIFFER)

    def test_cmp_filepair_file_to_link(self):
        """cmp_file flags mismatch between file and link
        """
        src = TestUtils.make_file('test_file1',"Lorum ipsum",basedir=self.wd)
        dst = TestUtils.make_sym_link('/dummy/file',link_name='test_link2',basedir=self.wd)
        outcome = cmp_filepair((src,dst))
        self.assertEqual(outcome.status,Md5Checker.MD5_ERROR)

    def test_cmp_filepair_link_to_file(self):
        """cmp_file flags mismatch between link and file
        """
        src = TestUtils.make_sym_link('/dummy/file',link_name='test_link1',basedir=self.wd)
        dst = TestUtils.make_file('test_file2',"Lorum ipsum",basedir=self.wd)
        outcome = cmp_filepair((src,dst))
        self.assertEqual(outcome.status,Md5Checker.TYPES_DIFFER)
class TestCmpDirs(unittest.TestCase):
    """Exercise cmp_dirs() on identical and diverging directory trees."""

    def setUp(self):
        # Reference example directory structure (files plus links) and an
        # identically-built copy to compare against.
        self.dref = ExampleDirLanguages()
        self.dref.create_directory()
        self.dcpy = ExampleDirLanguages()
        self.dcpy.create_directory()

    def tearDown(self):
        # Remove both example trees.
        self.dref.delete_directory()
        self.dcpy.delete_directory()

    def test_cmp_dirs_identical_dirs(self):
        """cmp_dirs works for identical directories
        """
        tally = cmp_dirs(self.dref.dirn,self.dcpy.dirn)
        self.assertEqual(tally[Md5Checker.MD5_OK],7)
        self.assertEqual(tally[Md5Checker.LINKS_SAME],6)

    def test_cmp_dirs_different_dirs(self):
        """cmp_dirs works for different directories
        """
        # Extra entries present only in the reference tree
        self.dref.add_file("extra","Additional file")
        self.dref.add_link("destination","place/you/want/to/go")
        # Entries present in both trees but with differing content/targets
        self.dref.add_file("more","Yet another file")
        self.dcpy.add_file("more","Yet another file, again")
        self.dref.add_link("where_to","somewhere")
        self.dcpy.add_link("where_to","somewhere/else")
        tally = cmp_dirs(self.dref.dirn,self.dcpy.dirn)
        self.assertEqual(tally[Md5Checker.MD5_OK],7)
        self.assertEqual(tally[Md5Checker.LINKS_SAME],6)
        self.assertEqual(tally[Md5Checker.MD5_FAILED],1)
        self.assertEqual(tally[Md5Checker.LINKS_DIFFER],1)
|
[
"peter.briggs@manchester.ac.uk"
] |
peter.briggs@manchester.ac.uk
|
a35539e60d21ee5e4cff7b291d6e310a1f7c4738
|
d6952f048727add5b54a521d04f6c9b5889bcd35
|
/test/test_plugin_package.py
|
d5c894862466254490283a1ebf7fe54be81aa9cf
|
[] |
no_license
|
TfedUD/python-sdk
|
bf719644041c2ab7b741af9c7fb8e5acfe085922
|
7ddc34611de44d2f9c5b217cf9b9e7cec27b2a27
|
refs/heads/master
| 2023-08-10T21:13:45.270193
| 2021-06-21T14:48:36
| 2021-06-21T14:51:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,778
|
py
|
# coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.13.0
Contact: info@pollination.cloud
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import pollination_sdk
from pollination_sdk.models.plugin_package import PluginPackage # noqa: E501
from pollination_sdk.rest import ApiException
class TestPluginPackage(unittest.TestCase):
    """PluginPackage unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test PluginPackage

        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included.
        """
        # model = pollination_sdk.models.plugin_package.PluginPackage()  # noqa: E501
        # BUG FIX: the generated stub used the JSON literal `null` (a
        # NameError in Python) and an unterminated multi-line string for
        # `readme`; both are replaced with valid Python below.  The manifest
        # is built once since only one branch runs per call.
        manifest = pollination_sdk.models.plugin.Plugin(
            annotations={
                'key': '0'
            },
            api_version='v1beta1',
            config=None,
            functions=[
                pollination_sdk.models.function.Function(
                    annotations={
                        'key': '0'
                    },
                    command='0',
                    description='0',
                    inputs=[
                        None
                    ],
                    name='0',
                    outputs=[
                        None
                    ],
                    type='Function', )
            ],
            metadata=None,
            type='Plugin', )
        if include_optional:
            return PluginPackage(
                created_at=datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                description='0',
                digest='0',
                icon='0',
                keywords=[
                    '0'
                ],
                manifest=manifest,
                readme='# Daylight Factor\nThis recipe runs a daylight factor simulation.',
                tag='0'
            )
        else:
            return PluginPackage(
                digest='0',
                manifest=manifest,
                tag='0',
            )

    def testPluginPackage(self):
        """Test PluginPackage"""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
[
"antoinedao1@gmail.com"
] |
antoinedao1@gmail.com
|
b0c4412b89a2539c8dbd8859bd81a94448288edd
|
0669d94428c972da19346e356861bf11bd668bc9
|
/test/test_subaccount_response_etat_etat.py
|
91fdf4f9bc18db35924042db2eafccbbc67173c0
|
[] |
no_license
|
mlemee/iSendProPython
|
e9a0f8351e33ae7598bd1380a26c2fe0a1dacd22
|
3add878dbcd682aa41f2bd07f98d8b56c8e5f9f3
|
refs/heads/master
| 2022-06-10T02:27:12.368498
| 2020-05-04T15:48:13
| 2020-05-04T15:48:13
| 261,206,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
# coding: utf-8
"""
API iSendPro
[1] Liste des fonctionnalités : - envoi de SMS à un ou plusieurs destinataires, - lookup HLR, - récupération des récapitulatifs de campagne, - gestion des répertoires, - ajout en liste noire. - comptage du nombre de caractères des SMS [2] Pour utiliser cette API vous devez: - Créer un compte iSendPro sur https://isendpro.com/ - Créditer votre compte - Remarque: obtention d'un crédit de test possible sous conditions - Noter votre clé de compte (keyid) - Elle vous sera indispensable à l'utilisation de l'API - Vous pouvez la trouver dans le rubrique mon \"compte\", sous-rubrique \"mon API\" - Configurer le contrôle IP - Le contrôle IP est configurable dans le rubrique mon \"compte\", sous-rubrique \"mon API\" - Il s'agit d'un système de liste blanche, vous devez entrer les IP utilisées pour appeler l'API - Vous pouvez également désactiver totalement le contrôle IP # noqa: E501
OpenAPI spec version: 1.1.1
Contact: support@isendpro.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.subaccount_response_etat_etat import SubaccountResponseEtatEtat # noqa: E501
from swagger_client.rest import ApiException
class TestSubaccountResponseEtatEtat(unittest.TestCase):
    """SubaccountResponseEtatEtat unit test stubs"""

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testSubaccountResponseEtatEtat(self):
        """Test SubaccountResponseEtatEtat"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.subaccount_response_etat_etat.SubaccountResponseEtatEtat()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this generated test module directly with `python <file>`.
    unittest.main()
|
[
"maxime.le.mee.checkandbang@gmail.com"
] |
maxime.le.mee.checkandbang@gmail.com
|
0e942ea860ba6f547b090dd3e9d362fd8a956a09
|
fb124e51024917d6479fa626d9607ff10f7a3aba
|
/storm-control/storm_control/sc_hardware/none/noneZStageModule.py
|
0367129a52211e72f02bf6d98b88d170fab8f5e2
|
[
"MIT"
] |
permissive
|
BehnamAbaie/storm-control
|
054bd7bbd903ed9635e4d1121c30544f58473c4f
|
0c686321142eccad62ce3365eae22c3b69229b0d
|
refs/heads/main
| 2023-06-18T08:04:01.108874
| 2021-07-14T00:51:15
| 2021-07-14T00:51:15
| 342,049,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,948
|
py
|
#!/usr/bin/env python
"""
Emulated Z stage functionality
Hazen 04/17
"""
from PyQt5 import QtCore
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.sc_hardware.baseClasses.hardwareModule as hardwareModule
import storm_control.sc_hardware.baseClasses.lockModule as lockModule
class NoneZStageFunctionality(hardwareModule.HardwareFunctionality, lockModule.ZStageFunctionalityMixin):
    """Emulated (no hardware) z stage functionality.

    Tracks a software-only z position, clamped to the configured
    [minimum, maximum] travel range, and reports every move through the
    zStagePosition Qt signal.
    """
    zStagePosition = QtCore.pyqtSignal(float)

    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.maximum = self.getParameter("maximum")
        self.minimum = self.getParameter("minimum")
        # NOTE(review): this is half the span (maximum - minimum), not the
        # midpoint of [minimum, maximum]; the two only coincide when
        # minimum == 0 -- confirm the intended starting position.
        self.z_position = 0.5 * (self.maximum - self.minimum)

    def goAbsolute(self, z_pos):
        """Move to z_pos (clamped to the travel range) and emit the new position."""
        clamped = min(self.maximum, max(self.minimum, z_pos))
        self.z_position = clamped
        self.zStagePosition.emit(self.z_position)

    def goRelative(self, z_delta):
        """Move by z_delta relative to the current position."""
        self.goAbsolute(self.z_position + z_delta)
class NoneZStageModule(hardwareModule.HardwareModule):
    """HAL module that exposes the emulated z stage functionality."""

    def __init__(self, module_params = None, qt_settings = None, **kwds):
        super().__init__(**kwds)
        # Pre-set to None so the attribute always exists, even if
        # functionality construction were to fail below.
        self.z_stage_functionality = None

        config = module_params.get("configuration")
        self.z_stage_functionality = NoneZStageFunctionality(parameters = config.get("parameters"))

    def getFunctionality(self, message):
        """Attach our functionality when the request names this module."""
        if (message.getData()["name"] != self.module_name):
            return
        response = halMessage.HalMessageResponse(source = self.module_name,
                                                 data = {"functionality" : self.z_stage_functionality})
        message.addResponse(response)

    def processMessage(self, message):
        """Handle 'get functionality' messages; everything else is ignored."""
        if message.isType("get functionality"):
            self.getFunctionality(message)
|
[
"noreply@github.com"
] |
BehnamAbaie.noreply@github.com
|
dd672e22f6b9460e05c57c72103c761fb7ba5b13
|
1bcbc4666a59cfc1eeec93152a6f4d8ea3103e11
|
/mdx_strike.py
|
c1e2185ea2dda0da5a31eefea5848413e5f0defe
|
[] |
no_license
|
xsren/my_blog
|
a005c991b1f819dbbc1041b95e2f4ce7691bf56a
|
5a7d87b7cc4e70b37b90d292cbb68b949ab2a51f
|
refs/heads/master
| 2021-03-27T15:25:55.806403
| 2017-11-11T14:10:09
| 2017-11-11T14:10:09
| 90,592,822
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
import markdown
STRIKE_RE = r'(-{2})(.+?)\2'
class StrikeExtension(markdown.Extension):
    """Markdown extension rendering ``--text--`` with a ``strike`` tag."""

    def extendMarkdown(self, md, md_globals):
        # Register the pattern under the name 'strike'; the '>strong'
        # location string inserts it just after the built-in 'strong'
        # pattern in the inline-pattern priority order.
        md.inlinePatterns.add('strike', markdown.inlinepatterns.SimpleTagPattern(STRIKE_RE, 'strike'), '>strong')
def makeExtension(configs=None):
    """Entry point used by markdown to instantiate this extension."""
    extension = StrikeExtension(configs=configs)
    return extension
|
[
"bestrenxs@gmail.com"
] |
bestrenxs@gmail.com
|
9bd05785e015991afd33124377bb512fc14e9e8b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02889/s191736717.py
|
3fcc479cdce9831954afefa641e5c33340ad8eee
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
# Shortest travel with a fuel tank of capacity L (presumably AtCoder
# ABC 143 E "Travel by Car" -- verify against the original problem).
# n towns, m undirected roads, tank capacity L; each of the q queries
# asks the minimum number of refuels needed to go from s to t.
n, m, L = map(int, input().split())
abc = [list(map(int, input().split())) for _ in range(m)]
q = int(input())
st = [list(map(int, input().split())) for _ in range(q)]

# d[i][j]: pairwise distance matrix, inf where (not yet) reachable.
d = [[float('inf') for _ in range(n)] for _ in range(n)]

for a, b, c in abc:
    # A road longer than the tank capacity can never be driven on one tank.
    if c > L:
        continue
    d[a-1][b-1] = c
    d[b-1][a-1] = c

def warshall_floyd(d):
    # All-pairs shortest paths, mutating d in place.
    # NOTE: reads the globals n and L.  Paths through k are skipped once
    # d[i][k] > L; that pruning is safe here because any distance above L
    # is discarded (treated as unreachable) by the thresholding below.
    for k in range(n):
        for i in range(n):
            if i == k or d[i][k] > L:
                continue
            for j in range(n):
                if i == j:
                    continue
                d[i][j] = min(d[i][j],d[i][k] + d[k][j])
    return d

# Pass 1: shortest driving distance between every pair of towns.
warshall_floyd(d)

# Re-weight: a pair reachable on a single tank (distance <= L) costs 1
# "tank"; everything else becomes unreachable.
for i in range(n):
    for j in range(n):
        if i == j:
            continue
        elif d[i][j] <= L:
            d[i][j] = 1
        else:
            d[i][j] = float('inf')

# Pass 2: minimum number of tanks needed between every pair of towns.
warshall_floyd(d)

for s, t in st:
    if d[s-1][t-1] == float('inf'):
        print(-1)
    else:
        # Refuels = tanks used - 1 (the initial full tank is free).
        print(d[s-1][t-1] - 1)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b87550bfd0f03a7059f92fcc41a2d2146199003b
|
ecdf9256853e11d6105e2b9ad92ba912602d97d7
|
/hackerrank/contest/project_euler/prime_pair_connection.py
|
1c15c254a9bb9448f4002f98039f487eeddce0eb
|
[] |
no_license
|
rgsriram/Algorithms
|
364fda568356834e32ec247438d21202bebc838d
|
d4f9acb1a60bd098a601d8173dfdad447a02fd74
|
refs/heads/master
| 2021-01-10T05:11:05.688731
| 2019-03-20T04:59:10
| 2019-03-20T04:59:10
| 49,176,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
__author__ = 'sriram'
def get_prime_numbers(p1, p2):
    """Return a list of all primes in the half-open range [p1, p2).

    The upper bound p2 is excluded, matching the original range(p1, p2)
    behaviour that prime_pair_connection relies on (the first prime past
    the range is appended separately by the caller).
    """
    primes = []
    # Starting at max(p1, 2) reproduces the original "num > 1" filter.
    for num in range(max(p1, 2), p2):
        # Trial division only up to sqrt(num) instead of the original
        # scan over every candidate divisor below num.
        is_prime = True
        i = 2
        while i * i <= num:
            if num % i == 0:
                is_prime = False
                break
            i += 1
        if is_prime:
            primes.append(num)
    return primes
def find_next_prime(a, b):
    """Return the smallest prime in the half-open range [a, b), or None.

    Fixes a bug in the original: for a <= 1 its inner trial-division loop
    range(2, p) was empty, so the for-else fell through and 0 or 1 was
    returned as if it were prime.
    """
    for candidate in range(max(a, 2), b):
        i = 2
        while i * i <= candidate:
            if candidate % i == 0:
                break
            i += 1
        else:
            # No divisor found up to sqrt(candidate) => prime.
            return candidate
    # Explicit None when the range holds no prime (original fell through).
    return None
def _modular_inverse(a, m):
    """Return x with (a * x) % m == 1, or None if a is not invertible mod m.

    Extended Euclid, avoiding the Python-3.8-only pow(a, -1, m) form since
    this file still uses Python 2 builtins elsewhere.
    """
    old_r, r = a % m, m
    old_s, s = 1, 0
    while r:
        quot = old_r // r
        old_r, r = r, old_r - quot * r
        old_s, s = s, old_s - quot * s
    if old_r != 1:
        return None
    return old_s % m


def find_prime_connection(primes):
    """Return the sum of "prime pair connections" over consecutive primes.

    For each consecutive pair (p1, p2) in *primes*, the connection S is the
    smallest positive integer divisible by p2 whose last digits are exactly
    p1 (Project Euler problem 134).  Pairs with no solution (p2 not coprime
    with 10**len(p1), e.g. p2 in {2, 5}) contribute 0.

    Fixes the original, which evaluated ``primes[i-1], primes[i]`` as a
    no-op tuple, never updated its accumulator, and returned None.
    """
    total = 0
    for i in range(1, len(primes)):
        p1, p2 = primes[i - 1], primes[i]
        k = 10 ** len(str(p1))  # modulus pinning the trailing digits to p1
        inv = _modular_inverse(p2, k)
        if inv is None:
            continue  # p2 shares a factor with 10**len(p1): no solution
        # Smallest positive multiple of p2 congruent to p1 modulo k:
        # S = ((p1 * p2^-1) mod k) * p2.
        total += ((p1 * inv) % k) * p2
    return total
def prime_pair_connection(p1, p2):
    """Compute, print, and return the prime-pair-connection sum for [p1, p2).

    Builds the primes in the range, appends the first prime past the range
    so the last in-range prime also has a successor pair (Bertrand's
    postulate guarantees one below 2*(last+1)), then reports the total.
    The original discarded the result and printed nothing.
    """
    primes = get_prime_numbers(p1, p2)
    if not primes:
        # Empty range: the original would have crashed on primes[-1].
        print(0)
        return 0
    last = primes[-1]
    primes.append(find_next_prime(last + 1, 2 * (last + 1)))
    total = find_prime_connection(primes)
    if total is not None:
        print(total)
    return total
if __name__ == '__main__':
    # NOTE(review): raw_input is Python 2 only -- under Python 3 this raises
    # NameError (it was renamed to input()).
    n = int(raw_input().strip())  # read but never used -- presumably a case count; verify
    (p1, p2) = map(int, raw_input().strip().split())
    prime_pair_connection(p1, p2)
|
[
"srignsh22@gmail.com"
] |
srignsh22@gmail.com
|
b8a736a22cb410537485ef80ae260fcc2f764f8c
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/tensorflow/python/profiler/tfprof_logger.py
|
2c7113ed5af0951210c044217a30d85aaf6955aa
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977
| 2017-12-14T13:50:44
| 2017-12-14T13:50:44
| 118,736,043
| 0
| 0
| null | 2018-01-24T08:30:23
| 2018-01-24T08:30:22
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a6c4e234622b91b3aca7f80f19569dcc2f356d4cce446bc9c17942a4daa80264
size 6919
|
[
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] |
seokinj@jangseog-in-ui-MacBook-Pro.local
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.