blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3480e923215921c85813159158d11cb1c3266241
|
72af42076bac692f9a42e0a914913e031738cc55
|
/01, 특강_210705_0706/02, source/CookData(2021.01.15)/Code06-05.py
|
02e7ffd300f97cb0f38c79208316d1ec8986a4e6
|
[] |
no_license
|
goareum93/Algorithm
|
f0ab0ee7926f89802d851c2a80f98cba08116f6c
|
ec68f2526b1ea2904891b929a7bbc74139a6402e
|
refs/heads/master
| 2023-07-01T07:17:16.987779
| 2021-08-05T14:52:51
| 2021-08-05T14:52:51
| 376,908,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
def isStackEmpty():
    """Return True when the module-level stack currently holds no items."""
    global SIZE, stack, top
    # top == -1 is the sentinel meaning "no element has been pushed yet".
    return top == -1

SIZE = 5
stack = [None] * SIZE
top = -1
print("스택이 비었는지 여부 ==>", isStackEmpty())
|
[
"goareum7@gmail.com"
] |
goareum7@gmail.com
|
e7f3e2452713e57c40f10d8e0d92f1f6a021a5e3
|
655050425d88874f570cab93d0613fd22abb8e06
|
/superman/dataset/__init__.py
|
0d5d78e309c63ed29fe0ac160d21389cc225ed51
|
[
"MIT"
] |
permissive
|
vishalbelsare/superman
|
86a23566323325d459d06f6ecad6d575b76972b6
|
346fc0d590f40bfe0141630e3146ec8e7ee18be3
|
refs/heads/master
| 2021-06-09T13:21:07.161740
| 2020-04-14T17:17:32
| 2020-04-14T17:17:32
| 143,188,314
| 0
| 0
|
MIT
| 2021-04-04T18:45:24
| 2018-08-01T17:35:44
|
Python
|
UTF-8
|
Python
| false
| false
| 157
|
py
|
from __future__ import absolute_import
from .ds import TrajDataset, VectorDataset
from .ds_view import DatasetView, MultiDatasetView
from .metadata import *
|
[
"perimosocordiae@gmail.com"
] |
perimosocordiae@gmail.com
|
486f719763164aae76c114ef5ed377847619037d
|
f95e73867e4383784d6fdd6a1c9fe06cffbfd019
|
/PythonToolkit/CameraVideo/VideoObjectTracking.py
|
5fa77270cb9a2b92474011d375924b1047c1764c
|
[] |
no_license
|
linxiaohui/CodeLibrary
|
da03a9ed631d1d44b098ae393b4bd9e378ab38d3
|
96a5d22a8c442c4aec8a064ce383aba8a7559b2c
|
refs/heads/master
| 2021-01-18T03:42:39.536939
| 2018-12-11T06:47:15
| 2018-12-11T06:47:15
| 85,795,767
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
# -*- coding: utf-8 -*-
# Track a colour-banded object seen by the default camera: threshold each
# frame to the BGR range below, box the largest matching contour, show the
# annotated frame, and optionally record it to output.avi.
import cv2
import os
import numpy as np
# Only one process at a time can acquire the camera device.
camera = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# Output file name, codec, frame rate, frame size, and colour flag.
out = cv2.VideoWriter('output.avi',fourcc,24.0,(640,480))
# VideoCapture.get() cannot report an accurate camera frame rate (always 0).
#print(camera.get(cv2.CAP_PROP_FPS))
print(camera.get(cv2.CAP_PROP_FRAME_HEIGHT), camera.get(cv2.CAP_PROP_FRAME_WIDTH))
# Lower/upper BGR bounds of the colour being tracked.
objLower = np.array([20, 20, 100], dtype = "uint8")
objUpper = np.array([100, 100, 200],dtype = "uint8")
while True:
    (grabbed, frame) = camera.read()
    if not grabbed:
        # Camera stopped delivering frames; leave the capture loop.
        break
    print(frame.shape)
    obj = cv2.inRange(frame, objLower, objUpper)
    obj = cv2.GaussianBlur(obj, (3, 3), 0)
    #print(obj.shape)
    # cv2.inRange returns `a thresholded image` (single channel already),
    # so the cvtColor call below would raise an error.
    #gray = cv2.cvtColor(obj, cv2.COLOR_BGR2GRAY)
    (_, cnts, _) = cv2.findContours(obj, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(cnts) > 0:
        # Draw a rotated bounding box around the largest contour found.
        cnt = sorted(cnts, key = cv2.contourArea, reverse = True)[0]
        rect = np.int32(cv2.boxPoints(cv2.minAreaRect(cnt)))
        cv2.drawContours(frame, [rect], -1, (0, 255, 0), 2)
    cv2.imshow("Camera", frame)
    # Write the annotated frame to the output file.
    # out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
camera.release()
out.release()
cv2.destroyAllWindows()
|
[
"llinxiaohui@126.com"
] |
llinxiaohui@126.com
|
ecc202cf0f419b414a4822218103363ee0bb5e9a
|
a1c20ec292350a4c8f2164ba21715414f8a77d19
|
/Udemy/PythonMegaCourse/Section 12-Databases/PythonWithDatabases/script1.py
|
10d35d8e80252412da1c086f2961b0c065006f25
|
[] |
no_license
|
nowacki69/Python
|
2ae621b098241a614836a50cb1102094bf8e689f
|
e5325562801624e43b3975d9f246af25517be55b
|
refs/heads/master
| 2021-08-22T21:30:39.534186
| 2019-09-03T01:21:11
| 2019-09-03T01:21:11
| 168,528,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
import sqlite3
def create_table():
    """Create the tbl_store table in lite.db if it does not exist yet."""
    connection = sqlite3.connect("lite.db")
    cursor = connection.cursor()
    cursor.execute("CREATE TABLE IF NOT EXISTS tbl_store (item TEXT, quantity INTEGER, price REAL)")
    connection.commit()
    connection.close()
def insert(item, quantity, price):
    """Append one (item, quantity, price) row to tbl_store."""
    connection = sqlite3.connect("lite.db")
    cursor = connection.cursor()
    # Parameter placeholders keep the values safely escaped.
    cursor.execute("INSERT INTO tbl_store VALUES (?, ?, ?)", (item, quantity, price))
    connection.commit()
    connection.close()
def view():
    """Return every row currently stored in tbl_store."""
    connection = sqlite3.connect("lite.db")
    rows = connection.cursor().execute("SELECT * FROM tbl_store").fetchall()
    connection.close()
    return rows
def delete(item):
    """Remove every tbl_store row whose item column equals *item*."""
    connection = sqlite3.connect("lite.db")
    connection.cursor().execute("DELETE FROM tbl_store WHERE item=?", (item,))
    connection.commit()
    connection.close()
def update(quantity, price, item):
    """Set quantity and price on every tbl_store row matching *item*."""
    connection = sqlite3.connect("lite.db")
    connection.cursor().execute(
        "UPDATE tbl_store SET quantity=?, price=? WHERE item=?",
        (quantity, price, item),
    )
    connection.commit()
    connection.close()
# One-off seeding/maintenance calls, kept commented out for reference:
# insert("Wine Glass", 8, 10.5)
# insert("Water Glass", 10, 5)
# insert("Coffee Cup", 10, 5)
# delete("Coffee Cup")
update(11, 6, "Water Glass")
print(view())
|
[
"nowacki69@gmail.com"
] |
nowacki69@gmail.com
|
7b6da5134be3bbf7c5b79c9dd974d501bc13dc0f
|
d5219cd3094e5a72efdf2ed7e321f6c586dee58f
|
/account_report.py
|
dcc6421f6374efdae4c6b8f9d1d1257e606076c1
|
[] |
no_license
|
Descomplica-Marketing/Google-Adwords
|
d721b178bc2384cf069103e2cc83b79d4f529c0d
|
808744852455aea9b24c4bd0fc9e4b065f729e54
|
refs/heads/master
| 2020-06-25T04:41:45.795739
| 2019-07-30T02:43:00
| 2019-07-30T02:43:00
| 199,204,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
from googleads import adwords
from functions import get_report_df
# AWQL query: yesterday's account-level performance, restricted to rows
# that actually accrued cost.
report_query = (adwords.ReportQueryBuilder()
    .Select('Date', 'AccountDescriptiveName', 'AdNetworkType1', 'AdNetworkType2', 'AveragePosition',
            'Clicks', 'ConversionRate', 'Conversions', 'Cost', 'Engagements', 'Impressions')
    .From('ACCOUNT_PERFORMANCE_REPORT')
    .Where('Cost').GreaterThan(0)
    .During('YESTERDAY')
    .Build())
# Create a list with your client ids
client_ids = ()
report = get_report_df(client_ids, report_query)
# CASTING
# int fields
report[['clicks', 'engagements', 'impressions']] = report[['clicks', 'engagements', 'impressions']].astype(int)
# percentage fields: strip the trailing '%' before converting to float
report['conv_rate'] = report['conv_rate'].str[:-1].astype(float)
# monetary fields: AdWords reports cost in micro-units of the currency
report[['cost']] = report[['cost']].astype(float) / 1000000
# float fields
report[['avg_position', 'conversions']] = report[['avg_position', 'conversions']].astype(float)
print(report)
|
[
"marketing-dev@descomplica.com.br"
] |
marketing-dev@descomplica.com.br
|
ba580ba89c8fad29d726dfb35cbc4a7b1bd9b0e3
|
e48f203d18b4eac537f5f69277fb497be1cec611
|
/backend/mobile_25_app_dev_15680/wsgi.py
|
70ed666f14facf20d2d59d70d3cb062261cc3522
|
[] |
no_license
|
crowdbotics-apps/mobile-25-app-dev-15680
|
107fea56fa665c134a51fc3ea0b642b0b582d15d
|
9423eee5529d4a8a48cda334b1ac6eb2a5760838
|
refs/heads/master
| 2023-01-18T19:37:13.026096
| 2020-11-25T16:05:43
| 2020-11-25T16:05:43
| 315,973,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
"""
WSGI config for mobile_25_app_dev_15680 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_25_app_dev_15680.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9b5d1cdf3282c3ca808fd6c68d5b75f12754cc6c
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarShapeConstraintAngleFromFixedDir.py
|
53925f2b3e8ecc1e9cd08fd6c3cf1445ad275918
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
# Auto-generated IronPython stub for the Autodesk Revit API type; the method
# bodies are placeholders (pass) and only signatures/docstrings carry meaning.
class RebarShapeConstraintAngleFromFixedDir(RebarShapeConstraint,IDisposable):
    """
    A constraint which can be applied to a RebarShapeSegment and drives the angle
    of the segment relative to a fixed direction in UV-space.
    RebarShapeConstraintAngleFromFixedDir(paramId: ElementId,sign: int,direction: UV)
    """
    def Dispose(self):
        """ Dispose(self: RebarShapeConstraint,A_0: bool) """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: RebarShapeConstraint,disposing: bool) """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod
    def __new__(self,paramId,sign,direction):
        """ __new__(cls: type,paramId: ElementId,sign: int,direction: UV) """
        pass
    Direction=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """A fixed direction in UV-space. The parameter will drive
    the segment's angle relative to this direction.
    Get: Direction(self: RebarShapeConstraintAngleFromFixedDir) -> UV
    Set: Direction(self: RebarShapeConstraintAngleFromFixedDir)=value
    """
    Sign=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """When the sign is 1,the Direction is rotated clockwise by the angle's value.
    When -1,the Direction is rotated counter-clockwise.
    Get: Sign(self: RebarShapeConstraintAngleFromFixedDir) -> int
    Set: Sign(self: RebarShapeConstraintAngleFromFixedDir)=value
    """
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
c886e273972ecb4f7d0f52ed30b61359eaaba35b
|
4f3a4c194451eae32f1ff7cf3b0db947e3892365
|
/162/main.py
|
b3936348ba1e48aaef408bd1f16b59b4e55140f2
|
[] |
no_license
|
szhongren/leetcode
|
84dd848edbfd728b344927f4f3c376b89b6a81f4
|
8cda0518440488992d7e2c70cb8555ec7b34083f
|
refs/heads/master
| 2021-12-01T01:34:54.639508
| 2021-11-30T05:54:45
| 2021-11-30T05:54:45
| 83,624,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
"""
A peak element is an element that is greater than its neighbors.
Given an input array where num[i] ≠ num[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that num[-1] = num[n] = -∞.
For example, in array [1, 2, 3, 1], 3 is a peak element and your function should return the index number 2.
click to show spoilers.
Note:
Your solution should be in logarithmic complexity.
"""
class Solution(object):
    """Locate the index of one peak element via recursive binary search."""

    def findPeakElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        self.l = len(nums)
        # Lists of length one or two are resolved without recursion.
        if self.l == 1:
            return 0
        if self.l == 2:
            return nums.index(max(nums))
        return self.findPeakHelper(nums, 0, self.l)

    def findPeakHelper(self, nums, start, end):
        """
        :type nums: List[int]
        :type start: int
        :type end: int
        :rtype: int
        """
        # A window of width <= 1, or one pinned at the last index, is a peak.
        if end - start <= 1 or start == self.l - 1:
            return start
        mid = (start + end) // 2
        if mid == 0 and nums[mid + 1] < nums[mid]:
            # Peak at the left edge of the array.
            return mid
        if mid == self.l - 1 and nums[mid - 1] < nums[mid]:
            # Peak at the right edge of the array.
            return mid
        if nums[mid - 1] < nums[mid] and nums[mid + 1] < nums[mid]:
            # Strictly higher than both neighbours: a peak.
            return mid
        if nums[mid - 1] > nums[mid]:
            # Values fall towards mid, so a peak exists in the left half.
            return self.findPeakHelper(nums, start, mid)
        if nums[mid + 1] > nums[mid]:
            # Values rise after mid, so a peak exists in the right half.
            return self.findPeakHelper(nums, mid + 1, end)
# Quick manual check: index 3 (value 4) is the peak of [1, 2, 3, 4, 3].
ans = Solution()
print(ans.findPeakElement([1,2,3,4,3]))
|
[
"shao.zhongren@gmail.com"
] |
shao.zhongren@gmail.com
|
828310819742c4c59c63891d0d30da4ec07c7773
|
bea0e4806236daff2a07df2f0949fe046ef76c03
|
/apps/op/controllers/real/sku.py
|
5b16a7fd874b0d32e199632174087a34a606d371
|
[] |
no_license
|
xutaoding/osp_autumn
|
68903f7acf78d7572777a500172e2f8c09afed48
|
daf260ecd5adf553490a8ac6b389a74439234b6a
|
refs/heads/master
| 2021-01-09T21:51:51.248630
| 2015-11-23T08:20:00
| 2015-11-23T08:20:00
| 46,706,177
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,523
|
py
|
# -*- coding: utf-8 -*-
from .. import BaseHandler
from .. import require
from voluptuous import Schema, Any
from autumn.torn.form import Form
from autumn.torn.paginator import Paginator
class SkuList(BaseHandler):
    """Paginated listing of non-deleted SKUs, optionally filtered by name."""

    @require()
    def get(self):
        sql = 'select * from sku where deleted = 0 '
        name = self.get_argument('name', '')
        params = []
        if name:
            # Substring match; only static SQL text is concatenated, the
            # value itself is bound as a parameter.
            sql += 'and name like %s '
            params.append('%' + name + '%')
        sql += 'order by created_at desc'
        page = Paginator(self, sql, params)
        self.render('real/sku_list.html', page=page, name=name)
# Shared form schema for both the add and edit SKU pages; extra=True lets
# additional posted fields pass through unvalidated.
add_list = Schema({
    'name': str,
    'price': str,
    'action': Any('edit', 'add'),
}, extra=True)
class SkuAdd(BaseHandler):
    """Create a new SKU attached to the fixed supplier row."""

    @require('storage')
    def get(self):
        # Render an empty form in 'add' mode.
        form = Form(self.request.arguments, add_list)
        form.action.value = 'add'
        self.render('real/sku.html', form=form)

    @require('storage')
    def post(self):
        form = Form(self.request.arguments, add_list)
        form.action.value = 'add'
        if not form.validate():
            return self.render('real/sku.html', form=form)
        # NOTE(review): assumes the supplier row exists; if the lookup
        # returns None the insert below will fail -- confirm seeding.
        supplier = self.db.get('select id from supplier where name = "视惠" limit 1')
        self.db.execute('insert into sku(name, price, supplier_id, created_at) values(%s, %s, %s, NOW())',
                        form.name.value, form.price.value, supplier["id"])
        self.redirect(self.reverse_url('real.show_sku'))
class SkuEdit(BaseHandler):
    """Edit the name and price of an existing SKU."""

    @require('storage')
    def get(self):
        # Pre-fill the form from the stored row.
        sku = self.db.get('select name, price from sku where id = %s', self.get_argument('id'))
        form = Form(sku, add_list)
        form.action.value = 'edit'
        self.render('real/sku.html', form=form, id=self.get_argument('id'))

    @require('storage')
    def post(self):
        form = Form(self.request.arguments, add_list)
        form.action.value = 'edit'
        if not form.validate():
            return self.render('real/sku.html', form=form, id=self.get_argument('id'))
        self.db.execute('update sku set name = %s, price = %s where id = %s',
                        form.name.value, form.price.value, self.get_argument('id'))
        self.redirect(self.reverse_url('real.show_sku'))
class SkuDelete(BaseHandler):
    """Soft-delete a SKU by flagging its deleted column."""

    @require('storage')
    def post(self):
        # Rows are never physically removed; SkuList filters on deleted = 0.
        id = self.get_argument('id')
        self.db.execute('update sku set deleted = 1 where id = %s', id)
        self.redirect(self.reverse_url('real.show_sku'))
|
[
"xutao.ding@chinascopefinancial.com"
] |
xutao.ding@chinascopefinancial.com
|
489e2344051b765a6a26da0bf20a60620a31b678
|
887fe8fa82b592e678e90e82ea2681f6f8a23a7f
|
/pygrim/formulas/airy.py
|
ac8bdeb4724cc36ec5c0c31787b7d3e823c1ea26
|
[
"MIT"
] |
permissive
|
wbhart/fungrim
|
efcfa32c2466c36f852fb01a9d575add732da46a
|
6febcbeee4b950bb421199706fc0c9ae729fe2e5
|
refs/heads/master
| 2020-08-01T23:07:46.997516
| 2019-09-26T16:32:20
| 2019-09-26T16:32:20
| 211,150,429
| 0
| 0
| null | 2019-09-26T17:57:32
| 2019-09-26T17:57:32
| null |
UTF-8
|
Python
| false
| false
| 5,483
|
py
|
# -*- coding: utf-8 -*-
from ..expr import *
# Declarative Fungrim topic for the Airy functions Ai/Bi: the topic page
# layout first, then one make_entry(...) call per formula ID listed above.
def_Topic(
    Title("Airy functions"),
    Section("Definitions"),
    Entries(
        "9ac289",
        "5a9d3f",
    ),
    Section("Illustrations"),
    Entries(
        "b4c968",
        "fa65f3",
    ),
    Section("Differential equation"),
    Entries(
        "51b241",
        "de9800", # Wronskian
    ),
    Section("Special values"),
    Entries(
        "693cfe",
        "807917",
        "9a8d4d",
        "fba07c",
    ),
    Section("Higher derivatives"),
    Entries(
        "b2e9d0", # ai''
        "70ec9f", # bi''
        "eadca2", # recurrence
    ),
    Section("Hypergeometric representations"),
    Entries(
        "01bbb6",
        "bd319e",
        "20e530",
        "4d65e5",
    ),
    Section("Analytic properties"),
    Entries(
        "def37e",
        "1f0577",
        "90f31e",
        "b88f65",
        "7194d4",
        "d1f9d0",
        "a2df77",
    ),
)
# Symbol definitions.
make_entry(ID("9ac289"),
    SymbolDefinition(AiryAi, AiryAi(z), "Airy function of the first kind"))
make_entry(ID("5a9d3f"),
    SymbolDefinition(AiryBi, AiryBi(z), "Airy function of the second kind"))
# X-ray illustrations on a square complex window.
make_entry(ID("b4c968"),
    Image(Description("X-ray of", AiryAi(z), "on", Element(z, ClosedInterval(-6,6) + ClosedInterval(-6,6)*ConstI)),
        ImageSource("xray_airy_ai")),
    description_xray,
    )
make_entry(ID("fa65f3"),
    Image(Description("X-ray of", AiryBi(z), "on", Element(z, ClosedInterval(-6,6) + ClosedInterval(-6,6)*ConstI)),
        ImageSource("xray_airy_bi")),
    description_xray,
    )
# Defining ODE y'' - z y = 0 and the Wronskian of the Ai/Bi pair.
make_entry(ID("51b241"),
    Formula(Where(Equal(ComplexDerivative(y(z), For(z, z, 2)) - z*y(z), 0), Equal(y(z), C*AiryAi(z) + D*AiryBi(z)))),
    Variables(z, C, D),
    Assumptions(And(Element(z, CC), Element(C, CC), Element(D, CC))))
make_entry(ID("de9800"),
    Formula(Equal(AiryAi(z)*AiryBi(z,1)-AiryAi(z,1)*AiryBi(z), 1/ConstPi)),
    Variables(z),
    Element(z, CC))
# Special values at z = 0 with certified real enclosures.
make_entry(ID("693cfe"),
    Formula(EqualAndElement(AiryAi(0), Div(1, Pow(3,Div(2,3))*GammaFunction(Div(2,3))), RealBall(Decimal("0.355028053887817239260063186004"), Decimal("1.84e-31")))))
make_entry(ID("807917"),
    Formula(EqualAndElement(AiryAi(0,1), -Div(1, Pow(3,Div(1,3))*GammaFunction(Div(1,3))), RealBall(Decimal("-0.258819403792806798405183560189"), Decimal("2.04e-31")))))
make_entry(ID("9a8d4d"),
    Formula(EqualAndElement(AiryBi(0), Div(1, Pow(3,Div(1,6))*GammaFunction(Div(2,3))), RealBall(Decimal("0.614926627446000735150922369094"), Decimal("3.87e-31")))))
make_entry(ID("fba07c"),
    Formula(EqualAndElement(AiryBi(0,1), Div(Pow(3,Div(1,6)), GammaFunction(Div(1,3))), RealBall(Decimal("0.448288357353826357914823710399"), Decimal("1.72e-31")))))
# Second derivatives follow directly from the ODE; higher ones by recurrence.
make_entry(ID("b2e9d0"),
    Formula(Equal(AiryAi(z,2), z*AiryAi(z))),
    Variables(z),
    Assumptions(Element(z, CC)))
make_entry(ID("70ec9f"),
    Formula(Equal(AiryBi(z,2), z*AiryBi(z))),
    Variables(z),
    Assumptions(Element(z, CC)))
make_entry(ID("eadca2"),
    Formula(Where(Equal(ComplexDerivative(y(z), For(z, z, n)), z*ComplexDerivative(y(z), For(z, z, n-2)) + (n-2)*ComplexDerivative(y(z), For(z, z, n-3))),
        Equal(y(z), C*AiryAi(z) + D*AiryBi(z)))),
    Variables(n, z, C, D),
    Assumptions(And(Element(z, CC), Element(n, ZZGreaterEqual(3)), Element(C, CC), Element(D, CC))))
# Hypergeometric (0F1) series representations of Ai, Bi and their derivatives.
make_entry(ID("01bbb6"),
    Formula(Equal(AiryAi(z), AiryAi(0)*Hypergeometric0F1(Div(2,3),z**3/9) + z*AiryAi(0,1)*Hypergeometric0F1(Div(4,3),z**3/9))),
    Variables(z),
    Assumptions(Element(z, CC)))
make_entry(ID("bd319e"),
    Formula(Equal(AiryBi(z), AiryBi(0)*Hypergeometric0F1(Div(2,3),z**3/9) + z*AiryBi(0,1)*Hypergeometric0F1(Div(4,3),z**3/9))),
    Variables(z),
    Assumptions(Element(z, CC)))
make_entry(ID("20e530"),
    Formula(Equal(AiryAi(z,1), AiryAi(0,1)*Hypergeometric0F1(Div(1,3),z**3/9) + (z**2/2)*AiryAi(0)*Hypergeometric0F1(Div(5,3),z**3/9))),
    Variables(z),
    Assumptions(Element(z, CC)))
make_entry(ID("4d65e5"),
    Formula(Equal(AiryBi(z,1), AiryBi(0,1)*Hypergeometric0F1(Div(1,3),z**3/9) + (z**2/2)*AiryBi(0)*Hypergeometric0F1(Div(5,3),z**3/9))),
    Variables(z),
    Assumptions(Element(z, CC)))
# Analytic structure: entire functions, only singularity is an essential
# one at unsigned infinity; no poles, branch points or branch cuts.
make_entry(ID("def37e"),
    Formula(Equal(HolomorphicDomain(C*AiryAi(z) + D*AiryBi(z), z, Union(CC, Set(UnsignedInfinity))), CC)),
    Variables(C, D),
    Assumptions(And(Element(C, CC), Element(D, CC), Not(And(Equal(C,0), Equal(D,0))))))
make_entry(ID("1f0577"),
    Formula(Equal(Poles(C*AiryAi(z) + D*AiryBi(z), z, Union(CC, Set(UnsignedInfinity))), Set())),
    Variables(C, D),
    Assumptions(And(Element(C, CC), Element(D, CC), Not(And(Equal(C,0), Equal(D,0))))))
make_entry(ID("90f31e"),
    Formula(Equal(EssentialSingularities(C*AiryAi(z) + D*AiryBi(z), z, Union(CC, Set(UnsignedInfinity))), Set(UnsignedInfinity))),
    Variables(C, D),
    Assumptions(And(Element(C, CC), Element(D, CC), Not(And(Equal(C,0), Equal(D,0))))))
make_entry(ID("b88f65"),
    Formula(Equal(BranchPoints(C*AiryAi(z) + D*AiryBi(z), z, Union(CC, Set(UnsignedInfinity))), Set())),
    Variables(C, D),
    Assumptions(And(Element(C, CC), Element(D, CC))))
make_entry(ID("7194d4"),
    Formula(Equal(BranchCuts(C*AiryAi(z) + D*AiryBi(z), z, CC), Set())),
    Variables(C, D),
    Assumptions(And(Element(C, CC), Element(D, CC))))
# All complex zeros of Ai and Ai' lie on the real line.
make_entry(ID("d1f9d0"),
    Formula(Subset(Zeros(AiryAi(z), Var(z), Element(z, CC)), RR)))
make_entry(ID("a2df77"),
    Formula(Subset(Zeros(AiryAi(z,1), Var(z), Element(z, CC)), RR)))
|
[
"fredrik.johansson@gmail.com"
] |
fredrik.johansson@gmail.com
|
6abc53ffc8fe957131d887d4192ae8073b48d1be
|
6e41636b8fae338cc3d1cb0885ca21c17af64ded
|
/echo-client.py
|
8707638e8dbad4b61d11bb076b8fa20bfcfbc768
|
[] |
no_license
|
Menah3m/socket_work
|
89e89febafce913b904c47549cbab558fb39a1d9
|
6a9eb05be0386fc709c92dc86afa67167d007c0a
|
refs/heads/master
| 2021-03-26T13:42:38.004924
| 2020-03-16T13:26:17
| 2020-03-16T13:26:17
| 247,708,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# Name:echo-client
# Author:Yasu
# Time:2020/3/16
# Minimal TCP echo client: connect to a local server, send one message,
# and print whatever comes back.
import socket
Host = '127.0.0.1'  # server address (localhost)
Port = 6500  # server port
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
    sock.connect((Host,Port))
    sock.sendall(b'Hello World!')
    # One recv suffices for a short echo payload (up to 1024 bytes).
    data = sock.recv(1024)
    print('Received!', repr(data))
|
[
"568200065@qq.com"
] |
568200065@qq.com
|
f3d34c28888679e66d0ab10777e49c22b3272308
|
ebcbbe645d70d4f756704d3c5113ba25e8a1814c
|
/fabfile.py
|
7cd23c5d488fb6645aa621eda37ae36c6056dd84
|
[
"BSD-3-Clause"
] |
permissive
|
abhilashsn/standup
|
2ea02078c6616c88f1a8cc28508b948bf4001c30
|
998341af354ed0ddcd15b673ea7af090a7efbce6
|
refs/heads/master
| 2021-01-18T09:11:58.371312
| 2014-08-29T16:25:07
| 2014-08-29T16:25:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 355
|
py
|
from fabric.api import env, local
env.hosts = ['localhost']  # fabric runs all tasks on the local machine
def npm_install():
    """Correctly runs npm install"""
    # npm insists on package.json: stage node.json under that name,
    # install, then remove the temporary copy.
    local('cp node.json package.json')
    local('npm install')
    local('rm package.json')
def test():
    """Run tests with coverage"""
    # Coverage is restricted to the standup package; --cover-inclusive
    # also counts modules the tests never import.
    local('nosetests --with-coverage --cover-package=standup '
          '--cover-inclusive')
|
[
"rehandalal@gmail.com"
] |
rehandalal@gmail.com
|
b5a71f36acc625624a71208d512343d843b77b43
|
045ec3ae16fc554a05510abc3697557ebc5ce304
|
/CIME/tests/test_unit_hist_utils.py
|
fe6d4866c34d5f7a2d90e63a11b14989b5d4d88b
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
ESMCI/cime
|
c09223ee9b8a463bd00741ff39f60fda7639af89
|
02fad90a379cdbd3c1106cbd63324480f0bf7a22
|
refs/heads/master
| 2023-08-16T07:03:22.224344
| 2023-08-03T19:47:53
| 2023-08-03T19:47:53
| 31,605,662
| 159
| 179
|
NOASSERTION
| 2023-09-12T18:38:42
| 2015-03-03T15:33:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,803
|
py
|
import io
import unittest
from unittest import mock
from CIME.hist_utils import copy_histfiles
from CIME.XML.archive import Archive
class TestHistUtils(unittest.TestCase):
    """Unit tests for CIME.hist_utils.copy_histfiles (file copies mocked)."""

    @mock.patch("CIME.hist_utils.safe_copy")
    def test_copy_histfiles_exclude(self, safe_copy):
        # Two components report history files; the first (cpl) is marked
        # excluded, so only the atm file should be copied.
        case = mock.MagicMock()
        case.get_env.return_value.get_latest_hist_files.side_effect = [
            ["/tmp/testing.cpl.hi.nc"],
            ["/tmp/testing.atm.hi.nc"],
        ]
        case.get_env.return_value.exclude_testing.side_effect = [True, False]
        # Values returned in the order copy_histfiles queries them.
        case.get_value.side_effect = [
            "/tmp",  # RUNDIR
            None,  # RUN_REFCASE
            "testing",  # CASE
            True,  # TEST
            True,  # TEST
        ]
        case.get_compset_components.return_value = ["atm"]
        test_files = [
            "testing.cpl.hi.nc",
        ]
        with mock.patch("os.listdir", return_value=test_files):
            comments, num_copied = copy_histfiles(case, "base")
        assert num_copied == 1

    @mock.patch("CIME.hist_utils.safe_copy")
    def test_copy_histfiles(self, safe_copy):
        # Single (cpl) component, nothing excluded: exactly one copy.
        case = mock.MagicMock()
        case.get_env.return_value.get_latest_hist_files.return_value = [
            "/tmp/testing.cpl.hi.nc",
        ]
        case.get_env.return_value.exclude_testing.return_value = False
        case.get_value.side_effect = [
            "/tmp",  # RUNDIR
            None,  # RUN_REFCASE
            "testing",  # CASE
            True,  # TEST
        ]
        case.get_compset_components.return_value = []
        test_files = [
            "testing.cpl.hi.nc",
        ]
        with mock.patch("os.listdir", return_value=test_files):
            comments, num_copied = copy_histfiles(case, "base")
        assert num_copied == 1
|
[
"boutte.jason@gmail.com"
] |
boutte.jason@gmail.com
|
a5197aea32de8058d3e8c39cda831c551cb28f70
|
91948d5be26636f1f2b941cb933701ea626a695b
|
/problem201_google_triangl number.py
|
8997884bbddd5865d98e2a33713e6f7f60148908
|
[
"MIT"
] |
permissive
|
loghmanb/daily-coding-problem
|
4ae7dd201fde5ee1601e0acae9e9fc468dcd75c9
|
b2055dded4276611e0e7f1eb088e0027f603aa7b
|
refs/heads/master
| 2023-08-14T05:53:12.678760
| 2023-08-05T18:12:38
| 2023-08-05T18:12:38
| 212,894,228
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
'''
This problem was asked by Google.
You are given an array of arrays of integers, where each array corresponds to a row in a triangle of numbers.
For example, [[1], [2, 3], [1, 5, 1]] represents the triangle:
1
2 3
1 5 1
We define a path in the triangle to start at the top and go down one row at a time to an adjacent value,
eventually ending with an entry on the bottom row.
For example, 1 -> 3 -> 5. The weight of the path is the sum of the entries.
Write a program that returns the weight of the maximum weight path.
'''
import unittest
class Solution:
    """Maximum-weight top-to-bottom path in a triangle of numbers."""

    def findMaxWeight(self, triangle):
        """Return the weight of the heaviest top-to-bottom path.

        :param triangle: list of rows, where row ``i`` holds ``i + 1`` numbers
                         and each step moves to an adjacent entry below.
        :return: the maximum achievable path weight.

        Bug fix versus the original: accumulation now happens on a rolling
        working row, so the caller's ``triangle`` is no longer mutated
        in place.  The returned value is unchanged.
        """
        # best[j]: heaviest path weight ending at column j of the last
        # processed row.  Start from a copy of the apex row.
        best = list(triangle[0])
        max_val = best[0]
        for i in range(1, len(triangle)):
            row = triangle[i]
            width = len(row)
            current = [0] * width
            for j in range(width):
                if j == 0:
                    # Left edge: the only parent is column 0 above.
                    current[j] = row[j] + best[0]
                elif j == width - 1:
                    # Right edge: the only parent is column j - 1 above.
                    current[j] = row[j] + best[j - 1]
                else:
                    # Interior: take the better of the two parents.
                    current[j] = row[j] + max(best[j - 1:j + 1])
                if max_val < current[j]:
                    max_val = current[j]
            best = current
        return max_val
class SolutionTestUnit(unittest.TestCase):
    """Unit tests for Solution.findMaxWeight."""
    def setUp(self):
        self.solution = Solution()
    def test_simple_triangle(self):
        # 1 -> 3 -> 5 is the heaviest path, with weight 9.
        triangle = [[1], [2, 3], [1, 5, 1]]
        self.assertEqual(self.solution.findMaxWeight(triangle), 9)
if __name__ == "__main__":
    unittest.main()
|
[
"loghmanb@gmail.com"
] |
loghmanb@gmail.com
|
6fdc84973dc21b05bd052a39b41e700fb09348f8
|
360c777a2b77be466b1cf7c8fd74d6fd04f56b55
|
/nexus_auth/utils/eventlog.py
|
1f7a88e78c520a5d6577a148fd8699927ab01510
|
[
"MIT"
] |
permissive
|
hreeder/nexus-auth
|
790a3b2623ddf443138a4b0f0af1380dbc4db8ae
|
8d51aef01647e32ba4a284f02de73a2caad7cf49
|
refs/heads/master
| 2021-01-10T10:08:37.190558
| 2016-02-29T12:27:21
| 2016-02-29T12:27:21
| 52,789,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
from nexus_auth import app, db
from nexus_auth.models.logging import AuditLogEntry
import requests
def log_event(origin, action, user, message, log_db=True, log_jabber=True):
    """Record an audit event in the database and/or forward it to jabber.

    :param origin: subsystem name; stored lowercased and used as a tag.
    :param action: action type; stored lowercased.
    :param user: object exposing ``uid`` and ``username`` attributes.
    :param message: free-form description of what happened.
    :param log_db: when True, persist an AuditLogEntry row.
    :param log_jabber: when True, post the event to the jabber log bot.
    """
    if log_db:
        entry = AuditLogEntry(user_id=user.uid,
                              area=origin.lower(),
                              action_type=action.lower(),
                              action_item=message)
        db.session.add(entry)
        db.session.commit()
    if log_jabber:
        post_jabber(origin, user.username, message)
def post_jabber(origin, username, message):
    """POST one log line to the LOGBOT endpoint configured on the app.

    :param origin: subsystem name, sent as the secondary tag.
    :param username: actor name, prefixed onto the message text.
    :param message: event description.
    """
    post_url = app.config['LOGBOT']['url']
    secret_key = app.config['LOGBOT']['key']
    logbot_args = {
        'key': secret_key,
        'tag': "Nexus",
        'tag2': origin,
        'message': username + ": " + message
    }
    # NOTE(review): the response is ignored, so delivery is best-effort.
    request = requests.request('POST', post_url, data=logbot_args)
|
[
"harry@harryreeder.co.uk"
] |
harry@harryreeder.co.uk
|
4e3dee2f1887cb0184fd6988050a82662ecb68dd
|
0a3e24df172a206a751217e5f85b334f39983101
|
/Design Pattern/sub1/abstract_factory.py
|
132af56149cfd33171faadea742270d4210782f1
|
[] |
no_license
|
yeboahd24/python202
|
1f399426a1f46d72da041ab3d138c582c695462d
|
d785a038183e52941e0cee8eb4f6cedd3c6a35ed
|
refs/heads/main
| 2023-05-06T04:14:19.336839
| 2021-02-10T02:53:19
| 2021-02-10T02:53:19
| 309,841,303
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
#!/usr/bin/env python3
from decimal import Decimal
class Factory(object):  # concrete default factory (fills the abstract role)
    """Build the primitive pieces: an empty sequence and Decimal numbers."""

    def build_sequence(self):
        """Return a fresh, empty container for parsed items."""
        return []

    def build_number(self, string):
        """Convert one textual number into a Decimal."""
        return Decimal(string)
class Loader(object):
    """Parse a comma-separated string using a pluggable factory."""

    @staticmethod
    def load(string, factory):
        """Split *string* on commas and collect factory-built numbers.

        :param string: comma-separated numeric text, e.g. ``'1.23, 4.56'``.
        :param factory: object providing ``build_sequence()`` and
                        ``build_number(substring)``.
        :return: the sequence produced by the factory, one item per field.

        Declared ``@staticmethod`` so the original class-level call style
        ``Loader.load(...)`` keeps working and instance calls no longer
        break (the original bare function had no ``self`` parameter and
        failed when invoked on a Loader instance).
        """
        sequence = factory.build_sequence()
        for substring in string.split(','):
            item = factory.build_number(substring)
            sequence.append(item)
        return sequence
# Demo: parse two decimal numbers with the default factory.
f = Factory()
result = Loader.load('1.23, 4.56', f)
print(result)
|
[
"noreply@github.com"
] |
yeboahd24.noreply@github.com
|
ff22374f949580d7d69de2945038ce61fad4d099
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/336/usersdata/288/98639/submittedfiles/matriz2.py
|
4a79841bb3410b607b59d2b22c904ae1e0d6c91f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
# -*- coding: utf-8 -*-
# Reads an n x n matrix and prints 'S' when every row sum, every column
# sum and both diagonal sums are all equal (a "magic" matrix), else 'N'.
#
# Defects fixed from the original submission:
#   * np.empy / mareizdiag typos (would raise AttributeError/NameError);
#   * unbalanced parentheses on the transpose comparison (SyntaxError);
#   * the transpose was filled via matriztrans[i][i] instead of [j][i];
#   * the row-sum loop compared row i with row i+1 for i up to n-1,
#     indexing one past the last row;
#   * duplicated/unreachable else/print branches at the end.
import numpy as np

n = int(input('Digite um valor >=2: '))
while n < 2:
    n = int(input('Digite um valor >=2: '))

matriz = np.empty([n, n])
for i in range(0, n, 1):
    for j in range(0, n, 1):
        matriz[i][j] = float(input('Digite um valor'))

# Column sums come from the transpose; the two diagonals are the main
# diagonal and the anti-diagonal.
matriztrans = matriz.T
diag_principal = sum(matriz[i][i] for i in range(n))
diag_secundaria = sum(matriz[i][n - 1 - i] for i in range(n))

alvo = sum(matriz[0])
linhas_iguais = all(sum(matriz[i]) == alvo for i in range(n))
colunas_iguais = all(sum(matriztrans[i]) == alvo for i in range(n))
diagonais_iguais = diag_principal == alvo and diag_secundaria == alvo

if linhas_iguais and colunas_iguais and diagonais_iguais:
    print('S')
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
26a316bf7646387f30d2cf7bd6388f587906bc45
|
ff7c392e46baa2774b305a4999d7dbbcf8a3c0b3
|
/ask-sdk-model/ask_sdk_model/interfaces/alexa/presentation/aplt/set_value_command.py
|
630771459bd51d47999d9c25bc5d678cf3ce8029
|
[
"Apache-2.0"
] |
permissive
|
rivamarco/alexa-apis-for-python
|
83d035ba5beb5838ae977777191fa41cbe4ea112
|
62e3a9057a26003e836fa09aa12a2e1c8b62d6e0
|
refs/heads/master
| 2021-01-03T20:44:12.977804
| 2020-02-13T10:27:27
| 2020-02-13T10:29:24
| 240,229,385
| 2
| 0
|
Apache-2.0
| 2020-02-13T10:05:45
| 2020-02-13T10:05:45
| null |
UTF-8
|
Python
| false
| false
| 5,843
|
py
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.alexa.presentation.aplt.command import Command
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class SetValueCommand(Command):
"""
Change a dynamic property of a component without redrawing the screen.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param screen_lock: If true, disable the Interaction Timer.
:type screen_lock: (optional) bool
:param when: A conditional expression to be evaluated in device. If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
:param component_id: The id of the component whose value to set.
:type component_id: (optional) str
:param object_property: The name of the property to set.
:type object_property: (optional) str
:param value: The property value to set.
:type value: (optional) str
"""
deserialized_types = {
'object_type': 'str',
'delay': 'int',
'description': 'str',
'screen_lock': 'bool',
'when': 'bool',
'component_id': 'str',
'object_property': 'str',
'value': 'str'
} # type: Dict
attribute_map = {
'object_type': 'type',
'delay': 'delay',
'description': 'description',
'screen_lock': 'screenLock',
'when': 'when',
'component_id': 'componentId',
'object_property': 'property',
'value': 'value'
} # type: Dict
supports_multiple_types = False
def __init__(self, delay=None, description=None, screen_lock=None, when=None, component_id=None, object_property=None, value=None):
# type: (Optional[int], Optional[str], Optional[bool], Union[bool, str, None], Optional[str], Optional[str], Optional[str]) -> None
"""Change a dynamic property of a component without redrawing the screen.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param screen_lock: If true, disable the Interaction Timer.
:type screen_lock: (optional) bool
:param when: A conditional expression to be evaluated in device. If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
:param component_id: The id of the component whose value to set.
:type component_id: (optional) str
:param object_property: The name of the property to set.
:type object_property: (optional) str
:param value: The property value to set.
:type value: (optional) str
"""
self.__discriminator_value = "SetValue" # type: str
self.object_type = self.__discriminator_value
super(SetValueCommand, self).__init__(object_type=self.__discriminator_value, delay=delay, description=description, screen_lock=screen_lock, when=when)
self.component_id = component_id
self.object_property = object_property
self.value = value
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SetValueCommand):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
|
[
"ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com"
] |
ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com
|
05448ed43b175ae1a122ed5a12db2aec2e466780
|
844501294ca37f1859b9aa0a258e6dd6b1bf2349
|
/stubs/parsedatetime/pdt_locales/icu.pyi
|
93e28da6e29ef5045ea2b491419e771557f51683
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
1ts-org/snipe
|
2ac1719bc8f6b3b158c04536464f866c34051253
|
ad84a629e9084f161e0fcf811dc86ba54aaf9e2b
|
refs/heads/master
| 2021-06-04T22:32:36.038607
| 2020-03-27T05:18:36
| 2020-04-05T21:50:42
| 18,642,653
| 6
| 3
|
NOASSERTION
| 2019-10-08T02:02:50
| 2014-04-10T16:01:32
|
Python
|
UTF-8
|
Python
| false
| false
| 271
|
pyi
|
# Stubs for parsedatetime.pdt_locales.icu (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
def icu_object(mapping: Any): ...
def merge_weekdays(base_wd: Any, icu_wd: Any): ...
def get_icu(locale: Any): ...
|
[
"kcr@1ts.org"
] |
kcr@1ts.org
|
62bfc768f22263927da02c41851d63ced55b971a
|
4f74e6d72b98cd1da2190313e4a7eb9d342cc93d
|
/glitchtip/asgi.py
|
f479b235c07dce1d668cb26b81df241eac0257c0
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
adamgogogo/glitchtip-backend
|
ef0c529b71d5a4632a235b40a10e0b428a1cee3a
|
ee71d1b732d92868189d520aa111c09b116b7b22
|
refs/heads/master
| 2023-02-01T23:10:53.734450
| 2020-12-19T19:32:10
| 2020-12-19T19:32:10
| 323,588,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for glitchtip project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'glitchtip.settings')
application = get_asgi_application()
|
[
"david@burkesoftware.com"
] |
david@burkesoftware.com
|
3f77faceca54e6c9157b0bf96c05959d07e66987
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/containerregistry/v20170301/list_registry_credentials.py
|
e349f4d7e946280f0116c31183bbc0cfceebebcf
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 2,812
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListRegistryCredentialsResult',
'AwaitableListRegistryCredentialsResult',
'list_registry_credentials',
]
@pulumi.output_type
class ListRegistryCredentialsResult:
"""
The response from the ListCredentials operation.
"""
def __init__(__self__, passwords=None, username=None):
if passwords and not isinstance(passwords, list):
raise TypeError("Expected argument 'passwords' to be a list")
pulumi.set(__self__, "passwords", passwords)
if username and not isinstance(username, str):
raise TypeError("Expected argument 'username' to be a str")
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def passwords(self) -> Optional[Sequence['outputs.RegistryPasswordResponse']]:
"""
The list of passwords for a container registry.
"""
return pulumi.get(self, "passwords")
@property
@pulumi.getter
def username(self) -> Optional[str]:
"""
The username for a container registry.
"""
return pulumi.get(self, "username")
class AwaitableListRegistryCredentialsResult(ListRegistryCredentialsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListRegistryCredentialsResult(
passwords=self.passwords,
username=self.username)
def list_registry_credentials(registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListRegistryCredentialsResult:
"""
The response from the ListCredentials operation.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry/v20170301:listRegistryCredentials', __args__, opts=opts, typ=ListRegistryCredentialsResult).value
return AwaitableListRegistryCredentialsResult(
passwords=__ret__.passwords,
username=__ret__.username)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
2e9f9655ca4058857243f10f5a530a99417d7011
|
70f1c694bea6178c98b134b9c44952ef6693be9f
|
/CAAS/ReNameSRA_RelocaTEi_ReRunOnResults.py
|
25ba7a64d511690710dea248e4e0a2a1f10ef582
|
[] |
no_license
|
JinfengChen/Rice_pop
|
5c19c5837805e51ddb3b2ffba4baffdc59c9bfd3
|
ef272bf4825b29610c94de55eb53f231fb5febc6
|
refs/heads/master
| 2020-04-07T04:55:36.606594
| 2018-03-02T16:52:53
| 2018-03-02T16:52:53
| 33,501,941
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,540
|
py
|
#!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import re
import os
import argparse
import glob
import time
from Bio import SeqIO
def usage():
test="name"
message='''
python ReNameSRA_RelocaTEi.py --input Japonica_fastq
Run RelocaTEi for rice strain in Japonica_fastq
'''
print message
def runjob(script, lines):
cmd = 'perl /rhome/cjinfeng/BigData/software/bin/qsub-pbs.pl --maxjob 80 --lines %s --interval 120 --resource nodes=1:ppn=1,walltime=100:00:00,mem=10G --convert no %s' %(lines, script)
#print cmd
os.system(cmd)
def fasta_id(fastafile):
fastaid = defaultdict(str)
for record in SeqIO.parse(fastafile,"fasta"):
fastaid[record.id] = 1
return fastaid
def readtable(infile):
data = defaultdict(str)
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r' ',line)
data[unit[0]] = line
#print unit[0], line
return data
def rerun_shell(outfile, script, topdir):
script_cmd = readtable(script)
script_list= re.split(r' ', script_cmd['python'])
script_list[1] = '/rhome/cjinfeng/BigData/00.RD/RelocaTE2_mPing/scripts/relocaTE_absenceFinder.py'
ofile = open(outfile, 'w')
for i in range(1,13):
script_list[3] = 'Chr%s' %(i)
cmd = ' '.join(script_list)
cmd = re.sub(r'.RepeatMasker.out', r'.mPing.RepeatMasker.out', cmd)
cmd = re.sub(r'/shared/wesslerlab/', r'/bigdata/wesslerlab/shared/', cmd)
print >> ofile, cmd
print >> ofile, 'cat %s/repeat/results/*.all_ref_insert.txt > %s/repeat/results/ALL.all_ref_insert.txt' %(topdir, topdir)
print >> ofile, 'cat %s/repeat/results/*.all_ref_insert.gff > %s/repeat/results/ALL.all_ref_insert.gff' %(topdir, topdir)
ofile.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-o', '--output')
parser.add_argument('-g', '--genome')
parser.add_argument('-r', '--repeat')
parser.add_argument('-v', dest='verbose', action='store_true')
args = parser.parse_args()
try:
len(args.input) > 0
except:
usage()
sys.exit(2)
if not args.output:
args.output = '%s' %(os.path.abspath(args.input))
if not args.genome:
args.genome = '/rhome/cjinfeng/BigData/00.RD/RelocaTE_i/Simulation/Reference/MSU_r7.fa'
if not args.repeat:
args.repeat = '/rhome/cjinfeng/BigData/00.RD/RelocaTE_i/Simulation/Reference/ping.fa'
#args.repeat = '/rhome/cjinfeng/BigData/00.RD/RelocaTE_i/Simulation/Reference/Rice.TE.short.unique.fa'
#-t ../input/mping_UNK.fa -g /rhome/cjinfeng/HEG4_cjinfeng/seqlib/MSU_r7.fa -d ../input/FC52_7 -e HEG4 -o mPing_HEG4_UNK -r 1 -p 1 -a 1
#RelocaTE = 'python /rhome/cjinfeng/software/tools/RelocaTE_1.0.3_i/RelocaTE/scripts/relocaTE.py'
#RelocaTE = 'python /rhome/cjinfeng/BigData/00.RD/RelocaTE2/scripts/relocaTE.py'
RelocaTE = 'python /rhome/cjinfeng/BigData/00.RD/RelocaTE2_mPing/scripts/relocaTE.py'
Reference= os.path.abspath(args.genome)
Repeat = os.path.abspath(args.repeat)
project = os.path.split(args.output)[1]
cpu = 16
if not os.path.exists(project):
os.mkdir(project)
print project
read_dirs = glob.glob('%s/ERS*' %(os.path.abspath(args.input)))
ofile = open('%s.run.sh' %(args.output), 'w')
for read_dir in sorted(read_dirs):
outdir = '%s/%s' %(os.path.abspath(args.output), os.path.split(read_dir)[1])
existingTE = '%s.mPing.RepeatMasker.out' %(Reference)
# relocate will not run if there is result exists
#if not os.path.exists(outdir):
if 1:
#relocaTE = '%s --te_fasta %s --genome_fasta %s --fq_dir %s --outdir %s --reference_ins %s' %(RelocaTE, Repeat, Reference, read_dir, outdir, existingTE)
os.system('cp /rhome/cjinfeng/Rice/Rice_population_sequence/Rice_3000/CAAS/existingTE.bed %s/repeat/' %(outdir))
rerun_shell('%s/run_these_jobs_rerun.sh' %(outdir), '%s/shellscripts/step_6/0.repeat.absence.sh' %(outdir), outdir)
shell = 'bash %s/run_these_jobs_rerun.sh > %s/run.log 2> %s/run.log2' %(outdir, outdir, outdir)
#os.system(relocaTE)
#print >> ofile, relocaTE
print >> ofile, shell
ofile.close()
runjob('%s.run.sh' %(args.output), 20)
if __name__ == '__main__':
main()
|
[
"jinfeng7chen@gmail.com"
] |
jinfeng7chen@gmail.com
|
dc21944a63569648b8573e6a433f04b29ad4e6be
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_mac.py
|
5e19d494c3783030806afa034e10968ddcfdb409
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
#calss header
class _MAC():
def __init__(self,):
self.name = "MAC"
self.definitions = [u'a waterproof coat (= one that does not allow rain to pass through): ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
70f2b9210119f2d5879668567b0e9e207d2bba8e
|
2153a7ecfa69772797e379ff5642d52072a69b7c
|
/library/test/test_compiler/testcorpus/74_class_super_nested.py
|
a1686c98c0d832a9bd8aedfe422ee687411b168b
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
KCreate/skybison
|
a3789c84541f39dc6f72d4d3eb9783b9ed362934
|
d1740e08d8de85a0a56b650675717da67de171a0
|
refs/heads/trunk
| 2023-07-26T04:50:55.898224
| 2021-08-31T08:20:46
| 2021-09-02T19:25:08
| 402,908,053
| 1
| 0
|
NOASSERTION
| 2021-09-03T22:05:57
| 2021-09-03T22:05:57
| null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
def fun():
class Foo:
def __init__(self):
super().__init__()
def no_super(self):
return
|
[
"emacs@fb.com"
] |
emacs@fb.com
|
c4441728e4c2fd660644adf3f31bc7b040393369
|
3b11dc40c7d772fffeb4d8683e5c9791c41f6454
|
/custom/community/general/status_bar_clock/controllers/__init__.py
|
5dacb5b2ac9861e919e683cd781d6bbe6496ff37
|
[] |
no_license
|
Jacky-odoo/Ecobank
|
b986352abac9416ab00008a4abaec2b1f1a1f262
|
5c501bd03a22421f47c76380004bf3d62292f79d
|
refs/heads/main
| 2023-03-09T18:10:45.058530
| 2021-02-25T14:11:12
| 2021-02-25T14:11:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# -*- coding: utf-8 -*-
# © 2018-Today Aktiv Software (http://www.aktivsoftware.com).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import main
|
[
"francisbnagura@gmail.com"
] |
francisbnagura@gmail.com
|
6fef571360e9314e5f9c731b7aac3ac1e1026cf4
|
9242319ca7796c6a3b18e760ddbf8290944d4b49
|
/test/test_enocean_receiver.py
|
1e118b4b6390d5bdfd67f5a6a7059a35599cbe38
|
[
"MIT"
] |
permissive
|
MainRo/python-flock
|
79cfd7ce4edab40439c556b6621768438868d16c
|
e1faa78d6aba374493336651848daadad82387a8
|
refs/heads/master
| 2021-01-10T19:16:52.907538
| 2015-11-18T21:15:38
| 2015-11-18T21:15:38
| 29,210,634
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
from unittest import TestCase
from mock import patch, call
from flock.controller.enocean.protocol import EnoceanReceiver
class TestProtocol(EnoceanReceiver):
def packet_received(self, data):
return
@patch.object(TestProtocol, 'packet_received')
class EnoceanReceiverTestCase(TestCase):
def test_receive_without_connection(self, test_packet_received):
protocol = TestProtocol()
data = '\x55\x00\x02\x00\x08\x42\x01\x02\x42'
protocol.dataReceived(data)
def test_receive_one_packet(self, test_packet_received):
protocol = TestProtocol()
protocol.connectionMade()
data = '\x55\x00\x02\x00\x08\x42\x01\x02\x42'
protocol.dataReceived(data)
test_packet_received.assert_called_with(ord(data[4]), data[6:8], '')
def test_receive_packet_from_chunks(self, test_packet_received):
protocol = TestProtocol()
protocol.connectionMade()
data = '\x55\x00\x02\x00\x08\x42\x01\x02\x42'
protocol.dataReceived(data[:3])
protocol.dataReceived(data[3:6])
protocol.dataReceived(data[6:])
test_packet_received.assert_called_with(ord(data[4]), data[6:8], '')
def test_receive_several_packets(self, test_packet_received):
protocol = TestProtocol()
protocol.connectionMade()
data = '\x55\x00\x02\x00\x08\x42\x01\x02\x42'
protocol.dataReceived(data)
test_packet_received.assert_called_with(ord(data[4]), data[6:8], '')
data = '\x55\x00\x02\x00\x08\x42\x03\x04\x42'
protocol.dataReceived(data)
test_packet_received.assert_called_with(ord(data[4]), data[6:8], '')
data = '\x55\x00\x02\x00\x08\x42\x05\x06\x42'
protocol.dataReceived(data)
test_packet_received.assert_called_with(ord(data[4]), data[6:8], '')
|
[
"romain.picard@oakbits.com"
] |
romain.picard@oakbits.com
|
aea7626754bf828a192cfb8b75e2737f4ca1bb81
|
751d837b8a4445877bb2f0d1e97ce41cd39ce1bd
|
/edabit/get-the-sum-of-all-array-elements.py
|
93bdaa39d63ddef0d914699453ead734f12d464a
|
[
"MIT"
] |
permissive
|
qeedquan/challenges
|
d55146f784a3619caa4541ac6f2b670b0a3dd8ba
|
56823e77cf502bdea68cce0e1221f5add3d64d6a
|
refs/heads/master
| 2023-08-11T20:35:09.726571
| 2023-08-11T13:02:43
| 2023-08-11T13:02:43
| 115,886,967
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
Create a function that takes an array and returns the sum of all numbers in the array.
Examples
getSumOfItems([2, 7, 4]) ➞ 13
getSumOfItems([45, 3, 0]) ➞ 48
getSumOfItems([-2, 84, 23]) ➞ 105
"""
import operator
def sum(xs):
return reduce(operator.add, xs)
assert(sum([2, 7, 4]) == 13)
assert(sum([45, 3, 0]) == 48)
assert(sum([-2, 84, 23]) == 105)
|
[
"qeed.quan@gmail.com"
] |
qeed.quan@gmail.com
|
0e602dfaa845ca851703c413ce1a548c2cec1477
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/IsvAuthSceneInfo.py
|
59bf8806f92b1610731009758157f4352bc08cb4
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class IsvAuthSceneInfo(object):
def __init__(self):
self._scene_code = None
self._scene_permissions = None
@property
def scene_code(self):
return self._scene_code
@scene_code.setter
def scene_code(self, value):
self._scene_code = value
@property
def scene_permissions(self):
return self._scene_permissions
@scene_permissions.setter
def scene_permissions(self, value):
self._scene_permissions = value
def to_alipay_dict(self):
params = dict()
if self.scene_code:
if hasattr(self.scene_code, 'to_alipay_dict'):
params['scene_code'] = self.scene_code.to_alipay_dict()
else:
params['scene_code'] = self.scene_code
if self.scene_permissions:
if hasattr(self.scene_permissions, 'to_alipay_dict'):
params['scene_permissions'] = self.scene_permissions.to_alipay_dict()
else:
params['scene_permissions'] = self.scene_permissions
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = IsvAuthSceneInfo()
if 'scene_code' in d:
o.scene_code = d['scene_code']
if 'scene_permissions' in d:
o.scene_permissions = d['scene_permissions']
return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
b4e67e6713486739e9c8bab0cafa343f4640205b
|
a53998e56ee06a96d59d97b2601fd6ec1e4124d7
|
/Mysql/day5/insert.py
|
ff40cb7ca8bbbe14a540f848df8e7f9f0de5504f
|
[] |
no_license
|
zh-en520/aid1901
|
f0ec0ec54e3fd616a2a85883da16670f34d4f873
|
a56f82d0ea60b2395deacc57c4bdf3b6bc73bd2e
|
refs/heads/master
| 2020-06-28T21:16:22.259665
| 2019-08-03T07:09:29
| 2019-08-03T07:09:29
| 200,344,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
# insert_test.py
# 插入测试
import pymysql
from db_conf import *
#创建数据库连接
try:
conn = pymysql.connect(host,user,passwd,dbname)
cursor = conn.cursor() #获取游标
#定义sql语句
# sql = '''insert into orders
# (order_id,cust_id,amt)
# values('201801010002','C0002',444.55)
# '''
sql = 'delete from orders where amt=444.55'
cursor.execute(sql)#执行SQL语句
conn.commit()#提交事务
print('插入成功')
except Exception as e:
conn.rollback()#回滚事务
print(e)
cursor.close()
conn.close()
|
[
"zh_en520@163.com"
] |
zh_en520@163.com
|
84d572034fee02964f3c369d9520d27f1e66dad6
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03679/s869008403.py
|
48a02e5eb2d96856b887b33de82b64a54cfa0625
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
x, a, b = map(int, input().split())
a *= -1
a += b
if a <= 0:
print("delicious")
elif a <= x:
print("safe")
else:
print("dangerous")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
33ba7708ad40728b600a25ef784d8dd3537d2f47
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_foreclose.py
|
123d60eb470560c241e67bbf37217dea6bac11e8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
#calss header
class _FORECLOSE():
def __init__(self,):
self.name = "FORECLOSE"
self.definitions = [u'(especially of banks) to take back property that was bought with borrowed money because the money was not being paid back as formally agreed: ', u'to prevent something from being considered as a possibility in the future: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
c14adaf27ac8d243d23ed47ee8d7fd61530e0bb1
|
d9504b779ca6d25a711c13fafc1b8669c60e6f62
|
/shape_recognition/libraries/general/confighardware.py
|
017e0a4ded22f83515c5ec8383669473fd3c48c2
|
[
"MIT"
] |
permissive
|
ys1998/tactile-shape-recognition
|
dcdd3f4da66b4b3f6159dccf9cec4d367f6483d9
|
b5ab6f1cdf04ff23e14b467a590533e7ee740b52
|
refs/heads/master
| 2020-03-18T03:01:17.985074
| 2018-07-28T09:46:16
| 2018-07-28T09:46:16
| 134,218,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,249
|
py
|
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# ONR Presentation: June 13th, 2018
#-------------------------------------------------------------------------------
# Description:
#-------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------
class FileHardwareHandler():
def __init__(self):
self.fileHandler = None
self.UR10 = None
self.iLimb = None
self.tactileBoards = None
def getParameters(self):
return self.UR10, self.iLimb, self.tactileBoards
#loads the parameters stored in the file
def load(self):
#open the file stream for reading
self.fileHandler = open('hardware.cfg','r')
#read all the lines
lines = self.fileHandler.readlines()
#print(lines) #debugging
urs = lines[0].split('\n')[0].split(' ')
self.UR10 = [urs[0], urs[1]]
hands = lines[1].split('\n')[0].split(' ')
self.iLimb = [hands[0],hands[1]]
boards = lines[2].split('\n')[0].split(' ')
self.tactileBoards = [boards[0],boards[1]]
#generates a new file containing the hardware configuration parameters
#ur10: ip addresses for left and right arm (UR10)
#ilimb: com ports for left and right hands (iLimb)
#tactile: com ports for left and right hands (tactile boards)
def save(self,ur10,ilimb,tactile):
#open the file stream for writing
self.fileHandler = open('hardware.cfg','w')
self.fileHandler.write(str(ur10[0]) + ' ' + str(ur10[1]) + '\n')
self.fileHandler.write(str(ilimb[0]) + ' ' + str(ilimb[1]) + '\n')
self.fileHandler.write(str(tactile[0]) + ' ' + str(tactile[1]) + '\n')
self.fileHandler.close()
#-------------------------------------------------------------------------------
|
[
"yashshah2398@gmail.com"
] |
yashshah2398@gmail.com
|
3bf43960f9da4d4e5d45b38e5e355589716262fb
|
b26e1704b963881e7681712c923787772ac463ec
|
/Courses/PythonBeyondTheBasics(OO-Programming)_David-Blaikie/5_DecoratorsAbstractOverloading/Overloading.py
|
363bc69ec423eaffc2ff2f78f05e6164582b97c9
|
[] |
no_license
|
lakshmikantdeshpande/Python-Courses
|
64f8a397b727042f2662fa7597ea0e73491717f3
|
d15364b42c182d3487532853bde37deb48865494
|
refs/heads/master
| 2021-09-02T06:51:19.490674
| 2017-12-31T06:55:53
| 2017-12-31T06:55:53
| 94,890,325
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
import abc
class GetSetParent(object):
__metaclass__ = abc.ABCMeta
def __init__(self, value):
self.val = 0
def set_val(self, value):
self.val = value
def get_val(self):
return self.val
@abc.abstractmethod
def showdoc(self):
return
class GetSetInt(GetSetParent):
def set_val(self, value):
if not isinstance(value, int):
value = 0
super(GetSetInt, self).set_val(value)
def showdoc(self):
print('GetSetInt object {} only accepts integer values'.format(id(self)))
class GetSetList(GetSetInt):
def __init__(self, value=0):
self.vallist = [value]
def get_val(self):
return self.vallist[-1]
def get_vals(self):
return self.vallist
def set_val(self, value):
self.vallist.append(value)
def showdoc(self):
print('GetSetList object, len({}), stores '
'history of values set'.format(self.vallist))
gsint = GetSetInt(57)
gsint.set_val(5)
print(gsint.get_val())
gsint.showdoc()
print()
gslist = GetSetList(98)
print(gslist.get_val())
gslist.set_val(45)
print(gslist.get_vals())
gslist.showdoc()
|
[
"lakshmikantdeshpande@gmail.com"
] |
lakshmikantdeshpande@gmail.com
|
0a59b48100a131f414e36062aac414f027c0acbf
|
d85f3bfcc7efb3313bd77ba43abbde8527c731d9
|
/ch09/bottle_test.py
|
970ecfd6283ec60311f0f7a3468eeda61c222bd3
|
[] |
no_license
|
freebz/Introducing-Python
|
8c62767e88b89eb614abd3ea4cf19aae946f5379
|
ecf2082946eac83072328a80ed1e06b416ef5170
|
refs/heads/master
| 2020-04-08T21:14:42.398462
| 2018-11-29T17:03:11
| 2018-11-29T17:03:11
| 159,736,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
import requests
resp = requests.get('http://localhost:9999/echo/Mothra')
if resp.status_code == 200 and \
resp.text == 'Say hello to my little friend: Mothra!':
print('It worked! That almost never happens!')
else:
print('Argh, got this:', resp.text)
|
[
"freebz@hananet.net"
] |
freebz@hananet.net
|
713e8ca9e8e74d63af7aaad24b92e81f479bd8fe
|
1d63d795ceb579f3f4ac7d27d61ad2c3298dd8d1
|
/vendor/blockwise_view.py
|
d52cfd63cbe14666bfdd5bf7da6bdd7ef32d8076
|
[
"MIT"
] |
permissive
|
austinpray/hey-brether
|
4fd1f95f08673e35a15c53a9d4aca43502d0dc7e
|
8fa9f83df0da5bc570f60a83a27f72babecb0c18
|
refs/heads/master
| 2021-08-27T16:42:29.578384
| 2021-08-22T21:41:22
| 2021-08-22T21:41:22
| 142,895,876
| 3
| 1
|
MIT
| 2018-07-30T20:13:18
| 2018-07-30T15:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,745
|
py
|
# lifted from
# https://github.com/ilastik/lazyflow/blob/master/lazyflow/utility/blockwise_view.py
# https://github.com/ilastik/lazyflow/blob/e98aa81cf2d55595df41b161d8e74607ec09c716/lazyflow/utility/blockwise_view.py
from __future__ import division
from builtins import map
import numpy
try:
# If you use vigra, we do special handling to preserve axistags
import vigra
_vigra_available = True
except ImportError:
_vigra_available = False
def blockwise_view( a, blockshape, aslist=False, require_aligned_blocks=True ):
"""
Return a 2N-D view of the given N-D array, rearranged so each ND block (tile)
of the original array is indexed by its block address using the first N
indexes of the output array.
Note: This function is nearly identical to ``skimage.util.view_as_blocks()``, except:
- "imperfect" block shapes are permitted (via require_aligned_blocks=False)
- only contiguous arrays are accepted. (This function will NOT silently copy your array.)
As a result, the return value is *always* a view of the input.
Args:
a: The ND array
blockshape: The tile shape
aslist: If True, return all blocks as a list of ND blocks
instead of a 2D array indexed by ND block coordinate.
require_aligned_blocks: If True, check to make sure no data is "left over"
in each row/column/etc. of the output view.
That is, the blockshape must divide evenly into the full array shape.
If False, "leftover" items that cannot be made into complete blocks
will be discarded from the output view.
Here's a 2D example (this function also works for ND):
>>> a = numpy.arange(1,21).reshape(4,5)
>>> print a
[[ 1 2 3 4 5]
[ 6 7 8 9 10]
[11 12 13 14 15]
[16 17 18 19 20]]
>>> view = blockwise_view(a, (2,2), False)
>>> print view
[[[[ 1 2]
[ 6 7]]
<BLANKLINE>
[[ 3 4]
[ 8 9]]]
<BLANKLINE>
<BLANKLINE>
[[[11 12]
[16 17]]
<BLANKLINE>
[[13 14]
[18 19]]]]
Inspired by the 2D example shown here: http://stackoverflow.com/a/8070716/162094
"""
assert a.flags['C_CONTIGUOUS'], "This function relies on the memory layout of the array."
blockshape = tuple(blockshape)
outershape = tuple(numpy.array(a.shape) // blockshape)
view_shape = outershape + blockshape
if require_aligned_blocks:
assert (numpy.mod(a.shape, blockshape) == 0).all(), \
"blockshape {} must divide evenly into array shape {}"\
.format( blockshape, a.shape )
# inner strides: strides within each block (same as original array)
intra_block_strides = a.strides
# outer strides: strides from one block to another
inter_block_strides = tuple(a.strides * numpy.array(blockshape))
# This is where the magic happens.
# Generate a view with our new strides (outer+inner).
view = numpy.lib.stride_tricks.as_strided(a,
shape=view_shape,
strides=(inter_block_strides+intra_block_strides))
# Special handling for VigraArrays
if _vigra_available and isinstance(a, vigra.VigraArray) and hasattr(a, 'axistags'):
view_axistags = vigra.AxisTags([vigra.AxisInfo() for _ in blockshape] + list(a.axistags))
view = vigra.taggedView(view, view_axistags)
if aslist:
return list(map(view.__getitem__, numpy.ndindex(outershape)))
return view
if __name__ == "__main__":
    # Run this module's doctests (e.g. the blockwise_view example above)
    # when the file is executed directly.
    import doctest
    doctest.testmod()
|
[
"austin@austinpray.com"
] |
austin@austinpray.com
|
d9040e75c7238b479f6ec9ac27efe459df9a5925
|
652121d51e6ff25aa5b1ad6df2be7eb341683c35
|
/programs/e2boxadjust.py
|
04168ce207137b8a80ac1faef462b4297f3943e0
|
[] |
no_license
|
jgalaz84/eman2
|
be93624f1c261048170b85416e517e5813992501
|
6d3a1249ed590bbc92e25fb0fc319e3ce17deb65
|
refs/heads/master
| 2020-04-25T18:15:55.870663
| 2015-06-05T20:21:44
| 2015-06-05T20:21:44
| 36,952,784
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,711
|
py
|
#!/usr/bin/env python
#
# Author: Steven Ludtke, 3/29/15 (sludtke@bcm.edu)
# Copyright (c) 2000-2015 Baylor College of Medicine
#
# This software is issued under a joint BSD/GNU license. You may use the
# source code in this file under either license. However, note that the
# complete EMAN2 and SPARX software packages have some GPL dependencies,
# so you are responsible for compliance with the licenses of these packages
# if you opt to use BSD licensing. The warranty disclaimer below holds
# in either instance.
#
# This complete copyright notice must be included in any revised version of the
# source code. Additional authorship citations may be added, but existing
# author citations must be preserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 2111-1307 USA
#
from EMAN2 import *
from math import *
import time
import os
import sys
def main():
    """Entry point: re-centre 3-D particle box coordinates using the 2-D
    alignment parameters stored in an EMAN2 classmx file, then write the
    adjusted coordinates to new *_cen.box3d files (one per tomogram).
    See the usage string below for the full processing pipeline.
    """
    progname = os.path.basename(sys.argv[0])
    usage = """prog [options] <sets/file.lst> <classmx_xx.hdf> <box3d 1> <box3d 2> ...
This program is part of a pipeline to improve centering of subtomograms prior to alignment and averaging.
This is a typical workflow:
1) select 3-D particles and store locations in .box3d files with names agreeing with the name of the tomogram, eg
rawtomograms/file0001.hdf -> rawtomograms/file0001.box3d
2) extract the 3-D particles from the tomograms, and generate Z-projections corresponding to each particle (this may be done automatically by e2spt_boxer)
3) insure that the Z-projections are normalized, filtered and have postive (white) contrast against the background, eg:
e2proc2d.py spt_particles/file0001_prjsz.hdf spt_particles/file0001_prjsz.hdf --inplace --process filter.lowpass.gauss:cutoff_abs=0.1 --process filter.highpass.gauss:cutoff_abs=0.02 --mult -1 --process normalize.edgemean
4) build a set containing all of the z projection stacks, eg
e2proclst.py spt_particles/file0001_prjsz.hdf spt_particles/file0002_prjsz.hdf --create sets/all_prjsz.lst
5) run e2refine2d.py
6) run this program, eg
e2boxadjust.py sets/all_prjsz.lst r2d_01/classmx_08.hdf rawtomograms/*box3d -v 2
7) re-extract the particles using the new _cen.box3d files
"""
    parser = EMArgumentParser(usage=usage,version=EMANVERSION)
    #parser.add_argument("--normproj",action="store_true",help="Normalize the projections resulting from 'project', such that the length of each vector is 1",default=False)
    #parser.add_argument("--normalize",type=str,help="Normalize the input images using the named processor. Specify None to disable.",default="normalize.unitlen")
    parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n",type=int, default=0, help="verbose level [0-9], higner number means higher level of verboseness")
    parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID",default=-1)
    (options, args) = parser.parse_args()
    # if len(args)>0 : parser.error("e2basis.py takes no arguments, only options")
    logid=E2init(sys.argv,options.ppid)
    # The classmx file contains particle alignment information:
    # image 0 = class assignments, images 2-5 = per-particle tx/ty/alpha/mirror.
    classmx=EMData(args[1],0)
    nptcl=classmx["ny"]
    cmxtx=EMData(args[1],2)      # per-particle x translation
    cmxty=EMData(args[1],3)      # per-particle y translation
    cmxalpha=EMData(args[1],4)   # per-particle in-plane rotation
    cmxmirror=EMData(args[1],5)  # per-particle mirror flag
    print "Classmx has info on ",nptcl," particles"
    # The files containing the particle locations (base names, for matching)
    boxfiles=[base_name(args[i],nodir=True) for i in xrange(2,len(args))]
    # The .lst file allowing us to reference original files from the information in cls files
    lsx=LSXFile(args[0])
    lpfile=None     # source file of the previously processed particle
    skipfile=True   # True while the current tomogram has no matching box file
    for p in xrange(nptcl):
        # The number and file of particle N
        pn,pfile,com = lsx[p]
        if pfile!=lpfile:
            # Starting a new tomogram: flush the adjusted boxes of the previous one.
            if not skipfile: write_boxes(curboxfile,curboxes)
            skipfile=False
            pfileb=base_name(pfile,nodir=True)
            if not pfileb in boxfiles :
                print "No box file found for: ",pfileb
                lpfile=pfile
                skipfile=True
                continue
            # This is the file containing the box locations for this range of particles
            curboxfile=args[boxfiles.index(pfileb)+2]
            p0=p    # index of the first particle belonging to this tomogram
            if options.verbose: print pfileb,"->",curboxfile
            # These are the box locations within that file (comment lines skipped)
            curboxes=[[int(j) for j in i.split()] for i in file(curboxfile,"r") if i[0]!="#"]
            lpfile=pfile
        else:
            if skipfile : continue # we've already identified this as a file we don't have box locations for
        # Undo the 2-D alignment translation to re-centre the 3-D box in x/y.
        # NOTE(review): indentation reconstructed from a whitespace-stripped dump;
        # assumed to run for every particle of a known tomogram, including the first.
        ptclxf=Transform({"type":"2d","alpha":cmxalpha[0,p],"mirror":int(cmxmirror[0,p]),"tx":cmxtx[0,p],"ty":cmxty[0,p]})
        pt2d=ptclxf.get_pre_trans_2d()
        curboxes[p-p0][0]-=pt2d[0]
        curboxes[p-p0][1]-=pt2d[1]
        if options.verbose>1:
            try: print "{}) {}: {}\t {:d},{:d}".format(p,p-p0,pfileb,int(pt2d[0]),int(pt2d[1]))
            except: pass
    # Flush the final tomogram's boxes.
    if not skipfile: write_boxes(curboxfile,curboxes)
    E2end(logid)
def write_boxes(curboxfile,curboxes):
    """Write adjusted box coordinates to <name>_cen.box3d next to the
    original box file, one tab-separated x/y/z integer triple per line."""
    print "Writing updated boxes for: ",curboxfile
    # NOTE(review): file() is Python-2-only and the handle is never closed
    # explicitly; this relies on CPython refcounting to flush/close on exit.
    out=file(curboxfile.split(".")[0]+"_cen.box3d","w")
    for b in curboxes: out.write("{:d}\t{:d}\t{:d}\n".format(int(b[0]),int(b[1]),int(b[2])))
if __name__== "__main__":
    main()
|
[
"jgalaz@gmail.com"
] |
jgalaz@gmail.com
|
81c1ef2bb1721cb5d7b56e1f3673cd180502ce97
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/services/services/language_constant_service/transports/base.py
|
3eb9eee19afd04d6ca83dd2da69278f139f23822
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,721
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v4.resources.types import language_constant
from google.ads.googleads.v4.services.types import language_constant_service
# Resolve the gapic client-info (user-agent) metadata from the installed
# google-ads-googleads distribution; fall back to a default ClientInfo when
# the package metadata is unavailable (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-ads-googleads',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class LanguageConstantServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for LanguageConstantService.

    Concrete subclasses (e.g. the gRPC transport) implement the
    ``get_language_constant`` property to return the bound RPC callable.
    """
    # OAuth scopes requested when falling back to application-default credentials.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/adwords',
    )

    def __init__(
            self, *,
            host: str = 'googleads.googleapis.com',
            credentials: credentials.Credentials = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host
        # If no credentials are provided, then determine the appropriate
        # defaults from the environment (ADC).
        if credentials is None:
            credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
        # Save the credentials.
        self._credentials = credentials
        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Precomputed wrapped methods: attaches retry/timeout/user-agent
        # handling around each RPC exactly once at construction time.
        self._wrapped_methods = {
            self.get_language_constant: gapic_v1.method.wrap_method(
                self.get_language_constant,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    @property
    def get_language_constant(self) -> typing.Callable[
            [language_constant_service.GetLanguageConstantRequest],
            language_constant.LanguageConstant]:
        # Abstract hook: concrete transports must return the RPC callable.
        raise NotImplementedError
__all__ = (
'LanguageConstantServiceTransport',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
7a202d230c87166a61c32289c482c0b4b5907e88
|
d042b8895dc8347356fa4d5984d07bff41eecc73
|
/tools/bt_store.py
|
61a04f137230eb3634848d34dbc1a8d1e41c65c3
|
[
"Apache-2.0"
] |
permissive
|
jzx1230/obtainfo
|
257b075c32c3448096391f258f42dd7f0c081350
|
883c29ab0a462d11682b60b9b52b2fc93031b816
|
refs/heads/master
| 2021-05-08T04:19:33.810848
| 2015-10-13T10:10:10
| 2015-10-13T10:10:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,111
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import json
import codecs
import random
import pymongo
import sqlite3
import libtorrent
import chardet
import datetime
from pony.orm import *
from django.utils.encoding import force_unicode, DjangoUnicodeDecodeError
# Pre-compiled patterns: magnet-link info-hash extractor, BitComet padding-file
# filter, and characters that are illegal in (Windows) file names.
re_urn = re.compile(ur'xt=urn:btih:(\w+)')
re_pass = re.compile(r'^_____padding_file')
re_file_name = re.compile(r"[\/\\\:\*\?\"\<\>\|]")
# Site-specific prefixes historically prepended to torrent names.
prefix = ["[www.obtainfo.com]", "www.obtainfo.com_"]
db = Database('sqlite', 'obtainfo.sqlite', create_db=True)
class Torrent(db.Entity):
    # Pony ORM entity: one stored .torrent blob, keyed by its 40-char info-hash.
    id = PrimaryKey(str, 40)
    upload = Required(bool, default=False, index=True)    # already uploaded?
    netdisk = Required(bool, default=False, index=True)   # pushed to net-disk?
    torrent = Required(buffer, lazy=True)                 # raw torrent bytes (lazy-loaded)
db.generate_mapping(create_tables=True)
# True when a torrent with this info-hash is already stored locally.
is_exist_urn = lambda urn : db.exists("* from Torrent where id = $urn")
def unicode_name(name):
    """Best-effort decode of a torrent name to unicode.

    Tries a fixed list of common encodings first. Because each failure
    only ``continue``s, the ``for``/``else`` branch always runs when no
    encoding succeeded; it then falls back to chardet's detected encoding,
    re-raising if even that cannot decode.
    """
    for encoding in ['utf-8', 'gbk', 'gb2312', 'gb18030', 'latin1', 'big5']:
        try:
            return force_unicode(name, encoding)
        except DjangoUnicodeDecodeError:
            continue
    else:
        # Every candidate encoding failed: ask chardet for a guess.
        try:
            return force_unicode(name, chardet.detect(name)['encoding'])
        except DjangoUnicodeDecodeError:
            raise
@db_session
def save_torrent_to_db(urn, blob):
    """Persist one torrent blob under its info-hash within a Pony db_session."""
    torrent = Torrent(id=urn, torrent=blob)
    commit()
def check_torrent(content):
    """Validate raw .torrent data and return its info-hash ("urn").

    Args:
        content: raw bytes of a .torrent file.

    Returns:
        The lower-case hex info-hash string, or None when *content*
        cannot be parsed as a valid torrent.
    """
    try:
        metadata = libtorrent.bdecode(content)
        info = libtorrent.torrent_info(metadata)
        return str(info.info_hash()).lower()
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; ``Exception`` keeps the documented
        # "invalid torrent -> None" contract without masking shutdown.
        return None
# content is torrent raw bin data; verify its hash and skip duplicates.
@db_session
def save_torrent(content):
    """Validate *content* and store it unless its info-hash already exists.

    Returns True when the data parsed as a valid torrent (whether stored
    or a duplicate), False when parsing failed.
    """
    urn = check_torrent(content)
    if urn:
        if not is_exist_urn(urn):
            save_torrent_to_db(urn, sqlite3.Binary(content))
        else:
            print 'dumplate urn %s' % urn
        return True
    else:
        print 'check urn %s fail' % urn
        return False
@db_session
def get_server_magnet(num=5000):
    """Collect up to *num* magnet links from the MongoDB 'server' database
    whose info-hashes are not yet present in the local Torrent table.

    Returns (magnets, urns): parallel lists of magnet URIs and their
    lower-case info-hashes.
    """
    server = pymongo.Connection().server
    count = 0
    urns = list()
    magnets = list()
    max_urns = 0   # longest hash length seen; printed for diagnostics only
    # Snapshot of hashes already stored locally, for O(1) membership tests.
    df = set(db.select("id from Torrent"))
    for d in server.server.find():
        for m in d['resource']['download']:
            try:
                urn = re.findall(ur'xt=urn:btih:(\w+)', m['link'])[0].lower()
                if len(urn) > max_urns:
                    max_urns = len(urn)
                if urn not in df:
                    magnets.append(m['link'])
                    urns.append(urn)
                    count += 1
                if count >= num:
                    print max_urns
                    return (magnets, urns)
            except IndexError:
                # m['link'] is not a magnet URI (no btih match); skip it.
                pass
    print max_urns
    return (magnets, urns)
# rules = ['full', 'upload', 'netdisk']
@db_session
def dump_urn(where, rule='upload', status=False, update=False, num=1000):
    """Dump every stored info-hash to <where>/urn.json as a JSON list.

    NOTE(review): rule/status/update/num are accepted for signature
    symmetry with dump_torrent but are currently unused here.
    """
    torrents = select(t.id for t in Torrent)
    urn = [t.lower() for t in torrents]
    with codecs.open(os.path.join(where, 'urn.json'), 'wb', 'utf-8') as f:
        json.dump(urn, f)
@db_session
def dump_torrent(where, rule='upload', status=False, update=False, num=1000):
    """Export stored torrents as <where>/<hh>/<info-hash>.torrent files.

    *rule*/*status* select the rows: 'upload' or 'netdisk' matches rows whose
    corresponding flag equals *status*; any other rule exports everything.
    num == -1 means no limit. When *update* is True the exported rows have
    their flag flipped so they are not exported again on the next run.
    """
    if rule == 'upload':
        torrents = Torrent.select(lambda t: t.upload == status)
    elif rule == 'netdisk':
        torrents = Torrent.select(lambda t: t.netdisk == status)
    else:
        torrents = select(t for t in Torrent)
    if num != -1:
        torrents = torrents[ : num]
    for t in torrents:
        # Shard output into two-hex-char sub-directories to keep dirs small.
        src = os.path.join(where, t.id[:2], "%s.torrent" % t.id)
        if not os.path.exists(os.path.join(where, t.id[:2])):
            os.mkdir(os.path.join(where, t.id[:2]))
        with open(src, 'wb') as f:
            bin_data = t.torrent
            # NOTE(review): both branches write the same lazy blob; the
            # else branch appears redundant/unreachable in practice.
            if bin_data:
                f.write(bin_data)
            else:
                f.write(t.torrent)
        if update == True:
            if rule == 'upload':
                t.set(upload = not status)
            elif rule == 'netdisk':
                t.set(netdisk = not status)
# Import torrent files from an external directory tree into the database.
def load_torrent(directory):
    """Recursively walk *directory* and feed every regular file found
    into save_torrent(), which validates and stores it."""
    for entry in os.listdir(directory):
        path = os.path.join(directory, entry)
        if not os.path.isdir(path):
            with open(path, 'rb') as fh:
                save_torrent(fh.read())
        else:
            # Descend into sub-directories.
            load_torrent(path)
def rename_torrent(directory, newfolder=None):
    """Rename each .torrent file in *directory* after the torrent's own
    internal name (illegal filename characters stripped), optionally
    moving the result into *newfolder* instead.
    """
    for name in os.listdir(directory):
        src = os.path.join(directory, name)
        with open(src, 'rb') as source:
            meta = libtorrent.bdecode(source.read())
        try:
            torrent_name = unicode_name(meta['info']['name'])
        except:
            # Unparsable metadata or undecodable name: leave file untouched.
            continue
        if not newfolder:
            new = os.path.join(directory, re_file_name.sub('', torrent_name) + '.torrent' )
        else:
            new = os.path.join(newfolder, re_file_name.sub('', torrent_name) + '.torrent' )
        os.rename(src, new)
@db_session
def stats_torrent(rule='upload', status=False):
    """Count stored torrents, optionally filtered by the upload/netdisk flag.

    The generator expressions are intentionally written inline: Pony ORM
    decompiles them into SQL, so they must remain literal genexprs.
    """
    if rule == 'upload':
        return select(t for t in Torrent if t.upload == status).count()
    elif rule == 'netdisk':
        return select(t for t in Torrent if t.netdisk == status).count()
    else:
        return select(t for t in Torrent).count()
|
[
"pczhaoyun@gmail.com"
] |
pczhaoyun@gmail.com
|
0aeef5b781b774b162ccd571d7f2cfa4c241ed0e
|
e75148cf551a6b88c1af821ec1f624dbd8678900
|
/tests/test_models/test_user.py
|
a997a9efdbcc970a3b699cd68c2599245042198c
|
[] |
no_license
|
pforciol/AirBnB_clone
|
8f4e22e4370bb22bf6c5a33af0d33384615c1be6
|
996141f4dd58c3e228231edd48ef831a1a1547d6
|
refs/heads/master
| 2023-03-20T00:00:26.283048
| 2021-03-03T11:41:56
| 2021-03-03T11:41:56
| 337,645,428
| 1
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,482
|
py
|
#!/usr/bin/python3
"""
Contains the TestUserDocs classes
"""
from datetime import datetime
import inspect
from models import user
from models.base_model import BaseModel
import pep8
import unittest
User = user.User
class TestUserDocs(unittest.TestCase):
    """Tests to check the documentation and style of User class"""
    @classmethod
    def setUpClass(cls):
        """Set up for the doc tests"""
        # All functions defined on User, as (name, function) pairs.
        cls.user_f = inspect.getmembers(User, inspect.isfunction)
    def test_pep8_conformance_user(self):
        """Test that models/user.py conforms to PEP8."""
        # Paths are relative: these tests must run from the project root.
        pep8s = pep8.StyleGuide(quiet=True)
        result = pep8s.check_files(['models/user.py'])
        self.assertEqual(result.total_errors, 0,
                         "Found code style errors (and warnings).")
    def test_pep8_conformance_test_user(self):
        """Test that tests/test_models/test_user.py conforms to PEP8."""
        pep8s = pep8.StyleGuide(quiet=True)
        result = pep8s.check_files(['tests/test_models/test_user.py'])
        self.assertEqual(result.total_errors, 0,
                         "Found code style errors (and warnings).")
    def test_user_module_docstring(self):
        """Test for the user.py module docstring"""
        self.assertIsNot(user.__doc__, None,
                         "user.py needs a docstring")
        self.assertTrue(len(user.__doc__) >= 1,
                        "user.py needs a docstring")
    def test_user_class_docstring(self):
        """Test for the User class docstring"""
        self.assertIsNot(User.__doc__, None,
                         "User class needs a docstring")
        self.assertTrue(len(User.__doc__) >= 1,
                        "User class needs a docstring")
    def test_user_func_docstrings(self):
        """Test for the presence of docstrings in User methods"""
        for func in self.user_f:
            self.assertIsNot(func[1].__doc__, None,
                             "{:s} method needs a docstring".format(func[0]))
            self.assertTrue(len(func[1].__doc__) >= 1,
                            "{:s} method needs a docstring".format(func[0]))
class TestUser(unittest.TestCase):
    """Test the User class"""
    def test_is_subclass(self):
        """Test that User is a subclass of BaseModel"""
        user = User()
        self.assertIsInstance(user, BaseModel)
        # Inherited BaseModel attributes must be present on instances.
        self.assertTrue(hasattr(user, "id"))
        self.assertTrue(hasattr(user, "created_at"))
        self.assertTrue(hasattr(user, "updated_at"))
    def test_email_attr(self):
        """Test that User has attr email, and it's an empty string"""
        user = User()
        self.assertTrue(hasattr(user, "email"))
        self.assertEqual(user.email, "")
    def test_password_attr(self):
        """Test that User has attr password, and it's an empty string"""
        user = User()
        self.assertTrue(hasattr(user, "password"))
        self.assertEqual(user.password, "")
    def test_first_name_attr(self):
        """Test that User has attr first_name, and it's an empty string"""
        user = User()
        self.assertTrue(hasattr(user, "first_name"))
        self.assertEqual(user.first_name, "")
    def test_last_name_attr(self):
        """Test that User has attr last_name, and it's an empty string"""
        user = User()
        self.assertTrue(hasattr(user, "last_name"))
        self.assertEqual(user.last_name, "")
    def test_to_dict_creates_dict(self):
        """test to_dict method creates a dictionary with proper attrs"""
        u = User()
        new_d = u.to_dict()
        self.assertEqual(type(new_d), dict)
        # Every instance attribute plus the injected __class__ key.
        for attr in u.__dict__:
            self.assertTrue(attr in new_d)
        self.assertTrue("__class__" in new_d)
    def test_to_dict_values(self):
        """test that values in dict returned from to_dict are correct"""
        # Timestamps must be serialized as ISO-format strings.
        t_format = "%Y-%m-%dT%H:%M:%S.%f"
        u = User()
        new_d = u.to_dict()
        self.assertEqual(new_d["__class__"], "User")
        self.assertEqual(type(new_d["created_at"]), str)
        self.assertEqual(type(new_d["updated_at"]), str)
        self.assertEqual(new_d["created_at"], u.created_at.strftime(t_format))
        self.assertEqual(new_d["updated_at"], u.updated_at.strftime(t_format))
    def test_str(self):
        """test that the str method has the correct output"""
        user = User()
        string = "[User] ({}) {}".format(user.id, user.__dict__)
        self.assertEqual(string, str(user))
|
[
"pierre.forcioli@holbertonschool.com"
] |
pierre.forcioli@holbertonschool.com
|
5c6f6a8921c5ac8bc3e36835c949fe3bf80386ba
|
46d09a43bbb7ea25c7e949fc3d4910779378f9fc
|
/pipeline/jinja2/ext.py
|
3505f15a806422d8877b7e72d0c6a4e96ffa8490
|
[
"MIT"
] |
permissive
|
vstoykov/django-pipeline
|
9af20061a7ee93c167c7a4900fe8fb37143511f8
|
e33375455174adb37e568efa2eccb988a86e132b
|
refs/heads/master
| 2021-01-16T20:52:33.504023
| 2013-02-25T08:12:10
| 2013-02-25T08:12:10
| 8,404,848
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,082
|
py
|
from __future__ import unicode_literals
from jinja2 import nodes, TemplateSyntaxError
from jinja2.ext import Extension
from django.contrib.staticfiles.storage import staticfiles_storage
from pipeline.packager import PackageNotFound
from pipeline.utils import guess_type
from pipeline.templatetags.compressed import CompressedMixin
class PipelineExtension(CompressedMixin, Extension):
    """Jinja2 extension exposing django-pipeline's {% compressed_css %} and
    {% compressed_js %} template tags.

    Package lookup and compressed-vs-individual rendering decisions come
    from CompressedMixin; this class supplies the Jinja2 parse hook and the
    Jinja2-template-based render_* methods the mixin calls back into.
    """
    tags = set(['compressed_css', 'compressed_js'])
    def parse(self, parser):
        # Consume the tag token itself, then the package-name expression.
        tag = next(parser.stream)
        package_name = parser.parse_expression()
        if not package_name:
            raise TemplateSyntaxError("Bad package name", tag.lineno)
        args = [package_name]
        # Emit a CallBlock node that invokes the matching package_* method
        # at template render time.
        if tag.value == "compressed_css":
            return nodes.CallBlock(self.call_method('package_css', args), [], [], []).set_lineno(tag.lineno)
        if tag.value == "compressed_js":
            return nodes.CallBlock(self.call_method('package_js', args), [], [], []).set_lineno(tag.lineno)
        return []
    def package_css(self, package_name, *args, **kwargs):
        try:
            package = self.package_for(package_name, 'css')
        except PackageNotFound:
            return ''  # fail silently, do not return anything if an invalid group is specified
        return self.render_compressed(package, 'css')
    def render_css(self, package, path):
        # Render one <link>/<style> tag for a single CSS asset.
        template_name = package.template_name or "pipeline/css.jinja"
        context = package.extra_context
        context.update({
            'type': guess_type(path, 'text/css'),
            'url': staticfiles_storage.url(path)
        })
        template = self.environment.get_template(template_name)
        return template.render(**context)
    def render_individual_css(self, package, paths):
        tags = [self.render_css(package, path) for path in paths]
        return '\n'.join(tags)
    def package_js(self, package_name, *args, **kwargs):
        try:
            package = self.package_for(package_name, 'js')
        except PackageNotFound:
            return ''  # fail silently, do not return anything if an invalid group is specified
        return self.render_compressed(package, 'js')
    def render_js(self, package, path):
        # Render one <script> tag for a single JS asset.
        template_name = package.template_name or "pipeline/js.jinja"
        context = package.extra_context
        context.update({
            'type': guess_type(path, 'text/javascript'),
            'url': staticfiles_storage.url(path)
        })
        template = self.environment.get_template(template_name)
        return template.render(**context)
    def render_inline(self, package, js):
        # Wrap raw JS source (e.g. compiled JS templates) in an inline tag.
        context = package.extra_context
        context.update({
            'source': js
        })
        template = self.environment.get_template("pipeline/inline_js.jinja")
        return template.render(**context)
    def render_individual_js(self, package, paths, templates=None):
        tags = [self.render_js(package, js) for js in paths]
        if templates:
            tags.append(self.render_inline(package, templates))
        return '\n'.join(tags)
|
[
"timothee.peignier@tryphon.org"
] |
timothee.peignier@tryphon.org
|
51d3af2173d61c3bb263950d8b476c00b5b27f45
|
48832d27da16256ee62c364add45f21b968ee669
|
/res/scripts/client/gui/scaleform/daapi/view/meta/miniclientcomponentmeta.py
|
7baac3f39ceaefa51e89191231fb90b8907f9ebf
|
[] |
no_license
|
webiumsk/WOT-0.9.15.1
|
0752d5bbd7c6fafdd7f714af939ae7bcf654faf7
|
17ca3550fef25e430534d079876a14fbbcccb9b4
|
refs/heads/master
| 2021-01-20T18:24:10.349144
| 2016-08-04T18:08:34
| 2016-08-04T18:08:34
| 64,955,694
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 790
|
py
|
# 2016.08.04 19:51:42 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/MiniClientComponentMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class MiniClientComponentMeta(BaseDAAPIComponent):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends BaseDAAPIComponent
    null
    """
    def onHyperlinkClick(self):
        """
        :return :
        """
        # Generated stub: concrete view classes must override this handler;
        # otherwise an "override missing" error is logged.
        self._printOverrideError('onHyperlinkClick')
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\miniclientcomponentmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:51:42 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
8f889de0036edda16cc3215daa0c446fe28675f8
|
747f759311d404af31c0f80029e88098193f6269
|
/extra-addons/syst_hr_payroll_ma/hr.py
|
e4887dca32ebf3ab1fd708f0c1ed3fdeb57426be
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958
| 2013-09-15T13:03:36
| 2013-09-15T13:03:36
| 9,648,444
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,063
|
py
|
import netsvc
from osv import fields, osv
import pooler
from tools.translate import _
import time
class hr_employee(osv.osv):
    # Extends hr.employee with Moroccan-payroll-specific fields
    # (employee number, ID card, seniority bonus, banking details, ...).
    _inherit = 'hr.employee'
    _columns = {
        'matricule' : fields.char('Matricule', size=64),    # payroll/employee number
        'cin' : fields.char('CIN', size=64),                # national ID card number
        'date': fields.date('Date entree', help='Cette date est requipe pour le calcule de la prime d\'anciennete'),
        'anciennete': fields.boolean('Prime anciennete', help='Est ce que cet employe benificie de la prime d\'anciennete'),
        'mode_reglement' : fields.selection([('virement', 'Virement'), ('cheque', 'Cheque'), ('espece', 'Espece'), ], 'Mode De Reglement'),
        'bank' : fields.char('Banque', size=128),
        'compte' : fields.char('Compte bancaire', size=128),
        'chargefam' : fields.integer('Nombre de personnes a charge'),   # dependents count (tax abatement)
        'logement': fields.float('Abattement Fr Logement'),
        'affilie':fields.boolean('Affilie', help='Est ce qu on va calculer les cotisations pour cet employe'),
        'address_home' : fields.char('Adresse Personnelle', size=128),
        'address' : fields.char('Adresse Professionnelle', size=128),
        'phone_home' : fields.char('Telephone Personnel', size=128),
        'licexpiry' : fields.char('Lic Expiry', size=128),
        'licenseno' : fields.char('Lic No', size=128),
        'licensetyp' : fields.char('Lic Type', size=128),
    }
    _defaults = {
        'chargefam' : lambda * a: 0,
        'logement' : lambda * a: 0,
        # NOTE(review): the string 'True' (not the boolean) is returned here;
        # it is truthy, so the stored default behaves as True.
        'anciennete' : lambda * a: 'True',
        'affilie' : lambda * a: 'True',
        'date' : lambda * a: time.strftime('%Y-%m-%d'),
        'mode_reglement' : lambda * a: 'virement'
    }
# OpenERP 6-era convention: instantiate to register the model.
hr_employee()
class hr_contract(osv.osv) :
    _inherit = 'hr.contract'
    _description = 'Employee Contract'
    _columns = {
        'working_days_per_month' : fields.integer('jours travailles par mois'),
        'hour_salary' : fields.float('salaire Heure'),
        'monthly_hour_number' : fields.float('Nombre Heures par mois'),
        'cotisation':fields.many2one('hr.payroll_ma.cotisation.type', 'Type cotisations', required=True),
        'rubrique_ids': fields.one2many('hr.payroll_ma.ligne_rubrique', 'id_contract', 'Les rubriques'),
    }
    _defaults = {
        'working_days_per_month' : lambda * a : 26,
    }
    def net_to_brute(self, cr, uid, ids, context={}):
        """Iteratively search for the gross salary whose computed net equals
        the contract's stored wage (treated as a target NET amount), then
        write the gross back into the contract's ``wage`` field.

        Search strategy: step the candidate gross up by 0.5 until the net
        matches to the dirham, back off by 1, then refine in 0.01 steps.
        NOTE(review): indentation reconstructed from a whitespace-stripped
        dump; ``context={}`` is the OpenERP-era convention (not mutated).
        """
        pool = pooler.get_pool(cr.dbname)
        id_contract = ids[0]
        contract = pool.get('hr.contract').browse(cr, uid, id_contract)
        salaire_base = contract.wage            # target net salary
        cotisation = contract.cotisation        # social-contribution scheme
        personnes = contract.employee_id.chargefam
        params = self.pool.get('hr.payroll_ma.parametres')
        objet_ir = self.pool.get('hr.payroll_ma.ir')
        id_ir = objet_ir.search(cr, uid, [])
        # Income-tax (IR) brackets: annual bounds, rate, deduction sum.
        liste = objet_ir.read(cr, uid, id_ir, ['debuttranche', 'fintranche', 'taux', 'somme'])
        ids_params = params.search(cr, uid, [])
        dictionnaire = params.read(cr, uid, ids_params[0])
        abattement = personnes * dictionnaire['charge']   # family-charge tax credit
        base = 0
        salaire_brute = salaire_base            # start the search at the net amount
        trouve=False                            # exact match found
        trouve2=False                           # coarse pass done, refining
        while(trouve == False):
            salaire_net_imposable=0
            cotisations_employee=0
            # Employee-side contributions, each possibly capped at its ceiling.
            for cot in cotisation.cotisation_ids :
                if cot.plafonee and salaire_brute >= cot.plafond:
                    base = cot.plafond
                else : base = salaire_brute
                cotisations_employee += base * cot['tauxsalarial'] / 100
            # Professional-expense deduction, capped at the legal plafond.
            fraispro = salaire_brute * dictionnaire['fraispro'] / 100
            if fraispro < dictionnaire['plafond']:
                salaire_net_imposable = salaire_brute - fraispro - cotisations_employee
            else :
                salaire_net_imposable = salaire_brute - dictionnaire['plafond'] - cotisations_employee
            # Locate the monthly IR bracket (annual bounds divided by 12).
            for tranche in liste:
                if(salaire_net_imposable >= tranche['debuttranche']/12) and (salaire_net_imposable < tranche['fintranche']/12):
                    taux = (tranche['taux'])
                    somme = (tranche['somme']/12)
            ir = (salaire_net_imposable - (somme*12))*taux/100 - abattement
            if(ir < 0):ir = 0
            salaire_net=salaire_brute - cotisations_employee - ir
            # Coarse pass overshot to the right dirham: back off and refine.
            if(int(salaire_net)==int(salaire_base) and trouve2==False):
                trouve2=True
                salaire_brute-=1
            if(round(salaire_net,2)==salaire_base):trouve=True
            elif trouve2==False : salaire_brute+=0.5
            elif trouve2==True : salaire_brute+=0.01
        self.write(cr, uid, [contract.id], {'wage' : round(salaire_brute,2)})
        return True
hr_contract()
class hr_holidays_status(osv.osv):
    # Adds a paid/unpaid flag to leave types for payroll computation.
    _inherit = "hr.holidays.status"
    _description = 'Holidays'
    _columns = {
        'payed':fields.boolean('paye', required=False),
    }
    _defaults = {
        'payed': lambda * args: True
    }
hr_holidays_status()
|
[
"geerish@omerp.net"
] |
geerish@omerp.net
|
82a567e6b34c488be5c6911c9716d0e5e39267f5
|
51ce07a419abe50f49e7bb6a6c036af291ea2ef5
|
/3.Algorithm/03. String/회문.py
|
3d2cc4d08812c91f1e68a534526433192562357f
|
[] |
no_license
|
salee1023/TIL
|
c902869e1359246b6dd926166f5ac9209af7b1aa
|
2905bd331e451673cbbe87a19e658510b4fd47da
|
refs/heads/master
| 2023-03-10T09:48:41.377704
| 2021-02-24T10:47:27
| 2021-02-24T10:47:27
| 341,129,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
def is_pal(matrix,N,M):
    """Return the first palindromic string of length M in the N x N grid.

    Scans horizontal windows first (top-to-bottom rows, left-to-right
    starts), then vertical windows (left-to-right columns, top-to-bottom
    starts). Returns None when no length-M palindrome exists.
    """
    # Horizontal windows
    for r in range(N):
        for start in range(N - M + 1):
            window = matrix[r][start:start + M]
            if window == window[::-1]:
                return window
    # Vertical windows
    for c in range(N):
        for start in range(N - M + 1):
            window = ''.join(matrix[start + k][c] for k in range(M))
            if window == window[::-1]:
                return window
# ---------------------------------------------
# Driver: for each test case, read the grid size N and target length M,
# then the N rows of the grid, and print the palindrome found by is_pal.
T = int(input())
for tc in range(1, 1 + T):
    N, M = map(int, input().split())
    matrix = [input() for _ in range(N)]
    print(f'#{tc} {is_pal(matrix,N,M)}')
|
[
"dltmddk1023@gmail.com"
] |
dltmddk1023@gmail.com
|
a6559663df3b494d7fa2c3b72625adbfc4ff5eb5
|
d6fe71e3e995c03b8f5151ab1d53411b77b325ba
|
/walklist_api_service/models/ping_response_payload_headers.py
|
b0cc9e09ae0c79a6d0ca74aee306b8002286ff8f
|
[] |
no_license
|
mwilkins91/petpoint-scraper
|
95468ae9951deaa8bd3bef7d88c0ff660146c1a3
|
dd0c60c68fc6a7d11358aa63d28fdf07fff3c7cd
|
refs/heads/master
| 2022-11-27T00:02:50.654404
| 2020-08-09T18:41:40
| 2020-08-09T18:41:40
| 286,180,666
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,252
|
py
|
# coding: utf-8
"""
The Enrichment List
The THS enrichment list # noqa: E501
OpenAPI spec version: 1.0.0
Contact: contactme@markwilkins.co
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PingResponsePayloadHeaders(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'content_type': 'str'
}
attribute_map = {
'content_type': 'Content-Type'
}
def __init__(self, content_type=None): # noqa: E501
"""PingResponsePayloadHeaders - a model defined in Swagger""" # noqa: E501
self._content_type = None
self.discriminator = None
if content_type is not None:
self.content_type = content_type
@property
def content_type(self):
"""Gets the content_type of this PingResponsePayloadHeaders. # noqa: E501
:return: The content_type of this PingResponsePayloadHeaders. # noqa: E501
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this PingResponsePayloadHeaders.
:param content_type: The content_type of this PingResponsePayloadHeaders. # noqa: E501
:type: str
"""
self._content_type = content_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PingResponsePayloadHeaders, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PingResponsePayloadHeaders):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"contactme@markwilkins.co"
] |
contactme@markwilkins.co
|
af611b2c565ba96222cf52efd00902fede83b397
|
7cf119239091001cbe687f73018dc6a58b5b1333
|
/datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_CJXW_GJJRJG/ZX_CJXW_GJJRJG_GZW_DFSM.py
|
61ffc39ae2d35de2d050385674f862a2e53b7081
|
[
"Apache-2.0"
] |
permissive
|
ILKKAI/dataETL
|
0f5b80c3482994f735f092a1e01fa1009bac4109
|
32f7ec3aaaf32b5074536a615cb9cd5c28bd499c
|
refs/heads/master
| 2022-04-04T19:27:05.747852
| 2020-02-28T11:17:48
| 2020-02-28T11:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
# -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
    # Identity transform: placeholder hook where per-record cleaning for this
    # source would go; currently passes each record through unchanged.
    return data
if __name__ == '__main__':
    # Pull every record for this entity from the ZX_CJXW_HY collection,
    # run it through the (currently no-op) shuffle step, and print it.
    main_mongo = MongoClient(entity_code="ZX_CJXW_GJJRJG_GZW_DFSM", mongo_collection="ZX_CJXW_HY")
    data_list = main_mongo.main()
    for data in data_list:
        re_data = data_shuffle(data)
        print(re_data)
|
[
"499413642@qq.com"
] |
499413642@qq.com
|
595aa5a10ef9b0fb0a73ced16c54c8e221cf046c
|
2e1617aec5614ad695fd6ee8dfc0ffb77c54dad1
|
/sources/Yalkut Shimoni/Nach/Yalkut_Shimoni_on_Nach.py
|
3ca221c24e7c961394c09c1cbcfc94c0e0676dbc
|
[] |
no_license
|
bachrach44/Sefaria-Data
|
ad875a552b858828ca2bbd4bbf1da8363dfef038
|
b33d781c1bde12568caa01c19e5ad9ec874d160f
|
refs/heads/master
| 2020-12-14T18:50:44.616694
| 2015-10-19T13:59:18
| 2015-10-19T13:59:18
| 17,557,774
| 0
| 0
| null | 2015-08-24T20:59:26
| 2014-03-09T04:33:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
# -*- coding: utf-8 -*-
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
import pdb
import os
import sys
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, p)
os.environ['DJANGO_SETTINGS_MODULE'] = "sefaria.settings"
from local_settings import *
sys.path.insert(0, SEFARIA_PROJECT_PATH)
from sefaria.model import *
def post_index(index):
    # POST the serialized index definition to the Sefaria raw-index API.
    # NOTE(review): Python 2 code (print statement, urllib/urllib2,
    # `except X, e` syntax) — will not parse under Python 3.
    url = SEFARIA_SERVER+'api/v2/raw/index/'+index["title"].replace(" ", "_")
    indexJSON = json.dumps(index)
    values = {
        'json': indexJSON,
        'apikey': API_KEY
    }
    # Form-encode the payload; urllib2 sends a POST when data is provided.
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    try:
        response = urllib2.urlopen(req)
        print response.read()
    except HTTPError, e:
        print 'Error code: ', e.code
# Build the schema node for "Yalkut Shimoni on Nach": a depth-2 text
# addressed by Remez and Paragraph, with English and Hebrew titles.
root = JaggedArrayNode()
root.key = "yalkut_on_nach"
root.add_title("Yalkut Shimoni on Nach", "en", primary=True)
root.add_title(u"""ילקות שמעוני על נ״ח""", "he", primary=True)
root.depth = 2
root.sectionNames = ["Remez", "Paragraph"]
root.heSectionNames = [u"רמז", u"פסקה"]
root.addressTypes = ["Integer", "Integer"]
root.validate()
index = {
    "title": "Yalkut Shimoni on Nach",
    "categories": ["Midrash"],
    "schema": root.serialize()
}
# Upload the index definition to the configured Sefaria server.
post_index(index)
|
[
"skaplan@brandeis.edu"
] |
skaplan@brandeis.edu
|
7a378105c860099dd61297282e87b87d6ce4006d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/EHzL3v25wYp7E4AFC_5.py
|
8a5ec4b5bd16208bd51e95590a5ec8900a2b9c45
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
"""
Write a function that returns `True` if you can use the letters of the first
string to create the second string. Letters are **case sensitive**.
### Examples
can_build("aPPleAL", "PAL") ➞ True
can_build("aPPleAL", "apple") ➞ False
can_build("a", "") ➞ True
can_build("aa", "aaa") ➞ False
### Notes
Letters in the first string can be used only once.
"""
def can_build(s1, s2):
    """Return True if the letters of s1 suffice to build s2.

    Each letter of s1 may be used at most once; comparison is case
    sensitive. An empty s2 is always buildable.
    """
    # BUG FIX: the original tested `if i not in s2` (always False, since i is
    # drawn from s2) — the intended membership check against s1 is subsumed by
    # the count comparison below: a char absent from s1 has count 0.
    return all(s1.count(ch) >= s2.count(ch) for ch in set(s2))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
062d52021c8a30a2bffcf1ee2952f42480968f14
|
27c8fc9bc336c49dbe172df5774e786106b4d6b7
|
/api/migrations/0001_initial.py
|
eb1b71fb66d5a2dc1eaac76e0a337a3677b5772f
|
[] |
no_license
|
BunmiAdeleke123/blog
|
18169c3ab8b75742bf8bc3c73a20f25f02166533
|
5dcfb10cb844b62871d442f72cc37d136092adfe
|
refs/heads/main
| 2023-09-01T02:00:39.123781
| 2021-10-26T13:11:49
| 2021-10-26T13:11:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
# Generated by Django 3.2.8 on 2021-10-26 04:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Post model.
    initial = True
    dependencies = [
        # Post references the configured user model, so it must exist first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('slug', models.SlugField()),
                ('description', models.TextField()),
                ('date_added', models.DateField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ['-date_added'],
            },
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
5a38b04405d890de23e3283d6e0d1a7fbcc357bb
|
33a5bce52506b8c01ee250830f28aacc46382989
|
/accounts/forms.py
|
e0b7f5b79f689f4afce7274ef52e5b9dc3bbbe7c
|
[] |
no_license
|
stsh1119/django_request_trap
|
76ecac7709fc8c7847c479afa9b2cc384cf9cd0c
|
c55bfb8f0316f8ac03ffb7d60da48599289c0b81
|
refs/heads/main
| 2023-07-06T18:58:30.063801
| 2021-07-25T08:39:42
| 2021-07-25T08:39:42
| 380,741,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form bound to the project's swappable user model."""
    class Meta:
        model = get_user_model()
        fields = ('email', 'username', )
class CustomUserChangeForm(UserChangeForm):
    """Admin/profile edit form bound to the project's swappable user model."""
    class Meta:
        model = get_user_model()
        fields = ('email', 'username',)
|
[
"stshlaptop@gmail.com"
] |
stshlaptop@gmail.com
|
bfa0d79f8e6172fe2680868e91558d1789cb9387
|
cee4dd54ea44f91511a8b886b2d763604afeb95d
|
/app/emails.py
|
7cada0fc646cae0a6c88583b4bd278ef9a973824
|
[] |
no_license
|
damnedsteven/emcn_wh
|
0520ebe12b8d986905e2d7d8f7eea530f69fba96
|
0180d47e8484e691b880433f3d07f6c3068477b8
|
refs/heads/master
| 2021-01-19T12:29:25.767510
| 2017-04-13T02:09:18
| 2017-04-13T02:09:18
| 88,033,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
from .decorators import async
from flask_mail import Message
from app import app, mail
from flask import render_template
from config import ADMINS
@async
def send_async_email(app, msg):
    # Presumably dispatched to a background thread by the @async decorator
    # (implementation in .decorators — confirm). Needs an explicit app
    # context because Flask-Mail reads its config from the current app.
    # NOTE(review): `async` became a reserved keyword in Python 3.7; this
    # module only parses on Python <= 3.6.
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    # Build a Flask-Mail message with both plain-text and HTML bodies and
    # hand it off to the asynchronous sender.
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    send_async_email(app, msg)
def follower_notification(followed, follower):
    # Notify `followed` by email that `follower` started following them;
    # text and HTML variants are rendered from templates.
    send_email("[microblog] %s is now following you!" % follower.username,
               ADMINS[0],
               [followed.email],
               render_template("follower_email.txt",
                               user=followed, follower=follower),
               render_template("follower_email.html",
                               user=followed, follower=follower))
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
5724646c2d8664888cbf6def9ea0bdcc3cc5374a
|
36a782b4fe282b96803458f9f6bf44355e4cf71f
|
/rotkehlchen/fval.py
|
bb601200d30b198e9dde67eb0d36edbb5ad6be60
|
[
"BSD-3-Clause"
] |
permissive
|
DUMIE505/rotki
|
23911887c7cc741a5d682fb90c98e09828b8c66f
|
046169bc068c7b29a271d6e80d978e02d0ea76ac
|
refs/heads/develop
| 2023-01-07T20:32:57.215529
| 2020-01-29T22:58:28
| 2020-01-31T08:58:03
| 237,394,542
| 0
| 0
|
BSD-3-Clause
| 2022-12-27T15:35:36
| 2020-01-31T09:04:50
| null |
UTF-8
|
Python
| false
| false
| 6,879
|
py
|
from decimal import Decimal, InvalidOperation
from typing import Any, Union
from rotkehlchen.errors import ConversionError
# Here even though we got __future__ annotations using FVal does not seem to work
AcceptableFValInitInput = Union[float, bytes, Decimal, int, str, 'FVal']
AcceptableFValOtherInput = Union[int, 'FVal']
class FVal():
    """A value to represent numbers for financial applications. At the moment
    we use the python Decimal library but the abstraction will help us change the
    underlying implementation if needed.
    At the moment we do not allow any operations against floating points. Even though
    floating points could be converted to Decimals before each operation we will
    use this restriction to make sure floating point numbers are rooted from the codebase first.
    """
    # Single slot keeps instances small; `num` holds the underlying Decimal.
    __slots__ = ('num',)
    def __init__(self, data: AcceptableFValInitInput):
        try:
            if isinstance(data, float):
                # Stringify first so the Decimal matches the float's repr
                # rather than its exact binary expansion.
                self.num = Decimal(str(data))
            elif isinstance(data, bytes):
                # assume it's an ascii string and try to decode the bytes to one
                self.num = Decimal(data.decode())
            elif isinstance(data, bool):
                # This elif has to come before the isinstance(int) check due to
                # https://stackoverflow.com/questions/37888620/comparing-boolean-and-int-using-isinstance
                raise ValueError(f'Invalid type bool for data given to FVal constructor')
            elif isinstance(data, (Decimal, int, str)):
                self.num = Decimal(data)
            elif isinstance(data, FVal):
                self.num = data.num
            else:
                raise ValueError(f'Invalid type {type(data)} of data given to FVal constructor')
        except InvalidOperation:
            # Decimal() rejected the input; normalize to ValueError.
            # NOTE(review): the implicitly concatenated literals below are
            # missing a separating space ("...FVal.Found {}.").
            raise ValueError(
                'Expected string, int, float, or Decimal to initialize an FVal.'
                'Found {}.'.format(type(data)),
            )
    def __str__(self) -> str:
        return str(self.num)
    def __repr__(self) -> str:
        return 'FVal({})'.format(str(self.num))
    # --- Comparisons: delegate to Decimal.compare_signal, which returns
    # Decimal('-1'), Decimal('0') or Decimal('1').
    def __gt__(self, other: AcceptableFValOtherInput) -> bool:
        evaluated_other = evaluate_input(other)
        return self.num.compare_signal(evaluated_other) == Decimal('1')
    def __lt__(self, other: AcceptableFValOtherInput) -> bool:
        evaluated_other = evaluate_input(other)
        return self.num.compare_signal(evaluated_other) == Decimal('-1')
    def __le__(self, other: AcceptableFValOtherInput) -> bool:
        evaluated_other = evaluate_input(other)
        return self.num.compare_signal(evaluated_other) in (Decimal('-1'), Decimal('0'))
    def __ge__(self, other: AcceptableFValOtherInput) -> bool:
        evaluated_other = evaluate_input(other)
        return self.num.compare_signal(evaluated_other) in (Decimal('1'), Decimal('0'))
    def __eq__(self, other: object) -> bool:
        # NOTE(review): defining __eq__ without __hash__ makes FVal unhashable;
        # confirm instances are never used as dict keys or set members.
        evaluated_other = evaluate_input(other)
        return self.num.compare_signal(evaluated_other) == Decimal('0')
    # --- Arithmetic: each operator delegates to Decimal and wraps the result
    # back into an FVal.
    def __add__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__add__(evaluated_other))
    def __sub__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__sub__(evaluated_other))
    def __mul__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__mul__(evaluated_other))
    def __truediv__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__truediv__(evaluated_other))
    def __floordiv__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__floordiv__(evaluated_other))
    def __pow__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__pow__(evaluated_other))
    # --- Reflected arithmetic, for `int <op> FVal` expressions.
    def __radd__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__radd__(evaluated_other))
    def __rsub__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__rsub__(evaluated_other))
    def __rmul__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__rmul__(evaluated_other))
    def __rtruediv__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__rtruediv__(evaluated_other))
    def __rfloordiv__(self, other: AcceptableFValOtherInput) -> 'FVal':
        evaluated_other = evaluate_input(other)
        return FVal(self.num.__rfloordiv__(evaluated_other))
    def __float__(self) -> float:
        return float(self.num)
    # --- Unary operands
    def __neg__(self) -> 'FVal':
        return FVal(self.num.__neg__())
    def __abs__(self) -> 'FVal':
        return FVal(self.num.copy_abs())
    # --- Other operations
    def fma(self, other: AcceptableFValOtherInput, third: AcceptableFValOtherInput) -> 'FVal':
        """
        Fused multiply-add. Return self*other+third with no rounding of the
        intermediate product self*other
        """
        evaluated_other = evaluate_input(other)
        evaluated_third = evaluate_input(third)
        return FVal(self.num.fma(evaluated_other, evaluated_third))
    def to_percentage(self, precision: int = 4) -> str:
        # Format as a percentage string, e.g. Decimal('0.5') -> '50.0000%'.
        return '{:.{}%}'.format(self.num, precision)
    def to_int(self, exact: bool) -> int:
        """
        Tries to convert to int, If `exact` is true then it will convert only if
        it is a whole decimal number; i.e.: if it has got nothing after the decimal point
        Raises:
            ConversionError: If exact was True but the FVal is actually not an exact integer.
        """
        if exact and self.num.to_integral_exact() != self.num:
            raise ConversionError(f'Tried to ask for exact int from {self.num}')
        return int(self.num)
    def is_close(self, other: AcceptableFValInitInput, max_diff: str = "1e-6") -> bool:
        # Absolute-difference closeness check; `other` may be any accepted
        # FVal initializer.
        evaluated_max_diff = FVal(max_diff)
        if not isinstance(other, FVal):
            other = FVal(other)
        diff_num = abs(self.num - other.num)
        return diff_num <= evaluated_max_diff.num
def evaluate_input(other: Any) -> Union[Decimal, int]:
    """Evaluate 'other' and return its Decimal representation"""
    # Unwrap FVal instances; plain ints pass through unchanged.
    if isinstance(other, FVal):
        return other.num
    if not isinstance(other, int):
        raise NotImplementedError("Expected either FVal or int.")
    return other
|
[
"lefteris@refu.co"
] |
lefteris@refu.co
|
cf396a701c4fabd74562aaed43cb8505fbdc5b23
|
f95d2646f8428cceed98681f8ed2407d4f044941
|
/numpydemo/01/day01/demo02_ndarray.py
|
d7decab2c5e6ccd28291779733520ff24d15eec1
|
[] |
no_license
|
q2806060/python-note
|
014e1458dcfa896f2749c7ebce68b2bbe31a3bf8
|
fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983
|
refs/heads/master
| 2020-08-18T01:12:31.227654
| 2019-10-17T07:40:40
| 2019-10-17T07:40:40
| 215,731,114
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
"""
demo02_ndarray.py
"""
import numpy as np
a = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8]])
print(a, a.shape)
# start 1, stop 10 (exclusive), step 2
b = np.arange(1, 10, 2)
print(b)
# array of five zeros
c = np.zeros(5, dtype='int32')
print(c, c.dtype)
# array of five ones
d = np.ones(5, dtype='int32')
print(d, d.dtype)
# e and f share a's shape/dtype: e all zeros, f all ones
e = np.zeros_like(a)
f = np.ones_like(a)
print(e)
print(f / 5)
|
[
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] |
C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn
|
ae1ad986a8d66093295f5b63687111f255f1453e
|
521efcd158f4c69a686ed1c63dd8e4b0b68cc011
|
/airflow/providers/google/cloud/transfers/gdrive_to_local.py
|
113a389c6fbf22314e9345d4caac708ef5871c83
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
coutureai/RaWorkflowOrchestrator
|
33fd8e253bfea2f9a82bb122ca79e8cf9dffb003
|
cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f
|
refs/heads/main
| 2022-10-01T06:24:18.560652
| 2021-12-29T04:52:56
| 2021-12-29T04:52:56
| 184,547,783
| 5
| 12
|
Apache-2.0
| 2022-11-04T00:02:55
| 2019-05-02T08:38:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,834
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.suite.hooks.drive import GoogleDriveHook
class GoogleDriveToLocalOperator(BaseOperator):
    """
    Writes a Google Drive file into local Storage.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleDriveToLocalOperator`
    :param output_file: Path to downloaded file
    :type output_file: str
    :param folder_id: The folder id of the folder in which the Google Drive file resides
    :type folder_id: str
    :param file_name: The name of the file residing in Google Drive
    :type file_name: str
    :param drive_id: Optional. The id of the shared Google Drive in which the file resides.
    :type drive_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """
    # Attributes Airflow renders with Jinja templating before execute().
    template_fields = [
        "output_file",
        "folder_id",
        "file_name",
        "drive_id",
        "impersonation_chain",
    ]
    def __init__(
        self,
        *,
        output_file: str,
        file_name: str,
        folder_id: str,
        drive_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.output_file = output_file
        self.folder_id = folder_id
        self.drive_id = drive_id
        self.file_name = file_name
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
    def execute(self, context):
        """Look up the file by name/folder on Drive and stream it to output_file."""
        self.log.info('Executing download: %s into %s', self.file_name, self.output_file)
        gdrive_hook = GoogleDriveHook(
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        # Resolve the Drive file id from its folder and name.
        file_metadata = gdrive_hook.get_file_id(
            folder_id=self.folder_id, file_name=self.file_name, drive_id=self.drive_id
        )
        # Binary mode: Drive content is downloaded as raw bytes.
        with open(self.output_file, "wb") as file:
            gdrive_hook.download_file(file_id=file_metadata["id"], file_handle=file)
|
[
"noreply@github.com"
] |
coutureai.noreply@github.com
|
906ab9e89725e37b6440b90331db571633a5fd2f
|
8bbeb7b5721a9dbf40caa47a96e6961ceabb0128
|
/python3/396.Rotate Function(旋转函数).py
|
5a9f84b92f2c5deadb136fc43540cab6fbb4e3a4
|
[
"MIT"
] |
permissive
|
lishulongVI/leetcode
|
bb5b75642f69dfaec0c2ee3e06369c715125b1ba
|
6731e128be0fd3c0bdfe885c1a409ac54b929597
|
refs/heads/master
| 2020-03-23T22:17:40.335970
| 2018-07-23T14:46:06
| 2018-07-23T14:46:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,078
|
py
|
"""
<p>
Given an array of integers <code>A</code> and let <i>n</i> to be its length.
</p>
<p>
Assume <code>B<sub>k</sub></code> to be an array obtained by rotating the array <code>A</code> <i>k</i> positions clock-wise, we define a "rotation function" <code>F</code> on <code>A</code> as follow:
</p>
<p>
<code>F(k) = 0 * B<sub>k</sub>[0] + 1 * B<sub>k</sub>[1] + ... + (n-1) * B<sub>k</sub>[n-1]</code>.</p>
<p>Calculate the maximum value of <code>F(0), F(1), ..., F(n-1)</code>.
</p>
<p><b>Note:</b><br />
<i>n</i> is guaranteed to be less than 10<sup>5</sup>.
</p>
<p><b>Example:</b>
<pre>
A = [4, 3, 2, 6]
F(0) = (0 * 4) + (1 * 3) + (2 * 2) + (3 * 6) = 0 + 3 + 4 + 18 = 25
F(1) = (0 * 6) + (1 * 4) + (2 * 3) + (3 * 2) = 0 + 4 + 6 + 6 = 16
F(2) = (0 * 2) + (1 * 6) + (2 * 4) + (3 * 3) = 0 + 6 + 8 + 9 = 23
F(3) = (0 * 3) + (1 * 2) + (2 * 6) + (3 * 4) = 0 + 2 + 12 + 12 = 26
So the maximum value of F(0), F(1), F(2), F(3) is F(3) = 26.
</pre>
</p><p>给定一个长度为 <em>n</em> 的整数数组 <code>A</code> 。</p>
<p>假设 <code>B<sub>k</sub></code> 是数组 <code>A</code> 顺时针旋转 <em>k</em> 个位置后的数组,我们定义 <code>A</code> 的“旋转函数” <code>F</code> 为:</p>
<p><code>F(k) = 0 * B<sub>k</sub>[0] + 1 * B<sub>k</sub>[1] + ... + (n-1) * B<sub>k</sub>[n-1]</code>。</p>
<p>计算<code>F(0), F(1), ..., F(n-1)</code>中的最大值。</p>
<p><strong>注意:</strong><br />
可以认为<em> n</em> 的值小于 10<sup>5</sup>。</p>
<p><strong>示例:</strong></p>
<pre>
A = [4, 3, 2, 6]
F(0) = (0 * 4) + (1 * 3) + (2 * 2) + (3 * 6) = 0 + 3 + 4 + 18 = 25
F(1) = (0 * 6) + (1 * 4) + (2 * 3) + (3 * 2) = 0 + 4 + 6 + 6 = 16
F(2) = (0 * 2) + (1 * 6) + (2 * 4) + (3 * 3) = 0 + 6 + 8 + 9 = 23
F(3) = (0 * 3) + (1 * 2) + (2 * 6) + (3 * 4) = 0 + 2 + 12 + 12 = 26
所以 F(0), F(1), F(2), F(3) 中的最大值是 F(3) = 26 。
</pre>
<p>给定一个长度为 <em>n</em> 的整数数组 <code>A</code> 。</p>
<p>假设 <code>B<sub>k</sub></code> 是数组 <code>A</code> 顺时针旋转 <em>k</em> 个位置后的数组,我们定义 <code>A</code> 的“旋转函数” <code>F</code> 为:</p>
<p><code>F(k) = 0 * B<sub>k</sub>[0] + 1 * B<sub>k</sub>[1] + ... + (n-1) * B<sub>k</sub>[n-1]</code>。</p>
<p>计算<code>F(0), F(1), ..., F(n-1)</code>中的最大值。</p>
<p><strong>注意:</strong><br />
可以认为<em> n</em> 的值小于 10<sup>5</sup>。</p>
<p><strong>示例:</strong></p>
<pre>
A = [4, 3, 2, 6]
F(0) = (0 * 4) + (1 * 3) + (2 * 2) + (3 * 6) = 0 + 3 + 4 + 18 = 25
F(1) = (0 * 6) + (1 * 4) + (2 * 3) + (3 * 2) = 0 + 4 + 6 + 6 = 16
F(2) = (0 * 2) + (1 * 6) + (2 * 4) + (3 * 3) = 0 + 6 + 8 + 9 = 23
F(3) = (0 * 3) + (1 * 2) + (2 * 6) + (3 * 4) = 0 + 2 + 12 + 12 = 26
所以 F(0), F(1), F(2), F(3) 中的最大值是 F(3) = 26 。
</pre>
"""
class Solution:
    def maxRotateFunction(self, A):
        """Return max(F(0), ..., F(n-1)) where F(k) = sum(i * B_k[i]).

        The original stub had no implementation (returned None). Uses the
        recurrence F(k) = F(k-1) + sum(A) - n * A[n-k], giving O(n) time
        and O(1) extra space. Returns 0 for an empty array.

        :type A: List[int]
        :rtype: int
        """
        if not A:
            return 0
        n = len(A)
        total = sum(A)
        # F(0) computed directly.
        current = sum(i * v for i, v in enumerate(A))
        best = current
        # Each rotation shifts every weight up by 1 (adding `total`) except
        # the element that wraps around, which loses a weight of n.
        for k in range(1, n):
            current += total - n * A[n - k]
            if current > best:
                best = current
        return best
[
"lishulong@wecash.net"
] |
lishulong@wecash.net
|
fdfe2f9a3d3e3b660f7c7340ac430fa1a6359434
|
0b88a8e39e894fe72fd389505bdad786a6713439
|
/lib/setup3.py
|
51f4b3fbb0457190d4eb492ba1a479eb2d2b9855
|
[
"Apache-2.0"
] |
permissive
|
Trevol/pytorch-retinanet
|
d61a76c4a1cc14667ee0149440da865a8628d81f
|
e9c87a6c195d814d19edeca19166a5559df7da4a
|
refs/heads/master
| 2020-05-24T17:41:33.785410
| 2019-05-29T08:52:39
| 2019-05-29T08:52:39
| 187,391,911
| 0
| 0
| null | 2019-05-18T18:23:50
| 2019-05-18T18:23:50
| null |
UTF-8
|
Python
| false
| false
| 5,010
|
py
|
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext
import subprocess
import numpy as np
import os
from os.path import join as pjoin
def find_in_path(name, path):
    """Return the absolute path of `name` found on `path`, or None.

    `path` is a PATH-style string of directories separated by os.pathsep.
    """
    # Adapted fom
    # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    for directory in path.split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system
    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.
    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.
    """
    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        # Derive the toolkit root from .../bin/nvcc.
        home = os.path.dirname(os.path.dirname(nvcc))
    cudaconfig = {'home':home, 'nvcc':nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # Fail fast if any expected toolkit directory is missing.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig
# Resolve the CUDA toolkit once at import time; raises if nvcc is absent.
CUDA = locate_cuda()
try:
    numpy_include = np.get_include()
except AttributeError:
    # get_numpy_include() is the pre-1.2 numpy spelling; kept for old installs.
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.
    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kindof like a wierd functional
    subclassing going on."""
    # tell the compiler it can processes .cu
    self.src_extensions.append('.cu')
    # save references to the default compiler_so and _comple methods
    default_compiler_so = self.compiler_so
    super = self._compile
    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so
    # inject our redefined _compile method into the class
    # NOTE(review): `super` here shadows the builtin — it is the saved
    # original _compile bound method, not Python's super().
    self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
    """build_ext variant that patches the compiler to route .cu files to nvcc."""
    def build_extensions(self):
        # Monkey-patch the compiler first, then defer to the stock build.
        customize_compiler_for_nvcc(self.compiler)
        super().build_extensions()
# GPU NMS extension: mixes a CUDA kernel (.cu) with a Cython wrapper (.pyx);
# the per-compiler arg dict below is consumed by customize_compiler_for_nvcc.
ext_modules = [Extension('nms.gpu_nms',
        ['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
        library_dirs=[CUDA['lib64']],
        libraries=['cudart'],
        language='c++',
        runtime_library_dirs=[CUDA['lib64']],
        # this syntax is specific to this build system
        # we're only going to use certain compiler args with nvcc and not with
        # gcc the implementation of this trick is in customize_compiler() below
        extra_compile_args={'gcc': ["-Wno-unused-function"],
                            'nvcc': ['-arch=sm_35',
                                     '--ptxas-options=-v',
                                     '-c',
                                     '--compiler-options',
                                     "'-fPIC'"]},
        include_dirs = [numpy_include, CUDA['include']]
        )]
setup(
    name='fast_rcnn',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
# python setup3.py build_ext --inplace
|
[
"iam.vladimir.tretyak@gmail.com"
] |
iam.vladimir.tretyak@gmail.com
|
90751ed36f2869cf56e4df448cb42533a8bd624c
|
fcde32709c62b8ee86da459bb7c8eee52c848118
|
/code/day17/作业00.py
|
18b8f5c56d28eafa61ce240cdb8b8aa5ca0c34f6
|
[] |
no_license
|
klaus2015/py_base
|
6b92d362c3d7dc0e09205a037f4d580381dac94d
|
ec32c731c1c2f6a0dab87f1d167397e4fa86b8de
|
refs/heads/master
| 2022-07-28T15:49:30.383648
| 2020-05-11T15:31:43
| 2020-05-11T15:31:43
| 261,777,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
"""
3. 定义敌人类(姓名,攻击力,防御力,血量)
创建敌人列表,使用list_helper实现下列功能.
(1) 查找姓名是"灭霸"的敌人
(2) 查找攻击力大于10的所有敌人
(3) 查找活的敌人数量
"""
from common.list_helper import *
class Enemy:
    """An enemy with a name, hit points, attack power and defense."""
    def __init__(self, name, hp, basic_damage, defensive):
        self.name = name
        self.hp = hp
        self.basic_damage = basic_damage
        self.defensive = defensive
    def __str__(self):
        # NOTE(review): the doubled commas in the format look accidental,
        # but the output is preserved exactly as the original produced it.
        stats = (self.name, self.hp, self.basic_damage, self.defensive)
        return "%s,%d,,%d,,%d" % stats
# Sample enemy roster used for the ListHelper exercises below.
list_enemy = [
    Enemy("红骷髅", 200, 50, 5),
    Enemy("灭霸", 500, 150, 20),
    Enemy("海拉", 250, 100, 6),
    Enemy("奥创", 0, 100, 12),
    Enemy("蜘蛛侠", 0, 80, 11),
    Enemy("成昆",80,30,10)
]
# re = ListHelper.find_single(list_enemy,lambda item:item.name =="灭霸")
# print(re)
# (2) Find all enemies with attack power greater than 10.
re = ListHelper.find_all(list_enemy,lambda item:item.basic_damage > 10)
# result = list(re)
# for item in result:
#     print(item)
print("-------------")
for item in re:
    print(item)
# (3) Count the enemies that are still alive (hp > 0).
re = ListHelper.get_count(list_enemy,lambda item:item.hp > 0)
print(re)
# Check whether an enemy named "成昆" exists in the list.
re = ListHelper.is_exits(list_enemy,lambda item:item.name == "成昆")
print(re)
# Check whether any enemy has attack power < 5 or defense < 10.
re = ListHelper.is_exits(list_enemy,lambda item:item.basic_damage < 5 or item.defensive <10)
print(re)
|
[
"598467866@qq.com"
] |
598467866@qq.com
|
5f42709ff47192bb9f442f01303bd687ae7209b6
|
7f8db5b974a747632729d16c431de7aca007af00
|
/0x03-python-data_structures/5-no_c.py
|
2a1bab55ec3424fa4324cbf2cbf8bae79f944bf2
|
[] |
no_license
|
thomasmontoya123/holbertonschool-higher_level_programming
|
6f5ceb636167efba1e36ed2dee7bf83b458f6751
|
48b7c9dccac77ccb0f57da1dc1d150f356612b13
|
refs/heads/master
| 2020-07-22T22:31:13.744490
| 2020-02-13T22:54:17
| 2020-02-13T22:54:17
| 207,351,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
#!/usr/bin/python3
def no_c(my_string):
    """Return a copy of my_string with every 'c' and 'C' removed."""
    return "".join(ch for ch in my_string if ch not in "cC")
|
[
"tomasmontoya123@gmail.com"
] |
tomasmontoya123@gmail.com
|
c5f0c7f55c1bb473ee3127e3fb4c8ada7b3f9263
|
32cd5452417a6637f5e087c1a0c2c6405fbaf915
|
/src/app/share/caheFiles.py
|
bf976ef14e20e73e12adaf7d364be8fe1b80b256
|
[] |
no_license
|
549982170/SZLife_assisttant
|
766bedff61cabe73513a449525f57c8bb5a9afb6
|
014ebc526a671e3b3c972d476ba29439fd7decbf
|
refs/heads/master
| 2021-01-01T03:58:45.182066
| 2018-04-12T10:49:46
| 2018-04-12T10:49:46
| 97,097,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# coding:utf-8
# !/user/bin/python
'''
Created on 2018-04-12
@author: yizhiwu
In-memory cache objects.
'''
CACHE = {}  # cache of function return values
LOCK_DICT = {}  # lock/thread-pool registry — presumably one entry per cached key; verify at call sites
|
[
"549982170@qq.com"
] |
549982170@qq.com
|
8e50dd5499fa0d29586e52cf45552d1f423fc3df
|
f6c6b7ac39deb9f2cc0a7ef8fb6459ca09fc27dd
|
/handlers/async_tasks.py
|
8b33938195c156d334b41d736e78f0fca2f49e83
|
[
"MIT"
] |
permissive
|
via-jiang/celery-demo
|
3112b28de81b3bc477629d51453d069e814e8adc
|
48eaa732bc9270f6f54eab48ecc57ed98ee52e44
|
refs/heads/master
| 2021-09-13T18:27:42.893328
| 2018-04-16T01:48:41
| 2018-04-16T01:48:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/1/16 下午4:12
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : async_task.py
# @Software: PyCharm
import logging
from time import sleep
from manage import celery
__author__ = 'blackmatrix'
@celery.task
def async_send_email(send_from, send_to, subject, content):
    """
    Simulate sending an email asynchronously (Celery task).
    :param send_from: sender address
    :param send_to: recipient address(es)
    :param subject: email subject line
    :param content: email body
    :return: None
    """
    logging.info('模拟异步发送邮件的操作')
    # BUG FIX: the original called logging.info(send_from, send_to, subject,
    # content) — extra positional args without %s placeholders in the message
    # make the logging module fail when formatting the record.
    logging.info('%s %s %s %s', send_from, send_to, subject, content)
    # Sleep to simulate the time the real send would take.
    sleep(5)
@celery.task
def async_push_message(send_to, content):
    """
    Simulate pushing a message asynchronously (Celery task).
    :param send_to: recipient identifier
    :param content: message body
    :return: None
    """
    logging.info('模拟异步推送消息')
    logging.info('send_to: {}'.format(send_to))
    logging.info('content: {}'.format(content))
    # Sleep to simulate the time the real push would take.
    sleep(10)
if __name__ == '__main__':
pass
|
[
"codecolor@outlook.com"
] |
codecolor@outlook.com
|
e8dc54e0d29dcbd3078510bb079021898840fe06
|
26d5c795d8aa83bf5cb3f228675ff51e2f704f57
|
/scripts/tuple2json
|
eb4a6177f70c27c5ba91553a6c9515e04d247a90
|
[] |
no_license
|
binarymachines/mercury
|
8e13bb10c67a056fe88e02f558d73f1f1b95d028
|
db3e2425f4e77a44a97c740f7fff90312a1bd33f
|
refs/heads/master
| 2023-07-08T11:35:26.867494
| 2023-06-25T00:46:23
| 2023-06-25T00:46:23
| 94,708,610
| 2
| 6
| null | 2023-02-15T21:50:06
| 2017-06-18T19:31:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,595
|
#!/usr/bin/env python
'''
Usage:
tuple2json --delimiter <delimiter> --keys=<key>... [--skip <num_lines>] [--limit=<limit>]
tuple2json --delimiter <delimiter> --datafile <file> --keys=<key>... [--skip <num_lines>] [--limit=<limit>]
'''
'''
+mdoc+
tuple2json takes a list of tuples (represented as CSV records with the specified delimiter) and turns them
into a list of corresponding key:value JSON records whose keys are the comma-separated, ORDERED list of names
passed to the --keys parameter.
If the --datafile option is set, tuple2json reads its input records from the <file> parameter; if not,
it reads them from standard input.
tuple2json assumes a headlerless CSV file; it depends solely on the keys passed to it. If you are transforming a CSV
file which contains a header, you must either remove it before passing the data, or use the --skip parameter;
otherwise the first record it generates will be a nonsense record.
tuple2json is often used in conjunction with tuplegen to turn a set of lists into a single JSONL file.
+mdoc+
'''
import os, sys
import json
import docopt
from snap import common
from mercury.utils import read_stdin
def generate_dict_from_tuple(line: str, delimiter: str, keys: list):
    """Split *line* on *delimiter* and map the resulting tokens onto *keys*.

    Extra tokens are ignored; an IndexError is raised when there are more
    keys than tokens (same behavior as the original index-based loop).
    """
    # TODO: decide what to do if key count and token count do not match
    fields = line.split(delimiter)
    return {key: fields[position] for position, key in enumerate(keys)}
def main(args):
    """CLI driver: convert delimited records to JSON, one object per line.

    Reads from the --datafile path when given, otherwise from stdin, and
    honors the --skip and --limit options.

    :param args: docopt argument dictionary (see module usage string)
    """
    keys = args['--keys'][0].split(',')
    delimiter = args['<delimiter>']
    limit = int(args.get('--limit') or -1)           # -1 means "no limit"
    skip_count = int(args.get('<num_lines>') or -1)  # -1 means "skip nothing"

    if args['--datafile']:
        with open(args['<file>'], 'r') as f:
            _convert_lines(f, delimiter, keys, skip_count, limit)
    else:  # read data from standard input
        _convert_lines(read_stdin(), delimiter, keys, skip_count, limit)


def _convert_lines(lines, delimiter, keys, skip_count, limit):
    """Shared per-line loop (previously duplicated for file and stdin).

    Skipped lines count toward the limit, matching the original logic.
    """
    line_count = 0
    for line in lines:
        if line_count == limit:
            break
        if line_count < skip_count:
            line_count += 1
            continue
        record = generate_dict_from_tuple(line.strip(), delimiter, keys)
        print(json.dumps(record))
        line_count += 1
# Script entry point: parse CLI args against the module usage string.
if __name__ == '__main__':
    args = docopt.docopt(__doc__)
    main(args)
|
[
"binarymachineshop@gmail.com"
] |
binarymachineshop@gmail.com
|
|
c91f3a8f8c97bee6dfad05db9551a7fd74a02a78
|
59880d47a533cf1f45f927adafff22d5ffb4796a
|
/Python/fullStackDjango/fullStackBooks/apps/books/migrations/0002_auto_20170524_0029.py
|
9208e593e61e618fa8fa5d072fff9f74e252ea46
|
[] |
no_license
|
mightymcalpine/DojoAssignments
|
2bc7bb791630040dbb62da917a26b74bbdd574e4
|
9c0d80953f6ddbe840314f3d333b5f4590e0c9f4
|
refs/heads/master
| 2021-01-18T00:07:07.128554
| 2017-06-05T16:38:35
| 2017-06-05T16:38:35
| 84,257,743
| 0
| 0
| null | 2017-06-02T05:34:36
| 2017-03-07T23:47:27
|
Python
|
UTF-8
|
Python
| false
| false
| 383
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-24 00:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the model `booksDB` to `bookDB` (auto-generated by Django)."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('books', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='booksDB',
            new_name='bookDB',
        ),
    ]
|
[
"larscodus@gmail.com"
] |
larscodus@gmail.com
|
2fece7e214d7330dd51e1119ce24086e0cdf399f
|
02b9a9f40f56502c94d064654e030c521c2f325a
|
/django_task/utils.py
|
533c749435edbf9ae8321797f72884ec9c9c8e23
|
[
"MIT"
] |
permissive
|
samlex20/django-task
|
7c454922cf667853bc8678bfbb871e0f014b33b1
|
9966dd9a3366c10b3658298ff29c62250e5ec46f
|
refs/heads/master
| 2020-05-30T21:48:11.947777
| 2019-06-03T04:50:53
| 2019-06-03T04:50:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
from __future__ import unicode_literals
import uuid
import os
import time
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils import formats
from django.utils import timezone
from django.apps import apps
def get_object_by_uuid_or_404(model, uuid_pk):
    """
    Calls get_object_or_404(model, pk=uuid_pk)
    but also prevents "badly formed hexadecimal UUID string" unhandled exception
    """
    # Validate the UUID string up front: any parse error (ValueError,
    # TypeError, ...) becomes a clean 404 instead of a 500.
    try:
        uuid.UUID(uuid_pk)
    except Exception as e:
        raise Http404(str(e))
    return get_object_or_404(model, pk=uuid_pk)
def format_datetime(dt, include_time=True):
    """Render *dt* with the localized short date format, optionally
    followed by the time in 24-hour (military) notation.

    Returns '' for None. A naive datetime is formatted as-is: the failed
    localtime conversion is deliberately ignored.
    """
    if dt is None:
        return ''
    try:
        localized = timezone.localtime(dt)
    except ValueError:
        # Probably 'astimezone() cannot be applied to a naive datetime'
        localized = dt
    pieces = [formats.date_format(localized, use_l10n=True, format='SHORT_DATE_FORMAT')]
    if include_time:
        pieces.append(localized.strftime(' %H:%M:%S'))
    return ''.join(pieces)
def remove_file_and_cleanup(filepath):
    """Remove *filepath* (if it exists), then delete its parent directory
    when that directory is left empty.

    :param filepath: path of the file to remove
    """
    folder = os.path.dirname(filepath)
    # remove file
    if os.path.isfile(filepath):
        os.remove(filepath)
    # finally, remove the containing folder if nothing is left inside it
    # (truthiness check replaces the non-idiomatic `len(...) <= 0`)
    if os.path.isdir(folder) and not os.listdir(folder):
        os.rmdir(folder)
def get_model_from_id(model_cls, id, timeout=1000, retry_count=10):
    """Fetch the *model_cls* record with primary key *id*, retrying on miss.

    Polls up to *retry_count* times, pausing timeout/retry_count
    milliseconds after each failed lookup; returns None when the record
    never appears.
    """
    pause_ms = timeout / retry_count
    for _attempt in range(retry_count):
        try:
            return model_cls.objects.get(id=id)
        except model_cls.DoesNotExist:
            time.sleep(pause_ms / 1000.0)
    return None
def revoke_pending_tasks():
    """Mark every pending task row of every concrete Task subclass as REVOKED.

    :return: total number of rows updated across all task models
    """
    # Imported lazily to avoid a circular import at module load time.
    from .models import Task
    models = apps.get_models()
    # Concrete subclasses only; the abstract-ish base Task itself is excluded.
    task_models = [model for model in models if issubclass(model, Task) and model != Task]
    counter = 0
    for model in task_models:
        queryset = model.objects.filter(status__in=Task.TASK_STATUS_PENDING_VALUES)
        n = queryset.count()
        print('revoking %s objects (%d) ...' % (model.__name__, n))
        #model.objects.all().delete()
        # Bulk update instead of deleting, preserving task history.
        queryset.update(status='REVOKED')
        counter += n
    return counter
|
[
"morlandi@brainstorm.it"
] |
morlandi@brainstorm.it
|
45e666a317a1cd2194028b38c269dcc81f2249a7
|
02406958bffadbce13240ea1fb5013bc005fa332
|
/src/main/python/matrixext.py
|
2b40104f311e9b6f6405f5791190fb618f232aec
|
[] |
no_license
|
Jcamilorada/Networks
|
4b03209f0324e7a00c5236b7215158684ea969ee
|
1200529e501f9366bc38bb02b1d45f3079c976d3
|
refs/heads/master
| 2021-01-10T16:36:06.311138
| 2015-11-17T03:05:44
| 2015-11-17T03:05:44
| 45,878,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,925
|
py
|
from BeautifulSoup import BeautifulSoup
import urllib2
import re
from subprocess import call
from os import walk, path
from joblib import Parallel, delayed
# Browser-like request headers — presumably some target sites reject the
# default urllib2 user agent, hence the spoofed desktop Chrome profile.
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
       'Accept-Encoding': 'none',
       'Accept-Language': 'en-US,en;q=0.8',
       'Connection': 'keep-alive'}
def download_file(download_url):
req = urllib2.Request(download_url, headers=hdr)
try:
page = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e.fp.read()
index = download_url.rfind('/')
file_name = download_url[index + 1:]
f = open(file_name, 'w')
f.write(page.read())
f.close()
def download_pass_file(match_url):
req = urllib2.Request(match_url, headers=hdr)
try:
page = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e.fp.read()
html_page = page.read()
soup = BeautifulSoup(html_page)
for link in soup.findAll('a', attrs={'href': re.compile("passingdistribution\.pdf$")}):
download_file(link.get('href'))
def get_fifa_pass_distributions(championship_url, base_url):
req = urllib2.Request(championship_url, headers=hdr)
try:
page = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e.fp.read()
html_page = page.read()
soup = BeautifulSoup(html_page)
matches = []
for link in soup.findAll('a', attrs={'href': re.compile("^/worldcup/matches/round=")}):
download_pass_file(base_url + link.get('href'))
def export_fifa_pass_dist_to_csv(file_path, output_path):
    """Run tabula twice on one PDF, extracting the header area and the body
    area into two separate CSV files under *output_path*."""
    stem = file_path[file_path.rfind('/') + 1:file_path.rfind('.pdf')]
    header_csv = output_path + 'header_' + stem + '.csv'
    body_csv = output_path + 'body_' + stem + '.csv'
    # The -a arguments are page-area coordinates: top strip vs. main table.
    call(['jruby -S tabula '+ file_path + ' -a 20.00,12.75,200.5,561 ' + '-o ' + header_csv], shell=True)
    call(['jruby -S tabula ' + file_path + ' -a 200.00,12.75,700.5,561 ' + '-o ' + body_csv], shell=True)
def export_fifa_dir(files_path, output_path):
    """Convert every PDF found under *files_path* into CSV files in
    *output_path*, processing the PDFs in parallel with joblib."""
    pdf_paths = [
        path.join(root, name)
        for root, dirs, files in walk(files_path)
        for name in files
        if 'pdf' in name
    ]
    n_jobs = 10
    Parallel(n_jobs=n_jobs, verbose=50)(
        delayed(export_fifa_pass_dist_to_csv)(pdf_path, output_path)
        for pdf_path in pdf_paths
    )
# Batch-convert all previously downloaded PDFs into CSV files.
# NOTE(review): hard-coded absolute paths — adjust for your checkout.
export_fifa_dir(
    '/Users/developer3/git/Networks/fifa_2014_pass_distributions/pdf_raw_data/',
    '/Users/developer3/git/Networks/fifa_2014_pass_distributions/csv/'
)
#get_fifa_pass_distributions('http://www.fifa.com/worldcup/archive/brazil2014/matches/index.html', 'http://www.fifa.com/')
|
[
"fico89@gmail.com"
] |
fico89@gmail.com
|
960761f93b8ca58af370503f84744e74126847fb
|
74f68049235709841416010ec1e18e8085762113
|
/.history/recursion_20191203122123.py
|
7190b3a32cacbf96d5a3743a23aa3f6f8bcaf7a3
|
[] |
no_license
|
camilooob/pythonisfun
|
749692517a6c6b147c428996c01fb3b2fa1aef14
|
55453e1f80f92c3756ee5f0338f93fc2a0d9beec
|
refs/heads/master
| 2020-09-21T21:59:44.308526
| 2020-01-20T17:41:56
| 2020-01-20T17:41:56
| 224,947,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
def rec_count(number):
    """Print the integers counting down from *number* to 0, then back up.

    FIX: the original recursed with `number + 1`, which moves away from the
    `number == 0` base case for any positive input and recurses forever.
    Counting down reaches the base case as the comment intends.
    """
    print(number)
    # Base case
    if number == 0:
        return
    rec_count(number - 1)  # recurse toward the base case
    print(number)
rec_count (5)
|
[
"camilobaq@hotmail.com"
] |
camilobaq@hotmail.com
|
3f06ec717874a0469c361153d673f147c29904fb
|
b5a6f10c886fba6584d2ac7b4a29c69975826dbb
|
/clients/python/pricemonitor_api_client/models/get_all_domains_v3_api_response.py
|
12e4f77837ed38236957ce51bf022e5fd7b9404f
|
[] |
no_license
|
Patagona/pricemonitor-clients
|
8c4f842ca3d4e459c77ac329ad488cb3e4c858bf
|
cf2d689bf9ed6ddea9501324cada918c3a88b4f8
|
refs/heads/master
| 2023-08-31T20:12:58.844253
| 2023-08-31T15:26:25
| 2023-08-31T15:26:25
| 279,618,794
| 1
| 1
| null | 2023-07-03T13:55:28
| 2020-07-14T15:09:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,671
|
py
|
# coding: utf-8
"""
Pricemonitor API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.0.6561
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pricemonitor_api_client.configuration import Configuration
class GetAllDomainsV3ApiResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'data': 'ComPatagonaPricemonitorShareApiGetAllDomainsV3'
    }

    attribute_map = {
        'data': 'data'
    }

    def __init__(self, data=None, local_vars_configuration=None):  # noqa: E501
        """GetAllDomainsV3ApiResponse - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._data = None
        self.discriminator = None

        # Assign through the property so the non-None validation below runs.
        self.data = data

    @property
    def data(self):
        """Gets the data of this GetAllDomainsV3ApiResponse.  # noqa: E501


        :return: The data of this GetAllDomainsV3ApiResponse.  # noqa: E501
        :rtype: ComPatagonaPricemonitorShareApiGetAllDomainsV3
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this GetAllDomainsV3ApiResponse.


        :param data: The data of this GetAllDomainsV3ApiResponse.  # noqa: E501
        :type: ComPatagonaPricemonitorShareApiGetAllDomainsV3
        """
        # Reject None only when client-side validation is enabled in config.
        if self.local_vars_configuration.client_side_validation and data is None:  # noqa: E501
            raise ValueError("Invalid value for `data`, must not be `None`")  # noqa: E501

        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GetAllDomainsV3ApiResponse):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, GetAllDomainsV3ApiResponse):
            return True

        return self.to_dict() != other.to_dict()
|
[
"jenkins@patagona.de"
] |
jenkins@patagona.de
|
7ffb45eecdac616fe9c5ce26f57f0bae55224092
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/number/test_recorder.py
|
635354b11760014111d91d22eb6242e3d9db539a
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
"""The tests for number recorder."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import patch
import pytest
from homeassistant.components import number
from homeassistant.components.number import ATTR_MAX, ATTR_MIN, ATTR_MODE, ATTR_STEP
from homeassistant.components.recorder import Recorder
from homeassistant.components.recorder.history import get_significant_states
from homeassistant.const import ATTR_FRIENDLY_NAME, Platform
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.recorder.common import async_wait_recording_done
@pytest.fixture(autouse=True)
async def number_only() -> None:
    """Enable only the number platform."""
    # Restrict the demo integration to the NUMBER platform so the recorder
    # test below only sees number entities.
    with patch(
        "homeassistant.components.demo.COMPONENTS_WITH_CONFIG_ENTRY_DEMO_PLATFORM",
        [Platform.NUMBER],
    ):
        yield
async def test_exclude_attributes(recorder_mock: Recorder, hass: HomeAssistant) -> None:
    """Test number registered attributes to be excluded."""
    assert await async_setup_component(hass, "homeassistant", {})
    await async_setup_component(
        hass, number.DOMAIN, {number.DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()
    now = dt_util.utcnow()
    # Advance time so the recorder flushes the demo entity states to storage.
    async_fire_time_changed(hass, now + timedelta(minutes=5))
    await hass.async_block_till_done()
    await async_wait_recording_done(hass)
    states = await hass.async_add_executor_job(
        get_significant_states, hass, now, None, hass.states.async_entity_ids()
    )
    assert len(states) > 1
    for entity_states in states.values():
        for state in entity_states:
            # min/max/step/mode must be stripped from recorded states;
            # friendly_name must be preserved.
            assert ATTR_MIN not in state.attributes
            assert ATTR_MAX not in state.attributes
            assert ATTR_STEP not in state.attributes
            assert ATTR_MODE not in state.attributes
            assert ATTR_FRIENDLY_NAME in state.attributes
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
2688ed15e9167a2616e98fcead68c08c2428e039
|
d53a274a61ffe894a6e5648edf86f78145f0af7b
|
/tests/modes/test_stream.py
|
921a3c083d2c4c932abd4ccd6c99093791ebc99f
|
[
"MIT"
] |
permissive
|
deresmos/delogger
|
8611d88714ffffddb5fa9bc12586fffb89fb8c11
|
c185e4fd844414d561f521103975b95bd31aff43
|
refs/heads/main
| 2021-06-02T23:10:02.814473
| 2020-10-19T13:53:27
| 2020-10-19T13:53:27
| 153,235,270
| 5
| 0
|
MIT
| 2020-10-19T13:53:29
| 2018-10-16T06:40:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
from pathlib import Path
from delogger import Delogger
from delogger.modes.stream import StreamColorDebugMode
from delogger.modes.stream import StreamDebugMode
from delogger.modes.stream import StreamInfoMode
from tests.lib.base import Assert
from tests.lib.base import DeloggerTestBase
class TestStreamMode(DeloggerTestBase):
    """Exercises the three stream modes: info, debug, and colored debug."""

    def _check_mode(self, name, mode, capsys, debug, is_color):
        """Shared body of the three tests (previously triplicated).

        Builds a Delogger named *name* with *mode* loaded, emits the
        standard log sequence, verifies the captured stream output, and
        asserts that stream-only modes never create the output directory.
        """
        delogger = Delogger(name)
        delogger.load_modes(mode)
        logger = delogger.get_logger()

        self.execute_log(logger)
        if debug:
            self.check_debug_stream_log(logger, capsys, is_color=is_color)
        else:
            self.check_normal_stream_log(logger, capsys, is_color=is_color)

        Assert._bool(not Path(self.OUTPUT_DIRPATH).is_dir())

    def test_stream_info_mode(self, capsys):
        self._check_mode("stream_info_mode", StreamInfoMode(), capsys,
                         debug=False, is_color=False)

    def test_stream_debug_mode(self, capsys):
        self._check_mode("stream_debug_mode", StreamDebugMode(), capsys,
                         debug=True, is_color=False)

    def test_stream_color_debug_mode(self, capsys):
        self._check_mode("stream_color_debug_mode", StreamColorDebugMode(), capsys,
                         debug=True, is_color=True)
|
[
"deresmos@gmail.com"
] |
deresmos@gmail.com
|
ceef2844a259de469756bc457bcfb2a756811678
|
8e1668e35a8df9968ab14d16db089b51dbe6dd51
|
/python/algorithms/sort/merge_sort.py
|
167273e24de284bb4cbd1cbfa418f619369e139c
|
[] |
no_license
|
Chalmiller/competitive_programming
|
f1ec0184d1ff247201522ab90ca8e66b3f326afc
|
b437080d1ba977c023baf08b7dc5c3946784e183
|
refs/heads/master
| 2021-03-24T05:11:59.383916
| 2020-08-24T22:07:41
| 2020-08-24T22:07:41
| 247,519,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
def merge(S1, S2, S):
    """Merge two sorted Python lists S1 and S2 into the pre-sized list S.

    On equal elements the one from S2 is placed first (the comparison
    `S1[i] < S2[j]` is strict), matching the original routine.
    """
    i = j = 0
    for k in range(len(S)):
        take_left = j == len(S2) or (i < len(S1) and S1[i] < S2[j])
        if take_left:
            S[k] = S1[i]
            i += 1
        else:
            S[k] = S2[j]
            j += 1
def merge_sort(S):
    """Sort the Python list S in place using recursive merge-sort."""
    n = len(S)
    if n < 2:
        # Zero or one element: nothing to do.
        return
    # Divide: copy each half so the recursion works on independent lists.
    half = n // 2
    left = S[:half]
    right = S[half:]
    # Conquer each half recursively.
    merge_sort(left)
    merge_sort(right)
    # Combine the sorted halves back into S.
    merge(left, right, S)
|
[
"chalmiller1@gmail.com"
] |
chalmiller1@gmail.com
|
c3adb838965981ec18e6903f217ac461b3ef093c
|
35fc084d330e62575c12bc714cbf414c082e9f8a
|
/ramdisk/target/common/usr/lib/python3.7/site-packages/typepy/checker/_ipaddress.py
|
4ae9e749ecdb997ae763955301b1fca4c387ae1e
|
[
"Python-2.0"
] |
permissive
|
BM1880-BIRD/bm1880-system-sdk
|
8de97c6c0985b3bee8b06fb5fd2ee8daec693665
|
eff2d6f5442676c04a221a62139864658208f57e
|
refs/heads/master
| 2022-04-08T09:20:47.919696
| 2020-03-09T02:43:08
| 2020-03-09T02:43:08
| 159,283,885
| 29
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import
from ._checker import (
TypeChecker,
TypeCheckerStrictLevel,
)
from ._common import isstring
class IpAddressTypeCheckerStrictLevel0(TypeCheckerStrictLevel):
    """Strictness level 0: a value is valid if it is (or converts to) an
    ipaddress.IPv4Address / IPv6Address instance."""

    def is_instance(self):
        # True when the raw value is already an ipaddress object.
        return self._is_ipaddress(self._value)

    def is_valid_after_convert(self, converted_value):
        # True when conversion produced an ipaddress object.
        return self._is_ipaddress(converted_value)

    @staticmethod
    def _is_ipaddress(value):
        # Imported lazily — presumably to defer the dependency until needed.
        import ipaddress

        return isinstance(
            value, (ipaddress.IPv4Address, ipaddress.IPv6Address))
class IpAddressTypeCheckerStrictLevel1(IpAddressTypeCheckerStrictLevel0):
    """Strictness level 1: like level 0, but string values are additionally
    excluded from matching."""

    def is_exclude_instance(self):
        # Strings are rejected at this level even if they would parse.
        return (
            isstring(self._value) or
            super(IpAddressTypeCheckerStrictLevel1, self).is_exclude_instance()
        )
class IpAddressTypeChecker(TypeChecker):
    """Facade that dispatches to the per-strict-level checker classes."""

    def __init__(self, value, strict_level):
        super(IpAddressTypeChecker, self).__init__(
            value=value,
            # Maps strictness level -> concrete checker implementation.
            checker_mapping={
                0: IpAddressTypeCheckerStrictLevel0,
                1: IpAddressTypeCheckerStrictLevel1,
            },
            strict_level=strict_level)
|
[
"haitao.suo@bitmain.com"
] |
haitao.suo@bitmain.com
|
a5b0fc1063e34fd3268bf0dbb5dee86d3b3a34f8
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/keras/testing_utils.py
|
57c5585363b515e8ffb00717a04f25b25e9cec10
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f948b539999a928461c50492838f0aa264bc8aa39b722055b4f4a8317d5e8e0b
size 25030
|
[
"github@cuba12345"
] |
github@cuba12345
|
9ae2fca88541a98fc212a099fea1c9bbf40bfec5
|
d4e573e8eae32db155fe5931b3e2dcd3aa48969b
|
/indigo/lib/python2.7/dist-packages/rocon_app_manager_msgs/srv/_Init.py
|
c91a36b46fa4aec26c1d464d23d1f93378458753
|
[] |
no_license
|
javierdiazp/myros
|
ee52b0a7c972d559a1a377f8de4eb37878b8a99b
|
7571febdfa881872cae6378bf7266deca7901529
|
refs/heads/master
| 2022-11-09T09:24:47.708988
| 2016-11-10T16:56:28
| 2016-11-10T16:56:28
| 73,733,895
| 0
| 1
| null | 2022-10-25T05:16:35
| 2016-11-14T18:19:06
|
C++
|
UTF-8
|
Python
| false
| false
| 7,687
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rocon_app_manager_msgs/InitRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class InitRequest(genpy.Message):
  # Auto-generated ROS service request message with a single string field `name`.
  _md5sum = "c1f3d28f1b044c871e6eff2e9fc3c667"
  _type = "rocon_app_manager_msgs/InitRequest"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """
string name
"""
  __slots__ = ['name']
  _slot_types = ['string']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       name

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(InitRequest, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.name is None:
        self.name = ''
    else:
      self.name = ''

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.name
      length = len(_x)
      # On Python 3 (or a py2 unicode value) encode before packing bytes.
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE: parameter `str` shadows the builtin; kept as generated.
    try:
      end = 0
      start = end
      end += 4
      # 4-byte little-endian length prefix, then the UTF-8 string payload.
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8')
      else:
        self.name = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8')
      else:
        self.name = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rocon_app_manager_msgs/InitResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class InitResponse(genpy.Message):
  # Auto-generated ROS service response message with a single boolean `result`.
  _md5sum = "eb13ac1f1354ccecb7941ee8fa2192e8"
  _type = "rocon_app_manager_msgs/InitResponse"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """bool result
"""
  __slots__ = ['result']
  _slot_types = ['bool']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       result

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(InitResponse, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.result is None:
        self.result = False
    else:
      self.result = False

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # Single unsigned byte carries the boolean.
      buff.write(_struct_B.pack(self.result))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 1
      (self.result,) = _struct_B.unpack(str[start:end])
      # Normalize the unpacked 0/1 byte back to a Python bool.
      self.result = bool(self.result)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_struct_B.pack(self.result))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 1
      (self.result,) = _struct_B.unpack(str[start:end])
      self.result = bool(self.result)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Cached packers: uint32 length prefix and a single unsigned byte.
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
class Init(object):
  # Generated service descriptor tying together the request/response classes
  # with the service type name and combined md5 checksum.
  _type          = 'rocon_app_manager_msgs/Init'
  _md5sum = 'ee91d68745ef4d7a247816a59dffedf2'
  _request_class  = InitRequest
  _response_class = InitResponse
|
[
"javier.diaz.palacios@gmail.com"
] |
javier.diaz.palacios@gmail.com
|
5be7597d9b79497f129d448a582e4da7ba3720fe
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r9/Gen/DecFiles/options/11164450.py
|
aa5dc4673014eec3a42cde5a51ad6a896418a48f
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/11164450.py generated: Fri, 27 Mar 2015 16:10:17
#
# Event Type: 11164450
#
# ASCII decay Descriptor: {[[B0]nos -> (D- => K+ pi- pi- pi0) pi+]cc, [[B0]os -> (D+ => K- pi+ pi+ pi0) pi-]cc}
#
# Configure the Gauss generator for event type 11164450:
# B0 -> (D- -> K+ pi- pi- pi0) pi+ (and charge conjugates),
# produced via repeated hadronization of the signal with Pythia.
from Configurables import Generation
Generation().EventType = 11164450
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# EvtGen decay model file plus a generator-level acceptance cut.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_Dpi,Kpipipi0=phsp,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
# Signal particle IDs: B0 (511) and anti-B0 (-511).
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
b79d7f133f726c28e34a6124100816702b418473
|
d0320f7a0d23cf6dbc93bed6af5cf22625a1998c
|
/QueensPuzzleFaster.py
|
afa69f2f96a4ebb46146776d6760744c448c41ad
|
[
"MIT"
] |
permissive
|
cmcclana/CIS2001-Fall2017
|
f8ba05cd20406bd3e743d1abd13320dfb0aaf234
|
d601f1ef87fb49852ce7a7223537baa5f3a3e664
|
refs/heads/master
| 2021-08-08T06:59:37.543336
| 2017-11-09T20:40:44
| 2017-11-09T20:40:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,766
|
py
|
class QueensPuzzle:
    """Count solutions to the N-queens puzzle with backtracking.

    Queens are placed one column at a time (the current column is
    `number_of_queens_on_board`); the boolean arrays `rows`,
    `diagonals_minus` (indexed by row - col) and `diagonals_plus`
    (indexed by row + col) make every conflict check O(1).
    """

    QUEEN = 'Q'
    SPACE = ' '

    def __init__(self, number_of_queens):
        self.number_of_queens_on_board = 0
        # BUG FIX: the original used `[[SPACE] * n] * n`, which repeats ONE
        # row list n times — writing any cell wrote that column in every
        # row. Solution counting still worked (Solve relies on the arrays
        # below), but Print() showed a corrupted board. Build independent
        # row lists instead.
        self.board = [[QueensPuzzle.SPACE] * number_of_queens
                      for _ in range(number_of_queens)]
        # 2n slots per diagonal array: row - col spans -(n-1)..n-1
        # (negative indices wrap around), row + col spans 0..2n-2.
        self.diagonals_minus = [True] * number_of_queens * 2
        self.diagonals_plus = [True] * number_of_queens * 2
        self.rows = [True] * number_of_queens
        self.total_number_of_solutions = 0
        self.total_queens = number_of_queens

    def Print(self):
        """Draw the current board, one '|'-separated cell per column."""
        for row in self.board:
            print('-' * (len(self.board) * 2 + 1))
            print('|', end="")
            for character in row:
                print(character, end='|')
            print()
        print('-' * (len(self.board) * 2 + 1))
        print()

    def IsRowOpen(self, row_number):
        """True when no placed queen occupies *row_number*."""
        return self.rows[row_number]

    def IsDiagonalOpen(self, row_number, col_number):
        """True when both diagonals through (row, col) are free."""
        return (self.diagonals_minus[row_number - col_number]
                and self.diagonals_plus[row_number + col_number])

    def CanPlaceQueen(self, row_number, col_number):
        """True when (row, col) conflicts with no already-placed queen."""
        return self.IsRowOpen(row_number) and self.IsDiagonalOpen(row_number, col_number)

    def Solve(self):
        """Backtracking search over columns; bumps total_number_of_solutions
        once per complete placement."""
        if self.number_of_queens_on_board == len(self.board):
            self.total_number_of_solutions += 1
            return
        col = self.number_of_queens_on_board
        for row in range(len(self.board)):
            if self.CanPlaceQueen(row, col):
                # Place the queen and mark its row/diagonals occupied.
                self.rows[row] = False
                self.diagonals_minus[row - col] = False
                self.diagonals_plus[row + col] = False
                self.board[row][col] = QueensPuzzle.QUEEN
                self.number_of_queens_on_board += 1
                self.Solve()
                # Backtrack: undo the placement exactly.
                self.number_of_queens_on_board -= 1
                self.board[row][col] = QueensPuzzle.SPACE
                self.rows[row] = True
                self.diagonals_minus[row - col] = True
                self.diagonals_plus[row + col] = True
# Interactive driver: read the board size, count all solutions, report.
number_of_queens = int( input("How many queens do you want to try and put on the board?"))
queensPuzzle = QueensPuzzle(number_of_queens)
queensPuzzle.Solve()
print("Total Solutions: %d" % queensPuzzle.total_number_of_solutions )
|
[
"eric.charnesky@careevolution.com"
] |
eric.charnesky@careevolution.com
|
b33d3bf4771d52646ef6acbedbd83ff655913fe4
|
8fbe463322c675d1e1b11adbf5ddfbca77a71c3b
|
/utils/code.py
|
56280c1e875f22c62163b1ab9e26a6ab05dc25c5
|
[] |
no_license
|
qhuydtvt/tk-poll
|
7cdcd456c3fdca226a31868297df891d8ba2a890
|
1a0b9898f7ae9e3ab9f4d81ee8ee737b581de0d6
|
refs/heads/master
| 2021-05-06T13:36:04.976284
| 2018-10-28T09:42:42
| 2018-10-28T09:42:42
| 113,245,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
from shortuuid import ShortUUID
import string
# Generated codes are drawn from uppercase ASCII letters only.
alphabet = string.ascii_uppercase
helper = ShortUUID(alphabet=alphabet)
def code(length):
    """Return a random code of *length* uppercase letters."""
    return helper.random(length=length)
def code_6():
    """Return a random 6-character uppercase code."""
    return code(6)
|
[
"qhuydtvt@gmail.com"
] |
qhuydtvt@gmail.com
|
fcd708af9bfc4b3f389194c77969d032fa8bedc7
|
b5e10b2cfe261beca4a275d5b4b562acaa12bd06
|
/zentral/contrib/monolith/events/__init__.py
|
c0fbc28569df9cce42affaf22b924388cb7df0d3
|
[
"Apache-2.0"
] |
permissive
|
mikemcdonald/zentral
|
29dc0a6b3284be00ccc99ca3eb4ac2f4474c12a7
|
4aa03937abfbcea6480aa04bd99f4da7b8dfc923
|
refs/heads/master
| 2021-06-24T16:16:26.216665
| 2017-09-11T18:53:21
| 2017-09-11T18:53:21
| 103,157,487
| 0
| 1
| null | 2017-09-11T16:02:43
| 2017-09-11T16:02:43
| null |
UTF-8
|
Python
| false
| false
| 3,038
|
py
|
import logging
from zentral.core.events.base import BaseEvent, EventMetadata, EventRequest, register_event_type
logger = logging.getLogger('zentral.contrib.monolith.events')
ALL_EVENTS_SEARCH_DICT = {"tag": "monolith"}
class MonolithMunkiRequestEvent(BaseEvent):
    # Fired for every munki request monolith serves; the "heartbeat" tag marks
    # it as a machine-liveness signal.
    event_type = "monolith_munki_request"
    tags = ["monolith", "heartbeat"]
register_event_type(MonolithMunkiRequestEvent)
class MonolithSyncCatalogsRequestEvent(BaseEvent):
    # Fired when a catalog sync is requested (see post_monolith_sync_catalogs_request).
    event_type = "monolith_sync_catalogs_request"
    tags = ["monolith"]
register_event_type(MonolithSyncCatalogsRequestEvent)
class MonolithUpdateCacheServerRequestEvent(BaseEvent):
    # Fired when a cache-server update is requested (see
    # post_monolith_cache_server_update_request).
    event_type = "monolith_update_cache_server_request"
    tags = ["monolith"]
register_event_type(MonolithUpdateCacheServerRequestEvent)
class MonolithRepositoryUpdateEvent(BaseEvent):
    # Fired for every repository change; the "action" terms aggregation
    # buckets update decisions for dashboard display.
    event_type = "monolith_repository_update"
    tags = ["monolith"]
    payload_aggregations = [
        ("action", {"type": "terms", "bucket_number": 4, "label": "Decisions"}),
    ]
register_event_type(MonolithRepositoryUpdateEvent)
# Utility functions
def post_monolith_munki_request(msn, user_agent, ip, **payload):
    """Post a munki request event for machine serial number *msn*."""
    MonolithMunkiRequestEvent.post_machine_request_payloads(msn, user_agent, ip, [payload])
def post_monolith_sync_catalogs_request(user_agent, ip):
    """Build and post a MonolithSyncCatalogsRequestEvent with an empty payload.

    The request context is attached only when a user agent or IP is known.
    """
    request = EventRequest(user_agent, ip) if (user_agent or ip) else None
    metadata = EventMetadata(MonolithSyncCatalogsRequestEvent.event_type,
                             request=request,
                             tags=MonolithSyncCatalogsRequestEvent.tags)
    MonolithSyncCatalogsRequestEvent(metadata, {}).post()
def post_monolith_cache_server_update_request(user_agent, ip, cache_server=None, errors=None):
    """Post a MonolithUpdateCacheServerRequestEvent.

    Payload status 0 = cache server serialized successfully; status 1 =
    validation errors (each attribute's error list flattened to one
    comma-joined string).
    """
    klass = MonolithUpdateCacheServerRequestEvent
    request = EventRequest(user_agent, ip) if (user_agent or ip) else None
    metadata = EventMetadata(klass.event_type,
                             request=request,
                             tags=klass.tags)
    if cache_server:
        payload = dict(cache_server.serialize(), status=0)
    else:
        # flatten errors
        payload = {"errors": {attr: ", ".join(err) for attr, err in errors.items()},
                   "status": 1}
    klass(metadata, payload).post()
def post_monolith_repository_updates(repository, payloads, request=None):
    """Post one MonolithRepositoryUpdateEvent per payload.

    The serialized repository info is merged into each payload; *index*
    preserves the original payload ordering within the event stream.
    """
    event_class = MonolithRepositoryUpdateEvent
    repository_serialized_info = repository.serialize_for_event()
    if request:
        request = EventRequest.build_from_request(request)
    for index, payload in enumerate(payloads):
        metadata = EventMetadata(event_class.event_type,
                                 index=index,
                                 request=request,
                                 tags=event_class.tags)
        payload.update({"repository": repository_serialized_info})
        event = event_class(metadata, payload)
        event.post()
|
[
"eric.falconnier@112hz.com"
] |
eric.falconnier@112hz.com
|
ff0eb43da6776fc0eac6b6f8c96830917c6afff1
|
ba602dc67ad7bb50133aeb312f3c6c54627b3dec
|
/data/3955/WA_py/508593.py
|
06675fcbb26d3e323e2a68115c546a11b7a4de8d
|
[] |
no_license
|
Dearyyyyy/TCG
|
0d21d89275906157372d775f33309ce337e6bc95
|
7b80de16de2d3f5d95a7c4ed95d45a9e38882e67
|
refs/heads/master
| 2020-12-27T23:19:44.845918
| 2020-02-04T01:59:23
| 2020-02-04T01:59:23
| 238,101,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# coding=utf-8
# Reads pairs of space-separated tokens until EOF and prints YES when the
# selected characters match, NO otherwise.
while True:
    a,b=input().split(" ")
    a=str(a)
    b=str(b)
    # c = first character of a (the break exits after one iteration).
    for j in a:
        c=j
        break
    # NOTE(review): no break here, so d ends up as the LAST character of b.
    # If the intent was to compare first characters, this loop needs a break.
    for p in b:
        d=p
    if c==d:
        print("YES")
    else:
        print("NO")
|
[
"543271544@qq.com"
] |
543271544@qq.com
|
45b72754c05463b85f4f32701ecd1784ceb7c7ed
|
fb7f1533b03d5ea083da8c7dce448c914f25d5a3
|
/bookbuilder/book/migrations/0003_chapter_image_paragraph.py
|
cc6c0e1cfbab3c14f2533278220074b4f6f12cee
|
[] |
no_license
|
Mark-Seaman/Book-Builder
|
7fccfe60afe10564e666fd125ae22b4362abfac2
|
18b5c14c11000da576ea16908b019de0aab50d0b
|
refs/heads/master
| 2023-01-09T14:23:25.641654
| 2020-11-11T22:52:39
| 2020-11-11T22:52:39
| 293,887,524
| 0
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
# Generated by Django 3.1.1 on 2020-09-24 17:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds the Chapter, Paragraph and Image
    models, each with a CASCADE foreign key to its parent (book or chapter)."""
    dependencies = [
        ('book', '0002_auto_20200915_1812'),
    ]
    operations = [
        migrations.CreateModel(
            name='Chapter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('chapter_num', models.IntegerField()),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.book')),
            ],
        ),
        migrations.CreateModel(
            name='Paragraph',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('order', models.IntegerField()),
                ('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.chapter')),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('src', models.CharField(max_length=100)),
                ('alt', models.CharField(max_length=100)),
                ('order', models.IntegerField()),
                ('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.chapter')),
            ],
        ),
    ]
|
[
"Mark.Seaman@imac.net"
] |
Mark.Seaman@imac.net
|
ceabf86ddd55006ceff709e98ee806d5ec567bd0
|
a54007706a09b387690f79fd7ffd889decad42f1
|
/day32/18_特殊的where方法.py
|
dd0736b5dc3881b37cdcb9223e7e78cb823cfa21
|
[] |
no_license
|
lvah/201903python
|
d425534544a1f91e5b80b5ff0de5ca34037fe6e9
|
1415fcb7697dfa2884d94dcd8963477e12fe0624
|
refs/heads/master
| 2020-07-06T16:45:37.882819
| 2019-09-08T10:13:07
| 2019-09-08T10:13:07
| 203,082,401
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
import pandas as pd
import numpy as np
import string
# NOTE: Series.where behaves completely differently from numpy.where.
s1 = pd.Series(np.arange(5), index=list(string.ascii_lowercase[:5]))
# print(s1.where(s1 > 3))
# where: elements NOT greater than 3 are replaced with 10.
print(s1.where(s1 > 3, 10))
# mask: elements greater than 3 are replaced with 10.
print(s1.mask(s1 > 3, 10))
|
[
"root@foundation0.ilt.example.com"
] |
root@foundation0.ilt.example.com
|
fdc71274dc682931542826201880a243b8d96ffc
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/DP/BitwiseORsOfSubarray.py
|
436f6200d70c69b2173aec53cf6d08c850f9a229
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,238
|
py
|
"""
We have an array A of non-negative integers.
For every (contiguous) subarray B = [A[i], A[i+1], ..., A[j]] (with i <= j), we take the bitwise OR of all the elements in B, obtaining a result A[i] | A[i+1] | ... | A[j].
Return the number of possible results. (Results that occur more than once are only counted once in the final answer.)
Example 1:
Input: [0]
Output: 1
Explanation:
There is only one possible result: 0.
Example 2:
Input: [1,1,2]
Output: 3
Explanation:
The possible subarrays are [1], [1], [2], [1, 1], [1, 2], [1, 1, 2].
These yield the results 1, 1, 2, 1, 3, 3.
There are 3 unique values, so the answer is 3.
Example 3:
Input: [1,2,4]
Output: 6
Explanation:
The possible results are 1, 2, 3, 4, 6, and 7.
对子数组进行或运算,最后结果是有多少个唯一的解。
思路是DP:
走的弯路:
一开始写的:
[1, 1, 2, 2, 4]
A[0] = {1}
基于A[0],判断是否在A[0]里,不在的话在添加,在的话就继承A[0]。
A[1] = {1}
A[2] = {1, 2, 3}
A[3] = {1, 2 ,3}
运行到这里都没什么错误,因为就碰巧进行了一次相邻的或运算。
A[4] = {1, 2, 3, 4, 5, 6, 7}
到了这里就有了错误,4不应该与这么多进行或运算。
这里就不知道怎么做了,如果要把上一次的结果也加到里面,怎么才能保证所进行的或运算不包含不相邻的两个点如:
[1, 2, 4]
不会进行 [1,4]的运算。
重新的梳理应该是:
[1]
A[0] = {1}
[ 1] [1, 1]
A[1] = {1}
注意,这里与上一个进行或运算,但不把上一个也存到A[2]里面,
[ 2] [1, 1, 2] [ 1, 2]
A[2] = {2, 3}
基于上一个,但不会将上一个的结果加到本次里影响最终运算。
---
最终输出结果时,进行一次全部的set整理。
测试地址:
https://leetcode.com/contest/weekly-contest-100/problems/bitwise-ors-of-subarrays/
Accepted.
"""
# NOTE(review): this is intentionally token-obfuscated exercise-template code
# (c.. = class, ___ = def/for, __ = if/in, r_ = return, a.. = append, etc.)
# and is NOT runnable Python as written; kept verbatim below.
c.. Solution o..
    ___ subarrayBitwiseORs A
        """
        :type A: List[int]
        :rtype: int
        """
        __ n.. A:
            r_ 0
        dp = [{A[0]}]
        ___ i __ r..(1, l..(A)):
            new = {A[i]}
            ___ j __ dp[i-1]:
                new.add(j|A[i])
            dp.a.. new)
        r_ l..(s...union(*dp))
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
9a22c52ec3248feaf483c3d56b8667dd2f1e8c3d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_coding.py
|
a254547576f9229c8cf3428e8dfbf7c636522ad4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
from xai.brain.wordbase.nouns._cod import _COD
# class header
class _CODING(_COD, ):
    # Auto-generated wordbase entry: the noun "coding", derived from the
    # stem class _COD with basic form "cod".
    def __init__(self,):
        _COD.__init__(self)
        self.name = "CODING"
        self.specie = 'nouns'
        self.basic = "cod"
        self.jsondata = {}
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9b803a8bef5841f4caa1aca59c39e166c2b74190
|
dcab6930a95a3c5530e9b9bfba0e495667c98599
|
/Data_Analysis/Data_camp_lecture/Manipulation(Pandas)/summarystatistics_ex4.py
|
e9819acf44e289ed025cdce3b8e86af349c1d586
|
[] |
no_license
|
wxlovolxw/GIWON-S-STUDY
|
7db1fb30dfc16c8bc60592d0696434f1482ecdde
|
6a622b5d372741b4f9d215f649235353f3e645cd
|
refs/heads/master
| 2023-06-23T06:05:03.058692
| 2021-07-26T05:37:35
| 2021-07-26T05:37:35
| 283,812,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# Sort sales_1_1 by date (ascending) so the cumulative columns accumulate in time order
sales_1_1 = sales_1_1.sort_values("date",ascending=True)
# Get the cumulative sum of weekly_sales, add as cum_weekly_sales col
sales_1_1["cum_weekly_sales"] = sales_1_1.weekly_sales.cumsum()
# Get the cumulative max of weekly_sales, add as cum_max_sales col
sales_1_1["cum_max_sales"] = sales_1_1.weekly_sales.cummax()
# See the columns you calculated
print(sales_1_1[["date", "weekly_sales", "cum_weekly_sales", "cum_max_sales"]])
|
[
"62921934+wxlovolxw@users.noreply.github.com"
] |
62921934+wxlovolxw@users.noreply.github.com
|
491113ea1a9970929b7916b82c56331f33432aee
|
abfcee924f57ee2011443703d4869f828e548910
|
/account_move_import/__openerp__.py
|
be0b63316135994bd728cd6f5bb5313233dd3bcd
|
[] |
no_license
|
Comunitea/external_modules
|
fb68cbf84cee1c6aa748f4f10e2999b9bb6aadf5
|
9718281e31b4a4f6395d8bed54adf02799df6221
|
refs/heads/8.0
| 2023-09-03T17:18:37.652200
| 2022-02-10T09:33:33
| 2022-02-10T09:33:33
| 49,890,295
| 4
| 33
| null | 2022-10-10T11:34:24
| 2016-01-18T16:47:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
#
# Copyright (c) 2009-2015 Noviat nv/sa (www.noviat.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest: declarative metadata only, no executable logic.
{
    'name': 'Account Move Import',
    'version': '8.0.0.4.1',
    'license': 'AGPL-3',
    'author': 'Noviat',
    'website': 'http://www.noviat.com',
    'category': 'Accounting & Finance',
    'summary': 'Import Accounting Entries',
    'depends': ['account'],
    'data': [
        'views/account_move.xml',
        'wizard/import_move_line_wizard.xml',
    ],
    'demo': [
        'demo/account_move.xml',
    ],
    'installable': True,
}
|
[
"omarcs7r@gmail.com"
] |
omarcs7r@gmail.com
|
1b735b2356f26bdb52cb9b1903b806556a163b23
|
6fce025097cebfd9d1dd37f6611e7fdfdbea90e6
|
/rainfields/band_quest/data_loader.py
|
1e4559f99a809d5068facd1ae4e57f3b0fd4f369
|
[] |
no_license
|
ANU-WALD/pluvi_pondus
|
ec0439d19acdcf4fdf712d6b14a1714297d661b2
|
ff8680f7115ab2cb75138bf6705abb59618e47d1
|
refs/heads/master
| 2021-07-01T14:32:14.501631
| 2020-08-22T09:41:28
| 2020-08-22T09:41:28
| 138,804,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,872
|
py
|
import xarray as xr
import tensorflow as tf
import numpy as np
import datetime
import os
import random
tf.compat.v1.enable_eager_execution()
class gen:
    """Callable generator for tf.data: yields (satellite band image,
    rainfields precipitation) pairs for every usable time step in one file."""
    def __call__(self, fname, band, jitter=False):
        # fname arrives as bytes from tf.data.Dataset.from_generator args.
        dsg = xr.open_dataset(fname.decode("utf-8"))
        for t in dsg.time:
            d = datetime.datetime.utcfromtimestamp(t.astype(int) * 1e-9)
            # Skip time steps with no matching rainfields file on disk.
            if not os.path.isfile("/data/pluvi_pondus/Rainfields/310_{}.prcp-c10.nc".format(d.strftime("%Y%m%d_%H%M%S"))):
                continue
            if np.datetime64(d) not in dsg.time.data:
                continue
            rf_fp = "/data/pluvi_pondus/Rainfields/310_{}.prcp-c10.nc".format(d.strftime("%Y%m%d_%H%M%S"))
            dsp = xr.open_dataset(rf_fp)
            # Every-other-pixel subsampling; the 2 / 402 offsets presumably
            # crop both grids to a shared 1024x1024 window -- TODO confirm.
            prec = dsp['precipitation'].data[2::2, 402::2]
            b = dsg['B{}'.format(band)].sel(time=t).data[2::2, 402::2]
            # Added Normalisation
            b = b / 273
            yield (b[:, :, None], prec[:, :, None])
        dsg.close()
def CompleteFNames(fnames, band):
    """Fill each path template in *fnames* with the band-group code for *band*.

    Bands are paired into file groups (8/12 -> '0812', 9/10 -> '0910',
    11/13 -> '1113', 14/15 -> '1415'); an unmapped band formats as None,
    matching the original behaviour.
    """
    band_groups = {
        8: '0812', 12: '0812',
        9: '0910', 10: '0910',
        11: '1113', 13: '1113',
        14: '1415', 15: '1415',
    }
    group = band_groups.get(band)
    return [template.format(group) for template in fnames]
def HimfieldsDataset(fnames, band, batch_size=2):
    """Build a tf.data pipeline of (band image, precipitation) pairs:
    one gen() stream per input file, interleaved, shuffled and batched."""
    fnames = CompleteFNames(fnames, band)
    print(fnames)
    ds = tf.data.Dataset.from_tensor_slices(fnames)
    # Draw one element per file round-robin (block_length=1) across all files.
    ds = ds.interleave(lambda fname: tf.data.Dataset.from_generator(gen(), (tf.float32, tf.float32), (tf.TensorShape([1024, 1024, 1]), tf.TensorShape([1024, 1024, 1])), args=(fname, band)), cycle_length=len(fnames), block_length=1, num_parallel_calls=None)
    ds = ds.shuffle(128, seed=None)
    ds = ds.batch(batch_size)
    return ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
|
[
"pablo.larraondo@anu.edu.au"
] |
pablo.larraondo@anu.edu.au
|
782fb8abbf0dca149bb77484bf1ff34f827f910f
|
52fc25b679bfb962a17c18420d16692706f8697e
|
/WebScrape3.py
|
9b2b919f854ffe3786389c0ee5b7495e3988c244
|
[] |
no_license
|
adanque/Data-Gathering-Techniques-using-APIs-and-Web-scraping-with-Python
|
15fecd4da2f71a065cff6be2f2aa67410bc0bb75
|
7d3dc205e4aba5bd9444513fdce465d31805f178
|
refs/heads/main
| 2023-03-15T08:42:27.212870
| 2021-03-21T21:08:01
| 2021-03-21T21:08:01
| 349,256,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
#from BeautifulSoup import BeautifulSoup
from urllib.request import urlopen
from bs4 import BeautifulSoup as bs

# Fetch and parse the Kitco gold index page.
# Fixes: the original called `urllib.urlopen(...)` although only `urlopen`
# was imported (NameError at runtime), and passed a URL with no scheme,
# which urlopen rejects with "unknown url type".
soup = bs(
    urlopen('https://www.kitco.com/kitco-gold-index.html').read(),
    'html.parser')
|
[
"adanque@gmail.com"
] |
adanque@gmail.com
|
6f46684631905e99275e3e85d175b2b16e8a63d0
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/03_model_fitting/merraRF882/391-tideGauge.py
|
17fa5644f4a0e20042369b1e1ddd12b589bc5156
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,456
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
    """
    run KFOLD method for regression

    Validates a Random Forest surge model per tide gauge: loads lagged MERRA
    predictors and daily-max surge series from disk, standardizes, reduces
    with PCA (95% variance), runs 10-fold CV with a RandomForestRegressor,
    and appends each gauge's correlation/RMSE stats to a CSV in dir_out.
    All results go to disk; the return value is not meaningful.
    """
    #defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
    #cd to the lagged predictors directory
    os.chdir(dir_in)
    x = 391
    y = 392
    #empty dataframe for model validation
    df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
                                 'num_95pcs','corrn', 'rmse'])
    #looping through
    for tg in range(x, y):
        os.chdir(dir_in)
        #filter only .csv files
        tgNames = []
        for file in glob.glob("*.csv"):
            tgNames.append(file)
        tg_name = sorted(tgNames)[tg]
        print(tg_name)
        ##########################################
        #check if this tg is already taken care of
        ##########################################
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            print("this tide gauge is already taken care of")
            # FIX: was `return "file already analyzed!"`, which aborted the
            # whole loop; skip just this gauge and keep going instead.
            continue
        os.chdir(dir_in)
        #load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)
        #add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
        #standardize predictor data
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
        #load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)
        #remove duplicated surge rows
        surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)
        #adjust surge time format to match that of pred
        time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
        time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
        #merge predictors and surge to find common time frame
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)
        #find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)
        #in case pred and surge don't overlap
        if pred_surge.shape[0] == 0:
            print('-'*80)
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue
        pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
                                                   pred_surge['date'])), \
                                          columns = ['date'])
        #prepare data for training/testing
        X = pred_surge.iloc[:,1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)
        #apply PCA
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)
        #apply 10 fold cross validation
        # NOTE(review): recent scikit-learn rejects random_state on KFold
        # without shuffle=True -- confirm against the pinned version.
        kf = KFold(n_splits=10, random_state=29)
        metric_corr = []; metric_rmse = [];
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]
            #train regression model
            rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
                                      min_samples_leaf = 1)
            rf.fit(X_train, y_train)
            #predictions
            predictions = rf.predict(X_test)
            #evaluation matrix - check p value; folds with insignificant
            #correlation are excluded from the averages below
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                print()
                metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
        #number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1] #number of principal components
        corr = np.mean(metric_corr)
        rmse = np.mean(metric_rmse)
        print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
              np.mean(metric_rmse), '\n')
        #original size and pca size of matrix added
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
                          'num_95pcs','corrn', 'rmse']
        df = pd.concat([df, new_df], axis = 0)
        #save df as cs - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)
#run script
validateRF()
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
488dc76a0442a7b08a9df5d702b3718760e75d5e
|
9851c3f47c1aa165bc0d239074fe238f82055875
|
/LeetCode/0412. Fizz Buzz/solution.py
|
bc2af790c4bad5cd76258e058f6079a7df8e9841
|
[
"Apache-2.0"
] |
permissive
|
InnoFang/algo-set
|
12f886dbec0da664327d26bcaf02c1316151a643
|
2419a7d720bea1fd6ff3b75c38342a0ace18b205
|
refs/heads/master
| 2023-03-16T09:51:24.631068
| 2023-03-13T11:08:54
| 2023-03-13T11:08:54
| 86,413,001
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
"""
8 / 8 test cases passed.
Runtime: 28 ms
Memory Usage: 15.2 MB
"""
class Solution:
def fizzBuzz(self, n: int) -> List[str]:
return ["FizzBuzz" if i % 3 == 0 and i % 5 == 0 else \
"Fizz" if i % 3 == 0 else \
"Buzz" if i % 5 == 0 else \
str(i) for i in range(1, n + 1)]
|
[
"innofang@outlook.com"
] |
innofang@outlook.com
|
9d0ed4a9bba517cc8e6767aa4c0fff77878212c4
|
1edfd072fae205d766e7c488f1af64f3af9fc23a
|
/src/python/sensors/microphone/microphone.py
|
c2f1fa4bf16a131848871f8e709d0692804cbd54
|
[] |
no_license
|
kth-social-robotics/multisensoryprocessing
|
17fc96eb3776642de1075103eeb461125020c892
|
867abe6c921fbf930ac26e0f43a8be0404817bcd
|
refs/heads/master
| 2021-01-21T11:50:16.348566
| 2018-11-05T14:48:42
| 2018-11-05T14:48:42
| 102,027,696
| 4
| 2
| null | 2018-02-20T15:14:22
| 2017-08-31T17:39:58
|
C++
|
UTF-8
|
Python
| false
| false
| 2,429
|
py
|
import pyaudio
import sys
import time
import msgpack
sys.path.append('../..')
import numpy as np
import re
from shared import create_zmq_server, MessageQueue
import sys
import wave
import datetime
if len(sys.argv) != 2:
exit('please only supply sound card name')
device_names_string = sys.argv[1]
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 2000
zmq_socket_1, zmq_server_addr_1 = create_zmq_server()
zmq_socket_2, zmq_server_addr_2 = create_zmq_server()
mq = MessageQueue('microphone-sensor')
p = pyaudio.PyAudio()
# Find the input device whose name starts with "[<device_names_string>]".
device_index = None
for i in range(p.get_device_count()):
    device = p.get_device_info_by_index(i)
    if device['name'].startswith('[{}]'.format(device_names_string)):
        device_index = i
# FIX: compare against None explicitly -- the original `if not device_index:`
# wrongly rejected a valid match at device index 0 (0 is falsy).
if device_index is None:
    exit('please connect a proper soundcard')
device_names = device_names_string.split(',')
mq.publish(
exchange='sensors',
routing_key='microphone.new_sensor.{}'.format(device_names[0]),
body={'address': zmq_server_addr_1, 'file_type': 'audio'}
)
mq.publish(
exchange='sensors',
routing_key='microphone.new_sensor.{}'.format(device_names[1]),
body={'address': zmq_server_addr_2, 'file_type': 'audio'}
)
session_name = datetime.datetime.now().isoformat().replace('.', '_').replace(':', '_') + device_names_string
# Let's be on the safe side and recording this to the computer...
waveFile = wave.open('{}.wav'.format(session_name), 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(p.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
def callback(in_data, frame_count, time_info, status):
    """PyAudio stream callback: split the interleaved stereo buffer into its
    two channels, send each over its own ZMQ socket with a shifted timestamp,
    and append the raw frames to the backup WAV file."""
    # FIX: np.frombuffer replaces np.fromstring, which is deprecated and
    # removed for binary input in modern NumPy; it returns a read-only view,
    # which is fine since the data is only sliced and serialized here.
    # NOTE(review): dtype is uint16 while FORMAT is paInt16 (signed); the byte
    # layout is identical for channel splitting, but confirm downstream
    # consumers expect unsigned values.
    result = np.frombuffer(in_data, dtype=np.uint16)
    result = np.reshape(result, (frame_count, 2))
    the_time = mq.get_shifted_time()
    zmq_socket_1.send(msgpack.packb((result[:, 0].tobytes(), the_time)))
    zmq_socket_2.send(msgpack.packb((result[:, 1].tobytes(), the_time)))
    waveFile.writeframes(in_data)
    return None, pyaudio.paContinue
stream = p.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input_device_index=device_index,
input=True,
frames_per_buffer=CHUNK,
stream_callback=callback
)
try:
input('[*] Serving at {} and {}. To exit press enter'.format(zmq_server_addr_1, zmq_server_addr_2))
finally:
waveFile.close()
stream.stop_stream()
stream.close()
zmq_socket_1.send(b'CLOSE')
zmq_socket_2.send(b'CLOSE')
zmq_socket_1.close()
zmq_socket_2.close()
|
[
"pjjonell@kth.se"
] |
pjjonell@kth.se
|
bc7bec551e2c03787c3416668a264b12cadc4258
|
c18e1fa174e1b0e6d56e9f1a8a3708099c3cd248
|
/learning_from_mcvine/res_sims/Ei_30/E-7.98574177896_hkl-1.2631196834,-0.985399386564,0.344825718364/run.py
|
c8c8bb10719d8e3c5cea76da9babf3f432f8b649
|
[] |
no_license
|
pnave95/ORNL_public_research
|
e0662657b41969f3f3dc263ea4c62a042d85547a
|
58cad7508f9d29c17af5419f05522c2f724e717e
|
refs/heads/master
| 2021-01-20T00:39:58.308250
| 2017-06-22T20:03:57
| 2017-06-22T20:03:57
| 89,160,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
#!/usr/bin/env python
# Auto-generated MCViNE driver: simulates the ARCS detector resolution for a
# single (Q, E) point using pre-computed beam neutrons.
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc
beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'
# ARCS geometry: moderator-sample distance, sample-detector distance, offset.
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E-7.98574177896_hkl-1.2631196834,-0.985399386564,0.344825718364/sample/sampleassembly.xml'
psi = 9.102125241479965e-05
# Transformation from (h, k, l) to momentum transfer Q.
hkl2Q = array([[ -6.60765593e-01, 9.34283256e-01, -7.78047243e-17],
               [ 6.60638026e-01, 4.67231832e-01, -8.09165116e-01],
               [ -6.60638026e-01, -4.67231832e-01, -8.09165116e-01]])
pp = array([ 2.71888109, 1.26794542, -0.36478751])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))
t_m2p = 0.0068059207318674521
Q = array([-0.04417126, -1.80163508, 0.51832987])
E = -7.9857417789580012
hkl_projection = array([-0.94671581, -0.60236987, 0.47499947])
urc.run(
    beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,
    Q, E, hkl_projection, Nbuffer=100000)
|
[
"p63@analysis-node03.sns.gov"
] |
p63@analysis-node03.sns.gov
|
dee6e4cddedf7e291e576951014671d28cee09bb
|
9b4bd7bb36d6e2d63973c724ca1ceb1c5e123ee1
|
/launcher/deployment/migrations/0014_auto__add_field_deployment_remote_container_id__add_field_deployment_r.py
|
5ad9d4cab5e81057db503de2651b3868113f73af
|
[] |
no_license
|
zennro/launcher
|
39de39345a15b1f544222503e5cf82992c1e62c8
|
25651d1ffa29adad18d1e003f69720bea9671d7c
|
refs/heads/master
| 2020-04-20T19:16:59.492609
| 2014-02-27T16:22:30
| 2014-02-27T16:22:30
| 17,900,826
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,171
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds (forwards) / removes (backwards) the
    remote_container_id and remote_app_id integer columns on Deployment."""
    def forwards(self, orm):
        # Adding field 'Deployment.remote_container_id'
        db.add_column(u'deployment_deployment', 'remote_container_id',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Deployment.remote_app_id'
        db.add_column(u'deployment_deployment', 'remote_app_id',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Deployment.remote_container_id'
        db.delete_column(u'deployment_deployment', 'remote_container_id')
        # Deleting field 'Deployment.remote_app_id'
        db.delete_column(u'deployment_deployment', 'remote_app_id')
    # Frozen ORM snapshot used by South at migration time (not live models).
    models = {
        u'deployment.deployment': {
            'Meta': {'object_name': 'Deployment'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deploy_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'expiration_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'launch_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deployments'", 'to': u"orm['deployment.Project']"}),
            'reminder_mail_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'remote_app_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'remote_container_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Deploying'", 'max_length': '50'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'deployment.deploymenterrorlog': {
            'Meta': {'object_name': 'DeploymentErrorLog'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deployment': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'error_log'", 'unique': 'True', 'to': u"orm['deployment.Deployment']"}),
            'error_log': ('django.db.models.fields.TextField', [], {}),
            'http_status': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'deployment.project': {
            'Meta': {'ordering': "['name']", 'object_name': 'Project'},
            'default_password': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'default_username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'github_url': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'ports': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'Inactive'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'survey_form_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        }
    }
    complete_apps = ['deployment']
|
[
"filip@jukic.me"
] |
filip@jukic.me
|
e7959142b07770d2de4bb62d1aee359a7aaf3c7b
|
161e01f92e3810edff17054851259e54a1432921
|
/rapid7vmconsole/models/resources_user_account.py
|
4c2e48f07118ad180885bdef93b98cfe414424cf
|
[
"MIT"
] |
permissive
|
Tofuhippo/vm-console-client-python
|
f3ffa3257b1928791fef090404377b43c3ff28d5
|
3c856923be1caf22c29a5d309713b8940546b57b
|
refs/heads/master
| 2020-06-26T04:07:37.251955
| 2019-07-29T20:45:40
| 2019-07-29T20:45:40
| 199,524,127
| 0
| 0
|
MIT
| 2019-07-29T20:42:37
| 2019-07-29T20:42:37
| null |
UTF-8
|
Python
| false
| false
| 4,187
|
py
|
# coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from rapid7vmconsole.models.link import Link # noqa: F401,E501
from rapid7vmconsole.models.user_account import UserAccount # noqa: F401,E501
class ResourcesUserAccount(object):
    """Swagger model pairing hypermedia links with a page of user accounts.

    NOTE: originally produced by the swagger code generator; this is a
    behavior-identical reimplementation.
    """

    # Attribute name -> swagger type string (consumed by to_dict()).
    swagger_types = {
        'links': 'list[Link]',
        'resources': 'list[UserAccount]'
    }

    # Attribute name -> key used in the JSON definition.
    attribute_map = {
        'links': 'links',
        'resources': 'resources'
    }

    def __init__(self, links=None, resources=None):  # noqa: E501
        """ResourcesUserAccount - a model defined in Swagger"""  # noqa: E501
        self._links = None
        self._resources = None
        self.discriminator = None
        if links is not None:
            self.links = links
        if resources is not None:
            self.resources = resources

    @property
    def links(self):
        """Hypermedia links to corresponding or related resources.

        :return: The links of this ResourcesUserAccount.
        :rtype: list[Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Set the hypermedia links of this ResourcesUserAccount.

        :param links: The links of this ResourcesUserAccount.
        :type: list[Link]
        """
        self._links = links

    @property
    def resources(self):
        """The resources returned.

        :return: The resources of this ResourcesUserAccount.
        :rtype: list[UserAccount]
        """
        return self._resources

    @resources.setter
    def resources(self, resources):
        """Set the resources of this ResourcesUserAccount.

        :param resources: The resources of this ResourcesUserAccount.
        :type: list[UserAccount]
        """
        self._resources = resources

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize nested models one level deep, as the generator does.
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        if issubclass(ResourcesUserAccount, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal state."""
        if not isinstance(other, ResourcesUserAccount):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
|
[
"zachary_youtz@rapid7.com"
] |
zachary_youtz@rapid7.com
|
aef74e6af7e67ac384a7f081ccb14a6821297285
|
7f76ae284ab2649def3da9d609beb4dbad9cb57d
|
/SnakesLadders/make_game.py
|
718247d55956e1bcbfbca7d83b279b140d16efda
|
[
"CC0-1.0"
] |
permissive
|
robclewley/DataScotties
|
0f696fe32debe1aee4f5fdc8e5fac4d9b94eeb99
|
63cca1c2fb5ffd75f4c99507ac497ae7cefec04d
|
refs/heads/master
| 2021-01-10T05:17:12.412352
| 2016-02-29T23:51:11
| 2016-02-29T23:51:11
| 50,062,479
| 9
| 17
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import snakesladders as SL
def make_game_from_dict(setup):
    """Build and return a GameFSM configured from *setup*.

    *setup* is a dictionary (e.g. loaded from a JSON file) providing
    'size', 'snakes', and 'ladders' entries; each snake/ladder is a
    (from, to) pair linking two board states.
    """
    game = SL.GameFSM(setup['size'])
    # Snakes first, then ladders -- same assignment order as before.
    for src, dst in list(setup['snakes']) + list(setup['ladders']):
        game.all_states[src].link = dst
    game.make_state_kinds()
    return game
|
[
"rob.clewley@gmail.com"
] |
rob.clewley@gmail.com
|
9359c407b694b3443b2748371e2164ab388b93b7
|
b155be1edeac8183736ababc64b52f07f15e3269
|
/appengine/swarming/handlers_exceptions.py
|
dcacd94a38946b90180532a2ab8c9ffdfdf66fd9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
luci/luci-py
|
d9ef3325a2b193d3f127363c012fe60860ea91fd
|
10cc5fdcca53e2a1690867acbe6fce099273f092
|
refs/heads/main
| 2022-11-26T09:32:20.640834
| 2022-11-24T15:11:30
| 2022-11-24T15:11:30
| 33,140,918
| 84
| 36
|
Apache-2.0
| 2022-11-23T13:56:13
| 2015-03-30T18:39:14
|
Python
|
UTF-8
|
Python
| false
| false
| 477
|
py
|
# Copyright 2021 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Exceptions raised by methods called by prpc/endpoints handlers."""
class BadRequestException(Exception):
  """Raised when an incoming request is malformed or fails validation."""
class PermissionException(Exception):
  """Raised when the caller lacks the permissions the operation needs."""
class InternalException(Exception):
  """Raised for unexpected server-side failures."""
|
[
"infra-scoped@luci-project-accounts.iam.gserviceaccount.com"
] |
infra-scoped@luci-project-accounts.iam.gserviceaccount.com
|
55c96c6259dbae33f993fa4591c05bbd163957fc
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/41/usersdata/138/21859/submittedfiles/gravitacional.py
|
e1574ee30ef0e241c176b4af46ebad86bb520b4d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import funcoes
#ENTRADA
# NOTE(review): these calls rely on Python 2 `input()` *evaluating* the
# typed text (the prompts ask for the matrices "entre aspas", i.e. quoted,
# so eval yields str/int/float). Under Python 3 input() returns a plain
# string and np.zeros/reshape below would fail -- confirm the interpreter.
dimensao = input('Digite a dimensao das matrizes: ')
matrizA = input('Digite a Matriz A como uma única linha entre aspas: ')
matrizD = input('Digite a Matriz D como uma única linha entre aspas: ')
alfa = input('Digite o valor de alfa: ')
#PREPARANDO A ENTRADA
# T starts as an all-zeros placeholder; A and d are parsed from the
# space-separated strings into dimensao x dimensao matrices.
T = np.zeros((dimensao,dimensao))
A = np.fromstring(matrizA, sep=' ').reshape(dimensao, dimensao)
d = np.fromstring(matrizD, sep=' ').reshape(dimensao, dimensao)
#comece aqui...
#INÍCIO
def somaC(m):
    """Return the sum of each column of matrix *m* as a list.

    Bug fix: the original body ignored its parameter and read an
    undefined global ``a`` (NameError at call time).

    :param m: 2-D numpy array.
    :return: list with one sum per column.
    """
    return [m[:, j].sum() for j in range(m.shape[1])]
def somaL(m):
    """Return the sum of each row of matrix *m* as a list.

    Bug fix: the original body ignored its parameter and read an
    undefined global ``a`` (NameError at call time).

    :param m: 2-D numpy array.
    :return: list with one sum per row.
    """
    return [m[i, :].sum() for i in range(m.shape[0])]
def matrizT(a, o, d, alfa=None):
    """Gravity-model trip-distribution matrix.

    T[i, j] = o[i] * a[j] * (1/d[i,j])**alfa
              / sum_k( a[k] * (1/d[i,k])**alfa )

    :param a: per-destination totals (column sums, e.g. from somaC).
    :param o: per-origin totals (row sums, e.g. from somaL).
    :param d: 2-D distance matrix (no zero entries).
    :param alfa: friction exponent; defaults to the module-level ``alfa``
                 read by the script, keeping the old call signature valid.
    :return: 2-D numpy array T.

    NOTE(review): the original body referenced several undefined names
    (``m``, ``Tc``, and indexed the int ``soma``) and could not run; this
    is the standard gravity model it appears to attempt -- confirm the
    numerator/denominator exponents against the assignment statement.
    """
    if alfa is None:
        alfa = globals()['alfa']  # fall back to the script-level alfa
    n = d.shape[0]
    T = np.zeros((n, n))
    for i in range(n):
        denom = sum(a[k] * (1.0 / d[i, k]) ** alfa for k in range(n))
        for j in range(n):
            T[i, j] = o[i] * a[j] * (1.0 / d[i, j]) ** alfa / denom
    return T
#SAÍDA
# NOTE(review): T here is still the all-zeros placeholder created above --
# matrizT() is never called, so this always prints 0.0000. Presumably T
# should be the gravity-model result; confirm the intended flow.
somatorio = sum(sum(T))
print('%.4f' % somatorio)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7d2bda3c4063f35013dcfc03874bce7500ebf9a5
|
4f3d283fc8f07af65d857370294986dbc37c520c
|
/kata/done/masodfok2.py
|
4a39751880ab6565e0832f3ad85bfe51d95b1aa1
|
[] |
no_license
|
csxeba/wiw
|
69a36ed6fee4240d6ed545e4f4e6d60c46090921
|
7251eeaaa98424a95c5837bddd6979ddbf0dd1ec
|
refs/heads/master
| 2020-06-28T11:46:21.993337
| 2017-02-16T21:20:25
| 2017-02-16T21:20:25
| 67,785,378
| 0
| 0
| null | 2016-09-13T09:52:33
| 2016-09-09T09:22:54
|
Python
|
UTF-8
|
Python
| false
| false
| 965
|
py
|
"""
Ez egy dokumentációs string a programunk elején.
Ide szokták beírni, hogy mit csinál a program.
Ez a program definiál egy másodfokú egyenlet megoldó
függvényt és megold néhány másodfokú egyenletet.
Írj egy függvényt, ami paraméterként a másodfokú
egyenlet a, b és c együtthatóit várja:
ax**2 + bx + c = 0 forma esetén.
-b +- gyök(b**2 - 4ac)
----------------------
2a
négyzetgyököt az importált sqrt() függvénnyel tudsz vonni.
"""
from math import sqrt
def megoldo(a, b, c):
    """Solve the quadratic equation a*x**2 + b*x + c = 0.

    Returns the two roots [(-b + sqrt(D)) / 2a, (-b - sqrt(D)) / 2a] as a
    list, as the original (Hungarian) docstring promised; the original
    body was an unimplemented ``pass`` stub returning None.

    A negative discriminant yields complex roots (via ``** 0.5``) instead
    of raising, since the sample equations include one such case.
    """
    disc = b * b - 4 * a * c
    root = sqrt(disc) if disc >= 0 else disc ** 0.5
    return [(-b + root) / (2 * a), (-b - root) / (2 * a)]
def main():
    """Solve each sample quadratic with megoldo() and print its roots."""
    egyenletek = [
        [1, -3, -10],
        [2, -9, 4],
        [1, -3, -4],
        [1, -7, 0],
        [1, -2, 3],
        [1, -3, 2],
        [4, -11, 6]
    ]
    # Solve and print with a loop (this was the exercise TODO).
    for a, b, c in egyenletek:
        print('{}x^2 + {}x + {} = 0 -> {}'.format(a, b, c, megoldo(a, b, c)))
if __name__ == '__main__':
main()
|
[
"csxeba@gmail.com"
] |
csxeba@gmail.com
|
1436bed40ecd073c5238666b7406512170c8414c
|
d7cfe98faeb0fe1b4ce02d54d8bbedaca82764f7
|
/1106_문제풀이/swea_5251_최소이동거리_solution(heap).py
|
2182c4588a300d2baac9a7f0efef028ae351d25d
|
[] |
no_license
|
Anseik/algorithm
|
27cb5c8ec9692cf705a8cea1d60e079a7d78ef72
|
925404006b84178682206fbbb3b989dcf4c3dee9
|
refs/heads/master
| 2023-02-26T00:02:01.696624
| 2021-02-03T14:10:28
| 2021-02-03T14:10:28
| 301,753,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
import sys
import heapq
sys.stdin = open('swea_5251_최소이동거리_solution.txt')
def dijkstra_heap():
    """Dijkstra over the global V x V matrix `adj`; distance 0 -> V.

    `dist` entries are finalized only when a vertex is popped; the
    sentinel 987654321 stands in for infinity.
    """
    INF = 987654321
    dist = [INF] * (V + 1)
    seen = [False] * (V + 1)
    dist[0] = 0
    pq = [(0, 0)]
    while pq:
        cost, node = heapq.heappop(pq)
        if seen[node]:
            continue  # stale heap entry
        seen[node] = True
        dist[node] = cost
        for nxt in range(V + 1):
            cand = cost + adj[node][nxt]
            if not seen[nxt] and cand < dist[nxt]:
                heapq.heappush(pq, (cand, nxt))
    return dist[V]
T = int(input())  # number of test cases
for tc in range(1, T + 1):
    # V: highest vertex id (vertices 0..V); E: number of directed edges.
    V, E = map(int, input().split())
    # Adjacency matrix initialised to the "infinity" sentinel 987654321.
    adj = [[987654321] * (V + 1) for _ in range(V + 1)]
    for i in range(E):
        st, ed, w = map(int, input().split())
        adj[st][ed] = w
    ans = dijkstra_heap()
    print('#{} {}'.format(tc, ans))
|
[
"kma9271@naver.com"
] |
kma9271@naver.com
|
352f6e17c51d8f3ef0e4cc8edce87cce6667407d
|
bb981602d111b709efec6279c3fccc6ef9efcc13
|
/blog/migrations/0001_initial.py
|
35b8244eae51e2a60ba29dd8edcdd84583a02924
|
[] |
no_license
|
sompodsign/shampad_blog_pro
|
3897e5f95b48341b4058a5e42bb1ea70f1f9866e
|
b031b950e778b1534f433a33b84b37e93186e9b2
|
refs/heads/main
| 2023-02-27T13:15:00.984530
| 2021-02-11T16:06:01
| 2021-02-11T16:06:01
| 337,414,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
# Generated by Django 3.1.5 on 2021-01-24 04:04
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import taggit.managers
class Migration(migrations.Migration):
    """Initial blog-app schema: creates the Post and Comment tables."""

    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('taggit', '0003_taggeditem_add_unique_index'),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('subheading', models.CharField(max_length=300, null=True)),
                # slug only has to be unique among posts published the same day
                ('slug', models.SlugField(max_length=250, unique_for_date='publish')),
                ('body', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
                ('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
            ],
            options={
                'ordering': ('-publish',),  # newest posts first
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('email', models.EmailField(max_length=254)),
                ('body', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # moderation flag: inactive comments are hidden, not deleted
                ('active', models.BooleanField(default=True)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.post')),
            ],
            options={
                'ordering': ('created',),  # oldest comments first
            },
        ),
    ]
|
[
"sompodsign@gmail.com"
] |
sompodsign@gmail.com
|
b0b78586d69b34335c3a959605e75ab6c38817cf
|
73758dde83d1a1823c103e1a4ba71e7c95168f71
|
/nsd2004/devweb/mysite/mysite/settings.py
|
58e918a61f92bb516161549dc5c59d8d67ec1e17
|
[] |
no_license
|
tonggh220/md_5_nsd_notes
|
07ffdee7c23963a7a461f2a2340143b0e97bd9e1
|
a58a021ad4c7fbdf7df327424dc518f4044c5116
|
refs/heads/master
| 2023-07-02T01:34:38.798929
| 2021-05-12T08:48:40
| 2021-05-12T08:48:40
| 393,885,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,240
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w)u!*(f(^*=k_^m&%cu8+%qyp)z($2!$r=nr68&_&xbllq&d*n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dj2004',
'USER': 'root',
'PASSWORD': 'tedu.cn',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
2d1650cd972e452861a4ceffa9d5cd93f683468b
|
c8c4f6c72ffc801cc24d69617c170c3ee093dc86
|
/ReadMeCleaner.py
|
8ae4a9379ea46720a301f31a7d8736eb1dde9e5f
|
[] |
no_license
|
PasaOpasen/MathClasses
|
831d85eab3c038841a40ae3e7b0896dcf6706993
|
d11f124e09217fdc6deccfc59feb1a81378be1af
|
refs/heads/master
| 2021-12-14T17:54:36.235687
| 2021-11-30T08:15:21
| 2021-11-30T08:15:21
| 237,803,522
| 8
| 3
| null | 2020-12-03T14:07:40
| 2020-02-02T16:53:44
|
C#
|
UTF-8
|
Python
| false
| false
| 246
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 16:10:36 2020

@author: qtckp

Strip leading whitespace from every non-blank line of README.md,
rewriting the file in place. Blank lines ('\n') are kept verbatim so
Markdown paragraph breaks survive.
"""

# Fix: open with an explicit encoding -- the original used the locale
# default, which corrupts a UTF-8 README on non-UTF-8 systems.
with open('README.md', 'r', encoding='utf-8') as f:
    lines = f.readlines()

with open('README.md', 'w', encoding='utf-8') as f:
    for line in lines:
        f.write(line.lstrip() if line != '\n' else line)
|
[
"qtckpuhdsa@gmail.com"
] |
qtckpuhdsa@gmail.com
|
827ef53c1ea908502f11b3dce5f71710fb9c9100
|
93bd129c0d189124bb690670b22c4a80edda95b2
|
/pg/libs/log_lib.py
|
41f888b4bebff25a45783e32903571945e0d7155
|
[] |
no_license
|
vdeandrade/32id-tomo
|
0690564d263392f52c3d239bd7aec7a0bfc507e1
|
ea0f5debf121648c1366c50f0a239ee938e6a32f
|
refs/heads/master
| 2020-12-22T16:00:35.300527
| 2020-01-28T21:51:35
| 2020-01-28T21:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
'''
Log Lib for Sector 2-BM
'''
import logging
# Logging defines
__GREEN = "\033[92m"
__RED = '\033[91m'
__YELLOW = '\033[33m'
__ENDC = '\033[0m'
logger = None
info_extra={'endColor': __ENDC, 'color': __GREEN}
warn_extra={'endColor': __ENDC, 'color': __YELLOW}
error_extra={'endColor': __ENDC, 'color': __RED}
def info(msg):
    """Log *msg* at INFO level via the module logger (green color extras)."""
    # `global` is only needed for assignment; plain reads suffice here.
    logger.info(msg, extra=info_extra)
def error(msg):
    """Log *msg* at ERROR level via the module logger (red color extras)."""
    # `global` is only needed for assignment; plain reads suffice here.
    logger.error(msg, extra=error_extra)
def warning(msg):
    """Log *msg* at WARNING level via the module logger (yellow color extras)."""
    # `global` is only needed for assignment; plain reads suffice here.
    logger.warning(msg, extra=warn_extra)
def setup_logger(log_name, stream_to_console=True):
    """Configure the module-level logger to write to the file *log_name*.

    Tags the shared color-extras dicts with the logger name, attaches a
    FileHandler (and optionally a console StreamHandler) emitting
    colorized "timestamp message" lines at DEBUG level.

    Fix: the original appended new handlers on every call, so calling
    setup_logger twice doubled every log line; handlers are now attached
    only once per logger.

    :param log_name: logger name, also used as the log file path.
    :param stream_to_console: also echo records to stderr when True.
    """
    global logger
    global info_extra
    global warn_extra
    global error_extra
    info_extra['logger_name'] = log_name
    warn_extra['logger_name'] = log_name
    error_extra['logger_name'] = log_name
    logger = logging.getLogger(log_name)
    logger.setLevel(logging.DEBUG)
    if logger.handlers:
        # Already configured -- avoid duplicate handlers / repeated lines.
        return
    formatter = logging.Formatter("%(asctime)s %(color)s %(message)s %(endColor)s")
    fHandler = logging.FileHandler(log_name)
    fHandler.setFormatter(formatter)
    logger.addHandler(fHandler)
    if stream_to_console:
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        ch.setLevel(logging.DEBUG)
        logger.addHandler(ch)
|
[
"decarlof@gmail.com"
] |
decarlof@gmail.com
|
54ddf4c157f62fca30b578f135e52359bccbe23c
|
a644ae249712bddb9cb1b8d2a75812c107736fe5
|
/test/test2.py
|
362c1e5a84349bcf063528ac0c493fc057941bfd
|
[
"MIT"
] |
permissive
|
linsalrob/SEED_Servers_Python
|
a834b7ce763e9d1e89cb76530e847f3fe6422df8
|
a2d2aa8c64547e94c3d6d031ebba46b8f9ed5716
|
refs/heads/master
| 2021-06-02T17:40:30.301307
| 2020-02-04T20:37:01
| 2020-02-04T20:37:01
| 38,650,052
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
import sys
from servers.SAP import SAPserver

# Query the SEED SAP service for all proteins of genome 83333.1
# (E. coli K-12 per the SEED id scheme -- confirm against the SEED docs)
# and report how many were returned. Requires network access.
server=SAPserver()
genomeID = '83333.1'
sys.stderr.write("Genome: " + str(genomeID) + "\n")
prots = server.all_proteins( {"-id" : genomeID} )
print("protein length " + str(len(prots)))
|
[
"raedwards@gmail.com"
] |
raedwards@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.