| column | type | observed range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |

The records below list these columns in order, with fields separated by `|`.
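As a rough illustration of the schema above, the sketch below models one record as a Python dataclass and filters records by `license_type`. The `Row` class, its field subset, and the `permissive_rows` helper are illustrative assumptions, not part of any dataset tooling; the example values are copied from the pyplan-ide record further down.

```python
# Illustrative sketch only: field names follow the schema above, but the Row
# class and how records are loaded are assumptions made for this example.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Row:
    blob_id: str                      # 40-character blob hash
    path: str                         # file path within the repository snapshot
    repo_name: str
    license_type: str                 # e.g. "permissive" or "no_license"
    detected_licenses: List[str] = field(default_factory=list)
    length_bytes: int = 0
    author_id: Optional[str] = None
    content: str = ""                 # the source file text itself


def permissive_rows(rows: List[Row]) -> List[Row]:
    """Keep only permissively licensed records, smallest files first."""
    keep = [r for r in rows if r.license_type == "permissive"]
    return sorted(keep, key=lambda r: r.length_bytes)


# Hand-built example using values from one record below (pyplan-ide):
example = Row(
    blob_id="cc2fb3795c9dafa0f738c912004965c3b7b25f9a",
    path="/pyplan_engine/classes/CustomImports.py",
    repo_name="jorgedouglas71/pyplan-ide",
    license_type="permissive",
    detected_licenses=["MIT"],
    length_bytes=1083,
    author_id="fbrussa@novix.com",
)
print(permissive_rows([example])[0].repo_name)  # -> jorgedouglas71/pyplan-ide
```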
b61968dae81932acb65d39767e1e265e0cacf305
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_167/ch50_2019_06_11_13_14_47_474417.py
|
72022271fba4966682e6c89d23ed329447de3c78
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
def numero_no_indice(lista):
    l = []
    i = 0
    posicao = 0
    while i < len(lista):
        if lista[i] == posicao:   # index the list with i ("i[lista]" is invalid)
            l.append(i)           # append to l ("x" was never defined)
        i += 1                    # advance unconditionally so the loop terminates
        posicao += 1
    return l
|
[
"you@example.com"
] |
you@example.com
|
52aaa46352ec022d7349aab054fec390a4a8e785
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03359/s399632677.py
|
4b9e4ef5080c28b6522f683bf717ce679011d8cb
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007
def main():
a, b = map(int, readline().split())
ans = 0
for i in range(1, 13):
if i < a:
ans += 1
elif i == a and i <= b:
ans += 1
print(ans)
return
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a51a38a2e71d34f8aa6a2e82026c584eeacddb12
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03363/s286998192.py
|
091f166035f13606310d3cb4f31a2b7bd57a4859
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
from collections import defaultdict
n = int(input())
li_a = list(map(int, input().split()))
s = [0] * (n+1)
dd = defaultdict(lambda:0)
for i in range(n):
s[i+1] += s[i] + li_a[i]
for i in range(n+1):
dd[s[i]] += 1
ans = 0
for key in dd:
ans += (dd[key]*dd[key] - dd[key])//2
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
cc2fb3795c9dafa0f738c912004965c3b7b25f9a
|
ab4f0df599159b2c3929c24b12e2766718efdacb
|
/pyplan_engine/classes/CustomImports.py
|
e28736de5115b4624154fca75a5693ce79bf591c
|
[
"MIT"
] |
permissive
|
jorgedouglas71/pyplan-ide
|
f01ec438f727ee0dea01b0d265155b49a26ccdb8
|
5ad0e4a2592b5f2716ff680018f717c65de140f5
|
refs/heads/master
| 2020-09-25T03:10:01.201707
| 2019-12-04T15:39:55
| 2019-12-04T15:39:55
| 225,904,173
| 0
| 0
|
MIT
| 2019-12-04T15:57:06
| 2019-12-04T15:57:05
| null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
custom = {
}
defaults = {
"abs": abs,
"range": range,
"abs": abs,
"dict": dict,
"min": min,
"all": all,
"hex": hex,
"next": next,
"slice": slice,
"any": any,
"divmod": divmod,
"object": object,
"sorted": sorted,
"ascii": ascii,
"enumerate": enumerate,
"input": input,
"oct": oct,
"bin": bin,
"int": int,
"str": str,
"bool": bool,
"isinstance": isinstance,
"ord": ord,
"sum": sum,
"bytearray": bytearray,
"filter": filter,
"issubclass": issubclass,
"pow": pow,
"bytes": bytes,
"float": float,
"iter": iter,
"tuple": tuple,
"callable": callable,
"format": format,
"len": len,
"property": property,
"type": type,
"chr": chr,
"frozenset": frozenset,
"list": list,
"range": range,
"locals": locals,
"repr": repr,
"zip": zip,
"map": map,
"reversed": reversed,
"complex": complex,
"max": max,
"round": round
}
imports = {**custom, **defaults}
# bypass for tests
imports["bypass"] = True
|
[
"fbrussa@novix.com"
] |
fbrussa@novix.com
|
991ef0d671ac96b7e9c8992788bc4c9aaefb75ee
|
eb7b84b70b335e913aa7d6bf9effba3034810e27
|
/FirstApp_Flask/employeeInfo.py
|
73e6fe9ffb1ca4a70d0a2d3879854a6a22310f0c
|
[] |
no_license
|
chavhanpunamchand/flask
|
ccc27c9909799bcc4fa9208b49c478942d569eb4
|
840c7299eb5cfc708f8a6cbefe8955115af9b179
|
refs/heads/master
| 2023-02-21T13:57:43.012794
| 2021-01-28T11:09:41
| 2021-01-28T11:09:41
| 326,448,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
class Employee:
def __init__(self,eid,ename,eage,egen,ecity,esalary,email,erole,eskil,ehobs):
self.empid=eid
self.empName=ename
self.empAge=eage
self.eGender=egen
self.empCity=ecity
self.empSalary=esalary
self.empEmail=email
self.empRole=erole
self.empSkill=eskil
self.emphobs=ehobs
def __str__(self):
return f" EmpId : {self.empid} " \
f" Name :{self.empName} " \
f" Age :{self.eGender}" \
f" City :{self.empCity}" \
f" Salary:{self.empSalary}" \
f" Role:{self.empRole} " \
f" Email:{self.empEmail}" \
f" Skill:{self.empSkill}" \
f" Hobies:{self.emphobs} "
def __repr__(self):
        return self.__str__()  # __repr__ must return a string, not the instance itself
# if __name__ == '__main__':
# s1=Employee(101,'Punamchand',27,'M','Pune',25000,'chavhanpunamchand@gmail.com','SSE','Python,Java','Cricket,Hocky')
# print(s1)
|
[
"chavhanpunamchand@gmail.com"
] |
chavhanpunamchand@gmail.com
|
c6283b432e7cc517ab49d5868cb8608925fc9ed8
|
60eec837687166288bed41aa406fb92ff8b63f1b
|
/model/tensorflow/examples/tfdnnc/tfdnnc.py
|
d764e5f63f5b4c9824f4e993163d0e65ed8a9156
|
[
"MIT",
"LicenseRef-scancode-generic-export-compliance"
] |
permissive
|
shivam-bit/dffml
|
1c5ca75e946fddec78fd3943bdcb6cf5650bcd89
|
4c1e7870dba12c298d5a7f4740a23af634cacb7e
|
refs/heads/master
| 2022-06-19T22:28:10.396364
| 2020-05-08T21:53:53
| 2020-05-08T21:53:53
| 262,698,152
| 0
| 0
|
MIT
| 2020-05-10T02:36:10
| 2020-05-10T02:36:09
| null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
from dffml import CSVSource, Features, DefFeature
from dffml.noasync import train, accuracy, predict
from dffml_model_tensorflow.dnnc import DNNClassifierModel
model = DNNClassifierModel(
features=Features(
DefFeature("SepalLength", float, 1),
DefFeature("SepalWidth", float, 1),
DefFeature("PetalLength", float, 1),
DefFeature("PetalWidth", float, 1),
),
predict=DefFeature("classification", int, 1),
epochs=3000,
steps=20000,
classifications=[0, 1, 2],
clstype=int,
)
# Train the model
train(model, "iris_training.csv")
# Assess accuracy (alternate way of specifying data source)
print("Accuracy:", accuracy(model, CSVSource(filename="iris_test.csv")))
# Make prediction
for i, features, prediction in predict(
model,
{
"PetalLength": 4.2,
"PetalWidth": 1.5,
"SepalLength": 5.9,
"SepalWidth": 3.0,
},
{
"PetalLength": 5.4,
"PetalWidth": 2.1,
"SepalLength": 6.9,
"SepalWidth": 3.1,
},
):
features["classification"] = prediction["classification"]["value"]
print(features)
|
[
"johnandersenpdx@gmail.com"
] |
johnandersenpdx@gmail.com
|
bb74ddac0a7bc8a9c141488aea6f2faa98c9f820
|
8fb1d41797595550418ecfc0e7558f38254b4606
|
/django/contrib/flatpages/tests/forms.py
|
7282f009c9dc86d398bb8e67aa09a2233c66ef42
|
[
"MIT"
] |
permissive
|
hunch/hunch-gift-app
|
2aad70a9f18124bf0de02d7a125fa93c765da008
|
8c7cad24cc0d9900deb4175e6b768c64a3d7adcf
|
refs/heads/master
| 2016-09-06T03:13:52.153974
| 2012-03-26T18:11:59
| 2012-03-26T18:11:59
| 1,191,221
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
from django.contrib.flatpages.admin import FlatpageForm
from django.test import TestCase
class FlatpageAdminFormTests(TestCase):
def setUp(self):
self.form_data = {
'title': "A test page",
'content': "This is a test",
'sites': [1],
}
def test_flatpage_admin_form_url_validation(self):
"The flatpage admin form validates correctly validates urls"
self.assertTrue(FlatpageForm(data=dict(url='/new_flatpage/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.special~chars/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.very_special~chars-here/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a space/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a % char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ! char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a & char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ? char/', **self.form_data)).is_valid())
|
[
"gleitz@hunch.com"
] |
gleitz@hunch.com
|
5bf881452b66fca7b77a5d8014fa09f19fdeb494
|
87163acf1614292be250754f28114f89013f73a3
|
/Codechef/COLOR.py
|
bb42d808690439dedd343a0a5262394440066d8d
|
[] |
no_license
|
khush-01/Python-codes
|
742a9d9966d2ceb3ad2e7c78e34ef88e55df955a
|
da3cae8df0aafe763399066eefc9b786538fdb35
|
refs/heads/main
| 2023-03-20T04:37:14.020134
| 2021-03-12T04:56:30
| 2021-03-12T04:56:30
| 346,941,048
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
for _ in range(int(input())):
n = int(input())
s = input()
a = {'R': 0, 'G': 0, 'B': 0}
for x in s:
a[x] += 1
print(n - max(a.values()))
|
[
"noreply@github.com"
] |
khush-01.noreply@github.com
|
6200af4f551c8e6534b8a3a0ca176346d8b54d3e
|
dfff816642f4e1afeab268f441906a6d811d3fb4
|
/polling_stations/apps/data_collection/management/commands/import_swale.py
|
1450c85e75893e03f2e966075e02b5698e14b4c4
|
[] |
permissive
|
mtravis/UK-Polling-Stations
|
2c07e03d03959492c7312e5a4bfbb71e12320432
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
refs/heads/master
| 2020-05-14T18:36:31.501346
| 2019-04-17T12:54:57
| 2019-04-17T12:54:57
| 181,912,382
| 0
| 0
|
BSD-3-Clause
| 2019-04-17T14:48:26
| 2019-04-17T14:48:26
| null |
UTF-8
|
Python
| false
| false
| 3,327
|
py
|
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
council_id = "E07000113"
addresses_name = (
"local.2019-05-02/Version 1/polling_station_export-2019-02-28Swle.csv"
)
stations_name = (
"local.2019-05-02/Version 1/polling_station_export-2019-02-28Swle.csv"
)
elections = ["local.2019-05-02"]
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.uprn.strip().lstrip("0")
if uprn in [
"200001875126", # ME122HP -> ME122LZ : Morland House Augustine Road, Minster-on-Sea, Sheerness, Kent
"100061078702", # ME122LE -> ME122LA : Uppal Villa Minster Drive, Minster-on-Sea, Sheerness, Kent
"100061081138", # ME122LS -> ME122LX : 150 Scarborough Drive, Minster-on-Sea, Sheerness, Kent
"10023197317", # ME122SG -> ME122SH : 1A Baldwin Cottage Baldwin Road, Minster-on-Sea, Sheerness, Kent
"10023197934", # ME122LT -> ME122LX : 152 Scarborough Drive, Minster-on-Sea, Sheerness, Kent
"100061080835", # ME123JE -> ME123HT : 1A Rosemary Avenue, Minster-on-Sea, Sheerness, Kent
"10023200030", # ME103TU -> ME123TU : 28 Petunia Avenue, Minster-on-Sea, Sheerness, Kent
"10035061220", # ME121AG -> ME122AG : 178 Invicta Road, Sheerness, Kent
"10093083738", # ME124JB -> ME101QA : Flat Above Marinos Fish Bar 212 London Road, Sittingbourne, Kent
"100061078990", # ME123PA -> ME123NZ : 382B Minster Road, Minster-on-Sea, Sheerness, Kent
"100061083074", # ME122SG -> ME122SD : Llamedos The Glen, Minster-on-Sea, Sheerness, Kent
"100062379223", # ME124JA -> ME124JB : Sheringham Bell Farm Lane, Minster-on-Sea, Sheerness, Kent
"100061073637", # ME124JA -> ME124JB : The Laurels Bell Farm Lane, Minster-on-Sea, Sheerness, Kent
"100061073623", # ME124JA -> ME124JB : Merry Moments Bell Farm Lane, Minster-on-Sea, Sheerness, Kent
"200002539987", # ME101NL -> ME101NS : Flat L 94 London Road, Sittingbourne, Kent
]:
rec["accept_suggestion"] = True
if uprn in [
"100062087806", # ME121TP -> ME122RT : Flat 2 36A Broadway, Sheerness, Kent
"100062087803", # ME121TP -> ME122RT : Flat 1 36A Broadway, Sheerness, Kent
"200002535746", # ME121NX -> ME102RD : 45A High Street, Sheerness, Kent
"10013741961", # ME130SG -> ME104ES : Winterbourne Cottage Annexe Rushett Lane, Norton, Faversham, Kent
"10023196555", # ME122DH -> ME121AG : 3 The Crescent Parklands Village The Broadway, Minster-on-Sea, Sheerness, Kent
"10023196556", # ME122DH -> ME121AG : 1 The Crescent Parklands Village The Broadway, Minster-on-Sea, Sheerness, Kent
"10023200723", # ME137JG -> ME98XF : 23 West Street, Faversham, Kent
]:
rec["accept_suggestion"] = False
return rec
def station_record_to_dict(self, record):
rec = super().station_record_to_dict(record)
if record.pollingstationnumber in ["113", "114"]:
rec["location"] = Point(0.735912, 51.337309, srid=4326)
return rec
|
[
"chris.shaw480@gmail.com"
] |
chris.shaw480@gmail.com
|
efbce9c46c5c14d023e8afee6f5fd7be1921c7d8
|
4c252eb68446d5fd050e28a6b5ba1a7879b70b0a
|
/pyuavcan/transport/can/media/socketcan/__init__.py
|
c6ee7989569c0ca65d99e412f740125f7b1a788f
|
[
"MIT"
] |
permissive
|
jxltom/pyuavcan
|
ce2cdf3a95ba4c6f3a0fd8aae24b341e46481fae
|
42063b65ee2af431ab485f228d1ed5465a576449
|
refs/heads/master
| 2021-01-16T15:09:48.547764
| 2020-05-26T09:31:25
| 2020-05-26T09:31:25
| 243,163,363
| 0
| 0
|
MIT
| 2020-02-26T03:53:47
| 2020-02-26T03:53:46
| null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
#
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
"""
The module is always importable but is functional only on GNU/Linux.
For testing or experimentation on a local machine it is often convenient to use a virtual CAN bus instead of a real one.
Using SocketCAN, one can set up a virtual CAN bus interface as follows::
modprobe can
modprobe can_raw
modprobe vcan
ip link add dev vcan0 type vcan
ip link set vcan0 mtu 72 # Enable CAN FD by configuring the MTU of 64+8
ip link set up vcan0
Where ``vcan0`` can be replaced with any other valid interface name.
Please read the SocketCAN documentation for more information.
"""
from sys import platform as _platform
if _platform == 'linux':
from ._socketcan import SocketCANMedia as SocketCANMedia
|
[
"pavel.kirienko@gmail.com"
] |
pavel.kirienko@gmail.com
|
7e17aef7c234a83dd5f625f3f79766cdd0c89a88
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02689/s473246876.py
|
934535d4b4972167937a8498aa36c123addb4e8b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
n, m = map(int, input().split())
h = list(map(int, input().split()))
ab = [list(map(int, input().split())) for _ in range(m)]
ans = [1]*n
for i in range(m):
if h[ab[i][0]-1] == h[ab[i][1]-1]:
ans[ab[i][0]-1] = 0
ans[ab[i][1]-1] = 0
elif h[ab[i][0]-1] > h[ab[i][1]-1]:
ans[ab[i][1]-1] = 0
else:
ans[ab[i][0]-1] = 0
print(sum(ans))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1058bf0e58b27114ec41ce159f6a6412a3122981
|
2e3430eefb94fe6bc6ea8256ceadaf25bbf34e76
|
/puma/helpers/testing/mixin/__init__.py
|
e5d5031dc92b6e294d4d8a37e566046b3bcab595
|
[
"Apache-2.0"
] |
permissive
|
gift-surg/puma
|
33e08b464fe4241da512fefcab5e8909e6f1d768
|
58beae3459a0c8d96adfe9af323e26868428df4d
|
refs/heads/master
| 2022-11-27T11:26:25.557773
| 2020-06-04T11:21:38
| 2020-06-04T11:21:38
| 198,999,143
| 1
| 0
|
Apache-2.0
| 2020-07-29T16:36:50
| 2019-07-26T10:38:46
|
Python
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
from puma.helpers.testing.mixin.not_a_test_case import NotATestCase # noqa: F401
from puma.helpers.testing.mixin.not_a_test_case_enum import NotATestCaseEnum # noqa: F401
|
[
"richard.miles@fast-chat.co.uk"
] |
richard.miles@fast-chat.co.uk
|
aeba0cf66cd6b63cc2cd690e704544f6c0591260
|
8541f4118c6093c84e78d768285e7007ee5f6a6c
|
/apps/inventory/migrations/0009_auto_20151220_1554.py
|
e2b8366a29c48b68bbe2d712b6d251b066c4ba96
|
[] |
no_license
|
iraycd/awecounting
|
c81a8ca6b7a4a942e63cf6b7d723f9883e57a107
|
388df4de63146e0a9a211afa522ec50e0f3df443
|
refs/heads/master
| 2021-01-15T23:30:27.439759
| 2016-03-16T10:34:40
| 2016-03-16T10:34:40
| 57,046,467
| 1
| 0
| null | 2016-04-25T14:03:40
| 2016-04-25T14:03:40
| null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import njango.fields
class Migration(migrations.Migration):
dependencies = [
('inventory', '0008_auto_20151219_1547'),
]
operations = [
migrations.AlterField(
model_name='sale',
name='date',
field=njango.fields.BSDateField(default=njango.fields.today),
),
]
|
[
"roshanshrestha01@gmail.com"
] |
roshanshrestha01@gmail.com
|
5b5451db8c50ff61a68abf0e926e2717d703c310
|
5e381364c2ab31ff3618369085afffba6caa8edb
|
/recipes/ghc-filesystem/all/conanfile.py
|
a6f41d56f7d61a30937802fdd0e2b28998c42def
|
[
"MIT"
] |
permissive
|
CAMOBAP/conan-center-index
|
16aea68a6d22da22831ba985773125e8eda08f00
|
67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1
|
refs/heads/master
| 2023-07-30T08:58:57.285571
| 2021-10-02T14:57:54
| 2021-10-02T14:57:54
| 323,262,699
| 1
| 0
|
MIT
| 2021-05-29T13:37:04
| 2020-12-21T07:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
import os
from conans import ConanFile, CMake, tools
class GhcFilesystemRecipe(ConanFile):
name = "ghc-filesystem"
description = "A header-only single-file std::filesystem compatible helper library"
topics = ("conan", "ghc-filesystem", "header-only", "filesystem")
homepage = "https://github.com/gulrak/filesystem"
url = "https://github.com/conan-io/conan-center-index"
license = "MIT"
generators = "cmake"
no_copy_source = True
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "filesystem-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["GHC_FILESYSTEM_BUILD_TESTING"] = False
self._cmake.definitions["GHC_FILESYSTEM_BUILD_EXAMPLES"] = False
self._cmake.definitions["GHC_FILESYSTEM_WITH_INSTALL"] = True
self._cmake.configure(source_folder=self._source_subfolder)
return self._cmake
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib"))
def package_id(self):
self.info.header_only()
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "ghcFilesystem"
self.cpp_info.names["cmake_find_package_multi"] = "ghcFilesystem"
self.cpp_info.components["filesystem"].names["cmake_find_package"] = "ghc_filesystem"
self.cpp_info.components["filesystem"].names["cmake_find_package_multi"] = "ghc_filesystem"
|
[
"noreply@github.com"
] |
CAMOBAP.noreply@github.com
|
183611ac29eb22fef0ace8fc886bf34f6a42b118
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Non_Linear_Finite_Element_Analysis_of_Solids_and_Structures_Borst/pyfem-1.0/pyfem/solvers/Solver.py
|
2f5f42f519614cbfcae51e0f61b1ad62fe84b778
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,684
|
py
|
############################################################################
# This Python file is part of PyFEM-1.0, released on Aug. 29, 2012. #
# The PyFEM code accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# R. de Borst, M.A. Crisfield, J.J.C. Remmers and C.V. Verhoosel #
# John Wiley and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by J.J.C. Remmers, C.V. Verhoosel and R. de Borst. #
# Comments and suggestions can be sent to: #
# PyFEM-support@tue.nl #
# #
# The latest version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class Solver:
def __init__( self , props , globdat ):
solverProps = getattr( props, "solver" )
solverType = solverProps.type
exec "from pyfem.solvers."+solverType+" import "+solverType
props.currentModule = "solver"
self.solver = eval(solverType+"( props , globdat )")
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def run( self , props , globdat ):
self.solver.run( props , globdat )
|
[
"me@yomama.com"
] |
me@yomama.com
|
5b80fd002f0f429389f82658da7f4196dab59f60
|
9640f0b9a51ead702e1fc70c2571c116893f5046
|
/products/migrations/0004_auto_20201006_2154.py
|
04bb2798e01b633ba64b7ddc57eb2292697db4c1
|
[] |
no_license
|
CODE-Easyy/optic-backend
|
d7f46194d91fa25e1d54c4a9cb88faac9ca57ba9
|
ed885265125b9d95be9467d95308964d25551230
|
refs/heads/main
| 2023-01-07T04:10:00.746112
| 2020-10-31T01:00:13
| 2020-10-31T01:00:13
| 302,156,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
# Generated by Django 3.1.2 on 2020-10-06 21:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0003_auto_20201006_2115'),
]
operations = [
migrations.RemoveField(
model_name='brand',
name='cat',
),
migrations.RemoveField(
model_name='brand',
name='subcat',
),
migrations.RemoveField(
model_name='material',
name='cat',
),
migrations.RemoveField(
model_name='material',
name='subcat',
),
migrations.RemoveField(
model_name='opticalpower',
name='cat',
),
migrations.RemoveField(
model_name='opticalpower',
name='subcat',
),
migrations.RemoveField(
model_name='radius',
name='cat',
),
migrations.RemoveField(
model_name='radius',
name='subcat',
),
migrations.RemoveField(
model_name='volume',
name='cat',
),
migrations.RemoveField(
model_name='volume',
name='subcat',
),
]
|
[
"mr.esenyazov@gmail.com"
] |
mr.esenyazov@gmail.com
|
2ed9c19851b9bc62ae4c409439f33728ff16f76a
|
8692807f1dfa8c18c61df07cfafbbd27d4e66fba
|
/previous_problems/BEGINNER/CHN09.sol.py
|
a30740f15472fbe4e2738dd1ba776740691bc28d
|
[] |
no_license
|
sharmakajal0/codechef_problems
|
00381e9bf1996b859e46f087c2ffafd9d7a10ef1
|
0b979029e0a821f47fbdd6f9c624daee785a02e7
|
refs/heads/master
| 2020-05-29T15:04:40.459979
| 2020-03-29T08:44:53
| 2020-03-29T08:44:53
| 189,212,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
#!/usr/bin/env python
for _ in range(int(input())):
s = str(input())
amber = 0
brass = 0
for i in s:
if i == 'a':
amber += 1
elif i == 'b':
brass += 1
if amber > brass:
print(brass)
elif brass > amber:
print(amber)
else:
print(amber)
|
[
"sharma.kajal.sitm@gmail.com"
] |
sharma.kajal.sitm@gmail.com
|
11a635d63cb3cc6ec4f23bc2d39a60be2dea79e2
|
e37f5df6d380e9395e2433558be05090c2e2cd72
|
/tspec_mapping/make_index_from_field.py
|
8384803f596600e08a0ab8f71024bd3eefc7a835
|
[] |
no_license
|
keflavich/tspec_mapping
|
c8bcff914095cb3bc2bfc9dccdcbe7897e5399c7
|
b61e561235a5c3c104cb3a59e016fc252dbd5845
|
refs/heads/master
| 2021-01-18T14:15:14.241892
| 2015-02-13T06:22:29
| 2015-02-13T06:22:29
| 7,315,394
| 0
| 0
| null | 2013-09-28T15:54:17
| 2012-12-25T07:32:22
|
Python
|
UTF-8
|
Python
| false
| false
| 5,970
|
py
|
import astroquery.irsa
import astrometry
import astropy.io.fits
import numpy as np
import atpy
import os
def make_index_from_table(table,fieldname,fov=None,clobber=False,**kwargs):
"""
Given a table with RA and Dec columns (case-sensitive!), build an astrometry.net
quad index
Parameters
----------
table : Table
astropy.io.table or atpy.table instance (recarrays require different cleaning operations)
fieldname : str
Seed name for the output catalog file and index file
clobber : bool
Overwrite existing fits files / indices?
kwargs :
Are passed to astrometry.build_index
"""
#fitstable = astropy.io.fits.BinTableHDU(data=table)
newtable = atpy.Table(name=fieldname)
for colname in table.dtype.names:
newtable.add_column(colname, table[colname])
# sanitize fieldname
fieldname = fieldname.replace(" ","_") # what other chars should I be careful of?
#fitstable.writeto(fieldname+".fits",clobber=clobber)
newtable.write(fieldname+".fits",overwrite=clobber)
if fov is None:
# guess the FOV... sort of
rarange = (np.max(table['RA']) - np.min(table['RA']))*3600
decrange = (np.max(table['Dec'])-np.min(table['Dec']))*3600
fov = (rarange+decrange)/2.
return make_index_from_fitstable(fieldname+'.fits',fieldname,fov=fov,**kwargs)
def make_index_from_fitstable(fitstablename, fieldname=None, fov=None, preset_list=None, **kwargs):
"""
Build an index from a FITS table already on disk (very thin wrapper of build_index)
Parameters
----------
fitstablename : str
Full path to a .fits table with the 2nd header being a BinTableHDU for
astrometry's build-index to parse
preset_list : list
List of presets, in the range -5 to 21, to build indices for
fov : int
field of view in arcseconds
fieldname : str
output prefix for the index file. If not specified, will use the root string
of the fitsfilename
"""
if fov is None and 'scale_number' not in kwargs and preset_list is None:
raise ValueError("Must specify a preset or a FOV")
elif 'scale_number' in kwargs:
presets = [kwargs.pop('scale_number')]
elif preset_list is not None:
presets = preset_list
else:
# determine appropriate "presets" to use
preset = astrometry.get_closest_preset(fov/60.)
if preset > -4:
presets = [preset-2, preset-1,preset,preset+1]
elif preset > -5:
presets = [preset-1,preset,preset+1]
else:
presets = [preset,preset+1]
if fieldname is None:
fieldname = os.path.split( os.path.splitext(fitstablename)[0] )[1]
stdout,stderr = "",""
for preset in presets:
_stdout,_stderr = astrometry.build_index(fieldname+".fits",scale_number=preset,**kwargs)
stdout += _stdout
stderr += _stderr
return stdout,stderr
def make_index_from_field_2MASS(coords, fieldname, fov=900, clobber=False,
quality_exclude="UX", **kwargs):
"""
Create an index file. The input should be IRSA-parseable coordinates, e.g.
a name, ra/dec, or glon/glat coords
Parameters
----------
coords : str
IRSA-parseable coordinates or SIMBAD-readable filename
fieldname : str
Prefix string for output file name
fov : int
Field of view to include in arcseconds (Circular)
clobber : bool
Overwrite existing data files?
quality_exclude : str
Entries in the catalog with these characters will be excluded
Example
-------
>>> make_index_from_field_2MASS('Sgr C','Sgr C',300,scan_catalog=True,clobber=True)
>>> make_index_from_field_2MASS('266.1512 -29.4703','Sgr C',300,scan_catalog=True,clobber=True)
>>> make_index_from_field_2MASS('359.4288 -00.0898 gal','Sgr C',300,scan_catalog=True,clobber=True)
"""
fieldname = fieldname.replace(" ","_") # what other chars should I be careful of?
table = astroquery.irsa.query_gator_box('pt_src_cat',coords,fov)
table.rename_column('ra','RA')
table.rename_column('dec','Dec')
table = table[table['extd_flg']==0] # don't use extended sources; they're bad for astrometry
cleantable = _clean_table(table)
return make_index_from_table(cleantable,fieldname,fov=fov,clobber=clobber,**kwargs)
def make_index_from_field_UKIDSS(glon,glat,fieldname,catalog='GPS',fov=900,clobber=False,**kwargs):
"""
Create an index file. The input should be UKIDSS-parseable coordinates, e.g.
glon,glat (so far, only a galactic lon/lat query tool is implemented
Example
-------
>>> make_index_from_field_UKIDSS(359.4288,-00.0898,'Sgr C',fov=300,scan_catalog=True,clobber=True)
"""
fieldname = fieldname.replace(" ","_") # what other chars should I be careful of?
ukquery = astroquery.ukidss.UKIDSSQuery()
ukquery.programmeID = catalog
uktable = ukquery.get_catalog_gal(glon,glat,radius=fov/60.)[0]
uktable.writeto(fieldname+".fits",clobber=clobber)
#bintab = table[0][1]
#bintab.data = bintab.data.astype(newtype)
#table.rename_column('ra','RA')
#table.rename_column('dec','Dec')
#cleantable = _clean_table(table)
return make_index_from_fitstable(fieldname+".fits",fieldname=fieldname,fov=fov,**kwargs)
def _clean_table(table):
"""
Hack to convert a table to a FITS-friendly numpy ndarray;
this will become obsolete when astropy's table includes a FITS writer
"""
float_types = [np.float, np.float128, np.float16, np.float32, np.float64, np.float_, np.floating]
new_fields = [(k,np.dtype('S8')) if v[0].type == np.object_ else
(k,np.float64) if v[0].type in float_types else (k,v[0])
for (k,v) in table._data.dtype.fields.iteritems()]
new_array = np.array(table._data, dtype=new_fields)
return new_array
|
[
"keflavich@gmail.com"
] |
keflavich@gmail.com
|
c423aa3aae2fbf9cab46298e451c9a089326366a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/vcFgGJHxhTwRiLK5d_14.py
|
4a507ff87ec2b6c51b89dc31487dd6f18bceb1b8
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from fractions import gcd
def smallest(n):
lcm = 1
for i in range(1, n+1):
lcm = (lcm * i)//(gcd(int(lcm), i))
return int(lcm)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
463cf524dbd01e72dbb67f1b1b24dff9c7398279
|
8bbb508b2efd88beff28e072b6ba3ad5ad86f9c7
|
/Config_ipython_import/ipython/import_here.py
|
f079107a00955301b1fb74660f6674d951bf3ce6
|
[] |
no_license
|
wubinbai/2019
|
1f31a96d49c3383e696f208358347341783f6141
|
5f1e191a037b793abb97120f4e13185782c64a2c
|
refs/heads/master
| 2021-06-30T11:48:16.230757
| 2020-09-25T09:09:01
| 2020-09-25T09:09:01
| 162,510,722
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
import numpy as np
import sys
#np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(threshold=100)
import pandas as pd
import matplotlib.pyplot as plt
plt.ion()
import seaborn as sns
from tqdm import tqdm
def plot_whole(df):
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df)
h = help
# Better help function he():
def he():
global ar
ar = input('Enter the function name for help:')
help(eval(ar))
# for . operation of dir
# use eval(repr(xxx.i))
from pandas import read_csv as pdrc
# for ipython to display all results in the jupyter notebook:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
def my_plot(data_array):
plt.figure()
plt.plot(data_array)
plt.grid()
def my_plotas(data_array):
plt.figure()
plt.plot(data_array)
plt.plot(data_array,'b*')
plt.grid()
def save_model_keras(model,save_path):
from keras.utils import plot_model
plot_model(model,show_shapes=True,to_file=save_path)
def torchviz_pdf(model,input_tensor):
from torchviz import make_dot
vis_graph = make_dot(model(input_tensor), params=dict(model.named_parameters()))
    vis_graph.view()  # saves a "Digraph.gv.pdf" file in the current directory and opens it in the default viewer
def torch_summary(model,input_size):
from torchsummary import summary
summary(model.cuda(), input_size=input_size)
|
[
"wubinbai@yahoo.com"
] |
wubinbai@yahoo.com
|
d9b120c6b3b4f2a5ecffddf3d0f51f0bbf07cc85
|
401f783a202949adbf144b5780bcd87a6daf2299
|
/code/python/Day-89/SumNested.py
|
790c9104c096a14e9c9ead74bef4561443cec9d7
|
[] |
no_license
|
TalatWaheed/100-days-code
|
1934c8113e6e7be86ca86ea66c518d2f2cedf82a
|
b8fd92d4ddb6adc4089d38ac7ccd2184f9c47919
|
refs/heads/master
| 2021-07-04T14:28:45.363798
| 2019-03-05T13:49:55
| 2019-03-05T13:49:55
| 140,101,486
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
def sum1(lst):
total = 0
for element in lst:
if (type(element) == type([])):
total = total + sum1(element)
else:
total = total + element
return total
print( "Sum is:",sum1([[1,2],[3,4]]))
|
[
"noreply@github.com"
] |
TalatWaheed.noreply@github.com
|
7258c482e8ee9b88d64784f4b8132d59778a383d
|
6c721f3cfce6dc88396cd3b5f6a59d65a2ea5033
|
/some_learn/Data_Set_handle/Caltech-Dateset/test/caltech_generate_xml/generate_xml.py
|
5252a995b10a61e6a84100101e23d4a81ac98bc2
|
[
"MIT"
] |
permissive
|
unicoe/PycharmProjects
|
20a3dabe88c7874da54451c7bb16999afc0eee35
|
23ff314eb5ac9bfa01a8278089d722b5d0061751
|
refs/heads/master
| 2020-03-23T09:16:25.907188
| 2019-12-21T03:10:49
| 2019-12-21T03:10:49
| 141,377,686
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,550
|
py
|
#!/usr/bin/env python
# coding:utf-8
#from xml.etree.ElementTree import Element, SubElement, tostring
from lxml.etree import Element, SubElement, tostring
import pprint
from xml.dom.minidom import parseString
import os
def mkdir(path):
import os
path = path.strip()
path = path.rstrip("\\")
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
print path + 'ok'
return True
else:
print path + 'failed!'
return False
def generate_xml(file_info, obj):
node_root = Element('annotation')
node_folder = SubElement(node_root, 'folder')
node_folder.text = file_info[0]
node_filename = SubElement(node_root, 'filename')
node_filename.text = file_info[1]
node_size = SubElement(node_root, 'size')
node_width = SubElement(node_size, 'width')
node_width.text = '640'
node_height = SubElement(node_size, 'height')
node_height.text = '480'
node_depth = SubElement(node_size, 'depth')
node_depth.text = '3'
for obj_i in obj:
print obj_i
node_object = SubElement(node_root, 'object')
node_name = SubElement(node_object, 'name')
#node_name.text = 'mouse'
node_name.text = 'person'
node_name = SubElement(node_object, 'difficult')
# node_name.text = 'mouse'
node_name.text = '0'
node_bndbox = SubElement(node_object, 'bndbox')
node_xmin = SubElement(node_bndbox, 'xmin')
#node_xmin.text = '99'
node_xmin.text = obj_i['xmin']
node_ymin = SubElement(node_bndbox, 'ymin')
#node_ymin.text = '358'
node_ymin.text = obj_i['ymin']
node_xmax = SubElement(node_bndbox, 'xmax')
#node_xmax.text = '135'
node_xmax.text = obj_i['xmax']
node_ymax = SubElement(node_bndbox, 'ymax')
#node_ymax.text = '375'
node_ymax.text = obj_i['ymax']
    xml = tostring(node_root, pretty_print=True)  # pretty-print: insert line breaks where needed
dom = parseString(xml)
#file_root = '/home/user/Downloads/caltech_data_set/data_train/'
file_root = '/home/user/Downloads/caltech_data_set/data_reasonable_3_19/'
file_name = file_root + file_info[0];
mkdir (file_name)
fw = open(file_name+"/"+file_info[1].split('.')[0]+".xml", 'a+')
fw.write(xml)
print "xml _ ok"
fw.close()
#for debug
#print xml
def printPath(level, path):
global allFileNum
    '''
    Print all folders and files under a directory.
    '''
    # all sub-folders; the first field is the level of this directory
dirList = []
    # all files
fileList = []
    # os.listdir returns a list containing the names of the entries in the directory
files = os.listdir(path)
    # append the directory level first
dirList.append(str(level))
for f in files:
if(os.path.isdir(path + '/' + f)):
            # skip hidden folders, because there are too many of them
if(f[0] == '.'):
pass
else:
                # add non-hidden folders
dirList.append(f)
if(os.path.isfile(path + '/' + f)):
            # add files
fileList.append(f)
    # used as a flag: the first entry of the folder list (the level) is not printed
i_dl = 0
for dl in dirList:
if(i_dl == 0):
i_dl = i_dl + 1
else:
            # print to the console: every directory except the first entry
print '-' * (int(dirList[0])), dl
            # recurse: print all folders and files under this directory, level + 1
printPath((int(dirList[0]) + 1), path + '/' + dl)
print fileList
for fl in fileList:
        # print the file
#print '-' * (int(dirList[0])), fl
        # roughly count how many files there are
#allFileNum = allFileNum + 1
"""
操作文件进行读写
"""
print fl[12:17],fl[17:21]
file_info = []
file_info.append(fl[12:17]+'/'+fl[17:21])
print file_info
print path
file_name = path+"/"+fl
fw = open(file_name, 'r');
line_content = fw.readlines()
fw.close()
print line_content
tmp = -1
obj = []
con_len = len(line_content)
try:
string = line_content[0].split(" ")
tmp = int(string[0])
except Exception:
continue
file_info.append(str(tmp) + '.jpg')
xmin = str(int(float(string[1])))
ymin = str(int(float(string[2])))
xmax = str(int(float(string[1]) + float(string[3])))
ymax = str(int(float(string[2]) + float(string[4])))
dict1 = {}
dict1["xmin"] = xmin
dict1["ymin"] = ymin
dict1["xmax"] = xmax
dict1["ymax"] = ymax
obj.append(dict1)
for con_i in xrange(1, con_len):
string = line_content[con_i].split(" ")
tmp1 = int(string[0])
if tmp == tmp1:
xmin = str(int(float(string[1])))
ymin = str(int(float(string[2])))
xmax = str(int(float(string[1]) + float(string[3])))
ymax = str(int(float(string[2]) + float(string[4])))
dict1 = {}
dict1["xmin"] = xmin
dict1["ymin"] = ymin
dict1["xmax"] = xmax
dict1["ymax"] = ymax
obj.append(dict1)
elif tmp1 > 0:
generate_xml(file_info, obj)
obj = []
tmp = tmp1
file_info[1] = str(tmp1) + ".jpg"
xmin = str(int(float(string[1])))
ymin = str(int(float(string[2])))
xmax = str(int(float(string[1]) + float(string[3])))
ymax = str(int(float(string[2]) + float(string[4])))
dict1 = {}
dict1["xmin"] = xmin
dict1["ymin"] = ymin
dict1["xmax"] = xmax
dict1["ymax"] = ymax
obj.append(dict1)
continue
def read_annotations_generate_fileinfo_obj(file_path):
pass
if __name__=="__main__":
#
# file_info = ['set00/V000', '1.jpg']
#
# obj = []
# obj1 = {"xmin":"1", "ymin":"1", "xmax":"5", "ymax":"5"}
# obj2 = {"xmin":"2", "ymin":"2", "xmax":"6", "ymax":"6"}
# obj.append(obj1)
# obj.append(obj2)
#
# generate_xml(file_info, obj)
#
printPath(1, "/home/user/Downloads/caltech_data_set/data_reasonable_3_19")
#printPath(1, "/home/user/Downloads/caltech_data_set/data_reasonable_train")
|
[
"unicoe@163.com"
] |
unicoe@163.com
|
c91effaaaa7685813f70b6e5719bd5298048a24f
|
f714db4463dd37fc33382364dc4b1963a9053e49
|
/src/sentry/integrations/msteams/utils.py
|
3dd201dde1ca3bb9f4e5d09390e9154ffa859fb7
|
[
"BUSL-1.1",
"Apache-2.0"
] |
permissive
|
macher91/sentry
|
92171c2ad23564bf52627fcd711855685b138cbd
|
dd94d574403c95eaea6d4ccf93526577f3d9261b
|
refs/heads/master
| 2021-07-07T08:23:53.339912
| 2020-07-21T08:03:55
| 2020-07-21T08:03:55
| 140,079,930
| 0
| 0
|
BSD-3-Clause
| 2020-05-13T11:28:35
| 2018-07-07T11:50:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
from __future__ import absolute_import
from sentry.models import Integration
from sentry.utils.compat import filter
from .client import MsTeamsClient
MSTEAMS_MAX_ITERS = 100
def channel_filter(channel, name):
# the general channel has no name in the list
# retrieved from the REST API call
if channel.get("name"):
return name == channel.get("name")
else:
return name == "General"
def get_channel_id(organization, integration_id, name):
try:
integration = Integration.objects.get(
provider="msteams", organizations=organization, id=integration_id
)
except Integration.DoesNotExist:
return None
team_id = integration.external_id
client = MsTeamsClient(integration)
# handle searching for channels first
channel_list = client.get_channel_list(team_id)
filtered_channels = list(filter(lambda x: channel_filter(x, name), channel_list))
if len(filtered_channels) > 0:
return filtered_channels[0].get("id")
# handle searching for users
members = client.get_member_list(team_id, None)
for i in range(MSTEAMS_MAX_ITERS):
member_list = members.get("members")
continuation_token = members.get("continuationToken")
filtered_members = list(filter(lambda x: x.get("name") == name, member_list))
if len(filtered_members) > 0:
# TODO: handle duplicate username case
user_id = filtered_members[0].get("id")
tenant_id = filtered_members[0].get("tenantId")
return client.get_user_conversation_id(user_id, tenant_id)
if not continuation_token:
return None
members = client.get_member_list(team_id, continuation_token)
return None
|
[
"noreply@github.com"
] |
macher91.noreply@github.com
|
15b4273440bb14b4e5c6be697d42d8398d30d59e
|
6034b74d0d31997aa46f669d3d77bb46e199a430
|
/tests/test_dataset_processor.py
|
6cae4a56b6cba92bb31d6fa50efab979b3b32076
|
[
"Apache-2.0"
] |
permissive
|
bupt-nlp/sequence-labeling-models
|
5ea641eec4fad1f470bdb7f3dd64ef0b892ba557
|
75bb8c24098ad9f307605fad61d811bcd299cfcc
|
refs/heads/master
| 2023-06-01T00:59:56.753952
| 2021-06-19T11:28:13
| 2021-06-19T11:28:13
| 287,694,702
| 2
| 2
|
Apache-2.0
| 2020-08-15T07:07:06
| 2020-08-15T06:54:35
| null |
UTF-8
|
Python
| false
| false
| 5,152
|
py
|
from __future__ import annotations
import os
from typing import List, Tuple
from allennlp.data import Vocabulary, Instance
from allennlp.data.data_loaders import SimpleDataLoader, DataLoader
from allennlp.data.token_indexers.pretrained_transformer_indexer import PretrainedTransformerIndexer
from allennlp.data.dataset_readers.sequence_tagging import SequenceTaggingDatasetReader
from allennlp.models.model import Model
from allennlp.models.simple_tagger import SimpleTagger
from allennlp.modules.text_field_embedders.basic_text_field_embedder import BasicTextFieldEmbedder
from allennlp.modules.token_embedders.pretrained_transformer_embedder import PretrainedTransformerEmbedder
from allennlp.modules.seq2seq_encoders import PassThroughEncoder
from allennlp.training.trainer import Trainer
from allennlp.training import GradientDescentTrainer
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.models import Model
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.training.optimizers import AdamOptimizer
from utils.dataset_processor import read_line, convert_bmes_to_sequence_tagging, convert_two_array_to_text_classification_corpus
def test_read_line():
line = '我爱 O'
tokens, labels = read_line(line)
assert tokens == ['我', '爱']
assert labels == ['O', 'O']
line = '吴京 PER'
tokens, labels = read_line(line)
assert tokens == ['吴', '京']
assert labels == ['B-PER', 'E-PER']
line = '吴京京 PER'
tokens, labels = read_line(line)
assert tokens == ['吴', '京', '京']
assert labels == ['B-PER', 'M-PER', 'E-PER']
def test_bmes_converter():
base_dir = './data/weibo'
for file in os.listdir(base_dir):
if not file.endswith('.txt'):
continue
input_file = os.path.join(base_dir, file)
output_file = os.path.join(base_dir, file.replace('txt', 'corpus'))
convert_bmes_to_sequence_tagging(input_file, output_file)
def test_sequence_tagging_reader():
model_name = 'bert-base-chinese'
bert_token_indexers = PretrainedTransformerIndexer(model_name=model_name)
reader = SequenceTaggingDatasetReader(token_indexers={"tokens": bert_token_indexers})
train_file = './data/weibo/train.corpus'
dev_file = './data/weibo/dev.corpus'
test_file = './data/weibo/dev.corpus'
train_instances = list(reader.read(train_file))
dev_instances = list(reader.read(dev_file))
test_instances = list(reader.read(test_file))
vocab: Vocabulary = Vocabulary.from_instances(train_instances)
assert vocab.get_namespaces() is not None
bert_text_field_embedder = PretrainedTransformerEmbedder(model_name=model_name)
tagger = SimpleTagger(
vocab=vocab,
text_field_embedder=BasicTextFieldEmbedder(
token_embedders={
'tokens': bert_text_field_embedder
}
),
encoder=PassThroughEncoder(bert_text_field_embedder.get_output_dim()),
calculate_span_f1=True,
label_encoding="BMES",
# verbose_metrics=True
)
train_data_loader, dev_data_loader = build_data_loaders(train_instances, dev_instances)
train_data_loader.index_with(vocab)
dev_data_loader.index_with(vocab)
trainer = build_trainer(model=tagger, serialization_dir='./output', train_loader=train_data_loader, dev_loader=dev_data_loader)
print("Starting training")
trainer.train()
print("Finished training")
def build_data_loaders(
train_data: List[Instance],
dev_data: List[Instance],
) -> Tuple[DataLoader, DataLoader]:
train_loader = SimpleDataLoader(train_data, 8, shuffle=True)
dev_loader = SimpleDataLoader(dev_data, 8, shuffle=False)
return train_loader, dev_loader
def build_trainer(
model: Model,
serialization_dir: str,
train_loader: DataLoader,
dev_loader: DataLoader,
) -> Trainer:
parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
optimizer = AdamOptimizer(parameters) # type: ignore
trainer = GradientDescentTrainer(
model=model,
serialization_dir=serialization_dir,
data_loader=train_loader,
validation_data_loader=dev_loader,
num_epochs=5,
optimizer=optimizer,
)
return trainer
def test_ask_ubuntu_corpus():
base_dirs = [
'./data/ask_ubuntu/intent-classification',
'./data/banking/intent-classification',
'./data/chatbot/intent-classification',
'./data/clinc/intent-classification',
'./data/hwu/intent-classification',
'./data/web_applications/intent-classification',
]
for base_dir in base_dirs:
for file in os.listdir(base_dir):
if file.endswith('.corpus'):
continue
file_path = os.path.join(base_dir, file)
convert_two_array_to_text_classification_corpus(
file_path
)
if __name__ == "__main__":
test_sequence_tagging_reader()
|
[
"1435130236@qq.com"
] |
1435130236@qq.com
|
f98f2e83b1096b53c66e15f0fbf035ddc93c1d8e
|
197420c1f28ccb98059888dff214c9fd7226e743
|
/happy_pythoning_cource_2_prodv/3.1 Bool/3.1. bool.py
|
be2e6728722c29ced0d3c7440e63523df6444bf2
|
[] |
no_license
|
Vovanuch/python-basics-1
|
fc10b6f745defff31364b66c65a704a9cf05d076
|
a29affec12e8b80a1d3beda3a50cde4867b1dee2
|
refs/heads/master
| 2023-07-06T17:10:46.341121
| 2021-08-06T05:38:19
| 2021-08-06T05:38:19
| 267,504,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
'''
What will be printed to the screen as a result of running the following program?
'''
numbers = [-6, -8, 0, 1, 3, 8, -7, 12, 17, 24, 25, 3, 5, 1]
res = 0
for num in numbers:
res += (num % 2 == 1) and (num > 1)
print(res)
|
[
"vetohin.vladimir@gmail.com"
] |
vetohin.vladimir@gmail.com
|
cfcc64c2dd2a5398b2f9b30038ba8ada47ef3178
|
31023b59e743b5bef1c2c935dc1f2b26e8e10e9b
|
/线程进程/进程间通信/数据共享.py
|
ef96b8051c960f8e1af50dca4856fa31d9922af5
|
[] |
no_license
|
hsyy673150343/PythonLearning
|
417650d8ab5dbafbede08ef40223b29e82738443
|
817c6bd4c2ecba2549fa0be9f0c41337fe5acfdf
|
refs/heads/master
| 2020-05-18T06:36:25.648709
| 2019-05-23T13:40:59
| 2019-05-23T13:40:59
| 184,239,403
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
# @TIME :2019/3/20 19:33
# @Author : 洪松
# @File : 数据共享.py
from multiprocessing import Process, Manager
def f(d, l,n):
d[n] = n
d["name"] ="alvin"
l.append(n)
if __name__ == '__main__':
with Manager() as manager:
d = manager.dict()
l = manager.list(range(5))
p_list = []
for i in range(10):
p = Process(target=f, args=(d,l,i))
p.start()
p_list.append(p)
for res in p_list:
res.join()
print(d)
print(l)
|
[
"13096337080@163.com"
] |
13096337080@163.com
|
4d4d16b4c0f08b20c039e19a68cbe44848bc592a
|
e9a210d6e58bd0450197dfb4bbbc03f66788a297
|
/eventplus/talks/migrations/0005_auto_20171031_0209.py
|
3d87473b034b28fb35cd41f7b8916e329532208b
|
[] |
no_license
|
werberth/EventPlus
|
3895587c8f4b2b6cc03507d10badd3c3cd2c28c9
|
d80c2ab5ec30fa83950380567195514df1cc9892
|
refs/heads/master
| 2021-08-28T17:00:21.363698
| 2017-12-12T21:10:16
| 2017-12-12T21:10:16
| 108,935,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-10-31 05:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('talks', '0004_auto_20171029_2142'),
]
operations = [
migrations.AlterField(
model_name='talk',
name='date',
field=models.DateField(verbose_name='Date'),
),
]
|
[
"werberthvinicius@gmail.com"
] |
werberthvinicius@gmail.com
|
3548555fe01d146e2f03abab2eaebe86099355ba
|
71e49ae86e6398f3278a031edaeebee2218c8b9b
|
/todo_django/todo_django/settings.py
|
6abb1e168465164fa89608d6d98a73a2d985b19f
|
[] |
no_license
|
swestphal/todo
|
38558688063089b30e0ac1ca4906ef1f2d816ad8
|
97ef67696abd4e46fcc5a61270d6ad60596de321
|
refs/heads/main
| 2023-05-13T11:25:26.614616
| 2021-05-28T15:26:19
| 2021-05-28T15:26:19
| 371,403,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,405
|
py
|
"""
Django settings for todo_django project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-6zj_#s34n7qjx$z739po7=4kl&a7i^*%t2w=w$n4986h+y4#x7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'task'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"hello@swestphal.io"
] |
hello@swestphal.io
|
e5715062db6a8e7bb894b4f5bd2d275d98e604be
|
0fb505765604b586c3a46e608fc23930f8501db5
|
/venv/lib/python2.7/site-packages/django/contrib/messages/apps.py
|
1a9189383ef26083e87cbc3f46cf61eaad518797
|
[
"MIT"
] |
permissive
|
domenicosolazzo/practice-django
|
b05edecc302d97c97b7ce1de809ea46d59e2f0e6
|
44e74c973384c38bd71e7c8a1aacd1e10d6a6893
|
refs/heads/master
| 2021-08-19T15:36:22.732954
| 2015-01-22T18:42:14
| 2015-01-22T18:42:14
| 25,118,384
| 0
| 0
|
MIT
| 2021-06-10T19:50:51
| 2014-10-12T12:08:47
|
Python
|
UTF-8
|
Python
| false
| false
| 196
|
py
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class MessagesConfig(AppConfig):
name = 'django.contrib.messages'
verbose_name = _("Messages")
|
[
"solazzo.domenico@gmail.com"
] |
solazzo.domenico@gmail.com
|
7f847a8a75b0bd5215fc381713b860453fc20803
|
d05a59feee839a4af352b7ed2fd6cf10a288a3cb
|
/xlsxwriter/test/comparison/test_chart_format24.py
|
017038f19ec35b57c30a8bd3b6c3821087c59443
|
[
"BSD-2-Clause-Views"
] |
permissive
|
elessarelfstone/XlsxWriter
|
0d958afd593643f990373bd4d8a32bafc0966534
|
bb7b7881c7a93c89d6eaac25f12dda08d58d3046
|
refs/heads/master
| 2020-09-24T06:17:20.840848
| 2019-11-24T23:43:01
| 2019-11-24T23:43:01
| 225,685,272
| 1
| 0
|
NOASSERTION
| 2019-12-03T18:09:06
| 2019-12-03T18:09:05
| null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_format24.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [115374720, 115389568]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
chart.set_chartarea({'fill': {'color': 'yellow', 'transparency': 75}})
chart.set_plotarea({'fill': {'color': 'red', 'transparency': 25}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
ec9733d55c01eb7e99f50e9f9fa51fd0cb38aaf4
|
6662aa37e6a5df5ef72359f5e52ba74a77ef00b6
|
/sensors/gps_2.py
|
886adcf175323e317a3326910e154dad195e52be
|
[] |
no_license
|
rice-eclipse/RTR-PocketBeagle
|
b3f4a7536a28d5e412e7184dc8399558264f0e78
|
11939281355204c08f32da316ee35061def254f0
|
refs/heads/master
| 2023-01-01T18:28:53.201874
| 2020-10-24T18:48:15
| 2020-10-24T18:48:15
| 295,223,021
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,667
|
py
|
# Simple GPS module demonstration.
# Will print NMEA sentences received from the GPS, great for testing connection
# Uses the GPS to send some commands, then reads directly from the GPS
import time
import board
#import busio
import adafruit_gps
import digitalio
# Create a serial connection for the GPS connection using default speed and
# a slightly higher timeout (GPS modules typically update once a second).
# These are the defaults you should use for the GPS FeatherWing.
# For other boards set RX = GPS module TX, and TX = GPS module RX pins.
#uart = busio.UART(board.TX, board.RX, baudrate=9600, timeout=10)
# for a computer, use the pyserial library for uart access
import serial
import os
permission_command = "sudo chmod 777 /dev/ttyO0"
sudoPassword = "temppwd"
os.system('echo %s|sudo -S %s' % (sudoPassword, permission_command))
reset = digitalio.DigitalInOut(board.P1_31)
reset.direction = digitalio.Direction.OUTPUT
standby = digitalio.DigitalInOut(board.P1_35)
standby.direction = digitalio.Direction.OUTPUT
fix = digitalio.DigitalInOut(board.P1_34)
fix.direction = digitalio.Direction.INPUT
reset.value = True #set reset pin to high, pin is active low
standby.value = True #set standby pin to high, pin is active low
uart = serial.Serial("/dev/ttyO0", baudrate=9600, timeout=1)
# If using I2C, we'll create an I2C interface to talk to using default pins
# i2c = board.I2C()
# Create a GPS module instance.
gps = adafruit_gps.GPS(uart) # Use UART/pyserial
# gps = adafruit_gps.GPS_GtopI2C(i2c) # Use I2C interface
# Initialize the GPS module by changing what data it sends and at what rate.
# These are NMEA extensions for PMTK_314_SET_NMEA_OUTPUT and
# PMTK_220_SET_NMEA_UPDATERATE but you can send anything from here to adjust
# the GPS module behavior:
# https://cdn-shop.adafruit.com/datasheets/PMTK_A11.pdf
# Turn on the basic GGA and RMC info (what you typically want)
# gps.send_command(b"PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")
# Turn on just minimum info (RMC only, location):
# gps.send_command(b'PMTK314,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')
# Turn off everything:
# gps.send_command(b'PMTK314,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')
# Turn on everything (not all of it is parsed!)
gps.send_command(b'PMTK314,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0')
# Set update rate to once a second (1hz) which is what you typically want.
gps.send_command(b"PMTK220,1000")
# Or decrease to once every two seconds by doubling the millisecond value.
# Be sure to also increase your UART timeout above!
# gps.send_command(b'PMTK220,2000')
# You can also speed up the rate, but don't go too fast or else you can lose
# data during parsing. This would be twice a second (2hz, 500ms delay):
# gps.send_command(b'PMTK220,500')
# Main loop runs forever printing data as it comes in
timestamp = time.monotonic()
while True:
#os.system('echo %s|sudo -S %s' % (sudoPassword, permission_command))
    if uart.inWaiting():  # inWaiting() must be called; the bare attribute is always truthy
print("in waiting")
else:
print("not in waiting")
if fix.value:
print("fix pin is HIGH")
else:
print("fix pin is LOW")
try:
data = gps.read(32) # read up to 32 bytes
print(data) # this is a bytearray type
if data is not None:
# convert bytearray to string
data_string = "".join([chr(b) for b in data])
print(data_string, end="")
if time.monotonic() - timestamp > 5:
# every 5 seconds...
gps.send_command(b"PMTK605") # request firmware version
timestamp = time.monotonic()
except Exception as e:
print("Exception Caught: " + str(e))
|
[
"debian@beaglebone.localdomain"
] |
debian@beaglebone.localdomain
|
317316afc2eda10a2c3dfedce7768855c411e93a
|
6aa36fee3f4fcc9ac8f5509e51ea6bd8fc05b39b
|
/virtualenv-flask/lib/python2.7/site-packages/cybox/helper.py
|
f65d1b9819c9e719d461e09d6c1d1c63d37795f0
|
[] |
no_license
|
syn-ack-zack/msg-stix-parser
|
8c46c4d897d579162f224360a077ac42f28ffe89
|
1edb7c3b6d60f76f24b91830a1ae7076d46ede14
|
refs/heads/master
| 2021-03-27T15:01:07.344754
| 2016-09-30T16:43:22
| 2016-09-30T16:43:22
| 69,684,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
#!/usr/bin/env python
# Copyright (c) 2013, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
'''
CybOX Common Indicator API
An api for creating observables for common indicators:
ipv4 addresses, domain names, file hashes, and urls.
'''
import sys
import uuid
from cybox.core import Observables, Observable, Object
from cybox.common import Hash
from cybox.objects.address_object import Address
from cybox.objects.file_object import File
from cybox.objects.uri_object import URI
def create_ipv4_observable(ipv4_address):
'''Create a CybOX Observable representing an IPv4 address'''
ipv4_object = Address.from_dict({'address_value': ipv4_address,
'category': Address.CAT_IPV4})
return Observable(ipv4_object)
def create_ipv4_list_observables(list_ipv4_addresses):
    '''Create a list of CybOX Observables, each representing an IPv4 address'''
    list_observables = []
    for ipv4_address in list_ipv4_addresses:
        # Build the Address object directly; the original code called an
        # undefined create_ipv4_object() helper, which would raise NameError.
        ipv4_object = Address.from_dict({'address_value': ipv4_address,
                                         'category': Address.CAT_IPV4})
        list_observables.append(Observable(ipv4_object))
    return list_observables
def create_email_address_observable(email_address):
    '''Create a CybOX Observable representing an email address'''
email_address_object = Address.from_dict({'address_value': email_address,
'category': Address.CAT_EMAIL})
return Observable(email_address_object)
def create_domain_name_observable(domain_name):
'''Create a CybOX Observable representing a domain name.'''
domain_name_object = URI.from_dict({'value': domain_name,
'type': URI.TYPE_DOMAIN})
return Observable(domain_name_object)
def create_file_hash_observable(fn, hash_value):
'''Create a CybOX Observable representing a file hash.'''
hash_ = Hash(hash_value)
file_ = File()
file_.file_name = fn
file_.add_hash(hash_)
return Observable(file_)
def create_url_observable(url):
url_object = URI.from_dict({'value': url, 'type': URI.TYPE_URL})
return Observable(url_object)
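if __name__ == '__main__':
    # Hedged usage sketch added for illustration (not part of the original
    # module). The indicator values are made up, and it assumes that
    # Observables() accepts a list of Observable objects and exposes to_xml(),
    # as in the python-cybox API of this era.
    observables = Observables([
        create_ipv4_observable('198.51.100.7'),
        create_domain_name_observable('example.com'),
        create_file_hash_observable('dropper.exe',
                                    'd41d8cd98f00b204e9800998ecf8427e'),
        create_url_observable('http://example.com/payload'),
    ])
    print(observables.to_xml())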
|
[
"nagaich@localhost.localdomain"
] |
nagaich@localhost.localdomain
|
7197e5b320ce581df451038b10cf2e2ca4b3c204
|
e28009b0a4584e8d128ed6fbd4ba84a1db11d1b9
|
/824.Goat Latin/824.Goat Latin.py
|
9a074961d672b68bff7b9d3ae8faee89d99a8b11
|
[] |
no_license
|
jerrylance/LeetCode
|
509d16e4285296167feb51a80d6c382b3833405e
|
06ed3e9b27a3f1c0c517710d57fbbd794fd83e45
|
refs/heads/master
| 2020-12-02T23:10:27.382142
| 2020-08-02T02:03:54
| 2020-08-02T02:03:54
| 231,141,551
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
# LeetCode Solution
# Zeyu Liu
# 2019.6.12
# 824.Goat Latin
from typing import List
# method 1 straight forward
class Solution:
def toGoatLatin(self, S: str) -> str:
vowel = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
res = []
S = S.split()
for i,v in enumerate(S):
if v[0] in vowel:
v += 'ma'
else:
v = v[1:] + v[0]
v += 'ma'
v += 'a' * (i + 1)
res.append(v)
return " ".join(res)
# transfer method
solve = Solution()
print(solve.toGoatLatin("I speak Goat Latin"))
# method 2 one-line
class Solution:
def toGoatLatin(self, S: str) -> str:
return ' '.join((w if w[0].lower() in 'aeiou' else w[1:] + w[0]) + 'ma' + 'a' * (i + 1) for i, w in enumerate(S.split()))
# transfer method
solve = Solution()
print(solve.toGoatLatin("I speak Goat Latin"))
|
[
"noreply@github.com"
] |
jerrylance.noreply@github.com
|
5948942590110adf43f925fd248d417aae92946d
|
ea88fac1da1b02e77180c2e20e1fd49b919eecf5
|
/installer/core/providers/aws/boto3/rds.py
|
30e547edc215610260da165de7c20d672e56f79c
|
[
"Apache-2.0"
] |
permissive
|
jacob-kinzer/pacbot
|
7eeb667505e0ddfd333f6e423b51b8199b481692
|
d78605e9e3fd8c34435636495cd6d51c677754e3
|
refs/heads/master
| 2020-04-23T21:46:42.251948
| 2019-02-06T20:47:08
| 2019-02-06T20:47:08
| 171,480,569
| 0
| 0
| null | 2019-02-19T13:41:01
| 2019-02-19T13:41:00
| null |
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
import boto3
def get_rds_client(access_key, secret_key, region):
return boto3.client(
'rds',
region_name=region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
def check_rds_instance_exists(instance_identifier, access_key, secret_key, region):
client = get_rds_client(access_key, secret_key, region)
try:
response = client.describe_db_instances(
DBInstanceIdentifier=instance_identifier
)
return True if len(response['DBInstances']) else False
except:
return False
def check_rds_option_group_exists(name, access_key, secret_key, region):
client = get_rds_client(access_key, secret_key, region)
try:
response = client.describe_option_groups(
OptionGroupName=name
)
return True if len(response['OptionGroupsList']) else False
except:
return False
def check_rds_parameter_group_exists(name, access_key, secret_key, region):
client = get_rds_client(access_key, secret_key, region)
try:
response = client.describe_db_parameter_groups(
DBParameterGroupName=name
)
return True if len(response['DBParameterGroups']) else False
except:
return False
def check_rds_subnet_group_exists(name, access_key, secret_key, region):
client = get_rds_client(access_key, secret_key, region)
try:
response = client.describe_db_subnet_groups(
DBSubnetGroupName=name
)
return True if len(response['DBSubnetGroups']) else False
except:
return False
|
[
"sanjnur@gmail.com"
] |
sanjnur@gmail.com
|
929c23a9620adf2f1c003ec9135e45ad81261b01
|
010c5fbc97731286be00028ff33fc981d943bca3
|
/primal/src/system/bin-data/lift_over.py
|
a2fcb668400bd441d0a7b9f2cd1b78e1afe870a1
|
[] |
no_license
|
orenlivne/ober
|
6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30
|
810b16b2611f32c191182042240851152784edea
|
refs/heads/master
| 2021-01-23T13:48:49.172653
| 2014-04-03T13:57:44
| 2014-04-03T13:57:44
| 6,902,212
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
#!/usr/bin/env python
'''
============================================================
Convert coordinates read from stdin from one human genome
build to another.
Usage: lift_over.py <from-build> <to-build>
stdin line format: chrom bp_in_from_build
stdout line format: bp_in_to_build, or '-' if not found
Created on February 19, 2014
@author: Oren Livne <livne@uchicago.edu>
============================================================
'''
import sys, traceback, util
from pyliftover import LiftOver
if __name__ == '__main__':
try:
src, target = sys.argv[1:3]
if src == target:
for _, bp in (line.strip().split(' ') for line in sys.stdin):
print '%d %d' % (int(bp), int(bp))
else:
lo = LiftOver(src, target)
for chrom, bp in (line.strip().split(' ') for line in sys.stdin):
out = lo.convert_coordinate('chr' + chrom, int(bp))
if not out:
print '-'
else:
print '%d' % (out[0][1],)
except:
traceback.print_exc(file=sys.stdout)
sys.exit(util.EXIT_FAILURE)
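# Hedged usage note added for illustration (the coordinate below is made up):
# converting an hg19 position on chromosome 22 to hg38 would look roughly like
#   echo "22 25000000" | python lift_over.py hg19 hg38
# which prints the lifted-over base-pair position, or '-' if the locus has no
# image in the target build.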
|
[
"oren.livne@gmail.com"
] |
oren.livne@gmail.com
|
4ac2d30029f1696f61dc06f4ed732f1df62f1975
|
5ce393ca7d50b35c41f65188dfe8423646b2fcbc
|
/flask_with_spark/utilities/decorators.py
|
eef1b78e8690d55b6feb452f294174d73054c5b1
|
[
"MIT"
] |
permissive
|
todhm/have_you_read_this_book
|
0c7894319705ed4f047a6b936ab80ea239e5509c
|
905cb1934bafc987b76b6e57dbc63285f491ac88
|
refs/heads/master
| 2022-11-24T07:36:12.514646
| 2018-08-07T05:00:33
| 2018-08-07T05:00:33
| 141,091,550
| 2
| 0
|
MIT
| 2022-11-11T07:26:01
| 2018-07-16T05:42:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 750
|
py
|
from functools import wraps
from flask import session,request,redirect,url_for, abort
def login_required(f):
@wraps(f)
def decorated_function(*args,**kwargs):
if session.get('email') is None:
return redirect(url_for('user_app.login',next=request.url))
if session.get('userIntId') is None :
return redirect(url_for('user_app.login',next=request.url))
return f(*args,**kwargs)
return decorated_function
def logout_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get('email') or session.get('userIntId'):
return redirect(url_for('shopping_app.homepage',next=request.url))
return f(*args,**kwargs)
return decorated_function
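# Hedged usage sketch added for illustration (the blueprint and view names are
# assumptions, not part of this module). A protected page and the login page
# would typically be wrapped like this; because both wrappers redirect with
# next=request.url, the login view can send users back to the page they
# originally requested.
#
# @shopping_app.route('/orders')
# @login_required
# def order_list():
#     ...
#
# @user_app.route('/login')
# @logout_required
# def login():
#     ...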
|
[
"todhm@nate.com"
] |
todhm@nate.com
|
4defe82f22c6b83db3cff78f6e922aefcdaf6ec9
|
f569978afb27e72bf6a88438aa622b8c50cbc61b
|
/douyin_open/VideoListVideoList/models/inline_response200_data.py
|
84ccfc29afdb3da81d954674bb7beb2eca74eec5
|
[] |
no_license
|
strangebank/swagger-petstore-perl
|
4834409d6225b8a09b8195128d74a9b10ef1484a
|
49dfc229e2e897cdb15cbf969121713162154f28
|
refs/heads/master
| 2023-01-05T10:21:33.518937
| 2020-11-05T04:33:16
| 2020-11-05T04:33:16
| 310,189,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,561
|
py
|
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse200Data(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error_code': 'ErrorCode',
'description': 'Description',
'cursor': 'Cursor',
'has_more': 'HasMore',
'list': 'list[Video]'
}
attribute_map = {
'error_code': 'error_code',
'description': 'description',
'cursor': 'cursor',
'has_more': 'has_more',
'list': 'list'
}
def __init__(self, error_code=None, description=None, cursor=None, has_more=None, list=None): # noqa: E501
"""InlineResponse200Data - a model defined in Swagger""" # noqa: E501
self._error_code = None
self._description = None
self._cursor = None
self._has_more = None
self._list = None
self.discriminator = None
self.error_code = error_code
self.description = description
self.cursor = cursor
self.has_more = has_more
if list is not None:
self.list = list
@property
def error_code(self):
"""Gets the error_code of this InlineResponse200Data. # noqa: E501
:return: The error_code of this InlineResponse200Data. # noqa: E501
:rtype: ErrorCode
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this InlineResponse200Data.
:param error_code: The error_code of this InlineResponse200Data. # noqa: E501
:type: ErrorCode
"""
if error_code is None:
raise ValueError("Invalid value for `error_code`, must not be `None`") # noqa: E501
self._error_code = error_code
@property
def description(self):
"""Gets the description of this InlineResponse200Data. # noqa: E501
:return: The description of this InlineResponse200Data. # noqa: E501
:rtype: Description
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this InlineResponse200Data.
:param description: The description of this InlineResponse200Data. # noqa: E501
:type: Description
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def cursor(self):
"""Gets the cursor of this InlineResponse200Data. # noqa: E501
:return: The cursor of this InlineResponse200Data. # noqa: E501
:rtype: Cursor
"""
return self._cursor
@cursor.setter
def cursor(self, cursor):
"""Sets the cursor of this InlineResponse200Data.
:param cursor: The cursor of this InlineResponse200Data. # noqa: E501
:type: Cursor
"""
if cursor is None:
raise ValueError("Invalid value for `cursor`, must not be `None`") # noqa: E501
self._cursor = cursor
@property
def has_more(self):
"""Gets the has_more of this InlineResponse200Data. # noqa: E501
:return: The has_more of this InlineResponse200Data. # noqa: E501
:rtype: HasMore
"""
return self._has_more
@has_more.setter
def has_more(self, has_more):
"""Sets the has_more of this InlineResponse200Data.
:param has_more: The has_more of this InlineResponse200Data. # noqa: E501
:type: HasMore
"""
if has_more is None:
raise ValueError("Invalid value for `has_more`, must not be `None`") # noqa: E501
self._has_more = has_more
@property
def list(self):
"""Gets the list of this InlineResponse200Data. # noqa: E501
        Because of pinned videos, the length of list may be a little more or less than the number specified by count.  # noqa: E501
:return: The list of this InlineResponse200Data. # noqa: E501
:rtype: list[Video]
"""
return self._list
@list.setter
def list(self, list):
"""Sets the list of this InlineResponse200Data.
        Because of pinned videos, the length of list may be a little more or less than the number specified by count.  # noqa: E501
:param list: The list of this InlineResponse200Data. # noqa: E501
:type: list[Video]
"""
self._list = list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse200Data, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse200Data):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"strangebank@gmail.com"
] |
strangebank@gmail.com
|
0955c0d21f9a00b48526ba7a9c1c612b49f3afa2
|
a0eea3e416a050168aba0876c33a8fddf310868a
|
/src/eavatar.x.hub/eavatar/hub/avatar.py
|
db30881d9fe987bf3e1717de9e5f9638a4226eeb
|
[] |
no_license
|
eavatar/exchange-hub
|
a0dee945da6922408773a8d5bd1b9029fcf55dd6
|
c78cdc6aa357442001c76c0daaca3597cd8b4adf
|
refs/heads/master
| 2021-01-13T14:28:29.968008
| 2015-03-05T13:47:18
| 2015-03-05T13:47:18
| 30,749,051
| 0
| 0
| null | 2015-03-05T13:47:19
| 2015-02-13T09:16:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,307
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Avatar-specific functionality.
"""
import json
import time
import logging
import falcon
from datetime import datetime, timedelta
from cqlengine import columns
from cqlengine.models import Model
from eavatar.hub.app import api
from eavatar.hub import views
from eavatar.hub import managers
from eavatar.hub.hooks import check_authentication
from eavatar.hub.util import crypto, codecs
logger = logging.getLogger(__name__)
def _default_expired_at():
return datetime.utcnow() + timedelta(seconds=86400)
# models #
class Avatar(Model):
"""
Represents anything with an identity that can send or receive messages.
"""
xid = columns.Text(primary_key=True, partition_key=True)
owner_xid = columns.Text(default=None)
created_at = columns.DateTime(default=datetime.utcnow)
modified_at = columns.DateTime(default=datetime.utcnow)
expired_at = columns.DateTime(default=_default_expired_at)
# properties = columns.Map(columns.Text, columns.Text)
# links = columns.Set(columns.Text)
# aliases = columns.Set(columns.Text)
class Possession(Model):
"""
Represents relationship between an avatar and its possessions.
"""
owner_xid = columns.Text(primary_key=True)
avatar_xid = columns.Text(primary_key=True, clustering_order="ASC")
@staticmethod
def find_possessions(owner_xid):
return Possession.objects(owner_xid=owner_xid)
# managers #
class AvatarManager(managers.BaseManager):
model = Avatar
def __init__(self):
super(AvatarManager, self).__init__(self.model)
# views #
@falcon.before(check_authentication)
class AvatarCollection(views.ResourceBase):
def on_get(self, req, resp):
"""
Gets avatars belongs to the client.
:param req:
:param resp:
:return:
"""
owner_xid = req.context['client_xid']
qs = Possession.find_possessions(owner_xid)
resp.body = views.EMPTY_LIST
resp.status = falcon.HTTP_200
def on_put(self, req, resp):
try:
data = json.load(req.stream)
avatar = Avatar(xid=data.get('xid'), kind=data.get('kind'))
avatar.save()
resp.body = views.RESULT_OK
resp.status = falcon.HTTP_200
except Exception, e:
logger.error(e)
raise
@falcon.before(check_authentication)
class AvatarResource(views.ResourceBase):
def on_get(self, req, resp, avatar_xid):
if 'self' == avatar_xid:
avatar_xid = req.context['client_xid']
self_link = {
"rel": "self",
"href": "%s" % (req.uri,)
}
messages_link = {
"rel": "messages",
"href": "%s/messages" % (req.uri,),
}
result = dict(
subject="%s" % (req.uri,),
aliases=["avatar:%s" % avatar_xid],
links=[self_link, messages_link],
)
resp.content_type = b"application/jrd+json"
resp.data = json.dumps(result)
resp.status = falcon.HTTP_200
# routes
logger.debug("Binding routes for Avatar module.")
_avatar_resource = AvatarResource()
api.add_route("/{avatar_xid}", _avatar_resource)
|
[
"sam@eavatar.com"
] |
sam@eavatar.com
|
f3dd5b4353dff6f6cef8c9eb4092d47fdf047aaa
|
80b545522375b2b8bbfdff0f540b1172e53b140c
|
/djecommerce/settings/development.py
|
3911c6f54c156da5237c8b83486eff63c55810d2
|
[] |
no_license
|
DuncanMoyo/django-ecommerce-website
|
18e1e8dcf358de6758ad7974a703145ed5cab4db
|
21783c3d4159adffabbfc522099cf9c55346bed8
|
refs/heads/master
| 2022-12-11T15:14:51.039929
| 2019-08-31T20:48:59
| 2019-08-31T20:48:59
| 196,033,607
| 0
| 0
| null | 2022-12-08T05:51:59
| 2019-07-09T15:14:11
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
from .base import *
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
INSTALLED_APPS += [
'debug_toolbar'
]
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware',]
# DEBUG TOOLBAR SETTINGS
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
def show_toolbar(request):
return True
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': show_toolbar
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
STRIPE_PUBLIC_KEY = 'pk_test_tx8uE8Va8yN3YmRlRZ0fZULC00RLAevl8q'
STRIPE_SECRET_KEY = 'sk_test_M6hBTe1EWdJqZplNq23kn54Q00HcSnrNNZ'
|
[
"duncanfmoyo@gmail.com"
] |
duncanfmoyo@gmail.com
|
691d6cbe2f9a10ae712c4829e33e1f99b6e87147
|
f1e7f6c34316828eb541d76e160c43e2e61743c8
|
/ate/main.py
|
7b19cea3a56cefb0c8e9b9d5baa47c7a1d9d312e
|
[
"MIT"
] |
permissive
|
luochun3731/ApiTestEngine
|
216fc95b8e1d5423cc24083ffac22d119f4a789e
|
b75e17e7c7a73047660fe13accab4653aa0fa5fb
|
refs/heads/master
| 2021-01-01T15:47:26.516079
| 2017-07-18T04:24:51
| 2017-07-18T04:24:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,597
|
py
|
import os
import argparse
import logging
import unittest
import HtmlTestRunner
from ate import runner, utils
class ApiTestCase(unittest.TestCase):
""" create a testcase.
"""
def __init__(self, test_runner, testcase):
super(ApiTestCase, self).__init__()
self.test_runner = test_runner
self.testcase = testcase
def runTest(self):
""" run testcase and check result.
"""
result = self.test_runner.run_test(self.testcase)
self.assertEqual(result, (True, []))
def create_suite(testset):
""" create test suite with a testset, it may include one or several testcases.
    each suite should initialize a separate Runner() with testset config.
"""
suite = unittest.TestSuite()
test_runner = runner.Runner()
config_dict = testset.get("config", {})
test_runner.init_config(config_dict, level="testset")
testcases = testset.get("testcases", [])
for testcase in testcases:
if utils.PYTHON_VERSION == 3:
ApiTestCase.runTest.__doc__ = testcase['name']
else:
ApiTestCase.runTest.__func__.__doc__ = testcase['name']
test = ApiTestCase(test_runner, testcase)
suite.addTest(test)
return suite
def create_task(testcase_path):
""" create test task suite with specified testcase path.
    each task suite may include one or several test suites.
"""
task_suite = unittest.TestSuite()
testsets = utils.load_testcases_by_path(testcase_path)
for testset in testsets:
suite = create_suite(testset)
task_suite.addTest(suite)
return task_suite
def main():
""" parse command line options and run commands.
"""
parser = argparse.ArgumentParser(
description='Api Test Engine.')
parser.add_argument(
'--testcase-path', default='testcases',
help="testcase file path")
parser.add_argument(
'--log-level', default='INFO',
help="Specify logging level, default is INFO.")
parser.add_argument(
'--report-name',
help="Specify report name, default is generated time.")
args = parser.parse_args()
log_level = getattr(logging, args.log_level.upper())
logging.basicConfig(level=log_level)
testcase_path = args.testcase_path.rstrip('/')
task_suite = create_task(testcase_path)
output_folder_name = os.path.basename(os.path.splitext(testcase_path)[0])
kwargs = {
"output": output_folder_name,
"report_name": args.report_name
}
HtmlTestRunner.HTMLTestRunner(**kwargs).run(task_suite)
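# Hedged usage note added for illustration (not part of the original module):
# with the argument parser above, a typical invocation would look roughly like
#   python main.py --testcase-path testcases --log-level DEBUG --report-name smoke
# which loads every testset under ./testcases, wraps each testcase in an
# ApiTestCase, and writes an HtmlTestRunner report into a folder named after
# the testcase path.
if __name__ == '__main__':
    main()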
|
[
"mail@debugtalk.com"
] |
mail@debugtalk.com
|
70141887bb0e0c852255ad1ce9359dbad564f6f4
|
5d1e737816e53cdb0056031f49267467b0a8de77
|
/visualize_cspace_and_workspace.py
|
aa42f09da550cf0bb13d9c643deee5e32f830a16
|
[] |
no_license
|
aorthey/configuration-space-visualizer
|
ab4fe297b586d0e7acb2bc58c14b09253da47fec
|
e839cc839be37de98c6f34ff5ca6347173fc8e45
|
refs/heads/master
| 2020-06-01T09:56:01.209503
| 2020-04-28T10:43:27
| 2020-04-28T10:43:27
| 190,740,072
| 2
| 3
| null | 2019-07-25T11:20:53
| 2019-06-07T12:30:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
import sys
import numpy as np
from src.cspace_visualizer import *
import os
if not os.path.exists("images"):
os.makedirs("images")
from worlds import manipulator_2dof as World
from matplotlib.ticker import MaxNLocator
world = World.Manipulator2dof()
worldName = world.getName()
N = 200
c1 = (0.9,0.9,0.9)
c2 = (0.7,0.7,0.7)
c3 = (0.5,0.5,0.5)
q1 = np.linspace(-np.pi,np.pi,N)
q2 = np.linspace(-np.pi,np.pi,N)
P1 = []
P2 = []
M = np.zeros((q1.shape[0], q2.shape[0]))
for i in range(q1.shape[0]):
for j in range(q2.shape[0]):
q = np.array((q1[i],q2[j]))
if not world.isFeasible(q):
M[i,j] = 1
P1 = np.append(P1,q1[i])
P2 = np.append(P2,q2[j])
infeasibleColumns = np.sum(M,axis=1)>=N
Q1 = []
Q2 = []
for i in range(q1.shape[0]):
for j in range(q2.shape[0]):
q = np.array((q1[i],q2[j]))
if infeasibleColumns[i]:
Q1 = np.append(Q1,q1[i])
Q2 = np.append(Q2,q2[j])
font_size = 25
offset = 0.5
p1 = np.array([0.5,2.57])
p2 = np.array([-1.57,-0.9])
p3 = np.array([2.5,-1.2])
symbol='x'
greyshade = 0.75
x1loc= (p1[0]-offset,p1[1]-offset)
x2loc= (p2[0]-offset,p2[1]-offset)
x3loc= (p3[0]-offset,p3[1]-offset)
###########################################################
fig = plt.figure(0)
fig.patch.set_facecolor('white')
ax = fig.gca()
ax.set_xlabel(r'x',fontsize=font_size)
ax.set_ylabel(r'y',rotation=1.57,fontsize=font_size)
ax.tick_params(axis='both', which='major', pad=15)
lim=1.1
plt.axis([-lim,lim,-lim,lim])
world.COLOR = c1
world.plotRobotAtConfig(ax,p1)
world.COLOR = c2
world.plotRobotAtConfig(ax,p2)
world.COLOR = c3
world.plotRobotAtConfig(ax,p3)
w1 = world.getEndeffectorPositions(p1)
w2 = world.getEndeffectorPositions(p2)
w3 = world.getEndeffectorPositions(p3)
yoffset = np.array((0.0,0.1))
xoffset = np.array((0.1,0.0))
ax.annotate(r''+symbol+'_1', w1+yoffset)
ax.annotate(r''+symbol+'_2', w2-2*yoffset-2*xoffset)
ax.annotate(r''+symbol+'_3', w3+yoffset)
world.plotObstacles(ax)
plt.savefig("images/"+worldName+"_workspace.png", bbox_inches='tight')
############################################################
fig = plt.figure(1)
fig.patch.set_facecolor('white')
ax = fig.gca()
ax.set_xlabel(r'\theta_1',fontsize=font_size)
ax.set_ylabel(r'\theta_2',rotation=1.57,fontsize=font_size)
ax.tick_params(axis='both', which='major', pad=15)
lim=3.14
plt.axis([-lim,lim,-lim,lim])
ax.annotate(r''+symbol+'_1', x1loc)
ax.annotate(r''+symbol+'_2', x2loc)
ax.annotate(r''+symbol+'_3', x3loc)
plt.plot(p1[0],p1[1],'o',color='black',markersize=10)
plt.plot(p2[0],p2[1],'o',color='black',markersize=10)
plt.plot(p3[0],p3[1],'o',color='black',markersize=10)
plotCSpaceDelaunayGrey(P1,P2,0.15)
plt.savefig("images/"+worldName+"_configuration_space.png", bbox_inches='tight')
plt.show()
|
[
"andreas.orthey@gmx.de"
] |
andreas.orthey@gmx.de
|
b1d35b14319ee88f737697a4ff38b5548b64a8e8
|
cd4bbecc3f713b0c25508d0c5674d9e103db5df4
|
/toontown/fishing/DistributedFishingTarget.py
|
69a3ef13a7dc66adb05d96a2986dc55852deddf6
|
[] |
no_license
|
peppythegod/ToontownOnline
|
dce0351cfa1ad8c476e035aa3947fdf53de916a6
|
2e5a106f3027714d301f284721382cb956cd87a0
|
refs/heads/master
| 2020-04-20T05:05:22.934339
| 2020-01-02T18:05:28
| 2020-01-02T18:05:28
| 168,646,608
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,803
|
py
|
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedNode
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directutil import Mopath
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
import FishingTargetGlobals
import random
import math
from toontown.effects import Bubbles
class DistributedFishingTarget(DistributedNode.DistributedNode):
notify = DirectNotifyGlobal.directNotify.newCategory(
'DistributedFishingTarget')
radius = 2.5
def __init__(self, cr):
DistributedNode.DistributedNode.__init__(self, cr)
NodePath.__init__(self)
self.pond = None
self.centerPoint = (0, 0, 0)
self.maxRadius = 1.0
self.track = None
def generate(self):
self.assign(render.attachNewNode('DistributedFishingTarget'))
shadow = loader.loadModel('phase_3/models/props/drop_shadow')
shadow.setPos(0, 0, -0.10000000000000001)
shadow.setScale(0.33000000000000002)
shadow.setColorScale(1, 1, 1, 0.75)
shadow.reparentTo(self)
self.bubbles = Bubbles.Bubbles(self, render)
self.bubbles.renderParent.setDepthWrite(0)
self.bubbles.start()
DistributedNode.DistributedNode.generate(self)
def disable(self):
if self.track:
self.track.finish()
self.track = None
self.bubbles.destroy()
del self.bubbles
self.pond.removeTarget(self)
self.pond = None
DistributedNode.DistributedNode.disable(self)
def delete(self):
del self.pond
DistributedNode.DistributedNode.delete(self)
def setPondDoId(self, pondDoId):
self.pond = base.cr.doId2do[pondDoId]
self.pond.addTarget(self)
self.centerPoint = FishingTargetGlobals.getTargetCenter(
self.pond.getArea())
self.maxRadius = FishingTargetGlobals.getTargetRadius(
self.pond.getArea())
def getDestPos(self, angle, radius):
x = radius * math.cos(angle) + self.centerPoint[0]
y = radius * math.sin(angle) + self.centerPoint[1]
z = self.centerPoint[2]
return (x, y, z)
def setState(self, stateIndex, angle, radius, time, timeStamp):
ts = globalClockDelta.localElapsedTime(timeStamp)
pos = self.getDestPos(angle, radius)
if self.track and self.track.isPlaying():
self.track.finish()
self.track = Sequence(
LerpPosInterval(
self, time - ts, Point3(*pos), blendType='easeInOut'))
self.track.start()
def getRadius(self):
return self.radius
|
[
"47166977+peppythegod@users.noreply.github.com"
] |
47166977+peppythegod@users.noreply.github.com
|
92d2540caa11631b2211b05ce1cd8a3ec146552b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_currycombs.py
|
697fc9498aafda38d96c4b64c23d5633a1a39751
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# class header
class _CURRYCOMBS():
def __init__(self,):
self.name = "CURRYCOMBS"
        self.definitions = ['currycomb']  # the original assigned the undefined name currycomb
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['currycomb']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
70ffe5314419bb5044b3a9f8f244447a6aad5a93
|
c1267fbec95318184e7388cddf9b7085f797d514
|
/2023/05 May/db05232023.py
|
5dc2d55800b8a6bd5a3a2bcf895e58b5ac20691d
|
[
"MIT"
] |
permissive
|
vishrutkmr7/DailyPracticeProblemsDIP
|
1aedfd2e173847bf22989a6b0ec550acebb2bd86
|
2c365f633a1e1bee281fbdc314969f03b17ac9ec
|
refs/heads/master
| 2023-05-31T23:49:52.135349
| 2023-05-28T09:32:12
| 2023-05-28T09:32:12
| 199,596,248
| 10
| 4
|
MIT
| 2022-11-02T21:31:59
| 2019-07-30T07:12:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
"""
You are on the command line of a computer in the root directory and given a list of commands to run.
The commands consist of three possible operations, ../ which brings you to the parent directory (or
nowhere if you’re already in the root directory), ./ which keeps you in the current directory, and
directory_name/ which moves you to a new directory with the specified name (these named directories
are guaranteed to always exist). After running all the commands, return the amount of commands
required to bring you back to the root directory.
Note: The commands must be run in the order they appear in commands.
Ex: Given the following commands…
commands = ["a/", "b/"], return 2 (we've gone two directories deeper, so we must go backwards
two directories which requires 2 commands).
Ex: Given the following commands…
commands = ["a/", "../"], return 0.
"""
class Solution:
def minOperations(self, commands):
count = 0
for command in commands:
if command == "../":
count -= 1
elif command == "./":
continue
else:
count += 1
return count
# Test Cases
if __name__ == "__main__":
s = Solution()
print(s.minOperations(["a/", "b/"]))
print(s.minOperations(["a/", "../"]))
|
[
"vishrutkmr7@gmail.com"
] |
vishrutkmr7@gmail.com
|
a87207e9eff593413fc1fe99434da4f68da15b15
|
025abc9e70eb347e688a90bdf3030db120e8824b
|
/python_script/test_rolling_sphere.py
|
c4f43ab61ff56c9f2f8b47da5e867ab321a5335b
|
[
"BSD-3-Clause"
] |
permissive
|
listenzcc/3D_model_matlab
|
6953a2ebbb55373a749aebc5101166e327c45cb5
|
2dcaf50a51cf02591737aaa6b4924ed3848f1840
|
refs/heads/master
| 2020-04-24T02:34:58.093612
| 2019-05-28T04:05:29
| 2019-05-28T04:05:29
| 171,641,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
# code: utf-8
from mpl_toolkits.mplot3d import proj3d
from matplotlib import animation
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
# draw sphere
def init():
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
x = np.cos(u)*np.sin(v)
y = np.sin(u)*np.sin(v)
z = np.cos(v)
sphere = ax.plot_wireframe(x, y, z, color="r")
sphere.__setattr__('target', 1)
return sphere
def is_target(x, t='target'):
return hasattr(x, t)
def animate(i):
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
u += i*5
x = np.cos(u)*np.sin(v)
y = np.sin(u)*np.sin(v)
z = np.cos(v)
ax.findobj(is_target)[0].remove()
sphere = ax.plot_wireframe(x, y, z, color="r")
sphere.__setattr__('target', 1)
return sphere
ani = animation.FuncAnimation(fig=fig,
func=animate,
frames=100,
init_func=init,
interval=20,
blit=False)
plt.show()
|
[
"listenzcc@mail.bnu.edu.cn"
] |
listenzcc@mail.bnu.edu.cn
|
3eb0059440e55929683569010648a2bfee9d1a04
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/services/services/campaign_criterion_simulation_service/transports/base.py
|
aa5d97f44787ec6884d649ce8947e8a4c96799a0
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 4,107
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v9.resources.types import (
campaign_criterion_simulation,
)
from google.ads.googleads.v9.services.types import (
campaign_criterion_simulation_service,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CampaignCriterionSimulationServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for CampaignCriterionSimulationService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_campaign_criterion_simulation: gapic_v1.method.wrap_method(
self.get_campaign_criterion_simulation,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_campaign_criterion_simulation(
self,
) -> typing.Callable[
[
campaign_criterion_simulation_service.GetCampaignCriterionSimulationRequest
],
campaign_criterion_simulation.CampaignCriterionSimulation,
]:
raise NotImplementedError
__all__ = ("CampaignCriterionSimulationServiceTransport",)
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
81218cbe35e6ef4a09627951ac0eab617bd94d2c
|
fa0c53ac2a91409eaf0fc7c082a40caae3ffa0d8
|
/com/lc/python_1_100_Days_Demo/Day01-15/Day09/code/employee.py
|
ac82d97c526cf06934a23fa8d2368df0c8c0cd8a
|
[] |
no_license
|
ahviplc/pythonLCDemo
|
aba6d8deb1e766841461bd772560d1d50450057b
|
22f149600dcfd4d769e9f74f1f12e3c3564e88c2
|
refs/heads/master
| 2023-07-24T01:41:59.791913
| 2023-07-07T02:32:45
| 2023-07-07T02:32:45
| 135,969,516
| 7
| 2
| null | 2023-02-02T03:24:14
| 2018-06-04T04:12:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
"""
抽象类 / 方法重写 / 多态
实现一个工资结算系统 公司有三种类型的员工
- 部门经理固定月薪12000元/月
- 程序员按本月工作小时数每小时100元
- 销售员1500元/月的底薪加上本月销售额5%的提成
输入员工的信息 输出每位员工的月薪信息
Version: 0.1
Author: LC
DateTime:2018年9月18日13:55:50
一加壹博客最Top-一起共创1+1>2的力量!~LC
LC博客url: http://oneplusone.top/index.html
"""
from abc import ABCMeta, abstractmethod
class Employee(object, metaclass=ABCMeta):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
@abstractmethod
def get_salary(self):
pass
class Manager(Employee):
    # Think about it: what would happen if we did not define a constructor?
def __init__(self, name):
        # Think about it: what would happen if we did not call the parent class constructor?
super().__init__(name)
def get_salary(self):
return 12000
class Programmer(Employee):
def __init__(self, name):
super().__init__(name)
def set_working_hour(self, working_hour):
self._working_hour = working_hour
def get_salary(self):
return 100 * self._working_hour
class Salesman(Employee):
def __init__(self, name):
super().__init__(name)
def set_sales(self, sales):
self._sales = sales
def get_salary(self):
return 1500 + self._sales * 0.05
if __name__ == '__main__':
emps = [Manager('武则天'), Programmer('狄仁杰'), Salesman('白元芳')]
for emp in emps:
if isinstance(emp, Programmer):
            working_hour = int(input("Please enter %s's working hours for this month: " % emp.name))
emp.set_working_hour(working_hour)
elif isinstance(emp, Salesman):
            sales = float(input("Please enter %s's sales for this month: " % emp.name))
emp.set_sales(sales)
        print("%s's monthly salary is: ¥%.2f" % (emp.name, emp.get_salary()))
|
[
"ahlc@sina.cn"
] |
ahlc@sina.cn
|
8ca313a6343af6f39fca663bf4e7b08003d71448
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/klmale001/question2.py
|
74abd3ce5a120172d6cab928acbe28d3e19dd740
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
print('Welcome to the 30 Second Rule Expert')
print('------------------------------------')
print('Answer the following questions by selecting from among the options.')
if input('Did anyone see you? (yes/no)\n')=="yes":
if input('Was it a boss/lover/parent? (yes/no)\n')=='yes':
if input('Was it expensive? (yes/no)\n')=='yes':
if input('Can you cut off the part that touched the floor? (yes/no)\n')=='yes':
print('Decision: Eat it.')
else: print('Decision: Your call.')
else:
if input('Is it chocolate? (yes/no)\n')=='yes':
print('Decision: Eat it.')
else:
print("Decision: Don't eat it.")
else:
print('Decision: Eat it.')
else:
if input('Was it sticky? (yes/no)\n')=='yes':
if input('Is it a raw steak? (yes/no)\n')=='yes':
if input('Are you a puma? (yes/no)\n')=='yes':
print('Decision: Eat it.')
else:
print("Decision: Don't eat it.")
else:
if input('Did the cat lick it? (yes/no)\n')=='yes':
if input('Is your cat healthy? (yes/no)\n')=='yes':
print('Decision: Eat it.')
else:
print('Decision: Your call.')
else:
print('Decision: Eat it.')
else:
if input('Is it an Emausaurus? (yes/no)\n')=='yes':
if input('Are you a Megalosaurus? (yes/no)\n')=='yes':
print('Decision: Eat it.')
else:
print("Decision: Don't eat it.")
else:
if input('Did the cat lick it? (yes/no)\n')=='yes':
if input('Is your cat healthy? (yes/no)\n')=='yes':
print('Decision: Eat it.')
else:
print('Decision: Your call.')
else:
print('Decision: Eat it.')
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
bceef0b93c3691d490c863dae8725908c66c9136
|
92a6b3dfbaa5ee932b7ecebcb5dad66c878163d5
|
/机器学习的代码/hello_world_linear_model.py
|
c4455c7476f4610d89a4df7aabe14b25e497a6d2
|
[] |
no_license
|
Color4/2017_code_updata_to_use
|
c99e9cf409904531813e69e5106b4650a1863979
|
a531d2b61d6984b4101af68bf1e54783d5e0843b
|
refs/heads/master
| 2020-03-31T20:14:34.366303
| 2018-10-08T04:58:10
| 2018-10-08T04:58:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
activation = tf.add(tf.multiply(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), "W=", sess.run(W), "b=", sess.run(b))
print("Optimization Finished!")
print("cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}),"W=", sess.run(W), "b=", sess.run(b))
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()
|
[
"zhangp@origimed.com"
] |
zhangp@origimed.com
|
b0b0f2739ca965c635b18a5027894999d2f8ca3c
|
ddaef23fe2e8da8153d69c5fc95421cc70f0d76c
|
/hc/api/decorators.py
|
bff56dda347bd98b691fd63c97deb7217b1f5210
|
[
"BSD-3-Clause"
] |
permissive
|
PagerTree/healthchecks
|
ee706b7b86a96c86d00428bdae96f4711e03bbde
|
ae53aaaa3a9f3e420858f9b877dd5047fae689ee
|
refs/heads/master
| 2023-01-10T00:14:15.735248
| 2022-12-20T14:23:33
| 2022-12-20T14:23:33
| 114,281,135
| 0
| 1
| null | 2017-12-14T18:03:00
| 2017-12-14T18:03:00
| null |
UTF-8
|
Python
| false
| false
| 3,708
|
py
|
from __future__ import annotations
import json
from functools import wraps
from django.db.models import Q
from django.http import HttpResponse, JsonResponse
from hc.accounts.models import Project
from hc.lib.jsonschema import ValidationError, validate
def error(msg, status=400):
return JsonResponse({"error": msg}, status=status)
def authorize(f):
@wraps(f)
def wrapper(request, *args, **kwds):
if "HTTP_X_API_KEY" in request.META:
api_key = request.META["HTTP_X_API_KEY"]
elif hasattr(request, "json"):
api_key = str(request.json.get("api_key", ""))
else:
api_key = ""
if len(api_key) != 32:
return error("missing api key", 401)
try:
request.project = Project.objects.get(api_key=api_key)
except Project.DoesNotExist:
return error("wrong api key", 401)
request.readonly = False
request.v = 2 if request.path_info.startswith("/api/v2/") else 1
return f(request, *args, **kwds)
return wrapper
def authorize_read(f):
@wraps(f)
def wrapper(request, *args, **kwds):
if "HTTP_X_API_KEY" in request.META:
api_key = request.META["HTTP_X_API_KEY"]
elif hasattr(request, "json"):
api_key = str(request.json.get("api_key", ""))
else:
api_key = ""
if len(api_key) != 32:
return error("missing api key", 401)
write_key_match = Q(api_key=api_key)
read_key_match = Q(api_key_readonly=api_key)
try:
request.project = Project.objects.get(write_key_match | read_key_match)
except Project.DoesNotExist:
return error("wrong api key", 401)
request.readonly = api_key == request.project.api_key_readonly
request.v = 2 if request.path_info.startswith("/api/v2/") else 1
return f(request, *args, **kwds)
return wrapper
def validate_json(schema={"type": "object"}):
"""Parse request json and validate it against `schema`.
Put the parsed result in `request.json`.
If schema is None then only parse and check if the root
element is a dict. Supports a limited subset of JSON schema spec.
"""
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwds):
if request.method == "POST" and request.body:
try:
request.json = json.loads(request.body.decode())
except ValueError:
return error("could not parse request body")
else:
request.json = {}
try:
validate(request.json, schema)
except ValidationError as e:
return error("json validation error: %s" % e)
return f(request, *args, **kwds)
return wrapper
return decorator
def cors(*methods):
methods = set(methods)
methods.add("OPTIONS")
methods_str = ", ".join(methods)
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwds):
if request.method == "OPTIONS":
# Handle OPTIONS here
response = HttpResponse(status=204)
elif request.method in methods:
response = f(request, *args, **kwds)
else:
response = HttpResponse(status=405)
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Headers"] = "X-Api-Key"
response["Access-Control-Allow-Methods"] = methods_str
response["Access-Control-Max-Age"] = "600"
return response
return wrapper
return decorator
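# Hedged usage sketch added for illustration (the view name, schema, and
# decorator order are assumptions, not part of this module). A JSON API
# endpoint would typically stack these decorators so that CORS preflight,
# API-key authentication, and body validation all run before the handler:
#
# @cors("POST")
# @authorize
# @validate_json({"type": "object", "properties": {"name": {"type": "string"}}})
# def create_check(request):
#     name = request.json.get("name", "")
#     return JsonResponse({"name": name, "project": str(request.project)}, status=201)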
|
[
"cuu508@gmail.com"
] |
cuu508@gmail.com
|
29922bc33b6c89fc40112d18ddfad5c53622011d
|
16270d37d819a35777ab6d6a6430cd63551917f1
|
/handlers/newpost.py
|
7299307b23a91c08ba2ee439648df4749e4e1c09
|
[] |
no_license
|
SteadBytes/multi-user-blog
|
de28673155b81bd22e6cb57825670beca9ffdcf0
|
a3269a949753c72fad0915a1a6feafe50d75c4e3
|
refs/heads/master
| 2021-06-22T13:42:58.381491
| 2017-06-28T15:22:14
| 2017-06-28T15:22:14
| 89,211,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
from handlers.blog import BlogHandler
from models.blogpost import BlogPost
from helpers import *
class NewPostHandler(BlogHandler):
"""Handler for '/blog/newpost'.
"""
def render_new_post(self, subject="", content="", error=""):
self.render("new_post.html.j2", subject=subject,
content=content, error=error)
@BlogHandler.user_logged_in
def get(self):
self.render_new_post()
@BlogHandler.user_logged_in
def post(self):
"""Gets data from input form.
        Retrieves the post subject and content from the input form, creates a
        new BlogPost entity, then redirects to a permalink for the post.
If subject and content are invalid the form is re-loaded with
appropriate error messages.
"""
subject = self.request.get("subject")
content = self.request.get("content")
if subject and content:
post = BlogPost.make(self.user, subject, content)
post.put()
self.redirect("/blog/%s" % str(post.key().id()))
else:
error = "Please enter subject and content!"
self.render_new_post(subject, content, error)
|
[
"="
] |
=
|
a5911ccfb35e51051246e197db71d55138d3c634
|
6df062cecf36b3e0ae46a84e28b3af50f179c442
|
/autouri/metadata.py
|
e5ab73e28370c1cdcc262b3ccad79285964efd3d
|
[
"MIT"
] |
permissive
|
ENCODE-DCC/autouri
|
81b8b0807c05f17c125fe90f125f516a5a1fcec7
|
73d97ac2ee98bd153abe2f34cbdd89bf3c7aea24
|
refs/heads/master
| 2023-05-12T06:11:00.576561
| 2023-05-04T17:33:52
| 2023-05-04T17:33:52
| 222,572,865
| 0
| 1
|
MIT
| 2023-05-04T17:33:53
| 2019-11-19T00:33:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
"""URIMetadata and helper functions for metadata
"""
import warnings
from base64 import b64decode
from binascii import hexlify
from collections import namedtuple
from datetime import datetime, timezone
from dateparser import parse as dateparser_parse
from dateutil.parser import parse as dateutil_parse
URIMetadata = namedtuple("URIMetadata", ("exists", "mtime", "size", "md5"))
def get_seconds_from_epoch(timestamp: str) -> float:
"""If dateutil.parser.parse cannot parse DST timezones
(e.g. PDT, EDT) correctly, then use dateparser.parse instead.
"""
utc_epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
utc_t = None
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
utc_t = dateutil_parse(timestamp)
except Exception:
pass
if utc_t is None or utc_t.tzname() not in ("UTC", "Z"):
utc_t = dateparser_parse(timestamp)
utc_t = utc_t.astimezone(timezone.utc)
return (utc_t - utc_epoch).total_seconds()
def base64_to_hex(b: str) -> str:
return hexlify(b64decode(b)).decode()
def parse_md5_str(raw: str) -> str:
"""Check if it's based on base64 then convert it to hexadecimal string."""
raw = raw.strip("\"'")
if len(raw) == 32:
return raw
else:
try:
return base64_to_hex(raw)
except Exception:
pass
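# Hedged usage sketch added for illustration (the timestamp and checksum values
# are made up): a storage backend would typically normalize whatever strings
# its API returns before building a URIMetadata record.
#
# mtime = get_seconds_from_epoch('Mon, 01 Jan 2018 00:00:00 GMT')
# md5 = parse_md5_str('1B2M2Y8AsgTpgAmY7PhCfg==')  # base64 digest -> hex string
# meta = URIMetadata(exists=True, mtime=mtime, size=1024, md5=md5)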
|
[
"leepc12@gmail.com"
] |
leepc12@gmail.com
|
a5a87d9cb88235ea15ab3e8f5388bacc96dbfbea
|
b1926953762b3720a9b5e4f7e136c3f296765f4b
|
/0x09-utf8_validation/0-validate_utf8.py
|
c026393dd80a6751dde74b661baa470fb11e7cea
|
[] |
no_license
|
Nukemenonai/holbertonschool-interview
|
2fd637e50d943fb5735012eee074f4c272c86e83
|
3c2727aaf91f4cccfce7c05406cac8b9c10f6e2d
|
refs/heads/master
| 2023-06-26T13:32:30.568587
| 2021-07-29T01:20:08
| 2021-07-29T01:20:08
| 280,456,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
#!/usr/bin/python3
"""
utf validator
"""
def validUTF8(data):
"""
    checks whether a list of integers, each representing one byte, encodes valid UTF-8
    data: list of integers
    returns: True if valid, otherwise False
"""
n_bytes = 0
for num in data:
bin_rep = format(num, '#010b')[-8:]
if n_bytes == 0:
for bit in bin_rep:
if bit == '0':
break
n_bytes += 1
if n_bytes == 0:
continue
if n_bytes == 1 or n_bytes > 4:
return False
else:
if not (bin_rep[0] == '1' and bin_rep[1] == '0'):
return False
n_bytes -= 1
return n_bytes == 0
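if __name__ == "__main__":
    # Illustrative check added for clarity (not part of the original task
    # file): [197, 130, 1] is a valid 2-byte character followed by an ASCII
    # byte, while [235, 140, 4] starts a 3-byte character that never gets its
    # second continuation byte.
    print(validUTF8([197, 130, 1]))   # True
    print(validUTF8([235, 140, 4]))   # False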
|
[
"david.giovanni.ovalle@gmail.com"
] |
david.giovanni.ovalle@gmail.com
|
9bb14ece250d2ec5a2cf8da15216e6244b49cb44
|
d7df481f20826849b208f2b62eff53d8d107eeff
|
/ch5/multi3.py
|
3ec9f68a0da4de3c70e95033f4d0a2367ec0ee86
|
[] |
no_license
|
jemuelb/programming-python-2
|
5f847d591129ae2071cf2937f7853fda654a4e09
|
07f1b36f6017f7c310ac89a62fa8f8280aac9b8c
|
refs/heads/master
| 2021-05-01T19:23:15.748425
| 2018-03-04T22:53:30
| 2018-03-04T22:53:30
| 121,020,259
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
"""
Use multiprocess shared memory objects to communicate.
Passed objects are shared, but globals are not on Windows.
Last test here reflects common use case: distributing work.
"""
import os
from multiprocessing import Process, Value, Array
procs = 3
count = 0
def showdata(label, val, arr):
"""
print data values in this process
"""
msg = '%-12s: pid:%4s, global:%s, value:%s, array:%s'
print(msg % (label, os.getpid(), count, val.value, list(arr)))
def updater(val, arr):
"""
communicate via shared memory
"""
global count
count += 1
val.value += 1
for i in range(3):
arr[i] += 1
if __name__ == '__main__':
scalar = Value('i', 0)
vector = Array('d', procs)
# show start value in parent process
showdata('parent start', scalar, vector)
# spawn child, pass in shared memory
p = Process(target=showdata, args=('child ', scalar, vector))
p.start(); p.join()
# pass in shared memory updated in parent, wait for each to finish
# each child sees updates in parent so far for args (but not global)
print('\nloop1 (updates in parent, serial children)...')
for i in range(procs):
count += 1
scalar.value += 1
vector[i] += 1
p = Process(target=showdata, args=(('process %s' % i), scalar, vector))
        p.start(); p.join()   # join() must be called, not merely referenced
# same as prior, but allow children to run in parallel
# all see the last iteration's result because all share objects
print('\nloop2 (updates in parent, parallel children)...')
ps = []
for i in range(procs):
count += 1
scalar.value += 1
vector[i] += 1
p = Process(target=showdata, args=(('process %s' % i), scalar, vector))
p.start()
ps.append(p)
for p in ps:
p.join()
# shared memory updated in spawned children, wait for each
print('\nloop3 (updates in serial children)...')
for i in range(procs):
p = Process(target=updater, args=(scalar, vector))
p.start()
p.join()
showdata('parent temp', scalar, vector)
# same, but allow children to update in parallel
ps = []
print('\nloop4 (updates in parallel children)...')
for i in range(procs):
p = Process(target=updater, args=(scalar, vector))
p.start()
ps.append(p)
for p in ps:
p.join()
# show final results here
showdata('parent end', scalar, vector)
|
[
"jemuelb@gmail.com"
] |
jemuelb@gmail.com
|
bbdaf5b5723f6bf90d7a7615c82525fa5e2c3298
|
50e375bdc8affc1a8c09aa567a740fa19df7d5a6
|
/DSBQ/venv/bin/chardetect
|
5b47b8e5fd7a90a96a565e185674e79a64bd2b81
|
[] |
no_license
|
michTalebzadeh/SparkStructuredStreaming
|
ca7a257626e251c7b03a9844cfd229fa8ea95af5
|
87ef34ffe52061fcbb4f22fcd97764037717696a
|
refs/heads/master
| 2023-07-13T00:49:10.753863
| 2021-07-12T16:39:50
| 2021-07-12T16:39:50
| 364,826,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
#!/home/hduser/PycharmProjects/DSBQ/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"mich.talebzadeh@gmail.com"
] |
mich.talebzadeh@gmail.com
|
|
1fe8d88dbb1f77c4ea991f377fb063188a87e733
|
6810a482759afd585db7bb0b85fd0416f0450e6d
|
/Open Kattis/tolower.py
|
7daf83d10d73126578159f631168fa5092a44ace
|
[] |
no_license
|
BenRStutzman/kattis
|
01b000ac2353c8b8000c6bddec3698f66b0198ef
|
005720f853e7f531a264227d0d9aaa19d4d7cf1b
|
refs/heads/master
| 2020-07-15T23:52:45.785021
| 2019-11-09T03:28:06
| 2019-11-09T03:28:06
| 205,675,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
import sys
def main():
inp = sys.stdin.read().splitlines()
P, T = [int(num) for num in inp[0].split()]
num_right = 0
for problem in range(1, P * T + 1, T):
#print('NEW PROBLEM')
for case in range(problem, problem + T):
string = inp[case]
#print(string)
if len(string) > 1 and not string[1:].islower():
break
else: num_right += 1
print(num_right)
main()
|
[
"tysorx89@gmail.com"
] |
tysorx89@gmail.com
|
144944f86d23e3dd519eba2f3a2d474bfcad06bc
|
c36d9d70cbb257b2ce9a214bcf38f8091e8fe9b7
|
/75_sort_color.py
|
7bcae70f22da475f20b84852253654c068fa61d1
|
[] |
no_license
|
zdadadaz/coding_practice
|
3452e4fc8f4a79cb98d0d4ea06ce0bcae85f96a0
|
5ed070f22f4bc29777ee5cbb01bb9583726d8799
|
refs/heads/master
| 2021-06-23T17:52:40.149982
| 2021-05-03T22:31:23
| 2021-05-03T22:31:23
| 226,006,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,510
|
py
|
from typing import List
class Solution:
def sortColors(self, nums):
"""
Do not return anything, modify nums in-place instead.
"""
self.recur(nums,0,len(nums)-1)
def recur(self,nums, lt, rt):
if lt<rt:
print(lt,rt)
pivot_idx = self.rearrange(nums, lt, rt)
print(pivot_idx)
self.recur(nums,lt,pivot_idx-1)
self.recur(nums,pivot_idx+1,rt)
def rearrange(self,nums, lt, rt):
pivot = nums[rt]
i = lt-1
for j in range(lt,rt):
if nums[j] < pivot:
i+=1
nums[i],nums[j] = nums[j],nums[i]
i+=1
nums[rt], nums[i] = nums[i], nums[rt]
return i
# two pointers
def sortColors_web(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
lt = 0
rt = len(nums)-1
i=0
while i<=rt:
if nums[i]==0:
nums[i],nums[lt] = nums[lt],nums[i]
lt+=1
i+=1
elif nums[i]==2:
nums[i],nums[rt] = nums[rt],nums[i]
rt-=1
else:
i+=1
sol = Solution()
input= [2,0,2,1,1,0]
sol.sortColors(input)
print(input)
# Input: [2,0,2,1,1,0]
# Output: [0,0,1,1,2,2]
# lt=0,rt=5, pivot=2,lt=6,rt=1,out,swap pivot 5, [0,0,2,1,1,2] (n,n,n,n,n,y)
# lt=0,rt=4, pivot=0,lt=2,rt=-1,out,swap pivot 1, [0,0,2,1,1,2] (n,y,n,n,n,y)
|
[
"zdadadaz5566@gmail.com"
] |
zdadadaz5566@gmail.com
|
d12aec597ccbdf191c9024d967f7c6d1b7b636ed
|
2ff2f41c6fe48a4961513ca6deefbc5b393c406e
|
/python/tvm/contrib/dlpack.py
|
11db29f98b3ea308493040c592bdc92c390450a9
|
[
"Apache-2.0"
] |
permissive
|
zhiics/tvm
|
9f5a39c6373349800b9255d74225d5dd65aba70f
|
4782b1fc153d6614808f542155d58188f2dc8255
|
refs/heads/master
| 2021-12-21T03:24:31.176090
| 2018-09-05T22:48:37
| 2018-09-05T22:48:37
| 143,820,078
| 6
| 2
|
Apache-2.0
| 2020-05-13T00:54:21
| 2018-08-07T04:34:08
|
C++
|
UTF-8
|
Python
| false
| false
| 1,251
|
py
|
"""Wrapping functions to bridge frameworks with DLPack support to TVM"""
from .. import ndarray
def convert_func(tvm_func, tensor_type, to_dlpack_func):
"""Convert a tvm function into one that accepts a tensor from another
framework, provided the other framework supports DLPACK
Parameters
----------
tvm_func: Function
Built tvm function operating on arrays
tensor_type: Type
Type of the tensors of the target framework
to_dlpack_func: Function
Function to convert the source tensors to DLPACK
"""
assert callable(tvm_func)
def _wrapper(*args):
args = tuple(ndarray.from_dlpack(to_dlpack_func(arg))\
if isinstance(arg, tensor_type) else arg for arg in args)
return tvm_func(*args)
return _wrapper
def to_pytorch_func(tvm_func):
"""Convert a tvm function into one that accepts PyTorch tensors
Parameters
----------
tvm_func: Function
Built tvm function operating on arrays
Returns
-------
wrapped_func: Function
Wrapped tvm function that operates on PyTorch tensors
"""
import torch
import torch.utils.dlpack
return convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack)
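# Usage sketch (hypothetical names -- assumes `fadd` is a tvm function built
# elsewhere and that the PyTorch tensors match its expected shapes and dtypes):
#
#   torch_fadd = to_pytorch_func(fadd)
#   torch_fadd(a_torch, b_torch, out_torch)   # tensors cross over via DLPack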
|
[
"tqchen@users.noreply.github.com"
] |
tqchen@users.noreply.github.com
|
c9f4124207254efef396011f078633f4edf00400
|
8e39f7ab3d728aaa593a0c0ddd113ea8e6f287c6
|
/14_ulter)table.py
|
b0ca45650e4fa1679ae40301ec502f9c1fd74489
|
[] |
no_license
|
tamercs2005/Python-MySQL-Guide
|
423ffd502cd08de2850f25f3cfad47983e0949eb
|
01917a31038517dfcabe7f5400262792d5b86ef7
|
refs/heads/master
| 2021-05-22T01:01:21.988815
| 2020-04-02T08:41:40
| 2020-04-02T08:41:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
import mysql.connector
myconn = mysql.connector.connect(
host="localhost" ,
user = "root" ,
passwd = "toor" ,
database='mydatabase'
)
mycursor = myconn.cursor()
# mycursor.execute(" ALTER TABLE movies ADD COLUMN language VARCHAR(30)")
mycursor.execute(" ALTER TABLE movies CHANGE language language VARCHAR(50)")
myconn.commit()
|
[
"pythondeveloper6@gmail.com"
] |
pythondeveloper6@gmail.com
|
d7d8e8c5e21f43299f74516f10cb59ddbb9faaba
|
9680ba23fd13b4bc0fc3ce0c9f02bb88c6da73e4
|
/Bernd Klein (520) ile Python/p_20702.py
|
fdd42efc88e98cd52221dee5ff8c40475d982f06
|
[] |
no_license
|
mnihatyavas/Python-uygulamalar
|
694091545a24f50a40a2ef63a3d96354a57c8859
|
688e0dbde24b5605e045c8ec2a9c772ab5f0f244
|
refs/heads/master
| 2020-08-23T19:12:42.897039
| 2020-04-24T22:45:22
| 2020-04-24T22:45:22
| 216,670,169
| 0
| 0
| null | null | null | null |
ISO-8859-9
|
Python
| false
| false
| 7,337
|
py
|
# coding:iso-8859-9 Turkish (Türkçe)
# p_20702.py: Example of adding nodes (yumru), edges (bağlantı), isolated nodes, and new nodes/edges using the Grafik class.
class Grafik (object):
def __init__ (self, grafikSözlüğü=None):
if grafikSözlüğü == None: grafikSözlüğü = {}
self.__grafikSözlüğü = grafikSözlüğü
def yumrular (self): return list (self.__grafikSözlüğü.keys() )
def yumruEkle (self, yumru):
if yumru not in self.__grafikSözlüğü: self.__grafikSözlüğü[yumru] = []
def bağlantılar (self): return self.bağlantılarıKur()
def bağlantılarıKur (self):
bağlantılar = []
for yumru in self.__grafikSözlüğü:
for komşu in self.__grafikSözlüğü[yumru]:
if {komşu, yumru} not in bağlantılar: bağlantılar.append ({yumru, komşu})
return bağlantılar
def bağlantıEkle (self, bağlantı):
bağlantı = set (bağlantı)
(yumru1, yumru2) = tuple (bağlantı)
if yumru1 in self.__grafikSözlüğü: self.__grafikSözlüğü[yumru1].append (yumru2)
else: self.__grafikSözlüğü[yumru1] = [yumru2]
def patikaBul (self, ilkYumru, sonYumru, patika=None):
if patika == None: patika = []
grafik = self.__grafikSözlüğü
patika = patika + [ilkYumru]
if ilkYumru == sonYumru: return patika
if ilkYumru not in grafik: return None
for yumru in grafik[ilkYumru]:
if yumru not in patika:
eklenenPatika = self.patikaBul (yumru, sonYumru, patika)
if eklenenPatika: return eklenenPatika
return None
def tümPatikalarıBul (self, ilkYumru, sonYumru, patika=[]):
grafik = self.__grafikSözlüğü
patika = patika + [ilkYumru]
if ilkYumru == sonYumru: return [patika]
if ilkYumru not in grafik: return []
patikalar = []
for yumru in grafik[ilkYumru]:
if yumru not in patika:
eklenenPatika = self.tümPatikalarıBul (yumru, sonYumru, patika)
for p in eklenenPatika: patikalar.append (p)
return patikalar
def __str__ (self):
sonuç = "Yumrular: "
for y in self.__grafikSözlüğü: sonuç += str (y) + " "
sonuç += "\nBağlantılar: "
for b in self.bağlantılarıKur(): sonuç += str (b) + " "
return sonuç
    # Helper functions added for the following examples...
def yumruDerecesi (self, yumru):
komşuYumrular = self.__grafikSözlüğü[yumru]
derece = len (komşuYumrular) + komşuYumrular.count (yumru)
return derece
def izoleYumrular (self):
grafik = self.__grafikSözlüğü
izoleListesi = []
for yumru in grafik:
if not grafik[yumru]: izoleListesi += [yumru]
return izoleListesi
def asgariDerece (self):
asgari = 100000000
for yumru in self.__grafikSözlüğü:
yumruDerecesi = self.yumruDerecesi (yumru)
if yumruDerecesi < asgari: asgari = yumruDerecesi
return asgari
def azamiDerece (self):
azami = 0
for yumru in self.__grafikSözlüğü:
yumruDerecesi = self.yumruDerecesi (yumru)
if yumruDerecesi > azami: azami = yumruDerecesi
return azami
def dereceSilsilesi (self):
silsileListesi = []
for yumru in self.__grafikSözlüğü: silsileListesi.append (self.yumruDerecesi (yumru))
silsileListesi.sort (reverse = True)
return tuple (silsileListesi)
@staticmethod
def erdoes_gallai (silsile):
if sum (silsile) % 2: return False
for k in range (1, len (silsile) + 1):
sol = sum (silsile[:k])
sağ = k * (k-1) + sum ([min (x, k) for x in silsile[k:]])
if sol > sağ: return False
return True
def yoğunluk (self):
g = self.__grafikSözlüğü
Y = len (g.keys())
B = len (self.bağlantılar())
        return 2.0 * B / (Y *(Y - 1)) # edge density lies in the range [0->1]...
def bağlantılıMı (self, bağlantılıYumrular = None, ilkYumru=None):
if bağlantılıYumrular is None: bağlantılıYumrular = set()
gSöz = self.__grafikSözlüğü
yumrular = list (gSöz.keys())
        if not ilkYumru: # if no start node was given, pick node 0...
ilkYumru = yumrular[0]
bağlantılıYumrular.add (ilkYumru)
if len (bağlantılıYumrular) != len (yumrular):
for yumru in gSöz[ilkYumru]:
if yumru not in bağlantılıYumrular:
if self.bağlantılıMı (bağlantılıYumrular, yumru): return True
else: return True
return False
def grafiğinÇapı (self):
y = self.yumrular()
çiftlerListesi = [(y[i], y[j]) for i in range (len (y) - 1) for j in range (i+1, len (y))]
enkısaYol = []
for (y1, y2) in çiftlerListesi:
patikalar = self.tümPatikalarıBul (y1, y2)
enkısası = sorted (patikalar, key=len)[0]
enkısaYol.append (enkısası)
enkısaYol.sort (key=len)
        grafiğinÇapı = len (enkısaYol[-1]) - 1 # in ascending order the last path is the longest...
return grafiğinÇapı
if __name__ == "__main__":
g = {
"a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : [],
"g" : []
}
grafik = Grafik (g)
print ("Grafiğin mevcut yumruları:")
print (grafik.yumrular())
print ("Grafiğin mevcut bağlantıları:")
print (grafik.bağlantılar())
print ("Yeni bir yumru 'z' ekle:")
grafik.yumruEkle ("z")
print ("Grafiğin ilk güncel yumruları:")
print (grafik.yumrular())
print ("Yeni bir bağlantı {'a', 'z'} ekle:")
grafik.bağlantıEkle ({"a", "z"})
print ("Grafiğin ikinci güncel yumruları:")
print (grafik.yumrular() )
print ("Grafiğin ikinci güncel bağlantıları:")
print (grafik.bağlantılar())
print ('Yeni bağlantılı iki yumru {"x","y"} ekle:')
grafik.bağlantıEkle ({"x", "y"})
grafik.yumruEkle ("x")
print ("Grafiğin son yumruları:")
print (grafik.yumrular())
print ("Grafiğin son bağlantıları:")
print (grafik.bağlantılar())
"""Çıktı:
>python p_20702.py
Grafiğin mevcut yumruları:
['a', 'b', 'c', 'd', 'e', 'f', 'g']
Grafiğin mevcut bağlantıları:
[{'a', 'd'}, {'b', 'c'}, {'c'}, {'c', 'd'}, {'c', 'e'}]
Yeni bir yumru 'z' ekle:
Grafiğin ilk güncel yumruları:
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'z']
Yeni bir bağlantı {'a', 'z'} ekle:
Grafiğin ikinci güncel yumruları:
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'z']
Grafiğin ikinci güncel bağlantıları:
[{'a', 'd'}, {'b', 'c'}, {'c'}, {'c', 'd'}, {'c', 'e'}, {'z', 'a'}]
Yeni bağlantılı iki yumru {"x","y"} ekle:
Grafiğin son yumruları:
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'z', 'x']
Grafiğin son bağlantıları:
[{'a', 'd'}, {'b', 'c'}, {'c'}, {'c', 'd'}, {'c', 'e'}, {'z', 'a'}, {'x', 'y'}]
"""
|
[
"noreply@github.com"
] |
mnihatyavas.noreply@github.com
|
d02aa2b17cff92893dbbc39c51ae250ff988bac9
|
472578974401c83509d81ea4d832fc3fd821f295
|
/python资料/day8.1/day02/exercise06.py
|
55b143dd621190359b3f5a9fd71213fb7e669ccc
|
[
"MIT"
] |
permissive
|
why1679158278/python-stu
|
f038ec89e9c3c7cc80dc0ff83b76e7c3078e279e
|
0d95451f17e1d583d460b3698047dbe1a6910703
|
refs/heads/master
| 2023-01-05T04:34:56.128363
| 2020-11-06T09:05:16
| 2020-11-06T09:05:16
| 298,263,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
"""
古代的秤,一斤十六两。
在终端中获取两,计算几斤零几两。
"""
total_liang = int(input("请输入两:"))
jin = total_liang // 16
liang = total_liang % 16
print(str(jin) + "斤零" + str(liang) + "两")
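# Worked example: 35 liang -> 35 // 16 = 2 jin with 35 % 16 = 3 liang left over,
# so the program prints "2斤零3两" (2 jin and 3 liang).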
|
[
"1679158278@qq.com"
] |
1679158278@qq.com
|
67048f55cb3fb75a8cae9b2b21d1dfcb147d6a37
|
70b339d0b2638a7914d0d56c5edf8a2637c9f4b0
|
/sortedSquares.py
|
2903b4c61f859f1b2a07c241e19027f5d65a4f57
|
[] |
no_license
|
pflun/advancedAlgorithms
|
9991da7514024e18ba08de8688966b9220e12571
|
5520dbcd26999b98e1229bf03c2f62dd690a2ddc
|
refs/heads/master
| 2023-02-19T12:05:26.902535
| 2023-02-14T06:08:54
| 2023-02-14T06:08:54
| 189,055,701
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
class Solution(object):
    # brute force
def sortedSquares(self, A):
res = []
for a in A:
a *= a
res.append(a)
return sorted(res)
def sortedSquares2(self, A):
l = 0
r = len(A) - 1
res = []
while l <= r:
if A[l] * A[l] > A[r] * A[r]:
res.append(A[l] * A[l])
l += 1
else:
res.append(A[r] * A[r])
r -= 1
return res[::-1]
test = Solution()
print(test.sortedSquares2([-7,-3,2,3,11]))
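# Note: sortedSquares2 is the O(n) two-pointer variant -- in a sorted input the
# largest square is always at one of the two ends, so the result is filled from
# the back and reversed once at the end.  Both methods return [4, 9, 9, 49, 121]
# for the input [-7, -3, 2, 3, 11].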
|
[
"zgao@gwu.edu"
] |
zgao@gwu.edu
|
4c428bc89f8805b8ef2093c63845593d05c4ce62
|
39cd9aa81927c20d85d1b65e55523455626ee902
|
/python_work/chapter_9/exercises/9_7_admin.py
|
bfa5186b5cf37e58e1617c832d9bc40acb9e8e8f
|
[] |
no_license
|
SMS-NED16/crash-course-python
|
acf363562a813f7deb36614dc935be4ed2d07fee
|
e6e6cb787d208f51f114f71331c43af1ddc1e4c2
|
refs/heads/master
| 2020-03-09T02:29:35.241621
| 2018-04-21T16:09:16
| 2018-04-21T16:09:16
| 128,541,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,544
|
py
|
class User():
"""A class that models a user for an online forum"""
def __init__(self, f_name, l_name,
age, city, hometown, *languages):
"""Constructor - creates attributes, initialises them"""
        self.first_name = f_name
        self.last_name = l_name
self.age = age
self.city = city
self.hometown = hometown
self.languages = languages #languages is a tuple
def describe_user(self):
"""Prints all information about user as a neatly formatted string"""
print("Name:\t" + self.first_name.title()
+ " " + self.last_name.title())
print("Age:\t" + str(self.age))
print("City:\t" + self.city)
print("Hometown:\t" + self.hometown)
print("Languages:\t" + str(self.languages))
def greet_user(self):
"""Displays a personalised greeting for the user"""
print("Hello, " + self.first_name.title() + "! Welcome back!")
class Admin(User):
"""A specialised version of the User class for forum administrators"""
def __init__(self, f_name, l_name, age, city, hometown, *languages):
"""Initialises superclass attributes, then inits privileges"""
super().__init__(f_name, l_name, age, city, hometown, *languages)
self.privileges = ["can add post", "can delete post", "can ban user"]
def show_privileges(self):
print("This admin has the following privileges.")
for privilege in self.privileges:
print(" -" + privilege)
#Creating an instance of the Admin class
spez = Admin("Alex", "Ohanian", 31, "San Francisco", "Los Angeles",
"English", "Spanish", "German")
spez.describe_user()
spez.show_privileges()
|
[
"saadmsiddiqui96@gmail.com"
] |
saadmsiddiqui96@gmail.com
|
b8583d8e9f7866f782b43f9711befcd5252146ac
|
ea393959886a5cd13da4539d634f2ca0bbcd06a2
|
/573.py
|
42d030dc870c94f3d9e8565f14b11a340a9c8cdf
|
[] |
no_license
|
zhangchizju2012/LeetCode
|
f605f35b82f16282559af71e4e61ec2629a90ebc
|
0c4c38849309124121b03cc0b4bf39071b5d1c8c
|
refs/heads/master
| 2020-04-05T12:12:14.810639
| 2018-08-09T10:24:52
| 2018-08-09T10:24:52
| 81,021,830
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun May 7 10:19:34 2017
@author: zhangchi
"""
class Solution(object):
def minDistance(self, height, width, tree, squirrel, nuts):
"""
:type height: int
:type width: int
:type tree: List[int]
:type squirrel: List[int]
:type nuts: List[List[int]]
:rtype: int
"""
result = 0
distance1 = []
distance2 = []
for a,b in nuts:
temp1 = abs(a-tree[0]) + abs(b-tree[1])
result += temp1
distance1.append(temp1)
temp2 = abs(a-squirrel[0]) + abs(b-squirrel[1])
distance2.append(temp2)
temp = -float('inf')
for a,b in zip(distance1,distance2):
temp = max(temp,a-b)
return 2*result-temp
s = Solution()
print s.minDistance(5,7,[2,2],[4,4],[[3,0]])
|
[
"zhangchizju2012@zju.edu.cn"
] |
zhangchizju2012@zju.edu.cn
|
affa813021fb253b56dceef1f271826d715e645c
|
150d9e4cee92be00251625b7f9ff231cc8306e9f
|
/NumbersSmallerThanCurrent.py
|
2bae72ae2f809266720b4eaf4ce851d1171905a7
|
[] |
no_license
|
JerinPaulS/Python-Programs
|
0d3724ce277794be597104d9e8f8becb67282cb0
|
d0778178d89d39a93ddb9b95ca18706554eb7655
|
refs/heads/master
| 2022-05-12T02:18:12.599648
| 2022-04-20T18:02:15
| 2022-04-20T18:02:15
| 216,547,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
'''
1365. How Many Numbers Are Smaller Than the Current Number
Given the array nums, for each nums[i] find out how many numbers in the array are smaller than it. That is, for each nums[i] you have to count the number of valid j's such that j != i and nums[j] < nums[i].
Return the answer in an array.
Example 1:
Input: nums = [8,1,2,2,3]
Output: [4,0,1,1,3]
Explanation:
For nums[0]=8 there exist four smaller numbers than it (1, 2, 2 and 3).
For nums[1]=1 does not exist any smaller number than it.
For nums[2]=2 there exist one smaller number than it (1).
For nums[3]=2 there exist one smaller number than it (1).
For nums[4]=3 there exist three smaller numbers than it (1, 2 and 2).
Example 2:
Input: nums = [6,5,4,8]
Output: [2,1,0,3]
Example 3:
Input: nums = [7,7,7,7]
Output: [0,0,0,0]
Constraints:
2 <= nums.length <= 500
0 <= nums[i] <= 100
'''
class Solution(object):
def smallerNumbersThanCurrent(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
index_dict = {}
sorted_nums = sorted(nums)
size = len(nums)
for index in range(size):
            if sorted_nums[index] not in index_dict:
                index_dict[sorted_nums[index]] = index
result = []
for num in nums:
result.append(index_dict[num])
return result
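# Example (first case from the problem statement above):
#   Solution().smallerNumbersThanCurrent([8, 1, 2, 2, 3])  ->  [4, 0, 1, 1, 3]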
|
[
"jerinsprograms@gmail.com"
] |
jerinsprograms@gmail.com
|
64fb7280e68ec5647db275e2630d86e34c3f4531
|
751d837b8a4445877bb2f0d1e97ce41cd39ce1bd
|
/codegolf/ryleys-theorem.py
|
d1cab6a350c802b71d7905109a1cbddc1681610b
|
[
"MIT"
] |
permissive
|
qeedquan/challenges
|
d55146f784a3619caa4541ac6f2b670b0a3dd8ba
|
56823e77cf502bdea68cce0e1221f5add3d64d6a
|
refs/heads/master
| 2023-08-11T20:35:09.726571
| 2023-08-11T13:02:43
| 2023-08-11T13:02:43
| 115,886,967
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
#!/usr/bin/env python
"""
S. Ryley proved following theorem in 1825:
Every rational number can be expressed as a sum of three rational cubes.
Challenge
Given some rational number r∈Q find three rational numbers a,b,c∈Q such that
r=a^3+b^3+c^3.
Details
Your submission should be able to compute a solution for every input given enough time and memory, that means having for instance two 32-bit int representing a fraction is not sufficient.
30 = 3982933876681^3 - 636600549515^3 - 3977505554546^3
52 = 60702901317^3 + 23961292454^3 - 61922712865^3
307/1728 = (1/2)^3 + (1/3)^3 + (1/4)^3
0 = 0^3 + 0^3 + 0^3
1 = (1/2)^3 + (2/3)^3 + (5/6)^3
42 = (1810423/509232)^3 + (-14952/10609)^3 + (-2545/4944)^3
"""
from sympy import Rational
"""
@alephalpha
This formula is given in: Richmond, H. (1930). On Rational Solutions of x^3+y^3+z^3=R. Proceedings of the Edinburgh Mathematical Society, 2(2), 92-100.
"""
def ryley(r):
a = (27*r**3 + 1) / (27*r**2 - 9*r + 3)
b = (-27*r**3 + 9*r - 1) / (27*r**2 - 9*r + 3)
c = (-27*r**2 + 9*r) / (27*r**2 - 9*r + 3)
return (a, b, c)
def test(s):
r = Rational(s)
(a, b, c) = ryley(r)
print("%s = (%s)**3 + (%s)**3 + (%s)**3" % (r, a, b, c))
assert(a**3 + b**3 + c**3 == r)
def main():
test("30")
test("52")
test("307/1728")
test("0")
test("1")
test("42")
main()
|
[
"qeed.quan@gmail.com"
] |
qeed.quan@gmail.com
|
35f1888cc5c9fc51aa085d4c3396ea1d040525b6
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/070_oop/008_metaprogramming/_exercises/templates/abc_Abstract Base Classes/a_002_abc_register.py
|
b8bfec0995c22298e2201c8eea4bad38aa21ba9e
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,704
|
py
|
# # abc_register.py
#
# # Registering a Concrete Class
# # There are two ways to indicate that a concrete class implements an abstract API: either explicitly register the class
# # or create a new subclass directly from the abstract base. Use the register() class method as a decorator on a concrete
# # class to add it explicitly when the class provides the required API, but is not part of the inheritance tree of the
# # abstract base class.
#
# ______ a..
# ____ a_001_abc_base ______ P..
#
#
# c_ LocalBaseClass
# p..
#
#
# ??.? # registration
# c_ RegisteredImplementation L..
#
# ___ load input
# r_ i__.r..
#
# ___ save output data
# r_ o__.w.. d..
#
#
# __ ______ __ _____
# print('Subclass:', iss..(R..
# P..
# print('Instance:', isi..(R..
# P..
#
#
# # $ python3 abc_register.py
# #
# # Subclass: True
# # Instance: True
#
# # In this example the RegisteredImplementation is derived from LocalBaseClass, but is registered as implementing
# # the PluginBase API so issubclass() and isinstance() treat it as though it is derived from PluginBase.
#
# python2
#
# # import abc
# # from abc_base import PluginBase
# #
# #
# # class RegisteredImplementation(object):
# #
# # def load(self, input):
# # return input.read()
# #
# # def save(self, output, data):
# # return output.write(data)
# #
# #
# # PluginBase.register(RegisteredImplementation)
# #
# # if __name__ == '__main__':
# # print 'Subclass:', issubclass(RegisteredImplementation, PluginBase)
# # print 'Instance:', isinstance(RegisteredImplementation(), PluginBase)
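# python3 (a possible uncommented equivalent -- a sketch only; it assumes a
# PluginBase like the one defined in the a_001_abc_base example, i.e. an
# abc.ABC with abstract load()/save() methods):
#
# # import abc
# #
# #
# # class PluginBase(abc.ABC):
# #
# #     @abc.abstractmethod
# #     def load(self, input):
# #         """Retrieve data from the input source and return an object."""
# #
# #     @abc.abstractmethod
# #     def save(self, output, data):
# #         """Save the data object to the output."""
# #
# #
# # class LocalBaseClass:
# #     pass
# #
# #
# # @PluginBase.register            # registration instead of inheritance
# # class RegisteredImplementation(LocalBaseClass):
# #
# #     def load(self, input):
# #         return input.read()
# #
# #     def save(self, output, data):
# #         return output.write(data)
# #
# #
# # if __name__ == '__main__':
# #     print('Subclass:', issubclass(RegisteredImplementation, PluginBase))
# #     print('Instance:', isinstance(RegisteredImplementation(), PluginBase))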
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
8f0df066964b1e9be5746d2af2975bece4df73dd
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/histogram/_textangle.py
|
68378b446f6808b2e7ee34ebc5568780f31bdf59
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
import _plotly_utils.basevalidators
class TextangleValidator(_plotly_utils.basevalidators.AngleValidator):
def __init__(self, plotly_name="textangle", parent_name="histogram", **kwargs):
super(TextangleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
|
[
"nicolas@plot.ly"
] |
nicolas@plot.ly
|
1b5b98a0ebbbde2b3c9439d1ebe83df300ce4cec
|
e5eec1428da1d24d3e9b86f5723c51cd2ca636cd
|
/graph/크루스칼 알고리즘.py
|
ae3bd36d2be0371b0a25c5e8ea7b41ba0d1f97a7
|
[] |
no_license
|
jamwomsoo/Algorithm_prac
|
3c36c381f59277721517d331a8f1640399d80c1d
|
8393f3cc2f950214c47f3cf0b2c1271791f115d0
|
refs/heads/master
| 2023-06-09T06:49:14.739255
| 2021-06-18T06:41:01
| 2021-06-18T06:41:01
| 325,227,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
# Find the set (root) that a given element belongs to
def find_parent(parent, x):
    # If x is not the root node, recurse until the root is found (with path compression)
    if parent[x] != x:
        parent[x] = find_parent(parent, parent[x])
    return parent[x]
# Merge the sets that the two elements belong to
def union_parent(parent, a, b):
    a = find_parent(parent, a)
    b = find_parent(parent, b)
    if a < b:
        parent[b] = a
    else:
        parent[a] = b
# Read the number of nodes and the number of edges (union operations)
v, e = map(int,input().split())
parent = [0] * (v + 1) # initialize the parent table
# A list to hold every edge and a variable for the final total cost
edges = []
result = 0
# In the parent table, initialize each node's parent to itself
for i in range(1, v + 1):
    parent[i] = i
# Read the information for every edge
for i in range(e):
    a, b, cost = map(int, input().split())
    # Put the cost first in the tuple so that edges sort by cost
    edges.append((cost, a, b))
# Sort the edges by cost
edges.sort()
# Examine the edges one by one
for edge in edges:
    cost, a, b = edge
    # Include the edge in the set only if it does not create a cycle
    if find_parent(parent, a) != find_parent(parent, b):
        union_parent(parent, a, b)
        result+=cost
print(result)
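# Example: for the input
#   3 3
#   1 2 1
#   2 3 2
#   1 3 3
# the two cheapest edges (1-2 with cost 1 and 2-3 with cost 2) are taken,
# edge 1-3 would close a cycle and is skipped, so the program prints 3.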
|
[
"41579282+jamwomsoo@users.noreply.github.com"
] |
41579282+jamwomsoo@users.noreply.github.com
|
a439223a06d02e7d8088ceb8471a3751f271d18a
|
2e0e549394fefc3e7332170603f0be3b96fcb8f5
|
/src/compas_ghpython/artists/lineartist.py
|
e6f8a82ad85c337d0de61df00022aa80fe98d0ce
|
[
"MIT"
] |
permissive
|
irfanirw/compas
|
02f6f15dc2e20de886faf5d77420fd55f6a03c4e
|
41048e71610c3e063e3e9085440b3cba0a00b603
|
refs/heads/master
| 2022-11-29T18:59:18.344232
| 2020-08-17T19:04:14
| 2020-08-17T19:04:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_ghpython
from compas_ghpython.artists.primitiveartist import PrimitiveArtist
__all__ = ['LineArtist']
class LineArtist(PrimitiveArtist):
"""Artist for drawing lines.
Parameters
----------
primitive : :class:`compas.geometry.Line`
A COMPAS line.
Other Parameters
----------------
See :class:`compas_ghpython.artists.PrimitiveArtist` for all other parameters.
Examples
--------
>>>
"""
def draw(self):
"""Draw the line.
Returns
-------
list of :class:`Rhino.Geometry.Line`
"""
start = list(self.primitive.start)
end = list(self.primitive.end)
lines = [{'start': start, 'end': end}]
return compas_ghpython.draw_lines(lines)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
|
[
"vanmelet@ethz.ch"
] |
vanmelet@ethz.ch
|
b124149598b75875c924a95d429ff80af9708bdd
|
c9bae3b822cf8c098e656486d455d88646d1712b
|
/starnavi_task/settings.py
|
db077ea080b9e03e959ce8620862d8c3de3557a8
|
[] |
no_license
|
sergiy-chumachenko/starnavi
|
f9967eeeec8803b58ecb0bdab28063431afc9cc7
|
1aa901ba42be1ef4322c9766ec58edecca07ce74
|
refs/heads/master
| 2022-12-10T20:25:46.923932
| 2020-07-18T17:22:07
| 2020-07-18T17:22:07
| 172,897,429
| 2
| 0
| null | 2022-12-08T01:39:44
| 2019-02-27T10:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 5,260
|
py
|
"""
Django settings for starnavi_task project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import environ
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = environ.Path(__file__) - 2
env = environ.Env(
DJANGO_DEBUG=(bool, False),
DJANGO_SECRET_KEY=(str, 'CHANGEME!!!e8!1671ifpp362f9gbd3v@e($0_flznbb3fa2d4zg7zn@%yyk2'),
DJANGO_ALLOWED_HOSTS=(list, []),
DJANGO_STATIC_ROOT=(str, str(ROOT_DIR('static'))),
DJANGO_MEDIA_ROOT=(str, str(ROOT_DIR('media'))),
DJANGO_DATABASE_URL=(str, 'postgres://{USER}:{PASSWORD}@{HOST}:{PORT}/{DB_NAME}'),
CLEARBIT_API_SECRET_KEY=(str, 'CLEARBIT_API_SECRET_KEY'),
HUNTER_API_SECRET_KEY=(str, 'HUNTER_API_SECRET_KEY'),
HUNTER_VERIFICATION_LIMIT=(str, 'HUNTER_VERIFICATION_LIMIT')
)
environ.Env.read_env(env_file=os.path.join(str(ROOT_DIR), '.env'))
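# A .env file read by the line above could look like this (values are purely
# illustrative; the variable names come from the Env() schema defined above):
#
#   DJANGO_DEBUG=True
#   DJANGO_SECRET_KEY=change-me-to-a-long-random-string
#   DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1
#   DJANGO_DATABASE_URL=postgres://user:password@localhost:5432/starnavi
#   CLEARBIT_API_SECRET_KEY=sk_example
#   HUNTER_API_SECRET_KEY=example_key
#   HUNTER_VERIFICATION_LIMIT=10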
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DJANGO_DEBUG")
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS")
# Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'rest_framework',
'djoser'
]
LOCAL_APPS = [
'posts',
'users'
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'starnavi_task.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'starnavi_task.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# },
'default': env.db('DJANGO_DATABASE_URL')
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = env.str("DJANGO_STATIC_ROOT")
MEDIA_ROOT = env.str("DJANGO_MEDIA_ROOT")
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication'
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
}
SIMPLE_JWT = {
'AUTH_HEADER_TYPES': ('JWT',),
# 'JWT_EXPIRATION_DELTA': datetime.timedelta(minutes=30)
}
APPEND_SLASH = False
CLEARBIT_API_SECRET_KEY = env.str("CLEARBIT_API_SECRET_KEY")
HUNTER_IS_ACTIVE = False
HUNTER_API_SECRET_KEY = env.str("HUNTER_API_SECRET_KEY")
HUNTER_VERIFICATION_LIMIT = env.int("HUNTER_VERIFICATION_LIMIT")
|
[
"chumachenko.sergiy@gmail.com"
] |
chumachenko.sergiy@gmail.com
|
f59b3577b280e77e01de5f4b1a661591fa9d69c2
|
8d23f941ade6e18df9094760c4e3ad5676e96fef
|
/advanced/05/05pymsql.py
|
f0982c9fb3bf720ae5266437de8f47d03b214b74
|
[] |
no_license
|
asdwsxzc123/python
|
95c7b6ed39d47a48e5a2630a3f221c20a8347764
|
b2422a7c6b62d4bfa9d1488a6dff322a57ba4c48
|
refs/heads/master
| 2020-04-03T14:34:06.299282
| 2019-02-05T13:14:16
| 2019-02-05T13:14:16
| 155,326,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
from pymysql import connect
# connect to the database
conn = connect(host='172.16.20.46', port=3306, user='root', password='123456', database='jing_dong', charset='utf8')
# get a cursor object
cs1 = conn.cursor()
# SQL injection
# # run a select statement
# cs1.execute('select * from goods;')
# # print(row)
# for row in cs1.fetchall():
#     print(row)
# insert data
cs1.execute('insert into goods_cates (name) values ("硬盘")')
# inserting data requires a commit
conn.commit()
# delete data
# conn.rollback()
# close the cursor and the connection
cs1.close()
conn.close()
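# Note on the "SQL injection" comment above: when values come from user input,
# pass them as query parameters instead of formatting them into the SQL string,
# for example:
#   cs1.execute('insert into goods_cates (name) values (%s)', ('硬盘',))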
|
[
"sbfgeyygy@126.com"
] |
sbfgeyygy@126.com
|
e76a57551d81a6271b7c5fa8396a47b474218e4a
|
432c55838cad26bb6233a799e35e9187a3451434
|
/q2_diversity/tests/test_beta_correlation.py
|
f511afbe0c9ea946466122955ad84cfb567a9924
|
[
"BSD-3-Clause"
] |
permissive
|
wasade/q2-diversity
|
5a46d7a34bfae05d26fd285f2bfd180e55d58f2a
|
cdcf24c551a8ca4246dce658fd6b608b174e224d
|
refs/heads/master
| 2023-06-27T19:16:48.985517
| 2018-08-31T21:56:10
| 2018-08-31T21:56:10
| 150,510,254
| 0
| 0
|
BSD-3-Clause
| 2018-09-27T01:19:35
| 2018-09-27T01:19:35
| null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import pandas as pd
import qiime2
from qiime2 import Artifact
from qiime2.plugin.testing import TestPluginBase
class BetaCorrelationTests(TestPluginBase):
package = 'q2_diversity'
def setUp(self):
super().setUp()
self.beta_correlation = self.plugin.pipelines['beta_correlation']
dm = skbio.DistanceMatrix([[0, 1, 2],
[1, 0, 1],
[2, 1, 0]],
ids=['sample1', 'sample2', 'sample3'])
self.dm = Artifact.import_data('DistanceMatrix', dm)
self.md = qiime2.NumericMetadataColumn(
pd.Series([1, 2, 3], name='number',
index=pd.Index(['sample1', 'sample2', 'sample3'],
name='id')))
def test_execution(self):
# does it run?
self.beta_correlation(self.md, self.dm)
def test_outputs(self):
result = self.beta_correlation(self.md, self.dm)
# correct number of outputs?
self.assertEqual(2, len(result))
# correct types?
self.assertEqual('DistanceMatrix',
str(result.metadata_distance_matrix.type))
self.assertEqual('Visualization',
str(result.mantel_scatter_visualization.type))
if __name__ == '__main__':
unittest.main()
|
[
"matthewrdillon@gmail.com"
] |
matthewrdillon@gmail.com
|
ddbedb9961f05627dcdc40f9668733b593051ada
|
e7190289cea8006fb99a54347a57439cdd7047b8
|
/stock/industry/tonghuashun_industry-master/industry_current.py
|
3100023a4c07be3e3a7b23b8aae3a3b4853d9479
|
[] |
no_license
|
hong0396/start_up_git
|
282fe3845ad4c81ee7b23785a73052e3a5749fea
|
a6f9fa2da126382577e05869ff5a2fc98e29a7fb
|
refs/heads/master
| 2021-07-09T06:16:16.955217
| 2019-03-30T13:43:13
| 2019-03-30T13:43:13
| 119,171,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,030
|
py
|
#coding:utf-8
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
from selenium import webdriver
from sqlalchemy import create_engine
import datetime
import pymysql
import requests
import pandas as pd
import time
import random
import re
# get dynamic cookies
def get_cookie():
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver=webdriver.Chrome(chrome_options=options)
url="http://q.10jqka.com.cn/thshy/"
driver.get(url)
    # get the cookie list
cookie=driver.get_cookies()
driver.close()
return cookie[0]['value']
# fetch a detail page
def get_page_detail(url):
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'Referer':'http://q.10jqka.com.cn/thshy/detail',
'Cookie':'v={}'.format(get_cookie())
}
try:
response = requests.get(url,headers =headers)
if response.status_code == 200:
return response.content
return None
except RequestException:
print('请求页面失败',url)
return None
# get the industry list: name (title), code and link (url)
def get_industry_list(url):
html = get_page_detail(url).decode('gbk')
soup = BeautifulSoup(html,'lxml')
industry_list = soup.select('.cate_items > a')
for industry in industry_list:
yield {
'title':industry.get_text(),
'code':industry.get('href').split('/')[-2],
'url':industry.get('href')
}
# get the latest data for an industry
def get_instury_current(code,year):
url = 'http://d.10jqka.com.cn/v4/line/bk_{}/01/{}.js'.format(code,year)
html = get_page_detail(url).decode('gbk')
pattern = re.compile('{"data":"(.*?)"}',re.S)
instury = re.findall(pattern,html)[0].replace(';','').split(',')
i = len(instury)-8
yield {
'code':code,
'date':instury[i],
'open_price':instury[i+1],
'high_price':instury[i+2],
'low_price':instury[i+3],
'close_price':instury[i+4],
'volume':instury[i+5],
'amount':instury[i+6]
}
def save_to_mysql(code,year):
industry_current_info = get_instury_current(code,year)
industry_current_df = pd.DataFrame(industry_current_info)
print(industry_current_df.head(5))
engine = create_engine('mysql://liangzhi:liangzhi123@192.168.2.52/financial_data?charset=utf8')
industry_current_df.to_sql('industry_history', engine, if_exists='append')
def main():
engine = create_engine('mysql://liangzhi:liangzhi123@192.168.2.52/financial_data?charset=utf8')
instury_index_url = 'http://q.10jqka.com.cn/thshy/'
industry_index_info = get_industry_list(instury_index_url)
year =datetime.date.today().year
for i in industry_index_info:
code = i['code']
save_to_mysql(code,year)
time.sleep(random.randint(0,5))
if __name__ == '__main__':
main()
|
[
"hong0396@126.com"
] |
hong0396@126.com
|
f0eb93788821d074784c29b88e667723b67a6989
|
10d17864a685c025bb77959545f74b797f1d6077
|
/capitulo 02/02.22.py
|
bbe2a8217430cbaaf8609a9daa1183c8464aeb0a
|
[] |
no_license
|
jcicerof/IntroducaoPython
|
02178d2dfcaa014587edbd3090c517089ccef7c2
|
02e619c7c17e74acdc3268fbfae9ab624a3601dd
|
refs/heads/master
| 2020-04-24T18:12:21.422079
| 2019-02-23T05:14:43
| 2019-02-23T05:14:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
##############################################################################
# Part of the book "Introdução à Programação com Python"
# (Introduction to Programming with Python)
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2019
# First edition  - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition  - January/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# File: listagem3\capítulo 02\02.22.py
# Description:
##############################################################################
salário = 1500
aumento = 5
print(salário + (salário * aumento / 100))
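# With salário = 1500 and aumento = 5 (percent), the line above prints 1575.0.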
|
[
"jose.cicero@gmail.com"
] |
jose.cicero@gmail.com
|
1b185f68bd117dbd36f56f53bb64319b0b1072cf
|
f8580d2c963b6a3c34e918e0743d0a503a9584bd
|
/unittests/test_dcDrawLists.py
|
f72721a02be41c67ddfde98034ae0e79396d6ec2
|
[] |
no_license
|
pypy/wxpython-cffi
|
f59c3faeed26e6a26d0c87f4f659f93e5366af28
|
877b7e6c1b5880517456f1960db370e4bb7f5c90
|
refs/heads/master
| 2023-07-08T21:13:22.765786
| 2016-12-02T22:10:45
| 2016-12-02T22:10:45
| 397,124,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,196
|
py
|
import imp_unittest, unittest
import wtc
import wx
import random
try:
import numpy as np
haveNumpy = True
except ImportError:
haveNumpy = False
#---------------------------------------------------------------------------
w = 600
h = 400
num = 500
colornames = ["BLACK",
"BLUE",
"BLUE VIOLET",
"BROWN",
"CYAN",
"DARK GREY",
"DARK GREEN",
"GOLD",
"GREY",
"GREEN",
"MAGENTA",
"NAVY",
"PINK",
"RED",
"SKY BLUE",
"VIOLET",
"YELLOW",
]
pencache = {}
brushcache = {}
def makeRandomPoints():
pnts = []
for i in range(num):
x = random.randint(0, w)
y = random.randint(0, h)
pnts.append( (x,y) )
return pnts
def makeRandomLines():
lines = []
for i in range(num):
x1 = random.randint(0, w)
y1 = random.randint(0, h)
x2 = random.randint(0, w)
y2 = random.randint(0, h)
lines.append( (x1,y1, x2,y2) )
return lines
def makeRandomRectangles():
rects = []
for i in range(num):
W = random.randint(10, w/2)
H = random.randint(10, h/2)
x = random.randint(0, w - W)
y = random.randint(0, h - H)
rects.append( (x, y, W, H) )
return rects
def makeRandomPolygons():
Np = 8 # number of points per polygon
polys = []
for i in range(num):
poly = []
for i in range(Np):
x = random.randint(0, w)
y = random.randint(0, h)
poly.append( (x,y) )
polys.append( poly )
return polys
def makeRandomText():
Np = 8 # number of characters in text
text = []
for i in range(num):
word = []
for i in range(Np):
c = chr( random.randint(48, 122) )
word.append( c )
text.append( "".join(word) )
return text
def makeRandomColors():
colors = []
for i in range(num):
c = random.choice(colornames)
colors.append(wx.Colour(c))
return colors
def makeRandomPens():
pens = []
for i in range(num):
c = random.choice(colornames)
t = random.randint(1, 4)
if (c,t) not in pencache:
pencache[(c, t)] = wx.Pen(c, t)
pens.append( pencache[(c, t)] )
return pens
def makeRandomBrushes():
brushes = []
for i in range(num):
c = random.choice(colornames)
if c not in brushcache:
brushcache[c] = wx.Brush(c)
brushes.append( brushcache[c] )
return brushes
#---------------------------------------------------------------------------
class dcDrawLists_Tests(wtc.WidgetTestCase):
def test_dcDrawPointLists(self):
pnl = wx.Panel(self.frame)
self.frame.SetSize((w,h))
dc = wx.ClientDC(pnl)
dc.SetPen(wx.Pen("BLACK", 1))
pens = makeRandomPens()
dc.DrawPointList(makeRandomPoints())
dc.DrawPointList(makeRandomPoints(), wx.Pen("RED", 1))
dc.DrawPointList(makeRandomPoints(), pens)
del dc
@unittest.skipIf(not haveNumpy, "Numpy required for this test")
def test_dcDrawPointArray(self):
pnl = wx.Panel(self.frame)
self.frame.SetSize((w,h))
dc = wx.ClientDC(pnl)
dc.SetPen(wx.Pen("BLACK", 1))
pens = makeRandomPens()
dc.DrawPointList(np.array(makeRandomPoints()))
dc.DrawPointList(np.array(makeRandomPoints()), wx.Pen("RED", 1))
dc.DrawPointList(np.array(makeRandomPoints()), pens)
del dc
def test_dcDrawLineLists(self):
pnl = wx.Panel(self.frame)
self.frame.SetSize((w,h))
dc = wx.ClientDC(pnl)
dc.SetPen(wx.Pen("BLACK", 1))
pens = makeRandomPens()
dc.DrawLineList(makeRandomLines())
dc.DrawLineList(makeRandomLines(), wx.Pen("RED", 2))
dc.DrawLineList(makeRandomLines(), pens)
del dc
def test_dcDrawRectangleLists(self):
pnl = wx.Panel(self.frame)
self.frame.SetSize((w,h))
dc = wx.ClientDC(pnl)
dc.SetPen(wx.Pen("BLACK", 1))
dc.SetBrush( wx.Brush("RED") )
pens = makeRandomPens()
brushes = makeRandomBrushes()
dc.DrawRectangleList(makeRandomRectangles())
dc.DrawRectangleList(makeRandomRectangles(),pens)
dc.DrawRectangleList(makeRandomRectangles(),pens[0],brushes)
dc.DrawRectangleList(makeRandomRectangles(),pens,brushes[0])
dc.DrawRectangleList(makeRandomRectangles(),None,brushes)
del dc
@unittest.skipIf(not haveNumpy, "Numpy required for this test")
def test_dcDrawRectangleArray(self):
pnl = wx.Panel(self.frame)
self.frame.SetSize((w,h))
dc = wx.ClientDC(pnl)
dc.SetPen(wx.Pen("BLACK", 1))
dc.SetBrush( wx.Brush("RED") )
pens = makeRandomPens()
brushes = makeRandomBrushes()
dc.DrawRectangleList(np.array(makeRandomRectangles()))
dc.DrawRectangleList(np.array(makeRandomRectangles()),pens)
dc.DrawRectangleList(np.array(makeRandomRectangles()),pens[0],brushes)
dc.DrawRectangleList(np.array(makeRandomRectangles()),pens,brushes[0])
dc.DrawRectangleList(np.array(makeRandomRectangles()),None,brushes)
del dc
    def test_dcDrawEllipseLists(self):
pnl = wx.Panel(self.frame)
self.frame.SetSize((w,h))
dc = wx.ClientDC(pnl)
dc.SetPen(wx.Pen("BLACK", 1))
dc.SetBrush( wx.Brush("RED") )
pens = makeRandomPens()
brushes = makeRandomBrushes()
dc.DrawEllipseList(makeRandomRectangles())
dc.DrawEllipseList(makeRandomRectangles(),pens)
dc.DrawEllipseList(makeRandomRectangles(),pens[0],brushes)
dc.DrawEllipseList(makeRandomRectangles(),pens,brushes[0])
dc.DrawEllipseList(makeRandomRectangles(),None,brushes)
del dc
    def test_dcDrawPolygonLists(self):
pnl = wx.Panel(self.frame)
self.frame.SetSize((w,h))
dc = wx.ClientDC(pnl)
dc.SetPen(wx.Pen("BLACK", 1))
dc.SetBrush( wx.Brush("RED") )
pens = makeRandomPens()
brushes = makeRandomBrushes()
polygons = makeRandomPolygons()
dc.DrawPolygonList(polygons)
dc.DrawPolygonList(polygons, pens)
dc.DrawPolygonList(polygons, pens[0],brushes)
dc.DrawPolygonList(polygons, pens,brushes[0])
dc.DrawPolygonList(polygons, None,brushes)
del dc
def test_dcDrawTextLists(self):
pnl = wx.Panel(self.frame)
self.frame.SetSize((w,h))
dc = wx.ClientDC(pnl)
dc.SetBackgroundMode(wx.SOLID)
points = makeRandomPoints()
fore = makeRandomColors()
back = makeRandomColors()
texts = makeRandomText()
dc.DrawTextList(texts, points, fore, back)
del dc
#---------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
[
"wayedt@gmail.com"
] |
wayedt@gmail.com
|
6a598ec973d39b8857bf8e4b3513591117312264
|
c036befbd9a4b81c0f082273dd0eb007e7f9582d
|
/dort-core/util/ints.py
|
fae6a3dcebe1b48a086026ebb082e01903b1f5c2
|
[
"Apache-2.0"
] |
permissive
|
Dortchain/dort-blockchian
|
889f52f36dcdeffe0f852b413cdd32879741462f
|
14f16e321a60f9d70f849f58e4e9964fa337a084
|
refs/heads/main
| 2023-06-16T01:31:30.718415
| 2021-07-11T03:03:12
| 2021-07-11T03:03:12
| 384,694,718
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
from typing import Any, BinaryIO
from Dort.util.struct_stream import StructStream
class int8(StructStream):
PACK = "!b"
class uint8(StructStream):
PACK = "!B"
class int16(StructStream):
PACK = "!h"
class uint16(StructStream):
PACK = "!H"
class int32(StructStream):
PACK = "!l"
class uint32(StructStream):
PACK = "!L"
class int64(StructStream):
PACK = "!q"
class uint64(StructStream):
PACK = "!Q"
class uint128(int):
def __new__(cls: Any, value: int):
value = int(value)
if value > (2 ** 128) - 1 or value < 0:
raise ValueError(f"Value {value} of does not fit into uin128")
return int.__new__(cls, value) # type: ignore
@classmethod
def parse(cls, f: BinaryIO) -> Any:
read_bytes = f.read(16)
assert len(read_bytes) == 16
n = int.from_bytes(read_bytes, "big", signed=False)
assert n <= (2 ** 128) - 1 and n >= 0
return cls(n)
def stream(self, f):
assert self <= (2 ** 128) - 1 and self >= 0
f.write(self.to_bytes(16, "big", signed=False))
class int512(int):
def __new__(cls: Any, value: int):
value = int(value)
# note that the boundaries for int512 is not what you might expect. We
# encode these with one extra byte, but only allow a range of
# [-INT512_MAX, INT512_MAX]
if value >= (2 ** 512) or value <= -(2 ** 512):
raise ValueError(f"Value {value} of does not fit into in512")
return int.__new__(cls, value) # type: ignore
# Uses 65 bytes to fit in the sign bit
@classmethod
def parse(cls, f: BinaryIO) -> Any:
read_bytes = f.read(65)
assert len(read_bytes) == 65
n = int.from_bytes(read_bytes, "big", signed=True)
assert n < (2 ** 512) and n > -(2 ** 512)
return cls(n)
def stream(self, f):
assert self < (2 ** 512) and self > -(2 ** 512)
f.write(self.to_bytes(65, "big", signed=True))
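# Round-trip sketch for the fixed-width integer types above (standard library only):
#
#   import io
#   value = uint128(2 ** 100 + 7)
#   buf = io.BytesIO()
#   value.stream(buf)           # writes 16 big-endian bytes
#   buf.seek(0)
#   assert uint128.parse(buf) == value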
|
[
"welllinks@outlook.com"
] |
welllinks@outlook.com
|
48fa8d2e0ea8bb9d65ab82b01db7bab8b5d6fe5c
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/HXkpnCxJgxkFwaReT_10.py
|
0ac63cec767a5f117ae4d9a6bd8853f9c41acfad
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
"""
Given a function that accepts **unlimited** arguments, check and count how
many data types are in those arguments. Finally return the total in a list.
List order is:
[int, str, bool, list, tuple, dictionary]
### Examples
count_datatypes(1, 45, "Hi", False) ➞ [2, 1, 1, 0, 0, 0]
count_datatypes([10, 20], ("t", "Ok"), 2, 3, 1) ➞ [3, 0, 0, 1, 1, 0]
count_datatypes("Hello", "Bye", True, True, False, {"1": "One", "2": "Two"}, [1, 3], {"Brayan": 18}, 25, 23) ➞ [2, 2, 3, 1, 0, 2]
count_datatypes(4, 21, ("ES", "EN"), ("a", "b"), False, [1, 2, 3], [4, 5, 6]) ➞ [2, 0, 1, 2, 2, 0]
### Notes
If no arguments are given, return `[0, 0, 0, 0, 0, 0]`
"""
def count_datatypes(*args):
i = sum(type(arg) == int for arg in args)
s = sum(type(arg) == str for arg in args)
b = sum(type(arg) == bool for arg in args)
l = sum(type(arg) == list for arg in args)
t = sum(type(arg) == tuple for arg in args)
d = sum(type(arg) == dict for arg in args)
return [i,s,b,l,t,d]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
8888162efa48c3b16541f45d0afbca742d5c9197
|
f504253210cec1c4ec6c3ea50a45564db7d6cd7f
|
/prettyqt/custom_widgets/adjustingboxlayoutdockwidget.py
|
35beb69676ef4e03eeb813230ff08764a12d6637
|
[
"MIT"
] |
permissive
|
phil65/PrettyQt
|
b1150cb4dce982b9b8d62f38f56694959b720a3e
|
f00500d992d1befb0f2c2ae62fd2a8aafba7fd45
|
refs/heads/master
| 2023-08-30T21:00:08.905444
| 2023-08-17T12:24:45
| 2023-08-17T12:24:45
| 177,451,205
| 17
| 5
|
MIT
| 2020-08-15T22:21:18
| 2019-03-24T18:10:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,620
|
py
|
from __future__ import annotations
from prettyqt import constants, widgets
DockWidgetArea = constants.DockWidgetArea
class AdjustingBoxLayoutDockWidget(widgets.DockWidget):
"""DockWidget adjusting its child widget QBoxLayout direction.
The child widget layout direction is set according to dock widget area.
The child widget MUST use a QBoxLayout.
"""
def __init__(self, parent: widgets.QWidget | None = None, **kwargs):
super().__init__(parent, **kwargs)
self._current_area = DockWidgetArea.NoDockWidgetArea
self.dockLocationChanged.connect(self._dock_location_changed)
self.topLevelChanged.connect(self._top_level_changed)
def setWidget(self, widget: widgets.QWidget):
"""Set the widget of this QDockWidget."""
super().setWidget(widget)
self._dock_location_changed(self._current_area)
def _dock_location_changed(self, area: DockWidgetArea):
self._current_area = area
if (widget := self.widget()) is not None:
if isinstance(layout := widget.layout(), widgets.QBoxLayout):
if area in (
DockWidgetArea.LeftDockWidgetArea,
DockWidgetArea.RightDockWidgetArea,
):
direction = widgets.BoxLayout.Direction.TopToBottom
else:
direction = widgets.BoxLayout.Direction.LeftToRight
layout.setDirection(direction)
self.resize(widget.minimumSize())
self.adjustSize()
def _top_level_changed(self, top_level: bool):
if (widget := self.widget()) is not None and top_level:
if isinstance(layout := widget.layout(), widgets.QBoxLayout):
layout.setDirection(widgets.BoxLayout.Direction.LeftToRight)
self.resize(widget.minimumSize())
self.adjustSize()
def showEvent(self, event):
"""Make sure this dock widget is raised when it is shown.
This is useful for tabbed dock widgets.
"""
self.raise_()
if __name__ == "__main__":
app = widgets.app()
mainwindow = widgets.MainWindow()
dockwidget = AdjustingBoxLayoutDockWidget()
textbox1 = widgets.PlainTextEdit()
textbox2 = widgets.PlainTextEdit()
container = widgets.Widget()
container.set_layout("horizontal")
container.set_layout(None)
container.set_layout("horizontal")
container.box.add(textbox1)
dockwidget.setWidget(container)
mainwindow.add_dockwidget(dockwidget)
mainwindow.show()
with app.debug_mode():
app.exec()
|
[
"philipptemminghoff@googlemail.com"
] |
philipptemminghoff@googlemail.com
|
c95ff0ea213f72ba08dca8053a412519590fdd91
|
c89543dd926c1787c40616ed174a3d1371c54449
|
/superset/views/users/api.py
|
675b918847c461140578a642453c1a429f620198
|
[
"Apache-2.0",
"OFL-1.1"
] |
permissive
|
j420247/incubator-superset
|
7c7bff330393f0e91f5e67782f35efe8c735250a
|
c9b9b7404a2440a4c9d3173f0c494ed40f7fa2bd
|
refs/heads/master
| 2023-03-11T21:53:16.827919
| 2023-02-03T19:04:17
| 2023-02-03T19:04:17
| 157,780,350
| 1
| 1
|
Apache-2.0
| 2023-03-07T00:14:51
| 2018-11-15T22:24:29
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,465
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from flask import g, Response
from flask_appbuilder.api import expose, safe
from flask_jwt_extended.exceptions import NoAuthorizationError
from superset.views.base_api import BaseSupersetApi
from superset.views.users.schemas import UserResponseSchema
from superset.views.utils import bootstrap_user_data
user_response_schema = UserResponseSchema()
class CurrentUserRestApi(BaseSupersetApi):
"""An api to get information about the current user"""
resource_name = "me"
openapi_spec_tag = "Current User"
openapi_spec_component_schemas = (UserResponseSchema,)
@expose("/", methods=["GET"])
@safe
def get_me(self) -> Response:
"""Get the user object corresponding to the agent making the request
---
get:
description: >-
Returns the user object corresponding to the agent making the request,
or returns a 401 error if the user is unauthenticated.
responses:
200:
description: The current user
content:
application/json:
schema:
type: object
properties:
result:
$ref: '#/components/schemas/UserResponseSchema'
401:
$ref: '#/components/responses/401'
"""
try:
if g.user is None or g.user.is_anonymous:
return self.response_401()
except NoAuthorizationError:
return self.response_401()
return self.response(200, result=user_response_schema.dump(g.user))
@expose("/roles/", methods=["GET"])
@safe
def get_my_roles(self) -> Response:
"""Get the user roles corresponding to the agent making the request
---
get:
description: >-
Returns the user roles corresponding to the agent making the request,
or returns a 401 error if the user is unauthenticated.
responses:
200:
description: The current user
content:
application/json:
schema:
type: object
properties:
result:
$ref: '#/components/schemas/UserResponseSchema'
401:
$ref: '#/components/responses/401'
"""
try:
if g.user is None or g.user.is_anonymous:
return self.response_401()
except NoAuthorizationError:
return self.response_401()
user = bootstrap_user_data(g.user, include_perms=True)
return self.response(200, result=user)
|
[
"noreply@github.com"
] |
j420247.noreply@github.com
|
9061a8446b430d608a4cab7867f06179ed62a610
|
65f5a6e6455cc76558dfcc95a9b2a42aeb3f4205
|
/add_contextual_info.py
|
d525a56fa2fb24b5f5497ae7a2d20d022a748ab8
|
[
"MIT"
] |
permissive
|
robertdstein/millipede_parser
|
af84b3bd650215e09d200bff67fea11cc52cd031
|
7f3ad22aea3eca6385e55fd084ce43b2d7000f92
|
refs/heads/master
| 2020-08-12T19:12:59.643359
| 2020-01-06T16:01:40
| 2020-01-06T16:01:40
| 214,826,592
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,639
|
py
|
import pickle
import os
import numpy as np
from astropy.io import fits
from astropy.time import Time
import argparse
from parse_archival_scan import get_v0_output_dir
from contextual_info import archival_data
import healpy as hp
import logging
import requests
def extract_az_zen(nside, index):
return hp.pix2ang(nside, index)
def add_archival_ehe_info(candidate, data, header):
# zen, phi = extract_az_zen(header["NSIDE"], header["MINPIXEL"])
# dec = np.degrees(zen - np.pi/2.)
stream = header["STREAM"]
namesplit = [x for x in candidate.split("_")]
try:
year = int(namesplit[1])
num = int(namesplit[4].split(".")[0])
stream_mask = archival_data.T[4] == stream
year_mask = archival_data[stream_mask].T[3] == year
match = archival_data[stream_mask][year_mask][num]
header.set('time_mjd', match[0].mjd)
header.set("YEAR", int(match[0].jyear))
except IndexError:
pass
except ValueError:
pass
return data, header
def v1_gcn_url(event_id, run_id):
return "https://gcn.gsfc.nasa.gov/notices_amon/{0}_{1}.amon".format(int(event_id), int(run_id))
def v2_gcn_url(event_id, run_id):
return "https://gcn.gsfc.nasa.gov/notices_amon_g_b/{0}_{1}.amon".format(int(run_id), int(event_id))
def retrieve_v2_alert_info(data, header):
if not header["ARCHIVAL"]:
url = v2_gcn_url(header["event_id"], header["run_id"])
page = requests.get(url)
if not "404 Not Found" in page.text:
print("Found GCN: {0}".format(url))
for line in page.text.splitlines():
row = [x for x in line.split(":")]
if len(row) > 1:
val = [x for x in row[1].split(" ") if x not in ""][0]
if row[0] == "ENERGY":
header.set("E_TeV", float(val))
elif row[0] == "SIGNALNESS":
header.set("P_astro", float(val))
elif row[0] == "RUN_NUM":
header.set("run_id", float(val))
elif row[0] == "EVENT_NUM":
header.set("event_id", float(val))
elif row[0] == "NOTICE_TYPE":
if "Gold" in line.split(" "):
header.set("Stream", "Gold")
elif "Bronze" in line.split(" "):
header.set("Stream", "Bronze")
else:
raise Exception("Stream not found in {0}".format(row))
elif row[0] == "FAR":
header.set("FAR", float(val))
elif row[0] == "DISCOVERY_DATE":
disc_date = "20" + line.split(" ")[-2].replace("/", "-")
elif row[0] == "DISCOVERY_TIME":
disc_time = line.split(" ")[-2][1:-1]
time_str = "{0} {1} UTC".format(disc_date, disc_time)
header.set("TIME_UTC", time_str)
header.set("TIME_MJD", Time("{0}T{1}".format(disc_date, disc_time), scale="utc", format="isot").mjd)
return data, header
def retrieve_v1_alert_info(data, header):
if not header["ARCHIVAL"]:
url = v1_gcn_url(header["event_id"], header["run_id"])
page = requests.get(url)
if not "404 Not Found" in page.text:
print("Found GCN: {0}".format(url))
for line in page.text.splitlines():
row = [x for x in line.split(":")]
if len(row) > 1:
val = [x for x in row[1].split(" ") if x not in ""][0]
if row[0] == "ENERGY":
header.set("E_TeV", float(val))
elif row[0] == "SIGNALNESS":
header.set("P_astro", float(val))
elif row[0] == "RUN_NUM":
header.set("run_id", float(val))
elif row[0] == "EVENT_NUM":
header.set("event_id", float(val))
elif row[0] == "NOTICE_TYPE":
if "EHE" in line.split(" "):
header.set("Stream", "EHE")
elif "HESE" in line.split(" "):
header.set("Stream", "HESE")
else:
raise Exception("Stream not found in {0}".format(row))
elif row[0] == "FAR":
header.set("FAR", float(val))
elif row[0] == "CHARGE":
header.set("CHARGE", float(val))
elif row[0] == "SIGNAL_TRACKNESS":
header.set("SIGTRACK", float(val))
elif row[0] == "DISCOVERY_DATE":
disc_date = "20" + line.split(" ")[-2].replace("/", "-")
elif row[0] == "DISCOVERY_TIME":
disc_time = line.split(" ")[-2][1:-1]
time_str = "{0} {1} UTC".format(disc_date, disc_time)
header.set("TIME_UTC", time_str)
header.set("TIME_MJD", Time("{0}T{1}".format(disc_date, disc_time), scale="utc", format="isot").mjd)
return data, header
def get_v1_output_dir(base_output_dir):
return os.path.join(base_output_dir, "fits_v1_with_contextual_info")
def add_contextual_info(candidate, base_output_dir):
input_dir = get_v0_output_dir(base_output_dir)
path = os.path.join(input_dir, candidate)
logging.info(candidate)
output_dir = get_v1_output_dir(base_output_dir)
try:
os.makedirs(output_dir)
except OSError:
pass
output_file = os.path.join(output_dir, candidate)
with fits.open(path) as hdul:
data = hdul[0].data
header = hdul[0].header
data, header = retrieve_v1_alert_info(data, header)
data, header = retrieve_v2_alert_info(data, header)
data, header = add_archival_ehe_info(candidate, data, header)
hdul = fits.PrimaryHDU(data=data, header=header)
logging.info("Writing to {0}".format(output_file))
hdul.writeto(output_file, overwrite=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output_dir")
parser.add_argument("-e", "--event", default=None)
args = parser.parse_args()
if args.event is not None:
candidates = [args.event]
else:
candidates = sorted([y for y in os.listdir(get_v0_output_dir(args.output_dir)) if "event" in y])
for candidate in candidates:
add_contextual_info(candidate, args.output_dir)
|
[
"robert.stein@desy.de"
] |
robert.stein@desy.de
|
0325fa1080c27994e0fa2c8ee857985828ccad3d
|
5c72a7214bcf1d0cf185bce57393b201cf1b9dd9
|
/pyigm/surveys/tests/test_llssurvey.py
|
4545d6d8d0a006e1798263ad87f132c1a49e6145
|
[] |
no_license
|
JianiDing/pyigm
|
6e9b960a0e8617e8861f45b0699f5e1847b5aff8
|
d72f84a27fb25386aab8035bd49b21b37008f0af
|
refs/heads/master
| 2021-08-24T09:06:49.810451
| 2017-12-07T01:17:53
| 2017-12-07T01:17:53
| 110,727,652
| 1
| 0
| null | 2017-12-09T00:00:27
| 2017-11-14T18:18:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,416
|
py
|
# Module to run tests on initializing AbsSurvey
# TEST_UNICODE_LITERALS
import numpy as np
import glob, os, imp, pdb
import pytest
from pyigm.surveys.llssurvey import LLSSurvey
from ..lls_literature import load_lls_lit
remote_data = pytest.mark.remote_data
lt_path = imp.find_module('linetools')[1]
'''
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
'''
def test_read_hdlls_dr1_simple():
#hdlls = LLSSurvey.load_HDLLS()
hdlls = LLSSurvey.load_HDLLS(load_sys=False)
assert hdlls.nsys == 157
@remote_data
def test_read_hdlls_dr1(): # This might actually be local now..
hdlls = LLSSurvey.load_HDLLS()
assert hdlls.nsys == 157
CII_clms = hdlls.ions((6,2))
gdCII = np.where(CII_clms['flag_N']>0)[0]
assert len(gdCII) == 103
def test_dat_list():
"""JXP format :: Likely to be Deprecated
"""
# LLS Survey
if os.getenv('LLSTREE') is None:
assert True
return
# Load
lls = LLSSurvey.from_flist('Lists/lls_metals.lst', tree=os.getenv('LLSTREE'))
# tests
np.testing.assert_allclose(lls.NHI[0], 19.25)
assert lls.nsys == 164
def test_sdss():
""" Test SDSS DR7 -- This is very slow..
"""
# All
sdss_dr7_all = LLSSurvey.load_SDSS_DR7(sample='all')
assert sdss_dr7_all.nsys == 1935
# Stat
sdss_dr7_stat = LLSSurvey.load_SDSS_DR7()
assert len(sdss_dr7_stat.NHI) == 254
def test_hst():
""" Test HST surveys
"""
# ACS
acs = LLSSurvey.load_HST_ACS()
assert acs.nsys == 34
assert len(acs.sightlines) == 18
# WFC3
wfc3 = LLSSurvey.load_HST_WFC3()
assert wfc3.nsys == 91
assert len(wfc3.sightlines) == 53
# Combined
HST_LLS = wfc3 + acs
assert HST_LLS.nsys == 125
assert len(HST_LLS.sightlines) == 71
def test_z3mage():
""" Test z~3 MagE
"""
# All
z3mage = LLSSurvey.load_mage_z3()
assert z3mage.nsys == 60
assert len(z3mage.sightlines) == 105
# Non-Color
z3mage_NC = LLSSurvey.load_mage_z3(sample='non-color')
assert z3mage_NC.nsys == 32
assert len(z3mage_NC.sightlines) == 61
@remote_data
def test_literature():
""" Literature list
"""
lls_lit = load_lls_lit()
assert lls_lit.nsys == 58
assert lls_lit.ref == 'Zon04,Jen05,Tri05,Prx06a,Prx06b,Mei06,Mei07,Mei08,Nes08,Mei09,DZ09,Tum11,Kcz12,Bat12'
|
[
"xavier@ucolick.org"
] |
xavier@ucolick.org
|
7e539a9efa9026a14bd5bab19690d3be70235f37
|
951e433b25a25afeea4d9b45994a57e0a6044144
|
/NowCoder/百度_二叉树深度.py
|
d30d251bea18bf6991eba6cc5047bd36ce19bac8
|
[] |
no_license
|
EricaEmmm/CodePython
|
7c401073e0a9b7cd15f9f4a553f0aa3db1a951a3
|
d52aa2a0bf71b5e7934ee7bff70d593a41b7e644
|
refs/heads/master
| 2020-05-31T14:00:34.266117
| 2019-09-22T09:48:23
| 2019-09-22T09:48:23
| 190,318,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
from tool import TreeNode
def depth(root):
    # recursion
if not root:
return 0
return max(depth(root.left), depth(root.right)) + 1
def depth1(root):
    # level-order traversal using a queue
depth = 0
tmp = []
tmp.append(root)
while tmp:
depth += 1
for i in range(len(tmp)):
new = tmp[0]
del tmp[0]
if new.left:
tmp.append(new.left)
if new.right:
tmp.append(new.right)
return depth
if __name__ == '__main__':
root = TreeNode(1)
root.left = TreeNode(2)
root.left.left = TreeNode(3)
root.right = TreeNode(4)
print(depth(root))
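# --- Editorial illustration, not part of the original file: the iterative
# depth1 should agree with the recursive depth on the tree built above
# (three levels), assuming the same TreeNode class from the tool module.
if __name__ == '__main__':
    t = TreeNode(1)
    t.left = TreeNode(2)
    t.left.left = TreeNode(3)
    t.right = TreeNode(4)
    print(depth1(t))  # level-order variant; expected: 3, same as depth(t)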
|
[
"1016920795@qq.com"
] |
1016920795@qq.com
|
715651fb3db0cf699d68d033c0188c829fb5d7bc
|
8b713697ba8c40179393e069f07d750480c21d4a
|
/django_tutorial/test.py
|
21287d83ef06d9670784440e3ce3f515b7d1db5c
|
[] |
no_license
|
navill/pytest_tutorial
|
ca24bcd742d5f8c7273694ebf0a0dd38af095bdb
|
27b4d163abe8581996503c851949c9f75aebb786
|
refs/heads/master
| 2023-02-02T03:06:11.784673
| 2020-12-23T04:53:16
| 2020-12-23T04:53:16
| 323,780,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
import pytest
from django.test import TestCase
from django.contrib.auth.models import Group, User
class MyTest(TestCase):
fixtures = ['group.json', 'user.json']
def test_should_create_group(self):
group = Group.objects.get(pk=1)
self.assertEqual(group.name, 'appusers')
    def test_should_create_user(self):
user = User.objects.get(pk=1)
self.assertEqual(user.pk, 1)
@pytest.fixture
def user_A(db):
return User.objects.create_user('A')
def test_should_create_user_with_username(db, user_A):
assert user_A.username == 'A'
def test_should_check_password(db, user_A):
user_A.set_password('secret')
assert user_A.check_password('secret') is True
def test_should_not_check_unusable_password(db, user_A):
user_A.set_password('secret')
user_A.set_unusable_password()
assert user_A.check_password('secret') is False
|
[
"blue_jihoon@naver.com"
] |
blue_jihoon@naver.com
|
f7a4ceae9be0677a9d7c43d574dbb721e9efbe82
|
52ba0511e188a289f73a226f616d0f34bd96ebdb
|
/blog/models.py
|
9a2f84d21a454b24a62e1c3f0bc9a3f456aa58db
|
[] |
no_license
|
Tyaro2/my-first-blog
|
a4d96e33481e3a5a4373b41c4e5099f2c9561837
|
7d3139a010be719d96b4e94f9439c083078ff1c3
|
refs/heads/master
| 2023-04-04T05:12:03.278013
| 2021-04-06T09:36:29
| 2021-04-06T09:36:29
| 339,306,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,338
|
py
|
from django.conf import settings
from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.contrib.auth import get_user_model
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, username, email, password, **extra_fields):
if not username:
raise ValueError('The given username must be set')
if not email:
raise ValueError('The given email must be set')
user = self.model(username=self.model.normalize_username(username), email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, password, **extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True')
return self._create_user(username, email, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
username = models.CharField(max_length=25, unique=True)
email = models.EmailField(unique=True)
icon = models.ImageField(blank=True, null=True)
introduction = models.CharField(max_length=75, blank=True, null=True)
followees = models.ManyToManyField(
'User', verbose_name='フォロー中のユーザー', through='FriendShip',
related_name='+', through_fields=('follower', 'followee')
)
followers = models.ManyToManyField(
'User', verbose_name='フォローされているユーザー', through='FriendShip',
related_name='+', through_fields=('followee', 'follower')
)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
date_joined = models.DateTimeField(default=timezone.now)
objects = UserManager()
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
class Post(models.Model):
author = models.ForeignKey('blog.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
like = models.IntegerField(default=0)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
def approved_comments(self):
return self.comments.filter(approved_comment=True)
class Comment(models.Model):
post = models.ForeignKey('blog.Post', on_delete=models.CASCADE, related_name='comments')
author = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
approved_comment = models.BooleanField(default=False)
def approve(self):
self.approved_comment = True
self.save()
def __str__(self):
return self.text
class FriendShip(models.Model):
follower = models.ForeignKey('User', on_delete=models.CASCADE, related_name='followee_friendships')
followee = models.ForeignKey('User', on_delete=models.CASCADE, related_name='follower_friendships')
class Meta:
unique_together = ('follower', 'followee')
|
[
"you@example.com"
] |
you@example.com
|
11465b5b97e1db0d850bb05e9a72d69ab7a6968f
|
62fe739a9485a2bf547b6d96777f6a01c32dd2f5
|
/tarefas/forms.py
|
8b1285266859e81df676add9789352e1a9f79d9d
|
[] |
no_license
|
jgjefersonluis/pdmytest1
|
950c84016a153e931f49b47a7366d7677dfeb62d
|
7bb3b377af1fdffe2ad307352a82e2f39005b909
|
refs/heads/master
| 2023-06-16T14:36:01.388219
| 2021-06-29T05:27:51
| 2021-06-29T05:27:51
| 380,875,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
from django import forms
from tarefas.models import Tarefas
class TarefasModelForms(forms.ModelForm):
class Meta:
model = Tarefas
fields = '__all__'
|
[
"jgjefersonluis@gmail.com"
] |
jgjefersonluis@gmail.com
|
117d079ed37845edbeb098e4e4257bb6044dda54
|
05f06a1b104526c5262289e663df855e1335f32a
|
/User/views.py
|
682030b507801c942c3b37a8305cfb6ddf3afb55
|
[] |
no_license
|
Harshvartak/E-commerce-DRF
|
784ebecce95730996a9553fef1f3a9218e942749
|
b2eea09d7f965b8e8c9397f636c7ea8703813539
|
refs/heads/master
| 2022-12-13T11:39:11.854643
| 2020-01-03T07:54:41
| 2020-01-03T07:54:41
| 229,832,815
| 0
| 0
| null | 2022-11-22T04:19:28
| 2019-12-23T22:41:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 714
|
py
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.authtoken.models import Token
from .serializers import RegistrationSerializer
@api_view(['POST', ])
def registration_view(request):
if request.method == 'POST':
serializer = RegistrationSerializer(data=request.data)
data = {}
if serializer.is_valid():
CustomUser = serializer.save()
data['response'] = 'successfully registered new user.'
data['username']= CustomUser.username
token = Token.objects.get(user=CustomUser).key
data['token']=token
else:
data = serializer.errors
return Response(data)
|
[
"vartak.harsh@gmail.com"
] |
vartak.harsh@gmail.com
|
8c68a5907b798fbc88de11c624c0c63918f8322b
|
46d13216824f95f29da7672bf83ae1128a835a97
|
/tests/unittests/test_user.py
|
4a07b07fb597d183b4ae04f5c989e43c5abc6294
|
[] |
no_license
|
lucascandidoff/crudzin
|
0a426b754d925e2990dd60cdd9cf2f90424c68b6
|
bad721c62fa5f7f9905724838dbc61113ccc25ec
|
refs/heads/master
| 2022-11-27T22:03:45.185391
| 2020-08-01T12:40:51
| 2020-08-01T12:40:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
from flask import url_for
from flask_base_tests_cases import TestFlaskBase
class TestUserBP(TestFlaskBase):
def test_api_deve_registrar_usuario_na_base(self):
user = {
'username': 'test',
'password': '1234'
}
esperado = {
'id': '1',
'username': 'test'
}
response = self.client.post(url_for('user.register'), json=user)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.json['username'], esperado['username'])
def test_api_nao_deve_registrar_usuario_na_base_quando_faltar_fields(self):
user = {
'username': 'test',
}
esperado = {'password': ['Missing data for required field.']}
response = self.client.post(url_for('user.register'), json=user)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.json, esperado)
|
[
"mendesxeduardo@gmail.com"
] |
mendesxeduardo@gmail.com
|
737252ca9ba41c369ac7335e2c5f7e8fcb7042d2
|
1721445a6635af8604a1f2693660bb0c4ae88ca8
|
/6/dest.py
|
4eec336a4587bfb3fd4f69becc8e2938e75848a9
|
[] |
no_license
|
nelhage/aoc2018
|
fdc01f557f9d9b82b533decaff64ebbe1909f9ba
|
c42a44ea2f86bf42ab4a0c9034f75a6827daf69e
|
refs/heads/master
| 2020-04-13T06:50:10.134072
| 2018-12-25T18:07:31
| 2018-12-25T18:07:31
| 163,032,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
import numpy as np
import sys
coords = [list(map(int, line.rstrip().split(", "))) for line in sys.stdin]
xs = np.array([x for (x, y) in coords])
ys = np.array([y for (x, y) in coords])
def closest(x, y):
ds = np.abs(xs - x) + np.abs(ys - y)
m = np.argmin(ds)
if np.sum(ds == ds[m]) == 1:
return m
return None
INF = 5*max(max(xs), max(ys))
inf = set()
for i, (x,y) in enumerate(coords):
if closest(INF, y) == i or \
closest(-INF, y) == i or \
closest(x, INF) == i or \
closest(x, -INF) == i:
inf.add(i)
area = [0]*len(coords)
for x in range(min(xs), max(xs)+1):
for y in range(min(ys), max(ys)+1):
c = closest(x, y)
if c:
area[c] += 1
fa = np.array(area)
fa[np.array(list(inf))] = 0
maxa = np.max(fa)
print("maxa={}".format(maxa))
MAXD = 10000
n = 0
for x in range(min(xs), max(xs)+1):
for y in range(min(ys), max(ys)+1):
d = np.sum(np.abs(xs - x) + np.abs(ys - y))
if d < MAXD:
n += 1
print("|d<{}|={}".format(MAXD, n))
|
[
"nelhage@nelhage.com"
] |
nelhage@nelhage.com
|
0ab99563f7ecd9e09c49f930b365411a52e51bb9
|
5c7f2ce11f440c1abc1e2052c1ec725fadb3e8c9
|
/ckanext/youckan/controllers/organization.py
|
0bf3b89e498d1fd7027313f5e49ceff6fa1450a8
|
[] |
no_license
|
morty/ckanext-youckan
|
e06f2877270ba215b3dbbbb5b8a8c21ab33f71f6
|
8303e94363805a49d3cf93ee5216c7135b22e481
|
refs/heads/master
| 2021-01-18T07:47:11.510353
| 2014-02-26T14:24:03
| 2014-02-26T14:24:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from ckan import model
from ckan.model import Group
from ckan.plugins import toolkit
from ckanext.youckan.controllers.base import YouckanBaseController
from ckanext.youckan.models import MembershipRequest
DB = model.meta.Session
log = logging.getLogger(__name__)
class YouckanOrganizationController(YouckanBaseController):
def membership_request(self, org_name):
'''Request membership for an organization'''
if not toolkit.request.method == 'POST':
raise toolkit.abort(400, 'Expected POST method')
user = toolkit.c.userobj
if not user:
raise toolkit.NotAuthorized('Membership request requires an user')
organization = Group.by_name(org_name)
comment = toolkit.request.params.get('comment')
membership_request = MembershipRequest(user, organization, comment)
DB.add(membership_request)
DB.commit()
membership_request.notify_admins()
return self.json_response({})
def membership_accept(self, request_id):
'''Accept a membership request'''
if not toolkit.request.method == 'POST':
raise toolkit.abort(400, 'Expected POST method')
user = toolkit.c.userobj
if not user:
raise toolkit.NotAuthorized('Membership validation requires an user')
membership_request = MembershipRequest.get(request_id)
membership = membership_request.accept(user)
return self.json_response(membership)
def membership_refuse(self, request_id):
'''Refuse a membership request'''
if not toolkit.request.method == 'POST':
raise toolkit.abort(400, 'Expected POST method')
user = toolkit.c.userobj
if not user:
raise toolkit.NotAuthorized('Membership validation requires an user')
comment = toolkit.request.params.get('comment')
membership_request = MembershipRequest.get(request_id)
membership_request.refuse(user, comment)
return self.json_response({})
|
[
"noirbizarre@gmail.com"
] |
noirbizarre@gmail.com
|
b029233ca5db7a28e56f90072761bd0020303006
|
e781909485258415cb01e2bb2f7aaaaa0b14eb3b
|
/trunk/pyinstaller-1.3/optparse.py
|
8d231fe52eed9349d83fc923a56d765d04b3f47f
|
[] |
no_license
|
BGCX067/fabriciols-svn-to-git
|
408cce031243125ed13c1b01deeb077c55519ac6
|
6fbd5eccdc8af831e3c7f3a313a5c300ab39b4aa
|
refs/heads/master
| 2016-09-01T08:53:02.311545
| 2015-12-28T14:39:38
| 2015-12-28T14:39:38
| 48,847,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
"""
optparse -- forward-compatibility wrapper for use with Python 2.2.x and
earlier. If you import from 'optparse' rather than 'optik', your code
will work on base Python 2.3 (and later), or on earlier Pythons with
Optik 1.4.1 or later installed.
"""
from optik import __version__, __all__
from optik import *
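# --- Editorial illustration, not part of the wrapper itself: a sketch of a
# separate caller script. Code written against the standard-library name works
# unchanged whether the real optparse or this Optik-backed shim is imported.
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-v", "--verbose", action="store_true", default=False)
options, args = parser.parse_args(["-v", "input.txt"])
print(options.verbose, args)  # True ['input.txt']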
|
[
"you@example.com"
] |
you@example.com
|
0a17c4a3a96f46c1b448dc93700c7c55373c4752
|
e9757274ddb8484e27590ff0cc3f24550776c6cc
|
/Solved/0072/0072.py
|
55dea79789686cbd455c67813dc7f05c68eda8de
|
[] |
no_license
|
Jinmin-Goh/LeetCode
|
948a9b3e77eb03507aad6f3c78640aa7f00e6ad5
|
d6e80b968032b08506c5b185f66d35c6ff1f8bb9
|
refs/heads/master
| 2020-09-22T10:22:18.443352
| 2020-09-06T06:34:12
| 2020-09-06T06:34:12
| 225,153,497
| 1
| 1
| null | 2020-01-29T15:16:53
| 2019-12-01T11:55:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
# Problem No.: 72
# Solver: Jinmin Goh
# Date: 20191228
# URL: https://leetcode.com/problems/edit-distance/
import sys
# good solution link: https://leetcode.com/problems/edit-distance/discuss/159295/Python-solutions-and-intuition
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
# top-down solution
self.dp = {}
ans = self.dpProcess(word1, word2, 0, 0)
return ans
def dpProcess(self, word1: str, word2: str, i: int, j: int):
        # if both strings are empty
if i == len(word1) and j == len(word2):
return 0
# if one string is empty
if i == len(word1):
return len(word2) - j
if j == len(word2):
return len(word1) - i
# memoization
if (i,j) not in self.dp:
temp = 0
if word1[i] == word2[j]:
temp = self.dpProcess(word1, word2, i + 1, j + 1)
else:
insert = 1 + self.dpProcess(word1, word2, i, j + 1)
delete = 1 + self.dpProcess(word1, word2, i + 1, j)
replace = 1 + self.dpProcess(word1, word2, i + 1, j + 1)
temp = min(insert, delete, replace)
self.dp[(i,j)] = temp
return self.dp[(i,j)]
# bottom-up solution
"""
def minDistance(self, word1: str, word2: str) -> int:
cnt_1 = len(word1)
cnt_2 = len(word2)
dp_table = [[0] * (cnt_2 + 1) for i in range(cnt_1 + 1)]
for i in range(cnt_1 + 1):
dp_table[i][0] = i
for i in range(cnt_2 + 1):
dp_table[0][i] = i
for i in range(1, cnt_1 + 1):
for j in range(1, cnt_2 + 1):
if word1[i - 1] == word2[j - 1]:
dp_table[i][j] = dp_table[i - 1][j - 1]
else:
dp_table[i][j] = 1 + min(dp_table[i][j - 1], dp_table[i - 1][j], dp_table[i - 1][j - 1])
return dp_table[cnt_1][cnt_2]
"""
|
[
"eric970901@gmail.com"
] |
eric970901@gmail.com
|
83a51b7834034e76de94905bbd610c06a4d17331
|
587dbdf730b6cc3e693efc5dca5d83d1dd35ee1a
|
/extra/exam/tencent/t4.py
|
a3655900a722fb09c95050c6251460968144bb67
|
[] |
no_license
|
Rivarrl/leetcode_python
|
8db2a15646d68e4d84ab263d8c3b6e38d8e3ea99
|
dbe8eb449e5b112a71bc1cd4eabfd138304de4a3
|
refs/heads/master
| 2021-06-17T15:21:28.321280
| 2021-03-11T07:28:19
| 2021-03-11T07:28:19
| 179,452,345
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
# -*- coding: utf-8 -*-
# ======================================
# @File : t4.py
# @Time : 2020/5/10 20:35
# @Author : Rivarrl
# ======================================
def f(n, board):
def dfs(l, r, x=0):
if l > r: return 0
if l == r: return board[l]
res = mi = min(board[l:r+1]) - x
i = l
for j in range(l, r+1):
if board[j] - x == mi:
res += dfs(i, j-1, mi)
i = j + 1
res += dfs(i, r, mi)
return min(res, r - l + 1)
return dfs(0, n-1)
if __name__ == '__main__':
n = int(input())
board = list(map(int, input().strip().split(' ')))
res = f(n, board)
print(res)
"""
5
2 2 1 2 2
"""
|
[
"1049793871@qq.com"
] |
1049793871@qq.com
|
90d367495fd4f5c2f02d3b37e2e010af2cff3534
|
c7faffdc340eb72737315efb017beee97cbd016d
|
/GetSharedWithAnyoneTeamDriveACLs.py
|
52723eee417c2f398f7db170df420c32af2f7da5
|
[] |
no_license
|
boussagf/GAM-Scripts
|
d4b1c840cc2d25e67960262758b2a59f844d452b
|
bc1e05ddd4e9c94a3fd78c4566fe1df13a8d2e68
|
refs/heads/master
| 2020-04-29T16:35:57.279614
| 2019-03-01T18:43:49
| 2019-03-01T18:43:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,418
|
py
|
#!/usr/bin/env python2
"""
# Purpose: For a Google Drive User(s), delete all drive file ACLs for Team Drive files shared with anyone
# Note: This script requires Advanced GAM with Team Drive support:
# https://github.com/taers232c/GAMADV-XTD, https://github.com/taers232c/GAMADV-XTD3
# Customize: Set FILE_NAME and ALT_FILE_NAME based on your environment. Set DOMAIN_LIST and DESIRED_ALLOWFILEDISCOVERY
# Usage:
# For all Team Drives, start at step 1; For Team Drives selected by user/group/OU, start at step 6
# All Team Drives
# 1: Get all Team Drives.
# $ gam redirect csv ./TeamDrives.csv print teamdrives fields id,name
# 2: Get ACLs for all Team Drives
# $ gam redirect csv ./TeamDriveACLs.csv multiprocess csv TeamDrives.csv gam print drivefileacls ~id
# 3: Customize GetTeamDriveOrganizers.py for this task:
# Set DOMAIN_LIST as required
# Set ONE_ORGANIZER = True
# Set SHOW_GROUP_ORGANIZERS = False
# Set SHOW_USER_ORGANIZERS = True
# 4: From that list of ACLs, output a CSV file with headers "id,name,organizers"
# that shows the organizers for each Team Drive
# $ python GetTeamDriveOrganizers.py TeamDriveACLs.csv TeamDrives.csv TeamDriveOrganizers.csv
# 5: Get ACLs for all team drive files; you can use permission matching to narrow the number of files listed; add to the end of the command line
# DESIRED_ALLOWFILEDISCOVERY = 'Any' - pm type anyone em
# DESIRED_ALLOWFILEDISCOVERY = 'True' - pm type anyone allowfilediscovery true em
# DESIRED_ALLOWFILEDISCOVERY = 'False' - pm type anyone allowfilediscovery false em
# $ gam redirect csv ./filelistperms.csv multiprocess csv TeamDriveOrganizers.csv gam user ~organizers print filelist select teamdriveid ~id fields teamdriveid,id,title,permissions
# Then go to step 10
# Selected Team Drives
# 6: If you want Team Drives for a specific set of organizers, replace <UserTypeEntity> with your user selection in the command below
# $ gam redirect csv ./AllTeamDrives.csv <UserTypeEntity> print teamdrives role organizer fields id,name
# 7: Customize DeleteDuplicateRows.py for this task:
# Set ID_FIELD = 'id'
# 8: Delete duplicate Team Drives (some may have multiple organizers).
# $ python DeleteDuplicateRows.py ./AllTeamDrives.csv ./TeamDrives.csv
# 9: Get ACLs for all team drive files; you can use permission matching to narrow the number of files listed; add to the end of the command line
# DESIRED_ALLOWFILEDISCOVERY = 'Any' - pm type anyone em
# DESIRED_ALLOWFILEDISCOVERY = 'True' - pm type anyone allowfilediscovery true em
# DESIRED_ALLOWFILEDISCOVERY = 'False' - pm type anyone allowfilediscovery false em
# $ gam redirect csv ./filelistperms.csv multiprocess csv TeamDrives.csv gam user ~User print filelist select teamdriveid ~id fields teamdriveid,id,title,permissions
# Common code
# 10: From that list of ACLs, output a CSV file with headers "Owner,driveFileId,driveFileTitle,permissionId,role,allowFileDiscovery"
# that lists the driveFileIds and permissionIds for all ACLs shared with anyone
# (n.b., driveFileTitle, role and allowFileDiscovery are not used in the next step, they are included for documentation purposes)
# $ python GetSharedWithAnyoneTeamDriveACLs.py filelistperms.csv deleteperms.csv
# 11: Inspect deleteperms.csv, verify that it makes sense and then proceed
# 12: Delete the ACLs
# $ gam csv deleteperms.csv gam user "~Owner" delete drivefileacl "~driveFileId" "~permissionId"
"""
import csv
import re
import sys
# For GAMADV-XTD/GAMADV-XTD3 with drive_v3_native_names = false
FILE_NAME = 'title'
ALT_FILE_NAME = 'name'
# For GAMADV-XTD/GAMADV-XTD3 with drive_v3_native_names = true
#FILE_NAME = 'name'
#ALT_FILE_NAME = 'title'
# Specify desired value of allowFileDiscovery field: True, False, Any (matches True and False)
DESIRED_ALLOWFILEDISCOVERY = 'Any'
QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'
PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
outputFile = open(sys.argv[2], 'wb')
else:
outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, ['Owner', 'driveFileId', 'driveFileTitle', 'permissionId', 'role', 'allowFileDiscovery'], lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
inputFile = open(sys.argv[1], 'rbU')
else:
inputFile = sys.stdin
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
for k, v in row.iteritems():
mg = PERMISSIONS_N_TYPE.match(k)
if mg and v == 'anyone':
permissions_N = mg.group(1)
allowFileDiscovery = row.get('permissions.{0}.allowFileDiscovery'.format(permissions_N), str(row.get('permissions.{0}.withLink'.format(permissions_N)) == 'False'))
if DESIRED_ALLOWFILEDISCOVERY in ('Any', allowFileDiscovery):
outputCSV.writerow({'Owner': row['Owner'],
'driveFileId': row['id'],
'driveFileTitle': row.get(FILE_NAME, row.get(ALT_FILE_NAME, 'Unknown')),
'permissionId': 'id:{0}'.format(row['permissions.{0}.id'.format(permissions_N)]),
'role': row['permissions.{0}.role'.format(permissions_N)],
'allowFileDiscovery': allowFileDiscovery})
if inputFile != sys.stdin:
inputFile.close()
if outputFile != sys.stdout:
outputFile.close()
|
[
"ross.scroggs@gmail.com"
] |
ross.scroggs@gmail.com
|
f439595cb39ef495936efad8712fa16400e44651
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Introduction_to_numerical_programming_using_Python_and_CPP_Beu/Ch04/Python/P04-SortComp0.py
|
75189e60cc98e52081bce1ec35954c3f6ea24a9d
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,186
|
py
|
# Comparison of operation counts for sorting methods
from random import *
#============================================================================
def BubbleSort(x, n):
#----------------------------------------------------------------------------
# Ascending sort of array x[1..n] by modified bubble sort
#----------------------------------------------------------------------------
global ncomp, nsave # no. of compares and saves
ipass = 0 # initialize pass counter
swap = 1 # initialize swap flag to enter loop
while (swap): # perform passes while swaps occur
ipass += 1 # increase pass counter
swap = 0 # initialize swap flag
for i in range(1,n-ipass+1): # loop over unsorted sublists
ncomp += 1 #------------------------------------------------------
if (x[i] > x[i+1]): # compare neighbors
nsave += 3 #---------------------------------------------------
xi = x[i]; x[i] = x[i+1]; x[i+1] = xi # swap neighbors
swap = 1 # set swap flag
#============================================================================
def InsertSort(x, n):
#----------------------------------------------------------------------------
# Ascending sort of array x[1..n] by direct insertion
#----------------------------------------------------------------------------
global ncomp, nsave # no. of compares and saves
for ipiv in range(2,n+1): # loop over pivots
nsave += 1 #---------------------------------------------------------
xpiv = x[ipiv] # save pivot to free its location
i = ipiv - 1 # initialize sublist counter
ncomp += 1 #---------------------------------------------------------
while ((i > 0) and (x[i] > xpiv)): # scan to the left of pivot
nsave += 1 #------------------------------------------------------
x[i+1] = x[i] # item > pivot: shift to the right
i -= 1
nsave += 1 #---------------------------------------------------------
x[i+1] = xpiv # insert pivot into last freed location
#============================================================================
def QuickSort(x, l, n):
#----------------------------------------------------------------------------
# Ascending sort of array x[l..n] by Quicksort
#----------------------------------------------------------------------------
global ncomp, nsave # no. of compares and saves
if (l >= n): return
# pivot = x[n]; create "<" and ">=" lists
m = l # upper index in "<"-list
for i in range(l,n): # scan entire list, excepting pivot
ncomp += 1 #---------------------------------------------------------
if (x[i] < x[n]): # compare current value with pivot
nsave += 3 #------------------------------------------------------
t = x[i]; x[i] = x[m]; x[m] = t # swap < value to end of "<"-list
m += 1 # extend "<"-list: increase upper index
nsave += 3 #------------------------------------------------------------
t = x[m]; x[m] = x[n]; x[n] = t # swap pivot between "<" and ">=" lists
QuickSort(x,l,m-1) # sort "<"-list
QuickSort(x,m+1,n) # sort ">="-list
# main
nf = 100 # scaling factor
np = 50 # number of plotting points
ns = nf * np # max. number of values to be sorted
x = [0]*(ns+1); x0 = [0]*(ns+1) # array to be sorted and copy
out = open("sort.txt","w") # open output file
out.write(" n Bubble "
"Insertion Quick\n")
out.write(" ncomp nsave ncomp"
" nsave ncomp nsave\n")
for ip in range(1,np+1):
n = nf * ip # number of values to be sorted
print("n = ",n)
for i in range(1,n+1): x0[i] = random() # list to be sorted
for i in range(1,n+1): x[i] = x0[i]
ncomp = 0; nsave = 0
BubbleSort(x,n)
out.write("{0:10d}{1:10d}{2:10d}".format(n,ncomp,nsave))
for i in range(1,n+1): x[i] = x0[i]
ncomp = 0; nsave = 0
InsertSort(x,n)
out.write("{0:10d}{1:10d}".format(ncomp,nsave))
for i in range(1,n+1): x[i] = x0[i]
ncomp = 0; nsave = 0
QuickSort(x,1,n)
out.write("{0:10d}{1:10d}\n".format(ncomp,nsave))
out.close()
|
[
"me@yomama.com"
] |
me@yomama.com
|
8a93acb3e5d67ecd8aa7a59639df91384b829756
|
8e0070b28f5377b94c50de2c979f92194a4ac88d
|
/tests/test_svg_colorful.py
|
6b5c35925b757675b537c35c42fe91fbd1853ceb
|
[
"BSD-3-Clause"
] |
permissive
|
AgentIvan/segno
|
c04ce887c3ee71ae0173d8e7653442fa16eea3b5
|
1033b9d21c6d82cf76f90927702f79599097ca1d
|
refs/heads/master
| 2023-07-26T04:30:52.640466
| 2021-08-30T21:06:42
| 2021-08-30T21:06:42
| 416,811,279
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,735
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
SVG related tests for multicolor support.
"""
from __future__ import absolute_import, unicode_literals
import io
import xml.etree.ElementTree as etree
import pytest
import segno
from segno import writers as colors
_SVG_NS = 'http://www.w3.org/2000/svg'
def _get_svg_el(root, name):
return root.find('{%s}%s' % (_SVG_NS, name))
def _get_group(root):
return _get_svg_el(root, 'g')
def _parse_xml(buff):
"""\
Parses XML and returns the root element.
"""
buff.seek(0)
return etree.parse(buff).getroot()
def test_merge_colors():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark='green', finder_dark='green',
dark_module='green')
green = colors._color_to_webcolor('green')
assert green in out.getvalue().decode('utf-8')
root = _parse_xml(out)
paths = root.findall('.//{%s}path' % _SVG_NS)
assert 1 == len(paths)
def test_merge_colors2():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark='green', finder_dark='green',
dark_module='blue', alignment_light='yellow',
quiet_zone='yellow')
green = colors._color_to_webcolor('green')
yellow = colors._color_to_webcolor('yellow')
blue = colors._color_to_webcolor('blue')
res = out.getvalue().decode('utf-8')
assert green in res
assert yellow in res
assert blue in res
root = _parse_xml(out)
paths = root.findall('.//{%s}path' % _SVG_NS)
assert 3 == len(paths)
assert not any(p.attrib.get('transform') for p in paths)
def test_nogroup():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark='green', finder_dark='green',
dark_module='blue', alignment_light='yellow', quiet_zone='yellow',
scale=1.0)
root = _parse_xml(out)
paths = root.findall('.//{%s}path' % _SVG_NS)
assert 3 == len(paths)
assert all(p.attrib.get('transform') is None for p in paths)
group = _get_group(root)
assert not group
def test_scale():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark='green', finder_dark='green',
dark_module='blue', alignment_light='yellow', quiet_zone='yellow',
scale=1.5)
root = _parse_xml(out)
paths = root.findall('.//{%s}path' % _SVG_NS)
assert 3 == len(paths)
assert all(p.attrib.get('transform') is None for p in paths)
group = _get_group(root)
assert group is not None
assert 'scale(1.5)' == group.attrib.get('transform')
if __name__ == '__main__':
pytest.main([__file__])
|
[
"heuer@semagia.com"
] |
heuer@semagia.com
|
5a1e56872d5452c032dc23c23ca192b621aa38e1
|
a296dc8f628111cab2b5b96cc22fb1b35f776a1d
|
/arbitrage/public_markets/paymiumeur.py
|
009e357742af71f7cb84d275af7b1d5d2d6a43d1
|
[
"MIT"
] |
permissive
|
caojun105/bitcoin-arbitrage
|
e67e9c6bfb4280f1f3cc79292844f2365a28f964
|
9677b77829005c928c7a0b738c5fd28ad65a6331
|
refs/heads/master
| 2021-01-18T20:47:54.084419
| 2016-07-08T07:31:56
| 2016-07-08T07:31:56
| 63,140,117
| 2
| 0
| null | 2016-07-12T08:22:20
| 2016-07-12T08:22:19
| null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
import urllib.request
import urllib.error
import urllib.parse
import json
from .market import Market
class PaymiumEUR(Market):
def __init__(self):
super(PaymiumEUR, self).__init__("EUR")
# bitcoin central maximum call / day = 5000
# keep 2500 for other operations
self.update_rate = 24 * 60 * 60 / 2500
def update_depth(self):
res = urllib.request.urlopen(
'https://paymium.com/api/data/eur/depth')
depth = json.loads(res.read().decode('utf8'))
self.depth = self.format_depth(depth)
def sort_and_format(self, l, reverse=False):
l.sort(key=lambda x: float(x['price']), reverse=reverse)
r = []
for i in l:
r.append({'price': float(i[
'price']), 'amount': float(i['amount'])})
return r
def format_depth(self, depth):
bids = self.sort_and_format(depth['bids'], True)
asks = self.sort_and_format(depth['asks'], False)
return {'asks': asks, 'bids': bids}
if __name__ == "__main__":
market = PaymiumEUR()
print(market.get_ticker())
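# --- Editorial sketch, not part of the original module (needs network access
# to the Paymium API; get_ticker above is assumed to come from the Market
# base class): fetch the order book and inspect the best quotes.
if __name__ == "__main__":
    m = PaymiumEUR()
    m.update_depth()
    print(m.depth['bids'][0], m.depth['asks'][0])  # best bid / best ask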
|
[
"maxime.biais@gmail.com"
] |
maxime.biais@gmail.com
|
647b30379f37a277c78f8b6e9cfad34e77374880
|
18da2e6f54184313f9d36107fd7d6a60acc3df92
|
/0x0F-python-object_relational_mapping/model_state.py
|
dc00ab58c9b0ba6aab8f141c4ec7660efe4e191c
|
[] |
no_license
|
decolnz/HS_higher_level_programming
|
a92311faed6ecdeeb6db81ebd8da330126d0e139
|
4ff903992945e7f021db9a7e7499dccd35395446
|
refs/heads/master
| 2022-07-09T12:34:57.847768
| 2020-05-15T06:45:39
| 2020-05-15T06:45:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
#!/usr/bin/python3
"""
Creating a class
"""
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class State(Base):
"""Class State"""
__tablename__ = 'states'
id = Column(Integer, autoincrement=True, unique=True, nullable=False,
primary_key=True)
name = Column(String(128), nullable=False)
|
[
"castrok.c11@hotmail.com"
] |
castrok.c11@hotmail.com
|
762f66561d8648aca6e0784e3ebccc8d36a7e4c0
|
3ead257c7b6413fd6f0831652f367b926f39f337
|
/data/cites/utils.py
|
1107ce533303981d9e6153144a29dfeba0b07994
|
[] |
no_license
|
syyunn/tex
|
88558b993f561f0370e9b8ad1500220f6c7578c9
|
6b5976c2f668d115ef2dbfa22cd16d1fac9c2b0c
|
refs/heads/master
| 2022-12-31T21:09:05.073341
| 2020-10-21T11:27:41
| 2020-10-21T11:27:41
| 300,886,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
def create_cooccurrence_matrix(cites):
"""Create co occurrence matrix from given list of sentences.
Returns:
- vocabs: dictionary of word counts
- co_occ_matrix_sparse: sparse co occurrence matrix
Example:
===========
sentences = ['I love nlp', 'I love to learn',
'nlp is future', 'nlp is cool']
vocabs,co_occ = create_cooccurrence_matrix(sentences)
df_co_occ = pd.DataFrame(co_occ.todense(),
index=vocabs.keys(),
columns = vocabs.keys())
df_co_occ = df_co_occ.sort_index()[sorted(vocabs.keys())]
df_co_occ.style.applymap(lambda x: 'color: red' if x>0 else '')
"""
import scipy.sparse as sparse
arts = {}
data = []
row = []
col = []
for cite in cites:
for pos, art in enumerate(cite):
i = arts.setdefault(art, len(arts))
start = 0
end = len(cite)
for pos2 in range(start, end):
if pos2 == pos:
continue
j = arts.setdefault(cite[pos2], len(arts))
data.append(1.)
row.append(i)
col.append(j)
cooccurrence_matrix_sparse = sparse.coo_matrix((data, (row, col)))
return arts, cooccurrence_matrix_sparse
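# --- Editorial usage sketch (the citation lists below are invented): each
# inner list is a group of co-cited articles, the returned dict maps article
# ids to matrix indices, and repeated co-citations accumulate in the matrix.
if __name__ == "__main__":
    cites = [["art_a", "art_b", "art_c"], ["art_a", "art_c"], ["art_b", "art_d"]]
    arts, co_occ = create_cooccurrence_matrix(cites)
    print(arts)              # article id -> matrix index
    print(co_occ.todense())  # symmetric matrix of co-citation counts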
|
[
"syyun@snu.ac.kr"
] |
syyun@snu.ac.kr
|
e18b3657bcf036d792f980438845cacd61d5bd51
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/shikhar/corl2019/mulitobj_cvae/multiobj_cvae2.py
|
23bef1e12a07541089ac824be0af1db3efcbb747
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,161
|
py
|
import rlkit.misc.hyperparameter as hyp
from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in
from rlkit.launchers.launcher_util import run_experiment
# from rlkit.torch.grill.launcher import grill_her_twin_sac_online_vae_full_experiment
from rlkit.torch.grill.launcher import *
import rlkit.torch.vae.vae_schedules as vae_schedules
from rlkit.torch.vae.conv_vae import imsize48_default_architecture
from rlkit.launchers.arglauncher import run_variants
from multiworld.envs.pygame.multiobject_pygame_env import Multiobj2DEnv
from rlkit.torch.vae.conditional_conv_vae import ConditionalConvVAE
from rlkit.torch.vae.vae_trainer import ConditionalConvVAETrainer
def experiment(variant):
full_experiment_variant_preprocess(variant)
train_vae_and_update_variant(variant)
if __name__ == "__main__":
variant = dict(
double_algo=False,
online_vae_exploration=False,
imsize=48,
init_camera=sawyer_init_camera_zoomed_in,
env_class=Multiobj2DEnv,
env_kwargs=dict(
render_onscreen=False,
ball_radius=1,
images_are_rgb=True,
show_goal=False,
),
grill_variant=dict(
save_video=True,
custom_goal_sampler='replay_buffer',
online_vae_trainer_kwargs=dict(
beta=20,
lr=1e-3,
),
save_video_period=100,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
vf_kwargs=dict(
hidden_sizes=[400, 300],
),
max_path_length=50,
algo_kwargs=dict(
batch_size=1024,
num_epochs=1000,
num_eval_steps_per_epoch=500,
num_expl_steps_per_train_loop=500,
num_trains_per_train_loop=1000,
min_num_steps_before_training=10000,
vae_training_schedule=vae_schedules.custom_schedule_2,
oracle_data=False,
vae_save_period=50,
parallel_vae_train=False,
),
twin_sac_trainer_kwargs=dict(
discount=0.99,
reward_scale=1,
soft_target_tau=1e-3,
target_update_period=1, # 1
use_automatic_entropy_tuning=True,
),
replay_buffer_kwargs=dict(
start_skew_epoch=10,
max_size=int(100000),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
exploration_rewards_type='None',
vae_priority_type='vae_prob',
priority_function_kwargs=dict(
sampling_method='importance_sampling',
decoder_distribution='gaussian_identity_variance',
num_latents_to_sample=10,
),
power=-1,
relabeling_goal_sampling_mode='vae_prior',
),
exploration_goal_sampling_mode='vae_prior',
evaluation_goal_sampling_mode='reset_of_env',
normalize=False,
render=False,
exploration_noise=0.0,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
),
algorithm='ONLINE-VAE-SAC-BERNOULLI',
),
train_vae_variant=dict(
representation_size=4,
beta=50,
num_epochs=2001,
dump_skew_debug_plots=False,
# decoder_activation='gaussian',
decoder_activation='sigmoid',
use_linear_dynamics=False,
generate_vae_dataset_kwargs=dict(
N=20000,
n_random_steps=10,
test_p=.9,
use_cached=False,
show=False,
oracle_dataset=False,
oracle_dataset_using_set_to_goal=False,
non_presampled_goal_img_is_garbage=False,
random_rollout_data=True,
conditional_vae_dataset=True,
save_trajectories=True,
),
vae_trainer_class=ConditionalConvVAETrainer,
vae_class=ConditionalConvVAE,
vae_kwargs=dict(
input_channels=3,
architecture=imsize48_default_architecture,
decoder_distribution='gaussian_identity_variance',
),
# TODO: why the redundancy?
algo_kwargs=dict(
start_skew_epoch=5000,
is_auto_encoder=False,
batch_size=64,
lr=1e-3,
skew_config=dict(
method='vae_prob',
power=0,
),
skew_dataset=False,
priority_function_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
sampling_method='importance_sampling',
# sampling_method='true_prior_sampling',
num_latents_to_sample=10,
),
use_parallel_dataloading=False,
),
save_period=25,
),
)
search_space = {
'seedid': range(1),
'train_vae_variant.representation_size': [2, 4, 8, 16],
'train_vae_variant.beta':[0, 0.5, 1]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|