blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
632f50ce657bd31338db5ba020bec2b0f1357596
|
6e155cd7444e69b719d129e9dcaed2b788d4359b
|
/shop/shop/celery.py
|
2582d795673aac826732cb8f19387b7702df0cf7
|
[] |
no_license
|
tishmanoni/My-store
|
0ac1beb26fd4c3176f90346b23b9e9c955e90729
|
79bec452be871089edd6415b00bd094dc6288443
|
refs/heads/master
| 2022-12-06T05:33:21.163835
| 2020-08-29T19:39:45
| 2020-08-29T19:39:45
| 291,334,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
"""Celery application bootstrap for the ``shop`` Django project."""
import os
from celery import Celery

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shop.settings')

app = Celery('shop')
# Read every CELERY_*-prefixed setting from the Django settings module.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Find tasks.py modules in all installed Django apps.
app.autodiscover_tasks()
|
[
"66375712+tishmanoni@users.noreply.github.com"
] |
66375712+tishmanoni@users.noreply.github.com
|
2ca74b87fb00d97fdb9b1cd2746f2e542e60938b
|
b65c1f6000af4ddeb7280e7d93bf861fbf1964bc
|
/contracts/tests/test_load_data.py
|
e385455a7533122a2a8978adbb1a3792d745a638
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
EricSchles/calc
|
ef00aaddfec010321867a8287db0a565dbb7985e
|
eaa1ab227a5a07f5f4f7d2c64a278977cd43cb18
|
refs/heads/develop
| 2021-01-25T14:33:58.124300
| 2017-10-11T19:29:20
| 2017-10-11T19:29:20
| 72,668,485
| 1
| 0
| null | 2016-11-02T18:17:57
| 2016-11-02T18:17:57
| null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
import pathlib
from django.core.management import call_command
from django.test import TestCase
from contracts.models import Contract
MY_DIR = pathlib.Path(__file__).resolve().parent
class LoadS70TestCase(TestCase):
    """Verify the ``load_data`` management command ingests the sample CSV."""

    # Sample fixture shipped in the repository's docs directory.
    sample_filename = MY_DIR.parent / 'docs' / 'hourly_prices_sample.csv'

    def test_loads_sample(self):
        call_command(
            'load_data',
            filename=self.sample_filename
        )
        # assertEquals is a deprecated alias; assertEqual is the canonical name.
        self.assertEqual(Contract.objects.count(), 79)
|
[
"varmaa@gmail.com"
] |
varmaa@gmail.com
|
dba58d500dc281d3b42ffe31ba813201ef1ff43f
|
e4abeab73f2aa2de037aa84d195dce986af5208a
|
/lmp/script/sample_from_dataset.py
|
758446f580908db34133a1f847dfbd2745eb7d72
|
[
"Beerware"
] |
permissive
|
france5289/language-model-playground
|
1792fc712bace3ca3e7a0b8b3ba4745b2d6c9b5c
|
02181561107dac13d52e411bc970e245277854d4
|
refs/heads/main
| 2023-08-07T01:59:56.928232
| 2021-09-22T06:57:28
| 2021-09-22T06:57:28
| 409,092,012
| 0
| 0
|
NOASSERTION
| 2021-09-22T06:39:53
| 2021-09-22T06:39:52
| null |
UTF-8
|
Python
| false
| false
| 2,896
|
py
|
r"""Sample dataset using index.
Tool for observing data point in specified dataset.
Use index to sample from dataset.
See Also
========
lmp.dset
All available dataset.
Examples
========
The following example sample index ``0`` from
:py:class:`lmp.dset.WikiText2Dset` ``train`` dataset.
.. code-block:: sh
python -m lmp.script.sample_from_dataset wikitext-2
The following example sample index ``1`` from
:py:class:`lmp.dset.WikiText2Dset` ``train`` dataset.
.. code-block:: sh
python -m lmp.script.sample_from_dataset wikitext-2 --idx 1
The following example sample index ``1`` from
:py:class:`lmp.dset.WikiText2Dset` ``test`` dataset.
.. code-block:: sh
python -m lmp.script.sample_from_dataset wikitext-2 --idx 1 --ver test
Use ``-h`` or ``--help`` options to get list of available dataset.
.. code-block:: sh
python -m lmp.script.sample_from_dataset -h
Use ``-h`` or ``--help`` options on specific dataset to get a list of available
versions.
.. code-block:: sh
python -m lmp.script.sample_from_dataset wikitext-2 -h
"""
import argparse
import lmp.util.dset
from lmp.dset import DSET_OPTS
def parse_arg() -> argparse.Namespace:
    r"""Parse command-line arguments.

    The first positional argument selects a dataset (``dset_name``); each
    dataset subcommand additionally accepts:

    --ver  Dataset version.  Defaults to the dataset's default version.
    --idx  Index of the sample to show.  Defaults to ``0``.

    Returns
    =======
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        'python -m lmp.script.sample_from_dataset',
        description='Sample dataset using index.',
    )
    # One subcommand per registered dataset.
    subparsers = parser.add_subparsers(dest='dset_name', required=True)
    for name, dset_cls in DSET_OPTS.items():
        sub = subparsers.add_parser(
            name,
            description=f'Sample {name} dataset using index.',
        )
        sub.add_argument(
            '--idx',
            default=0,
            help='Sample index.',
            type=int,
        )
        sub.add_argument(
            '--ver',
            default=None,
            help=f'Version of the {name} dataset. Defaults to {dset_cls.df_ver}.',
            choices=dset_cls.vers,
            type=str,
        )
    return parser.parse_args()
def main() -> None:
    r"""Script entry point: print one sample from the chosen dataset."""
    args = parse_arg()
    # Load the requested dataset version, then show the selected sample.
    dataset = lmp.util.dset.load(dset_name=args.dset_name, ver=args.ver)
    print(dataset[args.idx])
if __name__ == '__main__':
main()
|
[
"ProFatXuanAll@gmail.com"
] |
ProFatXuanAll@gmail.com
|
0d1cb7925a58261d9e23d04bfa835151026b290e
|
d968882c6bdecb2347307aea7381b9495911a0a6
|
/microconventions/type_conventions.py
|
743a0db4e8a8e19220b9f89b9415898b16077566
|
[] |
no_license
|
fagan2888/microconventions
|
a070bddf94c0788ed4ff3ab31941d0daccf30fd5
|
037f9fcc67caa28916c6b81f4742a68afaf296b0
|
refs/heads/master
| 2022-11-10T21:52:53.632179
| 2020-07-02T14:28:59
| 2020-07-02T14:28:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
"""Shared type aliases used across the package."""
from typing import List, Union, Any, Optional

# All list aliases permit None entries.
KeyList = List[Optional[str]]
NameList = List[Optional[str]]
Value = Union[str,int]
ValueList = List[Optional[Value]]
DelayList = List[Optional[int]]
|
[
"info@3za.org"
] |
info@3za.org
|
d38ad13d5b90a52d56ed6d9da5384a5f4df4d21f
|
746bf62ae3599f0d2dcd620ae37cd11370733cc3
|
/leetcode/spiralmatrixtwo.py
|
c0075822c99847054ebdbfc8e1a03cd68cd9c653
|
[] |
no_license
|
wanglinjie/coding
|
ec0e614343b39dc02191455165eb1a5c9e6747ce
|
350f28cad5ec384df476f6403cb7a7db419de329
|
refs/heads/master
| 2021-04-22T14:00:48.825959
| 2017-05-02T12:49:05
| 2017-05-02T12:49:05
| 48,011,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,106
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# date:20160711
class Solution(object):
    # NOTE: Python 2 code (xrange, integer '/'); kept as-is.
    def generateMatrix(self, n):
        """
        :type n: int
        :rtype: List[List[int]]
        Given an integer n, generate a square matrix filled with elements from 1 to n2 in spiral order.
        For example,
        Given n = 3,
        You should return the following matrix:
        [
         [ 1, 2, 3 ],
         [ 8, 9, 4 ],
         [ 7, 6, 5 ]
        ]
        """
        if not n:
            return []
        rows = n
        columns = n
        loop = 0
        # Number of concentric rings to fill; an odd n has a single centre cell
        # that counts as its own ring.  '/' is integer division in Python 2.
        if n & 0x1:
            loop = n / 2 + 1
        else:
            loop = n / 2
        # Why does creating the grid with the line below make matrix[1][2]=1
        # set the whole column?  Because [[0]*n]*n repeats the SAME row object.
        # matrix = [[0] * n] * n
        matrix = []
        for i in xrange(n):
            matrix.append([0] * n)
        number = 1
        for i in xrange(loop):
            # Walk ring i clockwise starting from its top-left corner (i, i).
            row = i
            column = i
            read_num = 0
            read_rows = rows - 2 * i
            read_columns = columns - 2 * i
            # Cell count of this ring: the full block when it is one row/column
            # thick, otherwise just the perimeter.
            if (read_rows == 1) or (read_columns == 1):
                read_num = read_rows * read_columns
            else:
                read_num = 2 * read_rows + 2 * (read_columns - 2)
            while read_num:
                read_num -= 1
                matrix[row][column] = number
                # print matrix
                # print row, column, number
                # print
                number += 1
                # Advance clockwise: right along the top, down the right edge,
                # left along the bottom, up the left edge.
                if (row == i) and (column < (columns - i - 1)):
                    column += 1
                elif (column == (columns - i - 1)) and (row < (rows - i - 1)):
                    row += 1
                elif (row == (rows - i - 1)) and (column > i):
                    column -= 1
                elif (column == i) and (row > i):
                    row -= 1
        return matrix
# Demonstration of the row-aliasing pitfall mentioned above (Python 2 prints).
n = 3
# so = Solution()
# print so.generateMatrix(n)
matrix = [[0] * n] * n  # all n rows are the SAME list object
# matrix = []
# for i in xrange(n):
#     matrix.append([0]*n)
# matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
print matrix
matrix[0][1] = 1  # appears to mutate every "row", since they are one object
print matrix
print matrix[0][2]
matrix[0][2] = 2
print matrix
|
[
"hitwhwlj@163.com"
] |
hitwhwlj@163.com
|
2541bc3717df13f38034e534423c96eec29b2d31
|
9cadeb694a677c4ad567d514eee042891c65eeaf
|
/apiServer/wsgi.py
|
64aabb0e27969ed54fed3cc2e2a79148e4e57375
|
[] |
no_license
|
epikjjh/Stock-Seeker
|
b8267fda13df6579f3883f66f94007d6ca11187a
|
934d97c0ceb89c1fcdfb469c1807d09c2671cc67
|
refs/heads/master
| 2022-12-22T23:38:24.947593
| 2020-09-22T11:50:56
| 2020-09-22T11:50:56
| 297,632,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for stockSeeker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiServer.settings')
application = get_wsgi_application()
|
[
"epikjjh@gmail.com"
] |
epikjjh@gmail.com
|
440bfdebbceb6eaef3277aca9941a759e42ae116
|
7beff965d7b0e6155d6d52b27d71c557421d5ada
|
/aoj/grl_7_a.py
|
830ad7ec1ba09ea22f3deb70e180d4910bd89f7e
|
[] |
no_license
|
uk-ar/competitive_programming
|
82a53a1007798843ac006b9c7d313826e6cb45c3
|
d2523cf303f47644cada3b03e9eed2349bdbe394
|
refs/heads/master
| 2023-03-28T13:20:07.728861
| 2021-03-30T20:25:55
| 2021-03-30T20:25:55
| 249,638,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
#!/usr/bin/env python3
# Input-reading templates kept from the author's scaffold:
# N,M = map(int,sys.stdin.readline().split())
# a = tuple(map(int,sys.stdin.readline().split())) # single line with multi param
# a = tuple(int(sys.stdin.readline()) for _ in range(N)) # multi line with single param
# a = tuple(tuple(map(int,sys.stdin.readline().rstrip().split())) for _ in range(N)) # multi line with multi param
# s = sys.stdin.readline().rstrip()
# N = int(sys.stdin.readline())
# INF = float("inf")
import sys,collections
sys.setrecursionlimit(100000)
INF = float("inf")

# Maximum bipartite matching solved as max flow with unit capacities.
# Node layout: source 0, left part 1..X, right part X+1..X+Y, sink V-1.
X,Y,E = map(int,sys.stdin.readline().split())
xy = tuple(tuple(map(int,sys.stdin.readline().rstrip().split())) for _ in range(E)) # multi line with multi param
#uvc = [[0,1,1],[0,2,3],[1,2,1],[2,3,2]]
#xy = [[0,0],[1,2],[2,2],[1,3]]
V = X+Y+2
# Unit-capacity edge left x -> right y for each input pair.
uvc = [[x+1,y+X+1,1] for x,y in xy]
for i in range(X):
    uvc.append([0,i+1,1])      # source -> each left node
for i in range(Y):
    uvc.append([X+i+1,V-1,1])  # each right node -> sink
# G holds residual capacities; mG accumulates flow actually pushed.
G = {i:{} for i in range(V)}
mG = {i:{} for i in range(V)}
for u,v,c in uvc:
    G[u][v] = c
    G[v][u] = 0 # reverse edge
    mG[u][v] = 0
    mG[v][u] = 0
# print(G)
# print(mG)
def dfs(current,flow):
    # Find one augmenting path from `current` to the sink (V-1) and push
    # `flow` along it.  Returns the amount pushed, or 0 if no path exists.
    if current == V-1:
        return flow
    visited.add(current)
    for nex,nex_c in G[current].items():
        if not nex in visited and nex_c != 0:
            f = dfs(nex,min(flow,nex_c))
            if f != 0:
                mG[current][nex] = mG[current][nex] + f
                G[current][nex] = G[current][nex] - f
                G[nex][current] = G[nex][current] + f
                return f
    return 0
visited = set()
# Ford-Fulkerson: keep augmenting until no path remains.
while dfs(0,INF) != 0:
    visited = set()
    pass
# Matching size = total flow leaving the source.
print(sum(mG[0].values()))
|
[
"yuuki.ari@gmail.com"
] |
yuuki.ari@gmail.com
|
d973a98d468f699d88ed22bda3be21818e1727e8
|
4c44c593048fa4e00fb0334209632a286886efd9
|
/import_template_supplierinfo/wizards/import_file.py
|
6e86c4502afe621620950784d04bcf17a2bff77f
|
[] |
no_license
|
treytux/trey-addons
|
0c3fec43c584d46bd299b4bca47dcc334bedca60
|
1cda42c0eae702684badce769f9ec053c59d6e42
|
refs/heads/12.0
| 2023-06-08T21:56:09.945084
| 2023-05-29T10:05:53
| 2023-05-29T10:05:53
| 114,281,765
| 19
| 49
| null | 2023-05-29T10:05:55
| 2017-12-14T18:10:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
###############################################################################
# For copyright and license notices, see __manifest__.py file in root directory
###############################################################################
import base64
import io
import logging
from odoo import models
_log = logging.getLogger(__name__)
try:
import pandas as pd
except (ImportError, IOError) as err:
_log.debug(err)
class ImportFile(models.TransientModel):
    _inherit = 'import.file'

    def dataframe_get(self):
        """Return the uploaded file as a pandas DataFrame.

        Handles only the supplierinfo import template here; every other
        template is delegated to the parent implementation.
        """
        self.ensure_one()
        if self.template_id.model_id.model == 'import.template.supplierinfo':
            # The upload is stored base64-encoded; decode into a memory buffer.
            buf = io.BytesIO()
            buf.write(base64.b64decode(self.file))
            ext = self.file_filename.split('.')[-1:][0]
            if ext in ['xlsx', 'xls']:
                df = pd.read_excel(
                    buf, engine='xlrd', encoding='utf-8', na_values=['NULL'],
                    converters={'name': str})
                # Normalise pandas NaN to None so callers see uniform nulls.
                return df.where((pd.notnull(df)), None)
        return super().dataframe_get()
|
[
"roberto@trey.es"
] |
roberto@trey.es
|
da88288f281baad769af1ccbf83b2777ed6a91a0
|
3f7c27ccd0ab1fcbd2583cf4b764b81bd27dd718
|
/apps/members/urls.py
|
03acc1cb09f082fefdc65dd6e430675e3a4ac2b6
|
[] |
no_license
|
adamtlord/foreverland
|
001ca1a91a3cc468405efb80fe7981e75b82021c
|
8206ddeeb8cfbd2752ef6fa9839424718cb96e07
|
refs/heads/master
| 2020-04-16T00:50:51.582008
| 2016-09-21T03:27:39
| 2016-09-21T03:27:39
| 11,668,672
| 0
| 0
| null | 2016-09-04T03:46:51
| 2013-07-25T19:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
from django.conf.urls import patterns, url
urlpatterns = patterns('members.views',
url(r'^$', 'list_members', {}, name='list_members'),
)
|
[
"adam.lord@gmail.com"
] |
adam.lord@gmail.com
|
269093e40a4014ea89ecd80de5f371b123bd4fa7
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/srsly/tests/cloudpickle/cloudpickle_file_test.py
|
02c568f8652ef647b699a151dff037527a6e8836
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a058ea411ee874062513e922cfd60cc7f362eda3000cc849fc6af9c828f1412b
size 3430
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
c3df0ac58a6da679590d7b4d309dd0b86190657c
|
863a1f5091f1faad2beaf2a6037e3a5c0ebdc194
|
/Backuper.glyphsPlugin/Contents/Resources/plugin.py
|
8a21cdbee8b408df2b4ca9d675f6a38770426e11
|
[] |
no_license
|
schriftgestalt/Backuper
|
e65f08ec016770564131c05dbd888fe6841c6612
|
2b738600c4a8cb288184ae2c216bcfcbf64e266b
|
refs/heads/master
| 2023-07-17T19:50:00.265070
| 2021-09-01T21:38:34
| 2021-09-01T21:38:34
| 109,949,694
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
# encoding: utf-8
###########################################################################################################
#
#
# General Plugin
#
# Read the docs:
# https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/General%20Plugin
#
#
###########################################################################################################
from __future__ import print_function
import objc
from GlyphsApp import *
from GlyphsApp.plugins import *
from Foundation import NSFileManager
import os
class Backuper(GeneralPlugin):
    """Glyphs plugin: snapshot a document on open when it was written by an
    older app build than the one currently running."""

    @objc.python_method
    def start(self):
        # Run doBackup_ every time a document is opened.
        Glyphs.addCallback(self.doBackup_, DOCUMENTOPENED)

    def doBackup_(self, sender):
        document = sender.object()
        if document.fileURL() is None:
            # Unsaved document: nothing on disk to back up.
            return
        importedVersion = document.valueForKey_("importedVersion")
        # `!= None` replaced with the idiomatic identity check; only back up
        # files last saved by an older build than the running app.
        if importedVersion is not None and int(Glyphs.buildNumber) > int(importedVersion):
            documentPath = document.fileURL().path()
            fileName = os.path.basename(documentPath)
            # Local names fixed from the original's "bachup" typo.
            backupFolder = os.path.join(os.path.dirname(documentPath), "Backup")
            backupPath = os.path.join(backupFolder, importedVersion + "_" + fileName)
            fileManager = NSFileManager.defaultManager()
            if fileManager.fileExistsAtPath_isDirectory_(backupFolder, None) == (False, False):
                if not fileManager.createDirectoryAtPath_withIntermediateDirectories_attributes_error_(backupFolder, True, None, None):
                    print("Could not make backup folder")
            if fileManager.isReadableFileAtPath_(documentPath):
                NSFileManager.defaultManager().copyItemAtPath_toPath_error_(documentPath, backupPath, None)

    @objc.python_method
    def __file__(self):
        """Please leave this method unchanged"""
        return __file__
|
[
"georg.seifert@mac.com"
] |
georg.seifert@mac.com
|
ffa39f22831b11734d04b3e3eea7856437400115
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0401_0450/LeetCode422_ValidWordSquare.py
|
ee694477735ad1d5c38aa096a5f0bfdceae3713d
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
'''
Created on Apr 13, 2017
@author: MT
'''
c_ Solution(o..
___ validWordSquare words
__ n.. words: r.. F..
___ i, word1 __ e..(words
word2 ''
___ j __ r..(l..(word1:
__ j >_ l..(words
r.. F..
__ i >_ l..(words[j]
r.. F..
word2 += words[j][i]
__ word1 !_ word2:
r.. F..
r.. T..
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
23936645c5429dbbbaad5e2fbb69f5df836ab631
|
dd4d1a61ec680a86d4b569490bf2a898ea0d7557
|
/appengine/predator/common/model/chrome_crash_analysis.py
|
4473ea72c94ffcc660fa5ff6418f3923faf801aa
|
[
"BSD-3-Clause"
] |
permissive
|
mcgreevy/chromium-infra
|
f1a68914b47bcbe3cd8a424f43741dd74fedddf4
|
09064105713603f7bf75c772e8354800a1bfa256
|
refs/heads/master
| 2022-10-29T23:21:46.894543
| 2017-05-16T06:22:50
| 2017-05-16T06:22:50
| 91,423,078
| 1
| 1
|
BSD-3-Clause
| 2022-10-01T18:48:03
| 2017-05-16T06:23:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from common.model.crash_analysis import CrashAnalysis
class ChromeCrashAnalysis(CrashAnalysis):  # pylint: disable=W0223
    """Represents an analysis of a Chrome Crash (Cracas or Fracas)."""
    # Customized properties for Fracas crash.
    historical_metadata = ndb.JsonProperty(indexed=False)
    channel = ndb.StringProperty(indexed=False)

    def Reset(self):
        # Clear base-class state first, then the Chrome-specific fields.
        super(ChromeCrashAnalysis, self).Reset()
        self.historical_metadata = None
        self.channel = None

    def Initialize(self, crash_data):
        """(Re)Initializes a CrashAnalysis ndb.Model from ``ChromeCrashData``."""
        super(ChromeCrashAnalysis, self).Initialize(crash_data)
        self.channel = crash_data.channel
        self.historical_metadata = crash_data.historical_metadata

    @property
    def customized_data(self):
        # The Chrome-specific fields exposed as a plain dict.
        return {'historical_metadata': self.historical_metadata,
                'channel': self.channel}
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
d926c78d9ca4a0ffd80d8aefc3bac5797f7db7a1
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_LinearTrend_BestCycle_AR.py
|
b8625fb2fc2806fe6e615c6b2e4052c583dac9c1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
# Build one PyAF control model over the ozone test data:
# BoxCox transform + linear trend + best-cycle + AR residual model.
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod

testmod.build_model( ['BoxCox'] , ['LinearTrend'] , ['BestCycle'] , ['AR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
38c15c39a97e7ab3d51118f6386f186dda7696d8
|
a0f1bfea522d5917ae6f18d3a4ab980870feac77
|
/modules/hs/analysis/instruction.py
|
9c6896ba3c8d225b4552a2b47164300ff9cdddce
|
[
"MIT"
] |
permissive
|
sinsai/Sahana_eden
|
1d9768d19266010caf2753b66d17925fe708007a
|
798688dcf206fc81d586d9af1c57a99e6f1573c5
|
refs/heads/master
| 2020-06-07T21:10:17.416723
| 2011-06-10T08:57:23
| 2011-06-10T08:57:23
| 1,659,383
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,114
|
py
|
"""
Healthscapes Geolytics Module
@author: Nico Preston <nicopresto@gmail.com>
@author: Colin Burreson <kasapo@gmail.com>
@author: Zack Krejci <zack.krejci@gmail.com>
@copyright: (c) 2010 Healthscapes
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import enum
from utils import keygen
class Instruction:
    """A single instruction record: an operation plus its operands.

    The constructor stores its arguments verbatim; interpretation of the
    fields is left to the caller.
    """

    def __init__(self, mode, procedure, dst, *args):
        # Keep all operands exactly as supplied; extra positionals become
        # the (tuple-valued) args attribute.
        self.mode, self.procedure = mode, procedure
        self.dst, self.args = dst, args
|
[
"fran@aidiq.com"
] |
fran@aidiq.com
|
a794b38e5b1c9bc25dfef36a9d955d9cf54a7d8b
|
2f0aa66e14c6595289f6a0de2bdf71e9922052a7
|
/nextApi/user/migrations/0003_auto_20200818_2008.py
|
6d6e0a8b8d3ab39d197ff070024c08b0dd3e56ff
|
[] |
no_license
|
aimethierry/NextApi
|
8f83a2b0f499fdf5118eb930baa051584cfd9aa5
|
90884ee6d900ce71116b40276dda0e97bec0b521
|
refs/heads/master
| 2022-12-11T09:03:54.981284
| 2020-09-19T12:40:36
| 2020-09-19T12:40:36
| 296,866,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
# Generated by Django 3.1 on 2020-08-18 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add email/password/username columns to CompanyAcc; relax company."""

    dependencies = [
        ('user', '0002_companyacc'),
    ]

    operations = [
        migrations.AddField(
            model_name='companyacc',
            name='email',
            field=models.CharField(blank=True, max_length=120, null=True),
        ),
        migrations.AddField(
            model_name='companyacc',
            name='password',
            field=models.CharField(blank=True, max_length=120, null=True),
        ),
        # NOTE(review): 'usesrname' looks like a typo for 'username', but this
        # migration is part of applied history — fixing it requires a new
        # RenameField migration, not an edit here.
        migrations.AddField(
            model_name='companyacc',
            name='usesrname',
            field=models.CharField(blank=True, max_length=120, null=True),
        ),
        migrations.AlterField(
            model_name='companyacc',
            name='company',
            field=models.CharField(blank=True, max_length=120, null=True),
        ),
    ]
|
[
"aime.thierry97@gmail.com"
] |
aime.thierry97@gmail.com
|
803331d02c81b15dd9eeeb88fb58de707d4c9897
|
287c663c97e7840239794fbe84ce285773b72985
|
/virtual/bin/mako-render
|
ff06d7a97fb70f3f26a64dd2325bf6138e8c7d31
|
[
"MIT"
] |
permissive
|
mzazakeith/flask-blog
|
ea8e5b2da9a581eb026564c1b9e500fa0532ee88
|
2833404cc5e96ffdbfb767f35b9caf2bdcce7997
|
refs/heads/master
| 2020-03-21T21:24:57.296282
| 2018-07-02T20:20:24
| 2018-07-02T20:20:24
| 139,062,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
#!/home/mzaza/Desktop/flask_blog/virtual/bin/python3.6
# -*- coding: utf-8 -*-
# Console-script shim generated for the mako-render entry point.
import re
import sys
from mako.cmd import cmdline

if __name__ == '__main__':
    # Strip the setuptools wrapper suffix (-script.py / .exe) from argv[0]
    # so mako sees the bare program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(cmdline())
|
[
"mzazakeith@gmail.com"
] |
mzazakeith@gmail.com
|
|
e650793603ccf2cefac008d1c76270721b8d1367
|
57061e611a549f9afe4f5201730a85d76a7e505f
|
/setup.py
|
5323723ba2f8215b16c769148b156602f63760fc
|
[
"MIT"
] |
permissive
|
briostack/chrome-printtopdf
|
35ee5da836878107f7586a7e61f1adf6b7d8c4cb
|
6b4f91ab50cbc3570c27cfd8511f3964387c356e
|
refs/heads/master
| 2022-03-08T14:58:51.843698
| 2022-03-01T22:32:14
| 2022-03-01T22:32:14
| 94,803,813
| 1
| 0
| null | 2017-06-19T17:38:03
| 2017-06-19T17:38:03
| null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
#!/usr/bin/env python
"""Packaging script for chrome-printtopdf."""
from __future__ import print_function
import os
import codecs
from setuptools import setup, find_packages


def read(*parts):
    """Read a file located relative to this setup.py and return its text."""
    filename = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(filename, encoding='utf-8') as fp:
        return fp.read()


setup(
    name="chrome-printtopdf",
    version='0.0.2',
    url='https://github.com/stefanw/chrome-printtopdf',
    license='MIT',
    description="Get PDFs from URLs using chrome",
    long_description=read('README.md'),
    author='Stefan Wehrmeyer',
    author_email='mail@stefanwehrmeyer.com',
    packages=find_packages(),
    install_requires=['aiohttp'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Utilities',
    ]
)
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
4020c3b3e7e7165b0196c1585615c9b95e9e33fd
|
11211916f39b9d98027b64d778e52743d0c519a1
|
/L3/tmp/assignments/outline.py
|
e7bba9d3d9acf2c3e969914fff659f06fe1cd781
|
[] |
no_license
|
mantasruigys3000/Group-Task
|
87baf1bc2747323c0508f6f32ef733c3f4b50978
|
6790d74ae7fa0fe6b13733efcd75a9f4aca70ab0
|
refs/heads/master
| 2020-04-23T20:54:09.696659
| 2019-02-22T01:29:53
| 2019-02-22T01:29:53
| 171,454,102
| 0
| 0
| null | 2019-02-19T10:31:09
| 2019-02-19T10:31:08
| null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
Amet velit etincidunt porro est quaerat etincidunt.
Velit ut velit dolor consectetur est dolor.
Voluptatem quisquam quiquia quisquam sed ut.
Non voluptatem voluptatem etincidunt.
Username: Marcus
Password: titten
Dolorem velit labore velit amet ipsum ipsum adipisci.
Quaerat labore est dolore quaerat aliquam.
Amet sit consectetur labore sed.
|
[
"mantasruigys101@gmail.com"
] |
mantasruigys101@gmail.com
|
d205eeabe1230372e52454c55429cccf3659b362
|
614cad3588af9c0e51e0bb98963075e3195e92f5
|
/utils/completeness.py
|
bd6b0845fa36983abbad225f1ed473385db12e64
|
[] |
no_license
|
dragonlong/haoi-pose
|
2810dae7f9afd0a26b3d0a5962fd9ae8a5abac58
|
43388efd911feecde588b27a753de353b8e28265
|
refs/heads/master
| 2023-07-01T14:18:29.029484
| 2021-08-10T10:57:42
| 2021-08-10T10:57:42
| 294,602,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,738
|
py
|
import argparse
import os
import torch
import numpy as np
from scipy.spatial import cKDTree as KDTree
import trimesh
import glob
from joblib import Parallel, delayed
def directed_hausdorff(point_cloud1: torch.Tensor, point_cloud2: torch.Tensor, reduce_mean=True):
    """One-sided (directed) Hausdorff distance from point_cloud1 to point_cloud2.

    :param point_cloud1: (B, 3, N)
    :param point_cloud2: (B, 3, M)
    :param reduce_mean: average over the batch when True
    :return: scalar if reduce_mean else (B,) tensor of A -> B distances
    """
    # Broadcast to (B, 3, N, M) pairwise differences without materialising
    # explicit repeats, then reduce to Euclidean distances (B, N, M).
    diff = point_cloud1.unsqueeze(3) - point_cloud2.unsqueeze(2)
    pairwise = torch.sqrt((diff ** 2).sum(dim=1))
    # Each cloud-1 point's distance to its nearest cloud-2 neighbour ...
    nearest, _ = pairwise.min(dim=2)
    # ... and the worst such distance is the directed Hausdorff value.
    result, _ = nearest.max(dim=1)
    if reduce_mean:
        result = result.mean()
    return result
def nn_distance(query_points, ref_points):
    """Distance from each query point to its nearest reference point."""
    # Build the KD-tree once over the reference set; query() returns
    # (distances, indices) and only the distances are needed here.
    tree = KDTree(ref_points)
    distances, _ = tree.query(query_points)
    return distances
def completeness(query_points, ref_points, thres=0.03):
    """Fraction of query points whose nearest reference point is within thres."""
    dists = nn_distance(query_points, ref_points)
    # Mean of the boolean mask equals count-below-threshold / total.
    return np.mean(dists < thres)
def process_one(shape_dir):
    """Evaluate one shape directory.

    Returns (mean completeness over generated samples,
             mean unidirectional Hausdorff distance partial -> generated).
    """
    # load generated shape
    pc_paths = glob.glob(os.path.join(shape_dir, "fake-z*.ply"))
    pc_paths = sorted(pc_paths)
    gen_pcs = []
    for path in pc_paths:
        sample_pts = trimesh.load(path)
        sample_pts = np.asarray(sample_pts.vertices)
        # sample_pts = torch.tensor(sample_pts.vertices).transpose(1, 0)
        gen_pcs.append(sample_pts)
    # load partial input
    partial_path = os.path.join(shape_dir, "raw.ply")
    partial_pc = trimesh.load(partial_path)
    partial_pc = np.asarray(partial_pc.vertices)
    # partial_pc = torch.tensor(partial_pc.vertices).transpose(1, 0)
    # completeness percentage, averaged over all generated samples
    gen_comp = 0
    for sample_pts in gen_pcs:
        comp = completeness(partial_pc, sample_pts)
        gen_comp += comp
    gen_comp = gen_comp / len(gen_pcs)
    # unidirectional hausdorff: stack samples to one batch, tile the partial
    # cloud to match, then average the batched distances
    gen_pcs = [torch.tensor(pc).transpose(1, 0) for pc in gen_pcs]
    gen_pcs = torch.stack(gen_pcs, dim=0)
    partial_pc = torch.tensor(partial_pc).transpose(1, 0)
    partial_pc = partial_pc.unsqueeze(0).repeat((gen_pcs.size(0), 1, 1))
    hausdorff = directed_hausdorff(partial_pc, gen_pcs, reduce_mean=True).item()
    return gen_comp, hausdorff
def func(args):
    """Run process_one over every shape dir under args.src in parallel.

    Returns (mean Hausdorff, mean completeness) across all shapes.
    """
    shape_names = sorted(os.listdir(args.src))
    all_shape_dir = [os.path.join(args.src, name) for name in shape_names]
    # joblib fans the per-shape work out over args.process workers.
    results = Parallel(n_jobs=args.process, verbose=2)(delayed(process_one)(path) for path in all_shape_dir)
    res_comp, res_hausdorff = zip(*results)
    res_comp = np.mean(res_comp)
    res_hausdorff = np.mean(res_hausdorff)
    return res_hausdorff, res_comp
def main():
    """CLI entry point: evaluate args.src and append results to a report file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--src", type=str)
    parser.add_argument("-p", "--process", type=int, default=10)  # worker count
    parser.add_argument("-o", "--output", type=str)
    args = parser.parse_args()
    if args.output is None:
        # Default report path derived from the source directory name.
        args.output = args.src + '-eval_UHD.txt'
    res_hausdorff, res_comp = func(args)
    print("Avg Unidirectional Hausdorff Distance: {}".format(res_hausdorff))
    print("Avg Completeness: {}".format(res_comp))
    # Append so repeated runs accumulate in one report file.
    with open(args.output, "a") as fp:
        fp.write("SRC: {}\n".format(args.src))
        fp.write("Avg Unidirectional Hausdorff Distance: {}\n".format(res_hausdorff))
        fp.write("Avg Completeness: {}\n".format(res_comp))
if __name__ == '__main__':
main()
|
[
"lxiaol9@vt.edu"
] |
lxiaol9@vt.edu
|
d1e535da617f09a037448c3df23b3b182bcedd53
|
c0578b14ebaef889ffc75551ebcc7e5c80b6069e
|
/src/811_subdomain_visit_count.py
|
87eb66cd63973d2c0ce2d010c3afb1f86145ce1f
|
[] |
no_license
|
BrianQcq/LeetCode
|
88ee122aa2b358c61d6980c159008e8ccac6cc8c
|
127ca7d82fa15214da8d5e9fbc461831cdb6b60b
|
refs/heads/master
| 2020-06-10T04:20:33.798787
| 2019-11-12T07:56:58
| 2019-11-12T07:56:58
| 193,580,067
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
class Solution(object):
    def subdomainVisit(self, cpdomains):
        """Count visits for every domain and all of its parent domains.

        :param cpdomains: list of "count domain" strings,
            e.g. "9001 discuss.leetcode.com"
        :return: list of "count domain" strings, one per (sub)domain seen
        """
        counts = {}
        for entry in cpdomains:
            n, domain = entry.split()
            n = int(n)
            parts = domain.split('.')
            # Every dotted suffix is a (parent) domain that shares the visits.
            for i in range(len(parts)):
                suffix = '.'.join(parts[i:])
                # Single lookup via get() instead of the original
                # `d[k] = d[k] + n if k in d else n` double lookup.
                counts[suffix] = counts.get(suffix, 0) + n
        return [str(counts[dom]) + ' ' + dom for dom in counts]
|
[
"qiuchuanqin@gmail.com"
] |
qiuchuanqin@gmail.com
|
40b606a75f2a3ea6ee7f290d627b798e157e9894
|
2b31366107bd56244564c196c852f39ff024e278
|
/example.py
|
095c2818d3c45494ec74d905b086705256aa66a9
|
[
"BSD-3-Clause"
] |
permissive
|
toastdriven/pubsubittyhub
|
444a7b0d5b26abf0a1cd820d3d57a1d92346a4c4
|
8d3a0b135b0a284f52234c06cfc586cc5e6f5c6d
|
refs/heads/master
| 2020-05-05T01:39:51.073435
| 2009-12-17T09:22:31
| 2009-12-17T09:22:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
# Python 2 smoke-test script: exercises the pubsub server on localhost:8080.
import urllib2
import sys
from urllib import urlencode
try:
    import json
except ImportError:
    # Pre-2.6 fallback: the external simplejson package has the same API.
    import simplejson as json

print 'Testing index...'
content = urllib2.urlopen('http://localhost:8080/').read()

print 'Creating a channel...'
# A POST with an empty body creates a channel; the JSON response carries its id.
content = urllib2.urlopen('http://localhost:8080/channels', data={}).read()
print content
channel_id = json.loads(content)['id']

print "Adding subscriber to channel '%s'..." % channel_id
# sys.argv[1] is the callback URL the subscriber should be notified at.
body = urlencode({'data': json.dumps({'channel': channel_id, 'url': sys.argv[1]})})
content = urllib2.urlopen('http://localhost:8080/subscribers', data=body).read()
print content

print "Posting message to channel '%s'..." % channel_id
body = urlencode({'data': json.dumps({'channel': channel_id, 'message': 'O HAI'})})
content = urllib2.urlopen('http://localhost:8080/messages', data=body).read()
print content
|
[
"daniel@toastdriven.com"
] |
daniel@toastdriven.com
|
ee66c9dd4a0d630c6ecb661c22a3acf967691125
|
58ce8a45d03ec24b89e7502f149bef42d77ad777
|
/tests/test_models_artist.py
|
96a9afac325c6d5076dbf3cec399a9ae628b3fc7
|
[
"MIT"
] |
permissive
|
AndyTempel/spotify.py
|
db9ba8523d6dbd9bf233f963ea04fac4bf555d5e
|
d5a18ee59ddffd9026b36f510b45b4cc391ac557
|
refs/heads/master
| 2022-12-12T14:46:41.780249
| 2020-08-28T23:35:09
| 2020-08-28T23:35:09
| 291,162,036
| 0
| 0
|
MIT
| 2020-08-28T23:02:23
| 2020-08-28T23:02:22
| null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
import asyncio
import unittest
from types import ModuleType
# Shared test fixtures: SPOTIFY_* credentials, TEST_ARTISTS,
# async_with_client and async_chain all come from this star import.
from common import *


class TestArtist(unittest.TestCase):
    """Smoke-test every artist-related endpoint of the client."""

    @async_with_client(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET)
    async def test_artist(self, *, client):
        # For each known artist URI, fetch the artist and exercise all of
        # its sub-queries; the test passes if none of the calls raises.
        for artist_uri in TEST_ARTISTS:
            artist = await client.get_artist(artist_uri)
            # NOTE(review): async_chain presumably awaits the listed
            # coroutines — confirm its semantics in the common helpers.
            await async_chain([
                artist.get_albums(),
                artist.get_all_albums(),
                artist.total_albums(),
                artist.top_tracks(),
                artist.related_artists()
            ])


if __name__ == '__main__':
    unittest.main()
|
[
"m3nta1@yahoo.com"
] |
m3nta1@yahoo.com
|
b2763a3a3c9318b24e36592eed8791533faf27d4
|
4786216d2a8e9221cc3624366152f47ae513e5c7
|
/北京房屋交易/00.py
|
3738ce39b9fbe67fc5d1c47c31d9d290e2cc619a
|
[] |
no_license
|
injuredangel/-
|
b6a2502ee026320b96947d41c223edebe3ec65cc
|
7988c6aa5e825504ff59b006c37d4383b3bb1da8
|
refs/heads/master
| 2020-05-25T02:21:15.654253
| 2019-05-20T06:27:42
| 2019-05-20T06:27:42
| 187,575,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
"""Fetch the Beijing housing-transaction listing page and print its HTML."""
import requests
from bs4 import BeautifulSoup

# Listing page of the Beijing housing authority (房屋交易).
url = 'http://www.bjjs.gov.cn/bjjs/fwgl/fdcjy/fwjy/index.shtml'

# Browser-like headers copied from a real Chrome session so the site
# serves the normal page instead of rejecting the script.
headers = {
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding':'gzip, deflate',
    'Accept-Language':'zh-CN,zh;q=0.9',
    'Cache-Control':'max-age=0',
    'Connection':'keep-alive',
    'Cookie':'wdcid=55e47ea030f84764; _gscu_1677760547=4060476218oivg24; _gscbrs_1677760547=1; Hm_lvt_9ac0f18d7ef56c69aaf41ca783fcb10c=1540604763,1540621692; wdlast=1540624935; _gscs_1677760547=t406249357bbz3224|pv:1; Hm_lpvt_9ac0f18d7ef56c69aaf41ca783fcb10c=1540624935',
    'Host':'www.bjjs.gov.cn',
    'Upgrade-Insecure-Requests':'1',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
}

reply = requests.get(url=url, headers=headers)
response = reply.text
print(response)
# html_doc = BeautifulSoup(response,'lxml')
|
[
"you@example.com"
] |
you@example.com
|
5e15489107b3c51fb2cfad091143fbf0e6ceb0fc
|
9009ad47bc1d6adf8ee6d0f2f2b3125dea44c0aa
|
/cf-540-a.py
|
44dd74a1dc85a6de815348507461f004dcdbb3da
|
[] |
no_license
|
luctivud/Coding-Trash
|
42e880624f39a826bcaab9b6194add2c9b3d71fc
|
35422253f6169cc98e099bf83c650b1fb3acdb75
|
refs/heads/master
| 2022-12-12T00:20:49.630749
| 2020-09-12T17:38:30
| 2020-09-12T17:38:30
| 241,000,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
# JAI SHREE RAM
import math
import sys
from collections import *
from functools import reduce

# sys.setrecursionlimit(10**6)


def get_ints():
    """Read one line of whitespace-separated integers (as a map object)."""
    return map(int, input().strip().split())


def get_list():
    """Read one line of integers into a list."""
    return list(get_ints())


def get_string():
    """Read one line and split it into tokens."""
    return list(input().strip().split())


def printxsp(*args):
    """print() with no trailing newline."""
    return print(*args, end="")


def printsp(*args):
    """print() that ends with a single space instead of a newline."""
    return print(*args, end=" ")


UGLYMOD = int(1e9) + 7
SEXYMOD = 998244353
MAXN = int(1e5)

# sys.stdin=open("input.txt","r");sys.stdout=open("output.txt","w")
# for _testcases_ in range(int(input())):

# Codeforces 540A: n digit wheels show string s and must be turned to
# match t; one move changes a digit by +-1 (wrapping 0<->9), so a wheel
# costs min(|a-b|, 10-|a-b|) moves.
n = int(input())
s = input()
t = input()
total = 0
for idx in range(n):
    gap = abs(int(s[idx]) - int(t[idx]))
    total += min(gap, 10 - gap)
print(total)
'''
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
THE LOGIC AND APPROACH IS MINE @luctivud ( UDIT GUPTA )
Link may be copy-pasted here if it's taken from other source.
DO NOT PLAGIARISE.
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
'''
|
[
"luctivud@gmail.com"
] |
luctivud@gmail.com
|
36d766acb64d266f4988a64145c619c6d89a0910
|
17331ee8285a1f19e4ca1abd89dac64da381959d
|
/03-accessing-web-data/reading-webpages.py
|
537e39244e68328b3514cbd9f43c78a7595785c4
|
[] |
no_license
|
chaochaocodes/PY4E
|
3681367ce548fe9a423adb895fe76efda60521bb
|
09930f6187c3388b61903680bcd4a1533b0b4f82
|
refs/heads/main
| 2023-03-28T11:29:09.209120
| 2021-04-01T02:34:58
| 2021-04-01T02:34:58
| 333,506,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
'''
Reading Webpages like Files using urllib
'''
import urllib.request, urllib.parse, urllib.error

# 1. Read like a File
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
for line in fhand:
    # Each line arrives as bytes; decode to str and strip the newline.
    print(line.decode().strip())
# reads the HTML file!
# returns header + body, but header not returned in this for loop; accessed another way

# 2. Working with the data. Retrieve and find frequency of words
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
counts = dict()
for line in fhand:
    words = line.decode().split()
    # line is a byte string, decode into character string
    for word in words:
        # BUG FIX: the original read `counts[word]: counts.get(word,0) + 1`,
        # which is a no-op annotation statement, so counts stayed empty and
        # printed as {}. Use assignment to actually accumulate the tally.
        counts[word] = counts.get(word, 0) + 1
print(counts)
# array of words, count and save in dict
|
[
"57464564+chaochaocodes@users.noreply.github.com"
] |
57464564+chaochaocodes@users.noreply.github.com
|
a4b161d665baf8d27aecbdb191e60e06308b2f62
|
8c7fba506eb022e627537e6017b97508ca453b65
|
/models/dbsetup.py
|
7b7c8e89cda78eb02cff7f6496740a112f1c6dcd
|
[
"MIT"
] |
permissive
|
laminko/wBlog
|
4a6851ba159c5cf30461fd08b428647c14622c14
|
c2bdecede8bf589eabb57bd080e90d995261aafd
|
refs/heads/master
| 2020-04-06T07:05:22.722787
| 2016-09-15T09:01:34
| 2016-09-15T09:01:34
| 65,677,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,885
|
py
|
# web2py model: blog database schema and first-run bootstrap records.
# NOTE(review): db, Field, request, auth, T and the IS_* validators are
# injected by the web2py framework when model files execute — they are
# deliberately not imported here.
from datetime import datetime  # NOTE(review): appears unused in this file
# Marker authors can embed in a post body to split the teaser
# ("above the fold" part) from the rest of the article.
SYM_PAGE_BREAKER = " {LMK:PAGE-BREAK} "
SINGLE_SPACE = " "
# Tables
# Blog posts. body_pagebreak/body_nobreak/has_pagebreak are computed
# columns derived from body: the teaser before the break marker, the full
# text with the marker collapsed to a space, and whether a marker exists.
db.define_table('post',
                Field('title', 'string'),
                Field('body', 'text'),
                Field('body_pagebreak',
                      compute=lambda r: (
                          r['body'] or "").split(SYM_PAGE_BREAKER)[0]),
                Field('body_nobreak',
                      compute=lambda r: (
                          r['body'] or "").replace(SYM_PAGE_BREAKER,
                                                   SINGLE_SPACE)),
                Field('has_pagebreak',
                      compute=lambda r: SYM_PAGE_BREAKER in (r['body'] or "")),
                Field('is_draft', 'boolean', default=False),
                Field('total_likes', 'integer', default=0,
                      readable=False, writable=False),
                Field('created_on', 'datetime', default=request.now,
                      readable=False, writable=False),
                Field('created_by', 'reference auth_user',
                      default=auth.user_id,
                      readable=False, writable=False),
                Field('modified_on', 'datetime', update=request.now,
                      readable=False, writable=False),
                Field('modified_by', 'reference auth_user',
                      update=auth.user_id,
                      readable=False, writable=False),
                Field('tags', 'list:string'))
# Reader comments on posts; new comments start unapproved and undeleted,
# and may reference a parent comment (reply_to).
db.define_table('postcomment',
                Field('post', 'reference post',
                      readable=False, writable=False),
                Field('body', 'text', label=T("Comment")),
                Field('is_approved', 'boolean', default=False,
                      readable=False, writable=False),
                Field('is_deleted', 'boolean', default=False,
                      readable=False, writable=False),
                Field('reply_to', 'reference postcomment',
                      readable=False, writable=False),
                Field('created_on', 'datetime', default=request.now,
                      readable=False, writable=False),
                Field('created_by', 'reference auth_user',
                      default=auth.user_id,
                      readable=False, writable=False),
                Field('modified_on', 'datetime', update=request.now,
                      readable=False, writable=False),
                Field('modified_by', 'reference auth_user',
                      update=auth.user_id,
                      readable=False, writable=False))
# Messages submitted through the site's contact form.
db.define_table('contact',
                Field('name', 'string', requires=IS_NOT_EMPTY()),
                Field('email', 'string', requires=[
                    IS_NOT_EMPTY(), IS_EMAIL()]),
                Field('description', 'text', requires=IS_NOT_EMPTY()),
                Field('created_on', 'datetime', default=request.now,
                      readable=False, writable=False))
# Site-wide announcement banners; message_type selects the display
# flavour and expires_on allows time-limited bulletins.
db.define_table('bulletin',
                Field('title', 'string', requires=IS_NOT_EMPTY()),
                Field('message_body', 'text', requires=IS_NOT_EMPTY()),
                Field('message_type', 'string',
                      default='info',
                      requires=IS_IN_SET(('success',
                                          'info',
                                          'warning',
                                          'danger',
                                          'special'))),
                Field('expires_on', 'datetime', default=None),
                Field('is_active', 'boolean', default=False),
                Field('created_on', 'datetime', default=request.now,
                      readable=False, writable=False),
                Field('created_by', 'reference auth_user',
                      default=auth.user_id,
                      readable=False, writable=False),
                Field('modified_on', 'datetime', update=request.now,
                      readable=False, writable=False),
                Field('modified_by', 'reference auth_user',
                      update=auth.user_id,
                      readable=False, writable=False))
# Events with optional image, free-text location and lat/lng coordinates.
db.define_table('eventinfo',
                Field('title', 'string', requires=IS_NOT_EMPTY()),
                Field('event_detail', 'text', requires=IS_NOT_EMPTY()),
                Field('image_url', 'text'),
                Field('location_text', 'text'),
                Field('location_lat', 'float'),
                Field('location_lng', 'float'),
                Field('event_start', 'datetime'),
                Field('event_end', 'datetime'),
                Field('is_active', 'boolean', default=False),
                Field('created_on', 'datetime', default=request.now,
                      readable=False, writable=False),
                Field('created_by', 'reference auth_user',
                      default=auth.user_id,
                      readable=False, writable=False),
                Field('modified_on', 'datetime', update=request.now,
                      readable=False, writable=False),
                Field('modified_by', 'reference auth_user',
                      update=auth.user_id,
                      readable=False, writable=False))
# Uploaded files; public ones are served via /getobject/(id).
db.define_table('upload',
                Field('title', 'string', requires=IS_NOT_EMPTY()),
                Field('the_file', 'upload'),
                Field('is_public', 'boolean', default=True,
                      comment='Public url is like <b>/getobject/(id)</b>.'),
                Field('created_on', 'datetime', default=request.now,
                      readable=False, writable=False),
                Field('created_by', 'reference auth_user',
                      default=auth.user_id,
                      readable=False, writable=False),
                Field('modified_on', 'datetime', update=request.now,
                      readable=False, writable=False),
                Field('modified_by', 'reference auth_user',
                      update=auth.user_id,
                      readable=False, writable=False))
# check default root user exists or not.
if db(db.auth_user).count() < 1:
    # if not:
    # create groups once.
    db.auth_group.bulk_insert([
        dict(role='Root', description='System user'),
        dict(role='Admin', description='Blog admin'),
        dict(role='Editor', description='Blog editor'),
        dict(role='Moderator', description='Blog moderator'),
        dict(role='User', description='Blog reader')
    ])
    # create default root user.
    db.auth_user.insert(
        **dict(
            first_name='System',
            last_name='User',
            email='root@root.su',
            # validate() returns (hashed_value, error); store the hash so
            # the default password is not kept in clear text.
            password=db.auth_user.password.validate('root@root.su')[0]
        )
    )
    # set permission for default user.
    # Membership: user id 1 (the root user just created) joins group 1 (Root).
    auth.add_membership(user_id=1, group_id=1)
|
[
"="
] |
=
|
f7881b2609d4092aa8e483ad9b8bc0d585901f87
|
67d8173a716da10a7350213d98938aae9f2115ce
|
/ProgrammingCourses/CS61A/project/maps/data/__init__.py
|
41d0dcae71d4be63e17e6aabe7e0795053028508
|
[] |
no_license
|
jxie0755/Learning_Python
|
94490d41bdf93acf8396f843328e38b6da310b0f
|
143422321cbc3715ca08f6c3af8f960a55887ced
|
refs/heads/master
| 2021-11-02T22:47:35.790239
| 2021-09-26T04:26:23
| 2021-09-26T04:26:23
| 101,445,132
| 0
| 2
| null | 2019-02-19T15:48:44
| 2017-08-25T22:00:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
import collections
import os
from abstractions import *
import data.jsonl
# NOTE(review): `import data.jsonl` binds the name `data`, not `jsonl`;
# the bare `jsonl` used below suggests `from data import jsonl` was
# intended — confirm how this module is actually loaded.

# Directories holding the JSON datasets and saved user profile files.
DATA_DIRECTORY = "data"
USER_DIRECTORY = "users"


def load_data(user_dataset, review_dataset, restaurant_dataset):
    """Load users, reviews and restaurants from JSON-lines files and knit
    them together through the `abstractions` constructors.

    Returns (users, reviews, restaurants): each review is attached both to
    its restaurant and to its author, by rebuilding users and restaurants
    a second time once all reviews are known.
    """
    with open(os.path.join(DATA_DIRECTORY, user_dataset)) as f:
        user_data = jsonl.load(f)
    with open(os.path.join(DATA_DIRECTORY, review_dataset)) as f:
        review_data = jsonl.load(f)
    with open(os.path.join(DATA_DIRECTORY, restaurant_dataset)) as f:
        restaurant_data = jsonl.load(f)
    # Load users.
    userid_to_user = {}
    for user in user_data:
        name = user["name"]
        _user_id = user["user_id"]
        user = make_user(name, [])  # MISSING: reviews
        userid_to_user[_user_id] = user
    # Load restaurants.
    busid_to_restaurant = {}
    for restaurant in restaurant_data:
        name = restaurant["name"]
        location = float(restaurant["latitude"]), float(restaurant["longitude"])
        categories = restaurant["categories"]
        price = restaurant["price"]
        if price is not None:
            price = int(price)
        num_reviews = int(restaurant["review_count"])
        _business_id = restaurant["business_id"]
        restaurant = make_restaurant(name, location, categories, price, [])  # MISSING: reviews
        busid_to_restaurant[_business_id] = restaurant
    # Load reviews, indexing them by both restaurant and author.
    reviews = []
    busid_to_reviews = collections.defaultdict(list)
    userid_to_reviews = collections.defaultdict(list)
    for review in review_data:
        _user_id = review["user_id"]
        _business_id = review["business_id"]
        restaurant = restaurant_name(busid_to_restaurant[_business_id])
        rating = float(review["stars"])
        review = make_review(restaurant, rating)
        reviews.append(review)
        busid_to_reviews[_business_id].append(review)
        userid_to_reviews[_user_id].append(review)
    # Reviews done. Rebuild restaurants, now with their reviews attached.
    restaurants = {}
    for busid, restaurant in busid_to_restaurant.items():
        name = restaurant_name(restaurant)
        location = list(restaurant_location(restaurant))
        categories = restaurant_categories(restaurant)
        price = restaurant_price(restaurant)
        restaurant_reviews = busid_to_reviews[busid]
        restaurant = make_restaurant(name, location, categories, price, restaurant_reviews)
        restaurants[name] = restaurant
    # Restaurants done. Rebuild users, now with their reviews attached.
    users = []
    for userid, user in userid_to_user.items():
        name = user_name(user)
        user_reviews = userid_to_reviews[userid]
        user = make_user(name, user_reviews)
        users.append(user)
    # Users done.
    return users, reviews, list(restaurants.values())


# Eagerly load the full dataset at import time.
USERS, REVIEWS, ALL_RESTAURANTS = load_data("users.json", "reviews.json", "restaurants.json")
# Every category that appears on at least one restaurant.
CATEGORIES = {c for r in ALL_RESTAURANTS for c in restaurant_categories(r)}


def load_user_file(user_file):
    """Evaluate and return the contents of a saved user .dat file.

    NOTE(review): eval() on file contents executes arbitrary code — only
    acceptable for trusted files that ship with the project.
    """
    with open(os.path.join(USER_DIRECTORY, user_file)) as f:
        return eval(f.read())


import glob
# f[6:-4] strips the leading "users/" and the trailing ".dat".
USER_FILES = [f[6:-4] for f in glob.glob("users/*.dat")]
|
[
"30805062+jxie0755@users.noreply.github.com"
] |
30805062+jxie0755@users.noreply.github.com
|
d3c366292f09c31949649f09f59f18df63e790be
|
1cad3fa574350c9be29282f518f4927efb26e18f
|
/http_api/api_intro.py
|
0c1edd60bc8199386fc7a5c103448f64018fc4dc
|
[] |
no_license
|
EvgeniyBudaev/python_learn
|
c72fdc2c5a84dae03bfd6e5afc5453b795ada17f
|
1a5385e3412832dd9017536dad1140138143600e
|
refs/heads/main
| 2023-05-27T04:41:50.754525
| 2021-05-27T02:11:18
| 2021-05-27T02:11:18
| 337,162,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
import requests

# Hand-built equivalent of the parameterised request below, kept for reference:
# url = 'https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime=2014-01-01&endtime=2014-01-02'
# response = requests.get(url, headers={'Accept':'application/json'})

# Let requests encode the query string from a dict instead.
url = 'https://earthquake.usgs.gov/fdsnws/event/1/query?'
query = {
    'format': 'geojson',
    'starttime': '2014-01-01',
    'endtime': '2014-01-02',
}
response = requests.get(url, headers={'Accept': 'application/json'}, params=query)
# print(response.text)
# print(response.json())
# print(type(response.json()))  # dict

# Print the location of the first earthquake in the result set.
data = response.json()
print(data['features'][0]['properties']['place'])
|
[
"49036840+EvgeniyBudaev@users.noreply.github.com"
] |
49036840+EvgeniyBudaev@users.noreply.github.com
|
9301f373603392c31e4ef37ab57d6eace6eb163f
|
cf470f7d3fd0ea481970bcdedcd869258f692d05
|
/aces_1.2/python/bin/create_aces_config
|
198eabe513ced3b60c560b6793299b56d29d36e7
|
[
"LicenseRef-scancode-unknown-license-reference",
"AMPAS"
] |
permissive
|
colour-science/OpenColorIO-Configs
|
3acef083127b698eb3252b45d724dfd4f5346c1a
|
b0a3ae218c24ed452e01ac1282d0b40e31dede6e
|
refs/heads/master
| 2023-09-03T11:51:31.862794
| 2022-04-14T20:17:13
| 2022-04-14T20:17:13
| 54,505,320
| 619
| 440
|
NOASSERTION
| 2022-04-14T20:17:14
| 2016-03-22T20:06:48
|
Roff
|
UTF-8
|
Python
| false
| false
| 802
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: AMPAS
# Copyright Academy of Motion Picture Arts and Sciences
"""
Creates the *ACES* configuration.

Thin command-line launcher: all the work happens in
``aces_ocio.generate_config.main``.
"""

from __future__ import division

import os
import sys

# Make the sibling ``aces_ocio`` package importable when this file is run
# as a script from the ``bin`` directory.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from aces_ocio.generate_config import main

# Package metadata.
__author__ = (
    'Haarm-Pieter Duiker, Thomas Mansencal, Stephen Hill, Kevin Wheatley, '
    'Joseph Goldstone')
__copyright__ = (
    'Copyright (C) 2014-2021 Academy of Motion Picture Arts and Sciences')
__license__ = 'Academy of Motion Picture Arts and Sciences License Terms'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = 'acessupport@oscars.org'
__status__ = 'Production'

__all__ = []

if __name__ == '__main__':
    main()
|
[
"thomas.mansencal@gmail.com"
] |
thomas.mansencal@gmail.com
|
|
e58b36b05c142642d3001d70c865a8a112804449
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/projecteuler/ProjectEuler-master(2)/ProjectEuler-master/156.py
|
aafe0dea8075d2124fd3dc79cbb842ba780bd38f
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
import sys
class Problem():
    """Project Euler 156: for each digit d, sum all n with f(n, d) == n,
    where f(n, d) counts occurrences of digit d in the numbers 1..n."""

    def __init__(self):
        # Fixed points collected by the binary search currently running.
        self.found = None

    def solve(self):
        """Print the per-digit solution sums (digits 1..9) and the total."""
        count = 0
        for digit in range(1, 10):
            solution_sum = self.s(digit)
            print(digit, solution_sum)
            count += solution_sum
        print(count)

    def s(self, digit):
        """Return the sum of all n in [1, 1e11] with f(n, digit) == n."""
        self.found = []
        self.binary_search(1, 10**11, digit)
        return sum(self.found)

    def f(self, n, digit):
        """Count occurrences of `digit` in 1..n in O(number of decimal
        places), handling each place by cases on its current digit."""
        count = 0
        factor = 1
        while n // factor != 0:
            # Split n around the current place: lower digits, the digit at
            # this place, and the higher digits.
            lower_number = n - (n // factor) * factor
            curr_number = (n // factor) % 10
            higher_number = n // (factor * 10)
            if curr_number < digit:
                count += higher_number * factor
            elif curr_number == digit:
                count += higher_number * factor + lower_number + 1
            else:
                count += (higher_number + 1) * factor
            factor *= 10
        return count

    def binary_search(self, lower, upper, digit):
        """Recursively collect fixed points f(n, digit) == n in (lower, upper).

        NOTE(review): a half is recursed into only when the value range
        [f(lower), f(middle)] (resp. [f(middle), f(upper)]) overlaps the
        argument range — pruning that relies on growth bounds of f; confirm
        against f_naive for small ranges.
        """
        if lower + 1 == upper:
            if self.f(lower, digit) == lower:
                self.found.append(lower)
            return
        middle = (lower + upper) // 2
        lower_value = self.f(lower, digit)
        upper_value = self.f(upper, digit)
        middle_value = self.f(middle, digit)
        if middle_value >= lower and middle >= lower_value:
            self.binary_search(lower, middle, digit)
        if upper_value >= middle and upper >= middle_value:
            self.binary_search(middle, upper, digit)

    def f_naive(self, n, digit):
        """Brute-force reference implementation of f (for cross-checking)."""
        return sum([self.count_naive(i, digit) for i in range(1, n+1)])

    def count_naive(self, n, digit):
        """Count occurrences of `digit` in the single number n."""
        count = 0
        while n > 0:
            n, r = divmod(n, 10)
            if r == digit:
                count += 1
        return count
def main():
    """Run the full Project Euler 156 search and print the results."""
    solver = Problem()
    solver.solve()


if __name__ == '__main__':
    sys.exit(main())
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
b34734bccd0addbe7a3f95e5866fe250ba44c343
|
e6ebd1f9e3968f6ed613e9f35e46716115e6e9c3
|
/chapter4/demo2.py
|
9d025bcb4070abbf880ddf67f5d49444fcbfdbdb
|
[] |
no_license
|
huwanping001/Python
|
897046d3d6d1b420befeefcaa2b9544efa7d1881
|
3c76278f7a9b216b28b8880e0108af3c550b9372
|
refs/heads/main
| 2023-08-21T00:45:17.991833
| 2021-10-18T13:47:52
| 2021-10-18T13:47:52
| 409,586,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
# School: Sichuan University of Science & Engineering
# College: School of Automation & Information Engineering
# Student: Hu Wanping
# Written: 2021/9/18 9:54

# Demonstrate object truthiness: every value printed below is falsy.
print(bool(False))  # False
print(bool(0))      # False
print(bool(0.0))    # False
print(bool(None))   # False
print(bool(''))     # False
print(bool(""))     # False
print(bool(list()))  # empty list -> False
print(bool([]))      # empty list -> False
print(bool(()))      # empty tuple -> False
print(bool(tuple())) # empty tuple -> False
print(bool({}))      # empty dict -> False
print(bool(dict()))  # empty dict -> False
print(bool(set()))   # empty set -> False
# Banner below is program output (kept verbatim); it says
# "the bool value of all other objects is True".
print('-----------------其他对象的bool值均为True------------------')
print(bool(18))
print(bool(True))
print(bool('xiaohu'))
|
[
"noreply@github.com"
] |
huwanping001.noreply@github.com
|
3df69c8078977d9b51a98b936360a4cf6bcf6b89
|
1260ce7869ce32d6b434afbf273273b7b1ebea2d
|
/lorentz_equivariant_gnn/architectures/EquivariantGNN/egnn_base.py
|
ddcd258aec9bfcb6cb6915b699a963195d25437c
|
[] |
no_license
|
savvy379/Lorentz-Equivariant-GNN
|
b3b30e964cfa9af39adcb4e8b73bc78b4f8b7b5e
|
3d1c74081bdd43387a7c530bce73580db379d22d
|
refs/heads/master
| 2023-08-01T06:43:13.229014
| 2021-09-22T18:35:15
| 2021-09-22T18:35:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,499
|
py
|
import sys, os
import logging
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.data import DataLoader
import torch
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve
from .utils import load_datasets
class EGNNBase(LightningModule):
    """Shared Lightning plumbing for equivariant-GNN binary classifiers:
    dataset wiring, AdamW + StepLR setup, metric logging and LR warmup.

    `hparams` is dict-like and is read with string keys throughout
    (input_dir, data_split, train_batch, val_batch, lr, patience, factor,
    signal_goal, warmup).
    """

    def __init__(self, hparams):
        super().__init__()
        """
        Initialise the Lightning Module that can scan over different Equivariant GNN training regimes
        """
        # Assign hyperparameters
        self.save_hyperparameters(hparams)

    def setup(self, stage):
        # Handle any subset of [train, val, test] data split, assuming that ordering
        self.trainset, self.valset = load_datasets(self.hparams["input_dir"], self.hparams["data_split"])

    def train_dataloader(self):
        # Shuffled loader over the training split; None when no split loaded.
        if self.trainset is not None:
            return DataLoader(self.trainset, batch_size=self.hparams["train_batch"], num_workers=1, shuffle=True)
        else:
            return None

    def val_dataloader(self):
        if self.valset is not None:
            return DataLoader(self.valset, batch_size=self.hparams["val_batch"], num_workers=1)
        else:
            return None

    def test_dataloader(self):
        # NOTE(review): self.testset is never assigned in setup(), so this
        # raises AttributeError if called — confirm intended usage.
        if self.testset is not None:
            return DataLoader(self.testset, batch_size=1, num_workers=1)
        else:
            return None

    def configure_optimizers(self):
        # Single AdamW optimiser with an epoch-wise StepLR decay
        # (step size = "patience", decay factor = "factor").
        optimizer = [
            torch.optim.AdamW(
                self.parameters(),
                lr=(self.hparams["lr"]),
                betas=(0.9, 0.999),
                eps=1e-08,
                amsgrad=True,
            )
        ]
        scheduler = [
            {
                "scheduler": torch.optim.lr_scheduler.StepLR(
                    optimizer[0],
                    step_size=self.hparams["patience"],
                    gamma=self.hparams["factor"],
                ),
                "interval": "epoch",
                "frequency": 1,
            }
        ]
        return optimizer, scheduler

    def get_metrics(self, batch, output):
        """Return (probabilities, accuracy, AUC, eps) for a batch of logits.

        eps is the false-positive rate at the ROC point whose true-positive
        rate is closest to hparams["signal_goal"].
        """
        prediction = torch.sigmoid(output)
        # Accuracy at the 0.5 threshold.
        tp = (prediction.round() == batch.y).sum().item()
        acc = tp / len(batch.y)
        try:
            auc = roc_auc_score(batch.y.bool().cpu().detach(), prediction.cpu().detach())
        except:
            # roc_auc_score raises when only one class is present in the
            # batch; fall back to 0. NOTE(review): bare except also hides
            # unrelated failures.
            auc = 0
        fpr, tpr, _ = roc_curve(batch.y.bool().cpu().detach(), prediction.cpu().detach())
        # Calculate which threshold gives the best signal goal
        signal_goal_idx = abs(tpr - self.hparams["signal_goal"]).argmin()
        eps = fpr[signal_goal_idx]
        return prediction, acc, auc, eps

    def training_step(self, batch, batch_idx):
        output = self(batch).squeeze(-1)
        loss = F.binary_cross_entropy_with_logits(output, batch.y.float())
        prediction, acc, auc, inv_eps = self.get_metrics(batch, output)
        self.log_dict({"train_loss": loss, "train_acc": acc}, on_step=False, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        output = self(batch).squeeze(-1)
        loss = F.binary_cross_entropy_with_logits(output, batch.y.float())
        prediction, acc, auc, eps = self.get_metrics(batch, output)
        current_lr = self.optimizers().param_groups[0]["lr"]
        self.log_dict({"val_loss": loss, "acc": acc, "auc": auc, "current_lr": current_lr}, on_step=False, on_epoch=True)
        return {
            "loss": loss,
            "preds": prediction,
            "acc": acc,
            "auc": auc,
            "eps": eps
        }

    def validation_epoch_end(self, step_outputs):
        # Log the inverse background rate (1/eps), a common HEP figure of
        # merit; skipped when the mean eps is exactly 0.
        mean_eps = np.mean([output["eps"] for output in step_outputs])
        if mean_eps != 0:
            self.log_dict({"inv_eps": 1/mean_eps})

    def optimizer_step(
        self,
        epoch,
        batch_idx,
        optimizer,
        optimizer_idx,
        optimizer_closure=None,
        on_tpu=False,
        using_native_amp=False,
        using_lbfgs=False,
    ):
        # warm up lr: scale the learning rate linearly from 0 to
        # hparams["lr"] over the first hparams["warmup"] global steps.
        if (self.hparams["warmup"] is not None) and (
            self.trainer.global_step < self.hparams["warmup"]
        ):
            lr_scale = min(
                1.0, float(self.trainer.global_step + 1) / self.hparams["warmup"]
            )
            for pg in optimizer.param_groups:
                pg["lr"] = lr_scale * self.hparams["lr"]

        # update params
        optimizer.step(closure=optimizer_closure)
        optimizer.zero_grad()
def compute_radials(edge_index, x):
    """
    Calculates the Minkowski distance (squared) between coordinates (node embeddings) x_i and x_j
    :param edge_index: Array containing the connection between nodes
    :param x: The coordinates (node embeddings)
    :return: Minkowski distances (squared) and coordinate differences x_i - x_j
    """
    source, target = edge_index
    deltas = x[source] - x[target]
    squared = deltas.pow(2)
    # Metric signature eta = diag(-1, 1, 1, 1): negate the time component.
    squared[:, 0] = -squared[:, 0]
    radial = squared.sum(dim=1, keepdim=True)
    return radial, deltas
|
[
"murnanedaniel@hotmail.com"
] |
murnanedaniel@hotmail.com
|
300dc5d3cf9ec6b7d67dca8ceb272fa0ad0e6d80
|
6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a
|
/azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/security_rule_py3.py
|
0f8b6dc1a56b4743ed087bfac58ebfdafeb3318d
|
[
"MIT"
] |
permissive
|
ashirey-msft/azure-sdk-for-python
|
d92381d11c48f194ec9f989f5f803db614fb73f2
|
e04778e13306dad2e8fb044970215bad6296afb6
|
refs/heads/master
| 2020-03-23T06:05:39.283442
| 2018-09-15T00:18:26
| 2018-09-15T00:18:26
| 141,188,192
| 0
| 1
|
MIT
| 2018-07-16T20:02:52
| 2018-07-16T20:02:52
| null |
UTF-8
|
Python
| false
| false
| 7,668
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
# NOTE(review): AutoRest-generated model class (see file header); prefer
# regenerating over hand-editing.
class SecurityRule(SubResource):
    """Network security rule.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :param description: A description for this rule. Restricted to 140 chars.
    :type description: str
    :param protocol: Required. Network protocol this rule applies to. Possible
     values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp',
     '*'
    :type protocol: str or
     ~azure.mgmt.network.v2017_09_01.models.SecurityRuleProtocol
    :param source_port_range: The source port or range. Integer or range
     between 0 and 65535. Asterix '*' can also be used to match all ports.
    :type source_port_range: str
    :param destination_port_range: The destination port or range. Integer or
     range between 0 and 65535. Asterix '*' can also be used to match all
     ports.
    :type destination_port_range: str
    :param source_address_prefix: The CIDR or source IP range. Asterix '*' can
     also be used to match all source IPs. Default tags such as
     'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
     this is an ingress rule, specifies where network traffic originates from.
    :type source_address_prefix: str
    :param source_address_prefixes: The CIDR or source IP ranges.
    :type source_address_prefixes: list[str]
    :param source_application_security_groups: The application security group
     specified as source.
    :type source_application_security_groups:
     list[~azure.mgmt.network.v2017_09_01.models.ApplicationSecurityGroup]
    :param destination_address_prefix: The destination address prefix. CIDR or
     destination IP range. Asterix '*' can also be used to match all source
     IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
     'Internet' can also be used.
    :type destination_address_prefix: str
    :param destination_address_prefixes: The destination address prefixes.
     CIDR or destination IP ranges.
    :type destination_address_prefixes: list[str]
    :param destination_application_security_groups: The application security
     group specified as destination.
    :type destination_application_security_groups:
     list[~azure.mgmt.network.v2017_09_01.models.ApplicationSecurityGroup]
    :param source_port_ranges: The source port ranges.
    :type source_port_ranges: list[str]
    :param destination_port_ranges: The destination port ranges.
    :type destination_port_ranges: list[str]
    :param access: Required. The network traffic is allowed or denied.
     Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow',
     'Deny'
    :type access: str or
     ~azure.mgmt.network.v2017_09_01.models.SecurityRuleAccess
    :param priority: The priority of the rule. The value can be between 100
     and 4096. The priority number must be unique for each rule in the
     collection. The lower the priority number, the higher the priority of the
     rule.
    :type priority: int
    :param direction: Required. The direction of the rule. The direction
     specifies if rule will be evaluated on incoming or outcoming traffic.
     Possible values are: 'Inbound' and 'Outbound'. Possible values include:
     'Inbound', 'Outbound'
    :type direction: str or
     ~azure.mgmt.network.v2017_09_01.models.SecurityRuleDirection
    :param provisioning_state: The provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Parameters that must be supplied before the model can be serialized.
    _validation = {
        'protocol': {'required': True},
        'access': {'required': True},
        'direction': {'required': True},
    }

    # Maps Python attribute names to their REST wire paths and types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
        'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
        'source_address_prefixes': {'key': 'properties.sourceAddressPrefixes', 'type': '[str]'},
        'source_application_security_groups': {'key': 'properties.sourceApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
        'destination_address_prefixes': {'key': 'properties.destinationAddressPrefixes', 'type': '[str]'},
        'destination_application_security_groups': {'key': 'properties.destinationApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'source_port_ranges': {'key': 'properties.sourcePortRanges', 'type': '[str]'},
        'destination_port_ranges': {'key': 'properties.destinationPortRanges', 'type': '[str]'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'direction': {'key': 'properties.direction', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, *, protocol, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, source_address_prefix: str=None, source_address_prefixes=None, source_application_security_groups=None, destination_address_prefix: str=None, destination_address_prefixes=None, destination_application_security_groups=None, source_port_ranges=None, destination_port_ranges=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
        super(SecurityRule, self).__init__(id=id, **kwargs)
        self.description = description
        self.protocol = protocol
        self.source_port_range = source_port_range
        self.destination_port_range = destination_port_range
        self.source_address_prefix = source_address_prefix
        self.source_address_prefixes = source_address_prefixes
        self.source_application_security_groups = source_application_security_groups
        self.destination_address_prefix = destination_address_prefix
        self.destination_address_prefixes = destination_address_prefixes
        self.destination_application_security_groups = destination_application_security_groups
        self.source_port_ranges = source_port_ranges
        self.destination_port_ranges = destination_port_ranges
        self.access = access
        self.priority = priority
        self.direction = direction
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
[
"noreply@github.com"
] |
ashirey-msft.noreply@github.com
|
d05ce141ecc9bf14ab3e7757f48348f9ccdd9d61
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/171/61531/submittedfiles/testes.py
|
0394990bdadaa06eebff3565e0697e79fea81b66
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# -*- coding: utf-8 -*-
import math
#COMECE AQUI ABAIXO
def media(a):
    """Return the arithmetic mean of the non-empty numeric sequence *a*."""
    return sum(a) / len(a)
# Read the list size, then collect that many floats interactively.
n = int(input('digite numero de elementos da lista:'))
a = []
for i in range(0, n, 1):
    numero = float(input('digite numero á ser inserido na lista:'))
    a.append(numero)
# Print first element, last element and the mean, each with 3 decimals.
print('%.3f' % a[0])
print('%.3f' % a[len(a) - 1])
# BUG FIX: original line was `print'%.3f'%(media(a))` — a Python-2 print
# statement, which is a SyntaxError under Python 3 (the rest of the file
# already uses print() calls).
print('%.3f' % (media(a)))
print(a)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
c61d403099fed6fbcb69b33fa047ee2d16e137e1
|
5eb52c07e5b1bd00af77306f927f382b684cd6ff
|
/indy_common/generates_request.py
|
b70eefee0dfbe79d777c07d732f89f1458602edb
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-node
|
bce39486988f5114581cff4f6d14fc1b7684143c
|
e6bb87d4c605aff9914491d062248b6ec857334c
|
refs/heads/main
| 2023-09-03T15:33:08.187153
| 2023-05-08T22:48:21
| 2023-05-08T22:48:21
| 77,021,566
| 691
| 783
|
Apache-2.0
| 2023-05-09T15:42:43
| 2016-12-21T05:45:04
|
Python
|
UTF-8
|
Python
| false
| false
| 328
|
py
|
from abc import abstractmethod
class GeneratesRequest:
    # Interface for objects that can be turned into a ledger Request.
    # NOTE(review): the class does not inherit abc.ABC, so @abstractmethod is
    # not enforced at instantiation time — subclasses are trusted to override.
    @abstractmethod
    def _op(self):
        # Expected to return the operation payload used to build the request.
        pass

    @abstractmethod
    def ledgerRequest(self):
        """
        Generates a Request object to be submitted to the ledger.
        :return: a Request to be submitted, or None if it shouldn't be written
        """
[
"alexander.sherbakov@dsr-company.com"
] |
alexander.sherbakov@dsr-company.com
|
7c30c30dcc7cc854a841fbb8a6e3e7b45eb5bcf8
|
22aa900e70c8cc6005ecadbb2ae710526af8d3ba
|
/course/forms.py
|
8da7a5b70959e28879bb8df24e4db2ededff90aa
|
[] |
no_license
|
skafis/career_choice
|
f79ac3df223122a19a7718d9247ca4e2e72ee22e
|
84d3ec752ba6da60e7130f132bd329ff72d66cae
|
refs/heads/master
| 2021-01-13T02:50:44.580867
| 2016-12-22T16:32:40
| 2016-12-22T16:32:40
| 77,144,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
from django import forms
from .models import Courses
class add_coursesForm(forms.ModelForm):
    """ModelForm for creating a Courses record (name, time, cost)."""

    class Meta:
        # Bind the form to the Courses model and expose only these fields.
        model = Courses
        fields = ['name', 'time', 'cost']
|
[
"franciskipchumba5@gmail.com"
] |
franciskipchumba5@gmail.com
|
1999644c558f0f3bf2fc69e88aea396932927a64
|
f3ad39ebf9654c99edb33c0fee843a53f9b6c31a
|
/backend/wesmusicmedia_20833/settings.py
|
66d308873bed16394e0b2159023b8a1dcdfc1907
|
[] |
no_license
|
crowdbotics-apps/wesmusicmedia-20833
|
306814d32b3acd43c446cd004351c9fb93009afa
|
474162c36a486c6028cfec8214d93d83fda4e235
|
refs/heads/master
| 2022-12-25T19:45:48.328522
| 2020-09-29T17:51:09
| 2020-09-29T17:51:09
| 299,693,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,900
|
py
|
"""
Django settings for wesmusicmedia_20833 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"event",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "wesmusicmedia_20833.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "wesmusicmedia_20833.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
454994b05910daee4f982348fb1beb8bab821645
|
cf5bfac1e203ae0da1802cf539f32250b57e7224
|
/4.exceptions/exception101.py
|
dafb2cf845bdff6ba846e02634c5ec0ab88084d4
|
[] |
no_license
|
jnepal/OReilly-Python-Beyond-the-Basics-OOP
|
fba2229ffd31b87e2ceab48c6c3f7f445ab47493
|
05050a7ecd0db5c9f18cc6e5ae49a07ddf6054cf
|
refs/heads/master
| 2021-05-01T07:16:26.957247
| 2018-02-11T18:16:36
| 2018-02-11T18:16:36
| 121,152,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
'''
Handling Exceptions
'''
import sys

mydict = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
# Interactive demo: look up a user-supplied key EAFP-style and catch the miss.
key = input('please input a key: ')
try:
    print("The value for {0} is {1}".format(key, mydict[key]))
except KeyError as err:
    # err carries the missing key (printed repr-quoted).
    print('the key ' + key + ' does not exists')
    print(err)
    # print(sys.exc_info()[0])

'''
Raising Exceptions
'''
def divideByZero(num):
    # Always raises ZeroDivisionError — exists to demonstrate re-raising below.
    return num / 0

try:
    divideByZero(5)
except ZeroDivisionError as error:
    # Re-raise with a custom message. NOTE(review): this new exception is not
    # caught anywhere, so running the script intentionally ends in a traceback.
    raise ZeroDivisionError("ZeroDivisionError: You cannot divide a number by zero")
|
[
"johndoe@example.com"
] |
johndoe@example.com
|
58be87c385080aa2d8610c062e6534b8eb59cef9
|
06adea92d1e66d653d0884e8469b7352f5de4f04
|
/matplotlibMine/change/ACF_PACFPlot.py
|
525672a6ccb4aba2cba6966636059490d812989e
|
[] |
no_license
|
Gedanke/FigureDemo
|
a3cf1f0998fb0dc7acce9b90ff55453372759575
|
e37164521d9c4e8c5a05592749f1779bed2b0903
|
refs/heads/master
| 2023-04-19T08:47:35.417971
| 2021-05-05T05:09:48
| 2021-05-05T05:09:48
| 361,770,137
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
# -*- coding:utf-8 -*-
import pandas
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf

# Load the AirPassengers time series.
df = pandas.read_csv('../dataset/AirPassengers.csv')

# Side-by-side autocorrelation (left) and partial-autocorrelation (right) plots.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6), dpi=80)
plot_acf(df.value.tolist(), ax=ax1, lags=50)
plot_pacf(df.value.tolist(), ax=ax2, lags=20)

# Decorate both axes: soften every border and enlarge the tick labels.
for ax in (ax1, ax2):
    for edge in ("top", "bottom", "right", "left"):
        ax.spines[edge].set_alpha(.3)
    ax.tick_params(axis='both', labelsize=12)

plt.savefig("../photos/change/ACF_PACFPlot.png")
plt.show()
|
[
"13767927306@163.com"
] |
13767927306@163.com
|
47a973711a8b923b936e2065c5d59905c74acf35
|
810412fc189697eaad5731cd66cc291f1d82c3b5
|
/cap2/extensions/experimental/strains/merge_snp_graph.py
|
17ef682831a202a1ef92a7392795e9a09cedac61
|
[
"MIT"
] |
permissive
|
MetaSUB/CAP2
|
c511655ed15a7e886d5216a358fc6e5904b25f24
|
5ccdc0af310dd4ee382a81c7330e04927d9ef5fe
|
refs/heads/master
| 2022-12-03T15:50:59.694245
| 2021-12-29T17:03:50
| 2021-12-29T17:03:50
| 213,112,026
| 12
| 7
|
MIT
| 2022-11-22T09:28:20
| 2019-10-06T05:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
from .tasks import StrainCapGroupTask
from ....pipeline.config import PipelineConfig
from .strainotyping import (
VERSION,
merge_filter_graphs_from_filepaths,
write_graph_to_filepath,
graph_node_table,
)
from .make_snp_graph import MakeSNPGraph
class MergeSNPGraph(StrainCapGroupTask):
    """Group-level task: merge the per-sample SNP graphs for one genome into a
    single filtered graph plus a CSV node table.
    """
    # Edges with combined weight below this threshold are dropped during merge.
    MIN_WEIGHT = 2
    module_description = """
    This module
    Motivation:
    Negatives:
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.config = PipelineConfig(self.config_filename)

    @property
    def snp_graphs(self):
        # One MakeSNPGraph task per sample in the group (framework helper;
        # module_req_list is provided by StrainCapGroupTask).
        return self.module_req_list(MakeSNPGraph)

    def requires(self):
        return self.snp_graphs

    @classmethod
    def version(cls):
        # Version of this pipeline module (not the underlying tool).
        return 'v0.1.0'

    def tool_version(self):
        return VERSION

    @classmethod
    def dependencies(cls):
        return [MakeSNPGraph]

    @classmethod
    def _module_name(cls):
        return 'experimental::merge_snp_graph'

    def output(self):
        # Two targets keyed by genome name: the merged graph (gzipped GML)
        # and the node table (gzipped CSV).
        out = {
            f'merged_snp_graph__{self.genome_name}': self.get_target(f'merged_snp_graph__{self.genome_name}', 'gml.gz'),
            f'merged_snp_nodes__{self.genome_name}': self.get_target(f'merged_snp_nodes__{self.genome_name}', 'csv.gz'),
        }
        return out

    @property
    def graph_path(self):
        return self.output()[f'merged_snp_graph__{self.genome_name}'].path

    @property
    def node_path(self):
        return self.output()[f'merged_snp_nodes__{self.genome_name}'].path

    def _run(self):
        # Merge every per-sample graph, filter low-support edges, then persist
        # both the merged graph and its node table.
        graph_paths = [snp_graph.graph_path for snp_graph in self.snp_graphs]
        merged_graph = merge_filter_graphs_from_filepaths(graph_paths, min_weight=self.MIN_WEIGHT)
        write_graph_to_filepath(merged_graph, self.graph_path)
        tbl = graph_node_table(merged_graph)
        tbl.to_csv(self.node_path, compression='gzip')
|
[
"dcdanko@gmail.com"
] |
dcdanko@gmail.com
|
235f8543683b0f8e93ab3658fce247f2507db2ac
|
2a3743ced45bd79826dcdc55f304da049f627f1b
|
/venv/lib/python3.7/site-packages/deribit_api.py
|
29835d45975a42ff171d83291465e8b8813c9460
|
[
"MIT"
] |
permissive
|
Dimasik007/Deribit_funding_rate_indicator
|
12cc8cd7c0be564d6e34d9eae91940c62492ae2a
|
3251602ae5249069489834f9afb57b11ff37750e
|
refs/heads/master
| 2023-05-26T10:14:20.395939
| 2019-08-03T11:35:51
| 2019-08-03T11:35:51
| 198,705,946
| 5
| 3
|
MIT
| 2023-05-22T22:29:24
| 2019-07-24T20:32:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,534
|
py
|
# -*- coding: utf-8 -*-
import time, hashlib, requests, base64, sys
from collections import OrderedDict
class RestClient(object):
    """Thin REST client for the Deribit v1 API.

    Public endpoints are plain GETs; private endpoints are POSTs signed with
    an HMAC-style SHA-256 signature sent in the ``x-deribit-sig`` header.
    """

    def __init__(self, key=None, secret=None, url=None):
        # key/secret are only required for /private/ endpoints.
        self.key = key
        self.secret = secret
        self.session = requests.Session()
        if url:
            self.url = url
        else:
            self.url = "https://www.deribit.com"

    def request(self, action, data):
        """Send *data* to endpoint *action* and return the parsed result.

        Raises Exception on non-200 responses or when the API reports
        ``success: false``. Returns the ``result`` field when present,
        otherwise ``message``, otherwise the literal string "Ok".
        """
        response = None
        if action.startswith("/api/v1/private/"):
            if self.key is None or self.secret is None:
                raise Exception("Key or secret empty")
            signature = self.generate_signature(action, data)
            response = self.session.post(self.url + action, data=data, headers={'x-deribit-sig': signature}, verify=True)
        else:
            response = self.session.get(self.url + action, params=data, verify=True)
        if response.status_code != 200:
            raise Exception("Wrong response code: {0}".format(response.status_code))
        json = response.json()
        if json["success"] == False:
            raise Exception("Failed: " + json["message"])
        if "result" in json:
            return json["result"]
        elif "message" in json:
            return json["message"]
        else:
            return "Ok"

    def generate_signature(self, action, data):
        """Build the ``key.timestamp.base64(sha256(...))`` request signature.

        The signed string is every parameter (plus the _/_ackey/_acsec/_action
        meta fields) joined as ``k=v`` pairs in sorted key order — the exact
        ordering and joining is part of the wire contract, do not reorder.
        """
        tstamp = int(time.time()* 1000)
        signature_data = {
            '_': tstamp,
            '_ackey': self.key,
            '_acsec': self.secret,
            '_action': action
        }
        signature_data.update(data)
        sorted_signature_data = OrderedDict(sorted(signature_data.items(), key=lambda t: t[0]))

        def converter(data):
            # List values are concatenated without a separator; everything
            # else is str()-ified.
            key = data[0]
            value = data[1]
            if isinstance(value, list):
                return '='.join([str(key), ''.join(value)])
            else:
                return '='.join([str(key), str(value)])

        items = map(converter, sorted_signature_data.items())
        signature_string = '&'.join(items)
        sha256 = hashlib.sha256()
        sha256.update(signature_string.encode("utf-8"))
        sig = self.key + "." + str(tstamp) + "."
        sig += base64.b64encode(sha256.digest()).decode("utf-8")
        return sig

    # --- Public (unauthenticated) endpoints -------------------------------

    def getorderbook(self, instrument):
        return self.request("/api/v1/public/getorderbook", {'instrument': instrument})

    def getinstruments(self):
        return self.request("/api/v1/public/getinstruments", {})

    def getcurrencies(self):
        return self.request("/api/v1/public/getcurrencies", {})

    def getlasttrades(self, instrument, count=None, since=None):
        options = {
            'instrument': instrument
        }
        if since:
            options['since'] = since
        if count:
            options['count'] = count
        return self.request("/api/v1/public/getlasttrades", options)

    def getsummary(self, instrument):
        return self.request("/api/v1/public/getsummary", {"instrument": instrument})

    def index(self):
        return self.request("/api/v1/public/index", {})

    def stats(self):
        return self.request("/api/v1/public/stats", {})

    # --- Private (signed) endpoints ---------------------------------------

    def account(self):
        return self.request("/api/v1/private/account", {})

    def buy(self, instrument, quantity, price, postOnly=None, label=None):
        options = {
            "instrument": instrument,
            "quantity": quantity,
            "price": price
        }
        if label:
            options["label"] = label
        if postOnly:
            options["postOnly"] = postOnly
        return self.request("/api/v1/private/buy", options)

    def sell(self, instrument, quantity, price, postOnly=None, label=None):
        options = {
            "instrument": instrument,
            "quantity": quantity,
            "price": price
        }
        if label:
            options["label"] = label
        if postOnly:
            options["postOnly"] = postOnly
        return self.request("/api/v1/private/sell", options)

    def cancel(self, orderId):
        options = {
            "orderId": orderId
        }
        return self.request("/api/v1/private/cancel", options)

    def cancelall(self, typeDef="all"):
        return self.request("/api/v1/private/cancelall", {"type": typeDef})

    def edit(self, orderId, quantity, price):
        options = {
            "orderId": orderId,
            "quantity": quantity,
            "price": price
        }
        return self.request("/api/v1/private/edit", options)

    def getopenorders(self, instrument=None, orderId=None):
        options = {}
        if instrument:
            options["instrument"] = instrument
        if orderId:
            options["orderId"] = orderId
        return self.request("/api/v1/private/getopenorders", options)

    def positions(self):
        return self.request("/api/v1/private/positions", {})

    def orderhistory(self, count=None):
        options = {}
        if count:
            options["count"] = count
        return self.request("/api/v1/private/orderhistory", options)

    def tradehistory(self, countNum=None, instrument="all", startTradeId=None):
        options = {
            "instrument": instrument
        }
        if countNum:
            options["count"] = countNum
        if startTradeId:
            options["startTradeId"] = startTradeId
        return self.request("/api/v1/private/tradehistory", options)
|
[
"dmitriy00vn@gmail.com"
] |
dmitriy00vn@gmail.com
|
00e9f5fe14e266706112b3eda5db3a81edd109a1
|
4fdd98d5e82385393d4eb2f6526cddb15563c477
|
/src/morphforge/core/quantities/__init__.py
|
8ed61d5caaf565c48693fc05504751f56db48a69
|
[
"BSD-2-Clause"
] |
permissive
|
bmerrison/morphforge
|
f8541d4471ce13519986c42d4ebb3714a238e390
|
6d06845493bf01aae94a706bfde5d4eb9c733659
|
refs/heads/master
| 2021-01-18T07:49:47.645031
| 2012-09-26T20:54:13
| 2012-09-26T20:54:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,047
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
#from quantities import *
from morphforge.core.quantities.fromcore import factorise_units_from_list
from morphforge.core.quantities.fromcore import unit
import common_neuroscience_defs
from morphforge.core.quantities.wrappers import NpPqWrappers
from morphforge.core.quantities.common_neuroscience_defs import mS, uS, nS, pS
from morphforge.core.quantities.common_neuroscience_defs import mF, uF, nF, pF
from morphforge.core.quantities.common_neuroscience_defs import um2, cm2
from morphforge.core.quantities.common_neuroscience_defs import mm2, m2
from morphforge.core.quantities.common_neuroscience_defs import Molar, nMolar
from morphforge.core.quantities.common_neuroscience_defs import uMolar
from morphforge.core.quantities.common_neuroscience_defs import ohmcm
from morphforge.core.quantities.common_neuroscience_defs import MOhm
from morphforge.core.quantities.common_neuroscience_defs import mV
from morphforge.core.quantities.common_neuroscience_defs import pA_um2
from quantities import ms, Quantity, millivolt, milliamp, picoamp
from quantities import milli, siemens, millisecond, volt, J, second
U = unit  # short alias for the unit-construction helper

# Public API of this package: re-exported unit helpers and quantity types.
__all__ = [
    'factorise_units_from_list',
    'unit',
    'NpPqWrappers',
    'common_neuroscience_defs',
    'mS', 'uS', 'nS', 'pS',
    'mF', 'uF', 'nF', 'pF',
    'um2', 'cm2', 'mm2', 'm2',
    'Molar', 'uMolar', 'nMolar',
    'ohmcm', 'MOhm',
    'mV','pA_um2',
    'ms',
    'Quantity',
    'millivolt','milliamp','picoamp',
    'milli', 'siemens',
    'millisecond',
    'volt','J','second'
]
|
[
"mikehulluk@googlemail.com"
] |
mikehulluk@googlemail.com
|
95b4ba670fad9aa6e2ada7300f4aa62646de42ef
|
897d82d4953ed7b609746a0f252f3f3440b650cb
|
/day07/exercise_personal/08_exercise.py
|
36811015b50dfbbced51178172c00201a0a3c549
|
[] |
no_license
|
haiou90/aid_python_core
|
dd704e528a326028290a2c18f215b1fd399981bc
|
bd4c7a20950cf7e22e8e05bbc42cb3b3fdbe82a1
|
refs/heads/master
| 2022-11-26T19:13:36.721238
| 2020-08-07T15:05:17
| 2020-08-07T15:05:17
| 285,857,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
# Enumerate every ordered triple of three six-sided dice (6**3 = 216 outcomes).
list_poker = [(r, c, v)
              for r in range(1, 7)
              for c in range(1, 7)
              for v in range(1, 7)]
print(list_poker)
|
[
"caoho@outlook.com"
] |
caoho@outlook.com
|
8a752594fbaede8a55376c2bb862d7962842e631
|
fa67314df981eb8c72790819ca29f45c37c52c69
|
/Assignment-1_CS16BTECH11036/Question4/Dtree.py
|
a37b07fb18afb5767de65c0ba9122cf331dccafb
|
[] |
no_license
|
omsitapara23/AML
|
5ce142751354cee72a8007ba952c55ae8a90d193
|
7d320ef6ce342590dfbce9e70d9d9fff7561939b
|
refs/heads/master
| 2020-04-20T01:45:41.095561
| 2019-03-12T17:13:57
| 2019-03-12T17:13:57
| 168,553,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,800
|
py
|
import numpy as np
import csv
import json
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from collections import Counter
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn import tree
def makeTrain(data_train):
    """Vectorize training recipes into a binary ingredient matrix.

    :param data_train: list of dicts with "cuisine" and "ingredients" keys.
    :return: (data, n_features, labels, featureVec) where data is an
             (n_recipes, n_features) 0/1 numpy array, labels the cuisine per
             recipe, and featureVec the ingredient name per column.
    """
    labels = [item.get("cuisine") for item in data_train]
    vocabulary = set()
    for item in data_train:
        vocabulary.update(item.get("ingredients"))
    # Sort the vocabulary so the column order is deterministic across runs
    # (the original iterated a raw set, whose order varies per process).
    featureVec = sorted(vocabulary)
    # O(1) ingredient -> column lookups instead of list.index() scans.
    column_of = {ingredient: col for col, ingredient in enumerate(featureVec)}
    data = np.zeros((len(data_train), len(featureVec)))
    for row, item in enumerate(data_train):
        for ingredient in item.get("ingredients"):
            data[row, column_of[ingredient]] = 1
    return data, len(featureVec), labels, featureVec
def makeTest(data_test, totAttr, featureVec):
    """Vectorize test recipes against a fixed ingredient vocabulary.

    Ingredients absent from *featureVec* are silently ignored (unseen at
    training time).

    :param data_test: list of dicts with "id" and "ingredients" keys.
    :param totAttr: number of feature columns (len of the training vocabulary).
    :param featureVec: ingredient name per column, as returned by makeTrain.
    :return: (data, ids) — the (n_recipes, totAttr) 0/1 matrix and recipe ids.
    """
    # O(1) lookups instead of `in featureVec` + list.index() scans per item.
    column_of = {ingredient: col for col, ingredient in enumerate(featureVec)}
    ids = [item.get("id") for item in data_test]
    data = np.zeros((len(data_test), totAttr))
    for row, item in enumerate(data_test):
        for ingredient in item.get("ingredients"):
            col = column_of.get(ingredient)
            if col is not None:
                data[row, col] = 1
    return data, ids
def preprocessing_data(data_train, data_test):
    """Standardize (zero mean, unit variance) both feature matrices."""
    return tuple(preprocessing.scale(split) for split in (data_train, data_test))
def learn(data_train, labels):
    """Fit and return a decision-tree classifier on the training matrix."""
    classifier = tree.DecisionTreeClassifier()
    classifier.fit(data_train, labels)
    return classifier
def test(data_test, model):
output = model.predict(data_test)
return output
def write_csv(output, ids):
    """Write predictions to Output.csv as `id,cuisine` rows.

    :param output: predicted cuisine per recipe (parallel to *ids*).
    :param ids: integer recipe ids.
    """
    # Context manager guarantees the file is closed even if a write fails
    # (the original only closed on the happy path).
    with open("Output.csv", "w") as text_file:
        text_file.write("id,cuisine\n")
        for row_id, instance in zip(ids, output):
            text_file.write("%d,%s\n" % (row_id, instance))
if __name__ == "__main__":
#opening the files
with open('train.json') as f:
data_train = json.load(f)
with open('test.json') as f1:
data_test = json.load(f1)
data_train, totAttr, labels, featureVec = makeTrain(data_train)
print "Train loaded"
data_test, ids = makeTest(data_test, totAttr, featureVec)
print "Test loaded"
print "Preprocessing..."
data_train, data_test = preprocessing_data(data_train, data_test)
print "Preprocessing complete"
print "Learning..."
model = learn(data_train, labels)
print "Model learned"
print "Predicting..."
output = test(data_test, model)
print "Predection complete writing to file..."
write_csv(output, ids)
print "Writing success"
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
828e53f2e62d6cc45ed309a2d29a4778afa6d5a6
|
057bdbd048d8b99064eb06af45d9e40beff6fe80
|
/examples/app.py
|
5726ced00da4c2d832a28e7d5bce9fbca39c9927
|
[
"MIT"
] |
permissive
|
miguelgrinberg/APIFairy
|
5a058f9763c381b765a4139366e35e579b4a1723
|
ed2c9b99e8ed8b7cd61a1b95f7f295bd2a902590
|
refs/heads/main
| 2023-07-24T14:22:21.282560
| 2023-07-15T23:01:50
| 2023-07-15T23:01:50
| 299,060,489
| 303
| 28
|
MIT
| 2023-01-05T15:49:05
| 2020-09-27T15:24:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
"""Welcome to the APIFairy Simple Example project!
## Overview
This is a short and simple example that demonstrates many of the features of
APIFairy.
"""
from typing import Annotated
from uuid import uuid4
from flask import Flask, abort
from flask_marshmallow import Marshmallow
from apifairy import APIFairy, body, response, other_responses
app = Flask(__name__)
# Title/version shown in the generated OpenAPI documentation.
app.config['APIFAIRY_TITLE'] = 'APIFairy Simple Example'
app.config['APIFAIRY_VERSION'] = '1.0'
ma = Marshmallow(app)
apifairy = APIFairy(app)

# In-memory "database": a list of user dicts, cleared on every restart.
users = []
class UserSchema(ma.Schema):
    # Marshmallow schema for user payloads: `id` is server-generated
    # (dump_only, never accepted from clients) and `password` is write-only
    # (load_only, never serialized back out).
    class Meta:
        description = 'This schema represents a user'

    id = ma.String(dump_only=True, description="The user's id")
    username = ma.String(required=True, description="The user's username")
    first_name = ma.String(description="The user's first name")
    last_name = ma.String(description="The user's last name")
    age = ma.Integer(description="The user's age")
    password = ma.String(load_only=True, description="The user's password")
@app.get('/users')
@response(UserSchema(many=True), description="The users")
def get_users():
    """Return all the users."""
    # The schema serializes each dict; passwords stay hidden (load_only).
    return users
@app.post('/users')
@body(UserSchema)
@response(UserSchema, description="The new user")
@other_responses({400: 'Duplicate username or validation error'})
def new_user(user):
    """Create a new user."""
    # Reject duplicate usernames before assigning an id.
    duplicate = any(u['username'] == user['username'] for u in users)
    if duplicate:
        abort(400)
    user['id'] = uuid4().hex
    users.append(user)
    return user
@app.get('/users/<id>')
@response(UserSchema, description="The requested user")
@other_responses({404: 'User not found'})
def get_user(id: Annotated[str, 'The id of the user']):
    """Return a user."""
    # First user with a matching id, or None if there is no match.
    found = next((u for u in users if u['id'] == id), None)
    if found is None:
        abort(404)
    return found
@app.errorhandler(400)
def bad_request(e):
    # JSON body for HTTP 400 so API clients never receive an HTML error page.
    return {'code': 400, 'error': 'bad request'}

@app.errorhandler(404)
def not_found(e):
    # JSON body for HTTP 404, mirroring the 400 handler's shape.
    return {'code': 404, 'error': 'not found'}

@apifairy.error_handler
def validation_error(status_code, messages):
    # Schema validation failures: surface the per-field messages from the
    # JSON body under "messages".
    return {'code': status_code, 'error': 'validation error',
            'messages': messages['json']}
|
[
"miguel.grinberg@gmail.com"
] |
miguel.grinberg@gmail.com
|
91c04102d7309c5dc96caf9dbaefa29ae8dc3d40
|
ecb113be53f2fe1768e85a1004d571c74d87ae8d
|
/tests/fmlaas/model/model.py
|
0621cd46327918a951006e15f1e784933fe91ece
|
[] |
no_license
|
Internet-SmokeAlarm/core
|
39351e4d5bddf19bd59faf51bbc225c0e0521905
|
87b66a10042ec41916c490bb20cb4117f3caf1ba
|
refs/heads/master
| 2023-02-17T18:40:12.822530
| 2020-07-05T20:28:38
| 2020-07-05T20:28:38
| 216,093,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,953
|
py
|
import unittest
from dependencies.python.fmlaas.s3_storage import JobAggregateModelPointer
from dependencies.python.fmlaas.model import Model
class ModelTestCase(unittest.TestCase):
    """Unit tests for Model JSON round-tripping, validation and equality."""

    def test_to_json_pass(self):
        # to_json exposes entity_id, the stringified pointer as "name", and size.
        model = Model("1234", str(JobAggregateModelPointer("4456", "5567", "1234")), "123552")
        json_data = model.to_json()
        self.assertEqual(model.get_entity_id(), json_data["entity_id"])
        self.assertEqual("4456/5567/1234/aggregate_model", json_data["name"])
        self.assertEqual(model.get_size(), json_data["size"])

    def test_from_json_pass(self):
        # from_json parses the "name" string back into a JobAggregateModelPointer.
        json_data = {
            'entity_id': '1234',
            'name': '4456/5567/1234/aggregate_model',
            'size': "123552"}
        model = Model.from_json(json_data)
        self.assertEqual(model.get_entity_id(), "1234")
        self.assertEqual(model.get_name(), JobAggregateModelPointer("4456", "5567", "1234"))
        self.assertEqual(model.get_size(), "123552")

    def test_is_valid_json_pass(self):
        # Valid only when entity_id, name AND size are all present.
        self.assertTrue(Model.is_valid_json(
            {'entity_id': '1234', 'name': '4456/5567/1234', 'size': "123552"}))
        self.assertFalse(Model.is_valid_json(
            {'name': '4456/5567/1234', 'size': "123552"}))
        self.assertFalse(Model.is_valid_json(
            {'entity_id': '1234', 'size': "123552"}))
        self.assertFalse(Model.is_valid_json(
            {'entity_id': '1234', 'name': '4456/5567/1234'}))

    def test_eq_pass(self):
        # Models are equal only when every field matches: model_1 vs model_2
        # differ solely in entity_id and compare unequal; model_2 vs model_4
        # differ solely in size and compare unequal.
        model_1 = Model("123123", "23123/123123/1231231", "12312313")
        model_2 = Model("564543", "23123/123123/1231231", "12312313")
        model_3 = Model("564543", "23123/123123/1231231", "12312313")
        model_4 = Model("564543", "23123/123123/1231231", "123512313")
        self.assertTrue(model_1 == model_1)
        self.assertFalse(model_1 == model_2)
        self.assertTrue(model_2 == model_3)
        self.assertFalse(model_2 == model_4)
|
[
"valetolpegin@gmail.com"
] |
valetolpegin@gmail.com
|
33e0f6e0f58713cd6b9e0bf434b0190abffc395a
|
a47e4480d1584c5a2bb4c31ac512c864d0c2c240
|
/core/settings.py
|
8e44faed4463d4a577291f1a56a01053b9a77cef
|
[
"MIT"
] |
permissive
|
shaymk1/ke-nako-shop
|
014bd960e2048d4e2b5cc77c0b2d99f2058208d4
|
5c6f3dfb6b1e89efe111c1c6daa21434c7843ddc
|
refs/heads/main
| 2023-08-02T08:19:31.702068
| 2021-09-20T19:21:53
| 2021-09-20T19:21:53
| 406,715,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,243
|
py
|
"""Django settings for the `core` project (development profile)."""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-twj=7-)r(w9l5r96^0xf30w$w-id1f3uo=8pqc_d6_o#d!6i!#'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps: product catalog, categories and custom auth.
    'store',
    'category',
    'accounts.apps.AccountsConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'core.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Injects category links into every template context.
                'category.context_processors.menu_links',
            ],
        },
    },
]

WSGI_APPLICATION = 'core.wsgi.application'

# saying we are using custom user model
AUTH_USER_MODEL = 'accounts.Account'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
    BASE_DIR/'static'
]
# NOTE(review): MEDIA_ROOT points at the same 'static' directory served by
# STATICFILES_DIRS — uploads will land next to static assets; confirm intended.
MEDIA_ROOT = BASE_DIR/'static'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"mkekae@gmail.com"
] |
mkekae@gmail.com
|
78b752d117f11c2a5a5d056b47227a18ba096e0b
|
185b7529d9d439a0d554db2fc7b60a1531a5a836
|
/scrappy_settings/asgi.py
|
2d14dbc27187113e3031e51b3b38ab18a5531eeb
|
[] |
no_license
|
cavidanhasanli/Scrappy_price
|
8901baeaa40beb7102042d687d405258ae20d7fe
|
b5cc50010f727ba95686d89cac29f76533d860c2
|
refs/heads/main
| 2023-03-11T00:14:56.576016
| 2021-02-16T09:58:00
| 2021-02-16T09:58:00
| 338,854,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
ASGI config for scrappy_settings project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrappy_settings.settings')
application = get_asgi_application()
|
[
"cavidan.hasanli@mail.ru"
] |
cavidan.hasanli@mail.ru
|
6b7df363e07c32497d7b6a3ae77012127a2fb79a
|
789f108a849be99052f13cdec68953266458e646
|
/nfe_mde/nfe_schedule.py
|
c0e580b66329ba0eeea7f4afc2737145ed796e5a
|
[] |
no_license
|
rick-romero/odoo-brazil-eletronic-documents
|
6ebe1b30deaa854861aa632ee62b022b8eeb2d8a
|
2a1f144612ef23b77b57b9edcf2089a2b2b3077a
|
refs/heads/8.0
| 2021-01-14T14:07:27.728313
| 2016-07-12T22:11:29
| 2016-07-12T22:11:29
| 59,238,349
| 0
| 0
| null | 2016-05-19T19:58:39
| 2016-05-19T19:58:39
| null |
UTF-8
|
Python
| false
| false
| 7,908
|
py
|
# coding=utf-8
###############################################################################
# #
# Copyright (C) 2015 Danimar Ribeiro www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import re
import base64
import logging
from lxml import objectify
from datetime import datetime
from .service.mde import distribuicao_nfe
from openerp import models, api, fields
from openerp.exceptions import Warning as UserError
from openerp.addons.nfe.sped.nfe.validator.config_check import \
validate_nfe_configuration
_logger = logging.getLogger(__name__)
class nfe_schedule(models.TransientModel):
    """Polls SEFAZ's NF-e "distribuição" web service for every company and
    records the returned documents and query outcomes.

    ``schedule_download`` is the cron-friendly entry point;
    ``execute_download`` is the manual (wizard button) entry point.
    """
    _name = 'nfe.schedule'

    # Simple wizard state flag ('init' -> 'done').
    state = fields.Selection(
        string="Estado",
        selection=[('init', 'Não iniciado'), ('done', 'Finalizado')],
        default='init'
    )

    @staticmethod
    def _mask_cnpj(cnpj):
        # Format a bare 14-digit CNPJ as XX.XXX.XXX/XXXX-XX; any other
        # length (or a falsy value) is returned unchanged.
        if cnpj:
            val = re.sub('[^0-9]', '', cnpj)
            if len(val) == 14:
                cnpj = "%s.%s.%s/%s-%s" % (val[0:2], val[2:5], val[5:8],
                                           val[8:12], val[12:14])
        return cnpj

    @api.model
    def schedule_download(self, raise_error=False):
        """Query the distribution service for each company, starting at the
        company's last processed NSU, and log every outcome as a
        ``l10n_br_account.document_event`` with the XML attached.

        :param raise_error: when True (manual run), failures are re-raised
            to the user instead of only being logged.
        """
        companies = self.env['res.company'].search([])
        for company in companies:
            try:
                validate_nfe_configuration(company)
                nfe_result = distribuicao_nfe(company, company.last_nsu_nfe)
                env_events = self.env['l10n_br_account.document_event']
                # Codes 137/138: the query was accepted by SEFAZ.
                if nfe_result['code'] == '137' or nfe_result['code'] == '138':
                    event = {
                        'type': '12', 'company_id': company.id,
                        'response': 'Consulta distribuição: sucesso',
                        'status': nfe_result['code'],
                        'message': nfe_result['message'],
                        'create_date': datetime.now(),
                        'write_date': datetime.now(),
                        'end_date': datetime.now(),
                        'state': 'done', 'origin': 'Scheduler Download'
                    }
                    obj = env_events.create(event)
                    # Attach the raw response XML to the event for auditing.
                    self.env['ir.attachment'].create(
                        {
                            'name': u"Consulta manifesto - {0}".format(
                                company.cnpj_cpf),
                            'datas': base64.b64encode(
                                nfe_result['file_returned']),
                            'datas_fname': u"Consulta manifesto - {0}".format(
                                company.cnpj_cpf),
                            'description': u'Consulta distribuição: sucesso',
                            'res_model': 'l10n_br_account.document_event',
                            'res_id': obj.id
                        })
                    env_mde = self.env['nfe.mde']
                    for nfe in nfe_result['list_nfe']:
                        # Only NF-e summary documents are imported here;
                        # other schemas are skipped.
                        if nfe['schema'] == 'resNFe_v1.00.xsd':
                            root = objectify.fromstring(nfe['xml'])
                            # CNPJ comes back as a number; zero-pad to 14
                            # digits before formatting.
                            cnpj_forn = self._mask_cnpj(('%014d' % root.CNPJ))
                            partner = self.env['res.partner'].search(
                                [('cnpj_cpf', '=', cnpj_forn)])
                            invoice_eletronic = {
                                'chNFe': root.chNFe,
                                'nSeqEvento': nfe['NSU'], 'xNome': root.xNome,
                                'tpNF': str(root.tpNF), 'vNF': root.vNF,
                                'cSitNFe': str(root.cSitNFe),
                                'state': 'pending',
                                'dataInclusao': datetime.now(),
                                'CNPJ': cnpj_forn,
                                'IE': root.IE,
                                'partner_id': partner.id,
                                'dEmi': datetime.strptime(str(root.dhEmi)[:19],
                                                          '%Y-%m-%dT%H:%M:%S'),
                                'company_id': company.id,
                                'formInclusao': u'Verificação agendada'
                            }
                            obj_nfe = env_mde.create(invoice_eletronic)
                            file_name = 'resumo_nfe-%s.xml' % nfe['NSU']
                            self.env['ir.attachment'].create(
                                {
                                    'name': file_name,
                                    'datas': base64.b64encode(nfe['xml']),
                                    'datas_fname': file_name,
                                    'description': u'NFe via manifesto',
                                    'res_model': 'nfe.mde',
                                    'res_id': obj_nfe.id
                                })
                        # Advance the high-water mark for every returned
                        # document so the same NSU is not fetched again.
                        company.last_nsu_nfe = nfe['NSU']
                else:
                    # Query rejected: log the failure with both the request
                    # and response payloads for troubleshooting.
                    event = {
                        'type': '12',
                        'response': 'Consulta distribuição com problemas',
                        'company_id': company.id,
                        'file_returned': nfe_result['file_returned'],
                        'file_sent': nfe_result['file_sent'],
                        'message': nfe_result['message'],
                        'create_date': datetime.now(),
                        'write_date': datetime.now(),
                        'end_date': datetime.now(),
                        'status': nfe_result['code'],
                        'state': 'done', 'origin': 'Scheduler Download'
                    }
                    obj = env_events.create(event)
                    self.env['ir.attachment'].create(
                        {
                            'name': u"Consulta manifesto - {0}".format(
                                company.cnpj_cpf),
                            'datas': base64.b64encode(
                                nfe_result['file_returned']),
                            'datas_fname': u"Consulta manifesto - {0}".format(
                                company.cnpj_cpf),
                            'description': u'Consulta manifesto com erro',
                            'res_model': 'l10n_br_account.document_event',
                            'res_id': obj.id
                        })
            except Exception as ex:
                # Best-effort per company: log and continue with the next one.
                _logger.error("Erro ao consultar Manifesto", exc_info=True)
                if raise_error:
                    # NOTE(review): two-argument UserError presumably mimics
                    # the old except_orm(title, message) signature — verify.
                    raise UserError(
                        u'Atenção',
                        u'Não foi possivel efetuar a consulta!\n Verifique o log')

    @api.one
    def execute_download(self):
        """Manual entry point: run the download and surface errors to the user."""
        self.schedule_download(raise_error=True)
|
[
"danimaribeiro@gmail.com"
] |
danimaribeiro@gmail.com
|
94e544a15e0e29b8f771385dfbdcefcb09413fcd
|
30467bd47c29412687a384d824655daa7400cef4
|
/examples/dockerbuild.py
|
d7f0e1ae2176bdb6508f530e8a2f4f6e916f5b3c
|
[] |
no_license
|
dpedu/shipper
|
556409843c6da888338d2a791d4f06b17c709a52
|
e5544416c2b0ee818285b9a13761f1c351d7676f
|
refs/heads/master
| 2020-05-17T17:39:45.645549
| 2019-02-03T00:59:34
| 2019-02-03T00:59:34
| 183,860,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
from shipper.lib import ShipperJob, SshConnection, GiteaCheckoutTask, LambdaTask, \
DockerBuildTask, DockerTagTask, DockerPushTask
# This job accepts gitea webooks and builds docker images. If the "imagename" parameter is passed, it will be used to
# name the image. Otherwise, a repo named "docker-image-name" would builds/pushes a docker image called "image-name".
# Assemble the build pipeline: checkout -> name derivation -> build/tag/push.
job = ShipperJob()
# All tasks share one SSH connection authenticated by the local key file.
job.default_connection(SshConnection(None, None, key="testkey.pem"))
# Only webhooks for the master branch trigger a checkout (into "code").
job.add_task(GiteaCheckoutTask("code", allow_branches=["master"]))
def getimgname(job):
    """Derive the docker image name for this build and stash it in job.props.

    An explicit "imagename" URL parameter wins; otherwise the gitea repo
    name is used, with a leading "docker-" prefix stripped (so a repo
    called "docker-nginx" builds an image called "nginx").
    """
    props = job.props
    if "imagename" in props:
        image = props["imagename"]
    else:
        repo_name = props["payload"]["repository"]["name"]
        image = repo_name[len("docker-"):] if repo_name.startswith("docker-") else repo_name
    props["docker_imagename"] = "dpedu/" + image           # local build name
    props["docker_tag"] = "apps2reg:5000/dpedu/" + image   # tag/push target
job.add_task(LambdaTask(getimgname))
# Build, tag and push in sequence — these tasks presumably consume the
# docker_imagename/docker_tag props set by getimgname; verify in shipper.lib.
job.add_task(DockerBuildTask())
job.add_task(DockerTagTask())
job.add_task(DockerPushTask())
|
[
"dave@davepedu.com"
] |
dave@davepedu.com
|
3bd68f15f1ba900bd732975bf7fe77e8c8d0874c
|
c4cfce852c59bdd65d5ab5e77021e42cb7b02ff8
|
/eng_to_kana_test/test_eng_to_kana.py
|
c7f683673655332c566e7794109d70e9fe281858
|
[
"MIT"
] |
permissive
|
yokolet/transcript
|
5749be490a7f53e907b2143696afaa592647dc59
|
4a83cc70d868bb243846ebee8c322c63c2092141
|
refs/heads/master
| 2020-05-28T09:47:27.771042
| 2019-06-15T21:46:53
| 2019-06-15T21:46:59
| 188,961,209
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from eng_to_kana.eng_to_kana import EngToKana
class TestEngToKana(unittest.TestCase):
    """Smoke tests for English -> katakana transcription."""

    def setUp(self):
        self.list_func = EngToKana().fromWordList
        self.file_func = EngToKana().fromFile

    def test_1(self):
        """Dictionary words map to their katakana readings, in input order."""
        cases = [
            ('what', ['ワット', 'ホワット']),
            ('girl', ['ガール']),
            ('cat', ['キャット']),
            ('judge', ['ジャッジ']),
            ('majority', ['マジョリティー']),
        ]
        words = [word for word, _ in cases]
        expected = [kana for _, kana in cases]
        self.assertEqual(expected, self.list_func(words))

    def test_2(self):
        """Out-of-dictionary words yield the E_DIC error marker."""
        self.assertEqual([['E_DIC']], self.list_func(['gaga']))
|
[
"yokolet@gmail.com"
] |
yokolet@gmail.com
|
4094dc115614d752fdc61bd95ecac6cfb7797367
|
7b3711d4c6d7284255ba0270d49d120f984bf7c6
|
/problems/2361_minimum_cost_using_the_train_line.py
|
546dcff817a1d4ba91ccab3ae95614fb4d2f1ff7
|
[] |
no_license
|
loganyu/leetcode
|
2d336f30feb55379aaf8bf0273d00e11414e31df
|
77c206305dd5cde0a249365ce7591a644effabfc
|
refs/heads/master
| 2023-08-18T09:43:10.124687
| 2023-08-18T00:44:51
| 2023-08-18T00:44:51
| 177,875,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,677
|
py
|
'''
A train line going through a city has two routes, the regular route and the express route. Both routes go through the same n + 1 stops labeled from 0 to n. Initially, you start on the regular route at stop 0.
You are given two 1-indexed integer arrays regular and express, both of length n. regular[i] describes the cost it takes to go from stop i - 1 to stop i using the regular route, and express[i] describes the cost it takes to go from stop i - 1 to stop i using the express route.
You are also given an integer expressCost which represents the cost to transfer from the regular route to the express route.
Note that:
There is no cost to transfer from the express route back to the regular route.
You pay expressCost every time you transfer from the regular route to the express route.
There is no extra cost to stay on the express route.
Return a 1-indexed array costs of length n, where costs[i] is the minimum cost to reach stop i from stop 0.
Note that a stop can be counted as reached from either route.
Example 1:
Input: regular = [1,6,9,5], express = [5,2,3,10], expressCost = 8
Output: [1,7,14,19]
Explanation: The diagram above shows how to reach stop 4 from stop 0 with minimum cost.
- Take the regular route from stop 0 to stop 1, costing 1.
- Take the express route from stop 1 to stop 2, costing 8 + 2 = 10.
- Take the express route from stop 2 to stop 3, costing 3.
- Take the regular route from stop 3 to stop 4, costing 5.
The total cost is 1 + 10 + 3 + 5 = 19.
Note that a different route could be taken to reach the other stops with minimum cost.
Example 2:
Input: regular = [11,5,13], express = [7,10,6], expressCost = 3
Output: [10,15,24]
Explanation: The diagram above shows how to reach stop 3 from stop 0 with minimum cost.
- Take the express route from stop 0 to stop 1, costing 3 + 7 = 10.
- Take the regular route from stop 1 to stop 2, costing 5.
- Take the express route from stop 2 to stop 3, costing 3 + 6 = 9.
The total cost is 10 + 5 + 9 = 24.
Note that the expressCost is paid again to transfer back to the express route.
Constraints:
n == regular.length == express.length
1 <= n <= 105
1 <= regular[i], express[i], expressCost <= 105
'''
class Solution:
    def minimumCosts(self, regular: List[int], express: List[int], expressCost: int) -> List[int]:
        """Return, for every stop i, the minimum cost to reach it from stop 0.

        Dynamic programming over two states per stop: the cheapest cost to
        stand at the previous stop on the regular line vs. on the express
        line.  Boarding the express line costs expressCost each time;
        returning to the regular line is free.
        """
        best_regular = 0            # cheapest way to be at stop 0 on the regular line
        best_express = expressCost  # boarding express at stop 0 costs the transfer fee
        answer = []
        for regular_fare, express_fare in zip(regular, express):
            # Both new states are computed from the *previous* stop's states,
            # hence the simultaneous tuple assignment.
            best_regular, best_express = (
                regular_fare + min(best_regular, best_express),
                express_fare + min(expressCost + best_regular, best_express),
            )
            answer.append(min(best_regular, best_express))
        return answer
|
[
"yu.logan@gmail.com"
] |
yu.logan@gmail.com
|
cc15539f09c655e2a85fd8d417d67c0477c45e87
|
a323fc11db97690c4ea50d92766d9d5db0418aac
|
/article/migrations/0020_auto_20200719_1016.py
|
ae9b1b99f0de83746e98a9195dcf7750dc6193a6
|
[] |
no_license
|
sparshjaincs/articleplus
|
ad909f937ebf856b6da87bd623af0776f8faafc3
|
0fa34a5384d8cfc52181be42c130aadd03ad8ef2
|
refs/heads/master
| 2023-08-10T23:21:44.845993
| 2021-09-30T22:29:13
| 2021-09-30T22:29:13
| 279,252,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
# Generated by Django 2.2.6 on 2020-07-19 04:46
import datetime
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Move the mute/subscribe many-to-many relations from Articles to Profile.

    NOTE: auto-generated migration — the datetime defaults below are values
    frozen at generation time (makemigrations), not "now" at migration time.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('article', '0019_auto_20200719_0951'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='articles',
            name='mute',
        ),
        migrations.RemoveField(
            model_name='articles',
            name='subscribe',
        ),
        migrations.AddField(
            model_name='profile',
            name='mute',
            field=models.ManyToManyField(blank=True, default=None, related_name='mute_title', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='profile',
            name='subscribe',
            field=models.ManyToManyField(blank=True, default=None, related_name='subscribe_title', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='activity',
            name='activity_time',
            field=models.TimeField(default=datetime.datetime(2020, 7, 19, 10, 16, 18, 77112)),
        ),
        migrations.AlterField(
            model_name='articles',
            name='time',
            field=models.TimeField(default=datetime.datetime(2020, 7, 19, 10, 16, 18, 61070)),
        ),
    ]
|
[
"sparshjaincs@gmail.com"
] |
sparshjaincs@gmail.com
|
2e0fad46c16958cbd3582723916a0ac1dda5a23e
|
0bd14d7590db43af015433edc95c101b325f2b45
|
/simple_sso/sso_server/admin.py
|
e21b92dc68b986e617291c935f1c31c63bf08af1
|
[
"BSD-3-Clause"
] |
permissive
|
chrisglass/django-simple-sso
|
21f390535c012af4bba9a1b78a23b298592611df
|
b63d37ac64450ff5a506e6b1c2e34e42109b8cd8
|
refs/heads/master
| 2020-12-25T03:22:00.352693
| 2011-08-10T15:40:09
| 2011-08-10T15:40:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from simple_sso.sso_server.models import Client

# Expose the SSO Client model in the Django admin with the default ModelAdmin.
admin.site.register(Client)
|
[
"jonas.obrist@divio.ch"
] |
jonas.obrist@divio.ch
|
0b7ff06c8aa9f6a941ff4fe8a749d7d0a028286b
|
4da0c8906c9cd671e3a4bee3a6ee801a353e3d9a
|
/Water/watres/migrations/0012_targetusewo_checkin.py
|
82f4465901163fac353bd3219380d8d3cb10db6e
|
[] |
no_license
|
avpakh/GVK
|
2a5a699caa8a986a3fd0dadbe2160fc9da5bf193
|
ac8b8d8ad5cd5ef8485e98cd532a29cd420e0cae
|
refs/heads/master
| 2020-06-13T10:35:36.663668
| 2017-01-06T09:01:42
| 2017-01-06T09:01:42
| 75,392,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-02 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `checkin` column to targetusewo.

    `default=1` only backfills existing rows; `preserve_default=False`
    removes the default from the model afterwards.
    """

    dependencies = [
        ('watres', '0011_auto_20160816_1523'),
    ]

    operations = [
        migrations.AddField(
            model_name='targetusewo',
            name='checkin',
            field=models.BooleanField(default=1),
            preserve_default=False,
        ),
    ]
|
[
"aliaksandr.pakhomau@gmail.com"
] |
aliaksandr.pakhomau@gmail.com
|
2445f62695bc503243d90b47fd380b81e2c25e92
|
3528abad46b15133b2108c237f926a1ab252cbd5
|
/Core/ableton/v2/control_surface/elements/optional.py
|
86a72e77d342aa7dee2a59777b27af577769514a
|
[] |
no_license
|
scottmudge/MPK261_Ableton
|
20f08234f4eab5ba44fde6e5e745752deb968df2
|
c2e316b8347367bd157276f143b9f1a9bc2fe92c
|
refs/heads/master
| 2020-03-20T10:56:32.421561
| 2018-06-14T19:12:47
| 2018-06-14T19:12:47
| 137,389,086
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/ableton/v2/control_surface/elements/optional.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from ...base import listens
from .combo import ToggleElement
class ChoosingElement(ToggleElement):
    u"""
    An Element wrapper that enables one of the nested elements based on
    the value of the given flag.
    """

    def __init__(self, flag=None, *a, **k):
        super(ChoosingElement, self).__init__(*a, **k)
        # Subscribe the name-mangled listener to the flag's 'value'
        # notifications, then apply the flag's current value once so the
        # wrapper starts in the right state.
        # NOTE(review): the flag=None default is unusable — flag.value below
        # would raise AttributeError; callers must always pass a flag.
        self.__on_flag_changed.subject = flag
        self.__on_flag_changed(flag.value)

    @listens('value')
    def __on_flag_changed(self, value):
        # Flip between the wrapped on/off elements whenever the flag changes.
        self.set_toggled(value)
class OptionalElement(ChoosingElement):
    u"""
    An Element wrapper that enables the nested element IFF some given
    flag is set to a specific value.
    """

    def __init__(self, control=None, flag=None, value=None, *a, **k):
        # Route the wrapped control to the "on" slot when the target value
        # is truthy, otherwise to the "off" slot.
        if value:
            on_control, off_control = control, None
        else:
            on_control, off_control = None, control
        super(OptionalElement, self).__init__(on_control=on_control, off_control=off_control, flag=flag, *a, **k)
|
[
"mail@scottmudge.com"
] |
mail@scottmudge.com
|
140475678049842dcc7a9513455b15a220182ac9
|
fe8d49331e73fe89be9195bf748159830d2c3622
|
/zerver/views/drafts.py
|
47b5c6fa242f0d66e718fb84c1ffb31a7fce178b
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
lizzzp1/zulip
|
13e1a4428b5ed6d9cdc06cb291b126ee127a03e8
|
4e8067aadc7d5a4b2644e383898c5c731740ffd5
|
refs/heads/master
| 2022-12-13T23:44:52.351757
| 2020-09-12T19:04:24
| 2020-09-12T19:04:24
| 295,025,435
| 1
| 0
|
Apache-2.0
| 2020-09-12T21:00:35
| 2020-09-12T21:00:34
| null |
UTF-8
|
Python
| false
| false
| 5,766
|
py
|
import time
from typing import Any, Dict, List, Set
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.actions import recipient_for_user_profiles
from zerver.lib.addressee import get_user_profiles_by_ids
from zerver.lib.exceptions import JsonableError
from zerver.lib.message import truncate_body, truncate_topic
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.streams import access_stream_by_id
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.validator import (
check_dict_only,
check_float,
check_int,
check_list,
check_required_string,
check_string,
check_string_in,
check_union,
)
from zerver.models import Draft, UserProfile
# "" means the draft's destination type is not yet chosen.
VALID_DRAFT_TYPES: Set[str] = {"", "private", "stream"}

# A validator to verify if the structure (syntax) of a dictionary
# meets the requirements to be a draft dictionary:
draft_dict_validator = check_dict_only(
    required_keys=[
        ("type", check_string_in(VALID_DRAFT_TYPES)),
        ("to", check_list(check_int)),  # The ID of the stream to send to, or a list of user IDs.
        ("topic", check_string),  # This string can simply be empty for private type messages.
        ("content", check_required_string),
    ],
    optional_keys=[
        ("timestamp", check_union([check_int, check_float])),  # A Unix timestamp.
    ]
)
def further_validated_draft_dict(draft_dict: Dict[str, Any],
                                 user_profile: UserProfile) -> Dict[str, Any]:
    """ Take a draft_dict that was already validated by draft_dict_validator then
    further sanitize, validate, and transform it. Ultimately return this "further
    validated" draft dict. It will have a slightly different set of keys the values
    for which can be used to directly create a Draft object. """

    content = truncate_body(draft_dict["content"])
    if "\x00" in content:
        raise JsonableError(_("Content must not contain null bytes"))

    # Default to "now"; round to microseconds so the value fits a datetime.
    timestamp = draft_dict.get("timestamp", time.time())
    timestamp = round(timestamp, 6)
    if timestamp < 0:
        # While it's not exactly an invalid timestamp, it's not something
        # we want to allow either.
        raise JsonableError(_("Timestamp must not be negative."))
    last_edit_time = timestamp_to_datetime(timestamp)

    topic = ""
    recipient = None
    to = draft_dict["to"]
    if draft_dict["type"] == "stream":
        topic = truncate_topic(draft_dict["topic"])
        if "\x00" in topic:
            raise JsonableError(_("Topic must not contain null bytes"))
        if len(to) != 1:
            raise JsonableError(_("Must specify exactly 1 stream ID for stream messages"))
        # Presumably also rejects streams the user cannot access — verify.
        stream, recipient, sub = access_stream_by_id(user_profile, to[0])
    elif draft_dict["type"] == "private" and len(to) != 0:
        to_users = get_user_profiles_by_ids(set(to), user_profile.realm)
        try:
            recipient = recipient_for_user_profiles(to_users, False, None, user_profile)
        except ValidationError as e:  # nocoverage
            raise JsonableError(e.messages[0])

    # Keys map directly onto Draft model fields.
    return {
        "recipient": recipient,
        "topic": topic,
        "content": content,
        "last_edit_time": last_edit_time,
    }
def fetch_drafts(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """Return every draft belonging to the user, oldest first, keyed by ID."""
    queryset = Draft.objects.filter(user_profile=user_profile).order_by("last_edit_time")
    drafts_by_id = {}
    for draft in queryset:
        drafts_by_id[str(draft.id)] = draft.to_dict()
    return json_success({"count": queryset.count(), "drafts": drafts_by_id})
@has_request_variables
def create_drafts(request: HttpRequest, user_profile: UserProfile,
                  draft_dicts: List[Dict[str, Any]]=REQ("drafts",
                                                        validator=check_list(draft_dict_validator)),
                  ) -> HttpResponse:
    """Bulk-create drafts for the user; returns the new draft IDs in order.

    Validation is all-or-nothing: every draft dict is validated before any
    row is written (the bulk_create happens only after the loop completes).
    """
    draft_objects = []
    for draft_dict in draft_dicts:
        valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)
        draft_objects.append(Draft(
            user_profile=user_profile,
            recipient=valid_draft_dict["recipient"],
            topic=valid_draft_dict["topic"],
            content=valid_draft_dict["content"],
            last_edit_time=valid_draft_dict["last_edit_time"],
        ))

    created_draft_objects = Draft.objects.bulk_create(draft_objects)
    draft_ids = [draft_object.id for draft_object in created_draft_objects]
    return json_success({"ids": draft_ids})
@has_request_variables
def edit_draft(request: HttpRequest, user_profile: UserProfile, draft_id: int,
               draft_dict: Dict[str, Any]=REQ("draft", validator=draft_dict_validator),
               ) -> HttpResponse:
    """Overwrite an existing draft of the user; 404 if it does not exist
    (or belongs to another user — the lookup is scoped to user_profile)."""
    try:
        draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)
    except Draft.DoesNotExist:
        return json_error(_("Draft does not exist"), status=404)

    valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)
    draft_object.content = valid_draft_dict["content"]
    draft_object.topic = valid_draft_dict["topic"]
    draft_object.recipient = valid_draft_dict["recipient"]
    draft_object.last_edit_time = valid_draft_dict["last_edit_time"]
    draft_object.save()

    return json_success()
def delete_draft(request: HttpRequest, user_profile: UserProfile, draft_id: int) -> HttpResponse:
    """Delete one of the user's own drafts; 404 when no matching draft exists."""
    try:
        users_draft = Draft.objects.get(id=draft_id, user_profile=user_profile)
    except Draft.DoesNotExist:
        return json_error(_("Draft does not exist"), status=404)
    users_draft.delete()
    return json_success()
|
[
"tabbott@zulip.com"
] |
tabbott@zulip.com
|
a83a11d7de133095f348d5920113cb836562415e
|
8e95e79840005f6c34dfb978e8fe6e0ec4f7f643
|
/7_Image Processing in Python_/29_Edges.py
|
f4953bd068a7dbf38dcc7620a093d0b9b0858f0d
|
[] |
no_license
|
Naysla/Machine_Learning
|
a0593cac41ef1561f14bec55780570b82fc37720
|
e75d5cd2894ccb005228ab3da87dde9025385a08
|
refs/heads/master
| 2023-02-01T17:19:32.413609
| 2020-12-22T20:36:45
| 2020-12-22T20:36:45
| 323,708,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
# Edges
# In this exercise you will identify the shapes in a grapefruit image by
# detecting the edges, using the Canny algorithm.
# Image preloaded as grapefruit.
# The color module has already been preloaded for you.
# NOTE(review): `grapefruit`, `color` and `show_image` are supplied by the
# exercise environment — this snippet is not runnable standalone.

# Import the canny edge detector
from skimage.feature import canny

# Convert image to grayscale (canny works on 2-D intensity images)
grapefruit = color.rgb2gray(grapefruit)

# Apply canny edge detector
canny_edges = canny(grapefruit)

# Show resulting image
show_image(canny_edges, "Edges with Canny")

# You can see the shapes and details of the grapefruits of the original image being highlighted.
|
[
"60472499+Naysla@users.noreply.github.com"
] |
60472499+Naysla@users.noreply.github.com
|
b1c806080769dbbd96a828a4f775b7cd730fbd53
|
8eeef7742573a8b671648d94e448d5614272c5d6
|
/core2web/week2/day7/printNumber.py
|
33b2619d33b2dfbf88f662253e9577e0f68a5cc6
|
[] |
no_license
|
damodardikonda/Python-Basics
|
582d18bc9d003d90b1a1930c68b9b39a85778ea7
|
fd239722fc6e2a7a02dae3e5798a5f1172f40378
|
refs/heads/master
| 2023-01-28T16:22:19.153514
| 2020-12-11T06:36:49
| 2020-12-11T06:36:49
| 270,733,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
"""
Program 1: Write a program that accepts an integer from user and print it.
Input: 11
Output: 11
"""
v=(int)(input("enter the number"))
print("output",v)
|
[
"damodar2dikonda@gmail.com"
] |
damodar2dikonda@gmail.com
|
b87d3c6c3e1f49c4c0cfbc2f7d0ecab4016fc060
|
fb2cc597f319380d228fc15c4008760a82203687
|
/var/spack/repos/builtin/packages/py-linear-operator/package.py
|
8133edf5144a33322dd069c679c2a2a0f9be91e9
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only"
] |
permissive
|
JayjeetAtGithub/spack
|
c41b5debcbe139abb2eab626210505b7f930d637
|
6c2df00443a2cd092446c7d84431ae37e64e4296
|
refs/heads/develop
| 2023-03-21T02:35:58.391230
| 2022-10-08T22:57:45
| 2022-10-08T22:57:45
| 205,764,532
| 0
| 0
|
MIT
| 2019-09-02T02:44:48
| 2019-09-02T02:44:47
| null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyLinearOperator(PythonPackage):
    """A linear operator implementation, primarily designed for finite-dimensional
    positive definite operators (i.e. kernel matrices)."""

    homepage = "https://github.com/cornellius-gp/linear_operator/"
    pypi = "linear_operator/linear_operator-0.1.1.tar.gz"

    version("0.1.1", sha256="81adc1aea9e98f3c4f07f5608eb77b689bc61793e9beebfea82155e9237bf1be")

    # Build/runtime requirements — presumably mirroring the upstream
    # packaging metadata for 0.1.1; verify against the sdist when bumping.
    depends_on("python@3.8:", type=("build", "run"))
    depends_on("py-setuptools", type="build")
    depends_on("py-setuptools-scm", type="build")
    depends_on("py-torch@1.11:", type=("build", "run"))
    depends_on("py-scipy", type=("build", "run"))
|
[
"noreply@github.com"
] |
JayjeetAtGithub.noreply@github.com
|
6b89749fe8823ae962abbaa45373e75891ef3212
|
15e6385746ccf4b8eb6c6e302aca236021bb8781
|
/LintcodePartII/li405_submatrixSum.py
|
443777a2c60f7562a7b839a401d41945fa35145d
|
[] |
no_license
|
akb46mayu/Data-Structures-and-Algorithms
|
11c4bbddc9b4d286e1aeaa9481eb6a620cd54746
|
de98494e14fff3e2a468da681c48d60b4d1445a1
|
refs/heads/master
| 2021-01-12T09:51:32.618362
| 2018-05-16T16:37:18
| 2018-05-16T16:37:18
| 76,279,268
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
"""
Given an integer matrix, find a submatrix where the sum of numbers is zero.
Your code should return the coordinate of the left-up and right-down number.
Have you met this question in a real interview? Yes
Example
Given matrix
[
[1 ,5 ,7],
[3 ,7 ,-8],
[4 ,-8 ,9],
]
return [(1,1), (2,2)]
"""
class Solution:
    # @param {int[][]} matrix an integer matrix
    # @return {int[][]} the coordinate of the left-up and right-down number
    def submatrixSum(self, matrix):
        """Find a submatrix summing to zero; return [[top, left], [bottom, right]].

        Column prefix sums reduce every (top, bottom) row pair to the 1-D
        "zero-sum subarray" problem, solved with a running-sum hash map:
        O(m^2 * n) time, O(m * n) space.  Returns [[0, 0], [0, 0]] when no
        such submatrix exists (or the input is empty).
        """
        if not matrix:
            return [[0, 0], [0, 0]]
        m, n = len(matrix), len(matrix[0])
        # psum[i][j] = sum of matrix[0..i][j] (prefix sum down each column).
        psum = [[0] * n for _ in range(m)]
        for i in range(m):
            for j in range(n):
                if i == 0:
                    psum[i][j] = matrix[i][j]
                else:
                    psum[i][j] = psum[i - 1][j] + matrix[i][j]
        for top in range(m):
            for bottom in range(top, m):
                # Maps a running column-sum to the column where it was last
                # seen; seeing the same sum twice means the columns between
                # the two positions add up to zero.
                # (Renamed from `dict`, which shadowed the builtin.)
                seen_at = {0: -1}
                running = 0
                for j in range(n):
                    col_sum = psum[bottom][j] - psum[top - 1][j] if top >= 1 else psum[bottom][j]
                    running += col_sum
                    if running in seen_at:
                        return [[top, seen_at[running] + 1], [bottom, j]]
                    seen_at[running] = j
        return [[0, 0], [0, 0]]
|
[
"noreply@github.com"
] |
akb46mayu.noreply@github.com
|
2ae16a9e9e78108fc155c5ad03fae33bc317ad74
|
d63222abe326a3c8debd59bb8d24cb7eab3de09e
|
/leetcode/mock-interviews/reorganize_string/solve2.py
|
457e7f2208383c86f7b71461edd8321eeb4e2c1e
|
[] |
no_license
|
tariqrahiman/pyComPro
|
91f47e93eb0a077d489659fcf0a75d5c1a65fc17
|
86ec13f47506a2495ab6b6bbb58d4e8b2a21538b
|
refs/heads/master
| 2022-02-10T04:15:40.194828
| 2019-06-16T10:22:38
| 2019-06-16T10:22:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 948
|
py
|
class Solution(object):
    def reorganizeString(self, S):
        """Rearrange S (lowercase letters) so no two adjacent chars are equal.

        Greedy pairing of the least-frequent letters; returns " " when the
        arrangement is impossible (kept from the original — note LeetCode's
        spec expects "" for that case).

        Fixes:
        - the frequency table only covered 25 letters (range(25)), so any
          'z' in the input raised IndexError; it now covers all 26;
        - removed leftover Python-2 debug `print` statements (which were
          also syntax errors on Python 3) and replaced xrange with range.
        """
        # [count, letter_index] pairs for 'a'..'z' (was range(25): 'z' crashed).
        count_letter = [[0, i] for i in range(26)]
        for char in S:
            count_letter[ord(char) - 97][0] += 1
        # Most frequent first; drop letters that never occur.
        count_letter.sort(reverse=True)
        count_letter = [k for k in count_letter if k[0] > 0]
        res = [""]

        def decrease(index):
            # Emit one copy of the letter at `index`; drop it when exhausted.
            res[0] += chr(count_letter[index][1] + 97)
            count_letter[index][0] -= 1
            if count_letter[index][0] == 0:
                del count_letter[index]

        while len(count_letter) > 1:
            i = len(count_letter) - 1
            while i > 0:
                # Interleave the two least-frequent remaining letters.
                for _ in range(count_letter[i][0]):
                    decrease(i)
                    decrease(i - 1)
                i -= 2
        if len(count_letter) == 1:
            if count_letter[0][0] != 1:
                return " "
            return chr(count_letter[0][1] + 97) + res[0]
        return res[0]
|
[
"alexsolbiati@hotmail.it"
] |
alexsolbiati@hotmail.it
|
185c7b7f95c8487e2f85422f38c93095e8bd3438
|
3f36a8e71ea13a135467ea64367d6e3358333f74
|
/movie_details.py
|
b88daf458d68088e861cd4d0c53e98e1ee709f51
|
[
"MIT"
] |
permissive
|
gorpo/Exemplos-Python
|
4257873af5a23b79d51cc60e8ea84185b7e299c4
|
2cc11e0604d83c4f0a46645ceef0b209e467e6e6
|
refs/heads/master
| 2023-03-09T00:24:27.404626
| 2020-08-24T04:49:59
| 2020-08-24T04:49:59
| 264,974,378
| 4
| 4
|
MIT
| 2021-02-26T02:53:36
| 2020-05-18T15:02:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
import urllib.request
import mechanize
from bs4 import BeautifulSoup
# Scrape IMDB for a movie title entered on stdin and print its details.
# Create a Browser
browser = mechanize.Browser()
# Disable loading robots.txt
browser.set_handle_robots(False)
# Present a legacy desktop User-Agent string for the request headers.
browser.addheaders = [('User-agent',
                       'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98;)')]
movie_title = input("Enter movie title: ")
# Every IMDB title-type checkbox to tick on the advanced search form.
movie_types = ('feature', 'tv_movie', 'tv_series', 'tv_episode', 'tv_special',
               'tv_miniseries', 'documentary', 'video_game', 'short', 'video', 'tv_short')
# Navigate
browser.open('http://www.imdb.com/search/title')
# Choose a form
browser.select_form(nr=1)
browser['title'] = movie_title
# Check all the boxes of movie types
for m_type in movie_types:
    browser.find_control(type='checkbox', nr=0).get(m_type).selected = True
# Submit
fd = browser.submit()
soup = BeautifulSoup(fd.read(), 'html5lib')
# Updated from td tag to h3 tag
# Only the first search result is followed (limit=1).
for div in soup.findAll('h3', {'class': 'lister-item-header'}, limit=1):
    a = div.findAll('a')[0]
    hht = 'http://www.imdb.com' + a.attrs['href']
    print(hht)
    page = urllib.request.urlopen(hht)
    soup2 = BeautifulSoup(page.read(), 'html.parser')
    find = soup2.find
    # Pull the itemprop-tagged fields off the title page and print them.
    print("Title: " + find(itemprop='name').get_text().strip())
    print("Duration: " + find(itemprop='duration').get_text().strip())
    print("Director: " + find(itemprop='director').get_text().strip())
    print("Genre: " + find(itemprop='genre').get_text().strip())
    print("IMDB rating: " + find(itemprop='ratingValue').get_text().strip())
    print("Summary: " + find(itemprop='description').get_text().strip())
|
[
"noreply@github.com"
] |
gorpo.noreply@github.com
|
8b4b8b5b6d763fd2a7db57022a79bde58116674a
|
9692a20a1e7a224a72785e4495f31421639b9f3b
|
/frex/stores/sparql_queryable.py
|
ca7711df4642f8043fbcf8e36450204ce9c9d5df
|
[] |
no_license
|
solashirai/FREx
|
6b0cb040930761a0e269f4591d7dde36e3f636d1
|
36ad09a0cb0020661ee990c7800bafd110e2ec04
|
refs/heads/master
| 2023-08-14T08:49:49.270281
| 2021-09-29T14:58:23
| 2021-09-29T14:58:23
| 291,760,109
| 0
| 0
| null | 2021-09-24T22:41:19
| 2020-08-31T15:57:47
|
Python
|
UTF-8
|
Python
| false
| false
| 526
|
py
|
from abc import ABC, abstractmethod
from rdflib.query import Result
class SparqlQueryable(ABC):
    """
    SparqlQueryable is the base class for stores that can be queried in some way using SPARQL queries.
    """
    @abstractmethod
    def query(self, *, sparql: str) -> Result:
        """
        Query the sparql queryable and retrieve a result.
        :param sparql: A string containing valid SPARQL to query.
        :return: A Result containing the result from calling the SPARQL query.
        """
        # Concrete subclasses must execute the query and return an
        # rdflib.query.Result; the keyword-only signature forces callers
        # to name the sparql argument explicitly.
        pass
|
[
"solashakashirai@gmail.com"
] |
solashakashirai@gmail.com
|
58bcf3d3d7a9e42fa01ca8b29a710f6e81cfde90
|
b086a1caa4e3457c1faa0889d7a7291e653a0248
|
/tests/test_decontaminate.py
|
ed588a42754b1f20fbaafdd1f11bfdc4e4ef65af
|
[
"MIT"
] |
permissive
|
hover2pi/specialsoss
|
a29381bbfcf7cc15a82e0aba8e607b99192dc48f
|
6afde9fbd83bb33afa9e606e681c330b64e64aa2
|
refs/heads/master
| 2023-01-12T19:22:03.636104
| 2022-11-30T18:51:16
| 2022-11-30T18:51:16
| 152,112,781
| 1
| 1
|
MIT
| 2022-12-26T20:46:35
| 2018-10-08T16:36:32
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 500
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `decontaminate` module."""
import unittest
from pkg_resources import resource_filename
from specialsoss import decontaminate
class TestDecontaminate(unittest.TestCase):
    """Test functions in decontaminate.py"""
    def setUp(self):
        """Build in-memory image fixtures: a 2-D frame, a 3-D and a 4-D TSO stack."""
        # Bug fix: the original referenced `np` without ever importing numpy,
        # so setUp raised NameError before any test could run.  A local
        # import keeps the fix self-contained within this block.
        import numpy as np
        # Get files for testing
        self.frame = np.ones((256, 2048))
        self.tso3d = np.ones((4, 256, 2048))
        self.tso4d = np.ones((2, 2, 256, 2048))
|
[
"jfilippazzo@stsci.edu"
] |
jfilippazzo@stsci.edu
|
24b225f065ed151eb22a92e8b8d904ab8f8a5b5d
|
ad01faab6dd663dc5193eb8383fdc2d24c2df23d
|
/_flask/_flask/src/models.py
|
65bcffc85e3d1501298f09252d2b8c292996163d
|
[] |
no_license
|
jurgeon018/snippets
|
585db91b8120076b37deaa37393b34f7c61fec66
|
e0ab24a99791c3b25422a3208f02919cf98ca084
|
refs/heads/master
| 2023-05-14T12:31:48.139452
| 2023-01-23T03:33:41
| 2023-01-23T03:33:41
| 222,001,233
| 0
| 0
| null | 2023-05-01T22:16:48
| 2019-11-15T20:51:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,368
|
py
|
from flask_security import UserMixin, RoleMixin
from datetime import datetime
import re
from app import db
def slugify(s):
    r"""Convert *s* to a URL-safe slug.

    Any run of non-word characters (spaces, punctuation, etc.) collapses
    into a single hyphen.  Bug fix: the original pattern ``[^\w+]`` put
    ``+`` *inside* the negated character class, which left literal ``+``
    characters in the slug and replaced separators one-by-one (producing
    runs of hyphens) instead of per run.
    """
    return re.sub(r'[^\w]+', '-', str(s))
# Association table for the many-to-many Post <-> Tag relationship.
post_tags = db.Table(
    'post_tags',
    db.Column('post_id', db.Integer, db.ForeignKey('post.id')),
    db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
)
# Association table for the many-to-many User <-> Role relationship
# (used by the Flask-Security mixins below).
roles_users = db.Table(
    'roles_users',
    db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
    db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
)
class SaveMixin:
    # Convenience mixin: persist the instance with a single call.
    def save(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but ignored here.
        db.session.add(self)
        db.session.commit()
class User(db.Model, SaveMixin, UserMixin):
    # Application user account (Flask-Security compatible via UserMixin).
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(100), unique=True)
    # NOTE(review): stored as a plain 100-char column; hashing (if any)
    # must happen before assignment -- not visible in this file.
    password = db.Column(db.String(100))
    active = db.Column(db.Boolean())
    # Many-to-many link to Role through the roles_users table.
    roles = db.relationship('Role',
                            secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
class Role(db.Model, SaveMixin, RoleMixin):
    # Authorization role (Flask-Security compatible via RoleMixin).
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True)
    description = db.Column(db.String(255))
class Post(db.Model, SaveMixin):
    # Blog post with an auto-generated slug and many-to-many tags.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(140))
    slug = db.Column(db.String(140), unique=True)
    body = db.Column(db.Text())
    created = db.Column(db.DateTime, default=datetime.now)
    tags = db.relationship('Tag',
                           secondary=post_tags,
                           backref=db.backref('posts'),
                           lazy='dynamic')
    def __init__(self, *args, **kwargs):
        super(Post, self).__init__(*args, **kwargs)
        # Derive the slug from the title as soon as the post is constructed.
        self.generate_slug()
    def generate_slug(self):
        # Only (re)compute the slug when a title is present.
        if self.title:
            self.slug = slugify(self.title)
    def __repr__(self):
        # NOTE(review): the repr string is missing its closing '>' -- left as-is.
        return '<Post id: {}, title: {}'.format(self.id, self.title)
class Tag(db.Model, SaveMixin):
    # Label attachable to posts through the post_tags association table.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))
    slug = db.Column(db.String(100))
    def __init__(self, *args, **kwargs):
        super(Tag, self).__init__(*args, **kwargs)
        # Unlike Post, the slug is derived unconditionally from `name`.
        self.slug = slugify(self.name)
    def __repr__(self):
        return '<Tag id: {}, name: {}>'.format(self.id, self.name)
|
[
"jurgeon018@gmail.com"
] |
jurgeon018@gmail.com
|
552ab2bbd2ef44a5026c219a56b2ffd8ce677ca4
|
c73fc798764f40ea6fa466a573fb01223e367ce3
|
/recursion/dequeue.py
|
0cb394faf20083a3b1185caeaf1124bf8907044b
|
[] |
no_license
|
mohitsh/python_work
|
b1385f62104aa6b932f5452ca5c2421526345455
|
223a802dea5cdb73f44a159856c7432983655668
|
refs/heads/master
| 2020-04-24T00:34:15.427060
| 2018-08-21T19:12:07
| 2018-08-21T19:12:07
| 37,491,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
'''
in this deque example I have considered position 0 to be FRONT
in other deque exmaples like Palindrome checker last element has
been considered as front.
Usually first element is considered rear and last one is considered
front.
I have to manipulate 0 twice to make this code compatible with 0 as
rear and last as Front
so no worries!
'''
class Deque:
    # List-backed double-ended queue.  Index 0 is treated as the FRONT
    # (per the header note above); insert(0, ...) / pop(0) are O(n), so
    # this is a teaching implementation, not a fast one.
    def __init__(self):
        self.items = []
    def size(self):
        # Number of stored items.
        return len(self.items)
    def isEmpty(self):
        return self.items == []
    def addFront(self,item):
        self.items.insert(0,item)
    def addRear(self,item):
        self.items.append(item)
    def removeFront(self):
        return self.items.pop(0)
    def removeRear(self):
        return self.items.pop()
    def show(self):
        # NOTE: Python 2 print statement.
        print self.items
# Demo: exercise the deque from both ends (Python 2 print statements).
d = Deque()
print d.isEmpty()
print d.size()
# Push 1, 2, 3 at the front -> items become [3, 2, 1].
d.addFront(1)
d.addFront(2)
d.addFront(3)
print d.isEmpty()
print d.size()
d.show()
# Push 10, 11, 12 at the rear.
d.addRear(10)
d.addRear(11)
d.addRear(12)
d.show()
# Drain three from the front, then three from the rear.
print d.removeFront()
print d.removeFront()
print d.removeFront()
d.show()
print d.removeRear()
print d.removeRear()
print d.removeRear()
d.show()
|
[
"mohitsh114@gmail.com"
] |
mohitsh114@gmail.com
|
8939cb11b44574e3ae4666bb7ed1698550d192c4
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5756407898963968_0/Python/eding/A-small-code.py
|
e5ebfa779d5c7ca729204629dbea0f829a594e03
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
import codecs
import sys
# Each answer block in the input holds 4 rows of cards.
N_ROWS = 4
def main():
    # NOTE(review): Python 2 code (xrange; map() result used as a list).
    # The input file (first CLI argument) is read with utf-8-sig to strip a BOM.
    file = codecs.open(sys.argv[1], "r", "utf-8-sig")
    lines = [line.strip() for line in file]
    T = int(lines[0])
    cards1 = []
    cards2 = []
    index = 1
    for trial in xrange(0,T):
        # ans is the chosen row number; the cards of that row sit ans lines
        # below the answer line.  Each arrangement block spans 5 lines.
        ans1 = int(lines[index])
        cards1 = map(int, lines[index+ans1].split())
        index += 5
        ans2 = int(lines[index])
        cards2 = map(int, lines[index+ans2].split())
        index += 5
        # Cards present in both chosen rows determine the outcome.
        intersect = [card for card in cards1 if card in cards2]
        sys.stdout.write("Case #%d: " % (trial+1))
        if len(intersect) < 1:
            sys.stdout.write("Volunteer cheated!\n")
        elif len(intersect) == 1:
            sys.stdout.write("%d\n" % intersect[0])
        elif len(intersect) > 1:
            sys.stdout.write("Bad magician!\n")
if __name__ == '__main__':
    main()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
90bd6692ba1c920aebf545909f10a2d5fe660622
|
c8036cb365243439b4a3593124eafdfba933a034
|
/src/loss/normal_6_class.py
|
445ac4273311e941f342bfc5794d5eeaf8cc2e37
|
[] |
no_license
|
koike-ya/rsna
|
3a1150dc878bde6320ae4c1d965675460dd7de0d
|
c88c45cfa280b47f0fb48cc9df88954f83a551b4
|
refs/heads/master
| 2022-03-16T00:36:55.846905
| 2019-11-02T00:49:15
| 2019-11-02T00:49:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,715
|
py
|
# Input locations (stage 1 competition data) and training hyperparameters.
dir_csv = '../../input/'
dir_train_img = '../../input/stage_1_train_pngs/'
dir_test_img = '../../input/stage_1_test_pngs/'
# Parameters
n_classes = 6
n_epochs = 5
batch_size = 32
import glob
import os
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import pydicom
import torch
import torch.optim as optim
from albumentations import Compose, ShiftScaleRotate, CenterCrop, HorizontalFlip, RandomBrightnessContrast
from albumentations.pytorch import ToTensor
from skimage.transform import resize
from torch.utils.data import Dataset
from tqdm import tqdm as tqdm
from apex import amp
# CT windowing defaults (center/width); presumably for the DICOM windowing
# helpers below -- not referenced in the visible code, TODO confirm.
CT_LEVEL = 40
CT_WIDTH = 150
def rescale_pixelarray(dataset):
    """Apply a DICOM dataset's linear rescale to its pixel array.

    Multiplies by RescaleSlope, adds RescaleIntercept, and clamps every
    value below -1024 up to -1024 (done in place on the rescaled array).
    """
    hu = dataset.pixel_array * dataset.RescaleSlope + dataset.RescaleIntercept
    np.maximum(hu, -1024, out=hu)
    return hu
def set_manual_window(hu_image, custom_center, custom_width):
    """Clamp *hu_image* in place to the window centered at *custom_center*.

    Values below center - width/2 and above center + width/2 are saturated
    to the respective bound; the (mutated) input array is returned.
    """
    half_width = custom_width / 2
    lower, upper = custom_center - half_width, custom_center + half_width
    np.clip(hu_image, lower, upper, out=hu_image)
    return hu_image
class IntracranialDataset(Dataset):
    # PyTorch dataset over pre-rendered PNG slices listed in a CSV.
    # The DICOM/Hounsfield helpers below serve the (currently commented-out)
    # path that loads raw DICOMs instead of PNGs.
    def __init__(self, csv_file, data_dir, labels, ct_level=0, ct_width=0, transform=None):
        self.data_dir = data_dir
        self.data = pd.read_csv(csv_file)
        self.transform = transform
        # Whether label columns are present (True for train, False for test).
        self.labels = labels
        self.level = ct_level
        self.width = ct_width
        self.nn_input_shape = (224, 224)
    def __len__(self):
        return len(self.data)
    def resize(self, image):
        # skimage resize to the fixed network input shape.
        image = resize(image, self.nn_input_shape)
        return image
    def fill_channels(self, image):
        # Replicate a single-channel image into 3 channels.
        filled_image = np.stack((image,)*3, axis=-1)
        return filled_image
    def _get_hounsfield_window(self, dicom):
        # Rescale raw pixels, then clamp to the configured CT window.
        hu_image = rescale_pixelarray(dicom)
        windowed_image = set_manual_window(hu_image, self.level, self.width)
        return windowed_image
    def _load_dicom_to_image(self, file_path):
        dicom = pydicom.dcmread(file_path)
        windowed_image = self._get_hounsfield_window(dicom)
        image = self.fill_channels(self.resize(windowed_image))
        return image
    def __getitem__(self, idx):
        file_path = os.path.join(self.data_dir, self.data.loc[idx, 'Image'] + '.png')
        from pathlib import Path
        if not Path(file_path).is_file():
            # NOTE(review): skipping forward on a missing file can walk past
            # the end of the dataframe (error at idx == len) -- confirm.
            return self.__getitem__(idx + 1)
        # img = self._load_dicom_to_image(file_path)
        img = cv2.imread(file_path)
        if self.transform:
            augmented = self.transform(image=img)
            img = augmented['image']
        if self.labels:
            # Six-way multi-label target: five hemorrhage subtypes plus 'any'.
            labels = torch.tensor(
                self.data.loc[idx, ['epidural', 'intraparenchymal', 'intraventricular', 'subarachnoid', 'subdural', 'any']])
            return {'image': img, 'labels': labels}
        else:
            return {'image': img}
# # CSV
# In[7]:
# CSVs
if __name__ == '__main__':
    # --- Build per-image train/test CSV indexes (one-time preprocessing) ---
    # NOTE(review): the existence check looks at '../../src/train.csv' but
    # the files are written to the current directory ('train.csv') -- confirm
    # the intended cache location.
    if not Path('../../src/train.csv').is_file():
        train = pd.read_csv(os.path.join(dir_csv, 'stage_1_train.csv'))
        test = pd.read_csv(os.path.join(dir_csv, 'stage_1_sample_submission.csv'))
        # Split train out into row per image and save a sample
        train[['ID', 'Image', 'Diagnosis']] = train['ID'].str.split('_', expand=True)
        train = train[['Image', 'Diagnosis', 'Label']]
        train.drop_duplicates(inplace=True)
        train = train.pivot(index='Image', columns='Diagnosis', values='Label').reset_index()
        train['Image'] = 'ID_' + train['Image']
        train.head()
        # Some files didn't contain legitimate images, so we need to remove them
        png = glob.glob(os.path.join(dir_train_img, '*.png'))
        png = [os.path.basename(png)[:-4] for png in png]
        png = np.array(png)
        train = train[train['Image'].isin(png)]
        train.to_csv('train.csv', index=False)
        # Also prepare the test data
        test[['ID','Image','Diagnosis']] = test['ID'].str.split('_', expand=True)
        test['Image'] = 'ID_' + test['Image']
        test = test[['Image', 'Label']]
        test.drop_duplicates(inplace=True)
        test.to_csv('test.csv', index=False)
    # Data loaders
    # Train gets flips/brightness/shift-scale-rotate augmentation; test only crops.
    transform_train = Compose([CenterCrop(200, 200),
                               #Resize(224, 224),
                               HorizontalFlip(),
                               RandomBrightnessContrast(),
                               ShiftScaleRotate(),
                               ToTensor()
                               ])
    transform_test= Compose([CenterCrop(200, 200),
                             #Resize(224, 224),
                             ToTensor()
                             ])
    train_dataset = IntracranialDataset(
        csv_file='train.csv', data_dir=dir_train_img, transform=transform_train, labels=True)
    test_dataset = IntracranialDataset(
        csv_file='test.csv', data_dir=dir_test_img, transform=transform_test, labels=False)
    # NOTE(review): shuffle=False on the *training* loader -- confirm intended.
    data_loader_train = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
    data_loader_test = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
    device = torch.device("cuda:0")
    # device = torch.device("cpu")
    # model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
    model = torch.hub.load('pytorch/vision', 'shufflenet_v2_x1_0', pretrained=True)
    # Replace the classifier head with an n_classes-way output.
    model.fc = torch.nn.Linear(1024, n_classes)
    model.to(device)
    criterion = torch.nn.BCEWithLogitsLoss()
    plist = [{'params': model.parameters(), 'lr': 2e-5}]
    optimizer = optim.Adam(plist, lr=2e-5)
    # Mixed-precision training via NVIDIA apex (opt level O1).
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    for epoch in range(n_epochs):
        print('Epoch {}/{}'.format(epoch, n_epochs - 1))
        print('-' * 10)
        model.train()
        tr_loss = 0
        tk0 = tqdm(data_loader_train, desc="Iteration")
        for step, batch in enumerate(tk0):
            inputs = batch["image"]
            labels = batch["labels"]
            inputs = inputs.to(device, dtype=torch.float)
            labels = labels.to(device, dtype=torch.float)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            # Scale the loss for mixed precision before backprop.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            # loss.backward()
            tr_loss += loss.item()
            optimizer.step()
            optimizer.zero_grad()
            # Epoch index 1 is cut short after 6000 steps.
            if epoch == 1 and step > 6000:
                epoch_loss = tr_loss / 6000
                print('Training Loss: {:.4f}'.format(epoch_loss))
                break
        epoch_loss = tr_loss / len(data_loader_train)
        print('Training Loss: {:.4f}'.format(epoch_loss))
    # --- Inference: freeze weights and collect sigmoid probabilities,
    # flattened to one row per (image, class) pair for the submission. ---
    for param in model.parameters():
        param.requires_grad = False
    model.eval()
    test_pred = np.zeros((len(test_dataset) * n_classes, 1))
    for i, x_batch in enumerate(tqdm(data_loader_test)):
        x_batch = x_batch["image"]
        x_batch = x_batch.to(device, dtype=torch.float)
        with torch.no_grad():
            pred = model(x_batch)
            test_pred[(i * batch_size * n_classes):((i + 1) * batch_size * n_classes)] = torch.sigmoid(
                pred).detach().cpu().reshape((len(x_batch) * n_classes, 1))
    # Submission
    submission = pd.read_csv(os.path.join(dir_csv, 'stage_1_sample_submission.csv'))
    submission = pd.concat([submission.drop(columns=['Label']), pd.DataFrame(test_pred)], axis=1)
    submission.columns = ['ID', 'Label']
    submission.to_csv(f'../../output/{Path(__file__).name}_sub.csv', index=False)
    submission.head()
|
[
"makeffort134@gmail.com"
] |
makeffort134@gmail.com
|
a10d864424683827df934951ff4cb07416e8d969
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_12_01/aio/operations/_private_link_resources_operations.py
|
a7c4a66aa9351e0ab6a575929711ac78f42085cb
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,000
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations:
    """PrivateLinkResourcesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerservice.v2020_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: generated by AutoRest (see the file header); manual changes
    # here will be lost when the code is regenerated.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def list(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs
    ) -> "_models.PrivateLinkResourcesListResult":
        """Gets a list of private link resources in the specified managed cluster.

        Gets a list of private link resources in the specified managed cluster. The operation returns
        properties of each private link resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResourcesListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_12_01.models.PrivateLinkResourcesListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkResourcesListResult"]
        # Map well-known HTTP failures onto typed azure-core exceptions;
        # callers may extend/override via the `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-01"
        accept = "application/json"

        # Construct URL
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateLinkResourcesListResult', pipeline_response)

        if cls:
            # Hand the caller's `cls` hook the raw pipeline response too.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources'}  # type: ignore
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
ab1409aaf95d2bf69bc496ba2c8a4938816631bd
|
3b7b6648b72910046b6a227db30f71aeee2cba9c
|
/2020-12-18-neural-style-transfer/deeptools/preprocessing/RandomSingleCropPreprocessor.py
|
4ddf0ecef9eedb517ec472e48447e933c6d54b45
|
[] |
no_license
|
ken2190/deep-learning-study
|
f2abeb1cd302e405a15bbb52188ae44ffb414e2f
|
f2998be89d0c931176f158ae5f48ca562786e171
|
refs/heads/main
| 2023-04-02T05:07:08.504212
| 2021-04-11T15:11:22
| 2021-04-11T15:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
from sklearn.feature_extraction.image import extract_patches_2d
# this processor randomly crop an image of fixed size.
class RandomSingleCropPreprocessor:
    """Extract one randomly positioned (height x width) patch from an image."""
    def __init__(self, width, height):
        # Target patch size in pixels.
        self.width = width
        self.height = height
    def preprocess(self, image):
        # extract_patches_2d takes (rows, cols) == (height, width);
        # max_patches=1 makes it return a single random crop.
        return extract_patches_2d(image, (self.height, self.width), max_patches=1)[0]
# from PIL import Image
# import numpy as np
# pp = RandomSingleCropPreprocessor(200, 200)
# im = np.array(Image.open('pyimagesearch/preprocessing/test.png'))
# Image.fromarray(pp.preprocess(im)).show()
|
[
"machingclee@gmail.com"
] |
machingclee@gmail.com
|
f4056f860df1771e62dd5010d3a51ea2059537d3
|
6dc761a30cf5efa045f1154aaff2acfa139b835a
|
/LeetCode/Python/majorityElement.py
|
2c3a07d29edec31ce28f3cebf1b76d1b29269efe
|
[] |
no_license
|
snail15/AlgorithmPractice
|
4e58beee3ff76498a389268dd4cc207dcabf778e
|
9e8885953ad50e966454c45c460e81dbb6e48be0
|
refs/heads/master
| 2021-08-17T06:30:02.290260
| 2021-06-08T01:15:07
| 2021-06-08T01:15:07
| 98,246,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
# Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
# You may assume that the array is non-empty and the majority element always exist in the array.
# Example 1:
# Input: [3,2,3]
# Output: 3
# Example 2:
# Input: [2,2,1,1,1,2,2]
# Output: 2
def majorityElement(self, nums: List[int]) -> int:
    """Return the value appearing more than ⌊len(nums)/2⌋ times.

    Counts occurrences in a single pass and returns as soon as any value's
    tally reaches the majority threshold (the problem guarantees a
    majority element exists).
    """
    threshold = len(nums) // 2 + 1
    tally = {}
    for value in nums:
        tally[value] = tally.get(value, 0) + 1
        if tally[value] >= threshold:
            return value
|
[
"jungs@uchicago.edu"
] |
jungs@uchicago.edu
|
883b131aab7cc6403a4eb04a14315ce599a3fb52
|
60d6b8501d0be546437b26a6ee1f9fab97ec3897
|
/platypush/message/event/zigbee/mqtt.py
|
e3179407f6aa9291e1c47fb4fbf836c0c6dbf740
|
[
"MIT"
] |
permissive
|
BlackLight/platypush
|
68284a85b2f9eef303d26b04530f075927b5834a
|
446bc2f67493d3554c5422242ff91d5b5c76d78a
|
refs/heads/master
| 2023-08-31T21:01:53.519960
| 2023-08-29T22:05:38
| 2023-08-29T22:05:38
| 109,421,017
| 265
| 25
|
MIT
| 2023-09-01T23:15:49
| 2017-11-03T16:56:24
|
Python
|
UTF-8
|
Python
| false
| false
| 5,542
|
py
|
from typing import Dict, Any
from platypush.message.event import Event
# Event hierarchy for the zigbee2mqtt integration.  Every event carries the
# MQTT broker host/port; device/group events add the affected entity.  All
# constructors simply forward their fields to the base Event as kwargs.
class ZigbeeMqttEvent(Event):
    pass
class ZigbeeMqttOnlineEvent(ZigbeeMqttEvent):
    """
    Triggered when a zigbee2mqtt service goes online.
    """
    def __init__(self, host: str, port: int, *args, **kwargs):
        super().__init__(*args, host=host, port=port, **kwargs)
class ZigbeeMqttOfflineEvent(ZigbeeMqttEvent):
    """
    Triggered when a zigbee2mqtt service goes offline.
    """
    def __init__(self, host: str, port: int, *args, **kwargs):
        super().__init__(*args, host=host, port=port, **kwargs)
class ZigbeeMqttDevicePropertySetEvent(ZigbeeMqttEvent):
    """
    Triggered when the properties of a Zigbee connected device (state, brightness, alert etc.) change.
    """
    def __init__(self, host: str, port: int, device: str, properties: Dict[str, Any], *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, properties=properties, **kwargs)
class ZigbeeMqttDevicePairingEvent(ZigbeeMqttEvent):
    """
    Triggered when a device is pairing to the network.
    """
    def __init__(self, host: str, port: int, device=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceConnectedEvent(ZigbeeMqttEvent):
    """
    Triggered when a device connects to the network.
    """
    def __init__(self, host: str, port: int, device=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceBannedEvent(ZigbeeMqttEvent):
    """
    Triggered when a device is banned from the network.
    """
    def __init__(self, host: str, port: int, device=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceRemovedEvent(ZigbeeMqttEvent):
    """
    Triggered when a device is removed from the network.
    """
    def __init__(self, host: str, port: int, device=None, force=False, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, force=force, **kwargs)
class ZigbeeMqttDeviceRemovedFailedEvent(ZigbeeMqttEvent):
    """
    Triggered when the removal of a device from the network failed.
    """
    def __init__(self, host: str, port: int, device=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceWhitelistedEvent(ZigbeeMqttEvent):
    """
    Triggered when a device is whitelisted on the network.
    """
    def __init__(self, host: str, port: int, device=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceRenamedEvent(ZigbeeMqttEvent):
    """
    Triggered when a device is renamed on the network.
    """
    def __init__(self, host: str, port: int, device=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceBindEvent(ZigbeeMqttEvent):
    """
    Triggered when a device bind occurs on the network.
    """
    def __init__(self, host: str, port: int, device=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttDeviceUnbindEvent(ZigbeeMqttEvent):
    """
    Triggered when a device unbind occurs on the network.
    """
    def __init__(self, host: str, port: int, device=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, device=device, **kwargs)
class ZigbeeMqttGroupAddedEvent(ZigbeeMqttEvent):
    """
    Triggered when a group is added.
    """
    def __init__(self, host: str, port: int, group=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupAddedFailedEvent(ZigbeeMqttEvent):
    """
    Triggered when a request to add a group fails.
    """
    def __init__(self, host: str, port: int, group=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupRemovedEvent(ZigbeeMqttEvent):
    """
    Triggered when a group is removed.
    """
    def __init__(self, host: str, port: int, group=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupRemovedFailedEvent(ZigbeeMqttEvent):
    """
    Triggered when a request to remove a group fails.
    """
    def __init__(self, host: str, port: int, group=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupRemoveAllEvent(ZigbeeMqttEvent):
    """
    Triggered when all the devices are removed from a group.
    """
    def __init__(self, host: str, port: int, group=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttGroupRemoveAllFailedEvent(ZigbeeMqttEvent):
    """
    Triggered when a request to remove all the devices from a group fails.
    """
    def __init__(self, host: str, port: int, group=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, group=group, **kwargs)
class ZigbeeMqttErrorEvent(ZigbeeMqttEvent):
    """
    Triggered when an error happens on the zigbee2mqtt service.
    """
    def __init__(self, host: str, port: int, error=None, *args, **kwargs):
        super().__init__(*args, host=host, port=port, error=error, **kwargs)
# vim:sw=4:ts=4:et:
|
[
"blacklight86@gmail.com"
] |
blacklight86@gmail.com
|
a472c103c0b1f3c1f8c566e750f7ba8e53639190
|
65cc6a8877896ef69dd03d7b5eee5bed56e5371f
|
/example/attpc-daq/web/attpcdaq/daq/templatetags/daq_model_tags.py
|
600bbc1d51d3e665f9f57b9b0ce19ce3797deda5
|
[] |
no_license
|
wuhongyi/DjangoNote
|
34bdb9e82fc379e19b1df0bd7c90e504fa70a40d
|
81ad949ff895feda8131d8bdf5fa1439f962ae37
|
refs/heads/master
| 2020-05-02T17:54:12.270297
| 2019-05-22T14:37:32
| 2019-05-22T14:37:32
| 178,112,720
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
from django import template
from ..models import DataSource
register = template.Library()
def get_datasource_attr_from_choices(attr_name, choices):
    """Look up *attr_name* on DataSource, restricted to the given choices.

    Returns the attribute's value only when it is one of the constant keys
    listed in *choices* (a sequence of (key, name) pairs); anything else —
    including a missing attribute — yields None.  This keeps the template
    tags from exposing arbitrary attributes of the model.
    """
    value = getattr(DataSource, attr_name, None)
    valid_values = {key for key, _ in choices}
    return value if value in valid_values else None
@register.simple_tag
def datasource_state(name):
    # Template tag: resolve a DataSource state constant by name,
    # restricted to STATE_CHOICES.
    return get_datasource_attr_from_choices(name, DataSource.STATE_CHOICES)
@register.simple_tag
def daq_state(name):
    # Template tag: same lookup, restricted to DAQ_STATE_CHOICES.
    return get_datasource_attr_from_choices(name, DataSource.DAQ_STATE_CHOICES)
|
[
"wuhongyi@pku.edu.cn"
] |
wuhongyi@pku.edu.cn
|
cc16d1697225baee47a86dda51adb9016bdd330c
|
3f394cd47a1aaf0ae2f8de5ab9854f52341e017a
|
/tests/conftest.py
|
0ec2f5ef473a93e1446046c292552c5de1df0cff
|
[
"MIT"
] |
permissive
|
devildeveloper/Clay
|
e3771d97d23ae3ba7d866d8921102d50e95a6562
|
ca419ee4cfe191724ed68e3507515a5b258bb4bb
|
refs/heads/master
| 2021-01-18T02:27:22.094481
| 2013-11-18T20:24:02
| 2013-11-18T20:24:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
# -*- coding: utf-8 -*-
"""
Directory-specific fixtures, hooks, etc. for py.test
"""
from clay import Clay
import pytest
from .helpers import TESTS
@pytest.fixture()
def c():
    # A Clay app rooted at the tests directory.
    return Clay(TESTS)
@pytest.fixture()
def t(c):
    # A test client bound to the `c` fixture's app.
    return c.get_test_client()
|
[
"juanpablo@lucumalabs.com"
] |
juanpablo@lucumalabs.com
|
42dc6d18884578c84f4ca5272b7590683a423d4d
|
532549735aab20e7948511b63e0fb77cc5aedacf
|
/chaussette/backend/_fastgevent.py
|
c43809bd8d374be7c03b29174b2ce058a6b65653
|
[
"Apache-2.0"
] |
permissive
|
ericem/chaussette
|
f71ac35990b2b7aa41610ec4be867321ce3be89f
|
fe62725ca1d018bb26c024f796447b6c761f00e0
|
refs/heads/master
| 2021-01-18T10:52:43.720192
| 2013-05-02T13:38:23
| 2013-05-02T13:38:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
import socket
from gevent.wsgi import WSGIServer
from gevent import monkey
from chaussette.util import create_socket
class Server(WSGIServer):
    # gevent-based WSGI backend for Chaussette: wraps gevent's WSGIServer
    # around a socket created by chaussette.util.create_socket instead of
    # letting gevent bind its own.
    address_family = socket.AF_INET
    socket_type = socket.SOCK_STREAM

    def __init__(self, listener, application=None, backlog=None,
                 spawn='default', log='default', handler_class=None,
                 environ=None, **ssl_args):
        # Patch the stdlib with gevent's cooperative versions before any
        # sockets are created; noisy=False silences the patch report.
        monkey.noisy = False
        monkey.patch_all()
        host, port = listener
        self.socket = create_socket(host, port, self.address_family,
                                    self.socket_type, backlog=backlog)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Record the address actually bound (relevant when port 0 was given).
        self.server_address = self.socket.getsockname()
        # NOTE(review): the caller-supplied ``log`` argument is discarded here
        # and None is passed to WSGIServer instead — this looks deliberate
        # (suppress gevent's per-request logging), but confirm before relying
        # on the ``log`` parameter.
        log = None
        super(Server, self).__init__(self.socket, application, None, spawn,
                                     log, handler_class, environ, **ssl_args)
|
[
"tarek@ziade.org"
] |
tarek@ziade.org
|
f7d0ebc5b5c74035f2e5e648525b0bdabb67d31e
|
ee53b0262007b2f0db0fe15b2ad85f65fafa4e25
|
/Leetcode/441. Arranging Coins.py
|
dfa616241b4d3e2f18fe71fc819dff41930a76d6
|
[] |
no_license
|
xiaohuanlin/Algorithms
|
bd48caacb08295fc5756acdac609be78e143a760
|
157cbaeeff74130e5105e58a6b4cdf66403a8a6f
|
refs/heads/master
| 2023-08-09T05:18:06.221485
| 2023-08-08T11:53:15
| 2023-08-08T11:53:15
| 131,491,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
'''
You have a total of n coins that you want to form in a staircase shape, where every k-th row must have exactly k coins.
Given n, find the total number of full staircase rows that can be formed.
n is a non-negative integer and fits within the range of a 32-bit signed integer.
Example 1:
n = 5
The coins can form the following rows:
¤
¤ ¤
¤ ¤
Because the 3rd row is incomplete, we return 2.
Example 2:
n = 8
The coins can form the following rows:
¤
¤ ¤
¤ ¤ ¤
¤ ¤
Because the 4th row is incomplete, we return 3.
'''
import unittest
class Solution(object):
    def arrangeCoins(self, n):
        """Return the number of complete staircase rows buildable from n coins.

        Row k holds k coins, so k full rows need k*(k+1)/2 coins; we want the
        largest k with k*(k+1)/2 <= n, i.e. the floored positive root of
        k^2 + k - 2n = 0:  k = floor((sqrt(8n + 1) - 1) / 2).

        :type n: int
        :rtype: int
        """
        import math
        # math.isqrt computes an exact integer square root, so unlike the
        # original float-based int(((8*n+1)**0.5 - 1)/2) this cannot suffer
        # floating-point rounding errors for large n.
        return (math.isqrt(8 * n + 1) - 1) // 2
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.arrangeCoins over small known cases."""

    def test_arrangeCoins(self):
        # (coins, expected full rows) pairs, including the n = 0 edge case.
        examples = (
            (0, 0),
            (1, 1),
            (5, 2),
            (8, 3),
        )
        for first, second in examples:
            self.assert_function(first, second)

    def assert_function(self, first, second):
        # Helper so a failure message reports the offending input pair.
        self.assertEqual(Solution().arrangeCoins(first), second, msg="first: {}; second: {}".format(first, second))
if __name__ == "__main__":
    # Only run the test suite when executed as a script; the original
    # module-level unittest.main() ran (and called sys.exit) on import.
    unittest.main()
|
[
"derek.xiao@loftk.us"
] |
derek.xiao@loftk.us
|
cfc155b48e7139b1bf1bea71e66f59e91f6f6b50
|
d7c527d5d59719eed5f8b7e75b3dc069418f4f17
|
/main/_pythonSnippet1_backup/61/views.py
|
3e9bacefeb4c0afffa4042075dad295c84f00a02
|
[] |
no_license
|
Aivree/SnippetMatcher
|
3e348cea9a61e4342e5ad59a48552002a03bf59a
|
c8954dfcad8d1f63e6e5e1550bc78df16bc419d1
|
refs/heads/master
| 2021-01-21T01:20:59.144157
| 2015-01-07T04:35:29
| 2015-01-07T04:35:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,894
|
py
|
import datetime

from django import forms
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import Template, Context, RequestContext
from django.template.loader import get_template

from runner.forms import DocumentForm
from runner.models import Document
def list(request):
    """Show all uploaded documents and handle new file uploads.

    NOTE(review): shadows the builtin ``list``; renaming would break the
    reverse('runner.views.list') lookup below, so it is kept as-is.
    """
    # Handle file upload
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile = request.FILES['docfile'])
            newdoc.save()
            # Redirect to the document list after POST
            # (post/redirect/get avoids duplicate uploads on page refresh).
            return HttpResponseRedirect(reverse('runner.views.list'))
    else:
        form = DocumentForm() # An empty, unbound form
    # Load documents for the list page
    documents = Document.objects.all()
    # Render list page with the documents and the form
    return render_to_response(
        'list.html',
        {'documents': documents, 'form': form},
        context_instance=RequestContext(request)
    )
def index(request):
    """Render the list of runnable software into the bootstrap3 template.

    NOTE(review): this view returns HttpResponse, which is not imported at
    the top of this file as written — it raises NameError when hit unless the
    import is added.
    """
    from runner.models import Software
    software_list = []
    # Each Software object's string form appears to be a '|'-delimited
    # record; split it into fields for the template — TODO confirm against
    # Software.__str__/__unicode__.
    for i in Software.objects.all():
        i = str(i).split("|")
        software_list.append(i)
    t = get_template("bootstrap3.html")
    html = t.render(Context({
        'bootstrap3_title': 'Run programs',
        'software_list': software_list,
    }))
    return HttpResponse(html)
def software(request, name):
    """Render a page for running the software identified by *name*.

    NOTE(review): depends on HttpResponse being importable — missing from the
    module imports as originally written.
    """
    t = get_template("bootstrap3.html")
    html = t.render(RequestContext(request, {
        'bootstrap3_title': 'Running ' + name,
    }))
    return HttpResponse(html)
def current_datetime(request):
    """Render the current server date/time into the bootstrap3 template.

    NOTE(review): also relies on the missing HttpResponse import.
    """
    now = datetime.datetime.now()
    t = get_template("bootstrap3.html")
    html = t.render(Context({'current_date': now}))
    return HttpResponse(html)
|
[
"prateek1404@gmail.com"
] |
prateek1404@gmail.com
|
078e7534de86ed7c579a2ba0c616d3db8756b6be
|
d32a1eff193052dd62ad05f638346c7132796c2e
|
/python/pyspark/pandas/tests/connect/test_parity_groupby_slow.py
|
375dc703d956f229358f88f2ca4bde9e8f96075a
|
[
"CC0-1.0",
"MIT",
"Python-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"CDDL-1.0",
"CC-BY-SA-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unicode",
"CPL-1.0",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"CC-PDDC",
"NAIST-2003",
"LicenseRef-scancode-other-copyleft",
"EPL-2.0",
"CDDL-1.1",
"BSD-2-Clause"
] |
permissive
|
Kyligence/spark
|
c266dc19c7c2e2914eea34c9922f97ba17011075
|
f29502acf2fe96e23525268b0a29a6338b41bce6
|
refs/heads/master
| 2023-08-31T08:42:15.254881
| 2023-04-22T00:30:53
| 2023-04-22T00:30:53
| 100,349,194
| 6
| 61
|
Apache-2.0
| 2023-09-14T06:29:07
| 2017-08-15T07:04:07
|
Scala
|
UTF-8
|
Python
| false
| false
| 2,010
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.pandas.tests.test_groupby_slow import GroupBySlowTestsMixin
from pyspark.testing.connectutils import ReusedConnectTestCase
from pyspark.testing.pandasutils import PandasOnSparkTestUtils, TestUtils
class GroupBySlowParityTests(
    GroupBySlowTestsMixin, PandasOnSparkTestUtils, TestUtils, ReusedConnectTestCase
):
    """Re-runs the pandas-on-Spark slow groupby tests against Spark Connect.

    All tests are inherited from GroupBySlowTestsMixin; only the cases below
    are overridden to be skipped because they do not yet pass under Spark
    Connect.
    """

    @unittest.skip("Fails in Spark Connect, should enable.")
    def test_diff(self):
        super().test_diff()

    @unittest.skip("Fails in Spark Connect, should enable.")
    def test_dropna(self):
        super().test_dropna()

    @unittest.skip("Fails in Spark Connect, should enable.")
    def test_rank(self):
        super().test_rank()

    @unittest.skip("Fails in Spark Connect, should enable.")
    def test_split_apply_combine_on_series(self):
        super().test_split_apply_combine_on_series()
if __name__ == "__main__":
    # Re-export the test classes so unittest discovery in __main__ finds them.
    from pyspark.pandas.tests.connect.test_parity_groupby_slow import *  # noqa: F401

    try:
        # Prefer XML report output (consumed by Spark's CI) when available.
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
[
"gurwls223@apache.org"
] |
gurwls223@apache.org
|
e9222d3599e353156217730a4903521d6e392997
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/storage/cases/test_KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon.py
|
69dc0227141ef450501ea7063314cad598bd84b6
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon(TestCase):
    """Round-trip and formatting tests for one mainnet contract's storage."""

    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None
        # Fixture: Michelson script + storage captured from mainnet.
        cls.contract = get_data('storage/mainnet/KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon.json')

    def test_storage_encoding_KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon(self):
        # decode -> encode must reproduce the original Micheline storage.
        type_expr = self.contract['script']['code'][1]
        val_expr = self.contract['script']['storage']
        schema = build_schema(type_expr)
        decoded = decode_micheline(val_expr, type_expr, schema)
        actual = encode_micheline(decoded, schema)
        self.assertEqual(val_expr, actual)

    def test_storage_schema_KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon(self):
        # Schema construction from the first code section must not raise.
        _ = build_schema(self.contract['script']['code'][0])

    def test_storage_format_KT1QHRKLkwaHDV6TyY9H4ZU9ZwGuwZ1TWPfg_babylon(self):
        # Micheline -> Michelson pretty-printing must not raise.
        _ = micheline_to_michelson(self.contract['script']['code'])
        _ = micheline_to_michelson(self.contract['script']['storage'])
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
fb3c1d8faf3f4c7f4a59af63fb46a030978ecd4e
|
f167dffa2f767a0419aa82bf434852069a8baeb8
|
/lib/youtube_dl/extractor/einthusan.py
|
4e0f8bc819c70730a476ca31cd4320cecdc25b3d
|
[
"MIT"
] |
permissive
|
firsttris/plugin.video.sendtokodi
|
d634490b55149adfdcb62c1af1eb77568b8da3f5
|
1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3
|
refs/heads/master
| 2023-08-18T10:10:39.544848
| 2023-08-15T17:06:44
| 2023-08-15T17:06:44
| 84,665,460
| 111
| 31
|
MIT
| 2022-11-11T08:05:21
| 2017-03-11T16:53:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,720
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_str,
compat_urlparse,
)
from ..utils import (
extract_attributes,
ExtractorError,
get_elements_by_class,
urlencode_postdata,
)
class EinthusanIE(InfoExtractor):
    """youtube-dl extractor for movie pages on einthusan.tv / .com / .ca."""

    _VALID_URL = r'https?://(?P<host>einthusan\.(?:tv|com|ca))/movie/watch/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://einthusan.tv/movie/watch/9097/',
        'md5': 'ff0f7f2065031b8a2cf13a933731c035',
        'info_dict': {
            'id': '9097',
            'ext': 'mp4',
            'title': 'Ae Dil Hai Mushkil',
            'description': 'md5:33ef934c82a671a94652a9b4e54d931b',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'https://einthusan.tv/movie/watch/51MZ/?lang=hindi',
        'only_matching': True,
    }, {
        'url': 'https://einthusan.com/movie/watch/9097/',
        'only_matching': True,
    }, {
        'url': 'https://einthusan.ca/movie/watch/4E9n/?lang=hindi',
        'only_matching': True,
    }]

    # reversed from jsoncrypto.prototype.decrypt() in einthusan-PGMovieWatcher.js
    def _decrypt(self, encrypted_data, video_id):
        # The payload is base64 with a small transposition applied: slicing
        # chars 10-11 out and moving the final char to position 10 restores
        # decodable base64 that contains JSON.
        return self._parse_json(compat_b64decode((
            encrypted_data[:10] + encrypted_data[-1] + encrypted_data[12:-1]
        )).decode('utf-8'), video_id)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'<h3>([^<]+)</h3>', webpage, 'title')

        # The player <section> carries the data-ejpingables token needed for
        # the stream-info AJAX request below.
        player_params = extract_attributes(self._search_regex(
            r'(<section[^>]+id="UIVideoPlayer"[^>]+>)', webpage, 'player parameters'))

        # data-pageid doubles as the CSRF token for the AJAX endpoint.
        page_id = self._html_search_regex(
            '<html[^>]+data-pageid="([^"]+)"', webpage, 'page ID')
        video_data = self._download_json(
            'https://%s/ajax/movie/watch/%s/' % (host, video_id), video_id,
            data=urlencode_postdata({
                'xEvent': 'UIVideoPlayer.PingOutcome',
                'xJson': json.dumps({
                    'EJOutcomes': player_params['data-ejpingables'],
                    'NativeHLS': False
                }),
                'arcVersion': 3,
                'appVersion': 59,
                'gorilla.csrf.Token': page_id,
            }))['Data']

        # Rate limiting is reported in-band as a string payload rather than
        # an HTTP error, so it must be detected explicitly.
        if isinstance(video_data, compat_str) and video_data.startswith('/ratelimited/'):
            raise ExtractorError(
                'Download rate reached. Please try again later.', expected=True)

        ej_links = self._decrypt(video_data['EJLinks'], video_id)

        formats = []

        m3u8_url = ej_links.get('HLSLink')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native'))

        mp4_url = ej_links.get('MP4Link')
        if mp4_url:
            formats.append({
                'url': mp4_url,
            })
        self._sort_formats(formats)

        description = get_elements_by_class('synopsis', webpage)[0]
        thumbnail = self._html_search_regex(
            r'''<img[^>]+src=(["'])(?P<url>(?!\1).+?/moviecovers/(?!\1).+?)\1''',
            webpage, 'thumbnail url', fatal=False, group='url')
        if thumbnail is not None:
            # The thumbnail src may be relative; resolve against the page URL.
            thumbnail = compat_urlparse.urljoin(url, thumbnail)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
        }
|
[
"noreply@github.com"
] |
firsttris.noreply@github.com
|
2d441b942de17b1981ea070088659addc116d4ac
|
4f3a4c194451eae32f1ff7cf3b0db947e3892365
|
/142/main.py
|
7dd2d69286c4280a2dc6408e5232b45fffb6d8a6
|
[] |
no_license
|
szhongren/leetcode
|
84dd848edbfd728b344927f4f3c376b89b6a81f4
|
8cda0518440488992d7e2c70cb8555ec7b34083f
|
refs/heads/master
| 2021-12-01T01:34:54.639508
| 2021-11-30T05:54:45
| 2021-11-30T05:54:45
| 83,624,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
"""
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
Note: Do not modify the linked list.
Follow up:
Can you solve it without using extra space?
"""
# Definition for singly-linked list.
class ListNode(object):
    """A single node of a singly-linked list."""
    def __init__(self, x):
        self.val = x
        self.next = None


def make_list(ls):
    """Build a linked list from a Python list; return its head, or None."""
    if not ls:
        return None
    nodes = [ListNode(value) for value in ls]
    for prev, nxt in zip(nodes, nodes[1:]):
        prev.next = nxt
    return nodes[0]
class Solution(object):
    def detectCycle(self, head):
        """Return the node where a cycle begins in the list, or None.

        Floyd's tortoise-and-hare without extra space: once the slow and
        fast pointers meet inside the cycle, a pointer restarted from the
        head and one from the meeting point, advanced in lockstep, meet
        exactly at the cycle's entrance.

        :type head: ListNode
        :rtype: ListNode
        """
        if head is None:
            return None
        slow = fast = head
        while slow.next and fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
            if slow != fast:
                continue
            # Meeting point found: walk a second pointer from the head;
            # the two pointers meet at the start of the cycle.
            seeker = head
            while seeker != slow:
                seeker = seeker.next
                slow = slow.next
            return slow
        return None
|
[
"shao.zhongren@gmail.com"
] |
shao.zhongren@gmail.com
|
1b2708b9fd69527e897aec7549fa95a9ed7fafd3
|
6d11eda98e529286c775942f63013619f37246c5
|
/examples/potsdam/semantic_segmentation.py
|
a3ad3085dd6f3c7d8d3532839dfb3cf35057feda
|
[
"Apache-2.0"
] |
permissive
|
Pandinosaurus/raster-vision-examples
|
388438ddd58c2c0fd8a7eced5be02cc5518e80f8
|
d6957a5de6d49fbe7d419da67979725eaab43ee7
|
refs/heads/master
| 2021-07-18T08:17:33.274224
| 2020-07-03T02:52:20
| 2020-07-03T02:52:20
| 184,796,275
| 1
| 0
|
NOASSERTION
| 2020-07-03T04:10:43
| 2019-05-03T17:38:55
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,610
|
py
|
import os
from os.path import join
import rastervision as rv
from examples.utils import str_to_bool, save_image_crop
class PotsdamSemanticSegmentation(rv.ExperimentSet):
    """Raster Vision experiment set for ISPRS Potsdam semantic segmentation."""

    def exp_main(self, raw_uri, processed_uri, root_uri, test=False, use_tf=False):
        """Run an experiment on the ISPRS Potsdam dataset.

        Uses Tensorflow Deeplab backend with Mobilenet architecture. Should get to
        F1 score of ~0.86 (including clutter class) after 6 hours of training on a P3
        instance.

        Args:
            raw_uri: (str) directory of raw data
            processed_uri: (str) directory for derived data (test-mode crops)
            root_uri: (str) root directory for experiment output
            test: (bool) if True, run a very small experiment as a test and generate
                debug output
            use_tf: (bool) if True, use Tensorflow Deeplab backend.
        """
        # CLI arguments arrive as strings; coerce to real booleans.
        test = str_to_bool(test)
        use_tf = str_to_bool(use_tf)
        exp_id = 'potsdam-seg'
        # Potsdam tile ids; '2-10' maps to file top_potsdam_2_10_RGBIR.tif.
        train_ids = ['2-10', '2-11', '3-10', '3-11', '4-10', '4-11', '4-12', '5-10',
                     '5-11', '5-12', '6-10', '6-11', '6-7', '6-9', '7-10', '7-11',
                     '7-12', '7-7', '7-8', '7-9']
        val_ids = ['2-12', '3-12', '6-12']
        # infrared, red, green
        channel_order = [3, 0, 1]
        debug = False

        # Smoke-test mode: one scene each and debug chip output.
        if test:
            debug = True
            train_ids = train_ids[0:1]
            val_ids = val_ids[0:1]
            exp_id += '-test'

        # Class name -> (class id, display color).
        classes = {
            'Car': (1, '#ffff00'),
            'Building': (2, '#0000ff'),
            'Low Vegetation': (3, '#00ffff'),
            'Tree': (4, '#00ff00'),
            'Impervious': (5, "#ffffff"),
            'Clutter': (6, "#ff0000")
        }

        task = rv.TaskConfig.builder(rv.SEMANTIC_SEGMENTATION) \
                            .with_chip_size(300) \
                            .with_classes(classes) \
                            .with_chip_options(window_method='sliding',
                                               stride=300, debug_chip_probability=1.0) \
                            .build()

        if use_tf:
            batch_size = 8
            num_steps = 100000
            if test:
                num_steps = 1
                batch_size = 2

            model_type = rv.MOBILENET_V2
            backend = rv.BackendConfig.builder(rv.TF_DEEPLAB) \
                                      .with_task(task) \
                                      .with_model_defaults(model_type) \
                                      .with_train_options(sync_interval=600) \
                                      .with_num_steps(num_steps) \
                                      .with_batch_size(batch_size) \
                                      .with_debug(debug) \
                                      .build()
        else:
            batch_size = 8
            num_epochs = 10
            if test:
                batch_size = 2
                num_epochs = 1

            backend = rv.BackendConfig.builder(rv.PYTORCH_SEMANTIC_SEGMENTATION) \
                .with_task(task) \
                .with_train_options(
                    lr=1e-4,
                    batch_size=batch_size,
                    num_epochs=num_epochs,
                    model_arch='resnet50',
                    debug=debug) \
                .build()

        def make_scene(id):
            # File names use underscores while the ids above use dashes.
            id = id.replace('-', '_')
            raster_uri = '{}/4_Ortho_RGBIR/top_potsdam_{}_RGBIR.tif'.format(
                raw_uri, id)
            label_uri = '{}/5_Labels_for_participants/top_potsdam_{}_label.tif'.format(
                raw_uri, id)

            if test:
                # In test mode, work on a small cached crop of the scene.
                crop_uri = join(
                    processed_uri, 'crops', os.path.basename(raster_uri))
                save_image_crop(raster_uri, crop_uri, size=600)
                raster_uri = crop_uri

            # Using with_rgb_class_map because label TIFFs have classes encoded as RGB colors.
            label_source = rv.LabelSourceConfig.builder(rv.SEMANTIC_SEGMENTATION) \
                .with_rgb_class_map(task.class_map) \
                .with_raster_source(label_uri) \
                .build()

            # URI will be injected by scene config.
            # Using with_rgb(True) because we want prediction TIFFs to be in RGB format.
            label_store = rv.LabelStoreConfig.builder(rv.SEMANTIC_SEGMENTATION_RASTER) \
                .with_rgb(True) \
                .build()

            scene = rv.SceneConfig.builder() \
                                  .with_task(task) \
                                  .with_id(id) \
                                  .with_raster_source(raster_uri,
                                                      channel_order=channel_order) \
                                  .with_label_source(label_source) \
                                  .with_label_store(label_store) \
                                  .build()

            return scene

        train_scenes = [make_scene(id) for id in train_ids]
        val_scenes = [make_scene(id) for id in val_ids]

        dataset = rv.DatasetConfig.builder() \
                                  .with_train_scenes(train_scenes) \
                                  .with_validation_scenes(val_scenes) \
                                  .build()

        experiment = rv.ExperimentConfig.builder() \
                                        .with_id(exp_id) \
                                        .with_task(task) \
                                        .with_backend(backend) \
                                        .with_dataset(dataset) \
                                        .with_root_uri(root_uri) \
                                        .build()

        return experiment
# Allow running this experiment set from the command line via Raster Vision.
if __name__ == '__main__':
    rv.main()
|
[
"lewfish@gmail.com"
] |
lewfish@gmail.com
|
f4be1784fe13e6274c766985a165f620b822bcb1
|
930309163b930559929323647b8d82238724f392
|
/abc216_e.py
|
b2c5d66d2e922c823160cdcb8e9ca31ca835c4d4
|
[] |
no_license
|
GINK03/atcoder-solvers
|
874251dffc9f23b187faa77c439b445e53f8dfe1
|
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
|
refs/heads/master
| 2021-11-07T14:16:52.138894
| 2021-09-12T13:32:29
| 2021-09-12T13:32:29
| 11,724,396
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
import sys
import logging
def main():
    # Greedy solution: n values, at most k selections total; repeatedly take
    # from the currently-largest values, whose contribution decreases by one
    # each time it is taken (hence the triangular-number sums below).
    n, k = map(int, input().split())
    a = list(map(int, input().split()))
    # Sort descending and append a 0 sentinel so a[i + 1] always exists.
    a = sorted(a, reverse=True) + [0]

    def cumsum(x):
        # Triangular number: 1 + 2 + ... + x.
        return x * (x + 1) // 2

    k_remaining = k
    ans = 0
    for i in range(n):
        # Advance until the next strictly lower level; the first i + 1
        # values are then all >= a[i] and are drained together.
        if a[i] == a[i + 1]:
            continue
        if k_remaining >= (i + 1) * (a[i] - a[i + 1]):
            # Enough budget to drain all i + 1 tracks from a[i] down to a[i + 1].
            ans += (cumsum(a[i]) - cumsum(a[i + 1])) * (i + 1)
            k_remaining -= (i + 1) * (a[i] - a[i + 1])
        else:
            # Partial drain: j full rounds across i + 1 tracks, then r extra
            # single picks at value a[i] - j.
            j = k_remaining // (i + 1)
            r = k_remaining % (i + 1)
            logging.debug((j, r))
            ans += (cumsum(a[i]) - cumsum(a[i] - j)) * (i + 1)
            ans += (a[i] - j) * r
            k_remaining = 0
        if k_remaining == 0:
            break
    print(ans)
if __name__ == "__main__":
    # Enable debug-level logging when the script is run with --debug.
    loglevel = "DEBUG" if "--debug" in sys.argv else "WARNING"
    numeric_level = getattr(logging, loglevel, None)
    log_format = "%(levelname)s (%(asctime)s.%(msecs)d): %(message)s"
    logging.basicConfig(level=numeric_level, format=log_format, datefmt="%I:%M:%S")
    main()
|
[
"gim.kobayashi@gmail.com"
] |
gim.kobayashi@gmail.com
|
e7b69c6f075b17d67552da7d91dd5b80b77ed235
|
5f0eeef355fa84b165d4e0707e8874755cc03259
|
/chp02_forces/Exercise_2_10_attractrepel/Attractor.py
|
b265f28a642f9d31b8c7540541527dd188cd2d56
|
[] |
no_license
|
kidult00/NatureOfCode-Examples-Python
|
5835fbed114f3991b9986852f31d29a0a46d7e53
|
42461590deebbe305d5815ff0d207ff974335ad5
|
refs/heads/master
| 2021-05-11T04:47:53.999705
| 2018-03-07T15:54:12
| 2018-03-07T15:54:12
| 117,946,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
# The Nature of Code - Python Version
# [kidult00](https://github.com/kidult00)
# A class for a draggable attractive body in our world
# Attraction = G * M1 * M2 / Distance^2
class Attractor(object):
    # A draggable attractive body; force follows Attraction = G*M1*M2/d^2.
    # NOTE: relies on Processing.py sketch globals (PVector, width, height,
    # mouseX, mouseY, constrain, dist, ellipse, fill, stroke, ...) being in
    # scope at runtime.
    def __init__(self):
        self.mass = 10.0  # Mass, tied to size
        self.g = 1.0  # Gravitational Constant
        self.position = PVector(width/2, height/2)
        self.dragOffset = PVector(0.0, 0.0)  # holds the offset for when object is clicked on
        self.dragging = False
        self.rollover = False

    def attract(self, m):
        # Return the gravitational force this attractor exerts on mover m.
        force = PVector.sub(self.position, m.position)  # Calculate direction of force
        d = force.mag()  # Distance between objects
        d = constrain(d, 5.0, 25.0)  # Limiting the distance to eliminate "extreme" results for very close or very far objects
        force.normalize()  # Normalize vector (distance doesn't matter here, we just want this vector for direction)
        strength = (self.g * self.mass * m.mass) / (d * d)  # Calculate gravitional force magnitude
        force.mult(strength)  # Get force vector --> magnitude * direction
        return force

    # Method to display
    def display(self):
        ellipseMode(CENTER)
        strokeWeight(0)
        stroke(0)
        # Shade darker while dragging, lighter on hover, black otherwise.
        if self.dragging : fill(50)
        elif self.rollover : fill(100)
        else : fill(0)
        ellipse(self.position.x, self.position.y, self.mass*6, self.mass*6)

    # The methods below are for mouse interaction
    def clicked(self, mx, my):
        # Start a drag when the press lands inside the body.
        d = dist(mx, my, self.position.x, self.position.y)
        if d < self.mass :
            self.dragging = True
            self.dragOffset.x = self.position.x - mx
            self.dragOffset.y = self.position.y - my

    def hover(self, mx, my):
        # Track whether the pointer is over the body (for display()).
        d = dist(mx, my, self.position.x, self.position.y)
        if d < self.mass : self.rollover = True
        else: self.rollover = False

    def stopDragging(self):
        self.dragging = False

    def drag(self):
        # Follow the mouse, preserving the grab offset from clicked().
        if self.dragging :
            self.position.x = mouseX + self.dragOffset.x
            self.position.y = mouseY + self.dragOffset.y
|
[
"sysulj@gmail.com"
] |
sysulj@gmail.com
|
9b0612a4597a28b9bfac2f4dc745eb4104ab302c
|
384d0be5ac54b306b945cf38c10d9b0a44c975ea
|
/devstack/tools/uec/meta.py
|
5b845d81a69b19773c66ea4fb61a1a9065a88c47
|
[] |
no_license
|
ashokcse/openstack-bill
|
05ae313637b3cfecba946d2a9b32e8c7609fc721
|
1a3d7575d4b341f64fa1764ed47e47a7504a9bcc
|
refs/heads/master
| 2021-01-18T14:05:24.696165
| 2012-09-12T11:29:20
| 2012-09-12T11:29:20
| 5,424,267
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
import sys
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler
def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """Serve files over HTTP on the given host:port until interrupted."""
    # NOTE: Python 2 code (print statement, BaseHTTPServer module).
    server_address = (host, port)
    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)
    # Report the address actually bound (relevant when port 0 picks one).
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()
if __name__ == '__main__':
    # Optional single argument: "host:port" or just "host" (port defaults
    # to 8080); with no argument, serve on 0.0.0.0:8080.
    if sys.argv[1:]:
        address = sys.argv[1]
    else:
        address = '0.0.0.0'
    if ':' in address:
        host, port = address.split(':')
    else:
        host = address
        port = 8080
    main(host, int(port))
|
[
"ashokcse@live.com"
] |
ashokcse@live.com
|
afef5e088c4a797fddf972b908f3d05308a8a5c5
|
a512b8893b0d2de827d6292e810f3a98b41e132c
|
/Week6/Day1/Solutions/Python/prog4.py
|
8f234ad7cc815e2ff244fd79557baa2595b427a1
|
[] |
no_license
|
Audarya07/Daily-Flash-Codes
|
d771079fd0d470e2d3e05679f17f32fb64b4f426
|
cf96ca2b1676b038e243fac67be778381492ffeb
|
refs/heads/master
| 2022-11-06T15:37:47.180729
| 2020-06-25T16:20:55
| 2020-06-25T16:20:55
| 274,960,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
# Print a 5-row pattern: row r has r leading blanks followed by the
# numbers 5, 4, ... counting down, one cell per column.
for row in range(5):
    cells = [" "] * row + [str(5 - k) for k in range(5 - row)]
    print(" ".join(cells), end=" \n")
|
[
"audiuttarwar2000@gmail.com"
] |
audiuttarwar2000@gmail.com
|
bef4ed0adc518bd890aba6eb08948e612e7755b4
|
9eaa2c64a777bd24a3cccd0230da5f81231ef612
|
/study/1905/month01/code/Stage1/day04/exercise02.py
|
4527340f5bf057badc200a68d1b1fcc8edce6772
|
[
"MIT"
] |
permissive
|
Dython-sky/AID1908
|
4528932f2ca66b844d8a3fcab5ed8bf84d20eb0c
|
46cd54a7b36b5f009974f2bbb7005a4ad440ca1a
|
refs/heads/master
| 2022-04-14T12:23:30.426270
| 2020-04-01T18:05:19
| 2020-04-01T18:05:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
"""
一张纸的厚度是0.01mm
对折多少次厚度能超过珠穆朗玛峰(8844.43米)
"""
thickness = 0.01 / 1000
count = 0
while thickness <= 8848.43:
thickness *= 2
count += 1
# print(thickness)
print("一张纸对折{}次能超过珠穆朗玛峰".format(count))
|
[
"dong_1998_dream@163.com"
] |
dong_1998_dream@163.com
|
d49ea65ea1d608754984e1885d288d255efbf3a9
|
a8f615e6f2e00bcc72cd67475c5dd4a9ff0e6c14
|
/imdemo/imdemo/pages/nodes/pin.py
|
15058ecdfab3a662b795bd45d0d98c33f047f968
|
[
"MIT"
] |
permissive
|
KangWeon/arcade-imgui
|
fcf43f2399f56960b5249bd80e4e16d8639be8e2
|
24a8d423440cd9adaf3373a9c2492d04d8862062
|
refs/heads/master
| 2023-01-01T03:04:05.605347
| 2020-10-18T08:04:21
| 2020-10-18T08:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
class Pin:
    """A named connection point attached to a node, with a 2-D position."""

    def __init__(self, node, name):
        self.node = node  # owning node
        self.name = name  # display / lookup name
        self.x = 0        # screen x-coordinate
        self.y = 0        # screen y-coordinate

    def set_position(self, pos):
        """Move the pin to ``pos``, an ``(x, y)`` pair."""
        x, y = pos
        self.x = x
        self.y = y

    def get_position(self):
        """Return the pin's position as an ``(x, y)`` tuple."""
        return (self.x, self.y)

    def draw(self):
        """Rendering hook; base pins draw nothing."""
        pass


class Input(Pin):
    """A pin that receives connections."""
    pass


class Output(Pin):
    """A pin that emits connections."""
    pass
|
[
"kurtisfields@gmail.com"
] |
kurtisfields@gmail.com
|
8da13cd142ec6b62a14d15b73cfe977ec43475ff
|
a97fb0584709e292a475defc8506eeb85bb24339
|
/source code/code/ch203.py
|
3aa2f981b9a6399e15c03b0b1aeb0e4e562fef35
|
[] |
no_license
|
AAQ6291/PYCATCH
|
bd297858051042613739819ed70c535901569079
|
27ec4094be785810074be8b16ef84c85048065b5
|
refs/heads/master
| 2020-03-26T13:54:57.051016
| 2018-08-17T09:05:19
| 2018-08-17T09:05:19
| 144,963,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
#!/usr/bin/env python
#coding=utf-8
from __future__ import print_function
## Declare x, y, z as an empty tuple, list and dict respectively.
x, y, z = (), [], {}

## Although all three are empty structures, they are not equal to each other.
print(x, y, z, "相等" if x == y == z else "不相等")

## None equals none of the empty containers either, so each check below
## reports inequality.
for value in (x, y, z):
    if value == None:
        print(value, " 相等 None")
    else:
        print(value, " 不相等 None")
|
[
"angelak.tw@gmail.com"
] |
angelak.tw@gmail.com
|
a1b3558b03ae177a9ec695640ddab9481f1cfb65
|
093b9569be9d1c4e5daf92efbebc38f680917b2d
|
/.history/base/views_20210829090123.py
|
bfec5c7dacaf07d85a118c58236ec494edd47b23
|
[] |
no_license
|
Justin-Panagos/todoList
|
95b1e97ff71af1b0be58e7f8937d726a687cea4d
|
10539219b59fcea00f8b19a406db3d4c3f4d289e
|
refs/heads/master
| 2023-08-04T13:27:13.309769
| 2021-08-29T14:06:43
| 2021-08-29T14:06:43
| 400,827,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView
from django.urls import reverse_lazy
from django.contrib.auth.views import LoginView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login
from .models import Task
class CustoomLoginView(LoginView):
    """Login view that sends authenticated users straight to the task list.

    NOTE(review): the class name misspells "Custom"; renaming it could break
    urls.py references, so it is documented rather than changed here.
    """
    template_name = 'base/login.html'
    fields = '__all__'
    redirect_authenticated_user = True

    def get_success_url(self):
        # Always land on the task list after a successful login.
        return reverse_lazy('tasks')
class RegisterPage(FormView):
    """User self-registration view.

    Fixes two defects in the original:
    * the hook was misnamed ``form_validate``, so FormView never called it
      and registration silently skipped saving/logging in the new user;
    * ``login()`` was called with no arguments, which raises TypeError —
      Django's login() requires the request and the user.

    NOTE(review): ``redirect_authenticated_user`` is a LoginView attribute
    and has no effect on a plain FormView; kept for compatibility.
    """
    template_name = 'base/register.html'
    form_class = UserCreationForm
    redirect_authenticated_user = True
    success_url = reverse_lazy('tasks')

    def form_valid(self, form):
        # Persist the new user, log them in, then let FormView handle the
        # redirect to success_url.
        user = form.save()
        if user is not None:
            login(self.request, user)
        return super(RegisterPage, self).form_valid(form)
class TaskList(LoginRequiredMixin, ListView):
    """List the logged-in user's tasks plus a count of incomplete ones."""
    model = Task
    context_object_name = 'tasks'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Restrict to the requesting user's tasks (ListView fetches all rows).
        context['tasks'] = context['tasks'].filter(user=self.request.user)
        context['count'] = context['tasks'].filter(complete=False).count()
        return context
class TaskDetail(LoginRequiredMixin, DetailView):
    """Show a single task; requires login.

    NOTE(review): the queryset is not filtered by user, so any logged-in
    user who knows a pk can view another user's task — confirm upstream.
    """
    model = Task
    context_object_name = 'task'
    template_name = 'base/task.html'
class TaskCreate(LoginRequiredMixin, CreateView):
    """Create a task owned by the requesting user."""
    model = Task
    fields = ['title','description','complete']
    success_url = reverse_lazy('tasks')

    def form_valid(self, form):
        # Stamp the new task with the current user before saving.
        form.instance.user = self.request.user
        return super(TaskCreate, self).form_valid(form)
class TaskUpdate(LoginRequiredMixin, UpdateView):
    """Edit an existing task, then return to the task list."""
    model = Task
    fields = ['title','description','complete']
    success_url = reverse_lazy('tasks')
class TaskDelete(LoginRequiredMixin, DeleteView):
    """Confirm and delete a task, then return to the task list.

    NOTE(review): unlike TaskList, the queryset is not filtered by user, so
    any logged-in user who guesses a pk could delete another user's task —
    worth confirming/fixing upstream.
    """
    model = Task
    context_object_name = 'task'
    success_url = reverse_lazy('tasks')
|
[
"justpanagos@gmail.com"
] |
justpanagos@gmail.com
|
b983070276e9108430c515665fa30b6bce8cb8fb
|
f6841d5626d87e836f6012d88c783706fa46d769
|
/web_crawler.py
|
c736c3b9c98f9e2dabb384fc0182472094e813d0
|
[] |
no_license
|
Jack-Valentine/python-seminar-4
|
850b22cd7c552b570e25e9432abf98a25cf0b7d6
|
cd6c8945f436fa5dc0d6dec14551d07e6dd3562a
|
refs/heads/master
| 2021-01-22T07:42:35.044924
| 2017-05-25T03:46:59
| 2017-05-25T03:46:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,010
|
py
|
from bs4 import BeautifulSoup
from gevent import monkey
import sys
import gevent
import time
import urllib.request
def crawling_product_price(product_url, max_retries=3):
    """Fetch an Amazon product page and print its title and price.

    Bug fix: the original caught *every* exception with a bare ``except:``
    and retried by calling itself recursively, so a persistently failing
    URL (network error, CAPTCHA page with no #productTitle) would recurse
    until the stack overflowed.  Retries are now bounded and iterative.

    :param product_url: URL of the product detail page.
    :param max_retries: attempts to make before silently giving up
        (default 3; keeps the original best-effort behaviour).
    """
    for _attempt in range(max_retries):
        try:
            with urllib.request.urlopen(product_url) as response:
                html = response.read()
            soup = BeautifulSoup(html, 'html.parser')
            product_title = soup.find(id='productTitle').get_text().strip()
            price = soup.find(id='priceblock_ourprice').get_text()
            print(product_title, price)
            return
        except Exception:
            # Network failure, or the expected elements were missing from
            # the page (soup.find returned None) — try again.
            continue
if __name__ == '__main__':
    # '-c' as the first command-line argument switches on gevent concurrency.
    concurrency = sys.argv[1:2] == ['-c']

    # Amazon product pages to price-check.
    product_urls = [
        'https://www.amazon.com/LG-Electronics-OLED65E7P-65-Inch-Smart/dp/B01MZF7YUD',
        'https://www.amazon.com/LG-Electronics-75SJ8570-75-Inch-SUPER/dp/B01N5V18W6',
        'https://www.amazon.com/All-New-Element-4K-Ultra-HD-Smart-TV-Fire-TV-Edition-43-Inch/dp/B06XD4SXWD',
        'https://www.amazon.com/Sceptre-U518CV-UMS-Ultra-True-black/dp/B06Y26S3BC',
        'https://www.amazon.com/Vizio-SMART-23-54IN-RETURNS-D24H-E1/dp/B06XQW5FJH',
        'https://www.amazon.com/Hisense-55K22DG-55-Inch-1080p-120Hz/dp/B00GFHG1OQ',
        'https://www.amazon.com/Samsung-Electronics-UN65MU9000-65-Inch-Ultra/dp/B06XGCT2PQ',
        'https://www.amazon.com/Samsung-Electronics-UN65MU8000-65-Inch-Ultra/dp/B06X9VSZYM',
        'https://www.amazon.com/Element-ELEFW3916R-720p-Certified-Refurbished/dp/B01N8PPMRG',
        'https://www.amazon.com/Samsung-UN50J5000-50-Inch-1080p-Model/dp/B00WR28LLE'
    ]

    start_time = time.time()
    if concurrency:
        # Monkey-patch blocking stdlib I/O so greenlets can overlap the
        # network waits of the individual requests.
        monkey.patch_all()
        threads = [gevent.spawn(crawling_product_price, product_url) for product_url in product_urls]
        gevent.joinall(threads)
    else:
        # Sequential fallback: one request at a time.
        for product_url in product_urls:
            crawling_product_price(product_url)
    end_time = time.time()

    # Report wall-clock time for the chosen mode.
    print('-' * 90)
    print(f"Results(concurrency is {'on' if concurrency else 'off'}): {end_time-start_time}s")
|
[
"kd980311@naver.com"
] |
kd980311@naver.com
|
69e51fdfc4869a7c3cbfdeaf0cb52e5fa0558a74
|
f69eccca4970bc898983b149bbadfc6a79e77916
|
/befh/api_socket.py
|
9252264f83791eecb5cd78803add2d6948531050
|
[
"Apache-2.0"
] |
permissive
|
chrischris292/MarketDataGdax
|
a3cd911edafe7a246a1d553180e1edb66a125c8c
|
95dc398123f7878526df4af2402af3cbeee67057
|
refs/heads/master
| 2021-05-06T17:38:19.949472
| 2017-11-24T22:24:40
| 2017-11-24T22:24:40
| 111,900,487
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
#!/bin/python
class ApiSocket:
    """Base class for exchange API sockets.

    Every method is a stub that returns ``None``; concrete exchange
    connectors are expected to override them.
    """

    def __init__(self):
        pass

    @classmethod
    def parse_l2_depth(cls, instmt, raw):
        """Parse raw JSON data into an L2 depth snapshot.

        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: ``None`` in this base class.
        """
        return None

    @classmethod
    def parse_trade(cls, instmt, raw):
        """Parse raw JSON data into a trade record.

        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: ``None`` in this base class.
        """
        return None

    def get_order_book(self, instmt):
        """Get the order book for *instmt*.

        :param instmt: Instrument
        :return: Object L2Depth (``None`` in this base class).
        """
        return None

    def get_trades(self, instmt, trade_id):
        """Get trades for *instmt*.

        :param instmt: Instrument
        :param trade_id: Trade id
        :return: List of trades (``None`` in this base class).
        """
        return None
|
[
"gavincyi@gmail.com"
] |
gavincyi@gmail.com
|
5f8d714422c7d691696299d9f7a93d52b2168c5c
|
59166105545cdd87626d15bf42e60a9ee1ef2413
|
/test/test_space_shuttle_api.py
|
f91c3cfb0e3cfbade624d087754e5913f39e478a
|
[] |
no_license
|
mosoriob/dbpedia_api_client
|
8c594fc115ce75235315e890d55fbf6bd555fa85
|
8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc
|
refs/heads/master
| 2022-11-20T01:42:33.481024
| 2020-05-12T23:22:54
| 2020-05-12T23:22:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
# coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import dbpedia
from dbpedia.api.space_shuttle_api import SpaceShuttleApi # noqa: E501
from dbpedia.rest import ApiException
class TestSpaceShuttleApi(unittest.TestCase):
    """SpaceShuttleApi unit test stubs (generated by openapi-generator)."""

    def setUp(self):
        # Fresh API client for every test case.
        self.api = dbpedia.api.space_shuttle_api.SpaceShuttleApi()  # noqa: E501

    def tearDown(self):
        # No per-test resources to release.
        pass

    def test_spaceshuttles_get(self):
        """Test case for spaceshuttles_get

        List all instances of SpaceShuttle  # noqa: E501
        """
        # TODO: generated stub — exercise self.api.spaceshuttles_get here.
        pass

    def test_spaceshuttles_id_get(self):
        """Test case for spaceshuttles_id_get

        Get a single SpaceShuttle by its id  # noqa: E501
        """
        # TODO: generated stub — exercise self.api.spaceshuttles_id_get here.
        pass
|
[
"maxiosorio@gmail.com"
] |
maxiosorio@gmail.com
|
165d038ef67c6e5d9650811fb8eebb4c215a8874
|
1ad12a71c3d5d2b3810ce03e8bd138c4ffb66eb8
|
/xlsxwriter/test/comparison/test_chart_axis17.py
|
d77b584fbddf1629eaf7474c77dea6cb5512ae61
|
[
"BSD-2-Clause-Views"
] |
permissive
|
idreamsfy/XlsxWriter
|
b52929229b16e2ee1eaca0cda9980a5a0aad5769
|
129044ed821de67895b4562c6b71f90eba5be6b4
|
refs/heads/master
| 2021-01-02T20:39:20.415882
| 2020-02-07T21:07:55
| 2020-02-07T21:07:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Reference workbook the generated output is compared against.
        self.set_filename('chart_axis17.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})

        # Fixed axis ids so the XML matches the Excel reference byte-for-byte.
        chart.axis_ids = [43812736, 45705088]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
        chart.add_series({'values': '=Sheet1!$C$1:$C$5'})

        # Feature under test: logarithmic value (y) axis.
        chart.set_y_axis({'log_base': 10})

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
ac0032fb8c3c73b7de8979c896fcd0df0b3a547f
|
263dc86ea58278d6e1db448c245f692049c73199
|
/employeedetails/customer/urls.py
|
00350cd841b461cd5617ec8e73ffdbac809561d7
|
[] |
no_license
|
krishnanunni-pr/MyDjangoProjects
|
c3a81b193a659c47fd6aec01777d6f689479eb9f
|
3d644d2a261243be40f5678e9a61d508a5980143
|
refs/heads/master
| 2023-08-05T20:10:08.509167
| 2021-09-27T09:21:21
| 2021-09-27T09:21:21
| 394,686,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
from django.urls import path
from customer import views
# URL routes for the customer app.
urlpatterns=[
    # Authentication endpoints.
    path("accounts/signup",views.signup,name="signup"),
    path("accounts/signin",views.signin,name="signin"),
    path("accounts/signout",views.signout,name="signout"),
    # Landing page.
    path("",views.home,name="home")
]
|
[
"krishna@gmail.com"
] |
krishna@gmail.com
|
9b8abd96e7a9d1cf1657b05be3e7327c9595c874
|
f64e31cb76909a6f7fb592ad623e0a94deec25ae
|
/tests/test_p0380_insert_delete_getrandom_o1.py
|
6fb1b572dadda2d8a17a49d0331190489c5cd47b
|
[] |
no_license
|
weak-head/leetcode
|
365d635cb985e1d154985188f6728c18cab1f877
|
9a20e1835652f5e6c33ef5c238f622e81f84ca26
|
refs/heads/main
| 2023-05-11T14:19:58.205709
| 2023-05-05T20:57:13
| 2023-05-05T20:57:13
| 172,853,059
| 0
| 1
| null | 2022-12-09T05:22:32
| 2019-02-27T05:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
# flake8: noqa: F403, F405
import pytest
from leetcode.p0380_insert_delete_getrandom_o1 import *
# Solution classes exercised by the parametrized test below.
solutions = [
    RandomizedSet,
]

# ([args], expectation),
# Operation encoding, one tuple per call:
#   ("i", value, expected)  -> insert(value)  must return `expected`
#   ("d", value, expected)  -> remove(value)  must return `expected`
#   ("r", None,  members)   -> getRandom()    must be a member of `members`
test_cases = [
    [
        ("d", 2, False),
        ("i", 1, True),
        ("i", 1, False),
        ("r", None, {1}),
    ],
    [
        ("d", 2, False),
        ("i", 1, True),
        ("i", 1, False),
        ("r", None, {1}),
        ("i", 2, True),
        ("i", 3, True),
        ("r", None, {1, 2, 3}),
        ("r", None, {1, 2, 3}),
        ("r", None, {1, 2, 3}),
        ("r", None, {1, 2, 3}),
        ("r", None, {1, 2, 3}),
    ],
    [
        ("d", 1, False),
        ("i", 1, True),
        ("r", None, {1}),
        ("r", None, {1}),
        ("r", None, {1}),
        ("i", 2, True),
        ("r", None, {1, 2}),
        ("r", None, {1, 2}),
        ("r", None, {1, 2}),
        ("r", None, {1, 2}),
        ("r", None, {1, 2}),
        ("d", 1, True),
        ("d", 1, False),
        ("r", None, {2}),
        ("r", None, {2}),
        ("r", None, {2}),
        ("d", 2, True),
        ("i", 3, True),
        ("r", None, {3}),
        ("r", None, {3}),
        ("r", None, {3}),
        ("r", None, {3}),
    ],
]
@pytest.mark.timeout(1)
@pytest.mark.parametrize(("args"), test_cases)
@pytest.mark.parametrize("solution", solutions)
def test_solution(args, solution):
    """Replay one operation script against a fresh solution instance."""
    instance = solution()
    for op, value, expected in args:
        if op == "i":
            assert instance.insert(value) == expected
        elif op == "d":
            assert instance.remove(value) == expected
        else:
            # "r": getRandom() must land inside the allowed member set.
            assert instance.getRandom() in expected
|
[
"zinchenko@live.com"
] |
zinchenko@live.com
|
27eeeb653c05caa760b8785076bda08a096fb674
|
0eb599c3bbfa6e5b31516913b88cc9db3a1311ce
|
/AtCoder_unofficial/chokudai_speedrun_001_i.py
|
5148ec2cff2560f0cb7e129c29a7606713c0aa9f
|
[] |
no_license
|
Linus-MK/AtCoder
|
5b84dc88c2d2773d0f97ed18265d303290da7879
|
a587e89a9e0c2ab4d36b09176bcc95e901e14326
|
refs/heads/master
| 2022-11-25T05:37:12.148722
| 2022-11-17T16:04:10
| 2022-11-17T16:04:10
| 169,840,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
n = int(input())
nums = list(map(int, input().split()))

# Prefix sums: prefix[j] - prefix[i] is the sum of nums[i:j].
# The prefix array is monotonically non-decreasing, so the inner scan can
# stop as soon as a window sum exceeds n.  (Original note: ran in ~369 ms,
# comfortably within the time limit.)
prefix = [0] * (n + 1)
running = 0
for idx in range(n):
    running += nums[idx]
    prefix[idx + 1] = running

# Count subarrays whose sum is exactly n.
answer = 0
for left in range(n + 1):
    for right in range(left + 1, n + 1):
        window = prefix[right] - prefix[left]
        if window == n:
            answer += 1
        elif window > n:
            break
print(answer)
|
[
"13600386+Linus-MK@users.noreply.github.com"
] |
13600386+Linus-MK@users.noreply.github.com
|
5225cec94bbd84fd01b937451ec2e442f10c6b36
|
64aadced1900d9791099228fa91995c2f8444633
|
/python/prices.py
|
1865f0e7dfe64d2745c9ef79321c2b43b4be11fc
|
[] |
no_license
|
ctmakro/playground
|
821a8c668b58ebd81cd48309e6f4c6cd16badea7
|
5d6e8e528f1913b6089322ef388213cec5264ae1
|
refs/heads/master
| 2020-12-25T01:51:12.041611
| 2020-07-14T19:17:24
| 2020-07-14T19:17:24
| 57,165,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# TradingView "mini symbol overview" widget template.  The literal
# JavaScript braces are doubled ({{ }}) so that str.format substitutes
# only the three placeholders: display title, TradingView symbol, and
# date range.
wg = '''<!-- TradingView Widget BEGIN -->
<div class="tradingview-widget-container">
<div class="asdf">{}</div>
<div class="tradingview-widget-container__widget"></div>
<script type="text/javascript" src="https://s3.tradingview.com/external-embedding/embed-widget-mini-symbol-overview.js" async>
{{
"symbol": "{}",
"width": "280",
"height": "280",
"locale": "en",
"dateRange": "{}",
"colorTheme": "light",
"trendLineColor": "#37a6ef",
"underLineColor": "#e3f2fd",
"isTransparent": false,
"autosize": false,
"largeChartUrl": ""
}}
</script>
</div>
<!-- TradingView Widget END -->'''

# Shared CSS so the widgets flow inline with centered captions.
print('''
<style>
.tradingview-widget-container{
display:inline-block;
margin:5px;
}
.asdf { text-align:center;}
</style>''')

# Symbols and their display names, paired positionally via zip below
# (zip truncates to the shorter list, so keep the two in sync).
# Composite expressions (e.g. 1/CNYUSD, XAUUSD per gram in CNY) are
# valid TradingView symbol syntax.
items = '1/FX_IDC:CNYUSD,FOREXCOM:XAUUSD/31.1034807/FX_IDC:CNYUSD,INDEX:HSI,GOOGL,AAPL'.split(',')
names = 'USD/CNY,CNY/g,HSI,Google,Apple'.split(',')
dataranges = '12m,1m'.split(',')

# Emit one widget per (symbol, date-range) combination.
for n,i in zip(names,items):
    for d in dataranges:
        print(wg.format(n+(' ({})'.format(d)), i, d))
|
[
"ctmakro@gmail.com"
] |
ctmakro@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.