blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ee47ea3245a9cd92cd24c180636dd59a9eba5dfa
|
b6a31ec10b39a3dbae183ba40c42078cadf88946
|
/383. Ransom Note.py
|
900fdd239e174e5afdc8395cd90ddc62d89eb8ef
|
[] |
no_license
|
QIAOZHIBAO0104/My-Leetcode-Records
|
69fabd11b279f08861cd644973e51bf664da0d90
|
882724c8d50b2f21193c81e5072c31385c5e6b8e
|
refs/heads/main
| 2023-07-11T00:17:02.368441
| 2021-08-07T16:19:45
| 2021-08-07T16:19:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
'''
https://leetcode.com/problems/ransom-note/
Given an arbitrary ransom note string and another string containing letters from all the magazines,
write a function that will return true if the ransom note can be constructed from the magazines ; otherwise, it will return false.
Each letter in the magazine string can only be used once in your ransom note.
Example 1:
Input: ransomNote = "a", magazine = "b"
Output: false
Example 3:
Input: ransomNote = "aa", magazine = "aab"
Output: true
'''
'''
Method below uses a hash table of character counts.
Time:O(n)
Space:O(1)  (at most one entry per distinct character)
'''
class Solution(object):
    def canConstruct(self, ransomNote, magazine):
        """
        Return True if ransomNote can be assembled from magazine's
        letters, each magazine letter being used at most once.

        :type ransomNote: str
        :type magazine: str
        :rtype: bool
        """
        # BUG FIX: the original built the table with `defaultdict(int)`
        # but never imported it from `collections`, so calling this
        # method raised NameError.  A plain dict with .get() needs no
        # import and behaves identically.
        counts = {}
        for ch in magazine:
            counts[ch] = counts.get(ch, 0) + 1
        # Spend counts while scanning the note; fail on any shortage.
        for ch in ransomNote:
            if counts.get(ch, 0) == 0:
                return False
            counts[ch] -= 1
        return True
'''
Brute-force method: repeatedly consume used letters from the magazine.
Time:O(m*n)
Space:O(m)
'''
class Solution(object):
    def canConstruct(self, ransomNote, magazine):
        # Walk the note; each needed letter removes one occurrence of
        # itself from the remaining magazine text.
        remaining = magazine
        for needed in ransomNote:
            if needed not in remaining:
                return False
            # str.replace with count=1 drops exactly the first
            # occurrence — same effect as slicing it out by index.
            remaining = remaining.replace(needed, '', 1)
        return True
|
[
"noreply@github.com"
] |
QIAOZHIBAO0104.noreply@github.com
|
3457d37c6f1dfc3005c54f4b733dd89fd782404f
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/ARC/arc001-arc050/arc041/b.py
|
923178d3ccfc5919230f3dbbbcc329f538fb9950
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 764
|
py
|
# -*- coding: utf-8 -*-
def main():
    # Grid dimensions, then one row of digits per input line.
    height, width = map(int, input().split())
    grid = [list(map(int, input())) for _ in range(height)]
    neighbors = ((0, 1), (0, -1), (-1, 0), (1, 0))
    result = [[0] * width for _ in range(height)]
    # Only interior cells are processed; border cells stay 0.
    for row in range(1, height - 1):
        for col in range(1, width - 1):
            # Smallest value among the four orthogonal neighbors.
            lowest = min(grid[row + dy][col + dx] for dx, dy in neighbors)
            # Subtract that amount from every neighbor and record it
            # as this cell's answer digit.
            for dx, dy in neighbors:
                grid[row + dy][col + dx] -= lowest
            result[row][col] = lowest
    for line in result:
        print(''.join(map(str, line)))


if __name__ == '__main__':
    main()
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
8dc21b539a1709fbbc19f848faf4d29c48d613e5
|
cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc
|
/Python Books/Athena/training/demo/demo/weave/sum_ex.py
|
e71d44f05ce9cdb9fe79675d5a085c08694899d1
|
[] |
no_license
|
theGreenJedi/Path
|
df24fca355590efef0c6cb5c52e7216c6b5d2464
|
b5ed2805dbb046480929e49e550bfd8af5bb4d6f
|
refs/heads/master
| 2023-07-27T14:23:37.694546
| 2021-07-16T01:38:55
| 2021-07-16T01:38:55
| 87,686,563
| 8
| 2
| null | 2023-07-11T22:49:03
| 2017-04-09T05:57:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
import time
from numpy import array, arange
from scipy import weave
# NOTE(review): legacy Python 2 code (print statements) relying on
# scipy.weave, which was removed from SciPy — kept byte-identical.
def weave_sum(a):
    """ Build a 'sum' method that will work on contiguous
    1D arrays.
    """
    # Inline C compiled by scipy.weave; `Na[0]` is the length of the
    # passed array `a` (weave exposes N<name> for each array argument),
    # and `return_val` becomes the Python return value.
    code = """
           double sum = 0.0;
           for(int i=0; i < Na[0]; i++)
               sum += a[i];
           return_val = sum;
           """
    return weave.inline(code,['a'],compiler='gcc')

# compile (if necessary) and print the result.
a = array([1,2,3,4.0])
print "sum:", weave_sum(a)

############################################################################
# Compare the weave_sum to the built in 'sum' function on for a list as well
# as to a numpy array sum method.
for i in range(7):
    # Sizes 1, 10, ..., 10**6; time each summation once per size.
    size = 10**i
    a = arange(size)  # array
    l = range(size)   # list
    print 'size:', size
    t1=time.clock(); b = weave_sum(a); t2=time.clock();
    weave_time = (t2-t1)/1e-3
    t1=time.clock(); b = a.sum(); t2=time.clock();
    numpy_time = (t2-t1)/1e-3
    t1=time.clock(); b = sum(l); t2=time.clock();
    list_time = (t2-t1)/1e-3
    # Times reported in milliseconds (the /1e-3 conversion above).
    print 'list, numpy, weave (ms): %4.4f, %4.4f, %4.4f' % \
        (list_time, numpy_time, weave_time)
    print 'numpy/list, numpy/weave: %3.2f, %3.2f' % \
        (numpy_time/list_time, numpy_time/weave_time)
|
[
"GreenJedi@protonmail.com"
] |
GreenJedi@protonmail.com
|
55c90344c15d291f3944d5301b73b6802bc87b8a
|
321e58ab3e6b2385bb3549aaaefd56a58c2a51e7
|
/python/tests/install_test.py
|
3b7fc9c2bb45e11fe69b5f78ed4954a33be2b93f
|
[] |
no_license
|
alexmadon/atpic_photosharing
|
7829118d032344bd9a67818cd50e2c27a228d028
|
9fdddeb78548dadf946b1951aea0d0632e979156
|
refs/heads/master
| 2020-06-02T15:00:29.282979
| 2017-06-12T17:09:52
| 2017-06-12T17:09:52
| 94,095,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
import unittest
import os
import sys
import atpic.installerat
class installer_test(unittest.TestCase):
    """Test the installer test functions"""

    def testFind(self):
        # Runs check_file_match_re("GOOD", ...) against a fixture file and
        # prints the result — presumably a match count; confirm against
        # atpic.installerat.  No assertion is made, so this never fails.
        # NOTE: Python 2 print statement — this module targets Python 2.
        file="fixture/matcher.txt"
        nb=atpic.installerat.check_file_match_re("GOOD",file)
        print nb
class install_test(unittest.TestCase):
    """USER legacy urls"""

    def testSyslogExist(self):
        """We need syslog-ng"""
        # Fail with a helpful message if the syslog-ng config is absent.
        path="/etc/syslog-ng/syslog-ng.conf"
        exist=os.path.exists(path)
        # print "Exist: %s" % exist
        self.assertEqual(
            True,
            exist,
            "%s is missing: it seems you don't have syslog-ng installed."%path)

    def testSyslogDate(self):
        """ checks if the syslog-ng.conf file contains:
        ts_format(iso)
        which is needed for page view count
        """
        # NOTE(review): body is only this docstring — the described check
        # is unimplemented, so this test always passes.

    # def testftp(self):
    #     from ftplib import FTP
    #     ftp = FTP("u1.up.atpic.com","someuser","somepass")
    #     ftp.dir()
    #     ftp.quit()

if __name__ == "__main__":
    unittest.main()
|
[
"alex.madon@gmail.com"
] |
alex.madon@gmail.com
|
5b96b3ddcc2765aafbb02d61bec76bd8d23d15cd
|
33e2187c1815b1e1209743f5a4870401d2097d71
|
/CTCI/Tree & Graph/q.4.11.py
|
dfa56f17cc8b2af6fcc49e074a6ab6f8e7d13531
|
[] |
no_license
|
sachinjose/Coding-Prep
|
8801e969a3608b5e69dc667cba7f3afaf7273e88
|
95f6bc85e7c38034e358af47ef4c228937cd4629
|
refs/heads/master
| 2022-12-26T22:49:48.510197
| 2020-09-22T07:05:55
| 2020-09-22T07:05:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
import random
class Node:
    """Binary search tree node that tracks its subtree size so a
    uniformly random node can be drawn in O(height) (CTCI 4.11)."""

    def __init__(self, item):
        self.item = item
        self.left = None
        self.right = None
        self.size = 1  # number of nodes in the subtree rooted here

    def get_size(self):
        """Return the number of nodes in this subtree."""
        return self.size

    def get_item(self):
        """Return the value stored at this node."""
        return self.item

    def getRandomNode(self):
        """Return the item of a node chosen uniformly from this subtree.

        index in [0, leftSize) -> recurse left; == leftSize -> this
        node; otherwise -> recurse right.
        """
        leftSize = self.left.get_size() if self.left else 0
        # BUG FIX: random.randint(0, size) is inclusive on BOTH ends,
        # producing size+1 outcomes for size nodes and skewing the
        # distribution toward the right subtree.  randrange(size) yields
        # exactly `size` equally likely indices.
        index = random.randrange(self.get_size())
        if index < leftSize:
            # index < leftSize implies self.left exists (leftSize > 0).
            return self.left.getRandomNode()
        elif index == leftSize:
            return self.item
        else:
            return self.right.getRandomNode()

    def insertInOrder(self, item):
        """Insert `item` preserving BST order; duplicates go right."""
        if item < self.item:
            if self.left:
                self.left.insertInOrder(item)
            else:
                self.left = Node(item)
        else:
            if self.right:
                self.right.insertInOrder(item)
            else:
                self.right = Node(item)
        # One more node now lives somewhere in this subtree.
        self.size += 1

    def find(self, value):
        """Return the node holding `value`, or None if absent.

        BUG FIX: the original recursed with `self.left.find(value)` /
        `self.right.find(value)` but dropped the result, so every
        non-root lookup returned None.
        """
        if self.item == value:
            return self
        elif value < self.item:
            return self.left.find(value) if self.left else None
        else:
            return self.right.find(value) if self.right else None
# Demo: build a small BST from mixed values (duplicates go to the right
# subtree) and print the value of one randomly selected node.
root = Node(10)
root.insertInOrder(5)
root.insertInOrder(-3)
root.insertInOrder(3)
root.insertInOrder(2)
root.insertInOrder(11)
root.insertInOrder(3)
root.insertInOrder(-2)
root.insertInOrder(1)
print(root.getRandomNode())
|
[
"sachinjose16@gmail.com"
] |
sachinjose16@gmail.com
|
60c9a1554ec6a514ee2d377e0f436211713d08d6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_drawls.py
|
dcb613684441c7614a936842e41bce32f8ab5fe1
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#calss header
class _DRAWLS():
def __init__(self,):
self.name = "DRAWLS"
self.definitions = drawl
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['drawl']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0a06a1af1bd774cd4ea1106f94f1566e382efbe9
|
0fe37e11df976c55fe5bbe492879b7cd8a95b7c5
|
/27_public_test/03_spider_test.py
|
bad3562d5d72ee0956bc52b9c32569d4ae326cfe
|
[] |
no_license
|
1286211699/mmc_code
|
9bb7761107604b445dea4fe5acf9d503fbc28dfa
|
ee97879632dfd7d24c604f7db52c82fa29109daa
|
refs/heads/master
| 2022-12-08T23:19:06.382825
| 2020-05-08T13:59:46
| 2020-05-08T13:59:46
| 177,100,815
| 2
| 0
| null | 2022-12-08T01:42:47
| 2019-03-22T08:25:37
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,597
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2018/12/27 14:49
# @Author : for
# @File : 03_spider_test.py
# @Software: PyCharm
# from urllib import request
#
# import requests,time
# url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E5%94%AF%E7%BE%8E&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=©right=&word=%E5%94%AF%E7%BE%8E&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn=30&rn=30&gsm=1e&1545892906631='
#
# headers = {
# 'Referer': 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1545892872367_R&pv=&ic=0&nc=1&z=&hd=&latest=©right=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=%E5%94%AF%E7%BE%8E',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'
# }
# data_str='''
# tn: resultjson_com
# ipn: rj
# ct: 201326592
# is:
# fp: result
# queryWord: 大美女
# cl: 2
# lm: -1
# ie: utf-8
# oe: utf-8
# adpicid:
# st: -1
# z:
# ic: 0
# hd:
# latest:
# copyright:
# word: 大美女
# s:
# se:
# tab:
# width:
# height:
# face: 0
# istype: 2
# qc:
# nc: 1
# fr:
# expermode:
# force:
# pn: 30
# rn: 60
# gsm: 1e
# 1545892906631:
# '''
# send_data = {}
#
# for line in data_str.splitlines():
# # print(line)
# line_data = line.split(': ')
# # print(line_data)
# if len(line_data) == 2:
# key,value = line_data
# if key and value:
# send_data[key] = value
# # print(key)
# # print(send_data)
#
# response = requests.get(url=url,headers=headers,params=send_data)
# # print(response)
# # print(response.text)
# content = response.json()['data']
# for index,src in enumerate(content):
# # pass
# img_url = src.get('middleURL')
# # print(img_url)
# if img_url:
# name = './image/image_%s_%s.png'%('唯美',index)
# try:
# request.urlretrieve(url=img_url,filename=name)
# except Exception as e:
# print(e)
# else:
# print('%s is download'%name)
# time.sleep(1)
# print(content)
def get_image(keywords):
    """Download one page of Baidu image-search results for `keywords`.

    Each result's `middleURL` thumbnail is saved under ./image/ and the
    loop sleeps 1s between downloads.  The query parameters are parsed
    from a captured browser request dump (`data_str`).
    """
    from urllib import request
    import requests, time
    url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E5%94%AF%E7%BE%8E&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=©right=&word=%E5%94%AF%E7%BE%8E&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn=30&rn=30&gsm=1e&1545892906631='
    headers = {
        'Referer': 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1545892872367_R&pv=&ic=0&nc=1&z=&hd=&latest=©right=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=%E5%94%AF%E7%BE%8E',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'
    }
    data_str = '''
tn: resultjson_com
ipn: rj
ct: 201326592
is:
fp: result
queryWord: 大美女
cl: 2
lm: -1
ie: utf-8
oe: utf-8
adpicid:
st: -1
z:
ic: 0
hd:
latest:
copyright:
word: 大美女
s:
se:
tab:
width:
height:
face: 0
istype: 2
qc:
nc: 1
fr:
expermode:
force:
pn: 30
rn: 60
gsm: 1e
1545892906631:
'''
    # Parse the "key: value" dump into a request-params dict; lines
    # with an empty value are dropped.
    send_data = {}
    for line in data_str.splitlines():
        line_data = line.split(': ')
        if len(line_data) == 2:
            key, value = line_data
            if key and value:
                send_data[key] = value
    # BUG FIX: the override key was 'queryword' (lowercase w), which
    # added a stray parameter while the real 'queryWord' value parsed
    # from data_str stayed untouched — the caller's keywords were
    # effectively ignored for that parameter.
    send_data['word'] = send_data['queryWord'] = keywords
    response = requests.get(url=url, headers=headers, params=send_data)
    content = response.json()['data']
    for index, src in enumerate(content):
        img_url = src.get('middleURL')
        # Entries without a middleURL (e.g. the trailing empty record)
        # are skipped.
        if img_url:
            # NOTE(review): file name still hardcodes '唯美' rather than
            # `keywords`; kept as-is to avoid changing output paths.
            name = './image/image_%s_%s.png' % ('唯美', index)
            try:
                request.urlretrieve(url=img_url, filename=name)
            except Exception as e:
                print(e)
            else:
                print('%s is download' % name)
            time.sleep(1)
if __name__ == '__main__':
    # Prompt (Chinese): "enter the keyword you want to crawl".
    key = input('请输入你想爬取的内容')
    get_image(key)
|
[
"1286211699@qq.com"
] |
1286211699@qq.com
|
e3d23e19fb56278fe483550fa5c9206258d1866b
|
609085edf06f3091ecd639007480fb7cba15126f
|
/napari/components/experimental/monitor/_monitor.py
|
7f924f33ab276f4524af867b366a2f4a12ffd949
|
[
"BSD-3-Clause"
] |
permissive
|
jojoelfe/napari
|
c7432daa6672045e95a58946c90ed0a5c779ef2d
|
c871c319247b79d4c5cc9270c0fec04740aed9d0
|
refs/heads/master
| 2023-05-12T17:08:48.145360
| 2022-12-21T16:25:43
| 2022-12-21T16:25:43
| 241,278,044
| 0
| 0
|
BSD-3-Clause
| 2023-05-01T02:58:28
| 2020-02-18T05:11:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,774
|
py
|
"""Monitor class.
The Monitor class wraps the MonitorServer and MonitorApi. One reason
for having a wrapper class is that so the rest of napari does not
need to import any multiprocessing code unless actually using
the monitor.
"""
import errno
import json
import logging
import os
import sys
from pathlib import Path
from typing import Optional
from napari.utils.translations import trans
# Module-wide logger; _setup_logging() attaches a file handler to it.
LOGGER = logging.getLogger("napari.monitor")

# If False monitor is disabled even if we meet all other requirements.
ENABLE_MONITOR = True
def _load_config(path: str) -> dict:
"""Load the JSON formatted config file.
Parameters
----------
path : str
The path of the JSON file we should load.
Returns
-------
dict
The parsed data from the JSON file.
"""
path = Path(path).expanduser()
if not path.exists():
raise FileNotFoundError(
errno.ENOENT,
trans._(
"Monitor: Config file not found: {path}",
deferred=True,
path=path,
),
)
with path.open() as infile:
return json.load(infile)
def _load_monitor_config() -> Optional[dict]:
"""Return the MonitorService config file data, or None.
Returns
-------
Optional[dict]
The parsed config file data or None if no config.
"""
# We shouldn't even call into this file unless NAPARI_MON is defined
# but check to be sure.
value = os.getenv("NAPARI_MON")
if value in [None, "0"]:
return None
return _load_config(value)
def _setup_logging(config: dict) -> None:
    """Log "napari.monitor" messages to the configured file.

    Parameters
    ----------
    config : dict
        Monitor configuration; only the optional 'log_path' key is read.
    """
    if 'log_path' not in config:
        return  # No log file configured.
    log_path = config['log_path']

    # Nuke/reset log for now.
    # Path(log_path).unlink()

    handler = logging.FileHandler(log_path)
    LOGGER.addHandler(handler)
    LOGGER.setLevel(logging.DEBUG)
    LOGGER.info("Writing to log path %s", log_path)
def _get_monitor_config() -> Optional[dict]:
    """Create and return the configuration for the MonitorService.

    The routine might return None for one of several reasons:
    1) We're not running under Python 3.9 or newer.
    2) The monitor is explicitly disabled, ENABLE_MONITOR is False.
    3) The NAPARI_MON environment variable is not defined.
    4) The NAPARI_MON config file cannot be found and parsed.

    Returns
    -------
    Optional[dict]
        The configuration for the MonitorService.
    """
    if sys.version_info[:2] < (3, 9):
        # We require Python 3.9 for now. The shared memory features we need
        # were added in 3.8, but the 3.8 implementation was buggy. It's
        # possible we could backport to or otherwise fix 3.8 or even 3.7,
        # but for now we're making 3.9 a requirement.
        print("Monitor: not starting, requires Python 3.9 or newer")
        return None
    if not ENABLE_MONITOR:
        print("Monitor: not starting, disabled")
        return None
    # The NAPARI_MON environment variable points to our config file.
    config = _load_monitor_config()
    if config is None:
        print("Monitor: not starting, no usable config file")
        return None
    return config
class Monitor:
    """Wraps the monitor service.

    We can't start the monitor service at import time. Under the hood the
    multiprocessing complains about a "partially started process".
    Instead someone must call our start() method explicitly once the
    process has fully started.
    """

    def __init__(self):
        # Both are set when start() is called, and only if we have
        # a parseable config file, have Python 3.9, etc.
        self._service = None
        self._api = None
        self._running = False

    def __bool__(self) -> bool:
        """Return True if the service is running.

        So that callers can do:

            if monitor:
                monitor.add(...)

        BUG FIX: this hook was defined as ``__nonzero__``, the Python 2
        truth-test name. Python 3 only consults ``__bool__``, so a
        stopped Monitor still evaluated as True and the guard above
        never filtered anything.
        """
        return self._running

    # Backward-compatible alias for any caller using the old name.
    __nonzero__ = __bool__

    @property
    def run_command_event(self):
        """The MonitorAPI fires this event for commands from clients.

        Only valid after a successful start(); self._api is None before.
        """
        return self._api.events.run_command

    def start(self) -> bool:
        """Start the monitor service, if it hasn't been started already.

        Returns
        -------
        bool
            True if we started the service or it was already started.
        """
        if self._running:
            return True  # It was already started.

        config = _get_monitor_config()
        if config is None:
            return False  # Can't start without config.

        _setup_logging(config)

        # Late imports so no multiprocessing modules are even
        # imported unless we are going to start the service.
        from napari.components.experimental.monitor._api import MonitorApi
        from napari.components.experimental.monitor._service import (
            MonitorService,
        )

        # Create the API first. It will register our callbacks, then
        # we start the manager that will serve those callbacks.
        self._api = MonitorApi()

        # Now we can start our service.
        self._service = MonitorService(config, self._api.manager)

        self._running = True
        return True  # We started the service.

    def stop(self) -> None:
        """Stop the monitor service."""
        if not self._running:
            return
        self._api.stop()
        self._api = None
        self._service.stop()
        self._service = None
        self._running = False

    def on_poll(self, event) -> None:
        """The QtPoll object polls us.

        Probably we could get rid of polling by creating a thread that
        blocks waiting for client messages. Then it posts those messages as
        Qt Events. So the GUI doesn't block, but gracefully handles
        incoming messages as Qt events.
        """
        if self._running:
            self._api.poll()

        # Handle the event to say "keep polling us".
        event.handled = True

    def add_data(self, data) -> None:
        """Add data to the monitor service.

        Caller should use this pattern:
            if monitor:
                monitor.add(...)

        So no time wasted assembling the dict unless the monitor is running.
        """
        if self._running:
            self._api.add_napari_data(data)

    def send_message(self, message: dict) -> None:
        """Send a message to shared memory clients.

        Parameters
        ----------
        message : dict
            Post this message to clients.
        """
        if self._running:
            self._api.send_napari_message(message)
# Module-level singleton instance; callers test its truthiness before use.
monitor = Monitor()
|
[
"noreply@github.com"
] |
jojoelfe.noreply@github.com
|
5a4d13317f4b72112f488988ccf1aec7d04865b9
|
e89837f01f81a8b63015a31a45507fca95b75335
|
/inference/export.py
|
433d65683b0a9f46e56b8a50241a67205beba67a
|
[] |
no_license
|
PaulZhangIsing/etagger
|
4af56fa30ae7e77411be77750d679c66b0ab3848
|
306b76e51124623ff434e45b0f5e3d2915eaa5d6
|
refs/heads/master
| 2020-04-24T01:03:51.168249
| 2019-02-20T02:06:35
| 2019-02-20T02:06:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,583
|
py
|
from __future__ import print_function
import sys
import time
import argparse
import tensorflow as tf
# for LSTMBlockFusedCell(), https://github.com/tensorflow/tensorflow/issues/23369
# Touching tf.contrib.rnn registers the fused-cell ops so that
# import_meta_graph() below can resolve them.
tf.contrib.rnn

def export(args):
    """Restore a TF1 NER checkpoint and re-export it plus its graph proto.

    Reads `args.restore` (checkpoint prefix), writes a new checkpoint to
    `args.export` and the graph to `args.export_pb` as both binary
    (graph.pb) and text (graph.pb_txt).
    """
    session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # restore meta graph
        meta_file = args.restore + '.meta'
        loader = tf.train.import_meta_graph(meta_file, clear_devices=True)
        # mapping placeholders and tensors
        # (names below must match those used at training time)
        graph = tf.get_default_graph()
        p_is_train = graph.get_tensor_by_name('is_train:0')
        p_sentence_length = graph.get_tensor_by_name('sentence_length:0')
        p_input_data_pos_ids = graph.get_tensor_by_name('input_data_pos_ids:0')
        p_input_data_word_ids = graph.get_tensor_by_name('input_data_word_ids:0')
        p_input_data_wordchr_ids = graph.get_tensor_by_name('input_data_wordchr_ids:0')
        t_logits = graph.get_tensor_by_name('logits:0')
        t_trans_params = graph.get_tensor_by_name('loss/trans_params:0')
        t_sentence_lengths = graph.get_tensor_by_name('sentence_lengths:0')
        print('is_train', p_is_train)
        print('sentence_length', p_sentence_length)
        print('input_data_pos_ids', p_input_data_pos_ids)
        print('input_data_word_ids', p_input_data_word_ids)
        print('input_data_wordchr_ids', p_input_data_wordchr_ids)
        print('logits', t_logits)
        print('trans_params', t_trans_params)
        print('sentence_lengths', t_sentence_lengths)
        # restore actual values
        loader.restore(sess, args.restore)
        print(tf.global_variables())
        print(tf.trainable_variables())
        print('model restored')
        # save
        saver = tf.train.Saver(tf.global_variables())
        saver.save(sess, args.export)
        tf.train.write_graph(sess.graph, args.export_pb, "graph.pb", as_text=False)
        tf.train.write_graph(sess.graph, args.export_pb, "graph.pb_txt", as_text=True)
        print('model exported')
    sess.close()
if __name__ == '__main__':
    # CLI entry point: all three paths are mandatory.
    parser = argparse.ArgumentParser()
    parser.add_argument('--restore', type=str, help='path to saved model(ex, ../checkpoint/ner_model)', required=True)
    parser.add_argument('--export', type=str, help='path to exporting model(ex, exported/ner_model)', required=True)
    parser.add_argument('--export-pb', type=str, help='path to exporting graph proto(ex, exported)', required=True)
    args = parser.parse_args()
    export(args)
|
[
"hazzling@gmail.com"
] |
hazzling@gmail.com
|
7174dacdc84b4da1c7036ed509ffdd9820d05787
|
a2ab6c23253badb3be54b19ba061e1aeaac6a8cd
|
/obj_detection/tf_api/object_detection/builders/image_resizer_builder.py
|
13c029cd24114d4fa78c0e21a52e4ff45500ac24
|
[] |
no_license
|
vivek09pathak/ImageDetection_RealTime
|
0720fb4a6f35a81591f401a04ae44aa3bbea013f
|
d9e376b41a1216aecaacc9626cee59d45001695c
|
refs/heads/master
| 2022-12-26T22:04:18.328476
| 2020-09-30T10:20:15
| 2020-09-30T10:20:15
| 152,729,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,693
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import functools
import tensorflow as tf
from obj_detection.tf_api.object_detection.core import preprocessor
from obj_detection.tf_api.object_detection.protos import image_resizer_pb2
def _tf_resize_method(resize_method):
  """Maps image resize method from enumeration type to TensorFlow.

  Args:
    resize_method: The resize_method attribute of keep_aspect_ratio_resizer or
      fixed_shape_resizer.

  Returns:
    method: The corresponding TensorFlow ResizeMethod.

  Raises:
    ValueError: if `resize_method` is of unknown type.
  """
  proto_to_tf_method = {
      image_resizer_pb2.BILINEAR: tf.image.ResizeMethod.BILINEAR,
      image_resizer_pb2.NEAREST_NEIGHBOR:
          tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      image_resizer_pb2.BICUBIC: tf.image.ResizeMethod.BICUBIC,
      image_resizer_pb2.AREA: tf.image.ResizeMethod.AREA,
  }
  if resize_method not in proto_to_tf_method:
    raise ValueError('Unknown resize_method')
  return proto_to_tf_method[resize_method]
def build(image_resizer_config):
  """Builds callable for image resizing operations.

  Args:
    image_resizer_config: image_resizer.proto object containing parameters for
      an image resizing operation.

  Returns:
    image_resizer_fn: Callable for image resizing. This callable always takes
      a rank-3 image tensor (corresponding to a single image) and returns a
      rank-3 image tensor, possibly with new spatial dimensions.

  Raises:
    ValueError: if `image_resizer_config` is of incorrect type.
    ValueError: if `image_resizer_config.image_resizer_oneof` is of an
      unexpected type.
    ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
      is used.
  """
  if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
    raise ValueError('image_resizer_config not of type '
                     'image_resizer_pb2.ImageResizer.')

  image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
  if image_resizer_oneof == 'keep_aspect_ratio_resizer':
    keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
    if not (keep_aspect_ratio_config.min_dimension <=
            keep_aspect_ratio_config.max_dimension):
      raise ValueError('min_dimension > max_dimension')
    method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
    per_channel_pad_value = (0, 0, 0)
    if keep_aspect_ratio_config.per_channel_pad_value:
      per_channel_pad_value = tuple(keep_aspect_ratio_config.
                                    per_channel_pad_value)
    image_resizer_fn = functools.partial(
        preprocessor.resize_to_range,
        min_dimension=keep_aspect_ratio_config.min_dimension,
        max_dimension=keep_aspect_ratio_config.max_dimension,
        method=method,
        pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension,
        per_channel_pad_value=per_channel_pad_value)
    # When convert_to_grayscale is set we fall through to the grayscale
    # wrapper defined at the bottom of this function instead of
    # returning here.
    if not keep_aspect_ratio_config.convert_to_grayscale:
      return image_resizer_fn
  elif image_resizer_oneof == 'fixed_shape_resizer':
    fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
    method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
    image_resizer_fn = functools.partial(
        preprocessor.resize_image,
        new_height=fixed_shape_resizer_config.height,
        new_width=fixed_shape_resizer_config.width,
        method=method)
    # Same fall-through as above for the grayscale case.
    if not fixed_shape_resizer_config.convert_to_grayscale:
      return image_resizer_fn
  else:
    raise ValueError(
        'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)

  # Only reached when convert_to_grayscale is requested: wrap the
  # resizer so its output is converted to a single-channel image (the
  # shape's last dimension is rewritten to 1 accordingly).
  def grayscale_image_resizer(image):
    [resized_image, resized_image_shape] = image_resizer_fn(image)
    grayscale_image = preprocessor.rgb_to_gray(resized_image)
    grayscale_image_shape = tf.concat([resized_image_shape[:-1], [1]], 0)
    return [grayscale_image, grayscale_image_shape]

  return functools.partial(grayscale_image_resizer)
|
[
"anupamb266@gmail.com"
] |
anupamb266@gmail.com
|
e402f7415864f1e25dab0fb3075576751ea865b9
|
9a391fdd281e92427c299e061335987b3a0d0ce5
|
/scripts/download_attachments.py
|
2a71d0d63c6ed24d1c56c436a11b240821721298
|
[] |
no_license
|
felixebert/offeneskoeln
|
8b63d5c5e669f0b65e56d935d73f82a0c14390fb
|
587b7e5564a31522ea28034886b78221eb509ace
|
refs/heads/master
| 2021-01-16T20:44:12.206718
| 2013-01-08T14:49:39
| 2013-01-08T14:49:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Lädt Anhänge eines bestimmten Datumsbereichs herunter
Copyright (c) 2012 Marian Steinbach
Hiermit wird unentgeltlich jeder Person, die eine Kopie der Software und
der zugehörigen Dokumentationen (die "Software") erhält, die Erlaubnis
erteilt, sie uneingeschränkt zu benutzen, inklusive und ohne Ausnahme, dem
Recht, sie zu verwenden, kopieren, ändern, fusionieren, verlegen,
verbreiten, unterlizenzieren und/oder zu verkaufen, und Personen, die diese
Software erhalten, diese Rechte zu geben, unter den folgenden Bedingungen:
Der obige Urheberrechtsvermerk und dieser Erlaubnisvermerk sind in allen
Kopien oder Teilkopien der Software beizulegen.
Die Software wird ohne jede ausdrückliche oder implizierte Garantie
bereitgestellt, einschließlich der Garantie zur Benutzung für den
vorgesehenen oder einen bestimmten Zweck sowie jeglicher Rechtsverletzung,
jedoch nicht darauf beschränkt. In keinem Fall sind die Autoren oder
Copyrightinhaber für jeglichen Schaden oder sonstige Ansprüche haftbar zu
machen, ob infolge der Erfüllung eines Vertrages, eines Delikts oder anders
im Zusammenhang mit der Software oder sonstiger Verwendung der Software
entstanden.
"""
import sys
import os
import json
import urllib
from optparse import OptionParser
def get_documents(date=None):
    """
    Execute the API request that fetches documents (attachments output)
    for the given date range.  Returns the parsed JSON dict, or None
    (implicitly) when the HTTP status is not 200.
    """
    # Python 2 urllib API: urlopen/urlretrieve moved in Python 3.
    url = 'http://offeneskoeln.de/api/documents'
    url += '?docs=10000&output=attachments&date=%s' % date
    request = urllib.urlopen(url)
    if request.getcode() != 200:
        # Report the failure on stderr; caller receives None.
        sys.stderr.write('Bad HTTP Status: ' + str(request.getcode()))
        sys.stderr.write(request.read())
    else:
        jsons = request.read()
        return json.loads(jsons)
if __name__ == '__main__':
usage = "usage: %prog <daterange>"
usage += "\n\nWhere daterange is e.g. 2010-2011 or 201208-201209"
parser = OptionParser(usage=usage)
parser.add_option("-f", "--folder", dest="folder",
help="write files to folder FOLDER", metavar="FOLDER")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("No date range given")
if options.folder is None:
options.folder = '.'
result = get_documents(args[0])
num = result['response']['numhits']
print num, "Document(s) found"
if num > 0:
for doc in result['response']['documents']:
if 'attachments' in doc:
for attachment in doc['attachments']:
print "Downloading", attachment['url']
filename = attachment['url'].split('/')[-1]
path = options.folder + os.sep + filename
urllib.urlretrieve(attachment['url'], filename)
|
[
"marian@sendung.de"
] |
marian@sendung.de
|
00d73fc855f563d69f9529e02177901bbf755bdb
|
6841787dc208198535c90ee76d035d8837f8b973
|
/alipay/aop/api/response/AlipayCommerceEcEmployeeInviteQueryResponse.py
|
f2d86a19b1c5ee78a6a56be338006b74a17a9d00
|
[
"Apache-2.0"
] |
permissive
|
demlution/alipay-sdk-python-all
|
e56ea12651717476f940212c281b6c2199dd3fa6
|
c3cb81aa5980913e7cd51edcf754a804495a9a30
|
refs/heads/master
| 2022-10-09T02:08:28.789764
| 2022-09-20T04:32:03
| 2022-09-20T04:32:03
| 263,779,638
| 0
| 1
| null | 2020-05-14T00:58:51
| 2020-05-14T00:58:51
| null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceEcEmployeeInviteQueryResponse(AlipayResponse):
    """Response object for the alipay.commerce.ec.employee.invite.query API."""

    def __init__(self):
        super(AlipayCommerceEcEmployeeInviteQueryResponse, self).__init__()
        self._enterprise_id = None  # set by parse_response_content()
        self._sign_url = None       # set by parse_response_content()

    @property
    def enterprise_id(self):
        """Enterprise id from the gateway response (None until parsed)."""
        return self._enterprise_id

    @enterprise_id.setter
    def enterprise_id(self, value):
        self._enterprise_id = value

    @property
    def sign_url(self):
        """Signing URL from the gateway response (None until parsed)."""
        return self._sign_url

    @sign_url.setter
    def sign_url(self, value):
        self._sign_url = value

    def parse_response_content(self, response_content):
        """Populate this object's fields from the raw response payload."""
        response = super(AlipayCommerceEcEmployeeInviteQueryResponse,
                         self).parse_response_content(response_content)
        # Copy over only the fields the gateway actually returned.
        for field in ('enterprise_id', 'sign_url'):
            if field in response:
                setattr(self, field, response[field])
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
fdf6a03063cf072a159e6c921d303d350ab9a597
|
8a965c2a21d738468fc621915d39b39d1972fac0
|
/testserver.py
|
7307b5b80ecf648d3cd423398170a66f07f9ec90
|
[] |
no_license
|
Ashmaitelly/seproject2chatapp
|
38cc6dd918783f6d1d11aad0a91b8427c9316a3d
|
88af7d958d093802bae976f17f08f7fa7002420c
|
refs/heads/master
| 2023-01-22T23:45:08.774654
| 2020-12-06T17:47:29
| 2020-12-06T17:47:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
print('Server running...')
clients = []
class SimpleChat(WebSocket):
def handleMessage(self):
for client in clients:
if client != self:
client.sendMessage(self.data)
def handleConnected(self):
print(self.address, 'connected')
clients.append(self)
def handleClose(self):
clients.remove(self)
print(self.address, 'closed')
server = SimpleWebSocketServer('localhost', 8000, SimpleChat)
server.serveonce()
|
[
"email"
] |
email
|
0cbc1facbd06e26b6bb163a0add0ca8ae030a6fa
|
0d8ce8f494880c885bd48ac3a28ee9697c15aff4
|
/frontend/models.py
|
800c97d8456eaf532dbce38d285bb008d911dff0
|
[] |
no_license
|
chensandiego/carrental-django
|
e0f474a4d8d72940b3db1dc17a540e66222c376a
|
30b594ca238545a59ee00e28aae3705e6638c93e
|
refs/heads/master
| 2021-01-20T20:39:04.420338
| 2016-08-05T05:36:13
| 2016-08-05T05:36:13
| 64,991,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
class Car(models.Model):
name = models.CharField(max_length=100)
image = models.ImageField(upload_to='car_images')
description = models.TextField()
daily_rent = models.IntegerField()
is_available = models.BooleanField()
def get_absolute_url(self):
return reverse('car-details', kwargs={'pk': self.pk})
def __str__(self):
return self.name
class Booking(models.Model):
car = models.ForeignKey(Car)
customer_name = models.CharField(max_length=100)
customer_email = models.EmailField()
customer_phone_number = models.TextField()
booking_start_date = models.DateField()
booking_end_date = models.DateField()
booking_message = models.TextField()
is_approved = models.BooleanField()
|
[
"chensandiego@gmail.com"
] |
chensandiego@gmail.com
|
087014408c5b2c09f6f943476f1fa7b570ae0450
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5756407898963968_0/Python/Roun/magic.py
|
e30d7e751a99a8d84316f435875bc46b4da76ef6
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
#!/usr/bin/env python
from __future__ import print_function
from sys import argv,stdin
with open(argv[1]) if len(argv)>1 else stdin as f:
num_cases = int(f.readline())
for i in xrange(num_cases):
row_idx1 = int(f.readline())
arrangement1 = [f.readline() for j in xrange(4)]
set1 = set(arrangement1[row_idx1-1].split())
row_idx2 = int(f.readline())
arrangement2 = [f.readline() for j in xrange(4)]
set2 = set(arrangement2[row_idx2-1].split())
sol = set1.intersection(set2)
print('Case #%d: '%(i+1),end='')
if len(sol) == 0:
print('Volunteer cheated!')
elif len(sol) == 1:
print(sol.pop())
else:
print('Bad magician!')
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
a63b5f56d46d28618515820a9c7e9d185e53d0e7
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/B/buttub/basic_twitter_scraper_338.py
|
831396458ed083ee5c81b59e0ebd3a27d47a6645
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,288
|
py
|
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:NME_Gigs'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 20
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:NME_Gigs'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 20
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
62d8a7d1cd85bf3326485ad8b3d09b6af3c09161
|
a5d05e3cecfa6571016e41d19c982f1082714582
|
/TLG.py
|
80349d5374afe2a719cb3a67b4ecdf2a8b8152ed
|
[] |
no_license
|
Sanket-Mathur/CodeChef-Practice
|
8ebc80eb9a32c90a5b3785348fca2048190dbeb0
|
cba5bc2eaaf5489cbd8e85acaca6f82d223cff4f
|
refs/heads/master
| 2023-08-08T05:59:42.755206
| 2021-09-26T12:44:15
| 2021-09-26T12:44:15
| 268,267,425
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
try:
S, A, B = [], [], []
for _ in range(int(input())):
a, b = map(int, input().split())
A.append(a); B.append(b)
a = sum(A); b = sum(B)
S.append(a-b)
if max(S) > abs(min(S)):
print(1, max(S))
else:
print(2, abs(min(S)))
except:
pass
|
[
"rajeev.sanket@gmail.com"
] |
rajeev.sanket@gmail.com
|
5434b5dbe590c3fed01340af08021e6c3c2756a2
|
26192962dc2627e7ca5f0e3b249c3fabcf52442c
|
/Python/Iniciante/1002 - Área do Círculo.py
|
4e699785d2644624cef0240deb3cc5ce5f019580
|
[] |
no_license
|
PierreVieira/URI
|
77278ccb1724ca206ab2c12afbea1e51fa08ff73
|
c1eb211c788d26b5cb9bedf5dda4147a2961fa19
|
refs/heads/master
| 2023-04-10T07:03:13.954639
| 2023-03-22T00:18:28
| 2023-03-22T00:18:28
| 189,321,748
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
"""
Autor: Pierre Vieira
Data da submissão: 04/05/2018 16:49:45
"""
raio = float(input())
print('A={:.4f}'.format(3.14159*raio*raio))
|
[
"pierrevieiraggg@gmail.com"
] |
pierrevieiraggg@gmail.com
|
655ab4a239d1041ff2cb3539fe9e397b55a1e9ba
|
d769bb17ce4ca61df1cb5c88a2c2ae8202b6927f
|
/unitest/test_oprp.py
|
49f8a8710fcb14c8ac49906dbe17ef4479ad37e0
|
[
"Apache-2.0"
] |
permissive
|
dallerbarn/oictest
|
596489625c459d6607cacb90b1e593eca7465fc9
|
0f85acf8ee081f59176600e0db7bfee915f5cb3b
|
refs/heads/master
| 2020-12-30T19:58:25.301555
| 2015-05-21T08:56:50
| 2015-05-21T08:56:50
| 35,876,393
| 0
| 0
| null | 2015-05-19T10:48:27
| 2015-05-19T10:48:26
| null |
UTF-8
|
Python
| false
| false
| 1,594
|
py
|
import json
from rrtest.check import WARNING
from rrtest import Trace
from oictest.oprp import not_supported, support
from oictest.base import Conversation
from oictest.check import factory as check_factory
from oictest.oidcrp import Client
from oic.oic.message import factory as message_factory
from oic.oic.message import ProviderConfigurationResponse
__author__ = 'roland'
_cli = Client()
CONV = Conversation(_cli, {}, Trace(), None,
message_factory, check_factory=check_factory)
def test_not_support():
assert not_supported("abc", "abc") is None
assert not_supported("bac", "abc") == ["bac"]
assert not_supported("abc", ["abc", "def"]) is None
assert not_supported("bac", ["abc", "def"]) == ["bac"]
assert not_supported(["abc", "def"], ["abc", "def"]) is None
assert not_supported(["bac", "def"], ["abc", "def"]) == ["bac"]
assert not_supported(["abc", "def", "ghi"], ["abc", "def"]) == ["ghi"]
def test_support():
pi = json.loads(open("pi.google").read())
CONV.client.provider_info = ProviderConfigurationResponse(**pi)
stat = support(CONV, {'warning': {
'scopes_supported': ['profile', 'email', 'address', 'phone']}})
print CONV.test_output[-1]
assert stat is WARNING
_output = CONV.test_output[-1]
assert _output["status"] == WARNING
assert _output["message"] == ("OP is not supporting ['address', 'phone'] "
"according to 'scopes_supported' in the "
"provider configuration")
if __name__ == "__main__":
test_support()
|
[
"roland.hedberg@adm.umu.se"
] |
roland.hedberg@adm.umu.se
|
561d151ac587c161a7e42f1da177389d1757e03f
|
5ae01ab82fcdedbdd70707b825313c40fb373fa3
|
/scripts/charonInterpreter/parsers/MaterialBlock/MasettiMobility/ElectronMobilityParameters/charonLineParserDopantSpecies.py
|
a8b6aca7a2756b075e6afeee05b0c806fd872472
|
[] |
no_license
|
worthenmanufacturing/tcad-charon
|
efc19f770252656ecf0850e7bc4e78fa4d62cf9e
|
37f103306952a08d0e769767fe9391716246a83d
|
refs/heads/main
| 2023-08-23T02:39:38.472864
| 2021-10-29T20:15:15
| 2021-10-29T20:15:15
| 488,068,897
| 0
| 0
| null | 2022-05-03T03:44:45
| 2022-05-03T03:44:45
| null |
UTF-8
|
Python
| false
| false
| 5,527
|
py
|
from __future__ import print_function
import copy
class charonLineParserDopantSpecies:
"DopantSpecies parser"
def __init__(self):
# Register the parsing keys
self.parserName = "DopantSpecies"
self.parsingKey = "dopant species"
self.parsingKeyOptional = []
self.interpreterHelpLine = "dopant species is {species} "
self.interpreterQuickHelp = "The dopant species should be either Arsenic or Phosphorous"
self.interpreterLongHelp = "The dopant species should be either Arsenic or Phosphorous"
# Register the xml required lines
self.xmlRequiredLines = []
self.xmlRequiredLinePriority = []
self.xmlRequiredLines.append("Charon->Closure Models->{MaterialBlockName}->Electron Mobility,Dopant Species,string,{species}")
self.xmlRequiredLinePriority.append(2)
self.xmlNewRequiredLines = []
# Register the xml required arguments and their indexes
self.xmlRequiredArgument = []
self.xmlRequiredArgument.append("{species}")
self.xmlRequiredArgumentIndexes = []
self.xmlRequiredArgumentIndexes.append("3")
# Register the xml optional lines
self.xmlOptionalLines = [[]]
self.xmlOptionalLinePriority = [[]]
# Register the xml optional arguments and their indexes
self.xmlOptionalArgument = []
self.xmlOptionalArgumentIndexes = []
# Register the xml default lines
self.xmlDefaultLines = []
self.xmlDefaultLinePriority = []
self.xmlReturned = []
self.xmlPriorityCode = []
def isThisMe(self,tokenizer,line):
# Tokenize the line
lineTokens = tokenizer.tokenize(line)
# Tokenize the parsing key
parsingTokens = self.parsingKey.split()
returnType = True
for itoken in range(len(parsingTokens)):
if itoken+1 > len(lineTokens):
return False
if lineTokens[itoken].lower() != parsingTokens[itoken].lower():
returnType = False
return returnType
def getName(self):
# Return parser name
return self.parserName
def getHelp(self,verbosity):
# Return help content
if verbosity.lower() == "long":
return (self.interpreterHelpLine,self.interpreterLongHelp)
else:
return (self.interpreterHelpLine,self.interpreterQuickHelp)
def generateXML(self,tokenizer,line):
# Tokenize the line
lineTokens = tokenizer.tokenize(line)
self.xmlNewRequiredLines[:] = []
for xL in self.xmlRequiredLines:
self.xmlNewRequiredLines.append(xL)
for ipar in range(len(self.xmlRequiredArgument)):
line.replace(self.xmlRequiredArgument[ipar],lineTokens[int(self.xmlRequiredArgumentIndexes[ipar])])
for iRLine in range(len(self.xmlRequiredLines)):
self.xmlNewRequiredLines[iRLine]=self.xmlNewRequiredLines[iRLine].replace(self.xmlRequiredArgument[ipar],lineTokens[int(self.xmlRequiredArgumentIndexes[ipar])])
for index,xmlLine in enumerate(self.xmlNewRequiredLines):
self.xmlReturned.append(xmlLine)
self.xmlPriorityCode.append(self.xmlRequiredLinePriority[index]) #required lines have priority code 2
# Look over input line to see if any options are called out.
optCounter = 0
optIndex = 0
for optKey in self.parsingKeyOptional:
# Tokenize the opt keys
foundOptionalKey = False
optKeyTokens = optKey.split()
for iLT in range(len(lineTokens)):
if lineTokens[iLT].lower() == optKeyTokens[0]:
if len(optKeyTokens) == 1:
optIndex = iLT
foundOptionalKey = True
else:
for iPK in range(len(optKeyTokens)-1):
optIndex = iLT
if iLT+iPK+1 > len(lineTokens)-1:
continue
if optKeyTokens[iPK+1] == lineTokens[iLT+iPK+1].lower():
if iPK+2 == len(optKeyTokens):
foundOptionalKey = True
else:
continue
#Found the key, now create the xml line
if foundOptionalKey == True:
self.Returned=copy.deepcopy(self.xmlOptionalLines[optCounter])
for iopt in range(len(self.xmlOptionalLines[optCounter])):
for ipar in range(len(self.xmlOptionalArgument[optCounter])):
self.Returned[iopt] = self.Returned[iopt].replace(self.xmlOptionalArgument[optCounter][ipar],lineTokens[optIndex+int(self.xmlOptionalArgumentIndexes[optCounter][ipar])])
for ipar in range(len(self.xmlRequiredArgument)):
self.Returned[iopt] = self.Returned[iopt].replace(self.xmlRequiredArgument[ipar],lineTokens[int(self.xmlRequiredArgumentIndexes[ipar])])
self.xmlReturned.append(self.Returned[iopt])
self.xmlPriorityCode.append(2) #optional lines have priority code 2
optCounter += 1
for xmlLine in self.xmlDefaultLines:
self.xmlReturned.append(xmlLine)
self.xmlPriorityCode.append(1) #optional lines have priority code 1
return (self.xmlReturned,self.xmlPriorityCode)
|
[
"juan@tcad.com"
] |
juan@tcad.com
|
a23383ab17a182849ae340924a7204e43425fae6
|
2660859a9e1a73da695a42d73b75863e02185dce
|
/src/pretix/plugins/pretixdroid/signals.py
|
7164ff39eb9d56a3180640a2abaf46e1bbbbba63
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
alainrk/pretix
|
d4931a5528cfd42b1a9d9fb1b1df02aeee507171
|
867a8132aa1ed73dd9513efae5b3c46b5bbae140
|
refs/heads/master
| 2021-01-18T19:49:27.366758
| 2017-04-01T13:23:11
| 2017-04-01T13:23:11
| 86,915,380
| 1
| 0
| null | 2017-04-01T13:31:04
| 2017-04-01T13:31:04
| null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
import json
from django.core.urlresolvers import resolve, reverse
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from pretix.base.signals import logentry_display
from pretix.control.signals import nav_event
@receiver(nav_event, dispatch_uid="pretixdroid_nav")
def control_nav_import(sender, request=None, **kwargs):
url = resolve(request.path_info)
if not request.eventperm.can_change_orders:
return []
return [
{
'label': _('pretixdroid'),
'url': reverse('plugins:pretixdroid:config', kwargs={
'event': request.event.slug,
'organizer': request.event.organizer.slug,
}),
'active': (url.namespace == 'plugins:pretixdroid' and url.url_name == 'config'),
'icon': 'android',
}
]
@receiver(signal=logentry_display, dispatch_uid="pretixdroid_logentry_display")
def pretixcontrol_logentry_display(sender, logentry, **kwargs):
if logentry.action_type != 'pretix.plugins.pretixdroid.scan':
return
data = json.loads(logentry.data)
if data.get('first'):
return _('Position #{posid} has been scanned.'.format(
posid=data.get('positionid')
))
else:
return _('Position #{posid} has been scanned and rejected because it has already been scanned before.'.format(
posid=data.get('positionid')
))
|
[
"mail@raphaelmichel.de"
] |
mail@raphaelmichel.de
|
71df9a20bf7584b49b5d66c00b8295b3e97ee01b
|
bd88cc281bf5cb92d7f5cd29c028298b0f79de2e
|
/robot-name/robot_name.py
|
749f9abc8323278c1d0db1162dfba32397a7ec6d
|
[] |
no_license
|
kingsley-ijomah/python-basics
|
4f3b6bcb4c264d23d9d223d2e00609ad30b7b0e5
|
190409625246243a0be6cc5e52463d541497b467
|
refs/heads/master
| 2021-03-11T20:03:09.272119
| 2020-05-13T08:30:08
| 2020-05-13T08:30:08
| 246,557,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
from string import ascii_uppercase, digits
from random import seed, sample
class Robot:
def __init__(self):
self.generate_name()
def generate_name(self):
seed()
self.name = self.alphabets() + self.digits()
def reset(self):
self.generate_name()
@staticmethod
def alphabets():
return "".join(sample(ascii_uppercase, 2))
@staticmethod
def digits():
return "".join(sample(digits, 3))
|
[
"kingsley.ijomah@gmail.com"
] |
kingsley.ijomah@gmail.com
|
c83edd0b9cd2dd22f40be2012e2d44f280509dc0
|
d63222abe326a3c8debd59bb8d24cb7eab3de09e
|
/codeforces/contest/1108/C.py
|
f7085b931f6177bfc0fd876a9a41f15536b83baf
|
[] |
no_license
|
tariqrahiman/pyComPro
|
91f47e93eb0a077d489659fcf0a75d5c1a65fc17
|
86ec13f47506a2495ab6b6bbb58d4e8b2a21538b
|
refs/heads/master
| 2022-02-10T04:15:40.194828
| 2019-06-16T10:22:38
| 2019-06-16T10:22:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
"""
this is a standard python template for codeforces task, repo: github.com/solbiatialessandro/pyComPro/codeforces
"""
from sys import stdin as _stdin
from sys import stdout
stdin = lambda type_ = "int", sep = " ": list(map(eval(type_), _stdin.readline().split(sep)))
joint = lambda sep = " ", *args: sep.join(str(i) if type(i) != list else sep.join(map(str, i)) for i in args)
def iters(): return xrange(int(raw_input()))
def solve(string):
l = len(string) / 3 + 1
counters = [0 for _ in xrange(6)]
strs = [
"RGB" * l,
"RBG" * l,
"GBR" * l,
"GRB" * l,
"BRG" * l,
"BGR" * l
]
for i, c in enumerate(string):
for ci in xrange(6):
if c != strs[ci][i]: counters[ci] += 1
m = min(counters)
print m
for ci in xrange(6):
if m == counters[ci]: return strs[ci][:len(string)]
if __name__ == "__main__":
"""the solve(*args) structure is needed for testing purporses"""
n = raw_input()
n = raw_input()
print solve(n)
|
[
"alexsolbiati@hotmail.it"
] |
alexsolbiati@hotmail.it
|
36f62627e0ca608318f0af03cb4d32d0fcd88a63
|
45e7fc0c586729a9fbff0cd5ec036db292264bad
|
/siena/warp_BM_siena.py
|
10bc958ee81017c5b1ef5533e1c6f6dd9436d6b8
|
[] |
no_license
|
ginakirkish/henrylab_scripts
|
5a11dc96ed03f85e00b7330d26ee776b6f93ac63
|
5e3009180a731ccd10c851668b00234e6e6728a5
|
refs/heads/master
| 2020-12-12T17:48:48.280077
| 2020-02-10T19:22:42
| 2020-02-10T19:22:42
| 234,189,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,521
|
py
|
from subprocess import check_call
from time import time
import argparse
import json
import pbr
from pbr.base import _get_output
from glob import glob
import os
import shutil
import pandas as pd
pbr_long ="/data/henry12/siena_BM/"
def get_t1(mse):
with open('{0}/{1}/alignment/status.json'.format(_get_output(mse), mse)) as data_file:
data = json.load(data_file)
if len(data["t1_files"]) > 0:
t1_file = data["t1_files"][-1]
print(t1_file)
return t1_file
def get_bm(mse):
BM = glob('{0}/{1}/sienaxorig_*/I_brain_mask.nii.gz'.format(_get_output(mse), mse))
if BM:
bm = BM[0]
else:
bm = ""
cmd = ["sienax_optibet", get_t1(mse), "-r", "-d", "-o", _get_output(mse)+'/'+mse + '/sienaxorig_noles']
check_call(cmd)
bm = _get_output(mse)+'/'+mse + '/sienaxorig_noles/I_brain_mask.nii.gz'
return bm
"""def get_bm(tp_brain):
bm = tp_brain.replace(".nii", "_optiBET_brain_mask.nii")
print(bm)
if not os.path.exists(bm):
cmd = ["/netopt/share/bin/local/optiBET.sh", "-i", tp_brain]
check_call(cmd)
return bm"""
def make_wd(mse1, mse2):
msid = get_t1(mse1).split('/')[-1].split('-')[0]
if not os.path.exists(pbr_long + msid):
os.mkdir(pbr_long + msid)
warp_dir = pbr_long + msid + '/warp/'
wd = '{0}/{1}_{2}/'.format(warp_dir, mse1, mse2)
print("MSID:", msid)
if not os.path.exists(warp_dir):
print(warp_dir)
os.mkdir(warp_dir)
if not os.path.exists(wd):
print(wd)
os.mkdir(wd)
return wd
def get_t1_mni(mse, t1_file):
t1 = t1_file.split("/")[-1]
bl_t1 = '{0}/{1}/alignment/baseline_mni/{2}'.format(_get_output(mse), mse, t1.replace(".nii", "_T1mni.nii"))
return bl_t1
def run_warp( mse1, mse2, wd):
tp1_brain = get_t1(mse1)
tp2_brain = get_t1(mse2)
tp1_affine_tp2space = '{0}/{1}_{2}_affine.nii.gz'.format(wd,mse1, mse2)
tp1_warp_tp2space = '{0}/{1}_{2}_warp.nii.gz'.format(wd,mse1, mse2)
inv_warp = '{0}/{1}_{2}_warp_field_inv.nii.gz'.format(wd,mse1, mse2)
tp1_tp2_brain_mask = wd + mse1+"_BM_"+ mse2 + "space.nii.gz"
combined_bm_tp2 = wd + "combined_BM_{}space.nii.gz".format(mse2)
tp1_bm = get_bm(mse1)
tp2_bm = get_bm(mse2)
#shutil.copyfile(tp1_brain, wd + tp1_brain.split('/')[-1])
#shutil.copyfile(tp2_brain, wd + tp2_brain.split('/')[-1])
#tp1_brain = wd + tp1_brain.split('/')[-1]
#tp2_brain = wd + tp2_brain.split('/')[-1]
#tp1_brain = get_t1_mni(mse1, t1_file1)
#tp2_brain = get_t1_mni(mse2, t1_file2)
msid = get_t1(mse1).split('/')[-1].split('-')[0]
warp_final = pbr_long + msid + "/warp/"
#if not os.path.exists(warp_final + mse1 + "_" +mse2 ):
if not os.path.exists(tp1_affine_tp2space):
cmd = ['flirt','-dof','6', '-in',tp1_brain, '-ref', tp2_brain, '-out', tp1_affine_tp2space, "-omat", wd + "affine.mat"]
print('flirt', '-in',tp1_brain, '-ref', tp2_brain, '-out', tp1_affine_tp2space, "-omat", wd + "affine.mat")
check_call(cmd)
cmd = ["flirt", "-init", wd + "affine.mat" , "-applyxfm", "-in", tp1_bm, "-ref", tp2_brain, "-out", tp1_tp2_brain_mask]
print("flirt", "-init", wd + "affine.mat" , "-applyxfm", "-in", tp1_bm, "-ref", tp2_brain, "-out", tp1_tp2_brain_mask)
check_call(cmd)
if not os.path.exists(tp1_warp_tp2space):
cmd = ["fnirt", "--ref={}".format(tp2_brain), "--in={}".format(tp1_brain), "--cout={}".format(tp1_warp_tp2space), "--iout={}".format(tp1_warp_tp2space).replace(".nii", "_img.nii")] # "--aff={}".format(wd+ "affine.mat")
print("fnirt", "--ref={}".format(tp2_brain), "--in={}".format(tp1_brain), "--cout={}".format(tp1_warp_tp2space),"--iout={}".format(tp1_warp_tp2space).replace(".nii", "_img.nii"))
check_call(cmd)
cmd = ["applywarp","-r", tp2_brain,"-i", tp1_bm,"-o",tp1_warp_tp2space.replace(".nii", "_new.nii"), "-w",tp1_warp_tp2space, "--premat={}".format(wd+ "/affine.mat")]
print( "applywarp","-r", tp2_brain,"-i", tp1_bm,"-o",tp1_warp_tp2space.replace(".nii", "_new.nii"), "-w",tp1_warp_tp2space, "--premat={}".format(wd+ "affine.mat") )
check_call(cmd)
cmd = ["fslmaths", tp1_warp_tp2space.replace(".nii","_new.nii"), "-mul", tp2_bm, combined_bm_tp2]
print("fslmaths", tp1_warp_tp2space.replace(".nii","_new.nii"), "-mul", tp2_bm, combined_bm_tp2)
check_call(cmd)
cmd = ["invwarp","-w", tp1_warp_tp2space,"-o", inv_warp,"-r", tp1_affine_tp2space ]
print("invwarp","-w", tp1_warp_tp2space,"-o", inv_warp,"-r", tp1_affine_tp2space )
check_call(cmd)
cmd = ["applywarp","-i", combined_bm_tp2, "-r", tp1_affine_tp2space,"-o", wd + "combined_BM_{}_warpspace.nii.gz".format(mse1), "-w", inv_warp ]
print("applywarp","-i", combined_bm_tp2, "-r", tp1_affine_tp2space,"-o", wd + "combined_BM_{}_warpspace.nii.gz".format(mse1), "-w", inv_warp)
check_call(cmd)
cmd = ["convert_xfm", "-inverse", wd + "affine.mat", "-omat", wd + "inv_aff.mat"]
print("convert_xfm", "-inverse", wd + "affine.mat", "-omat", wd + "inv_aff.mat")
check_call(cmd)
cmd = ["flirt", "-in",wd + "combined_BM_{}_warpspace.nii.gz".format(mse1), "-ref", tp1_brain, "-applyxfm","-init", wd + "inv_aff.mat" , "-o", wd + "combined_BM_{}space.nii.gz".format(mse1) ]
print("flirt", "-in",wd + "combined_BM_{}_warpspace.nii.gz".format(mse1), "-ref", tp1_brain, "-applyxfm","-init", wd + "inv_aff.mat" , "-o", wd + "combined_BM_{}space.nii.gz".format(mse1) )
check_call(cmd)
siena_final = pbr_long + msid + "/siena_fnirtBM_fromBL/"
if not os.path.exists(siena_final + mse1 + "_" +mse2 ):
cmd =["/data/henry6/gina/scripts/siena_BMinput",get_t1(mse1), get_t1(mse2), "-bm1",\
wd + "combined_BM_{}space.nii.gz".format(mse1),"-bm2", combined_bm_tp2, "-o", siena_final + mse1 + "_" +mse2 ]
print("*************************************************************")
print(cmd)
check_call(cmd)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-mse1', help = 'MSE1 - Moving subject')
parser.add_argument('-mse2', help = 'MSE2 - Reference Subject')
args = parser.parse_args()
mse1 = args.mse1
mse2 = args.mse2
wd = make_wd(mse1, mse2)
mse1_t1 = get_t1(mse1)
mse2_t1 = get_t1(mse2)
print(wd)
#out = args.o
run_warp(mse1, mse2, wd)
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', help = 'csv containing the msid and mse, need to be sorted by msid and date')
parser.add_argument
args = parser.parse_args()
c = args.i
df = pd.read_csv("{}".format(c))
ind = 0
baseline_msid, mse_baseline, mse2 = ["","",""]
for idx in range (len(df)):
msid = df.loc[idx,'msid']
mse = df.loc[idx,'mse']
if msid == baseline_msid:
x = 0
ind = ind+1
#print(ind, msid, mse)
else:
baseline_msid = msid
ind = 0
if ind == 0 :
mse1 = df.loc[idx,'mse']
if not mse1 == mse:
mse2 = mse
print(mse1, mse)
wd = make_wd(mse1, mse2)
if not os.path.exists(wd + 'A_halfwayto_B_render.png'):
print("working directory", wd, mse1, mse2)
try:
run_warp(mse1, mse2, wd)
except:
pass"""
|
[
"gina.kirkish@ucsf.edu"
] |
gina.kirkish@ucsf.edu
|
0aca0fa93b087115f742f7f9df0f030f608de8bc
|
fd21d6384ba36aa83d0c9f05f889bdbf8912551a
|
/a10sdk/core/A10_file/file_dnssec_ds.py
|
ee5c3a4e9eccaa7b00d15e80f993072ec6a00574
|
[
"Apache-2.0"
] |
permissive
|
0xtobit/a10sdk-python
|
32a364684d98c1d56538aaa4ccb0e3a5a87ecd00
|
1ea4886eea3a1609b2ac1f81e7326758d3124dba
|
refs/heads/master
| 2021-01-18T03:08:58.576707
| 2014-12-10T00:31:52
| 2014-12-10T00:31:52
| 34,410,031
| 0
| 0
| null | 2015-04-22T19:05:12
| 2015-04-22T19:05:12
| null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
from a10sdk.common.A10BaseClass import A10BaseClass
class DnssecDs(A10BaseClass):
""" :param action: {"optional": true, "enum": ["create", "import", "export", "copy", "rename", "check", "replace", "delete"], "type": "string", "description": "'create': create; 'import': import; 'export': export; 'copy': copy; 'rename': rename; 'check': check; 'replace': replace; 'delete': delete; ", "format": "enum"}
:param dst_file: {"description": "destination file name for copy and rename action", "format": "string", "minLength": 1, "optional": true, "maxLength": 32, "type": "string"}
:param file_handle: {"description": "full path of the uploaded file", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 255, "type": "string"}
:param file: {"description": "dnssec ds local file name", "format": "string", "minLength": 1, "optional": true, "maxLength": 127, "type": "string"}
:param size: {"optional": true, "type": "number", "description": "dnssec ds file size in byte", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
dnssec ds file information and management commands.
Class dnssec-ds supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/file/dnssec-ds`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "dnssec-ds"
self.a10_url="/axapi/v3/file/dnssec-ds"
self.DeviceProxy = ""
self.action = ""
self.dst_file = ""
self.file_handle = ""
self.A10WW_file = ""
self.size = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
[
"doug@parksidesoftware.com"
] |
doug@parksidesoftware.com
|
e8fba3e66c853c5a89e54922bcbc99e07eea8bfc
|
1ad12cbda2d378c403b9349a867c1e181e4eedc2
|
/UWsubduction/params/utils.py
|
7aba9b4cf25e1bd3fddd9dea2cb03abd0f66b010
|
[
"CC-BY-4.0"
] |
permissive
|
dansand/UWsubduction
|
0633fd08b6e8505f156c62b59e730b49fd1ed03e
|
e13183de2dfb362a5531eaa926aeee0c352b8f29
|
refs/heads/master
| 2021-03-24T13:33:15.804823
| 2020-01-09T03:27:46
| 2020-01-09T03:27:46
| 119,228,582
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from easydict import EasyDict as edict
#####################
#Now we map pd, md to non-nonDimensionalized dictionaries, paramDict, modelDict
#####################
def build_nondim_dict(d, sca):
ndd = edict({})
for key, val in d.items():
#can only call .magnitude on Pint quantities
if hasattr(val, 'dimensionality'):
if val.unitless:
ndd[key] = val.magnitude
else:
ndd[key] = sca.non_dimensionalise(val)
else:
ndd[key] = val
return ndd
|
[
"sonderfjord@gmail.com"
] |
sonderfjord@gmail.com
|
5f3b114edb77ef8372388472e509b780881e6bac
|
b9651b4a73d17847a9df7c953d3b0753b260af3e
|
/view/messageview.py
|
4dda6de42525476d7b2f55b0a97b544501673500
|
[] |
no_license
|
KevinB-a/forum
|
3270dd2dfa763455e9a6fdf0e3077f54413b3d77
|
b10bf8f7381e2bc03df9777cb0e1df1cea298785
|
refs/heads/master
| 2020-12-11T14:37:57.936880
| 2020-03-05T11:01:13
| 2020-03-05T11:01:13
| 233,873,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
from model.messagemodel import MessageModel
class MessageView ():
"""class to create and display messages"""
def __init__(self):
self.model = MessageModel()
def new_message(self):
"""write a new message with user entries """
content = input("Entrez le contenu de votre message :")
author = input("Entrez votre nom :")
self.model.write_message(content,author)
def show_message(self):
"""display every messages """
# get the messages from the model
messages = self.model.display_message()
print('Bonjour et bienvenue sur le forum, voici les derniers messages : ')
if messages:
for message in messages:
print("\nmessage {} : {}".format(message['id'], message['content']))
print("Posté par {} le {} à {}".format(
message['author'],
message['publishing_date'].strftime("%d/%m/%Y"),
message['publishing_date'].strftime("%H:%M")
))
print("\n------------------------------")
else:
print("Aucun message pour le moment")
|
[
"kevin.billet.pro@gmail.com"
] |
kevin.billet.pro@gmail.com
|
8bcb9a0df3639fe02a9b93a66e802fea7740ab65
|
ce864a3155ba441dab35fcd0f3d1d7f380d67106
|
/Part1/RelationaDatabases/innerJoin_pandas.py
|
2485139c6edea3fb659417323bea997b5ae11e74
|
[] |
no_license
|
rodrigoms2004/PythonImportDataCamp
|
c984e2b38c709899460f0a0cf5f1b4c06a7f91fa
|
23ffc36f1454974e22a12f3eaa46d06d3f700ee6
|
refs/heads/master
| 2020-04-05T01:12:36.317958
| 2019-02-20T19:43:29
| 2019-02-20T19:43:29
| 156,427,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
# Import packages
from sqlalchemy import create_engine
import pandas as pd
db_file = './RelationaDatabases/Northwind_small.sqlite'
# db_file = './RelationaDatabases/Chinook.sqlite'
# Create engine: engine
engine = create_engine('sqlite:///' + db_file)
# Execute query and store records in DataFrame: df
df = pd.read_sql_query("SELECT OrderId, CompanyName FROM Orders INNER JOIN Customers on Orders.CustomerID = Customers.CustomerID", engine)
# Print head of DataFrame
print(df.head())
|
[
"rodrigoms2004@gmail.com"
] |
rodrigoms2004@gmail.com
|
ccae764344b61e02e98a695fbbcb8f7a4ab64455
|
379049c3c3ec7fbac35aec44895971002059643d
|
/05-python基础-字符串、列表、元组、字典/02-判断名字是否存在.py
|
e397ca0996acdf755631518507bdd117831d3efb
|
[] |
no_license
|
mingyue33/python_base
|
29e771415b59a80c8f0354723f621826aafb46aa
|
f6c25bb5f9ea4352764a2c7d6d6dd802cdf2b1cb
|
refs/heads/master
| 2022-11-24T15:15:50.899518
| 2020-07-21T03:44:29
| 2020-07-21T03:44:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
#coding=utf-8
#1. 定义一个列表,里面有一些名字
names = ["xiaohong","xiaoming","laowang"]
#2. 获取一个要查找的名字
insertName = input("请输入您的名字:")
#3. 判断是否存在,并显示相应的提示
findFlag = 0
for name in names:
if name==insertName:
findFlag = 1
break#如果在前面已经找到了需要的名字,那么就结束循环,因为剩下的不会再进行判断,所以提升了程序的运行效率
#else:
# findFalg = 0
if findFlag == 1:
print("找到了")
else:
print("没有找到")
#第2种方法:
if insertName in names:
print("找到了")
else:
print("没有找到")
|
[
"qiilee@126.com"
] |
qiilee@126.com
|
471363907332458677f8c8441596c6f6ab3bfaaa
|
17e60f61fc82e7369802a1c597b58b0206ad9bec
|
/lib/rcDiskInfoWindows.py
|
d9621266bcf71fa24434e68c138816b64b16097e
|
[] |
no_license
|
SLB-DeN/opensvc
|
5e06d42947f51662fa16203a00670a88b9e1fea9
|
75baeb19e0d26d5e150e770aef4d615c2327f32e
|
refs/heads/master
| 2021-05-17T05:35:18.585791
| 2020-03-19T15:20:05
| 2020-03-19T15:20:05
| 250,651,667
| 1
| 0
| null | 2020-03-27T21:29:22
| 2020-03-27T21:29:22
| null |
UTF-8
|
Python
| false
| false
| 3,598
|
py
|
import rcDiskInfo
import wmi
from rcUtilities import justcall, which
class diskInfo(rcDiskInfo.diskInfo):
def __init__(self):
self.h = {}
self.fcluns = {}
self.wmi = wmi.WMI()
def scan_mapping(self):
if len(self.fcluns) > 0:
return
if not which('fcinfo'):
return
for index, portwwn, host in self._get_fc_hbas():
cmd = ['fcinfo', '/mapping', '/ai:'+index]
out, err, ret = justcall(cmd)
if ret != 0:
continue
lines = out.split('\n')
for i, line in enumerate(lines):
if line.startswith('( '):
l = line.split()
if len(l) < 3:
continue
bus = int(l[-3].strip(','))
target = int(l[-2].strip(','))
lun = int(l[-1].strip(')'))
_index = (host, bus, target, lun)
elif line.startswith('(cs:'):
l = line.split()
if len(l) < 2:
continue
wwid = l[-1].strip(')')
self.fcluns[_index] = dict(wwid=wwid)
def scan(self):
self.scan_mapping()
vid = 'unknown'
pid = 'unknown'
wwid = 'unknown'
size = 'unknown'
for drive in self.wmi.WIN32_DiskDrive():
id = drive.DeviceID
vid = str(drive.Manufacturer)
pid = str(drive.Caption)
try:
serial = str(drive.SerialNumber)
except:
serial = "unknown"
size = int(drive.Size) // 1024 // 1024
host = drive.SCSIPort
bus = drive.SCSIBus
target = drive.SCSITargetId
lun = drive.SCSILogicalUnit
d = dict(id=id,
vid=vid,
pid=pid,
wwid=wwid,
serial=serial,
host=host,
bus=bus,
target=target,
lun=lun,
size=size)
d['wwid'] = self.get_wwid(d)
if d['wwid'] is None:
d['wwid'] = d['serial']
self.h[id] = d
def get_wwid(self, d):
index = (d['host'], d['bus'], d['target'], d['lun'])
if index not in self.fcluns:
return None
return self.fcluns[index]['wwid']
def get(self, id, prop):
if len(self.h) == 0:
self.scan()
if id not in self.h:
return None
return self.h[id][prop]
def disk_id(self, dev):
return self.get(dev, 'wwid')
def disk_vendor(self, dev):
return self.get(dev, 'vid')
def disk_model(self, dev):
return self.get(dev, 'pid')
def disk_size(self, dev):
return self.get(dev, 'size')
def _get_fc_hbas(self):
hbas = []
if not which('fcinfo'):
return []
cmd = ['fcinfo']
out, err, ret = justcall(cmd)
if ret != 0:
return []
for line in out.split('\n'):
if 'PortWWN' not in line:
continue
l = line.split()
i = l.index('PortWWN:')
if len(l) < i+2:
continue
index = l[0].split('-')[-1].strip(':')
portwwn = l[i+1].replace(':', '')
host = int(l[-1].split('Scsi')[-1].strip(':'))
hbas.append((index, portwwn, host))
return hbas
|
[
"christophe.varoqui@opensvc.com"
] |
christophe.varoqui@opensvc.com
|
b285a6b8ca9eb4ac0643ebe0bdb04b7377c2d3da
|
fbd5c602a612ea9e09cdd35e3a2120eac5a43ccf
|
/Finished/old_py/393.utf-8-编码验证.py
|
76699d6f1fd1474e7ccec68a99bacf073a2a8d57
|
[] |
no_license
|
czccc/LeetCode
|
0822dffee3b6fd8a6c6e34be2525bbd65ccfa7c0
|
ddeb1c473935480c97f3d7986a602ee2cb3acaa8
|
refs/heads/master
| 2023-09-01T18:18:45.973563
| 2023-08-27T02:44:00
| 2023-08-27T02:44:00
| 206,226,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,403
|
py
|
#
# @lc app=leetcode.cn id=393 lang=python
#
# [393] UTF-8 编码验证
#
# @lc code=start
class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
def get_one_number(n):
for i in range(5):
if n & (128 >> i) == 0:
return i
return 5
index = 0
while index < len(data):
L = get_one_number(data[index])
if L == 1 or L == 5 or index + L - 1 >= len(data):
return False
elif L == 0:
index += 1
else:
for i in range(L - 1):
if get_one_number(data[index + i + 1]) != 1:
return False
index += L
return True
# @lc code=end
# TEST ONLY
import unittest
import sys
sys.path.append("..")
from Base.PyVar import *
class SolutionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._func = Solution().validUtf8
def test_1(self):
args = [[197, 130, 1]]
ans = True
cur_ans = self._func(*args)
self.assertEqual(cur_ans, ans)
def test_2(self):
args = [[235, 140, 4]]
ans = False
cur_ans = self._func(*args)
self.assertEqual(cur_ans, ans)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
[
"lichchchn@gmail.com"
] |
lichchchn@gmail.com
|
f59661020a5586029caaf7167761769a3761314c
|
3b7ea5ac5a8aadf4ce577d4988d3afca76672311
|
/ziplist.py
|
d415cdeded50cfd1521572175d8c8d18126ab365
|
[] |
no_license
|
Parya1112009/mytest
|
6c0fec16cbe5b83803fce9b270d8c03d247f34a3
|
6f698b64b34eae0d1d0f8ca1eab4257893fdce83
|
refs/heads/master
| 2022-11-23T22:38:24.463420
| 2022-11-16T03:48:44
| 2022-11-16T03:48:44
| 88,562,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
list1 = [1,2,3,4,13,14,15]
list2 = [5,6,7,8,19,20,21]
#print list[::]
list3 = zip(list1,list2)
print (list3)
print list3[1::2]
print list3[2::4]
print list3[::5]
print list3[4::]
|
[
"noreply@github.com"
] |
Parya1112009.noreply@github.com
|
14f43bf48d1b026bf488ee30115ec39a80659475
|
41586d36dd07c06860b9808c760e2b0212ed846b
|
/desktop/toolkit/qt5/qt5-svg/actions.py
|
472146b914b7669fbaa5b1036574caa84c1164b4
|
[] |
no_license
|
SulinOS/SulinRepository
|
4d5551861f57bc1f4bec6879dfe28ce68c7c125d
|
9686811a1e06080f63199233561a922fe1f78d67
|
refs/heads/master
| 2021-06-15T21:34:25.039979
| 2021-06-05T13:43:34
| 2021-06-05T13:43:34
| 207,672,864
| 6
| 3
| null | 2019-12-06T08:11:22
| 2019-09-10T22:16:17
|
Python
|
UTF-8
|
Python
| false
| false
| 827
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import shelltools
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import qt
from inary.actionsapi import get
def setup():
qt.configure()
def build():
qt.make()
qt.make("docs")
def install():
qt.install("INSTALL_ROOT=%s" % get.installDIR())
qt.install("INSTALL_ROOT=%s install_docs" % get.installDIR())
#I hope qtchooser will manage this issue
for bin in shelltools.ls("%s/usr/lib/qt5/bin" % get.installDIR()):
inarytools.dosym("/usr/lib/qt5/bin/%s" % bin, "/usr/bin/%s-qt5" % bin)
inarytools.insinto("/usr/share/licenses/qt5-svg/", "LICENSE.*")
|
[
"zaryob.dev@gmail.com"
] |
zaryob.dev@gmail.com
|
b0480d6a3406a5686ec59eb79bf0f6dabcf6e2f9
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/97/usersdata/126/56593/submittedfiles/lecker.py
|
bc318ca69e3b8c6abb02f03aae910aa0e1177b20
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
# -*- coding: utf-8 -*-
def lecker(a):
cont=0
for i in range(0,len(a),1):
if i==0:
if a[i]>a[i+1]:
cont=cont+1
elif i==len(a):
if a[i]>a[i-1]:
cont=cont+1
else:
if a[i-1]<a[i]>a[i+1]:
cont=cont+1
if cont==1:
return True
n=int(input('digite a quantidade de elementos da lista:'))
x=[]
y=[]
for i in range(0,n,1):
p=float(input('digite o valor da lista 1:'))
x.append(p)
i=i+1
for i in range(0,n,1):
q=float(input('digite o valor da segunda lista:'))
y.append(q)
i=i+1
if lecker(x):
print('S')
else:
print('N')
if lecker(y):
print('S')
else:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1b35196193a61bf28c62ee650d3e53c45e7541b0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03053/s439502616.py
|
81a964f661902f7e08cba0681473061dbc558555
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
import sys
def input(): return sys.stdin.readline().strip()
def mapint(): return map(int, input().split())
sys.setrecursionlimit(10**9)
H, W = mapint()
As = [list(input()) for _ in range(H)]
from collections import deque
Q = deque()
dist = [[10**18]*W for _ in range(H)]
for h in range(H):
for w in range(W):
if As[h][w]=='#':
Q.append((h, w))
dist[h][w] = 0
dirc = ((0, 1), (0, -1), (1, 0), (-1, 0))
while Q:
y, x = Q.popleft()
for dy, dx in dirc:
ny, nx = y+dy, x+dx
if ny<0 or ny>=H or nx<0 or nx>=W:
continue
if dist[ny][nx]>dist[y][x]+1:
dist[ny][nx] = dist[y][x]+1
Q.append((ny, nx))
ans = 0
for h in range(H):
for w in range(W):
ans = max(ans, dist[h][w])
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
240ec89ce59b1aa0f0409a5eb3452080d774caf3
|
f76a36bb022c4da3fabf0cf13dac8c634c011b3a
|
/update.py
|
9ab3aa4af95a287f69a8816bdb56bf3bf7b12ef8
|
[] |
no_license
|
jtomasek/requirements
|
eb12da3afb082ebd69d55d8204c915aa018af2fb
|
e60166eae3795232fcd2bcf03592e8228e0aa0e4
|
refs/heads/master
| 2021-01-16T22:42:06.674581
| 2013-05-29T13:05:00
| 2013-05-29T13:05:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,826
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
r"""
A simple script to update the requirements files from a global set of
allowable requirements.
The script can be called like this:
$> python update.py ../myproj
Any requirements listed in the target files will have their versions
updated to match the global requirements. Requirements not in the global
files will be dropped.
"""
import os
import os.path
import sys
from pip import req
def _mod_to_path(mod):
return os.path.join(*mod.split('.'))
def _dest_path(path, base, dest_dir):
return os.path.join(dest_dir, _mod_to_path(base), path)
def _parse_reqs(filename):
reqs = dict()
pip_requires = open(filename, "r").readlines()
for pip in pip_requires:
pip = pip.strip()
if pip.startswith("#") or len(pip) == 0:
continue
install_require = req.InstallRequirement.from_line(pip)
if install_require.editable:
reqs[pip] = pip
elif install_require.url:
reqs[pip] = pip
else:
reqs[install_require.req.key] = pip
return reqs
def _copy_requires(req, dest_dir):
"""Copy requirements files."""
dest_path = _dest_path(req, 'tools', dest_dir)
source_path = os.path.join('tools', req)
source_reqs = _parse_reqs(source_path)
dest_reqs = _parse_reqs(dest_path)
dest_keys = [key.lower() for key in dest_reqs.keys()]
dest_keys.sort()
print "Syncing %s" % req
with open(dest_path, 'w') as new_reqs:
new_reqs.write("# This file is managed by openstack-depends\n")
for old_require in dest_keys:
# Special cases:
# versions of our stuff from tarballs.openstack.org are ok
# projects need to align pep8 version on their own time
if old_require in source_reqs or \
"http://tarballs.openstack.org" in old_require:
new_reqs.write("%s\n" % source_reqs[old_require])
if "pep8" in old_require:
new_reqs.write("%s\n" % dest_reqs[old_require])
def main(argv):
for req in ('pip-requires', 'test-requires'):
_copy_requires(req, argv[0])
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"mordred@inaugust.com"
] |
mordred@inaugust.com
|
9927cd4bca84ee185c4b5cdf617ba0a29e5b1f9a
|
251d6d11e807fa47fd1bad1f070b727500b17fd5
|
/shares/apps.py
|
9c121ab60adeccabe231925656fcce78bc87b080
|
[] |
no_license
|
khokhlov/dinv
|
a0964403a930f479fb744d90c4dbad887ba9810c
|
7943b533808c913ec3564aa28ada485f857609ee
|
refs/heads/master
| 2020-05-26T13:06:42.975971
| 2017-03-10T12:02:30
| 2017-03-10T12:02:30
| 82,479,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
#coding: utf-8
from __future__ import unicode_literals
from django.apps import AppConfig
class SharesConfig(AppConfig):
name = 'shares'
verbose_name = u'Акции'
|
[
"kolya.khokhlov@gmail.com"
] |
kolya.khokhlov@gmail.com
|
63186a8cdc7fa9612b687e2999dde46ed4eb74c7
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/valid_20200616215901.py
|
0a5eee49ea2e4313e5ba7c021044a1aa7cca4116
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
# Ipv4 --> 4 decimal numbers,between 0 to 255
# leading zero's is invalid
# check whethere its a digit between 0 to 255
import string
def valid(str):
if str[len(str)-1] == ":" or str[len(str)-1] == ".":
return "Neither"
address = str.split(".")
numbers = range(0,256)
result = None
if len(address) == 4:
for a in address:
if a.isdigit() == False:
return "Neither"
if int(a) in numbers:
if len(a) == 2 and a[0] == "0":
return "Neither"
return "IPV4"
# else:
# newAddress = str.split(":")
# i = 0
# while i < len(newAddress)-1:
# print(newAddress[i])
# well = all(c in string.hexdigits for c in newAddress[i])
# if newAddress[i] == "":
# return "Neither"
# if well == True:
# return "IPV6"
# else:
# return "Neither"
# i +=1
# return result
# "12..33.4"
print(valid("172.16.254.1"))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
10c9f668c41efc95b861f012f1251780a6fb2386
|
4de28b1f6d97640834e4a795e68ca9987f9e2cd5
|
/check plugins 2.1/dell_idrac_redfish/agent_based/dell_idrac_rf_thermal.py
|
43eec1ae032a7af3d96c7df8ffabee97d4088086
|
[] |
no_license
|
Yogibaer75/Check_MK-Things
|
affa0f7e6e772074c547f7b1df5c07a37dba80b4
|
029c546dc921c4157000d8ce58a878618e7bfa97
|
refs/heads/master
| 2023-09-01T15:52:28.610282
| 2023-08-29T06:18:52
| 2023-08-29T06:18:52
| 20,382,895
| 47
| 16
| null | 2023-07-30T15:52:22
| 2014-06-01T18:04:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
#!/usr/bin/env python3
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# (c) Andreas Doehler <andreas.doehler@bechtle.com/andreas.doehler@gmail.com>
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
from .agent_based_api.v1 import register
from .utils.dell_idrac import parse_dell_idrac_rf
register.agent_section(
name="dell_idrac_rf_thermal",
parse_function=parse_dell_idrac_rf,
)
|
[
"andreas.doehler@gmail.com"
] |
andreas.doehler@gmail.com
|
9154f740d0f00f873094db8dbd94808071bf1838
|
95a6555114011d7ba9b0a842dd348dc4a18a56fc
|
/page_locators/about_page_locator.py
|
8d1ab126684c99a78c454f3e87d3de5abecd78ad
|
[
"Unlicense"
] |
permissive
|
battyone/ParaBankSeleniumAutomation
|
c96dfdcb11591dd12db31b7ddd373326ce4284f7
|
e28a886adba89b82a60831ad96a3a8f00f863116
|
refs/heads/master
| 2023-05-04T19:58:13.067568
| 2020-03-15T17:19:09
| 2020-03-15T17:19:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
from selenium.webdriver.common.by import By
from page_locators.base_page_locator import BasePageLocator
class AboutPageLocator(BasePageLocator):
'''
Holds all relevant locators for 'ABOUT' page web elements.
Each locator is a tuple.
Separate the locator strings from the place where they are being used.
'''
DESCRIPTION_TITLE = (By.XPATH, '//*[@id="rightPanel"]/h1')
FIRST_LINE = (By.XPATH, '//*[@id="rightPanel"]/p[1]')
SECOND_LINE = (By.XPATH, '//*[@id="rightPanel"]/p[2]')
THIRD_LINE = (By.XPATH, '//*[@id="rightPanel"]/p[3]')
LINK = (By.XPATH, '//*[@id="rightPanel"]/p[3]/a')
|
[
"igorkostan@gmail.com"
] |
igorkostan@gmail.com
|
d1ab74682b364990dd5ac2704bf6a733bc592772
|
46494ee049e72d99a1b371a685692f040ab21a36
|
/1345. Jump Game IV DC (27-12-20).py
|
25cf8c7d0cacabf19275acd2406bcef42dc0b8ed
|
[
"MIT"
] |
permissive
|
Dharaneeshwar/Leetcode
|
5570b953787a65f5c2a831aca3645bca50710844
|
cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287
|
refs/heads/master
| 2023-03-25T18:40:26.258939
| 2021-03-18T06:04:11
| 2021-03-18T06:04:11
| 313,374,143
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
# Refer Solution
class Solution:
def minJumps(self, arr: List[int]) -> int:
arrlen = len(arr)
if arrlen in [0,1]:
return 0
graph = defaultdict(list)
for i in range(arrlen):
graph[arr[i]].append(i)
current = [0]
visited = set([0])
dist = 0
while current:
nexti = []
for node in current:
if node == arrlen-1:
return dist
for neighbor in graph[arr[node]]:
if neighbor not in visited:
visited.add(neighbor)
nexti.append(neighbor)
graph[arr[node]].clear()
for neighbor in [node-1,node+1]:
if 0<= neighbor < arrlen and neighbor not in visited:
visited.add(neighbor)
nexti.append(neighbor)
current = nexti
dist += 1
return -1
|
[
"daranip@gmail.com"
] |
daranip@gmail.com
|
cbea326e7b014b522803373a61372a366f563f9e
|
96e77a734bf865f998e719fafcaabd120b93759c
|
/Python/Django/login_reg_proj/apps/login_reg_app/models.py
|
ea5e8361c65eb78b014f69c076a74c0cb4e33b65
|
[] |
no_license
|
imronha/codingdojoprojects
|
3346feca1c03f625270eeded2cfb6a9b0249ab56
|
1b40688372844eca3fd02401f397c4ba4b334ce7
|
refs/heads/master
| 2020-04-05T12:59:14.237411
| 2017-11-06T08:27:06
| 2017-11-06T08:27:06
| 94,944,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
from __future__ import unicode_literals
from django.db import models
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
def basic_validator(self, postData):
errors = {}
if len(postData['first_name']) < 2:
errors['first_name'] = "First name must be at least 2 characters long."
if len(postData['last_name']) < 2:
errors['last_name'] = "Last name must be at least 2 characters long."
if not re.match(EMAIL_REGEX, postData['email']):
errors['email'] = "Email must be valid."
if len(postData['pw']) < 8:
errors['pw'] = "Password must be at least 8 characters long."
if postData['pw'] != postData['pwconfirm']:
errors['pw'] = "Password and confirmation must match."
return errors
# Create your models here.
class User(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.CharField(max_length=255)
password = models.CharField(max_length=255)
objects = UserManager()
|
[
"imronha@gmail.com"
] |
imronha@gmail.com
|
1cb8be49fabf46b7d1996007bd3c40c46fb353c1
|
1775a5522f465cb74a1e02393d32c363bb7ef215
|
/django/db/migrations/executor.py
|
7ebfe3d13b012c3f15b931e04127b07bd98bbff4
|
[
"BSD-3-Clause"
] |
permissive
|
trught007/django
|
b280eaff7706e72a6fc0f298c68e3c065daa448b
|
d55d21dbb8b307941c2d26b95be46bf83015d868
|
refs/heads/master
| 2022-12-21T04:23:49.786811
| 2020-10-01T08:24:33
| 2020-10-01T08:24:33
| 300,203,187
| 0
| 0
|
NOASSERTION
| 2020-10-01T08:23:34
| 2020-10-01T08:23:33
| null |
UTF-8
|
Python
| false
| false
| 6,401
|
py
|
from django.db import migrations
from .loader import MigrationLoader
from .recorder import MigrationRecorder
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
backwards_plan = self.loader.graph.backwards_plan(target)[:-1]
# We only do this if the migration is not the most recent one
# in its app - that is, another migration with the same app
# label is in the backwards plan
if any(node[0] == target[0] for node in backwards_plan):
for migration in backwards_plan:
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def migrate(self, targets, plan=None, fake=False):
"""
Migrates the database up to the given targets.
"""
if plan is None:
plan = self.migration_plan(targets)
for migration, backwards in plan:
if not backwards:
self.apply_migration(migration, fake=fake)
else:
self.unapply_migration(migration, fake=fake)
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
"""
statements = []
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True) as schema_editor:
project_state = self.loader.graph.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
migration.apply(project_state, schema_editor, collect_sql=True)
else:
migration.unapply(project_state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, migration, fake=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
# Test to see if this is an already-applied initial migration
if not migration.dependencies and self.detect_soft_applied(migration):
fake = True
else:
# Alright, do it normally
with self.connection.schema_editor() as schema_editor:
project_state = self.loader.graph.project_state((migration.app_label, migration.name), at_end=False)
migration.apply(project_state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
def unapply_migration(self, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
project_state = self.loader.graph.project_state((migration.app_label, migration.name), at_end=False)
migration.unapply(project_state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
def detect_soft_applied(self, migration):
"""
Tests whether a migration has been implicity applied - that the
tables it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel).
"""
project_state = self.loader.graph.project_state((migration.app_label, migration.name), at_end=True)
apps = project_state.render()
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.db_table not in self.connection.introspection.get_table_list(self.connection.cursor()):
return False
return True
|
[
"ekurgn@gmail.com"
] |
ekurgn@gmail.com
|
6efbff97a1eb3d659a5b884175b4e260d12f7142
|
49cb44cfe9b4cd382d8a7d10e1719de69e356ed9
|
/scripts/problems/ch4/p11/doSolutionStableNode.py
|
d1b8ad10850cc9c37fb79e5945439c7d0761cce3
|
[] |
no_license
|
joacorapela/figsResultsAndErrorsFromIzhikevich2007
|
913a25ff10479b04fa657cea013226766bef730c
|
2c04cacbaa94485168926ddc7e343207beb033b9
|
refs/heads/master
| 2022-01-28T15:26:30.122964
| 2022-01-21T19:56:52
| 2022-01-21T19:56:52
| 150,012,910
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
import sys
import pdb
import math
import matplotlib.pyplot as plt
from plotFunctions import plotVectorField
def main(argv):
a = -6.0
b = 1.0
c = 1.0
xMin = -4.0
xMax = 4.0
nXs = 16
yMin = -4.0
yMax = 4.0
nYs=16
colorSaddle = (1.0, 0.0, 0.0)
colorSaddleNode = (0.5, 0.0, 0.0)
colorStableNode = (0.0, 1.0, 0.0)
colorStableFocus = (0.0, 0.5, 0.0)
colorUnstableNode = (0.0, 0.0, 1.0)
colorUnstableFocus = (0.0, 0.0, 0.5)
if a>0.25*(b/c)**2:
print("No real equilibria")
return()
xe1 = (b/c+math.sqrt((b/c)**2-4*a))/2.0
ye1 = b/c*xe1
if a==0.25*(b/c)**2:
e1Type = "saddle-node"
e1Color = colorSaddleNode
print("(%f,%f) is a saddle-node"%(xe1,ye1))
else:
e1Type = "saddle"
e1Color = colorSaddle
print("(%f,%f) is a saddle"%(xe1,ye1))
xe2 = (b/c-math.sqrt((b/c)**2-4*a))/2.0
ye2 = b/c*xe2
if a==0.25*(b/c)**2:
if b==c**2:
raise NotImplementedError()
else:
e2Type = "saddle-node"
e2Color = colorSaddleNode
print("(%f,%f) is a saddle-node"%(xe2,ye2))
tau2 = b/c-math.sqrt((b/c)**2-4*a)-c
delta2 = c*math.sqrt((b/c)**2-4*a)
if b<=c**2 or a<(2*b-c**2)/4:
if tau2**2<4*delta2:
e2Type = "stable focus"
e2Color = colorStableFocus
print("(%f,%f) is a stable focus"%(xe2,ye2))
else:
e2Type = "stable node"
e2Color = colorStableNode
print("(%f,%f) is a stable node"%(xe2,ye2))
elif a>(2*b-c**2)/4:
if a==0.25*(b/c)**2:
raise NotImplementedError()
if tau2**2<4*delta2:
e2Type = "unstable focus"
e2Color = colorUnstableFocus
print("(%f,%f) is a unstable focus"%(xe2,ye2))
else:
e2Type = "unstable node"
e2Color = colorUnstableNode
print("(%f,%f) is a unstable node"%(xe2,ye2))
f = lambda x, y: a+x**2-y
g = lambda x, y: b*x-c*y
plotVectorField(xDotFun=f, yDotFun=g,
xMin=xMin, xMax=xMax, nXs=nXs,
yMin=yMin, yMax=yMax, nYs=nYs)
plt.scatter(xe1, ye1, marker="o", label=e1Type, color=e1Color)
plt.scatter(xe2, ye2, marker="o", label=e2Type, color=e2Color)
plt.legend(scatterpoints=1)
plt.show()
pdb.set_trace()
if __name__=="__main__":
main(sys.argv)
|
[
"joacorapela@gmail.com"
] |
joacorapela@gmail.com
|
76f8121fe98413ca6968d65b7bd6bef9ed0594fd
|
75e8f932e1e08c7e71380e6b71d85ddd04f052dd
|
/SDAPythonTesting/test_teacher.py
|
0be270a7e62250060e66527352106940a261ccd8
|
[] |
no_license
|
aeciovc/sda_python_ee4
|
fe470a253126ad307c651d252f9f9b489da32835
|
9e1e8be675fcafe4a61c354b55b71f53ad2af0fe
|
refs/heads/master
| 2023-08-29T15:17:34.033331
| 2021-10-31T09:41:57
| 2021-10-31T09:41:57
| 365,678,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
import pytest
from teacher import Teacher
@pytest.fixture()
def valid_person_name():
yield 'James'
print("Cleaning the environment")
@pytest.fixture()
def invalid_person_name():
return ''
class TestCreateDirector:
@pytest.mark.skip(reason="still not implemented")
def test_create_director_with_wrong_position(self):
pass
class TestCreateTeacher:
def test_create_teacher_with_optinal_args(self, valid_person_name):
# input
years_of_experience = 4
master_degree = True
topics = ['Python Tech', 'Data structure']
# process
teacher = Teacher(valid_person_name, years_of_experience, master_degree=master_degree, topics=topics)
# assert
assert isinstance(teacher, Teacher)
assert teacher.name == 'James'
assert teacher.years_of_experience == 4
assert teacher.has_master_degree is True
assert teacher.topics == ['Python Tech', 'Data structure']
assert teacher.amount_topics == 2
def test_create_teacher_with_defaults(self, valid_person_name):
# input
years_of_experience = 2
# process
teacher = Teacher(valid_person_name, years_of_experience)
# assert
assert isinstance(teacher, Teacher)
assert teacher.name == valid_person_name
assert teacher.years_of_experience == 2
assert teacher.has_master_degree is False
assert teacher.topics == []
def test_create_teacher_with_invalid_name(self, invalid_person_name):
pass
|
[
"aeciovc@gmail.com"
] |
aeciovc@gmail.com
|
f98c0ba473b4f18f1b2dc9a34cf29bf54651c48f
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/halfords_nl.py
|
a324790d8a41e361696f8252b1f530caa2776328
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,333
|
py
|
import json
import re
from scrapy import Request, Spider
from locations.hours import DAYS_NL, OpeningHours
from locations.items import Feature
from locations.structured_data_spider import extract_email, extract_phone
class HalfordsNLSpider(Spider):
name = "halfords_nl"
item_attributes = {"brand": "Halfords", "brand_wikidata": "Q3398786"}
start_urls = ["https://www.halfords.nl/halfords-winkels/"]
def parse(self, response):
store_urls = response.xpath('//*[@class="amlocator-store-list"]//*[@href]/@href').getall()
for store in store_urls:
yield Request(url=store, callback=self.parse_store)
def parse_store(self, response):
item = Feature()
item["ref"] = response.url
item["website"] = response.url
item["name"] = response.xpath("//*[@data-ui-id]/text()").get()
item = self.get_lat_long(response, item)
extract_phone(item, response)
extract_email(item, response)
item = self.get_address(response, item)
yield item
def get_lat_long(self, response, item):
latlong = response.xpath('//*[@id="maincontent"]/div[4]/div/script[2]').get()
lat_lon = json.loads(
re.findall("{ lat: [0-9]*.[0-9]*, lng: [0-9]*.[0-9]* }", latlong)[0]
.replace("lat", '"lat"')
.replace("lng", '"lng"')
)
item["lat"], item["lon"] = lat_lon["lat"], lat_lon["lng"]
return item
def get_address(self, response, item):
street_address, city_postcode, _ = response.xpath(
'//*[@class="amlocator-block -contact mb-6"]/div/span/text()'
).getall()
item["street_address"] = street_address
postcode, city = city_postcode.split(", ")
item["city"] = city
item["postcode"] = postcode
return item
def get_opening_hours(self, response, item):
oh = OpeningHours()
days = response.xpath('//*[@class="amlocator-cell -day"]/text()').getall()
hours = response.xpath('//*[@class="amlocator-cell -time"]/text()').getall()
for day, hour in zip(days, hours):
if "-" in hour:
oh.add_ranges_from_string(ranges_string=day + " " + hour, days=DAYS_NL, delimiters=" - ")
item["opening_hours"] = oh.as_opening_hours()
return item
|
[
"noreply@github.com"
] |
alltheplaces.noreply@github.com
|
b9d0e135072234e5bc41f7d24f5b094c3e911e1e
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/RSGraviton/RSGravitonToGammaGamma_kMpl01_M_2500_TuneCUEP8M1_13TeV_pythia8_cfi.py
|
ef7485d71801413c97f0e543105aabed9bc8fecc
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(1.095e-3),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsG*:all = on',
'ExtraDimensionsG*:kappaMG = 0.54',
'5100039:m0 = 2500',
'5100039:onMode = off',
'5100039:onIfAny = 22',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sheffield@physics.rutgers.edu"
] |
sheffield@physics.rutgers.edu
|
276669d178cb5c98663816ef873c8f9ba74630fe
|
46bb96c640eaeeaffc81dfaf62ddca084d9e127c
|
/bugbuzz_service/db/tables/sessions.py
|
4c2ca601ff7e457f3d0b048f56db55183668c702
|
[
"MIT"
] |
permissive
|
pbehnke/bugbuzz-api
|
4d475d53620f839ef6b33c5326ca1bc079936c88
|
5ed6810f65f1edabf65282359eabc3845df26a67
|
refs/heads/master
| 2021-06-17T06:45:26.633291
| 2017-05-22T18:55:28
| 2017-05-22T18:55:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
from __future__ import unicode_literals
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy.dialects.postgresql import BYTEA
from . import metadata
from . import now_func
from ...utils import GUIDFactory
from .utc_dt import UTCDateTime
sessions = Table(
'sessions',
metadata,
Column('guid', Unicode(64), primary_key=True, default=GUIDFactory('SE')),
Column('encrypted', Boolean, nullable=False, default=False),
# AES 256 encryption IV
Column('aes_iv', BYTEA),
# encrypted validation code
Column('encrypted_code', BYTEA),
# code for decryption validation
Column('validation_code', Unicode),
Column('created_at', UTCDateTime, default=now_func),
Column(
'updated_at',
UTCDateTime,
default=now_func,
onupdate=now_func,
),
)
|
[
"bornstub@gmail.com"
] |
bornstub@gmail.com
|
94383ebb2f76b0a23a095a78a15e0712ce32f6c3
|
745197407e81606718c4cdbedb6a81b5e8edf50b
|
/tests/texttest/TestSelf/Miscellaneous/TrackProcesses/TargetApp/hello.py
|
6d91a22c9c767c9d9e35c54069475d6b2eaed3b7
|
[] |
no_license
|
dineshkummarc/texttest-3.22
|
5b986c4f6cc11fd553dab173c7f2e90590e7fcf0
|
85c3d3627082cdc5860d9a8468687acb499a7293
|
refs/heads/master
| 2021-01-23T20:44:35.653866
| 2012-06-25T07:52:13
| 2012-06-25T07:52:13
| 4,779,248
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
#!/usr/bin/env python
import subprocess, os
def devnull():
if os.name == "posix":
return "/dev/null"
else:
return "nul"
print 'Hello World, now sleeping!'
proc = subprocess.Popen([ "python", "-c", "import time; time.sleep(10)" ], stdin=open(devnull()), stdout=open(devnull(), "w"), stderr=subprocess.STDOUT)
print "Leaking sleep process : sleep process :", proc.pid
|
[
"dineshkummarc@gmail.com"
] |
dineshkummarc@gmail.com
|
9e0f38961c074a19d919bca5e680adbb48167373
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/f4c41963e833ab041b269f62a726d26ed6433987-<translate_quota>-bug.py
|
df94bd39e3f7dbe8d2e19d9a07950641d3fd66c6
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
def translate_quota(self, quota, parent_quota):
if six.text_type(quota).endswith('%'):
pct = int(quota[:(- 1)])
quota = ((int(parent_quota) * pct) / 100)
if (not quota):
return _limit_from_settings(parent_quota)
return _limit_from_settings(quota)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
c53846484eec2daf2dabdcd9d015cf99c3297d22
|
b47c136e077f5100478338280495193a8ab81801
|
/Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/ble_heart_rate_simpletest.py
|
55bca725fc071fa3160f161bab7321a68ab8fcdb
|
[
"Apache-2.0"
] |
permissive
|
IanSMoyes/SpiderPi
|
22cd8747cc389f674cc8d95f32b4d86f9b7b2d8e
|
cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1
|
refs/heads/master
| 2023-03-20T22:30:23.362137
| 2021-03-12T17:37:33
| 2021-03-12T17:37:33
| 339,555,949
| 16
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,881
|
py
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
Read heart rate data from a heart rate peripheral using the standard BLE
Heart Rate service.
"""
import time
import adafruit_ble
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.standard.device_info import DeviceInfoService
from adafruit_ble_heart_rate import HeartRateService
# PyLint can't find BLERadio for some reason so special case it here.
ble = adafruit_ble.BLERadio() # pylint: disable=no-member
hr_connection = None
while True:
print("Scanning...")
for adv in ble.start_scan(ProvideServicesAdvertisement, timeout=5):
if HeartRateService in adv.services:
print("found a HeartRateService advertisement")
hr_connection = ble.connect(adv)
print("Connected")
break
# Stop scanning whether or not we are connected.
ble.stop_scan()
print("Stopped scan")
if hr_connection and hr_connection.connected:
print("Fetch connection")
if DeviceInfoService in hr_connection:
dis = hr_connection[DeviceInfoService]
try:
manufacturer = dis.manufacturer
except AttributeError:
manufacturer = "(Manufacturer Not specified)"
try:
model_number = dis.model_number
except AttributeError:
model_number = "(Model number not specified)"
print("Device:", manufacturer, model_number)
else:
print("No device information")
hr_service = hr_connection[HeartRateService]
print("Location:", hr_service.location)
while hr_connection.connected:
print(hr_service.measurement_values)
time.sleep(1)
|
[
"ians.moyes@gmail.com"
] |
ians.moyes@gmail.com
|
2f705f3b5731158999fc4d2d605618c06352e922
|
2287048846a00f4cc5f55bd6d48da4a108aba7d7
|
/review/migrations/0011_auto_20170320_0931.py
|
e774ee2eda1b026683b3e3c566933be1b659a175
|
[
"BSD-2-Clause-Views"
] |
permissive
|
kgdunn/peer-review-system
|
ab840060ad799c25e730e47a09bcbb3a09898890
|
1fd5ac9d0f84d7637a86682e9e5fc068ac404afd
|
refs/heads/master
| 2021-02-08T10:10:16.271687
| 2017-08-07T10:55:21
| 2017-08-07T10:55:21
| 244,140,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-20 08:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('review', '0010_gradecomponent_name_in_table'),
]
operations = [
migrations.AlterField(
model_name='gradecomponent',
name='explanation',
field=models.TextField(help_text='HTML is possible; used in the template. Can include template elements: {{grade_text}}, {{pr.___}}, etc.', max_length=500),
),
]
|
[
"kgdunn@gmail.com"
] |
kgdunn@gmail.com
|
316d63df1926eb2c2fb3946ba166785457a4e14e
|
2c07edf26ef31574d40c96a2d4d9e13238ec74a6
|
/flask_backend/mailer.py
|
1eea1462647912c938d16b7291535db6e7255e57
|
[] |
no_license
|
fryger/wsb_sys_wbudowane
|
5a61270c4b22d51884117518d903fc386f8b25ca
|
3f6cb8581b872d3b590bf41875098fd9105aeef1
|
refs/heads/master
| 2023-07-31T23:11:56.472684
| 2021-10-01T22:59:06
| 2021-10-01T22:59:06
| 408,139,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from tinydb import TinyDB, Query
db = TinyDB('./db.json')
table = db.table('Email')
config = table.all()[0]
sender = config['sender']
password = config['pass']
receiver = config['receiver']
def send_email(name, spots):
with open('output.jpg', 'rb') as f:
img_data = f.read()
mail_content = f"Cześć, na parkingku {name} jest {spots} wolnych miejsc!"
sender_address = sender
sender_pass = password
receiver_address = receiver
message = MIMEMultipart()
message['From'] = sender_address
message['To'] = receiver_address
message['Subject'] = f'Parking {name} , wolne miejsca {spots}'
message.attach(MIMEText(mail_content, 'plain'))
image = MIMEImage(img_data, name='parking.jpg')
message.attach(image)
session = smtplib.SMTP('smtp.gmail.com', 587)
session.starttls()
session.login(sender_address, sender_pass)
text = message.as_string()
session.sendmail(sender_address, receiver_address, text)
session.quit()
if __name__ == "__main__":
send_email('Reda przód', '6')
|
[
"you@example.com"
] |
you@example.com
|
c1548dc4e5f4cfc7389941be28b7f70f05136071
|
f3686e5768fa645953d0a8847d6ad65e210bdc98
|
/pinry/core/migrations/0002_add_licence.py
|
141d07b9d282be395fc8ee9fff6c174ce68d976c
|
[
"BSD-2-Clause"
] |
permissive
|
Psycojoker/stickers.community
|
576dd9e90ce884582d0c1c68221f420cbf0fef00
|
a57b723e3c9100efbe55c07607c344e66828efa3
|
refs/heads/master
| 2021-01-20T20:44:42.412521
| 2016-07-19T09:08:40
| 2016-07-19T09:08:43
| 63,618,191
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='pin',
name='licence',
field=models.CharField(max_length=255, null=True, choices=[(b'cc-by', b'CC-BY'), (b'cc-by-sa', b'CC-BY-SA'), (b'cc-by-nc', b'CC-BY-NC'), (b'cc-by-nc-sa', b'CC-BY-NC-SA'), (b'cc-by-nd', b'CC-BY-ND'), (b'cc-by-nc-nd', b'CC-BY-NC-ND'), (b'cc0', b'Public Domain (CC0)'), (b'free-art', b'Free Art Licence'), (b'wtfpl', b'WTFpl'), (b'other', b'Other (put it in the description)')]),
),
]
|
[
"cortex@worlddomination.be"
] |
cortex@worlddomination.be
|
9aabd7aa49edf10476bf2da66ee2c45d98dc572b
|
7f5cf5f4f35ef5a01a62d9e68354859178909cf3
|
/homeassistant/components/device_tracker/ee_brightbox.py
|
fc23abda1db3564cd89ce97ac09d9e89f2507875
|
[
"Apache-2.0"
] |
permissive
|
unibeck/home-assistant
|
1cf2fb3f57528bb39a63e4dc8f7591f618d75715
|
937eba3dbec0f9eb75ddf2a8b39284afa1bc3301
|
refs/heads/master
| 2020-04-30T15:55:24.264362
| 2019-03-20T17:16:24
| 2019-03-20T17:16:24
| 176,933,979
| 1
| 0
|
Apache-2.0
| 2019-03-21T11:48:56
| 2019-03-21T11:48:55
| null |
UTF-8
|
Python
| false
| false
| 3,126
|
py
|
"""
Support for EE Brightbox router.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.ee_brightbox/
"""
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['eebrightbox==0.0.4']
_LOGGER = logging.getLogger(__name__)
CONF_VERSION = 'version'
CONF_DEFAULT_IP = '192.168.1.1'
CONF_DEFAULT_USERNAME = 'admin'
CONF_DEFAULT_VERSION = 2
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_VERSION, default=CONF_DEFAULT_VERSION): cv.positive_int,
vol.Required(CONF_HOST, default=CONF_DEFAULT_IP): cv.string,
vol.Required(CONF_USERNAME, default=CONF_DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})
def get_scanner(hass, config):
"""Return a router scanner instance."""
scanner = EEBrightBoxScanner(config[DOMAIN])
return scanner if scanner.check_config() else None
class EEBrightBoxScanner(DeviceScanner):
"""Scan EE Brightbox router."""
def __init__(self, config):
"""Initialise the scanner."""
self.config = config
self.devices = {}
def check_config(self):
"""Check if provided configuration and credentials are correct."""
from eebrightbox import EEBrightBox, EEBrightBoxException
try:
with EEBrightBox(self.config) as ee_brightbox:
return bool(ee_brightbox.get_devices())
except EEBrightBoxException:
_LOGGER.exception("Failed to connect to the router")
return False
def scan_devices(self):
"""Scan for devices."""
from eebrightbox import EEBrightBox
with EEBrightBox(self.config) as ee_brightbox:
self.devices = {d['mac']: d for d in ee_brightbox.get_devices()}
macs = [d['mac'] for d in self.devices.values() if d['activity_ip']]
_LOGGER.debug('Scan devices %s', macs)
return macs
def get_device_name(self, device):
"""Get the name of a device from hostname."""
if device in self.devices:
return self.devices[device]['hostname'] or None
return None
def get_extra_attributes(self, device):
"""
Get the extra attributes of a device.
Extra attributes include:
- ip
- mac
- port - ethX or wifiX
- last_active
"""
port_map = {
'wl1': 'wifi5Ghz',
'wl0': 'wifi2.4Ghz',
'eth0': 'eth0',
'eth1': 'eth1',
'eth2': 'eth2',
'eth3': 'eth3',
}
if device in self.devices:
return {
'ip': self.devices[device]['ip'],
'mac': self.devices[device]['mac'],
'port': port_map[self.devices[device]['port']],
'last_active': self.devices[device]['time_last_active'],
}
return {}
|
[
"marhje52@kth.se"
] |
marhje52@kth.se
|
61c472871ca7ee207b156dd8f7ad8cbf68a7bc3e
|
b48ca984df88fa010cc0b57118e4e9789c13abb8
|
/main/urls.py
|
c8c157ed68ba37277904a340ba738f22299b04bd
|
[] |
no_license
|
SebGrobelny/CAWineClassics
|
1dd8fcf56762093814532560592618d75d9e8b66
|
1bad62e9174ce492db7cbdec6b4fd702446b92e7
|
refs/heads/master
| 2021-08-16T12:50:43.872384
| 2017-11-19T22:12:57
| 2017-11-19T22:12:57
| 108,331,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.splash, name='splash'),
url(r'home', views.home, name='home'),
url(r'about', views.about, name='about'),
url(r'team', views.team, name='team'),
url(r'katie', views.katie, name='katie'),
url(r'leslie', views.leslie, name='leslie'),
url(r'business', views.business, name='business'),
url(r'contact', views.contact, name='contact'),
url(r'winery', views.winery, name='winery'),
url(r'ice_house', views.ice_house, name='ice_house'),
url(r'contact', views.contact, name='contact'),
url(r'purchase', views.purchase, name='purchase'),
url(r'sparkling', views.sparkling, name='sparkling'),
url(r'serendipity', views.serendipity, name='serendipity'),
url(r'champenoise', views.champenoise, name='champenoise'),
url(r'news', views.news, name='news'),
url(r'event', views.event, name='event'),
url(r'still_wine', views.still_wine, name='still_wine'),
url(r'search', views.search, name='search'),
]
|
[
"sgrobelny@Sebastians-MacBook-Air-2.local"
] |
sgrobelny@Sebastians-MacBook-Air-2.local
|
ba73d784f4b93b8204ab8eccd085db5a52cc4fff
|
fd54c0886b81b49a55c31eb8c5254ce83df78785
|
/Source_Code/madagascar/appussd/ussd/services/prepaid/call_me_back/config.py
|
df7560fff2dee759e5b6768280ef316faedc4993
|
[] |
no_license
|
santsy03/RADIX
|
7854896651833b1be6e3279be409db59a71c76e4
|
da8f2535692697b80a6dc543b9eb270fe3d5e4d3
|
refs/heads/master
| 2021-01-12T09:48:32.085432
| 2016-12-13T06:01:41
| 2016-12-13T06:01:41
| 76,260,115
| 0
| 0
| null | 2016-12-13T06:01:41
| 2016-12-12T13:46:31
| null |
UTF-8
|
Python
| false
| false
| 6,559
|
py
|
# coding: utf-8
time_1 = '2015-01-07'
time_2 = '2015-01-08'
responses = {}
responses['True']={}
responses['False']={}
(responses['True'])['txt-1'] = 'Request successfully sent to $recipient. You can send $requestsdiff more requests today'
(responses['True'])['txt-2'] = "Nangataka ho antsoin'ny laharana $recipient ianao.Ambiny: $requestsdiff"
(responses['True'])['txt-3'] = "Vous avez demande a etre rappele par le numero $recipient.Reste: $requestsdiff"
(responses['False'])['txt-1'] = 'You have already used all of your Call Me Back requests.'
(responses['False'])['txt-2'] = 'Tratra ny FONEO 10 afaka ampiasainao anio. Misaotra Tompoko'
(responses['False'])['txt-3'] = "Vous avez utilisez les 10 FONEO que vous disposez pour aujourd'hui. Merci."
responses['failedValidation'] = {}
responses['failedValidation']['txt-1'] = 'Request is not processed. The number you have entered is incorrect. Please try again.'
responses['failedValidation']['txt-2'] = "Misy diso ny nomerao nampidirinao.Misaotra Tompoko."
responses['failedValidation']['txt-3'] = "Le numero entre est incorect. Veuillez corriger et reessayer. Merci"
responses['success'] = {}
#responses['success']['txt-1'] = 'You have asked $recipient to call you back. Out of credit? No worry, use SOS Credit.Press *500#'
responses['success']['txt-1'] = 'You have asked $recipient to call you back.Enjoy long call with FUN COOL: OAr to friends, Ar1 to Airtel, Ar3 off network.Press *100*12# for 950Ar'
#responses['success']['txt-2'] = "Nangataka ho antsoin ny laharana $recipient ianao. Mila miantso maikave ianao nefa tsy manana fahana? ampiasao SOS Credit , antsoy *500#"
responses['success']['txt-2'] = "Nangataka ho antsoin ny laharana $recipient ianao. Te hiresaka ela? Mampiasa FUN COOL: OAr miantso friends,1Ar Airtel,3Ar ny hafa.Antsoy *100*12# . Sarany 950Ar"
#responses['success']['txt-3'] = "Vous avez demande a etre rappele par $recipient.En panne de credit? SOS Credit est la solution.Tapez *500#"
responses['success']['txt-3'] = "Vous avez demande a etre rappele par $recipient. Parlez sans vous retenir avec FUN COOL: OAr friends,1Ar Airtel,3Ar autres operateurs.Code *100*12#.Cout: 950Ar"
responses['one']={}
responses['one']['txt-1'] = 'Request successfully sent to $recipient. You can send $requestsdiff more request today'
responses['one']['txt-2'] = "Nangataka ho antsoin'ny laharana $recipient ianao.Ambiny: $requestsdiff"
responses['one']['txt-3'] = "Vous avez demande a etre rappele par le numero $recipient.Reste: $requestsdiff"
responses['zero'] = {}
responses['zero']['txt-1'] = 'You have already used all of your Call Me Back requests.'
responses['zero']['txt-2'] = 'Tratra ny FONEO 10 afaka ampiasainao anio. Misaotra Tompoko'
responses['zero']['txt-3'] = "Vous avez utilisez les 10 FONEO que vous disposez pour aujourd'hui. Merci."
responses['offnet'] = {}
responses['offnet']['txt-1'] = 'Request is not processed. PCM service is available only for Airtel Customers.'
responses['offnet']['txt-2'] = 'Misy fahadisoana ny fangatahana nataonao.'
responses['offnet']['txt-3'] = 'Desole, la commande que vous avez effectue n est pas valide. Merci'
responses['wrongNumber'] ={}
responses['wrongNumber']['txt-1'] = 'Request is not processed. The number you have entered is incorrect. Please try again.'
responses['wrongNumber']['txt-2'] = 'Misy diso ny nomerao nampidirinao.Misaotra Tompoko.'
responses['wrongNumber']['txt-3'] = 'Le numero entre est incorect. Veuillez corriger et reessayer. Merci'
responses['b_party_message'] = {}
#responses['b_party_message']['txt-1'] = 'FONEO:The %s would like to recieve a call or credit from you. Enjoy 6 times more with BOOST. Press *100*101# .Cost Ar 1000.Thanks'
responses['b_party_message']['txt-1'] = 'FONEO:The %s would like to receive a call or credit from you. But u can also offer FUN COOL as a gift.Just press *100*12*recipient# ok.Cost 950Ar'
#responses['b_party_message']['txt-2'] = '%s FONEO aho na andefaso fahana azafady.Mibontsina avo 6 heny ny 1000Ar miaraka @ BOOST. Andramo anie. Fidirana *100*100#'
responses['b_party_message']['txt-2'] = '%s FONEO aho na andefaso fahana azafady.Azo atao ihany koa ny mandefa FUN COOL. Tsindrio *100*12*nomerao andefasana# ok.Sarany 950Ar'
#responses['b_party_message']['txt-3'] = 'FONEO:Le %s demande a etre rappele ou recevoir de credit de votre part.Misez sur BOOST a Ar1000 et gagnez 6 fois plus de credit.Code *100*101#. Merci'
responses['b_party_message']['txt-3'] = 'FONEO:le %s demande a etre rappele ou recevoir de credit de votre part. Ou offrez lui plutot FUN COOL en cadeau *100*12*destinataire# ok. Cout 950Ar'
responses['message_1'] = {}
responses['message_1']['txt-1'] = ("You have asked %s to call you back . "
+"You know what ? Lowest pack is at Airtel."
+"With FUN 100 , you can get 40sec for 100Ar.Press *100*1#")
responses['message_1']['txt-2'] = ("Nangataka ho antsoin ny laharana %s ianao."
+" Vita hatreo ny mi-bip na miandry antsoina miaraka @ FUN100,"
+" Ar 100 dia ahafahanao miantso. Tsindrio *100*1#")
responses['message_1']['txt-3'] = ("Vous avez demande a etre rappele par %s."
+" Le saviez vous ? FUN 100 de Airtel vous offre 40 sec d appel pour"
+ "seulement 100 Ar .Acces *100*1#")
responses['message_2'] = {}
responses['message_2']['txt-1'] = ("You have asked %s to call you back . "
+"Out of credit ? No worry ,use SOS CREDIT. Press *500#")
responses['message_2']['txt-2'] = ("Nangataka ho antsoin ny laharana %s"
+" ianao. Mila miantso maika ve ianao nefa tsy manana fahana,"
+" ampiasao SOS Credit, antsoy *500#")
responses['message_2']['txt-3'] = ("Vous avez demande a etre rappele par %s."
+" En panne de credit ? SOS Credit est la solution. Tapez *500#")
responses['message_3'] = {}
responses['message_3']['txt-1'] = ("You have asked %s to call you back."
+"You know what ? Lowest pack is at Airtel.With FUN 100 ,"
+" you can get 40sec for 100Ar.Press *100*1#")
responses['message_3']['txt-2'] = ("Nangataka ho antsoin ny laharana %s"
+" ianao. Vita hatreo ny mi-bip na miandry antsoina miaraka"
+" @ FUN100, Ar 100 dia ahafahanao miantso. Tsindrio *100*1#")
responses['message_3']['txt-3'] = ("Vous avez demande a etre rappele par %s."
+" Le saviez vous ? FUN 100 de Airtel vous offre 40 sec d appel"
+" pour seulement 100 Ar .Acces *100*1#")
'''madagascar'''
countryCode = '261'
messageSender = '298'
|
[
"root@oc4686551628.ibm.com"
] |
root@oc4686551628.ibm.com
|
a42a9da2f80c7831f9c9dbe372ebd51a07832af4
|
b2755ce7a643ae5c55c4b0c8689d09ad51819e6b
|
/anuvaad-etl/anuvaad-extractor/document-processor/word-detector/craft/src/utilities/filesystem.py
|
c37c99ac38c309b30030e0008937d396d5bad88a
|
[
"MIT"
] |
permissive
|
project-anuvaad/anuvaad
|
96df31170b27467d296cee43440b6dade7b1247c
|
2bfcf6b9779bf1abd41e1bc42c27007127ddbefb
|
refs/heads/master
| 2023-08-17T01:18:25.587918
| 2023-08-14T09:53:16
| 2023-08-14T09:53:16
| 265,545,286
| 41
| 39
|
MIT
| 2023-09-14T05:58:27
| 2020-05-20T11:34:37
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,001
|
py
|
import os
import shutil
import glob
import pdf2image
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_error
import config
def create_directory(path):
try:
os.mkdir(path)
return True
except FileExistsError as fe_error:
return True
except OSError as error:
log_info('unable to create directory : {}'.format(path), app_context.application_context)
return False
def read_directory_files(path, pattern='*'):
files = [f for f in sorted(glob.glob(os.path.join(path, pattern)))]
return files
def get_subdirectories(path):
return [f.path for f in os.scandir(path) if f.is_dir()]
def extract_image_paths_from_pdf(filepath, workspace_output_dir):
'''
function extracts image per page of the given PDF file.
return list of path of extracted images
'''
working_dir = os.path.join(workspace_output_dir, 'images')
image_filename = os.path.splitext(os.path.basename(filepath))[0]
create_directory(working_dir)
info = pdf2image.pdfinfo_from_path(filepath, userpw=None, poppler_path=None)
maxPages = info["Pages"]
# print(maxPages)
for page in range(1, maxPages+1, 10) :
# convert_from_path(pdf_file, dpi=300, first_page=page, last_page = min(page+10-1,maxPages))
paths = pdf2image.convert_from_path(filepath, dpi=config.EXRACTION_RESOLUTION,first_page=page, last_page = min(page+10-1,maxPages), output_file=image_filename, output_folder=working_dir, fmt='jpg', paths_only=True)
return paths
def extract_xml_path_from_digital_pdf(filepath, workspace_output_dir):
"""
function extracts the XML by using PDF2HTML commandline tool
and returns the path of XML file.
"""
working_dir = os.path.join(workspace_output_dir, 'pdftohtml')
create_directory(working_dir)
working_dir = os.path.join(working_dir, 'xml')
create_directory(working_dir)
shutil.copy(filepath, os.path.join(working_dir, os.path.basename(filepath)))
cmd = ( 'pdftohtml -xml %s' % (os.path.join(working_dir, os.path.basename(filepath))) )
os.system(cmd)
xml_files = read_directory_files(working_dir, pattern='*.xml')
return xml_files[0]
def extract_html_bg_image_paths_from_digital_pdf(filepath, workspace_output_dir):
"""
function extracts the HTML and Background empty image files
and return the paths of background image file paths
"""
working_dir = os.path.join(workspace_output_dir, 'pdftohtml')
create_directory(working_dir)
working_dir = os.path.join(working_dir, 'html')
create_directory(working_dir)
shutil.copy(filepath, os.path.join(working_dir, os.path.basename(filepath)))
cmd = ( 'pdftohtml -c %s' % (os.path.join(working_dir, os.path.basename(filepath))) )
os.system(cmd)
bg_img_files = read_directory_files(working_dir, pattern='*.png')
return bg_img_files
|
[
"srihari.nagaraj@tarento.com"
] |
srihari.nagaraj@tarento.com
|
2fc166f7544bd63905b3e4c486153cb90f70973c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03360/s304226723.py
|
48ffa14fb022080910007afabd1e4a0e564073b3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
num_list = list(map(int, input().split()))
k = int(input())
max = 0
index = -1
for i in num_list:
if max < i:
max = i
index = num_list.index(i)
change = max*(2**k)
num_list[index] = change
ans = 0
for j in num_list:
ans += j
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a490519255f833a6757b089bece303247e25680e
|
eda678c6158431430fa195fd5d51c424293fc724
|
/experiments/dyad_isolate/Snakefile
|
e42ff25cfeb717a0fc291320fdfb94f76695d7d4
|
[] |
no_license
|
ohsu-comp-bio/dryads-research
|
8e75ecf812aa3c5139cffacf43116772d6a36376
|
c5c4b9e3c5e4ae5820b1dcfa669abf222e85d0db
|
refs/heads/master
| 2023-04-12T20:55:52.147569
| 2021-08-14T21:36:57
| 2021-08-14T21:36:57
| 139,887,441
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,665
|
import os
import sys
sys.path.extend([os.path.join(os.environ['CODEDIR'], 'dryads-research',
'experiments', 'utilities')])
from pipeline_setup import get_task_arr, tasks_files
TMPDIR = os.path.join(
os.environ['TEMPDIR'], 'dryads-research', 'dyad_isolate',
config['expr_source'], config['cohort'],
config['search'], config['mut_lvls'], config['classif']
)
OUTDIR = os.path.join(os.environ['DATADIR'],
'dryads-research', 'dyad_isolate',
'__'.join([config['expr_source'], config['cohort']]))
localrules: target, merge
rule target:
input:
expand("{outdir}/out-conf_{search}_{mut_lvls}_{classif}.p.gz",
outdir=OUTDIR, **config)
threads: 1
rule isolate:
output: "{TMPDIR}/output/out__cv-{cv_id}_task-{task_id}.p"
threads: 8
shell: """
set +u; source activate research; set -u;
export OMP_NUM_THREADS=1;
sleep $(( ({wildcards.cv_id} + 1) * ({wildcards.task_id} + 1) \
* $(shuf -i 1-9 -n 1) ));
python -m dryads-research.experiments.dyad_isolate.fit_isolate \
{config[classif]} {TMPDIR} \
--task_id={wildcards.task_id} --cv_id={wildcards.cv_id}
"""
rule gather:
input: tasks_files
output: "{TMPDIR}/merge/out-conf_{tasks}.p.gz"
threads: 12
shell: """
set +u; source activate research; set -u;
tasks={wildcards.tasks}
python -m dryads-research.experiments.dyad_isolate.gather_isolate \
{TMPDIR} --task_ids ${{tasks//-/ }} -c 12
"""
rule merge:
input:
[os.path.join(TMPDIR, 'merge',
"out-conf_{}.p.gz".format('-'.join(task_list)))
for task_list in get_task_arr(TMPDIR)]
output:
expand("{outdir}/out-conf_{search}_{mut_lvls}_{classif}.p.gz",
outdir=OUTDIR, **config)
threads: 1
shell: """
set +u; source activate research; set -u;
python -m dryads-research.experiments.subgrouping_isolate.merge_isolate \
{TMPDIR}
out_tag={config[search]}_{config[mut_lvls]}_{config[classif]}
cp {TMPDIR}/setup/cohort-data.p.gz \
{OUTDIR}/cohort-data_${{out_tag}}.p.gz
cp {TMPDIR}/out-pred.p.gz {OUTDIR}/out-pred_${{out_tag}}.p.gz
cp {TMPDIR}/out-tune.p.gz {OUTDIR}/out-tune_${{out_tag}}.p.gz
cp {TMPDIR}/out-pheno.p.gz {OUTDIR}/out-pheno_${{out_tag}}.p.gz
cp {TMPDIR}/out-aucs.p.gz {OUTDIR}/out-aucs_${{out_tag}}.p.gz
cp {TMPDIR}/out-conf.p.gz {OUTDIR}/out-conf_${{out_tag}}.p.gz
"""
|
[
"mgrzad@gmail.com"
] |
mgrzad@gmail.com
|
|
087b0a60a232ac975d6f97c182cdb3f97f2b8023
|
36ad0839d62d326f60bdcc17af21b48f2391987f
|
/Layers/bottlenecks.py
|
c44d28a9e8cdbb5cd6029fe63bbfa152a9a77a12
|
[
"MIT"
] |
permissive
|
esmou2/Kylearn-pytorch
|
adef4be5e59a4d6c7e6f33bc76fe6b1aaea0c5be
|
2e07f7b40b3cbbabf8be6b1abc8a350ddc66eef0
|
refs/heads/master
| 2022-11-09T23:11:49.225401
| 2020-05-09T22:45:44
| 2020-05-09T22:45:44
| 260,531,095
| 0
| 0
|
MIT
| 2020-05-01T18:31:32
| 2020-05-01T18:31:32
| null |
UTF-8
|
Python
| false
| false
| 5,175
|
py
|
import torch.nn as nn
class LinearBottleneckLayer(nn.Module):
''' Bottleneck Layer '''
def __init__(self, d_features, d_hid, d_out=None, dropout=0.1):
super().__init__()
if d_out == None:
d_out = d_features
self.encode = nn.Linear(d_features, d_hid)
self.decode = nn.Linear(d_hid, d_out)
nn.init.xavier_normal_(self.encode.weight)
nn.init.xavier_normal_(self.decode.weight)
self.layer_norm = nn.LayerNorm(d_features)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
'''
Arguments:
x {Tensor, shape [batch_size, d_features]}
Returns:
x {Tensor, shape [batch_size, d_features]}
'''
residual = x
encode = nn.functional.relu(self.encode(x))
decode = self.decode(encode)
output = self.dropout(decode)
output = self.layer_norm(output + residual)
output = output + residual
return output
class ShuffleBottleneckLayer(nn.Module):
''' Bottleneck Layer '''
def __init__(self, n_depth, d_features, mode, d_hid=None, dropout=0.1):
super().__init__()
self.n_depth = n_depth
self.d_features = d_features
self.mode = mode
if d_hid == None:
d_hid = d_features
if mode == '1d':
self.bottle_neck_1 = nn.Linear(d_features, d_hid)
self.bottle_neck_2 = nn.Linear(d_hid, d_features)
elif mode == '2d':
# self.bottle_neck_1 = nn.Conv1d(n_depth, d_hid, kernel_size=1, bias=False)
# self.bottle_neck_2 = nn.Conv1d(d_hid, n_depth, kernel_size=1, bias=False)
self.bottle_neck_1 = nn.Conv1d(d_features, d_hid, kernel_size=1)
self.bottle_neck_2 = nn.Conv1d(d_hid, d_features, kernel_size=1)
else:
pass
nn.init.xavier_normal_(self.bottle_neck_1.weight)
nn.init.xavier_normal_(self.bottle_neck_2.weight)
self.layer_norm = nn.LayerNorm([d_features])
self.activation = nn.functional.relu
self.dropout = nn.Dropout(dropout)
def forward(self, features):
'''
Arguments:
features {Tensor, shape [batch, d_features] or [batch, n_depth, d_features]} -- features
Returns:
x {Tensor, shape [batch_size, d_features]}
'''
residual = features
if self.mode == '1d':
output = self.bottle_neck_1(features)
output = self.activation(output)
output = self.bottle_neck_2(output)
elif self.mode == '2d':
output = features.transpose(1, 2)
output = self.bottle_neck_1(output)
output = self.activation(output)
output = self.bottle_neck_2(output)
output = output.transpose(2, 1)
else:
residual = 0
output = features
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
class ShuffleBottleneckLayerV2(nn.Module):
''' Bottleneck Layer '''
def __init__(self, n_depth, d_features, mode, d_hid=None, dropout=0.1, c_total=3):
super().__init__()
self.n_depth = n_depth
self.d_features = d_features
self.mode = mode
if d_hid == None:
d_hid = d_features
if mode == 'residual':
self.bottle_neck_1 = nn.Conv1d(d_features, d_hid, kernel_size=1)
self.bottle_neck_2 = nn.Conv1d(d_hid, d_features, kernel_size=1)
elif mode == 'dense':
self.bottle_neck_1 = nn.Conv1d(c_total, d_hid, kernel_size=1)
self.bottle_neck_2 = nn.Conv1d(d_hid, c_total, kernel_size=1)
else:
pass
nn.init.xavier_normal_(self.bottle_neck_1.weight)
nn.init.xavier_normal_(self.bottle_neck_2.weight)
self.layer_norm = nn.LayerNorm([d_features])
self.activation = nn.functional.relu
self.dropout = nn.Dropout(dropout)
def forward(self, features):
'''
Arguments:
features {Tensor, shape [batch, ?, d_features]} -- features
Returns:
x {Tensor, shape [batch_size, d_features]}
'''
residual = features
if self.mode == 'residual':
# features {Tensor, shape [batch, 3, d_features]}
output = features.transpose(1, 2)
output = self.bottle_neck_1(output)
output = self.activation(output)
output = self.bottle_neck_2(output)
output = output.transpose(2, 1)
elif self.mode == 'dense':
# features {Tensor, shape [batch, c_total, d_features]}
output = self.bottle_neck_1(features) # [batch, d_hid, d_features]
output = self.activation(output)
output = self.bottle_neck_2(output) # [batch, c_total, d_features]
output = output.transpose(2, 1)
else:
residual = 0
output = features
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
|
[
"sjc951213@gmail.com"
] |
sjc951213@gmail.com
|
57ec1c8837dbbf8ef325c7d10c00a3ba0a6fd2c6
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.COT/Sun-ExtA_16/pdf_to_json_test_Latn.COT_Sun-ExtA_16.py
|
6f18e32b3f24f6feeadfec011553ba559a4c128f
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.COT/Sun-ExtA_16/udhr_Latn.COT_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
7c03529bb6f59646fb9f76a6e8293168b77906bf
|
5c4d4a2c44eebf8fd9dd790da2a9ba4ededcfb70
|
/django_kala/django_kala/test_settings.py
|
8d079d8b604125d1fece8ccd9988b53999bdd663
|
[
"LicenseRef-scancode-other-permissive",
"MIT"
] |
permissive
|
brahimmade/kala-app
|
bc2602c6034203f83ced448ba54db7606a1234fe
|
6ac816e7c2711568cd7bcb1d996ba74c09513b3f
|
refs/heads/master
| 2023-03-21T05:15:52.436228
| 2020-03-10T15:50:29
| 2020-03-10T15:50:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,371
|
py
|
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
from django_kala.functions import import_from_string
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'storages',
'taggit',
'auth.apps.AuthConfig',
'organizations.apps.OrganizationsConfig',
'documents.apps.DocumentsConfig',
'django_kala',
'projects.apps.ProjectsConfig',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
AUTH_USER_MODEL = 'kala_auth.User'
ACCOUNT_ACTIVATION_DAYS = 15
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
USE_CERTIFICATES = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
ALLOWED_HOSTS = ['localhost']
ROOT_URLCONF = 'django_kala.urls'
STATIC_URL = '/static/'
LANGUAGE_CODE = 'en-us'
SECRET_KEY = 'foobar'
DOCUMENT_ROOT = ''
import os
STATIC_ROOT = os.path.join(os.path.dirname(__file__) + '/..', 'static/')
MEDIA_ROOT = '/tmp'
TIME_ZONE = 'Pacific/Honolulu'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGIN_REDIRECT_URL = '/'
PLATFORM_MANAGER = import_from_string(
'django_kala.platforms.test.manager.PlatformManager',
'PLATFORM_MANAGER'
)
EXPORT_QUEUE = 'test'
CELERY_BROKER_URL = 'memory://'
EMAIL_APP = 'kala'
USE_HTML_EMAIL = False
APPLICATION_URL = 'http://localhost'
HELP_EMAIL = 'test.help'
FROM_EMAIL = 'test.help@test'
EXPORT_DIRECTORY = '/tmp/test_exports/'
|
[
"bgroff@hawaii.edu"
] |
bgroff@hawaii.edu
|
1f5531465bc0a3b731ae7971a7b903cc3feb40fe
|
e28ce5cca66c56ee7446a46e18375430d0d404eb
|
/OpenCV/color_study.py
|
6a9c15b7489c871bf2e477d6074c9f588571fd66
|
[] |
no_license
|
git4robot/PyKids
|
4fb60c5b107527336d9e686a98988ba7a8354f31
|
866e45e13171322ad1892d604508cfee9f8086c8
|
refs/heads/master
| 2020-04-17T20:45:26.741363
| 2020-04-14T02:21:55
| 2020-04-14T02:21:55
| 166,919,523
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,439
|
py
|
'''
# RGB: https://www.jianshu.com/p/25712f48a354
'''
import cv2
import numpy as np
from sklearn.cluster import KMeans
from collections import Counter
from matplotlib import pyplot as plt
def get_dominant_color(image, k=4, image_processing_size = None):
"""
takes an image as input
returns the dominant color of the image as a list
dominant color is found by running k means on the
pixels & returning the centroid of the largest cluster
processing time is sped up by working with a smaller image;
this resizing can be done with the image_processing_size param
which takes a tuple of image dims as input
>>> get_dominant_color(my_image, k=4, image_processing_size = (25, 25))
[56.2423442, 34.0834233, 70.1234123]
"""
#resize image if new dims provided
if image_processing_size is not None:
image = cv2.resize(image, image_processing_size,
interpolation = cv2.INTER_AREA)
#reshape the image to be a list of pixels
image = image.reshape((image.shape[0] * image.shape[1], 3))
#cluster and assign labels to the pixels
clt = KMeans(n_clusters = k)
labels = clt.fit_predict(image)
#count labels to find most popular
label_counts = Counter(labels)
#subset out most popular centroid
dominant_color = clt.cluster_centers_[label_counts.most_common(1)[0][0]]
return list(dominant_color)
def main():
img_src = cv2.imread(r'./test001.png')
#plt.imshow(img_src, cmap = 'gray', interpolation = 'bicubic')
#plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
#plt.show()
cv2.namedWindow('img_src', cv2.WINDOW_AUTOSIZE)
#cv2.resizeWindow("img_src", 300,300)
cv2.imshow('img_src', img_src)
cv2.waitKey(0)
cv2.destroyAllWindows()
work_img = img_src.copy()
hsvImg = cv2.cvtColor(work_img, cv2.COLOR_BGR2HSV) #BGR not RGB
height = hsvImg.shape[0]
width = hsvImg.shape[1]
#收集图片不同像素
colorsMap = {}
for row in range(height):
for col in range(width):
pxH = hsvImg[row,col,0]
if pxH != 0:
if pxH not in colorsMap:
colorsMap[pxH] = [(row, col)]
colorsMap[pxH].append((row,col))
#print(colorsMap.keys())
# 分别设置HSV颜色空间中,红色、黄色、蓝色、绿色的阈值
min_red=np.array([156,43,46])
max_red=np.array([180,255,255])
min_red2=np.array([0,43,46])
max_red2=np.array([5,255,255])
min_purple=np.array([125,43,46])
max_purple=np.array([155,255,255])
min_yellow=np.array([26,43,46])
max_yellow=np.array([34,255,255])
min_blue=np.array([100,43,46])
max_blue=np.array([124,255,255])
min_green=np.array([35,43,46])
max_green=np.array([77,255,255])
min_orange=np.array([11,43,46])
max_orange=np.array([25,255,255])
min_cyan = np.array([78, 43, 46])
max_cyan = np.array([99,255,255])
min_black = np.array([0, 0, 0])
max_black = np.array([180, 255, 10])
min_white = np.array([0, 0, 70])
max_white = np.array([180, 30, 255])
COLORS = [
[min_red, max_red, 'red'], [min_red2, max_red2, 'red2'],
[min_green, max_green, 'green'], [min_blue, max_blue, 'blue'],
[min_yellow, max_yellow, 'yellow'],[min_purple, max_purple, 'purple'],
[min_orange, max_orange, 'orange'],[min_black, max_black, 'black'],
[min_white, max_white, 'white'], [min_cyan, max_cyan, 'cyan']
]
for (color_min, color_max, name) in COLORS:
# 使用inRange函数获取图像中目标颜色的索引
mask=cv2.inRange(hsvImg, color_min, color_max)
res=cv2.bitwise_and(work_img, work_img, mask=mask)
cv2.imshow(name, res)
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
binary = cv2.threshold(gray,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if cnts is None or len(cnts) == 0:
continue
cnt_max = sorted(cnts, key=cv2.contourArea)[-1]
if cnt_max is None:
continue
## (4) Crop and save it
x,y,w,h = cv2.boundingRect(cnt_max)
#print(x,y,w,h)
if w < (h*1.3):
continue
print(name)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
height = img_src.shape[0]
width = img_src.shape[1]
channels = img_src.shape[2]
for row in range(height):
for col in range(width):
for c in range(channels):
print(hsvImg[row, col, c],end=' ')
print()
'''
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv2.destroyAllWindows()
|
[
"49167781@qq.com"
] |
49167781@qq.com
|
0857e6a407614cfc12e549e81dca0581fdf90fe8
|
e2ca3205bb5240a1e4c87de0bdb13faa70241f16
|
/src/main/nspawn/wrapper/machinectl.py
|
51e4e7868c85c7b149882efb48b037a9ba526c72
|
[
"Apache-2.0"
] |
permissive
|
random-python/nspawn
|
67da4d96d54dcbf537adaf3421a03020ea5c1769
|
25f53aa565c0685842a89d48d949b0459b1de0a6
|
refs/heads/master
| 2023-05-11T21:27:44.557577
| 2023-05-07T17:00:18
| 2023-05-07T17:00:18
| 184,904,641
| 21
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
"""
Wrapper for machinectl
https://www.freedesktop.org/software/systemd/man/machinectl.html
"""
from nspawn.wrapper.base import Base
from nspawn.wrapper.sudo import Sudo
from typing import List
from dataclasses import dataclass
@dataclass(frozen=True)
class MachineDescriptor():
MACHINE:str
CLASS:str
SERVICE:str
OS:str
VERSION:str
ADDRESSES:str
def descriptor_from_line(line:str) -> MachineDescriptor:
term_list = line.split()
term_size = len(term_list)
invalid = '<>'
return MachineDescriptor(
MACHINE=term_list[0] if term_size > 0 else invalid,
CLASS=term_list[1] if term_size > 1 else invalid,
SERVICE=term_list[2] if term_size > 2 else invalid,
OS=term_list[3] if term_size > 3 else invalid,
VERSION=term_list[4] if term_size > 4 else invalid,
ADDRESSES=term_list[5] if term_size > 5 else invalid,
)
class MachineCtl(Base):
base = Sudo()
def __init__(self):
super().__init__('wrapper/machinectl')
def status(self, machine:str):
command = ['status', machine]
return self.execute_unit(command)
def start(self, machine:str):
command = ['start', machine]
return self.execute_unit(command)
def stop(self, machine:str):
command = ['stop', machine]
return self.execute_unit(command)
def shell(self, machine, script=['pwd']):
script = ['shell', '--quiet', machine] + script
return self.execute_unit(script)
def show(self, machine:str):
command = ['show', machine]
return self.execute_unit(command)
def show_property(self, machine, name):
command = ['show', '--name', name, '--value', machine]
return self.execute_unit(command)
def pid_get(self, machine:str) -> str:
result = self.show_property(machine, 'Leader')
result.assert_return()
return result.stdout.strip()
def list(self) -> List[MachineDescriptor]:
command = ['list', '--no-legend']
result = self.execute_unit(command)
line_list = result.stdout.splitlines()
meta_list = list(map(descriptor_from_line, line_list))
return meta_list
def has_machine(self, machine:str) -> bool:
meta_list = self.list()
machine_list = list(map(lambda store: store.MACHINE, meta_list))
return machine in machine_list
MACHINE_CTL = MachineCtl()
|
[
"andrei.pozolotin@gmail.com"
] |
andrei.pozolotin@gmail.com
|
c0ed6552ab058902a8ed5dcc21c397f9b6d9c417
|
048f2002ed13503d50428c8949c95a2e4f9bd532
|
/BST/331_VerifyPreorderSerializationofABinaryTree.py
|
1a2935f033ebb619a2fc7ed1ed56b4ab37754480
|
[] |
no_license
|
ZhengLiangliang1996/Leetcode_ML_Daily
|
9c9330bd2c7bab5964fbd3827a27eeff5bd2c502
|
8cdb97bc7588b96b91b1c550afd84e976c1926e0
|
refs/heads/master
| 2023-04-06T19:52:23.524186
| 2023-03-30T21:08:57
| 2023-03-30T21:08:57
| 213,055,072
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 liangliang <liangliang@Liangliangs-MacBook-Air.local>
#
# Distributed under terms of the MIT license.
class Solution(object):
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
# non null can have 2 ##
# null can have no #, so # at most
# R
# # R
# / # #
nodes = preorder.split(',')
count = 1
for n in nodes:
if count <= 0: return False
if n == '#':
count -= 1
else:
count += 1
return count == 0
|
[
"zhengliangliang1996@gmail.com"
] |
zhengliangliang1996@gmail.com
|
52c1d61d88bc555db478d99cd478969f3b6d8759
|
f49b0eb48e7549b9263e87515c10b3e9d40f695a
|
/tensorflow_hub/file_utils.py
|
99b421427d210ac37f19e50acdd3495557185052
|
[
"Apache-2.0"
] |
permissive
|
Pandinosaurus/hub
|
f8e0a6b95f91643ff51f7403f70080b5bc822f5f
|
fa1cf068b9cf034b59e7cd59a6ac0ce7e21a4fd4
|
refs/heads/master
| 2023-08-11T22:12:41.727750
| 2023-08-09T12:05:12
| 2023-08-09T12:05:45
| 129,443,631
| 1
| 0
|
Apache-2.0
| 2023-08-10T10:03:26
| 2018-04-13T19:12:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,779
|
py
|
# Copyright 2020 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for file operations."""
import os
import tarfile
import tensorflow as tf
def extract_file(tgz,
tarinfo,
dst_path,
buffer_size=10 << 20,
log_function=None):
"""Extracts 'tarinfo' from 'tgz' and writes to 'dst_path'."""
src = tgz.extractfile(tarinfo)
if src is None:
return
dst = tf.compat.v1.gfile.GFile(dst_path, "wb")
while 1:
buf = src.read(buffer_size)
if not buf:
break
dst.write(buf)
if log_function is not None:
log_function(len(buf))
dst.close()
src.close()
def extract_tarfile_to_destination(fileobj, dst_path, log_function=None):
"""Extract a tarfile. Optional: log the progress."""
with tarfile.open(mode="r|*", fileobj=fileobj) as tgz:
for tarinfo in tgz:
abs_target_path = merge_relative_path(dst_path, tarinfo.name)
if tarinfo.isfile():
extract_file(tgz, tarinfo, abs_target_path, log_function=log_function)
elif tarinfo.isdir():
tf.compat.v1.gfile.MakeDirs(abs_target_path)
else:
# We do not support symlinks and other uncommon objects.
raise ValueError("Unexpected object type in tar archive: %s" %
tarinfo.type)
def merge_relative_path(dst_path, rel_path):
"""Merge a relative tar file to a destination (which can be "gs://...")."""
# Convert rel_path to be relative and normalize it to remove ".", "..", "//",
# which are valid directories in fileystems like "gs://".
norm_rel_path = os.path.normpath(rel_path.lstrip("/"))
if norm_rel_path == ".":
return dst_path
# Check that the norm rel path does not starts with "..".
if norm_rel_path.startswith(".."):
raise ValueError("Relative path %r is invalid." % rel_path)
merged = os.path.join(dst_path, norm_rel_path)
# After merging verify that the merged path keeps the original dst_path.
if not merged.startswith(dst_path):
raise ValueError("Relative path %r is invalid. Failed to merge with %r." %
(rel_path, dst_path))
return merged
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
73c040d87bb8f39b8306ff100b8532bb96b26810
|
3eec249b3629c31ed0114eaaff485b94f23d6b47
|
/form/urls.py
|
cb7ce8fbbbc45917ab4e7149eccca260c57d5997
|
[] |
no_license
|
sarkmen/form
|
19bae21eafc01aa60c443861dad57833d98fa7d8
|
0efa1bee36e418095210e0fcc1459cdba8ffa1d6
|
refs/heads/master
| 2020-12-25T14:13:11.061223
| 2016-05-10T17:09:59
| 2016-05-10T17:09:59
| 58,479,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
"""form URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from django.http import HttpResponse
def hello(request):
return HttpResponse('''
<h1>Hello, <a href="http://facebook.com/askdjango/" target="_blank">AskDjango</a></h1>
''')
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$' , 'blog.views.index'),
url(r'^blog/', include('blog.urls', namespace='blog'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"beomjun.gim3@gmail.com"
] |
beomjun.gim3@gmail.com
|
e6fe4e7c09f450b93ab07e860c2a7f03ae7f9b89
|
dd1e6fefcf0eb2fd41b30e93200b787fddb575a8
|
/fractals/sierpinski_square.py
|
c238014f80b2549a0ab3313d59f3136efb5869b2
|
[] |
no_license
|
blackbat13/Algorithms-Python
|
7a73f8fecbad00fe9c20a16ed35a2f83b2bc2220
|
751795ce8c4cf711f073bdaad43887186798b5cb
|
refs/heads/master
| 2023-01-30T07:55:11.096845
| 2023-01-19T21:02:09
| 2023-01-19T21:02:09
| 95,124,032
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
import turtle
def sierpinski_square(rank: int, length: float) -> None:
if rank == 0:
turtle.begin_fill()
for _ in range(4):
turtle.forward(length)
turtle.left(90)
turtle.end_fill()
return
for _ in range(4):
for _ in range(2):
turtle.forward(length / 3)
sierpinski_square(rank - 1, length / 3)
turtle.forward(length / 3)
turtle.left(90)
turtle.color('blue')
turtle.speed(0)
turtle.penup()
turtle.back(200)
turtle.pendown()
sierpinski_square(2, 300)
turtle.done()
|
[
"blackbat13@gmail.com"
] |
blackbat13@gmail.com
|
ce6ee73a31200e63a1139adc8ef584f8f14fe536
|
2a0b6c10c20051de0daad5e7e06513fe8d3abacb
|
/test_label.py
|
c829dfb624092e43440bca6c79b37e4018274001
|
[
"MIT"
] |
permissive
|
iancze/Pysplotter
|
b87f5e95aaee9c2081f0b151a3709b8f83c1e04c
|
54d273e27b26968b05730750c365c58375b6a5e7
|
refs/heads/master
| 2021-01-25T07:08:09.461659
| 2012-06-27T19:00:09
| 2012-06-27T19:00:09
| 2,457,047
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,392
|
py
|
import matplotlib
matplotlib.use("Qt4Agg")
from matplotlib.pyplot import figure, show
from matplotlib.patches import Ellipse
import numpy as np
if 1:
fig = figure(1,figsize=(8,5))
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1,5), ylim=(-4,3))
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2*np.pi*t)
line, = ax.plot(t, s, lw=3, color='purple')
ax.annotate('arrowstyle', xy=(0, 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->")
)
ax.annotate('arc3', xy=(0.5, -1), xycoords='data',
xytext=(-30, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=.2")
)
ax.annotate('arc', xy=(1., 1), xycoords='data',
xytext=(-40, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc,angleA=0,armA=30,rad=10"),
)
ax.annotate('arc', xy=(1.5, -1), xycoords='data',
xytext=(-40, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc,angleA=0,armA=20,angleB=-90,armB=15,rad=7"),
)
ax.annotate('angle1', xy=(2., 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
ax.annotate('angle2(3)', xy=(2.5, -1), xycoords='data',
xytext=(-50, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle3,angleA=0,angleB=-90"),
)
ax.annotate('angle3', xy=(3., 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
bbox=dict(boxstyle="round,rounding_size=0.2", fc="white"),
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
ax.annotate('angle4', xy=(3.5, -1), xycoords='data',
xytext=(-70, -60), textcoords='offset points',
size=20,
bbox=dict(boxstyle="round4,pad=.5", fc="0.8"),
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=-90,rad=10"),
)
ax.annotate('angle5', xy=(4., 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=10,
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
ann = ax.annotate('', xy=(4., 1.), xycoords='data',
xytext=(4.5, -1), textcoords='data',
arrowprops=dict(arrowstyle="<->",
connectionstyle="bar",
ec="k",
shrinkA=5, shrinkB=5,
)
)
def plot_more():
fig = figure(2)
fig.clf()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1,5), ylim=(-5,3))
el = Ellipse((2, -1), 0.5, 0.5)
ax.add_patch(el)
ax.annotate('$->$', xy=(2., -1), xycoords='data',
xytext=(-150, -140), textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="->",
patchB=el,
connectionstyle="angle,angleA=90,angleB=0,rad=10"),
)
ax.annotate('fancy', xy=(2., -1), xycoords='data',
xytext=(-100, 60), textcoords='offset points',
size=20,
#bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="fancy",
fc="0.6", ec="none",
patchB=el,
connectionstyle="angle3,angleA=0,angleB=-90"),
)
ax.annotate('simple', xy=(2., -1), xycoords='data',
xytext=(100, 60), textcoords='offset points',
size=20,
#bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="simple",
fc="0.6", ec="none",
patchB=el,
connectionstyle="arc3,rad=0.3"),
)
ax.annotate('wedge1', xy=(2., -1), xycoords='data',
xytext=(-100, -100), textcoords='offset points',
size=20,
#bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="wedge,tail_width=0.7",
fc="0.6", ec="none",
patchB=el,
connectionstyle="arc3,rad=-0.3"),
)
ann = ax.annotate('wedge2', xy=(2., -1), xycoords='data',
xytext=(0, -45), textcoords='offset points',
size=20,
bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec=(1., .5, .5)),
arrowprops=dict(arrowstyle="wedge,tail_width=1.",
fc=(1.0, 0.7, 0.7), ec=(1., .5, .5),
patchA=None,
patchB=el,
relpos=(0.2, 0.8),
connectionstyle="arc3,rad=-0.1"),
)
ann = ax.annotate('wedge3', xy=(2., -1), xycoords='data',
xytext=(35, 0), textcoords='offset points',
size=20, va="center",
bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"),
arrowprops=dict(arrowstyle="wedge,tail_width=1.",
fc=(1.0, 0.7, 0.7), ec="none",
patchA=None,
patchB=el,
relpos=(0.2, 0.5),
)
)
show()
|
[
"iancze@gmail.com"
] |
iancze@gmail.com
|
1928c8ea213df09712461f81278cde51e2c03d74
|
c6c76e87ab639c120055783b71e295accd243cd3
|
/{{cookiecutter.module_name}}/src/bio2bel_{{ cookiecutter.module_name }}/__init__.py
|
ac0e57ac2c082f98391d68799216ff51820b9515
|
[
"MIT"
] |
permissive
|
deeenes/bio2bel-cookiecutter
|
48e3293744cfbbec0ee1b2907a602b01b47d6ee9
|
8464a34faf3e645de20fb098ced4ec31666dd10a
|
refs/heads/master
| 2020-04-25T22:45:38.970831
| 2019-02-24T19:29:14
| 2019-02-24T19:29:14
| 173,122,231
| 0
| 0
|
MIT
| 2019-02-28T14:01:14
| 2019-02-28T14:01:13
| null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
# -*- coding: utf-8 -*-
"""{{ cookiecutter.short_description }}"""
from .constants import get_version
from .manager import Manager
__all__ = [
'Manager',
'get_version',
]
|
[
"cthoyt@gmail.com"
] |
cthoyt@gmail.com
|
7fff988fa5a77b5b720d1cd83cd7e8a266e4b410
|
f353f19174ea2fd8569c8f46bb2d99142fff919c
|
/String/Exercise-50.py
|
39a1a406836b67dd4b99c3644c3b1ead05da42e3
|
[] |
no_license
|
self-study-squad/Python-examples
|
a283d9efe47d71d2e134e8fc2e919dccffe5108a
|
07de281fbcc9fb7f421b89a74ff24bafc78a5d4b
|
refs/heads/master
| 2022-12-28T01:35:03.927338
| 2020-10-09T06:41:16
| 2020-10-09T06:41:16
| 298,136,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
#Write a Python program to split a string on the last occurrence of the delimiter.
strip ='w,3,r,e,s,o,u,r,c,e'
lst = strip.rsplit(',',2)
print(lst)
|
[
"longpham6595@gmail.com"
] |
longpham6595@gmail.com
|
d1802bdc126d931b61e7ad0bd26a97909c809bd9
|
d043a51ff0ca2f9fb3943c3f0ea21c61055358e9
|
/房天下全栈/MongoDb_write.py
|
50cbcf8646502f27c2635be6028ee5e0d127149e
|
[] |
no_license
|
lj1064201288/dell_python
|
2f7fd9dbcd91174d66a2107c7b7f7a47dff4a4d5
|
529985e0e04b9bde2c9e0873ea7593e338b0a295
|
refs/heads/master
| 2020-03-30T03:51:51.263975
| 2018-12-11T13:21:13
| 2018-12-11T13:21:13
| 150,707,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
import pymongo
class Mongo_DB():
def __init__(self, mongo_db, city, item):
self.host = 'localhost'
self.mongo_db = mongo_db
self.city = city
self.item = item
def open_spider(self):
try:
self.client = pymongo.MongoClient(self.host)
self.db = self.client[self.mongo_db]
except SystemError as s:
print(s.args)
except Exception as e:
print(e.args)
def process_item(self):
try:
self.db[self.city].insert(self.item)
print(self.item['楼盘名称'],"存储到数据库成功")
except SystemError as e:
print(e.args)
print( "存储到数据库失败", self.item)
except Exception as e:
print( self.item,"存储到数据库失败", e.args)
def run_sql(self):
self.open_spider()
self.process_item()
self.close_spider()
def close_spider(self):
self.client.close()
|
[
"1064201288@qq.com"
] |
1064201288@qq.com
|
ea3c7378eafbd7e24cb32a92bd84a33af4cf2f07
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/missingDigit_20200728194115.py
|
0cbdd8d70f0de557aa2de261f510c18ae656645a
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
def missing(s):
s = s.replace("=","==")
for x in range(1000000):
try:
if eval(s.replace("x",str(x))):
return x
except:
pass
return None
print(s)
missing("3x + 12 = 46")
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
3ec75456e5cf113904ab7c17e0059d937c023644
|
373939995a89ed84a26653bf4b11e02b9e060b3d
|
/20210503PythonAdvanced/05-contextmanager/ctx01.py
|
48cab9fb6ae9619ae2f1d2d1236c1f7fab38fe4e
|
[
"MIT"
] |
permissive
|
AuroraBoreas/pypj_sonic_pc
|
28406f1951280b9349a25fdbd0ad02bae8adc316
|
3016ed173d912e2ffa08c8581c98a5932c486467
|
refs/heads/master
| 2023-09-01T15:04:36.246303
| 2023-08-25T01:05:28
| 2023-08-25T01:05:28
| 279,821,926
| 0
| 0
|
MIT
| 2022-06-22T04:52:25
| 2020-07-15T09:15:32
|
Python
|
UTF-8
|
Python
| false
| false
| 702
|
py
|
"#Python is a protocol orientated lang; every top-level function has a corresponding dunder method implemented;"
import sqlite3
with sqlite3.connect('test.db') as conn:
cur = conn.cursor()
cur.execute('CREATE TABLE points(x int, y int);')
cur.execute('INSERT INTO points(x, y) VALUES(1, 1);')
cur.execute('INSERT INTO points(x, y) VALUES(1, 2);')
cur.execute('INSERT INTO points(x, y) VALUES(2, 1);')
cur.execute('INSERT INTO points(x, y) VALUES(2, 2);')
for row in cur.execute('SELECT x, y FROM points;'):
print(row)
for row in cur.execute('SELECT sum(x * y) FROM points;'):
print(row)
cur.execute('DROP TABLE points;')
|
[
"noreply@github.com"
] |
AuroraBoreas.noreply@github.com
|
1c6a029683af969af9e6686df9c21e1d0165a4b2
|
5e3ebc83bc3fe2f85c34563689b82b1fc8b93a04
|
/google/ads/googleads/v5/enums/types/account_budget_proposal_status.py
|
775b7b599ba465c2c0fdc70efefd98eefd7eb098
|
[
"Apache-2.0"
] |
permissive
|
pdsing/google-ads-python
|
0ce70227cd6bb13a25cd13de0ca05c2636279ecd
|
ee2c059498d5679a0d1d9011f3795324439fad7c
|
refs/heads/master
| 2023-05-04T18:39:57.412453
| 2021-05-21T16:38:17
| 2021-05-21T16:38:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v5.enums",
marshal="google.ads.googleads.v5",
manifest={"AccountBudgetProposalStatusEnum",},
)
class AccountBudgetProposalStatusEnum(proto.Message):
r"""Message describing AccountBudgetProposal statuses."""
class AccountBudgetProposalStatus(proto.Enum):
r"""The possible statuses of an AccountBudgetProposal."""
UNSPECIFIED = 0
UNKNOWN = 1
PENDING = 2
APPROVED_HELD = 3
APPROVED = 4
CANCELLED = 5
REJECTED = 6
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
pdsing.noreply@github.com
|
07c81e48ef1e0240cf2c4b5ca63eec342824fd44
|
846e8886bbe7e8c3cdee4ba505c2217f1da1d803
|
/python/catkin/test_results.py
|
b3471991e33c50440122cc729b6db58321bb9dd9
|
[] |
no_license
|
jamuraa/catkin
|
ef315aa644459a73443d2a8d74e6e8c0954b47f3
|
91b133d4c2048af097fdea270a0a19c57b422ad0
|
refs/heads/master
| 2020-11-30T13:03:20.220219
| 2012-10-02T18:54:56
| 2012-10-02T18:54:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
from __future__ import print_function
import os
from xml.etree.ElementTree import ElementTree
def read_junit(filename):
tree = ElementTree()
root = tree.parse(filename)
num_tests = int(root.attrib['tests'])
num_errors = int(root.attrib['errors'])
num_failures = int(root.attrib['failures'])
return (num_tests, num_errors, num_failures)
def test_results(test_results_dir):
    """Collect JUnit results for every ``*.xml`` file under a directory tree.

    Hidden directories (leading dot) are pruned and never descended into.
    Files that fail to parse are reported on stdout and skipped.  Keys of
    the returned dict are file paths relative to ``test_results_dir``;
    values are ``(tests, errors, failures)`` tuples.
    """
    results = {}
    prefix_len = len(test_results_dir) + 1
    for dirpath, dirnames, filenames in os.walk(test_results_dir):
        # Prune in place so os.walk never enters dot-directories.
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for xml_name in (f for f in filenames if f.endswith('.xml')):
            full_path = os.path.join(dirpath, xml_name)
            name = full_path[prefix_len:]
            try:
                counts = read_junit(full_path)
            except Exception as e:
                print('Skipping "%s": %s' % (name, str(e)))
                continue
            results[name] = counts
    return results
def print_summary(results, show_stable=False, show_unstable=True):
    """Print per-suite lines and an aggregate summary of test results.

    :param results: dict mapping suite name -> (tests, errors, failures)
    :param show_stable: also print suites with no errors and no failures
    :param show_unstable: print suites that had errors or failures
    """
    totals = [0, 0, 0]
    for name in sorted(results):
        tests, errors, failures = results[name]
        totals[0] += tests
        totals[1] += errors
        totals[2] += failures
        stable = not errors and not failures
        if stable and show_stable:
            print('%s: %d tests' % (name, tests))
        if not stable and show_unstable:
            print('%s: %d tests, %d errors, %d failures'
                  % (name, tests, errors, failures))
    print('Summary: %d tests, %d errors, %d failures' % tuple(totals))
|
[
"dthomas@willowgarage.com"
] |
dthomas@willowgarage.com
|
810af9acd051bb92282777ed5159e2d3bea725ea
|
471b5d4df7c92af540c3d348594cc6ea98d65fed
|
/dojo_python/flask/survey/survey.py
|
d3be0e91dea895719a61bdd25aa6ec76be766ca5
|
[] |
no_license
|
samuellly/dojo_assignment_file
|
929c6d747077b47b35179f190075b1d9a54e257c
|
37363982238fa7591a139a3af9beb20a8e165997
|
refs/heads/master
| 2021-01-13T05:30:02.462066
| 2017-05-20T00:27:47
| 2017-05-20T00:27:47
| 80,334,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
# Minimal two-page survey app: a form on '/' posts its fields to '/result'.
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/')
def index():
    # Render the survey form.
    return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
    # Echo the four submitted form fields back on the result page.
    # Raises a 400 (KeyError on request.form) if any field is missing.
    print ("Info received!")
    return render_template('result.html', name = request.form['name'], location = request.form['location'], language = request.form['language'], comment = request.form['comment'])
# NOTE(review): debug=True enables the interactive debugger — keep this for
# local development only.
app.run(debug=True)
|
[
"gjqorgus900327@gmail.com"
] |
gjqorgus900327@gmail.com
|
b029dde505319423c857d3ae2b468e2b48f9ea6d
|
543286f4fdefe79bd149ff6e103a2ea5049f2cf4
|
/Exercicios&cursos/eXcript/Aula 18 - Propriedade Sticky.py
|
505b6762f67eaf3c58b626f5f1c71a0c1459ee06
|
[] |
no_license
|
antonioleitebr1968/Estudos-e-Projetos-Python
|
fdb0d332cc4f12634b75984bf019ecb314193cc6
|
9c9b20f1c6eabb086b60e3ba1b58132552a84ea6
|
refs/heads/master
| 2022-04-01T20:03:12.906373
| 2020-02-13T16:20:51
| 2020-02-13T16:20:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# width  -> widget width  (original Portuguese note: "width == largura")
# height -> widget height (original Portuguese note: "height == altura")
from tkinter import *
janela = Tk()
# Demo of the grid geometry manager's `sticky` option (lesson 18).
lb1 = Label(janela, text="ESPAÇO", width=15, height=3, bg="blue")
lbHORIZONTAL = Label(janela, text="HORIZONTAL", bg="yellow")
lbVERTICAL = Label(janela, text="VERTICAL", bg="yellow")
lb1.grid(row=0, column=0)
# sticky=E pins the label to the east (right) edge of its grid cell.
lbHORIZONTAL.grid(row=1, column=0, sticky=E)
# sticky=S pins the label to the south (bottom) edge of its grid cell.
lbVERTICAL.grid(row=0, column=1, sticky=S)
janela.geometry("200x200+100+100")  # 200x200 window at screen offset (100, 100)
janela.mainloop()
|
[
"progmatheusmorais@gmail.com"
] |
progmatheusmorais@gmail.com
|
2056295116744d61aff23b37cb126feb78904a4e
|
863a56f99b4668211b96d66e3d2698196e46f3b1
|
/prng/cellular_automata/rule198/run.py
|
309e1673dd75c013f3267dff98a2899f99f68d8b
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
atoponce/scripts
|
15b958463d6e788ad6f7785d2614ddb372fc69a7
|
b2c8fd2a0b68e83562570c315f4c9596ee546011
|
refs/heads/master
| 2023-04-28T05:47:07.918556
| 2023-04-15T15:02:05
| 2023-04-15T15:02:05
| 8,612,257
| 22
| 4
| null | 2016-12-22T19:21:28
| 2013-03-06T20:18:33
|
Shell
|
UTF-8
|
Python
| false
| false
| 643
|
py
|
#!/usr/bin/python3
# Rule 198 elementary cellular automaton driven as a toy PRNG: each step
# prints the current bit string interpreted as a binary fraction in [0, 1).
#seed = '00000000000000000100000000000000000' # textbook initial state
seed = '01011111110010010011010001100100010' # random initial state
bits = len(seed)
for _ in range(5000):
    print(int(seed, 2) / 2 ** bits)
    cells = [int(c) for c in seed]
    successor = []
    for q in range(bits):
        left = cells[(q - 1) % bits]   # p: neighbour to the left (wraps)
        here = cells[q]                # q: current cell
        right = cells[(q + 1) % bits]  # r: neighbour to the right (wraps)
        # rule 198, algebraic form: (q + r + p*r) mod 2
        successor.append((here + right + left * right) % 2)
    seed = ''.join(str(bit) for bit in successor)
|
[
"aaron.toponce@gmail.com"
] |
aaron.toponce@gmail.com
|
946fd49ed7af083f41429c81ef2bb5819af47060
|
9a0a4e1f843d1457c4f466c05c994f3e6ecd842a
|
/change_transparency.py
|
543c49f2aa8d0ef476010ab9f243970f94d0c354
|
[] |
no_license
|
sjbrown/steam_jet_blower
|
688aa44e43ea8a285ebaf3923473b4a4049b5537
|
5b894354cb60b5d5d6eee74af77140af641580ee
|
refs/heads/master
| 2021-01-10T03:19:21.853486
| 2016-03-18T20:27:49
| 2016-03-18T20:27:49
| 54,229,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
#!/usr/bin/env python
#Import Modules
import pygame
from pygame.locals import *
_cachedOriginals = {}
_cachedCalculatedArrays = {}
#-----------------------------------------------------------------------------
def change_alpha_mult(img, percentAlpha):
    """Scale the per-pixel alpha channel of `img` to percentAlpha percent of
    its original values, memoizing results per (percentAlpha, id(img)).

    Python 2 / pygame code (uses has_key, xrange and the print statement).
    Raises on any percentAlpha that is not an int in [0, 100].
    """
    global _cachedOriginals
    global _cachedCalculatedArrays
    if percentAlpha < 0 or percentAlpha > 100 or type(percentAlpha) != int:
        raise Exception( "percentAlpha not an int between 0 and 100" )
    floatAlpha = float(percentAlpha) / 100
    # Locked array view onto the surface's alpha channel; the surface stays
    # locked until this reference is deleted below.
    alphaArray = pygame.surfarray.pixels_alpha( img )
    if not _cachedOriginals.has_key( id(img) ):
        origArray = alphaArray
        # NOTE(review): on a 2-D surfarray, `alphaArray[:]` may be a *view*
        # rather than a copy, in which case the "cached original" aliases the
        # array mutated below — verify against the array backend in use.
        _cachedOriginals[id(img)] = alphaArray[:]
    else:
        origArray = _cachedOriginals[id(img)]
    key = ( percentAlpha, id(img) )
    if _cachedCalculatedArrays.has_key( key ):
        alphaArray = _cachedCalculatedArrays[ key ][:]
    else:
        # Scale every row of the original alpha values by floatAlpha.
        for i in xrange( len(alphaArray) ):
            alphaArray[i] = [ floatAlpha*x for x in origArray[i] ]
        _cachedCalculatedArrays[ key ] = alphaArray[:]
    del alphaArray #this unlocks the surface
#this calls the 'main' function when this script is executed
if __name__ == '__main__': print "didn't expect that!"
|
[
"github@ezide.com"
] |
github@ezide.com
|
7c89b5a70eaa41d0b10e26ac6461585729c21d14
|
05b80d92bb2efec76f898c527cc803f931031266
|
/Blind 75/Programs/Longest Repeating Character Replacement.py
|
1d5251bb53822143571115ed0129d2c93426ce21
|
[] |
no_license
|
PriyankaKhire/ProgrammingPracticePython
|
b5a6af118f3d4ec19de6fcccb7933d84f7522d1a
|
8dd152413dce2df66957363ff85f0f4cefa836e8
|
refs/heads/master
| 2022-08-28T00:44:34.595282
| 2022-08-12T19:08:32
| 2022-08-12T19:08:32
| 91,215,578
| 18
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
# Longest Repeating Character Replacement
# https://leetcode.com/problems/longest-repeating-character-replacement/
# Solution understood from.
# https://leetcode.com/problems/longest-repeating-character-replacement/discuss/358879/Java-Solution-Explained-and-Easy-to-Understand-for-Interviews
'''
formula: (length of substring - number of times of the maximum occurring character in the substring) <= k
'''
class Solution(object):
    def addToHashMap(self, letter, hashMap):
        """Increment the occurrence count of `letter` in `hashMap` in place."""
        if letter not in hashMap:
            hashMap[letter] = 0
        hashMap[letter] += 1

    def characterReplacement(self, s, k):
        """Return the length of the longest substring of `s` that can be
        turned into a run of one repeated character by replacing at most
        `k` characters.

        Sliding-window invariant:
            (window length - count of the window's most frequent char) <= k

        :type s: str
        :type k: int
        :rtype: int
        """
        # Fix: the original left this docstring as a dead statement after
        # the final `return`; it now documents the method properly.
        counts = {}  # char -> occurrences inside the current window
        start = 0
        maxOccurringCharCount = 0
        longestLength = 0
        for end in range(len(s)):
            self.addToHashMap(s[end], counts)
            # This running maximum never decreases; a stale value can only
            # keep the window the same size, never grow the answer wrongly.
            maxOccurringCharCount = max(maxOccurringCharCount, counts[s[end]])
            windowLength = end - start + 1
            if windowLength - maxOccurringCharCount <= k:
                longestLength = max(longestLength, windowLength)
            else:
                # Shrink from the left; s[start] leaves the window.
                counts[s[start]] -= 1
                start += 1
        return longestLength
|
[
"priyanka.khire@gmail.com"
] |
priyanka.khire@gmail.com
|
6f62aac4b432ea6c0ddfaf845217dc767679d71f
|
12d1bcb4bb0a473d163048f1c5ac9eef6389bc24
|
/HypothesisTesting/Quiz.py
|
d386378049509604a12e023d6c89890c25f5779e
|
[] |
no_license
|
Bharadwaja92/DataScienceProjects
|
339795c08c4b631006f1602ec84f3b48b828e538
|
088305387339affa662ac3d88ea5fac2651295b5
|
refs/heads/master
| 2020-03-29T19:23:58.041782
| 2019-01-29T12:22:03
| 2019-01-29T12:22:03
| 150,261,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
""""""
"""
Which of these is an accurate statement of the Central Limit Theorem?
For a large enough sample size, our sample mean will be sufficiently close to the population mean.
What is a statistical hypothesis test?
A way of quantifying the truth of a statement.
Which of the following describes a Type II error? False negative
A survey on preferred ice cream flavors not establishing a clear favorite when the majority of people prefer chocolate.
What is a p-value?
In a hypothesis test, a p-value is the probability that the null hypothesis is true.
Suppose we were exploring the relationship between local honey and allergies.
Which of these would be a statement of the null hypothesis?
Local honey has no effect on allergies, any relationship between consuming local honey and allergic outbreaks is due to chance.
Which of these describes a sample mean?
The mean of a subset of our population which is hopefully, but not necessarily, representative of the overall average.
Which of the following hypothesis tests would be used to compare two sets of numerical data?
2 Sample T-Test
* Analysis of variance is used to determine if three or more numerical samples come from the same population.
"""
|
[
"saibharadwaja92@gmail.com"
] |
saibharadwaja92@gmail.com
|
b508232586963bd3703658b87b4854b11d1c3e75
|
fc3f784c8d00f419b11cbde660fe68a91fb080ca
|
/algoritm/20상반기 코딩테스트/한수/bj1065.py
|
b711cbf86b44c09064fe63cda2dc9461a9d7b1d7
|
[] |
no_license
|
choo0618/TIL
|
09f09c89c8141ba75bf92657ac39978913703637
|
70437a58015aecee8f3d86e6bfd0aa8dc11b5447
|
refs/heads/master
| 2021-06-25T07:01:34.246642
| 2020-12-21T04:57:13
| 2020-12-21T04:57:13
| 163,782,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
import sys
# Redirect stdin so input() reads from the local test file (BOJ practice setup).
sys.stdin = open('bj1065.txt','r')
N=int(input())
# BOJ 1065 "han" numbers: digits form an arithmetic progression.  Every
# number below 100 qualifies, so for N < 100 the answer is N itself.
if N<100:print(N)
else:
    R=0
    for i in range(100,N+1):
        # a, b, c = hundreds, tens, units digit of i
        a,b,c=i//100,(i%100)//10,i%10
        # arithmetic progression iff the digit differences match
        if a-b==b-c:R+=1
    # 99 numbers below 100 plus the qualifying three-digit numbers
    print(99+R)
|
[
"choo0618@naver.com"
] |
choo0618@naver.com
|
263feec81bd5161ad7aca3304939729b59c6e0f5
|
6e466112c3682338ec56c892c883284704fbb727
|
/bflib/restrictions/weapons.py
|
e21e12d074b299dcaffacd3c90e51a5f8e5dbcfd
|
[
"MIT"
] |
permissive
|
ChrisLR/BFLib
|
5aee153aeaef72516f737abf74cf89e7ec1cb90a
|
2af49cc113792c4967c0c8c5bf32a1b76876e6e2
|
refs/heads/master
| 2021-01-22T17:52:58.790057
| 2017-11-15T17:46:56
| 2017-11-15T17:46:56
| 102,407,112
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
from bflib.keywords.weapons import WeaponWieldKeyword
from bflib.restrictions.base import Restriction
class WeaponRestrictionSet(Restriction):
    """Whitelist/blacklist pair restricting which weapons may be used."""
    __slots__ = ["included", "excluded"]
    def __init__(self, included=None, excluded=None):
        # Either collection may be None, meaning "no explicit list".
        self.included = included
        self.excluded = excluded
class WeaponSizeRestrictionSet(Restriction):
    """Per-size (large/medium/small) weapon wieldability restriction."""
    __slots__ = ["large", "medium", "small"]
    # Keyword enumeration shared by all instances of this restriction.
    keywords = WeaponWieldKeyword
    # Defaults mean every weapon size is wieldable unless overridden.
    def __init__(self, large=keywords.CanWield, medium=keywords.CanWield, small=keywords.CanWield):
        self.large = large
        self.medium = medium
        self.small = small
|
[
"arzhul@gmail.com"
] |
arzhul@gmail.com
|
581eb71ed8e3a43f72e7d7c856a6ef0ca4273774
|
a78b1c41fc038703e58d5249a9948fbfd06f8159
|
/code_nodeperturbation/FM4/sim2/gene/gene.py
|
47d12a4d13f3c51625eb54494462cfc38ce251d7
|
[] |
no_license
|
le-chang/DISC1_interactome
|
15ed1025048e49d5bb6b6bd13eac4f148fe83d04
|
b517309b8583358220c2a639d4ef5d303bfb0acd
|
refs/heads/master
| 2021-02-13T21:00:20.418928
| 2019-04-24T13:59:50
| 2019-04-24T13:59:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,652
|
py
|
"""
Migration simulator
It is also a demonstration on how the collector works
"""
import boolean2
from boolean2 import Model, util
from random import choice
# ocasionally randomized nodes
TARGETS = set( "Migration".split() )
def new_getvalue( state, name, p):
    """
    Called every time a node value is used in an expression.
    It will override the value for the current step only.
    Returns random values for the node states
    """
    global TARGETS
    value = util.default_get_value( state, name, p )
    if name in TARGETS:
        # pick at random from True, False and original value
        # (so a targeted node is randomized on roughly 2 of 3 lookups)
        return choice( [True, False, value] )
    else:
        return value
def run( text, nodes, repeat, steps ):
    """
    Runs the simulation and collects the nodes into a collector,
    a convenience class that can average the values that it collects.

    Returns a dict of normalized per-node averages over `repeat`
    asynchronous runs of `steps` iterations each.  (Python 2 code:
    uses xrange and the print statement.)
    """
    coll = util.Collector()
    for i in xrange( repeat ):
        engine = Model( mode='async', text=text )
        # Override node lookups with the randomizing getter defined above.
        engine.RULE_GETVALUE = new_getvalue
        # minimalist initial conditions, missing nodes set to false
        engine.initialize( missing=util.false )
        engine.iterate( steps=steps)
        coll.collect( states=engine.states, nodes=nodes )
    print '- completed'
    avgs = coll.get_averages( normalize=True )
    return avgs
if __name__ == '__main__':
# read in the text
text = file( 'sim2.txt').read()
# the nodes of interest that are collected over the run
# NODES = 'Apoptosis STAT3 FasL Ras'.split()
# this collects the state of all nodes
NODES = boolean2.all_nodes( text )
#
# raise this for better curves (will take about 2 seconds per repeat)
# plots were made for REPEAT = 1000, STEPS=150
#
REPEAT = 1000
STEPS = 150
data = []
print '- starting simulation with REPEAT=%s, STEPS=%s' % (REPEAT, STEPS)
# multiple overexrpessed nodes
mtext = boolean2.modify_states( text=text, turnon=['APP'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnon=['DAB1'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnon=['DISC1'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnon=['NDEL1'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnon=['PAFAH1B1'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnoff=['APP'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnoff=['DAB1'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnoff=['DISC1'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnoff=['NDEL1'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
mtext = boolean2.modify_states( text=text, turnoff=['PAFAH1B1'] )
avgs = run( text=mtext, repeat=REPEAT, nodes=NODES, steps=STEPS)
data.append( avgs )
fname = 'gene.bin'
util.bsave( data, fname=fname )
print '- data saved into %s' % fname
|
[
"noreply@github.com"
] |
le-chang.noreply@github.com
|
8520cda3561bf8b7c960f8602b2bced270173fa2
|
7c0acdc46cfce5dc116d394f6990ee5ab1c0fa0c
|
/venv/lib/python3.7/site-packages/builders/logger.py
|
2315b5710fc7b44fd1e8f35c660d87e62010e1e9
|
[
"MIT"
] |
permissive
|
Vatansever27/ExchangeCode
|
84fb4a02371fdda7cd94d00971be76bcd1068be0
|
ab284653a337937139a9a28c036efe701fb376c7
|
refs/heads/master
| 2020-04-07T16:38:59.819929
| 2018-11-21T12:18:30
| 2018-11-21T12:18:30
| 158,537,067
| 0
| 0
| null | 2018-11-21T12:18:31
| 2018-11-21T11:22:14
| null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
'''
Created on Sep 10, 2013
This module holds logger configuration for builders
@author: pupssman
'''
import logging
# Package-wide logger; presumably imported elsewhere as builders.logger.logger.
logger = logging.getLogger('builders')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
# NOTE(review): the handler filters at WARN while the logger allows INFO, so
# INFO records are dropped unless another handler is attached — confirm this
# is intentional.
handler.setLevel(logging.WARN)
logger.addHandler(handler)
|
[
"doguhan@puentedev.io"
] |
doguhan@puentedev.io
|
4f122446d7f74b618c9d6df7407213c5b1993795
|
70744b927246edb4cfdc405bd3528513d9ea9ded
|
/envios/autocomplete_light_registry.py
|
cd1e842920390dfa7c8bab2e7b617952f2f99540
|
[] |
no_license
|
jesusmaherrera/enviamexpaisano
|
e0616cbba47a4b4bddc897fbf2244d92c59c10fd
|
dd9e3e8270616a8cb73704dc7076791e36ecc98f
|
refs/heads/master
| 2016-09-06T04:30:19.848954
| 2013-06-07T06:05:27
| 2013-06-07T06:05:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
import autocomplete_light
from cities_light.models import City
# Register a City autocomplete searching the precomputed search_names field.
autocomplete_light.register(City, search_fields=('search_names',),
autocomplete_js_attributes={'placeholder': 'Nombre de la ciudad..'})
# NOTE(review): City is registered a second time below, searching on 'name'.
# Confirm both registrations are intended rather than the second superseding
# the first.
autocomplete_light.register(City, search_fields=('name',),
autocomplete_js_attributes={'placeholder': 'Nombre de la ciudad..'})
|
[
"jesusmaherrera@gmail.com"
] |
jesusmaherrera@gmail.com
|
e944ac632c5986200ef656717afb0a52d305c33e
|
5ec48e90f711c9514a6d2ee36dbb46bc1ba71b74
|
/shop/urls.py
|
c552e41a6565ef31e6acd61ea30c24f84cf3f152
|
[] |
no_license
|
hanieh-mav/hanieh_shop
|
1ca5042fefb970459d9f48fb716a95fec6a530bb
|
b7cf253e11b6c167e78b245f253a8d057f435026
|
refs/heads/main
| 2023-06-10T16:37:26.385048
| 2021-07-07T14:19:58
| 2021-07-07T14:19:58
| 372,892,835
| 2
| 0
| null | 2021-07-07T14:19:59
| 2021-06-01T16:19:48
|
CSS
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
from django.urls import path
from .views import home , category_detail , ProductDetail
# URL namespace; reverse routes as e.g. 'shop:home'.
app_name = 'shop'
urlpatterns = [
    # Product listing, optionally paginated via /page/<n>.
    path('',home,name='home'),
    path('page/<int:page>',home,name='home'),
    # Category listing by slug, optionally paginated.
    path('category/<slug:slug>',category_detail,name='category_detail'),
    path('category/<slug:slug>/<int:page>',category_detail,name='category_detail'),
    # Single-product detail page (route name is spelled 'product_detaill'
    # in templates/reverse() calls — keep consistent if renaming).
    path('detail/<int:pk>',ProductDetail.as_view(),name='product_detaill'),
]
|
[
"h.mehdiabadi@gmail.com"
] |
h.mehdiabadi@gmail.com
|
df7281a7926eb33f1778ef246c2fdeca5fbffa99
|
aafc9140c662fcb2b36fb092cbf861d80e4da7e9
|
/examples/misc/chained_callbacks.py
|
87879ff42488aca6c81fd210a4a5cc2f14054791
|
[] |
no_license
|
alecordev/dashing
|
12fb8d303143130f3351c8042615a0f7497f59cf
|
aac810147f8459834b6c693291b1276e8a84c36e
|
refs/heads/master
| 2023-02-18T08:55:22.410205
| 2022-04-07T08:17:37
| 2022-04-07T08:17:37
| 99,436,393
| 0
| 0
| null | 2023-02-16T03:20:21
| 2017-08-05T17:01:29
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# country -> list of selectable cities; drives both radio groups below.
all_options = {
    "America": ["New York City", "San Francisco", "Cincinnati"],
    "Canada": ["Montréal", "Toronto", "Ottawa"],
}
app.layout = html.Div(
    [
        dcc.RadioItems(
            id="countries-radio",
            options=[{"label": k, "value": k} for k in all_options.keys()],
            value="America",
        ),
        html.Hr(),
        # Options and value are filled in by the chained callbacks below.
        dcc.RadioItems(id="cities-radio"),
        html.Hr(),
        html.Div(id="display-selected-values"),
    ]
)
# Chained callback 1: picking a country repopulates the city options.
@app.callback(Output("cities-radio", "options"), [Input("countries-radio", "value")])
def set_cities_options(selected_country):
    return [{"label": i, "value": i} for i in all_options[selected_country]]
# Chained callback 2: new options auto-select the first city.
@app.callback(Output("cities-radio", "value"), [Input("cities-radio", "options")])
def set_cities_value(available_options):
    return available_options[0]["value"]
# Chained callback 3: render the combined selection as text.
@app.callback(
    Output("display-selected-values", "children"),
    [Input("countries-radio", "value"), Input("cities-radio", "value")],
)
def set_display_children(selected_country, selected_city):
    return "{} is a city in {}".format(
        selected_city,
        selected_country,
    )
if __name__ == "__main__":
    app.run_server(debug=True)
|
[
"alecor.dev@gmail.com"
] |
alecor.dev@gmail.com
|
3b08994748c30a31baf779c095991557e4427e44
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/rasbt_mlxtend/mlxtend-master/mlxtend/classifier/softmax_regression.py
|
04e5d621bb0f443e834b5ed9ae559e12551abd2b
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,868
|
py
|
# Sebastian Raschka 2014-2017
# mlxtend Machine Learning Library Extensions
#
# Implementation of the mulitnomial logistic regression algorithm for
# classification.
# Author: Sebastian Raschka <sebastianraschka.com>
#
# License: BSD 3 clause
import numpy as np
from time import time
from .._base import _BaseModel
from .._base import _IterativeModel
from .._base import _MultiClass
from .._base import _Classifier
class SoftmaxRegression(_BaseModel, _IterativeModel, _MultiClass, _Classifier):
    """Softmax regression classifier.

    Parameters
    ------------
    eta : float (default: 0.01)
        Learning rate (between 0.0 and 1.0)
    epochs : int (default: 50)
        Passes over the training dataset.
        Prior to each epoch, the dataset is shuffled
        if `minibatches > 1` to prevent cycles in stochastic gradient descent.
    l2 : float
        Regularization parameter for L2 regularization.
        No regularization if l2=0.0.
    minibatches : int (default: 1)
        The number of minibatches for gradient-based optimization.
        If 1: Gradient Descent learning
        If len(y): Stochastic Gradient Descent (SGD) online learning
        If 1 < minibatches < len(y): SGD Minibatch learning
    n_classes : int (default: None)
        A positive integer to declare the number of class labels
        if not all class labels are present in a partial training set.
        Gets the number of class labels automatically if None.
    random_seed : int (default: None)
        Set random state for shuffling and initializing the weights.
    print_progress : int (default: 0)
        Prints progress in fitting to stderr.
        0: No output
        1: Epochs elapsed and cost
        2: 1 plus time elapsed
        3: 2 plus estimated time until completion

    Attributes
    -----------
    w_ : 2d-array, shape={n_features, 1}
        Model weights after fitting.
    b_ : 1d-array, shape={1,}
        Bias unit after fitting.
    cost_ : list
        List of floats, the average cross_entropy for each epoch.
    """
    def __init__(self, eta=0.01, epochs=50,
                 l2=0.0,
                 minibatches=1,
                 n_classes=None,
                 random_seed=None,
                 print_progress=0):

        self.eta = eta
        self.epochs = epochs
        self.l2 = l2
        self.minibatches = minibatches
        self.n_classes = n_classes
        self.random_seed = random_seed
        self.print_progress = print_progress
        self._is_fitted = False

    def _net_input(self, X, W, b):
        # Linear scores: one column of logits per class.
        return (X.dot(W) + b)

    def _softmax(self, z):
        # Subtract the row-wise max before exponentiating for numerical
        # stability; the result is unchanged mathematically.
        e_x = np.exp(z - z.max(axis=1, keepdims=True))
        out = e_x / e_x.sum(axis=1, keepdims=True)
        return out
        # return (np.exp(z.T) / np.sum(np.exp(z), axis=1)).T

    def _cross_entropy(self, output, y_target):
        # Per-sample cross-entropy against the one-hot targets.
        return - np.sum(np.log(output) * (y_target), axis=1)

    def _cost(self, cross_entropy):
        # Average cross-entropy plus the L2 penalty on the weights.
        L2_term = self.l2 * np.sum(self.w_ ** 2)
        cross_entropy = cross_entropy + L2_term
        return 0.5 * np.mean(cross_entropy)

    def _to_classlabels(self, z):
        # Predicted label = index of the highest class probability.
        return z.argmax(axis=1)

    def _fit(self, X, y, init_params=True):
        self._check_target_array(y)
        if init_params:
            if self.n_classes is None:
                self.n_classes = np.max(y) + 1
            self._n_features = X.shape[1]

            self.b_, self.w_ = self._init_params(
                weights_shape=(self._n_features, self.n_classes),
                bias_shape=(self.n_classes,),
                random_seed=self.random_seed)
            self.cost_ = []

        # Fix: `np.float` was removed in NumPy 1.24; the builtin `float`
        # is the documented replacement and is what np.float aliased.
        y_enc = self._one_hot(y=y, n_labels=self.n_classes, dtype=float)

        self.init_time_ = time()
        rgen = np.random.RandomState(self.random_seed)
        for i in range(self.epochs):
            for idx in self._yield_minibatches_idx(
                    rgen=rgen,
                    n_batches=self.minibatches,
                    data_ary=y,
                    shuffle=True):
                # givens:
                # w_ -> n_feat x n_classes
                # b_ -> n_classes
                # net_input, softmax and diff -> n_samples x n_classes:
                net = self._net_input(X[idx], self.w_, self.b_)
                softm = self._softmax(net)
                diff = softm - y_enc[idx]

                # gradient -> n_features x n_classes
                grad = np.dot(X[idx].T, diff)

                # update in opp. direction of the cost gradient
                self.w_ -= (self.eta * grad +
                            self.eta * self.l2 * self.w_)
                self.b_ -= (self.eta * np.sum(diff, axis=0))

            # compute cost of the whole epoch
            net = self._net_input(X, self.w_, self.b_)
            softm = self._softmax(net)
            cross_ent = self._cross_entropy(output=softm, y_target=y_enc)
            cost = self._cost(cross_ent)
            self.cost_.append(cost)
            if self.print_progress:
                self._print_progress(iteration=i + 1,
                                     n_iter=self.epochs,
                                     cost=cost)

        return self

    def predict_proba(self, X):
        """Predict class probabilities of X from the net input.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        ----------
        Class probabilties : array-like, shape= [n_samples, n_classes]
        """
        net = self._net_input(X, self.w_, self.b_)
        softm = self._softmax(net)
        return softm

    def _predict(self, X):
        probas = self.predict_proba(X)
        return self._to_classlabels(probas)
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
748eb1b3110d4ce4036007555737afa714ca4d1e
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/rdbms/azure-mgmt-rdbms/generated_samples/mysql/virtual_network_rules_create_or_update.py
|
d18659e34af6cf458a211fb1e990c431312e142a
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,010
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.rdbms.mysql import MySQLManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-rdbms
# USAGE
python virtual_network_rules_create_or_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # DefaultAzureCredential picks up AZURE_CLIENT_ID / AZURE_TENANT_ID /
    # AZURE_CLIENT_SECRET from the environment (see module docstring).
    client = MySQLManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="ffffffff-ffff-ffff-ffff-ffffffffffff",
    )

    # Long-running operation: begin_* returns a poller and .result() blocks
    # until the virtual network rule is created or updated.
    response = client.virtual_network_rules.begin_create_or_update(
        resource_group_name="TestGroup",
        server_name="vnet-test-svr",
        virtual_network_rule_name="vnet-firewall-rule",
        parameters={
            "properties": {
                "ignoreMissingVnetServiceEndpoint": False,
                "virtualNetworkSubnetId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.Network/virtualNetworks/testvnet/subnets/testsubnet",
            }
        },
    ).result()
    print(response)


# x-ms-original-file: specification/mysql/resource-manager/Microsoft.DBforMySQL/legacy/stable/2017-12-01/examples/VirtualNetworkRulesCreateOrUpdate.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
fb2a5ba96ca24f614cac37db2dbc94f81c00928d
|
e838076bc1c8aedbb8c77710b1a1a32efc3a4da1
|
/site_selection/migrations/0002_siteselectionselectedsites.py
|
6d1aaf6ccd16ac174ac7cf7e4c86b045fbcf5e69
|
[] |
no_license
|
abbasgis/ferrp
|
5f2f7768f0e38e299498c2e74379311698b6321f
|
77736c33e7ec82b6adf247a1bf30ccbc4897f02e
|
refs/heads/master
| 2023-05-25T09:59:45.185025
| 2021-06-12T09:15:07
| 2021-06-12T09:15:07
| 376,236,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-11-15 20:28
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: declares the SiteSelectionSelectedsites model
    # over a pre-existing table ('managed': False means Django will not
    # create, alter or drop the table itself).

    dependencies = [
        ('site_selection', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='SiteSelectionSelectedsites',
            fields=[
                ('oid', models.AutoField(primary_key=True, serialize=False)),
                ('site_name', models.CharField(blank=True, max_length=256, null=True)),
                ('project_id', models.CharField(blank=True, max_length=256, null=True)),
                ('created_by', models.IntegerField(blank=True, null=True)),
                ('updated_by', models.IntegerField(blank=True, null=True)),
                ('created_at', models.DateTimeField(blank=True, null=True)),
                ('updated_at', models.DateTimeField(blank=True, null=True)),
                # Geometry stored in Web Mercator (srid=3857).
                ('geom', django.contrib.gis.db.models.fields.GeometryField(blank=True, null=True, srid=3857)),
            ],
            options={
                'db_table': 'site_selection_selectedsites',
                'managed': False,
            },
        ),
    ]
|
[
"abbas123@abc"
] |
abbas123@abc
|
0f478534f7fcad7d99d58f79b2fc2d2cc39d3729
|
d2332604fc80b6d622a263b2af644425a7e703de
|
/fast-track/dynamic_programming/11_decode_ways.py
|
24d39552909846b648b35486f8055c00aeb4d3b3
|
[] |
no_license
|
abhijitdey/coding-practice
|
b3b83a237c1930266768ce38500d6812fc31c529
|
6ae2a565042bf1d6633cd98ed774e4a77f492cc8
|
refs/heads/main
| 2023-08-14T23:31:06.090613
| 2021-10-18T21:35:56
| 2021-10-18T21:35:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
"""
A message containing letters from A-Z can be encoded into numbers using the following mapping:
'A' -> "1"
'B' -> "2"
...
'Z' -> "26"
To decode an encoded message, all the digits must be grouped then mapped back into letters using the reverse of the mapping above (there may be multiple ways).
For example, "11106" can be mapped into:
"AAJF" with the grouping (1 1 10 6)
"KJF" with the grouping (11 10 6)
Note that the grouping (1 11 06) is invalid because "06" cannot be mapped into 'F' since "6" is different from "06".
Given a string s containing only digits, return the number of ways to decode it.
Range of any letter: 1-26
"""
def decode_ways(s, dp, n):
    """Count decodings of the suffix s[n-1:] under the A=1..Z=26 mapping.

    `n` is a 1-based position into `s`; dp[n] memoizes the answer for the
    suffix starting at index n-1.  A suffix beginning with '0' has no
    valid decoding.
    """
    remaining = len(s) - (n - 1)
    if remaining <= 0:
        # Empty suffix: exactly one way (decode nothing).
        return 1
    if s[n - 1] == "0":
        # No letter maps to "0", and "0x" groups are invalid.
        return 0
    if remaining == 1:
        return 1
    if dp[n] is not None:
        return dp[n]
    if s[n - 1] in ("1", "2") and int(s[n - 1 : n + 1]) <= 26:
        # The next two digits form a valid letter too: branch both ways.
        dp[n] = decode_ways(s, dp, n + 1) + decode_ways(s, dp, n + 2)
    else:
        # Only the single-digit grouping is possible here.
        dp[n] = decode_ways(s, dp, n + 1)
    return dp[n]
if __name__ == "__main__":
s = "226"
dp = [None] * (len(s) + 1)
dp[0] = 1
print(decode_ways(s, dp, n=1))
|
[
"ashiz2013@gmail.com"
] |
ashiz2013@gmail.com
|
a2e126193720517843439923118b13b875d7f842
|
bd2a3d466869e0f8cb72075db7daec6c09bbbda1
|
/sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/models/_paged_models.py
|
fdec95712a6365532286786ba2a82a0e79c2e307
|
[
"MIT"
] |
permissive
|
samvaity/azure-sdk-for-python
|
7e8dcb2d3602d81e04c95e28306d3e2e7d33b03d
|
f2b072688d3dc688fed3905c558cff1fa0849b91
|
refs/heads/master
| 2021-08-11T21:14:29.433269
| 2019-07-19T17:40:10
| 2019-07-19T17:40:10
| 179,733,339
| 0
| 1
|
MIT
| 2019-04-05T18:17:43
| 2019-04-05T18:17:42
| null |
UTF-8
|
Python
| false
| false
| 3,607
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RegistryPaged(Paged):
    """Iterator over a paged collection of
    :class:`Registry <azure.mgmt.containerregistry.v2019_06_01_preview.models.Registry>` objects.
    """

    _attribute_map = {
        'current_page': {'key': 'value', 'type': '[Registry]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        super(RegistryPaged, self).__init__(*args, **kwargs)
class OperationDefinitionPaged(Paged):
    """Iterator over a paged collection of
    :class:`OperationDefinition <azure.mgmt.containerregistry.v2019_06_01_preview.models.OperationDefinition>` objects.
    """

    _attribute_map = {
        'current_page': {'key': 'value', 'type': '[OperationDefinition]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        super(OperationDefinitionPaged, self).__init__(*args, **kwargs)
class ReplicationPaged(Paged):
    """Iterator over a paged collection of
    :class:`Replication <azure.mgmt.containerregistry.v2019_06_01_preview.models.Replication>` objects.
    """

    _attribute_map = {
        'current_page': {'key': 'value', 'type': '[Replication]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        super(ReplicationPaged, self).__init__(*args, **kwargs)
class WebhookPaged(Paged):
    """Iterator over a paged collection of
    :class:`Webhook <azure.mgmt.containerregistry.v2019_06_01_preview.models.Webhook>` objects.
    """

    _attribute_map = {
        'current_page': {'key': 'value', 'type': '[Webhook]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        super(WebhookPaged, self).__init__(*args, **kwargs)
class EventPaged(Paged):
    """Iterator over a paged collection of
    :class:`Event <azure.mgmt.containerregistry.v2019_06_01_preview.models.Event>` objects.
    """

    _attribute_map = {
        'current_page': {'key': 'value', 'type': '[Event]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        super(EventPaged, self).__init__(*args, **kwargs)
class RunPaged(Paged):
    """Iterator over a paged collection of
    :class:`Run <azure.mgmt.containerregistry.v2019_06_01_preview.models.Run>` objects.
    """

    _attribute_map = {
        'current_page': {'key': 'value', 'type': '[Run]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        super(RunPaged, self).__init__(*args, **kwargs)
class TaskPaged(Paged):
    """Iterator over a paged collection of
    :class:`Task <azure.mgmt.containerregistry.v2019_06_01_preview.models.Task>` objects.
    """

    _attribute_map = {
        'current_page': {'key': 'value', 'type': '[Task]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *args, **kwargs):
        super(TaskPaged, self).__init__(*args, **kwargs)
|
[
"laurent.mazuel@gmail.com"
] |
laurent.mazuel@gmail.com
|
ece90d1b27b7bda334a307b0a1726b78af015b34
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_20866.py
|
43b89f649be556f519aaca98d7c1a6b0b17da9d8
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
# Split a comma-separated, double-quoted name list into bare strings.
foo = '"MARY","PATRICIA","LINDA","BARBARA","ELIZABETH","JENNIFER","MARIA","SUSAN","MARGARET","DOROTHY","LISA","NANCY","KAREN","BETTY","HELEN","SANDRA","DONNA","CAROL"'
# Each comma-delimited token keeps its surrounding quotes; strip them off.
output = [name.strip('"') for name in foo.split(",")]
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
0efc657b8dcb8e6b318ea4ca6e2a6c04543e1dbd
|
891902687207fb335b65dbb8d31d6e20301764f9
|
/pe048.py
|
bc475ea81eba7af44d87a0dfa5b0a74bcdc8ceb0
|
[] |
no_license
|
maecchi/PE
|
93bd050eaca2733aa37db6ca493b820fe3d7a351
|
3d9092635807f0036719b65adb16f1c0926c2321
|
refs/heads/master
| 2020-05-04T16:38:36.476355
| 2012-06-10T05:26:10
| 2012-06-10T05:26:10
| 1,746,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# pe048.py - Project Euler
#
# Problem 48: report the last ten digits of 1^1 + 2^2 + ... + 1000^1000.
LIMIT = 1000
# Self-powers of 1..LIMIT; range iterates the same values xrange did.
series = [x ** x for x in range(1, LIMIT + 1)]
total = sum(series)
total_str = str(total)
# Slicing the decimal string keeps only the trailing ten digits.
ten_digit_str = total_str[-10:]
print(ten_digit_str)
|
[
"aos81922710@yahoo.co.jp"
] |
aos81922710@yahoo.co.jp
|
6a51e12f7a32aaa10eff1954b31dffd2d63024dd
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/193/usersdata/274/70731/submittedfiles/al7.py
|
1054fb07641b98114c1cfba9aaba25c980ae4b02
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
# -*- coding: utf-8 -*-
# Perfect-number check: n is perfect when the sum of its proper divisors
# (every divisor strictly smaller than n) equals n itself, e.g. 6 = 1+2+3.
n = int(input("Valor de n: "))
i = 1
s = 0
while i < n:
    if n % i == 0:
        # Bug fix: the original wrote `s-s+1`, a no-op expression that never
        # updated s. The `s == n` test below needs the SUM of the divisors,
        # so accumulate the divisor itself.
        s = s + i
    i = i + 1
# Report the divisor sum once, after the loop (not once per iteration).
print(s)
if s == n:
    print("PERFEITO")
else:
    print("NÃO PERFEITO")
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
a5e3c2dd3665157ca080d0fc9762c4e20c48c388
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-2/7cf0626d7b9176f0eba3ff83c69c5b4553ae3f7e-<validate_distribution_from_caller_reference>-fix.py
|
b639b22fa205318f577ce5de14a54a2382c3197b
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
def validate_distribution_from_caller_reference(self, caller_reference):
    """Return the CloudFront distribution whose DistributionConfig has the
    given CallerReference, or None (implicitly) when nothing matches.

    Lists every known distribution id via the facts manager, then fetches
    each distribution individually to inspect its CallerReference.
    """
    try:
        # False -> presumably "do not include all details / no cache" on the
        # facts manager's list call — TODO confirm against list_distributions.
        distributions = self.__cloudfront_facts_mgr.list_distributions(False)
        distribution_name = 'Distribution'
        distribution_config_name = 'DistributionConfig'
        distribution_ids = [dist.get('Id') for dist in distributions]
        for distribution_id in distribution_ids:
            distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
            if (distribution is not None):
                distribution_config = distribution[distribution_name].get(distribution_config_name)
                if ((distribution_config is not None) and (distribution_config.get('CallerReference') == caller_reference)):
                    # NOTE(review): this writes back the exact value just read,
                    # so it is effectively a no-op; kept as-is from the fix.
                    distribution[distribution_name][distribution_config_name] = distribution_config
                    return distribution
    except Exception as e:
        # Any failure (API error, unexpected shape) aborts the module run.
        self.module.fail_json_aws(e, msg='Error validating distribution from caller reference')
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
7b46f5761fbed7cb98152ac3384dc472e21fbcc6
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/edgelm/fairseq/file_io.py
|
dd2865cd448fe581b22d069b32f12c045efc8c1f
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,806
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
from typing import List, Optional
logger = logging.getLogger(__file__)
# Prefer iopath's global path manager when the package is installed; when it
# is not, IOPathManager stays None and PathManager below falls back to plain
# builtin I/O.
try:
    from iopath.common.file_io import g_pathmgr as IOPathManager

    try:
        # [FB only - for now] AWS PathHandler for PathManager
        from .fb_pathhandlers import S3PathHandler

        IOPathManager.register_handler(S3PathHandler())
    except KeyError:
        # register_handler raised because a handler for this prefix already
        # exists — harmless, just note it.
        logging.warning("S3PathHandler already registered.")
    except ImportError:
        logging.debug(
            "S3PathHandler couldn't be imported. Either missing fb-only files, or boto3 module."
        )
except ImportError:
    # iopath not available: signal the pure-builtin fallback path.
    IOPathManager = None
class PathManager:
    """
    Wrapper for insulating OSS I/O (using Python builtin operations) from
    iopath's PathManager abstraction (for transparently handling various
    internal backends).

    Every method delegates to the module-level ``IOPathManager`` when iopath
    was imported successfully, and otherwise falls back to the equivalent
    builtin / os / shutil operation.
    """

    @staticmethod
    def open(
        path: str,
        mode: str = "r",
        buffering: int = -1,
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        newline: Optional[str] = None,
    ):
        """Open *path*; same keyword surface as the builtin ``open``."""
        if IOPathManager:
            return IOPathManager.open(
                path=path,
                mode=mode,
                buffering=buffering,
                encoding=encoding,
                errors=errors,
                newline=newline,
            )
        return open(
            path,
            mode=mode,
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
        )

    @staticmethod
    def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
        """Copy a file.

        NOTE(review): the local fallback ignores ``overwrite`` and
        ``shutil.copyfile`` returns the destination path, not a bool.
        """
        if IOPathManager:
            return IOPathManager.copy(
                src_path=src_path, dst_path=dst_path, overwrite=overwrite
            )
        return shutil.copyfile(src_path, dst_path)

    @staticmethod
    def get_local_path(path: str, **kwargs) -> str:
        """Resolve *path* to a local filesystem path; locals pass through."""
        if IOPathManager:
            return IOPathManager.get_local_path(path, **kwargs)
        return path

    @staticmethod
    def exists(path: str) -> bool:
        """Return True if *path* exists."""
        if IOPathManager:
            return IOPathManager.exists(path)
        return os.path.exists(path)

    @staticmethod
    def isfile(path: str) -> bool:
        """Return True if *path* is a regular file."""
        if IOPathManager:
            return IOPathManager.isfile(path)
        return os.path.isfile(path)

    @staticmethod
    def ls(path: str) -> List[str]:
        """List the entries of directory *path*."""
        if IOPathManager:
            return IOPathManager.ls(path)
        return os.listdir(path)

    @staticmethod
    def mkdirs(path: str) -> None:
        """Create *path* (and parents); existing directories are not an error."""
        if IOPathManager:
            return IOPathManager.mkdirs(path)
        os.makedirs(path, exist_ok=True)

    @staticmethod
    def rm(path: str) -> None:
        """Remove the file at *path*."""
        if IOPathManager:
            return IOPathManager.rm(path)
        os.remove(path)

    @staticmethod
    def chmod(path: str, mode: int) -> None:
        """chmod *path*; silently skipped for iopath-managed (non-local) paths."""
        if not PathManager.path_requires_pathmanager(path):
            os.chmod(path, mode)

    @staticmethod
    def register_handler(handler) -> None:
        """Register an iopath path handler; no-op without iopath."""
        if IOPathManager:
            return IOPathManager.register_handler(handler=handler)

    @staticmethod
    def copy_from_local(
        local_path: str, dst_path: str, overwrite: bool = False, **kwargs
    ) -> None:
        """Copy a local file to *dst_path* (possibly a remote backend).

        NOTE(review): the fallback ignores ``overwrite``/``kwargs``.
        """
        if IOPathManager:
            return IOPathManager.copy_from_local(
                local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
            )
        return shutil.copyfile(local_path, dst_path)

    @staticmethod
    def path_requires_pathmanager(path: str) -> bool:
        """Do we require PathManager to access given path?"""
        if IOPathManager:
            # A path is iopath-managed when it starts with a registered prefix.
            for p in IOPathManager._path_handlers.keys():
                if path.startswith(p):
                    return True
        return False

    @staticmethod
    def supports_rename(path: str) -> bool:
        # PathManager doesn't yet support renames
        return not PathManager.path_requires_pathmanager(path)

    @staticmethod
    def rename(src: str, dst: str):
        """Rename a local path; only valid when supports_rename(src) is True."""
        os.rename(src, dst)

    """
    ioPath async PathManager methods:
    """

    @staticmethod
    def opena(
        path: str,
        mode: str = "r",
        buffering: int = -1,
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        newline: Optional[str] = None,
    ):
        """
        Return file descriptor with asynchronous write operations.
        """
        # Lazily instantiate an iopath PathManager if the module-level import
        # failed earlier; this rebinds the module global IOPathManager.
        global IOPathManager
        if not IOPathManager:
            logging.info("ioPath is initializing PathManager.")
            try:
                from iopath.common.file_io import PathManager

                IOPathManager = PathManager()
            except Exception:
                logging.exception("Failed to initialize ioPath PathManager object.")
        return IOPathManager.opena(
            path=path,
            mode=mode,
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
        )

    @staticmethod
    def async_close() -> bool:
        """
        Wait for files to be written and clean up asynchronous PathManager.

        NOTE: `PathManager.async_close()` must be called at the end of any
        script that uses `PathManager.opena(...)`.
        """
        global IOPathManager
        if IOPathManager:
            return IOPathManager.async_close()
        return False
|
[
"tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net"
] |
tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.