| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Each record below is shown as one metadata row (the columns before `content`, in the order above), followed by the `content` text and one metrics row (the columns after `content`).

| 6637a11d7303e4cb1649f5cb8e8396c1af55c974 | 671 | py | Python | examples/misc/30-macro_call_in_background.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | ["Apache-2.0"] | 501 | 2018-12-06T23:48:17.000Z | 2022-03-31T11:53:18.000Z | examples/misc/30-macro_call_in_background.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | ["Apache-2.0"] | 710 | 2018-11-26T22:04:52.000Z | 2022-03-30T03:53:12.000Z | examples/misc/30-macro_call_in_background.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | ["Apache-2.0"] | 273 | 2018-11-26T10:10:24.000Z | 2022-03-30T12:25:28.000Z |
#!/usr/bin/env python
'''
This example shows how to use the call_in_background macro
'''
from pyscf import lib
import time
def fa():
print('a')
time.sleep(0.5)
def fb():
print('b')
time.sleep(0.8)
print('type 1')
w0 = time.time()
with lib.call_in_background(fa) as afa, lib.call_in_background(fb) as afb:
for i in range(3):
afa()
afb()
print('total time = %.1f s = [fb]0.8 * 3 seconds' % (time.time() - w0))
print('type 2')
w0 = time.time()
with lib.call_in_background(fa, fb) as (afa, afb):
for i in range(3):
afa()
afb()
print('total time = %.1f s = ([fa]0.5 + [fb]0.8) * 3 seconds' % (time.time() - w0))
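
For readers without pyscf at hand, here is a minimal sketch of what a `call_in_background`-style helper can look like, built only on the standard library's `threading` module; pyscf's real implementation differs, and the class below merely mirrors the behavior the example relies on.

```python
import threading

class call_in_background:
    """Sketch: run each wrapped call on a background thread, one at a time per instance."""
    def __init__(self, *fns):
        self.fns = fns
        self.thread = None

    def __enter__(self):
        def wrap(fn):
            def async_fn(*args, **kwargs):
                if self.thread is not None:
                    self.thread.join()  # wait for this instance's previous background call
                self.thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
                self.thread.start()
            return async_fn
        wrapped = tuple(wrap(fn) for fn in self.fns)
        return wrapped[0] if len(wrapped) == 1 else wrapped

    def __exit__(self, *exc):
        if self.thread is not None:
            self.thread.join()  # drain the last background call on exit
```

Under this sketch, "type 1" overlaps fa and fb because each context manager owns its own thread slot, while "type 2" shares one slot and therefore serializes them, matching the two printed timings.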
| 20.333333 | 84 | 0.584203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.311475 |

| 6638916f5159760226a21213fab0fff099a144f2 | 158 | py | Python | segments/segmentsCommon.py | pinfort/jpeg-analyzer | 5e68a1125d8626f34fdac2cf094cef5a2a8a1b58 | ["MIT"] | 5 | 2018-12-09T01:32:24.000Z | 2019-10-23T17:38:50.000Z | segments/segmentsCommon.py | pinfort/jpeg-analyzer | 5e68a1125d8626f34fdac2cf094cef5a2a8a1b58 | ["MIT"] | null | null | null | segments/segmentsCommon.py | pinfort/jpeg-analyzer | 5e68a1125d8626f34fdac2cf094cef5a2a8a1b58 | ["MIT"] | 2 | 2018-12-09T01:41:56.000Z | 2019-09-28T03:10:04.000Z |
from abc import ABCMeta, abstractmethod
class SegmentsCommon(metaclass=ABCMeta):
@abstractmethod
def analyze(self, marker, body):
pass
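
A hypothetical subclass, to show how this abstract interface is meant to be used (the `SofSegment` name and the marker/body semantics are invented for the example, not taken from the real project):

```python
class SofSegment(SegmentsCommon):
    """Invented example analyzer for a start-of-frame segment."""
    def analyze(self, marker, body):
        print("marker 0x%02X, %d payload bytes" % (marker, len(body)))

SofSegment().analyze(0xC0, b"\x08\x00\x10\x00\x10\x03")  # marker 0xC0, 6 payload bytes
```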
| 22.571429 | 41 | 0.702532 | 113 | 0.71519 | 0 | 0 | 67 | 0.424051 | 0 | 0 | 0 | 0 |

| 663b03b22a0af9984081680da2ed3fb151315440 | 86 | py | Python | autofront/tests/simple_test.py | JimmyLamothe/autofront | d179e54411f5d53046a5fa52b4430e09b01ebaca | ["BSD-3-Clause"] | 1 | 2020-11-16T22:18:03.000Z | 2020-11-16T22:18:03.000Z | autofront/tests/simple_test.py | JimmyLamothe/autofront | d179e54411f5d53046a5fa52b4430e09b01ebaca | ["BSD-3-Clause"] | null | null | null | autofront/tests/simple_test.py | JimmyLamothe/autofront | d179e54411f5d53046a5fa52b4430e09b01ebaca | ["BSD-3-Clause"] | null | null | null |
import autofront
from simple_functions import foo
autofront.add(foo)
autofront.run()
| 14.333333 | 32 | 0.825581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |

| 663b6588c78c4b2d7f4f5cddb71f7f5b145f3ce6 | 54 | py | Python | prototype/data/datasets/__init__.py | ModelTC/mqbench-paper | 8d25a3b63c0cde4d904f77439fc435b49b0b33d4 | ["Apache-2.0"] | 6 | 2021-09-26T03:24:26.000Z | 2022-03-17T09:19:18.000Z | prototype/data/datasets/__init__.py | TheGreatCold/mqbench-paper | 8d25a3b63c0cde4d904f77439fc435b49b0b33d4 | ["Apache-2.0"] | null | null | null | prototype/data/datasets/__init__.py | TheGreatCold/mqbench-paper | 8d25a3b63c0cde4d904f77439fc435b49b0b33d4 | ["Apache-2.0"] | 2 | 2021-09-01T03:22:06.000Z | 2021-09-19T06:16:38.000Z |
from .imagenet_dataset import ImageNetDataset # noqa
| 27 | 53 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.12963 |

| 663b9f98e1aef6a83eac108928f75e32ee2a2b00 | 859 | py | Python | problems/287.Find_the_Duplicate_Number/solution-36801_binary.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | ["Unlicense"] | null | null | null | problems/287.Find_the_Duplicate_Number/solution-36801_binary.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | ["Unlicense"] | null | null | null | problems/287.Find_the_Duplicate_Number/solution-36801_binary.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | ["Unlicense"] | null | null | null |
class Solution(object):
def findDuplicate(self, nums):
low = 0
high = len(nums) - 1
        mid = (high + low) // 2  # floor division (the original Py2-style "/" yields a float under Python 3)
while high - low > 1:
count = 0
for k in nums:
if mid < k <= high:
count += 1
if count > high - mid:
low = mid
else:
high = mid
            mid = (high + low) // 2
return high
# The difficulty in this problem lies in the O(1) space requirement; many solutions using O(n) space are also accepted by the OJ.
# The solution applies binary search to the value range [1, n], counting the elements that fall in the sub-range (mid, high].
# If that count is bigger than the capacity of the sub-range, the duplicated integer falls in that sub-range.
# Otherwise the duplicated integer falls in the other half.
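
A concrete trace of the search (input invented): for nums = [1, 3, 4, 2, 2], n = 4 and the duplicate is 2.

```python
# low=0, high=4, mid=2: two values fall in (2, 4] (3 and 4), capacity 2 -> high = 2
# low=0, high=2, mid=1: two values fall in (1, 2] (2 and 2), capacity 1 -> low = 1
# high - low == 1, so the loop exits and high == 2 is the duplicate.
print(Solution().findDuplicate([1, 3, 4, 2, 2]))  # 2
```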
| 37.347826 | 115 | 0.563446 | 442 | 0.514552 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.478463 |

| 663cc62975bf84eb8f4aa1091e6cd8fcbbd6b541 | 1,826 | py | Python | src/data/sets.py | HninPwint/adv_dsi_lab_2 | 0c350444046bf0f468985437b59d2cae3516817a | ["MIT"] | null | null | null | src/data/sets.py | HninPwint/adv_dsi_lab_2 | 0c350444046bf0f468985437b59d2cae3516817a | ["MIT"] | null | null | null | src/data/sets.py | HninPwint/adv_dsi_lab_2 | 0c350444046bf0f468985437b59d2cae3516817a | ["MIT"] | null | null | null |
def subset_x_y(target, features, start_index: int, end_index: int):
    '''Return the (features, target) rows in the [start_index, end_index) window.'''
    return features[start_index:end_index], target[start_index:end_index]
def split_sets_by_time(df, target_col, test_ratio=0.2):
    '''Split a time-ordered dataframe into train/validation/test sets, keeping row order.'''
    df_copy = df.copy()
    target = df_copy.pop(target_col)
    # NOTE: the cutoff is hardcoded to len//5 (20%); the test_ratio argument is unused.
    cutoff = int(len(target)/5)
X_train, y_train = subset_x_y(target=target, features=df_copy, start_index=0, end_index=-cutoff*2)
X_val, y_val = subset_x_y(target=target, features=df_copy, start_index=-cutoff*2, end_index=-cutoff)
X_test, y_test = subset_x_y(target=target, features=df_copy, start_index=-cutoff, end_index=len(target))
return X_train, y_train, X_val, y_val, X_test, y_test
def save_sets(X_train=None, y_train=None, X_val=None, y_val=None, X_test=None, y_test=None, path='../data/processed/'):
"""Save the different sets locally
Parameters
----------
X_train: Numpy Array
Features for the training set
y_train: Numpy Array
Target for the training set
X_val: Numpy Array
Features for the validation set
y_val: Numpy Array
Target for the validation set
X_test: Numpy Array
Features for the testing set
y_test: Numpy Array
Target for the testing set
path : str
Path to the folder where the sets will be saved (default: '../data/processed/')
Returns
-------
"""
import numpy as np
if X_train is not None:
np.save(f'{path}X_train', X_train)
if X_val is not None:
np.save(f'{path}X_val', X_val)
if X_test is not None:
np.save(f'{path}X_test', X_test)
if y_train is not None:
np.save(f'{path}y_train', y_train)
if y_val is not None:
np.save(f'{path}y_val', y_val)
if y_test is not None:
np.save(f'{path}y_test', y_test)
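
A quick size check on a synthetic frame (the pandas import and column names are assumptions for the example): with 10 rows, cutoff = 2, giving a 6/2/2 split.

```python
import pandas as pd

df = pd.DataFrame({"x": range(10), "y": [v % 2 for v in range(10)]})
X_train, y_train, X_val, y_val, X_test, y_test = split_sets_by_time(df, "y")
print(len(X_train), len(X_val), len(X_test))  # 6 2 2
```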
| 32.607143 | 119 | 0.654984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 701 | 0.383899 |

| 663eb6c24c50403fabcf3408feba6dc17b7e3aa6 | 853 | py | Python | Bio_III/DPChange.py | BlackAdder84/Bioinformatics | 6cc662b6c4a3349a89f6fdd26f05f1f6228bd912 | ["MIT"] | 1 | 2017-06-09T03:06:21.000Z | 2017-06-09T03:06:21.000Z | Bio_III/DPChange.py | BlackAdder84/Bioinformatics | 6cc662b6c4a3349a89f6fdd26f05f1f6228bd912 | ["MIT"] | null | null | null | Bio_III/DPChange.py | BlackAdder84/Bioinformatics | 6cc662b6c4a3349a89f6fdd26f05f1f6228bd912 | ["MIT"] | 1 | 2017-05-01T21:15:11.000Z | 2017-05-01T21:15:11.000Z |
# Works
"""
Given a quantity of money and a list of coins, return the minimum number of coins.
"""
def DPChange(money, coins):
MinNumCoins = [0]*(money+1)
MinNumCoins[0] = 0
for m in range(1, money+1):
        MinNumCoins[m] = 100000  # sentinel standing in for "infinity"
for i in range(0, len(coins)):
if m >= coins[i]: # only take coins not greater than money
if MinNumCoins[m-coins[i]] + 1 < MinNumCoins[m]:
MinNumCoins[m] = MinNumCoins[m-coins[i]] + 1
return MinNumCoins[money]
def test():
in_ = (40, [50,25,20,10,5,1])
out_ = 2
assert(DPChange(*in_) == out_), "Test 1 FAILED"
in_ = (8074, [24,13,12,7,5,3,1])
out_ = 338
assert(DPChange(*in_) == out_), "Test 2 FAILED"
if __name__ == "__main__":
money = 16807
coins = [18,17,16,7,6,5,3,1]
print(DPChange(money, coins))
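
A worked instance of the recurrence (money and coin values invented for illustration):

```python
# DPChange(6, [1, 3, 4]) fills MinNumCoins as:
#   m:           0  1  2  3  4  5  6
#   MinNumCoins: 0  1  2  1  1  2  2
# e.g. m=6 takes the best of dp[5]+1=3, dp[3]+1=2 and dp[2]+1=3, so 2 coins (3+3).
print(DPChange(6, [1, 3, 4]))  # 2
```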
| 25.088235 | 82 | 0.570926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.207503 |

| 66400ba0baa5a1b25e3daf468b7b3e31fa7361b6 | 30,286 | py | Python | venv/lib/python3.6/site-packages/feedgen/entry.py | jannahuang/blog | e1d8cfa9d79ac06097a0e55531bba9421fcbf283 | ["MIT"] | null | null | null | venv/lib/python3.6/site-packages/feedgen/entry.py | jannahuang/blog | e1d8cfa9d79ac06097a0e55531bba9421fcbf283 | ["MIT"] | null | null | null | venv/lib/python3.6/site-packages/feedgen/entry.py | jannahuang/blog | e1d8cfa9d79ac06097a0e55531bba9421fcbf283 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
'''
feedgen.entry
~~~~~~~~~~~~~
:copyright: 2013-2020, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from datetime import datetime
import dateutil.parser
import dateutil.tz
import warnings
from lxml.etree import CDATA # nosec - adding CDATA entry is safe
from feedgen.compat import string_types
from feedgen.util import ensure_format, formatRFC2822, xml_fromstring, xml_elem
def _add_text_elm(entry, data, name):
"""Add a text subelement to an entry"""
if not data:
return
elm = xml_elem(name, entry)
type_ = data.get('type')
if data.get('src'):
if name != 'content':
raise ValueError("Only the 'content' element of an entry can "
"contain a 'src' attribute")
elm.attrib['src'] = data['src']
elif data.get(name):
# Surround xhtml with a div tag, parse it and embed it
if type_ == 'xhtml':
xhtml = '<div xmlns="http://www.w3.org/1999/xhtml">' \
+ data.get(name) + '</div>'
elm.append(xml_fromstring(xhtml))
elif type_ == 'CDATA':
elm.text = CDATA(data.get(name))
# Parse XML and embed it
elif type_ and (type_.endswith('/xml') or type_.endswith('+xml')):
elm.append(xml_fromstring(data[name]))
# Embed the text in escaped form
elif not type_ or type_.startswith('text') or type_ == 'html':
elm.text = data.get(name)
# Everything else should be included base64 encoded
else:
raise NotImplementedError(
'base64 encoded {} is not supported at the moment. '
'Pull requests adding support are welcome.'.format(name)
)
# Add type description of the content
if type_:
elm.attrib['type'] = type_
class FeedEntry(object):
    '''FeedEntry class representing an ATOM feed's entry node or an RSS feed's
    item node.
    '''
def __init__(self):
# ATOM
# required
self.__atom_id = None
self.__atom_title = None
self.__atom_updated = datetime.now(dateutil.tz.tzutc())
# recommended
self.__atom_author = None
self.__atom_content = None
self.__atom_link = None
self.__atom_summary = None
# optional
self.__atom_category = None
self.__atom_contributor = None
self.__atom_published = None
self.__atom_source = None
self.__atom_rights = None
# RSS
self.__rss_author = None
self.__rss_category = None
self.__rss_comments = None
self.__rss_description = None
self.__rss_content = None
self.__rss_enclosure = None
self.__rss_guid = {}
self.__rss_link = None
self.__rss_pubDate = None
self.__rss_source = None
self.__rss_title = None
# Extension list:
self.__extensions = {}
self.__extensions_register = {}
def atom_entry(self, extensions=True):
'''Create an ATOM entry and return it.'''
entry = xml_elem('entry')
if not (self.__atom_id and self.__atom_title and self.__atom_updated):
raise ValueError('Required fields not set')
id = xml_elem('id', entry)
id.text = self.__atom_id
title = xml_elem('title', entry)
title.text = self.__atom_title
updated = xml_elem('updated', entry)
updated.text = self.__atom_updated.isoformat()
# An entry must contain an alternate link if there is no content
# element.
if not self.__atom_content:
links = self.__atom_link or []
if not [l for l in links if l.get('rel') == 'alternate']:
raise ValueError('Entry must contain an alternate link or ' +
'a content element.')
# Add author elements
for a in self.__atom_author or []:
# Atom requires a name. Skip elements without.
if not a.get('name'):
continue
author = xml_elem('author', entry)
name = xml_elem('name', author)
name.text = a.get('name')
if a.get('email'):
email = xml_elem('email', author)
email.text = a.get('email')
if a.get('uri'):
uri = xml_elem('uri', author)
uri.text = a.get('uri')
_add_text_elm(entry, self.__atom_content, 'content')
for l in self.__atom_link or []:
link = xml_elem('link', entry, href=l['href'])
if l.get('rel'):
link.attrib['rel'] = l['rel']
if l.get('type'):
link.attrib['type'] = l['type']
if l.get('hreflang'):
link.attrib['hreflang'] = l['hreflang']
if l.get('title'):
link.attrib['title'] = l['title']
if l.get('length'):
link.attrib['length'] = l['length']
_add_text_elm(entry, self.__atom_summary, 'summary')
for c in self.__atom_category or []:
cat = xml_elem('category', entry, term=c['term'])
if c.get('scheme'):
cat.attrib['scheme'] = c['scheme']
if c.get('label'):
cat.attrib['label'] = c['label']
# Add author elements
for c in self.__atom_contributor or []:
# Atom requires a name. Skip elements without.
if not c.get('name'):
continue
contrib = xml_elem('contributor', entry)
name = xml_elem('name', contrib)
name.text = c.get('name')
if c.get('email'):
email = xml_elem('email', contrib)
email.text = c.get('email')
if c.get('uri'):
uri = xml_elem('uri', contrib)
uri.text = c.get('uri')
if self.__atom_published:
published = xml_elem('published', entry)
published.text = self.__atom_published.isoformat()
if self.__atom_rights:
rights = xml_elem('rights', entry)
rights.text = self.__atom_rights
if self.__atom_source:
source = xml_elem('source', entry)
if self.__atom_source.get('title'):
source_title = xml_elem('title', source)
source_title.text = self.__atom_source['title']
if self.__atom_source.get('link'):
xml_elem('link', source, href=self.__atom_source['link'])
if extensions:
for ext in self.__extensions.values() or []:
if ext.get('atom'):
ext['inst'].extend_atom(entry)
return entry
def rss_entry(self, extensions=True):
'''Create a RSS item and return it.'''
entry = xml_elem('item')
if not (self.__rss_title or
self.__rss_description or
self.__rss_content):
raise ValueError('Required fields not set')
if self.__rss_title:
title = xml_elem('title', entry)
title.text = self.__rss_title
if self.__rss_link:
link = xml_elem('link', entry)
link.text = self.__rss_link
if self.__rss_description and self.__rss_content:
description = xml_elem('description', entry)
description.text = self.__rss_description
XMLNS_CONTENT = 'http://purl.org/rss/1.0/modules/content/'
content = xml_elem('{%s}encoded' % XMLNS_CONTENT, entry)
content.text = CDATA(self.__rss_content['content']) \
if self.__rss_content.get('type', '') == 'CDATA' \
else self.__rss_content['content']
elif self.__rss_description:
description = xml_elem('description', entry)
description.text = self.__rss_description
elif self.__rss_content:
description = xml_elem('description', entry)
description.text = CDATA(self.__rss_content['content']) \
if self.__rss_content.get('type', '') == 'CDATA' \
else self.__rss_content['content']
for a in self.__rss_author or []:
author = xml_elem('author', entry)
author.text = a
if self.__rss_guid.get('guid'):
guid = xml_elem('guid', entry)
guid.text = self.__rss_guid['guid']
permaLink = str(self.__rss_guid.get('permalink', False)).lower()
guid.attrib['isPermaLink'] = permaLink
for cat in self.__rss_category or []:
category = xml_elem('category', entry)
category.text = cat['value']
if cat.get('domain'):
category.attrib['domain'] = cat['domain']
if self.__rss_comments:
comments = xml_elem('comments', entry)
comments.text = self.__rss_comments
if self.__rss_enclosure:
enclosure = xml_elem('enclosure', entry)
enclosure.attrib['url'] = self.__rss_enclosure['url']
enclosure.attrib['length'] = self.__rss_enclosure['length']
enclosure.attrib['type'] = self.__rss_enclosure['type']
if self.__rss_pubDate:
pubDate = xml_elem('pubDate', entry)
pubDate.text = formatRFC2822(self.__rss_pubDate)
if self.__rss_source:
source = xml_elem('source', entry, url=self.__rss_source['url'])
source.text = self.__rss_source['title']
if extensions:
for ext in self.__extensions.values() or []:
if ext.get('rss'):
ext['inst'].extend_rss(entry)
return entry
def title(self, title=None):
'''Get or set the title value of the entry. It should contain a human
readable title for the entry. Title is mandatory for both ATOM and RSS
and should not be blank.
:param title: The new title of the entry.
        :returns: The entry's title.
'''
if title is not None:
self.__atom_title = title
self.__rss_title = title
return self.__atom_title
def id(self, id=None):
'''Get or set the entry id which identifies the entry using a
universally unique and permanent URI. Two entries in a feed can have
the same value for id if they represent the same entry at different
points in time. This method will also set rss:guid with permalink set
to False. Id is mandatory for an ATOM entry.
:param id: New Id of the entry.
:returns: Id of the entry.
'''
if id is not None:
self.__atom_id = id
self.__rss_guid = {'guid': id, 'permalink': False}
return self.__atom_id
def guid(self, guid=None, permalink=False):
'''Get or set the entries guid which is a string that uniquely
identifies the item. This will also set atom:id.
:param guid: Id of the entry.
:param permalink: If this is a permanent identifier for this item
:returns: Id and permalink setting of the entry.
'''
if guid is not None:
self.__atom_id = guid
self.__rss_guid = {'guid': guid, 'permalink': permalink}
return self.__rss_guid
def updated(self, updated=None):
'''Set or get the updated value which indicates the last time the entry
was modified in a significant way.
The value can either be a string which will automatically be parsed or
a datetime.datetime object. In any case it is necessary that the value
include timezone information.
:param updated: The modification date.
:returns: Modification date as datetime.datetime
'''
if updated is not None:
if isinstance(updated, string_types):
updated = dateutil.parser.parse(updated)
if not isinstance(updated, datetime):
raise ValueError('Invalid datetime format')
if updated.tzinfo is None:
raise ValueError('Datetime object has no timezone info')
self.__atom_updated = updated
self.__rss_lastBuildDate = updated
return self.__atom_updated
def author(self, author=None, replace=False, **kwargs):
'''Get or set author data. An author element is a dict containing a
name, an email address and a uri. Name is mandatory for ATOM, email is
mandatory for RSS.
This method can be called with:
- the fields of an author as keyword arguments
- the fields of an author as a dictionary
- a list of dictionaries containing the author fields
An author has the following fields:
- *name* conveys a human-readable name for the person.
- *uri* contains a home page for the person.
- *email* contains an email address for the person.
:param author: Dict or list of dicts with author data.
:param replace: Add or replace old data.
Example::
>>> author({'name':'John Doe', 'email':'jdoe@example.com'})
[{'name':'John Doe','email':'jdoe@example.com'}]
>>> author([{'name': 'Mr. X'}, {'name': 'Max'}])
[{'name':'John Doe','email':'jdoe@example.com'},
{'name':'John Doe'}, {'name':'Max'}]
>>> author(name='John Doe', email='jdoe@example.com', replace=True)
[{'name':'John Doe','email':'jdoe@example.com'}]
'''
if author is None and kwargs:
author = kwargs
if author is not None:
if replace or self.__atom_author is None:
self.__atom_author = []
self.__atom_author += ensure_format(author,
set(['name', 'email', 'uri']),
set())
self.__rss_author = []
for a in self.__atom_author:
if a.get('email'):
if a.get('name'):
self.__rss_author.append('%(email)s (%(name)s)' % a)
else:
self.__rss_author.append('%(email)s' % a)
return self.__atom_author
def content(self, content=None, src=None, type=None):
'''Get or set the content of the entry which contains or links to the
complete content of the entry. Content must be provided for ATOM
entries if there is no alternate link, and should be provided if there
is no summary. If the content is set (not linked) it will also set
rss:description.
:param content: The content of the feed entry.
:param src: Link to the entries content.
:param type: If type is CDATA content would not be escaped.
:returns: Content element of the entry.
'''
if src is not None:
self.__atom_content = {'src': src}
elif content is not None:
self.__atom_content = {'content': content}
self.__rss_content = {'content': content}
if type is not None:
self.__atom_content['type'] = type
self.__rss_content['type'] = type
return self.__atom_content
def link(self, link=None, replace=False, **kwargs):
'''Get or set link data. An link element is a dict with the fields
href, rel, type, hreflang, title, and length. Href is mandatory for
ATOM.
This method can be called with:
- the fields of a link as keyword arguments
- the fields of a link as a dictionary
- a list of dictionaries containing the link fields
A link has the following fields:
- *href* is the URI of the referenced resource (typically a Web page)
- *rel* contains a single link relationship type. It can be a full URI,
or one of the following predefined values (default=alternate):
- *alternate* an alternate representation of the entry or feed, for
example a permalink to the html version of the entry, or the
front page of the weblog.
- *enclosure* a related resource which is potentially large in size
and might require special handling, for example an audio or video
recording.
            - *related* a document related to the entry or feed.
- *self* the feed itself.
- *via* the source of the information provided in the entry.
- *type* indicates the media type of the resource.
- *hreflang* indicates the language of the referenced resource.
- *title* human readable information about the link, typically for
display purposes.
- *length* the length of the resource, in bytes.
RSS only supports one link with nothing but a URL. So for the RSS link
element the last link with rel=alternate is used.
        RSS also supports one enclosure element per entry, which is covered by
        the link element in ATOM feed entries. So for the RSS enclosure element
        the last link with rel=enclosure is used.
:param link: Dict or list of dicts with data.
:param replace: Add or replace old data.
:returns: List of link data.
'''
if link is None and kwargs:
link = kwargs
if link is not None:
if replace or self.__atom_link is None:
self.__atom_link = []
self.__atom_link += ensure_format(
link,
set(['href', 'rel', 'type', 'hreflang', 'title', 'length']),
set(['href']),
{'rel': ['alternate', 'enclosure', 'related', 'self', 'via']},
{'rel': 'alternate'})
            # RSS only needs one URL; the loop below keeps the last alternate link:
for l in self.__atom_link:
if l.get('rel') == 'alternate':
self.__rss_link = l['href']
elif l.get('rel') == 'enclosure':
self.__rss_enclosure = {'url': l['href']}
self.__rss_enclosure['type'] = l.get('type')
self.__rss_enclosure['length'] = l.get('length') or '0'
# return the set with more information (atom)
return self.__atom_link
def summary(self, summary=None, type=None):
'''Get or set the summary element of an entry which conveys a short
summary, abstract, or excerpt of the entry. Summary is an ATOM only
element and should be provided if there either is no content provided
for the entry, or that content is not inline (i.e., contains a src
attribute), or if the content is encoded in base64. This method will
also set the rss:description field if it wasn't previously set or
contains the old value of summary.
:param summary: Summary of the entries contents.
:returns: Summary of the entries contents.
'''
if summary is not None:
# Replace the RSS description with the summary if it was the
# summary before. Not if it is the description.
if not self.__rss_description or (
self.__atom_summary and
self.__rss_description == self.__atom_summary.get("summary")
):
self.__rss_description = summary
self.__atom_summary = {'summary': summary}
if type is not None:
self.__atom_summary['type'] = type
return self.__atom_summary
def description(self, description=None, isSummary=False):
'''Get or set the description value which is the item synopsis.
Description is an RSS only element. For ATOM feeds it is split in
summary and content. The isSummary parameter can be used to control
which ATOM value is set when setting description.
:param description: Description of the entry.
:param isSummary: If the description should be used as content or
summary.
:returns: The entries description.
'''
if description is not None:
self.__rss_description = description
if isSummary:
self.__atom_summary = description
else:
self.__atom_content = {'content': description}
return self.__rss_description
def category(self, category=None, replace=False, **kwargs):
'''Get or set categories that the entry belongs to.
This method can be called with:
- the fields of a category as keyword arguments
- the fields of a category as a dictionary
- a list of dictionaries containing the category fields
A categories has the following fields:
- *term* identifies the category
- *scheme* identifies the categorization scheme via a URI.
- *label* provides a human-readable label for display
If a label is present it is used for the RSS feeds. Otherwise the term
is used. The scheme is used for the domain attribute in RSS.
:param category: Dict or list of dicts with data.
:param replace: Add or replace old data.
:returns: List of category data.
'''
if category is None and kwargs:
category = kwargs
if category is not None:
if replace or self.__atom_category is None:
self.__atom_category = []
self.__atom_category += ensure_format(
category,
set(['term', 'scheme', 'label']),
set(['term']))
# Map the ATOM categories to RSS categories. Use the atom:label as
# name or if not present the atom:term. The atom:scheme is the
# rss:domain.
self.__rss_category = []
for cat in self.__atom_category:
rss_cat = {}
rss_cat['value'] = cat.get('label', cat['term'])
if cat.get('scheme'):
rss_cat['domain'] = cat['scheme']
self.__rss_category.append(rss_cat)
return self.__atom_category
def contributor(self, contributor=None, replace=False, **kwargs):
'''Get or set the contributor data of the feed. This is an ATOM only
value.
This method can be called with:
- the fields of an contributor as keyword arguments
- the fields of an contributor as a dictionary
- a list of dictionaries containing the contributor fields
An contributor has the following fields:
- *name* conveys a human-readable name for the person.
- *uri* contains a home page for the person.
- *email* contains an email address for the person.
:param contributor: Dictionary or list of dictionaries with contributor
data.
:param replace: Add or replace old data.
:returns: List of contributors as dictionaries.
'''
if contributor is None and kwargs:
contributor = kwargs
if contributor is not None:
if replace or self.__atom_contributor is None:
self.__atom_contributor = []
self.__atom_contributor += ensure_format(
contributor, set(['name', 'email', 'uri']), set(['name']))
return self.__atom_contributor
def published(self, published=None):
'''Set or get the published value which contains the time of the initial
creation or first availability of the entry.
The value can either be a string which will automatically be parsed or
a datetime.datetime object. In any case it is necessary that the value
include timezone information.
:param published: The creation date.
:returns: Creation date as datetime.datetime
'''
if published is not None:
if isinstance(published, string_types):
published = dateutil.parser.parse(published)
if not isinstance(published, datetime):
raise ValueError('Invalid datetime format')
if published.tzinfo is None:
raise ValueError('Datetime object has no timezone info')
self.__atom_published = published
self.__rss_pubDate = published
return self.__atom_published
def pubDate(self, pubDate=None):
'''Get or set the pubDate of the entry which indicates when the entry
was published. This method is just another name for the published(...)
method.
'''
return self.published(pubDate)
def pubdate(self, pubDate=None):
'''Get or set the pubDate of the entry which indicates when the entry
was published. This method is just another name for the published(...)
method.
pubdate(…) is deprecated and may be removed in feedgen ≥ 0.8. Use
pubDate(…) instead.
'''
warnings.warn('pubdate(…) is deprecated and may be removed in feedgen '
'≥ 0.8. Use pubDate(…) instead.')
return self.published(pubDate)
def rights(self, rights=None):
'''Get or set the rights value of the entry which conveys information
about rights, e.g. copyrights, held in and over the entry. This ATOM
value will also set rss:copyright.
:param rights: Rights information of the feed.
:returns: Rights information of the feed.
'''
if rights is not None:
self.__atom_rights = rights
return self.__atom_rights
def comments(self, comments=None):
'''Get or set the value of comments which is the URL of the comments
page for the item. This is a RSS only value.
:param comments: URL to the comments page.
:returns: URL to the comments page.
'''
if comments is not None:
self.__rss_comments = comments
return self.__rss_comments
def source(self, url=None, title=None):
'''Get or set the source for the current feed entry.
Note that ATOM feeds support a lot more sub elements than title and URL
(which is what RSS supports) but these are currently not supported.
Patches are welcome.
:param url: Link to the source.
:param title: Title of the linked resource
:returns: Source element as dictionaries.
'''
if url is not None and title is not None:
self.__rss_source = {'url': url, 'title': title}
self.__atom_source = {'link': url, 'title': title}
return self.__rss_source
def enclosure(self, url=None, length=None, type=None):
'''Get or set the value of enclosure which describes a media object
that is attached to the item. This is a RSS only value which is
represented by link(rel=enclosure) in ATOM. ATOM feeds can furthermore
contain several enclosures while RSS may contain only one. That is why
this method, if repeatedly called, will add more than one enclosures to
the feed. However, only the last one is used for RSS.
:param url: URL of the media object.
:param length: Size of the media in bytes.
:param type: Mimetype of the linked media.
:returns: Data of the enclosure element.
'''
if url is not None:
self.link(href=url, rel='enclosure', type=type, length=length)
return self.__rss_enclosure
def ttl(self, ttl=None):
'''Get or set the ttl value. It is an RSS only element. ttl stands for
time to live. It's a number of minutes that indicates how long a
channel can be cached before refreshing from the source.
:param ttl: Integer value representing the time to live.
:returns: Time to live of of the entry.
'''
if ttl is not None:
self.__rss_ttl = int(ttl)
return self.__rss_ttl
def load_extension(self, name, atom=True, rss=True):
'''Load a specific extension by name.
:param name: Name of the extension to load.
:param atom: If the extension should be used for ATOM feeds.
:param rss: If the extension should be used for RSS feeds.
'''
# Check loaded extensions
if not isinstance(self.__extensions, dict):
self.__extensions = {}
if name in self.__extensions.keys():
raise ImportError('Extension already loaded')
# Load extension
extname = name[0].upper() + name[1:] + 'EntryExtension'
try:
supmod = __import__('feedgen.ext.%s_entry' % name)
extmod = getattr(supmod.ext, name + '_entry')
except ImportError:
# Use FeedExtension module instead
supmod = __import__('feedgen.ext.%s' % name)
extmod = getattr(supmod.ext, name)
ext = getattr(extmod, extname)
self.register_extension(name, ext, atom, rss)
def register_extension(self, namespace, extension_class_entry=None,
atom=True, rss=True):
'''Register a specific extension by classes to a namespace.
:param namespace: namespace for the extension
:param extension_class_entry: Class of the entry extension to load.
:param atom: If the extension should be used for ATOM feeds.
:param rss: If the extension should be used for RSS feeds.
'''
# Check loaded extensions
# `load_extension` ignores the "Extension" suffix.
if not isinstance(self.__extensions, dict):
self.__extensions = {}
if namespace in self.__extensions.keys():
raise ImportError('Extension already loaded')
if not extension_class_entry:
raise ImportError('No extension class')
extinst = extension_class_entry()
setattr(self, namespace, extinst)
# `load_extension` registry
self.__extensions[namespace] = {
'inst': extinst,
'extension_class_entry': extension_class_entry,
'atom': atom,
'rss': rss
}
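
A minimal usage sketch for the class above; standalone construction is assumed for illustration (in feedgen proper, entries usually come from `FeedGenerator.add_entry`), and the id and URL are placeholders:

```python
from lxml import etree

fe = FeedEntry()
fe.id('urn:uuid:00000000-0000-0000-0000-000000000000')  # placeholder id
fe.title('Hello feed')
fe.link(href='https://example.com/post/1', rel='alternate')
fe.content('Body text', type='text')  # also populates the RSS description
print(etree.tostring(fe.atom_entry(), pretty_print=True).decode())
print(etree.tostring(fe.rss_entry(), pretty_print=True).decode())
```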
| 40.982409 | 80 | 0.589777 | 28,398 | 0.93729 | 0 | 0 | 0 | 0 | 0 | 0 | 14,761 | 0.487194 |

| 6641ac84a38502d2627abdd02f07c01a4087638b | 527 | py | Python | return_type_test.py | leeamen/eva | 311c59d24dec74359a5e15ff5b6b50c2c51dae5b | ["Apache-2.0"] | null | null | null | return_type_test.py | leeamen/eva | 311c59d24dec74359a5e15ff5b6b50c2c51dae5b | ["Apache-2.0"] | null | null | null | return_type_test.py | leeamen/eva | 311c59d24dec74359a5e15ff5b6b50c2c51dae5b | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
#coding:utf8
import mybaselib
import logging
import jieba
import jieba.analyse
import numpy as np
import csv
import sys
import stat
import os
reload(sys)
sys.setdefaultencoding('utf-8')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
if __name__ == '__main__':
fname = sys.argv[1]
logger.debug(fname)
return_rules = mybaselib.ReturnRules()
return_rules.Load(fname)
    sentence = '向日葵餐厅秩怎么样'  # test sentence, roughly: "How is the Sunflower Restaurant?"
return_type = return_rules.GetReturnType(sentence)
logger.debug('%s', return_type)
| 18.821429 | 52 | 0.766603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.144954 |

| 66421e56797abbadff0d086947c6577be6113573 | 3,165 | py | Python | tools/reduce_dimension.py | AtsushiHashimoto/exp_idc | 9b79a02d3a8fc3950b5fe775e75353b37dc612cc | ["BSD-2-Clause"] | null | null | null | tools/reduce_dimension.py | AtsushiHashimoto/exp_idc | 9b79a02d3a8fc3950b5fe775e75353b37dc612cc | ["BSD-2-Clause"] | null | null | null | tools/reduce_dimension.py | AtsushiHashimoto/exp_idc | 9b79a02d3a8fc3950b5fe775e75353b37dc612cc | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
DESCRIPTION = "This script reduces the dimensionality of input data."
#from memory_profiler import profile
import numpy as np
import argparse
from sklearn.decomposition import PCA
from sklearn.decomposition import NMF
import logging
import sys
from os.path import dirname
sys.path.append(dirname(__file__))
from my_target_counter import TargetCounter
logger = logging.getLogger(__file__)
#@profile
def main(args):
src_dir = args.src_dir
dest_dir = args.dest_dir
    src_pat = r"X_(\d{3}).csv$"  # raw string avoids the invalid "\d" escape warning
tar_template = "X_%s.csv"
tc=TargetCounter(src_pat,tar_template,src_dir,dest_dir)
target_ids,src_files = tc.listup_targets()
n_targets = len(target_ids)
if args.count_targets:
print(len(target_ids))
sys.exit()
if n_targets==0:
logger.warn("There are no before-process src files in '%s'"%src_dir)
sys.exit()
if args.algorithm == "pca":
model = PCA(args.dimensions)
elif args.algorithm == "nmf":
model = NMF(args.dimensions,max_iter=args.max_iter)
else:
logger.warn("Unknown algorithm '%s'"%args.algorithm)
sys.exit()
for id,src_file in zip(target_ids,src_files):
dest_file = "%s/%s"%(args.dest_dir,tc.id2destfile(id))
#print(id,src_file,dest_file)
X=np.loadtxt(src_file,delimiter=",")
model.fit(X)
X_ = model.transform(X)
np.savetxt(dest_file,X_,delimiter=",")
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('dimensions', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=int, \
choices=None, \
help='Dimensionality of target projection subspace', \
metavar=None)
parser.add_argument('src_dir', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=str, \
choices=None, \
help='Directory path where the source data are located.', \
metavar=None)
parser.add_argument('dest_dir', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=str, \
choices=None, \
help='Directory path where the dimension-reduced data will be located.', \
metavar=None)
parser.add_argument('-a', '--algorithm', \
action='store', \
nargs='?', \
const=None, \
default='pca', \
type=str, \
choices=None, \
help='Algorithm for dimension reduction. pca|nmf are supported (default: pca)', \
metavar=None)
parser.add_argument('-M', '--max_iter', \
action='store', \
nargs='?', \
const=None, \
default=1000, \
type=int, \
choices=None, \
help='Maximum iteration number. (default: 1000)', \
metavar=None)
parser.add_argument('--count_targets',\
action="store_true", default=False, help='count processing targets, and exit.')
if __name__ == '__main__':
args = parser.parse_args()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
logger.addHandler(sh)
main(args)
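
An end-to-end example under stated assumptions (the directory names and the 100x10 random matrix are invented): create one input file matching `src_pat`, then invoke the script from a shell.

```python
import os
import numpy as np

os.makedirs("src", exist_ok=True)
os.makedirs("dest", exist_ok=True)
np.savetxt("src/X_000.csv", np.random.rand(100, 10), delimiter=",")
# Then, from a shell:
#   python tools/reduce_dimension.py 3 src dest -a pca
# which writes the 3-dimensional PCA projection to dest/X_000.csv.
```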
| 27.521739 | 89 | 0.618957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 745 | 0.235387 |

| 66436d46f2399420648d1ae5afad1941d5ac5dfe | 981 | py | Python | python/PythonForNetworkEngineers/Lesson3/exercise4.py | ModestTG/scripts | 91517b1238267185852816f73e7f7221012faa8b | ["MIT"] | null | null | null | python/PythonForNetworkEngineers/Lesson3/exercise4.py | ModestTG/scripts | 91517b1238267185852816f73e7f7221012faa8b | ["MIT"] | null | null | null | python/PythonForNetworkEngineers/Lesson3/exercise4.py | ModestTG/scripts | 91517b1238267185852816f73e7f7221012faa8b | ["MIT"] | null | null | null |
from __future__ import print_function, unicode_literals, division
arp_table = [('10.220.88.1', '0062.ec29.70fe'),
('10.220.88.20', 'c89c.1dea.0eb6'),
('10.220.88.21', '1c6a.7aaf.576c'),
('10.220.88.28', '5254.aba8.9aea'),
('10.220.88.29', '5254.abbe.5b7b'),
('10.220.88.30', '5254.ab71.e119'),
('10.220.88.32', '5254.abc7.26aa'),
('10.220.88.33', '5254.ab3a.8d26'),
('10.220.88.35', '5254.abfb.af12'),
('10.220.88.37', '0001.00ff.0001'),
('10.220.88.38', '0002.00ff.0001'),
('10.220.88.39', '6464.9be8.08c8'),
('10.220.88.40', '001c.c4bf.826a'),
('10.220.88.41', '001b.7873.5634')]
i = 0
while i < len(arp_table):
output = ""
mac_parts = arp_table[i][1].split(".")
for element in mac_parts:
output += element[0:2].upper() + "." + element[2:4].upper() + "."
print(output[:-1])
i += 1
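
An equivalent, more idiomatic formulation of the same loop, shown for comparison (same output, unpacking the tuples directly instead of indexing):

```python
for _ip, mac in arp_table:
    digits = mac.replace(".", "")  # 12 hex digits
    print(".".join(digits[i:i + 2].upper() for i in range(0, len(digits), 2)))
```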
| 39.24 | 73 | 0.489297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.438328 |

| 6649fd3abee8b975b14bebfd695b9dead71ff67f | 834 | py | Python | coursebuilder/tests/test_views.py | gcallah/CourseBuilder | 68f3d175aba43e62b20e6c1c4c6a604601c2c60a | ["Apache-2.0"] | 2 | 2019-10-07T23:03:54.000Z | 2019-10-15T23:18:08.000Z | coursebuilder/tests/test_views.py | gcallah/CourseBuilder | 68f3d175aba43e62b20e6c1c4c6a604601c2c60a | ["Apache-2.0"] | 3 | 2019-12-11T15:49:38.000Z | 2021-06-10T22:04:41.000Z | coursebuilder/tests/test_views.py | gcallah/CourseBuilder | 68f3d175aba43e62b20e6c1c4c6a604601c2c60a | ["Apache-2.0"] | 1 | 2019-12-02T06:08:11.000Z | 2019-12-02T06:08:11.000Z |
from django.test import TestCase
from django.urls import reverse
class CourseBuilderViewTest(TestCase):
def test_landing_page(self):
url = reverse("coursebuilder:landing_page")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "landing_page.html")
def test_about_page(self):
url = reverse("coursebuilder:dynamic_about")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "dynamic_about.html")
def test_glossary_page(self):
url = reverse("coursebuilder:dynamic_gloss")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "dynamic_gloss.html")
| 36.26087 | 63 | 0.711031 | 766 | 0.918465 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.173861 |

| 664a09ed3a949b821074a7ea5edcf32f555375e2 | 336 | py | Python | LC/249.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | ["MIT"] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/249.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | ["MIT"] | null | null | null | LC/249.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | ["MIT"] | null | null | null |
import collections  # implicit on LeetCode; added so the snippet runs standalone
class Solution(object):
def groupStrings(self, strings):
"""
:type strings: List[str]
:rtype: List[List[str]]
"""
d=collections.defaultdict(list)
for s in strings:
d[tuple([((ord(s[i])-ord(s[0]))%26) for i in range(len(s))])].append(s)
return [d[key] for key in d]
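
A quick check (inputs invented): strings whose letters are the same cyclic shift apart map to the same difference tuple and therefore group together.

```python
# "abc", "bcd", "xyz" -> (0, 1, 2); "az", "ba" -> (0, 25); "a", "z" -> (0,)
print(Solution().groupStrings(["abc", "bcd", "xyz", "az", "ba", "a", "z"]))
# [['abc', 'bcd', 'xyz'], ['az', 'ba'], ['a', 'z']]
```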
| 33.6 | 83 | 0.529762 | 336 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.238095 |

| 664b6b48aac6edc74751d03dafd588e100ea7322 | 2,460 | py | Python | cfcc/cogs/stats.py | denuoweb/tipbot-v2 | bd25205fd4a0440ffe11b7e94905881d4c799282 | ["Apache-2.0"] | 2 | 2020-03-05T00:08:38.000Z | 2020-12-21T04:34:31.000Z | cfcc/cogs/stats.py | denuoweb/tipbot-v2 | bd25205fd4a0440ffe11b7e94905881d4c799282 | ["Apache-2.0"] | null | null | null | cfcc/cogs/stats.py | denuoweb/tipbot-v2 | bd25205fd4a0440ffe11b7e94905881d4c799282 | ["Apache-2.0"] | 2 | 2020-08-30T23:33:20.000Z | 2022-01-28T22:52:58.000Z |
import discord, os
from discord.ext import commands
from utils import checks, output
from aiohttp import ClientSession
import urllib.request
import json
class Stats:
def __init__(self, bot: discord.ext.commands.Bot):
self.bot = bot
@commands.command()
async def stats(self, amount=1):
"""
Show stats about HTMLCOIN
"""
headers={"user-agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36"}
try:
async with ClientSession() as session:
async with session.get("https://api.coingecko.com/api/v3/coins/htmlcoin", headers=headers) as response:
responseRaw = await response.read()
                    priceData = json.loads(responseRaw)
                    # /coins/{id} returns a single JSON object, not a list; iterating it
                    # yields key strings, so read the USD price from market_data directly.
                    price_usd = priceData["market_data"]["current_price"]["usd"]
                    embed = discord.Embed(color=0x00FF00)
                    embed.set_author(name='HTMLCOIN Coin Information', icon_url="i.ibb.co/GkBSpV3/logo-icon-no-txt-32x32.png")
                    embed.add_field(name="current_price", value="${}".format(price_usd))
                    #embed.add_field(name="Price (BTC)", value="{} BTC".format(item['btc']))
                    #embed.add_field(name='\Altmarkets',value='\altmarkets')
                    #embed.add_field(name="Volume (USD)", value="${}".format(item['24h_volume_usd']))
                    #embed.add_field(name="Market Cap", value="${}".format(item['market_cap_usd']))
                    #embed.add_field(name='\u200b',value='\u200b')
                    #embed.add_field(name="% 1h", value="{}%".format(item['percent_change_1h']))
                    #embed.add_field(name="% 24h", value="{}%".format(item['percent_change_24h']))
                    #embed.add_field(name="% 7d", value="{}%".format(item['percent_change_7d']))
                    #embed.add_field(name="Circulating Supply", value="{} HTMLCOIN".format(item['available_supply']))
                    #embed.add_field(name="Total Supply", value="{} HTMLCOIN".format(item['total_supply']))
                    embed.set_footer(text="https://www.coingecko.com/en/coins/htmlcoin", icon_url="i.ibb.co/GkBSpV3/logo-icon-no-txt-32x32.png")
                    await self.bot.say(embed=embed)
        except Exception:
            await self.bot.say(":warning: Error fetching prices!")
def setup(bot):
bot.add_cog(Stats(bot))
| 54.666667 | 148 | 0.579268 | 2,259 | 0.918293 | 0 | 0 | 2,163 | 0.879268 | 2,139 | 0.869512 | 1,185 | 0.481707 |

| 664d2d03cdb7e33d3656413c2db3c4e8abff4e55 | 996 | py | Python | ssaw/headquarters.py | vavalomi/ssaw | 30172f22e8703f29b1abc159e52e4090960207be | ["MIT"] | 9 | 2019-04-06T09:36:20.000Z | 2022-01-18T18:25:37.000Z | ssaw/headquarters.py | vavalomi/ssaw | 30172f22e8703f29b1abc159e52e4090960207be | ["MIT"] | 4 | 2020-06-15T01:36:37.000Z | 2021-12-02T06:51:37.000Z | ssaw/headquarters.py | vavalomi/ssaw | 30172f22e8703f29b1abc159e52e4090960207be | ["MIT"] | 3 | 2018-04-09T18:17:54.000Z | 2022-01-14T08:38:02.000Z |
from requests import Session
from .__about__ import __title__, __version__
from .models import Version
class Client(object):
"""Initializes the API client
:param url: URL of the headquarters app
:param api_user: API user name
:param api_password: API user password
:param workspace: Name of the workspace. If `None`, "primary" will be assumed
"""
def __init__(self, url: str, api_user: str, api_password: str, workspace: str = "primary"):
session = Session()
session.auth = (api_user, api_password)
signature = "python-{}/{}".format(__title__, __version__)
session.headers.update({"User-Agent": signature})
self.baseurl = url.rstrip("/")
self.session = session
self.workspace = workspace
@property
def version(self) -> Version:
res = self.session.get("{}/.version".format(self.baseurl))
        if res.status_code == 200:
            return Version(res.text)
        # any non-200 response falls through and returns None
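
A minimal connection sketch (the URL and credentials are placeholders, not a real endpoint; reading `version` performs an HTTP request):

```python
client = Client("https://hq.example.org", "api_user", "api_password")
print(client.version)  # Version object on HTTP 200, otherwise None
```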
| 32.129032 | 96 | 0.633534 | 880 | 0.883534 | 0 | 0 | 186 | 0.186747 | 0 | 0 | 299 | 0.300201 |

| 664d6f4bea48853ff584b2801386ca40d6d04218 | 35 | py | Python | DeepChecker/__init__.py | Fixy-TR/DeepChecker | 99bf6e133890aa44711536723ad9acf16314830d | ["MIT"] | 21 | 2020-08-05T20:52:35.000Z | 2022-02-18T19:27:21.000Z | DeepChecker/__init__.py | umitylmz/DeepChecker | 99bf6e133890aa44711536723ad9acf16314830d | ["MIT"] | null | null | null | DeepChecker/__init__.py | umitylmz/DeepChecker | 99bf6e133890aa44711536723ad9acf16314830d | ["MIT"] | 6 | 2020-08-05T14:17:12.000Z | 2022-03-03T05:52:28.000Z |
from DeepChecker.Checkers import *
| 17.5 | 34 | 0.828571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |

| 664f4b3b2027544507e9b37e6ef7413af737a3d5 | 231 | py | Python | 1101-1200/1163-Distribute Candies/1163-Distribute Candies.py | jiadaizhao/LintCode | a8aecc65c47a944e9debad1971a7bc6b8776e48b | ["MIT"] | 77 | 2017-12-30T13:33:37.000Z | 2022-01-16T23:47:08.000Z | 1101-1200/1163-Distribute Candies/1163-Distribute Candies.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | ["MIT"] | 1 | 2018-05-14T14:15:40.000Z | 2018-05-14T14:15:40.000Z | 1101-1200/1163-Distribute Candies/1163-Distribute Candies.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | ["MIT"] | 39 | 2017-12-07T14:36:25.000Z | 2022-03-10T23:05:37.000Z |
class Solution:
"""
@param candies: a list of integers
    @return: return an integer
"""
def distributeCandies(self, candies):
# write your code here
return min(len(set(candies)), len(candies) // 2)
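
Two quick checks (candy lists invented): the answer is the number of distinct kinds, capped at half the candies.

```python
print(Solution().distributeCandies([1, 1, 2, 2, 3, 3]))  # 3 kinds, half is 3 -> 3
print(Solution().distributeCandies([1, 1, 1, 1]))        # 1 kind, half is 2 -> 1
```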
| 25.666667 | 56 | 0.61039 | 230 | 0.995671 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.441558 |

| 66511045abd4f920bda913b71a97de73ca69c525 | 3,496 | py | Python | commongroups/tests/test_cmgroup.py | akokai/metacamel | d2a060e5e3c908dcedd3de7e46497f8802fd8bde | ["MIT"] | 1 | 2020-01-29T04:39:16.000Z | 2020-01-29T04:39:16.000Z | commongroups/tests/test_cmgroup.py | akokai/metacamel | d2a060e5e3c908dcedd3de7e46497f8802fd8bde | ["MIT"] | 12 | 2016-06-18T02:54:13.000Z | 2016-07-13T05:17:02.000Z | commongroups/tests/test_cmgroup.py | akokai/commongroups-archived | d2a060e5e3c908dcedd3de7e46497f8802fd8bde | ["MIT"] | 1 | 2016-06-20T17:33:11.000Z | 2016-06-20T17:33:11.000Z |
# -*- coding: utf-8 -*-
"""Unit tests for CMGroup class."""
import os
import shutil
from itertools import islice
from commongroups.env import CommonEnv
from commongroups import cmgroup as cmg
# Locate the test params to use.
_CUR_PATH = os.path.abspath(os.path.dirname(__file__))
PARAMS_JSON = os.path.join(_CUR_PATH, 'params.json')
PARAMS_LIST = cmg.params_from_json(PARAMS_JSON)
# This creates test environment directories on filesystem as a side effect.
env = CommonEnv('test')
# def test_cmgroup():
# for params in PARAMS_LIST:
# group = cmg.CMGroup(params, env)
# assert group.materialid == params['materialid']
# assert group.name == params['name']
# def test_params():
# # Test saving parameters to file when starting a search.
# group = cmg.CMGroup(PARAMS_LIST[0], env)
# assert 'current_update' in group.params
# group.init_pubchem_search()
# params = group.get_params()
# assert params['current_update'] is not None
# # Test initialize new group with existing params file.
# new_group = cmg.CMGroup(PARAMS_LIST[0], env)
# new_params = new_group.params
# assert new_params['current_update'] is not None
# def test_clear_data():
# group = cmg.CMGroup(PARAMS_LIST[3], env)
# shutil.copy(os.path.join(_CUR_PATH, 'cids.json'), group._cids_file)
# shutil.copy(os.path.join(_CUR_PATH, 'cpds.jsonl'), group._compounds_file)
# assert len(group.get_compounds()) == 3
# assert len(group.get_returned_cids()) == 5
# group.clear_data()
# assert group.get_compounds() == []
# assert group.get_returned_cids() == []
# def test_resume_update():
# # Test initialize group with existing saved _cids_file (search results)
# # and existing but incomplete _compounds_file (data/updates).
# # Calling update_from_cids() should seamlessly resume the update.
# group = cmg.CMGroup(PARAMS_LIST[3], env)
# shutil.copy(os.path.join(_CUR_PATH, 'cids.json'), group._cids_file)
# shutil.copy(os.path.join(_CUR_PATH, 'cpds.jsonl'), group._compounds_file)
# group.update_from_cids()
# assert len(group.get_compounds()) == 5
# # Test what happens when _compounds_file contains CIDS that are
# # not listed in the _cids_file. It should append compounds.
# shutil.copy(os.path.join(_CUR_PATH, 'cpds_other.jsonl'),
# group._compounds_file)
# group.update_from_cids()
# assert len(group.get_compounds()) == 8
# # Test what happens when _compounds_file is absent. In this case
# # it should end up containing all the CIDs in _cids_file.
# # group.clear_data()
# # shutil.copy(os.path.join(_CUR_PATH, 'cids.json'), group._cids_file)
# # group.update_from_cids()
# # assert len(group.get_compounds()) == 5
# def test_pubchem_update():
# group = cmg.CMGroup(PARAMS_LIST[0], env)
# # To save time, only retrieve the first 5 CIDs.
# # TODO: Ideally we should also test without any `listkey_count`,
# # i.e. with a search that returns very few results.
# group.pubchem_update(listkey_count=5)
# assert len(group.get_compounds()) > 0
# def test_batch_cmg_search():
# groups = list(islice(cmg.cmgs_from_json(PARAMS_JSON, env), None))
# # To save time, only retrieve the first 3 CIDs.
# cmg.batch_cmg_search(groups, wait=30, listkey_count=3)
# for group in groups:
# assert len(group.get_compounds()) > 0
# assert group.get_compounds()[0]['IUPAC_name'] is not None
| 37.191489 | 79 | 0.687357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,119 | 0.892162 |

| 6651c8c1061de1521ef9b16fc164b0b76ba7d323 | 5,563 | py | Python | src/reinigung.py | infiniteloop-youth/reinigung | 1a16603ab97989b4180d51813c203ee81d9d7f5e | ["MIT"] | null | null | null | src/reinigung.py | infiniteloop-youth/reinigung | 1a16603ab97989b4180d51813c203ee81d9d7f5e | ["MIT"] | null | null | null | src/reinigung.py | infiniteloop-youth/reinigung | 1a16603ab97989b4180d51813c203ee81d9d7f5e | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- Coding: utf-8 -*-
from argparse import ArgumentParser
from os import environ, makedirs
from datetime import datetime
from os.path import abspath, join, dirname, exists, splitext
from time import time
from dotenv import load_dotenv
import slack
DIR = dirname(dirname(abspath(__file__)))
load_dotenv(join(DIR, ".env"))
ADMIN_SLACK_TOKEN = environ.get("ADMIN_SLACK_TOKEN")
POST_SLACK_TOKEN = environ.get("POST_SLACK_TOKEN")
TARGET_CHANNEL = environ.get("TARGET_CHANNEL")
TARGET_AGO = int(environ.get("TARGET_AGO"))
DOWNLOAD_PATH = environ.get("DOWNLOAD_PATH")
REPORT_CHANNEL = environ.get("REPORT_CHANNEL")
def normalization(text, char):
"""
    Replace symbols that cannot be used in filenames
"""
symbols = list(range(0, 33)) + [34, 39] + list(range(42, 48)) + list(range(58, 64)) + list(range(91, 95)) + [96] + list(range(123, 128))
for symbol in symbols:
text = text.replace(chr(symbol), char)
return text
def main(is_dry, is_all):
"""
Knock knock
"""
started_at = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
delete_to = int(time() - TARGET_AGO)
print("Reinigung")
print("started_at:"+started_at)
if is_dry:
print("Dry run")
    # Read settings
    is_all = TARGET_CHANNEL == ""  # NOTE: overrides the is_all argument when TARGET_CHANNEL is empty
print("All channel" if is_all else "")
all_channels = slack.get_channels(ADMIN_SLACK_TOKEN, exclude_archived=True, exclude_members=True)["channels"]
users = slack.get_users(ADMIN_SLACK_TOKEN)["members"]
# set find range
if is_all:
channels = [channel for channel in all_channels if channel["is_channel"] and not channel["is_private"]]
else:
channels = [channel for channel in all_channels if channel["name"] == TARGET_CHANNEL]
report_log = ""
total_count = 0
# in channel
for channel in channels:
channel_count = 0
report_log += "#{}({}) - {}\n\n".format(
channel["name"],
channel["id"],
channel["purpose"]["value"]
)
folder_path = abspath(join(DIR, DOWNLOAD_PATH, channel["name"]))
print("in #{}".format(channel["name"]))
# make folder
if not exists(folder_path) and not is_dry:
makedirs(folder_path)
files = slack.get_files(
ADMIN_SLACK_TOKEN,
channel=channel["id"],
ts_to=delete_to
)["files"]
# in file
for file in files:
# make file name
file_name = "{}-{}-{}-{}{}-{}{}".format(
datetime.fromtimestamp(int(file["timestamp"])).strftime("%Y%m%d%H%M%S"),
file["id"],
[user["name"] for user in users if user["id"] == file["user"]][0],
normalization(file["title"], "_")[:10],
"-"+normalization(file["initial_comment"]["comment"], "_")[:30] if "initial_comment" in file else "",
normalization(splitext(file["name"])[0], "_"),
splitext(file["name"])[1]
)
file_path = abspath(join(folder_path, file_name))
if not is_dry:
# download
file_content = slack.get_file(ADMIN_SLACK_TOKEN, file["id"])
with open(file_path, "wb") as save_file:
save_file.write(file_content)
# delete
deleted = slack.delete_file(ADMIN_SLACK_TOKEN, file["id"])
# increment channel counter
channel_count += 1
# add log
report_log += "- {} @{} {} - {} {}\n - {}\n\n".format(
datetime.fromtimestamp(int(file["timestamp"])).strftime("%Y/%m/%d %H:%M:%S"),
[user["name"] for user in users if user["id"] == file["user"]][0],
file["title"],
file["initial_comment"]["comment"].replace("\n","") if "initial_comment" in file else "",
file["name"],
file_name
)
print("- {}".format(file_path))
# increment total counter
total_count += channel_count
report_log += "Total : {} files\n\n".format(channel_count)
finished_at = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
# make great report
report = """
Reinigung - Auto clean up slack files
===== Settings report =====
All delete? : {}
Dry run? : {}
Target channel : {}
Delete before : {}
Started at : {}
Finished at : {}
===== Running report =====
Total delete : {} files
===== Running log =====
{}
===== End of report ======
""".format(
"Yes" if is_all else "No",
"Yes" if is_dry else "No",
TARGET_CHANNEL,
datetime.fromtimestamp(delete_to).strftime("%Y/%m/%d %H:%M:%S"),
started_at,
finished_at,
total_count,
report_log
)
slack.post_file(
POST_SLACK_TOKEN,
report,
channels=slack.get_channel_id(ADMIN_SLACK_TOKEN, REPORT_CHANNEL),
filename="reinigung-report-{}.txt".format(datetime.now().strftime("%Y-%m-%d-%H-%M-%S")),
filetype="text",
title="Reinigung report"
)
print("finished_at:"+finished_at)
print("done")
if __name__ == "__main__":
# Parse arguments
parser = ArgumentParser(description="Auto clean up slack files")
parser.add_argument("-d", "--dry", help="Testing mode", action="store_true")
parser.add_argument("-a", "--all", help="Remove in all channel", action="store_true")
args = parser.parse_args()
# Call main function
main(is_dry=args.dry, is_all=args.all)
| 30.398907 | 140 | 0.580262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,539 | 0.276649 |

| 6654074dd5ec2877f4151dd8acf9472e6d0390e3 | 294 | py | Python | test.py | paleherring/paleworld | 6097fc062238cd3701ea468ea750c7a1d6b70dca | ["Unlicense"] | null | null | null | test.py | paleherring/paleworld | 6097fc062238cd3701ea468ea750c7a1d6b70dca | ["Unlicense"] | null | null | null | test.py | paleherring/paleworld | 6097fc062238cd3701ea468ea750c7a1d6b70dca | ["Unlicense"] | null | null | null |
import tensorflow as tf
# Create TensorFlow object called tensor
hello_constant = tf.constant('Hello World!')
with tf.Session() as sess:
# Run the tf.constant operation in the session
output = sess.run(hello_constant)
print(output.decode())  # decode the returned byte string to str
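# Note: tf.Session is TensorFlow 1.x API. Under TensorFlow 2.x (an assumption
# about the environment, not part of this snippet) the eager equivalent needs
# no session:
#   hello_constant = tf.constant('Hello World!')
#   print(hello_constant.numpy().decode())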
| 29.4
| 57
| 0.721088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 133
| 0.452381
|
665411990ceedf780468299b1765689361fdab45
| 348
|
py
|
Python
|
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QClipboardEvent.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | 1
|
2020-04-20T02:27:20.000Z
|
2020-04-20T02:27:20.000Z
|
resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QClipboardEvent.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QClipboardEvent.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QClipboardEvent(__PySide_QtCore.QEvent):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
| 19.333333
| 65
| 0.729885
| 138
| 0.396552
| 0
| 0
| 0
| 0
| 0
| 0
| 160
| 0.45977
|
66542479cdedfadc3cc79ca36adbd5a5951278b3
| 43
|
py
|
Python
|
tests/test_MBS/__init__.py
|
bigdata-ustc/EduSim
|
849eed229c24615e5f2c3045036311e83c22ea68
|
[
"MIT"
] | 18
|
2019-11-11T03:45:35.000Z
|
2022-02-09T15:31:51.000Z
|
tests/test_MBS/__init__.py
|
ghzhao78506/EduSim
|
cb10e952eb212d8a9344143f889207b5cd48ba9d
|
[
"MIT"
] | 3
|
2020-10-23T01:05:57.000Z
|
2021-03-16T12:12:24.000Z
|
tests/test_MBS/__init__.py
|
bigdata-ustc/EduSim
|
849eed229c24615e5f2c3045036311e83c22ea68
|
[
"MIT"
] | 6
|
2020-06-09T21:32:00.000Z
|
2022-03-12T00:25:18.000Z
|
# coding: utf-8
# 2021/03/12 @ zhaoguanhao
| 14.333333
| 26
| 0.674419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.953488
|
b07211dd68e613a65eed975d73ce7904748f47e1
| 3,207
|
py
|
Python
|
server.py
|
shucheng-ai/WDA-dwg2dxf
|
ab89320c0e98e13d17cdde9e2ffa6060f9ec8443
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
shucheng-ai/WDA-dwg2dxf
|
ab89320c0e98e13d17cdde9e2ffa6060f9ec8443
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
shucheng-ai/WDA-dwg2dxf
|
ab89320c0e98e13d17cdde9e2ffa6060f9ec8443
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os
import logging
import time
from datetime import datetime
import subprocess as sp
import json
from flask import Flask, request, jsonify, send_from_directory
LOG_LEVEL = 'INFO'
LOG_PATH = 'dwg2dxf.log'
LOG_DIR = 'log'
PORT = 8001
try:
from local_config import *
except ImportError:
pass
fmt = "%(asctime)s | %(levelname)s | %(message)s | %(filename)s/%(funcName)s/%(lineno)d"
datefmt = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(
# DEBUG,INFO,WARNING,ERROR,CRITICAL
level=LOG_LEVEL,
format=fmt,
datefmt=datefmt,
filename=LOG_PATH,
filemode='w'
)
logger = logging.getLogger('dwg2dxf')
logger.warning("logger:")
logger.warning(f"logger level:{LOG_LEVEL}")
logger.warning(f"logger path:{LOG_PATH}")
# Each request gets a serial number of the form p-c,
# where p is the Unix timestamp of server start-up
# and c is the request count since start-up, beginning at 0.
epoch = int(time.time())
serial_prefix = '%d-' % epoch
serial_count = 0
# Return the next request serial number
def get_serial ():
global serial_prefix
global serial_count
s = serial_prefix + '%06d' % serial_count
serial_count += 1
return s
# Each request gets its own directory inside a date-based hierarchy,
# ending in the serial number: LOG_DIR/YYYYMMDD/HH/serial
def make_request_dir (serial):
now = datetime.now()
now_date = now.date()
now_time = now.time()
path = '%s/%04d%02d%02d/%02d/%s' % (LOG_DIR, now_date.year, now_date.month, now_date.day, now_time.hour, serial)
os.makedirs(path, exist_ok=True)
return path
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index ():
return "post dwg file to /dwg2dxf/"
def convert (input_fmt, output_fmt, input_fname, output_fname, attachment_fname_fn):
upload = request.files['file']
serial = get_serial()
wdir = make_request_dir(serial)
idir = os.path.join(wdir, 'input')
odir = os.path.join(wdir, 'output')
os.makedirs(idir, exist_ok=True)
os.makedirs(odir, exist_ok=True)
with open(os.path.join(wdir, 'meta.json'), 'w') as f:
json.dump({
'filename': upload.filename,
'input_format': input_fmt,
'output_format': output_fmt
}, f)
pass
upload.save(os.path.join(idir, input_fname))
cmd = 'xvfb-run /usr/bin/ODAFileConverter_21.3.0.0/ODAFileConverter %s %s ACAD2007 %s 0 1' % (idir, odir, output_fmt)
sp.call(cmd, shell=True)
attachment_fname = attachment_fname_fn(upload.filename)
return send_from_directory(directory=odir, filename=output_fname, as_attachment=True, attachment_filename=attachment_fname)
def dxf_to_dwg (dxfpath, dwgpath):
command = "xvfb-run /usr/bin/ODAFileConverter_21.3.0.0/ODAFileConverter " + dxfpath + " " + dwgpath +" ACAD2007 DWG 0 1"
os.system(command)
pass
def replace_ext (fname, ext):
out = 'output'
try:
out = fname.rsplit('/', 1)[-1].rsplit('.', 1)[0]
except:
pass
return out + ext
@app.route('/dwg2dxf/', methods=['POST'])
def dwg2dxf ():
return convert('DWG', 'DXF', 'upload.dwg', 'upload.dxf', lambda x: replace_ext(x, '.dxf'))
@app.route('/dxf2dwg/', methods=['POST'])
def dxf2dwg ():
return convert('DXF', 'DWG', 'upload.dxf', 'upload.dwg', lambda x: replace_ext(x, '.dwg'))
if __name__ == "__main__":
app.run(host='0.0.0.0', port=PORT, debug=True)
pass
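# Illustrative client calls (hypothetical host; PORT defaults to 8001 above):
#   curl -F "file=@drawing.dwg" http://localhost:8001/dwg2dxf/ -o drawing.dxf
#   curl -F "file=@drawing.dxf" http://localhost:8001/dxf2dwg/ -o drawing.dwg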
| 27.410256
| 127
| 0.665108
| 0
| 0
| 0
| 0
| 390
| 0.114875
| 0
| 0
| 1,034
| 0.304566
|
b0728c4bb800f77d5ab35c76ddb2e7148e9e9c1b
| 6,856
|
py
|
Python
|
main.py
|
KunalKatiyar/ChatBox-App
|
2a53c7ed155aeb56a5304acf546762bf1c62569a
|
[
"MIT"
] | 1
|
2020-04-03T09:07:35.000Z
|
2020-04-03T09:07:35.000Z
|
main.py
|
KunalKatiyar/ChatBox-App
|
2a53c7ed155aeb56a5304acf546762bf1c62569a
|
[
"MIT"
] | null | null | null |
main.py
|
KunalKatiyar/ChatBox-App
|
2a53c7ed155aeb56a5304acf546762bf1c62569a
|
[
"MIT"
] | null | null | null |
import kivy
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.scrollview import ScrollView
import os
import socket_client
import sys
kivy.require("1.11.1")
class ScrollableLabel(ScrollView):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.layout = GridLayout(cols=1, size_hint_y=None)
self.add_widget(self.layout)
self.chat_history = Label(size_hint_y=None, markup=True)
self.scroll_to_point = Label()
self.layout.add_widget(self.chat_history)
self.layout.add_widget(self.scroll_to_point)
def update_chat_history(self, message):
self.chat_history.text += '\n' + message
self.layout.height = self.chat_history.texture_size[1] + 15
self.chat_history.height = self.chat_history.texture_size[1]
self.chat_history.text_size = (self.chat_history.width * 0.98, None)
self.scroll_to(self.scroll_to_point)
def update_chat_history_layout(self, _=None):
self.layout.height = self.chat_history.texture_size[1] + 15
self.chat_history.height = self.chat_history.texture_size[1]
self.chat_history.text_size = (self.chat_history.width * 0.98, None)
class ConnectPage(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 2
if os.path.isfile("prev_details.txt"):
with open("prev_details.txt", "r") as f:
d = f.read().split(",")
prev_ip = d[0]
prev_port = d[1]
prev_username = d[2]
else:
prev_ip = ""
prev_port = ""
prev_username = ""
self.add_widget(Label(text = "IP:"))
self.ip = TextInput(text = prev_ip,multiline= False)
self.add_widget(self.ip)
self.add_widget(Label(text = "Port:"))
self.port = TextInput(text = prev_port,multiline= False)
self.add_widget(self.port)
self.add_widget(Label(text = "Username:"))
self.username = TextInput(text = prev_username,multiline= False)
self.add_widget(self.username)
self.join = Button(text="Join")
self.join.bind(on_press=self.join_button)
self.add_widget(Label())
self.add_widget(self.join)
def join_button(self, instance):
port = self.port.text
ip = self.ip.text
username = self.username.text
with open("prev_details.txt","w") as f:
f.write(f"{ip},{port},{username}")
info = f"Attempting to join {ip}:{port} as {username}"
chat_app.info_page.update_info(info)
chat_app.screen_manager.current = "Info"
Clock.schedule_once(self.connect, 1)
def connect(self, _):
port = int(self.port.text)
ip = self.ip.text
username = self.username.text
if not socket_client.connect(ip , port, username, show_error):
return
chat_app.create_chat_page()
chat_app.screen_manager.current = "Chat"
class InfoPage(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 1
self.message = Label(halign="center", valign="middle", font_size=30)
self.message.bind(width=self.update_text_width)
self.add_widget(self.message)
def update_info(self, message):
self.message.text = message
def update_text_width(self, *_):
self.message.text_size = (self.message.width * 0.9, None)
class ChatPage(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 1
self.rows = 2
self.history = ScrollableLabel(height=Window.size[1] * 0.9, size_hint_y=None)
self.add_widget(self.history)
self.new_message = TextInput(width=Window.size[0] * 0.8, size_hint_x=None, multiline=False)
self.send = Button(text="Send")
self.send.bind(on_press=self.send_message)
bottom_line = GridLayout(cols=2)
bottom_line.add_widget(self.new_message)
bottom_line.add_widget(self.send)
self.add_widget(bottom_line)
Window.bind(on_key_down=self.on_key_down)
Clock.schedule_once(self.focus_text_input, 1)
socket_client.start_listening(self.incoming_message, show_error)
self.bind(size=self.adjust_fields)
def adjust_fields(self, *_):
if Window.size[1] * 0.1 < 50:
new_height = Window.size[1] - 50
else:
new_height = Window.size[1] * 0.9
self.history.height = new_height
if Window.size[0] * 0.2 < 160:
new_width = Window.size[0] - 160
else:
new_width = Window.size[0] * 0.8
self.new_message.width = new_width
Clock.schedule_once(self.history.update_chat_history_layout, 0.01)
def on_key_down(self, instance, keyboard, keycode, text, modifiers):
if keycode == 40:
self.send_message(None)
def send_message(self, _):
message = self.new_message.text
self.new_message.text = ""
if message:
self.history.update_chat_history(f"[color=dd2020]{chat_app.connect_page.username.text}[/color] > {message}")
socket_client.send(message)
Clock.schedule_once(self.focus_text_input, 0.1)
def focus_text_input(self, _):
self.new_message.focus = True
def incoming_message(self, username, message):
self.history.update_chat_history(f"[color=20dd20]{username}[/color] > {message}")
class EpicApp(App):
def build(self):
self.screen_manager = ScreenManager()
self.connect_page = ConnectPage()
screen = Screen(name="Connect")
screen.add_widget(self.connect_page)
self.screen_manager.add_widget(screen)
self.info_page = InfoPage()
screen = Screen(name="Info")
screen.add_widget(self.info_page)
self.screen_manager.add_widget(screen)
return self.screen_manager
def create_chat_page(self):
self.chat_page = ChatPage()
screen = Screen(name="Chat")
screen.add_widget(self.chat_page)
self.screen_manager.add_widget(screen)
def show_error(message):
chat_app.info_page.update_info(message)
chat_app.screen_manager.current = "Info"
Clock.schedule_once(sys.exit, 10)
if __name__ == "__main__":
chat_app = EpicApp()
chat_app.run()
| 35.895288
| 121
| 0.625292
| 6,176
| 0.900817
| 0
| 0
| 0
| 0
| 0
| 0
| 376
| 0.054842
|
b0747e6f6d9760d681c73f7d16222536711564c9
| 4,607
|
py
|
Python
|
parlai/agents/recosa/recosa.py
|
ying-A/RED
|
27971a5a6d6768e8a7052d8fba06e5056da7619e
|
[
"MIT"
] | 3
|
2020-12-04T07:29:18.000Z
|
2021-04-08T06:23:20.000Z
|
parlai/agents/recosa/recosa.py
|
ying-A/RED
|
27971a5a6d6768e8a7052d8fba06e5056da7619e
|
[
"MIT"
] | null | null | null |
parlai/agents/recosa/recosa.py
|
ying-A/RED
|
27971a5a6d6768e8a7052d8fba06e5056da7619e
|
[
"MIT"
] | 1
|
2020-12-04T07:29:04.000Z
|
2020-12-04T07:29:04.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.agents import Agent
from parlai.core.utils import warn_once
from parlai.core.utils import padded_3d
from parlai.core.torch_generator_agent import TorchGeneratorAgent
from .modules import RecosaGeneratorModel
import torch
warn_once(
"Public release transformer models are currently in beta. The name of "
"command line options may change or disappear before a stable release. We "
"welcome your feedback. Please file feedback as issues at "
"https://github.com/facebookresearch/ParlAI/issues/new"
)
def add_common_cmdline_args(argparser):
argparser.add_argument('-esz', '--embedding-size', type=int, default=300,
help='Size of all embedding layers')
argparser.add_argument('-nl', '--n-layers', type=int, default=2)
argparser.add_argument('-hid', '--ffn-size', type=int, default=300,
help='Hidden size of the FFN layers')
argparser.add_argument('--attention-dropout', type=float, default=0.0)
argparser.add_argument('--relu-dropout', type=float, default=0.0)
argparser.add_argument('--n-heads', type=int, default=2,
help='Number of multihead attention heads')
argparser.add_argument('--learn-positional-embeddings', type='bool', default=False)
argparser.add_argument('--embeddings-scale', type='bool', default=True)
argparser.add_argument('--n-positions', type=int, default=None, hidden=True,
help='Number of positional embeddings to learn. Defaults '
'to truncate or 1024 if not provided.')
class Recosa(Agent):
"""
Placeholder class, which just throws an error telling the user to specify
whether they want the ranker or the generator.
"""
def __init__(self, opt, shared=None):
raise RuntimeError(
"`--model recosa` is not a valid choice. Please select "
"`--model recosa/generator` instead."
)
class RecosaGeneratorAgent(TorchGeneratorAgent):
@classmethod
def add_cmdline_args(cls, argparser):
"""Add command-line arguments specifically for this agent."""
agent = argparser.add_argument_group('Transformer Arguments')
agent.add_argument('-ord', '--order', default='no',
choices=['no', '1_order', '2_order', '3_order', 'full'],
help='Choices: no, 1_order, 2_order, 3_order, full.')
agent.add_argument('-dli_in_dim', '--dli_input_dim', default=300, type=int, help='size of the dli input dim')
agent.add_argument('-dli_rnn_hid', '--dli_rnn_hiddensize', default=64, type=int, help='size of the dli rnn hidden dim')
agent.add_argument('-dli_ffn_dim', '--dli_ffn_dimension', default=128, type=int, help='size of the dli ffn dim')
agent.add_argument('-rnn_hid', '--rnn_hiddensize', default=300, type=int, help='size of the rnn hidden layers')
agent.add_argument('-rnn_esz', '--rnn_embeddingsize', default=300, type=int, help='size of the rnn input embedding')
agent.add_argument('-rnn_nlayers', '--rnn_numlayers', default=2, type=int, help='the number of rnn hidden layers')
agent.add_argument('-rnn_cls', '--rnn_class', default='gru', choices=['lstm', 'gru', 'rnn'], help='rnn class for utterance encoder')
agent.add_argument('-rnn_bi', '--rnn_bidirectional', default=False, type='bool', help='whether to use a bidirectional rnn')
agent.add_argument('--rnn_dropout', default=0.0, type=float, help='dropout for rnn hidden layers')
agent.add_argument('--input_dropout', default=0.0, type=float, help='input dropout for inputs')
agent.add_argument('--max_turns', default=30, type=int, help='the max number of history turns')
agent.add_argument('--max_single_seq_len', default=50, type=int, help='the max length of a single history utterance')
add_common_cmdline_args(agent)
cls.dictionary_class().add_cmdline_args(argparser)
super(RecosaGeneratorAgent, cls).add_cmdline_args(argparser)
return agent
def build_model(self, states=None):
self.model = RecosaGeneratorModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(
self.model.encoder.embeddings.weight, self.opt['embedding_type']
)
if self.use_cuda:
self.model.cuda()
return self.model
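# Illustrative ParlAI training invocation (the task name and script path are
# examples, not taken from this file; `-m recosa/generator` is the model key
# enforced above):
#   python examples/train_model.py -m recosa/generator -t dailydialog -bs 32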
| 51.188889
| 135
| 0.674191
| 2,820
| 0.612112
| 0
| 0
| 2,043
| 0.443456
| 0
| 0
| 1,974
| 0.428478
|
b075806cabe0b1312272705ea0a85e81c75f2115
| 20,972
|
py
|
Python
|
dataloading/nvidia.py
|
UT-ADL/lidar-as-camera
|
daccb2ae21b4899ecfd8611b7a27f91681617383
|
[
"Apache-2.0"
] | null | null | null |
dataloading/nvidia.py
|
UT-ADL/lidar-as-camera
|
daccb2ae21b4899ecfd8611b7a27f91681617383
|
[
"Apache-2.0"
] | null | null | null |
dataloading/nvidia.py
|
UT-ADL/lidar-as-camera
|
daccb2ae21b4899ecfd8611b7a27f91681617383
|
[
"Apache-2.0"
] | null | null | null |
import sys
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import torchvision
from torchvision import transforms
import torchvision.transforms.functional as F
from skimage.util import random_noise
from dataloading.camera import Camera
class NvidiaResizeAndCrop(object):
def __call__(self, data):
xmin = 186
ymin = 600
scale = 6.0
width = 258
height = 66
scaled_width = int(width * scale)
scaled_height = int(height * scale)
cropped = transforms.functional.resized_crop(data["image"], ymin, xmin, scaled_height, scaled_width,
(height, width))
data["image"] = cropped
return data
class NvidiaCropWide(object):
def __init__(self, x_delta=0):
self.x_delta = x_delta
def __call__(self, data):
xmin = 300
xmax = 1620
ymin = 520
ymax = 864
scale = 0.2
height = ymax - ymin
width = xmax - xmin
cropped = F.resized_crop(data["image"], ymin, xmin + self.x_delta, height, width,
(int(scale * height), int(scale * width)))
data["image"] = cropped
return data
class CropViT(object):
def __call__(self, data):
xmin = 540
xmax = 1260
ymin = 244
ymax = 964
scale = 0.312
height = ymax - ymin
width = xmax - xmin
cropped = F.resized_crop(data["image"], ymin, xmin, height, width,
(int(scale * height), int(scale * width)))
data["image"] = cropped
return data
class NvidiaSideCameraZoom(object):
def __init__(self, zoom_ratio):
self.zoom_ratio = zoom_ratio
def __call__(self, data):
width = 1920
height = 1208
xmin = int(self.zoom_ratio * width)
ymin = int(self.zoom_ratio * height)
scaled_width = width - (2 * xmin)
scaled_height = height - (2 * ymin)
cropped = F.resized_crop(data["image"], ymin, xmin, scaled_height, scaled_width,
(height, width))
data["image"] = cropped
return data
class AugmentationConfig:
def __init__(self, color_prob=0.0, noise_prob=0.0, blur_prob=0.0):
self.color_prob = color_prob
self.noise_prob = noise_prob
self.blur_prob = blur_prob
class AugmentImage:
def __init__(self, augment_config):
print(f"augmentation: color_prob={augment_config.color_prob}, "
f"noise_prob={augment_config.noise_prob}, "
f"blur_prob={augment_config.blur_prob}")
self.augment_config = augment_config
def __call__(self, data):
if np.random.random() <= self.augment_config.color_prob:
jitter = transforms.ColorJitter(contrast=0.5, saturation=0.5, brightness=0.5)
data["image"] = jitter(data["image"])
if np.random.random() <= self.augment_config.noise_prob:
if np.random.random() > 0.5:
data["image"] = torch.tensor(random_noise(data["image"], mode='gaussian', mean=0, var=0.005, clip=True),
dtype=torch.float)
else:
data["image"] = torch.tensor(random_noise(data["image"], mode='salt', amount=0.005),
dtype=torch.float)
if np.random.random() <= self.augment_config.blur_prob:
blurrer = transforms.GaussianBlur(kernel_size=(3, 3), sigma=(0.3, 1))
data["image"] = blurrer(data['image'])
return data
class Normalize(object):
def __call__(self, data, transform=None):
# normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
image = data["image"]
image = image / 255
# data["image"] = normalize(image)
data["image"] = image
return data
class NvidiaDataset(Dataset):
#CAP_WAYPOINTS = 30
def __init__(self, dataset_paths, transform=None, camera="front_wide", name="Nvidia dataset",
filter_turns=False, output_modality="steering_angle", n_branches=1, n_waypoints=6,
metadata_file="nvidia_frames.csv", color_space="rgb", side_cameras_weight=0.33):
self.name = name
self.metadata_file = metadata_file
self.color_space = color_space
self.dataset_paths = dataset_paths
if transform:
self.transform = transform
else:
self.transform = transforms.Compose([Normalize()])
self.camera_name = camera
self.output_modality = output_modality
self.n_waypoints = n_waypoints
self.side_cameras_weight = side_cameras_weight
if self.output_modality == "waypoints":
self.target_size = 2 * self.n_waypoints
elif self.output_modality == "steering_angle":
self.target_size = 1
else:
print(f"Unknown output modality {self.output_modality}")
sys.exit()
self.n_branches = n_branches
if camera == 'all':
datasets = [self.read_dataset(dataset_path, "left") for dataset_path in dataset_paths] + \
[self.read_dataset(dataset_path, "right") for dataset_path in dataset_paths] + \
[self.read_dataset(dataset_path, "front_wide") for dataset_path in dataset_paths]
else:
datasets = [self.read_dataset(dataset_path, camera) for dataset_path in dataset_paths]
self.frames = pd.concat(datasets)
if filter_turns:
print("Filtering turns with blinker signal")
self.frames = self.frames[self.frames.turn_signal == 1]
def __getitem__(self, idx):
frame = self.frames.iloc[idx]
if self.color_space == "rgb":
image = torchvision.io.read_image(frame["image_path"])
elif self.color_space == "bgr":
image = cv2.imread(frame["image_path"])
image = torch.tensor(image, dtype=torch.uint8).permute(2, 0, 1)
else:
print(f"Unknown color space: ", self.color_space)
sys.exit()
# TODO replace if-else with map
if self.camera_name == Camera.LEFT.value:
steering_angle = np.array(frame["steering_angle_left"])
elif self.camera_name == Camera.RIGHT.value:
steering_angle = np.array(frame["steering_angle_right"])
else:
steering_angle = np.array(frame["steering_angle"])
data = {
'image': image,
'steering_angle': steering_angle,
'vehicle_speed': np.array(frame["vehicle_speed"]),
'autonomous': np.array(frame["autonomous"]),
'position_x': np.array(frame["position_x"]),
'position_y': np.array(frame["position_y"]),
'yaw': np.array(frame["yaw"]),
'turn_signal': np.array(frame["turn_signal"]),
'row_id': np.array(frame["row_id"]),
}
turn_signal = int(frame["turn_signal"])
if self.output_modality == "waypoints":
waypoints = []
for i in np.arange(1, self.n_waypoints + 1):
waypoints.append(frame[f"wp{i}_{self.camera_name}_x"])
waypoints.append(frame[f"wp{i}_{self.camera_name}_y"])
data['waypoints'] = np.array(waypoints)
target_values = waypoints
else:
target_values = frame["steering_angle"]
if self.transform:
data = self.transform(data)
if self.n_branches > 1:
target = np.zeros((self.n_branches, self.target_size))
target[turn_signal, :] = target_values
conditional_mask = np.zeros((self.n_branches, self.target_size))
conditional_mask[turn_signal, :] = 1
else:
target = np.zeros((self.n_branches, self.target_size))
target[0, :] = target_values
conditional_mask = np.ones((self.n_branches, self.target_size))
return data, target.reshape(-1), conditional_mask.reshape(-1)
def __len__(self):
return len(self.frames.index)
def get_waypoints(self):
wp_x_cols = [f"wp{i}_{self.camera_name}_x" for i in np.arange(1, self.n_waypoints + 1)]
wp_y_cols = [f"wp{i}_{self.camera_name}_y" for i in np.arange(1, self.n_waypoints + 1)]
waypoint_cols = np.column_stack((wp_x_cols, wp_y_cols)).reshape(-1)
return self.frames[waypoint_cols].to_numpy()
def read_dataset(self, dataset_path, camera):
if type(dataset_path) is dict:
frames_df = pd.read_csv(dataset_path['path'] / self.metadata_file)
len_before_filtering = len(frames_df)
frames_df = frames_df.iloc[dataset_path['start']:dataset_path['end']]
dataset_path = dataset_path['path']
else:
frames_df = pd.read_csv(dataset_path / self.metadata_file)
len_before_filtering = len(frames_df)
frames_df["row_id"] = frames_df.index
# temp hack
if "autonomous" not in frames_df.columns:
frames_df["autonomous"] = False
# frames_df["autonomous"] = False
frames_df = frames_df[frames_df['steering_angle'].notna()] # TODO: one steering angle is NaN, why?
if camera != Camera.FRONT_WIDE.value:
frames_df = frames_df[frames_df['steering_angle_left'].notna()]
frames_df = frames_df[frames_df['steering_angle_right'].notna()]
frames_df = frames_df[frames_df['vehicle_speed'].notna()]
frames_df = frames_df[frames_df[f'{camera}_filename'].notna()]
frames_df["turn_signal"].fillna(1, inplace=True)
frames_df["turn_signal"] = frames_df["turn_signal"].astype(int)
# Remove frames marked as skipped
frames_df = frames_df[frames_df["turn_signal"] != -1] # TODO: remove magic values.
if self.output_modality == "waypoints":
frames_df = frames_df[frames_df[f"position_x"].notna()]
frames_df = frames_df[frames_df[f"position_y"].notna()]
for i in np.arange(1, self.n_waypoints + 1):
frames_df = frames_df[frames_df[f"wp{i}_{camera}_x"].notna()]
frames_df = frames_df[frames_df[f"wp{i}_{camera}_y"].notna()]
frames_df["yaw_delta"] = np.abs(frames_df["yaw"]) - np.abs(frames_df["yaw"]).shift(-1)
frames_df = frames_df[np.abs(frames_df["yaw_delta"]) < 0.1]
# if self.calculate_waypoints:
#
# vehicle_x = frames_df["position_x"]
# vehicle_y = frames_df["position_y"]
#
# for i in np.arange(1, self.N_WAYPOINTS + 1):
# wp_global_x = frames_df["position_x"].shift(-i * self.CAP_WAYPOINTS)
# wp_global_y = frames_df["position_y"].shift(-i * self.CAP_WAYPOINTS)
# frames_df[f"x_{i}"] = wp_global_x
# frames_df[f"y_{i}"] = wp_global_y
# yaw = frames_df["yaw"]
# #frames_df["yaw"] = yaw
#
# wp_local_x = (wp_global_x - vehicle_x) * np.cos(yaw) + (wp_global_y - vehicle_y) * np.sin(yaw)
# wp_local_y = -(wp_global_x - vehicle_x) * np.sin(yaw) + (wp_global_y - vehicle_y) * np.cos(yaw)
# frames_df[f"x_{i}_offset"] = wp_local_x
# frames_df[f"y_{i}_offset"] = wp_local_y
#
# # Remove rows without trajectory offsets, should be last N_WAYPOINTS rows
# frames_df = frames_df[frames_df[f"x_{i}_offset"].notna()]
#
# # frames_df["yaw_delta"] = np.abs(frames_df["yaw"]) - np.abs(frames_df["yaw"]).shift(-1)
# # frames_df = frames_df[np.abs(frames_df["yaw_delta"]) < 0.1]
# #
# # frames_df["x_1_delta"] = frames_df["x_1_offset"] - frames_df["x_1_offset"].shift(-1)
# # frames_df = frames_df[np.abs(frames_df["x_1_delta"]) < 0.1]
# #
# # frames_df["y_1_delta"] = frames_df["y_1_offset"] - frames_df["y_1_offset"].shift(-1)
# # frames_df = frames_df[np.abs(frames_df["y_1_delta"]) < 0.1]
#
# # frames_df = frames_df[np.abs(frames_df["steering_angle"]) < 2.0]
len_after_filtering = len(frames_df)
camera_images = frames_df[f"{camera}_filename"].to_numpy()
frames_df["image_path"] = [str(dataset_path / image_path) for image_path in camera_images]
if self.output_modality == "waypoints":
for i in np.arange(1, self.n_waypoints + 1):
frames_df[f"wp{i}_all_x"] = frames_df[f"wp{i}_{camera}_x"]
frames_df[f"wp{i}_all_y"] = frames_df[f"wp{i}_{camera}_y"]
frames_df["camera_type"] = camera
print(f"{dataset_path}: lenght={len(frames_df)}, filtered={len_before_filtering-len_after_filtering}")
frames_df.reset_index(inplace=True)
return frames_df
def steering_angles_degrees(self):
return self.frames.steering_angle.to_numpy() / np.pi * 180
class NvidiaTrainDataset(NvidiaDataset):
def __init__(self, root_path, output_modality="steering_angle", n_branches=3, n_waypoints=6,
camera="front_wide", augment_conf=AugmentationConfig(), metadata_file="nvidia_frames.csv"):
self.dataset_paths = [
root_path / "2021-05-20-12-36-10_e2e_sulaoja_20_30",
root_path / "2021-05-20-12-43-17_e2e_sulaoja_20_30",
root_path / "2021-05-20-12-51-29_e2e_sulaoja_20_30",
root_path / "2021-05-20-13-44-06_e2e_sulaoja_10_10",
root_path / "2021-05-20-13-51-21_e2e_sulaoja_10_10",
root_path / "2021-05-20-13-59-00_e2e_sulaoja_10_10",
root_path / "2021-05-28-15-07-56_e2e_sulaoja_20_30",
root_path / "2021-05-28-15-17-19_e2e_sulaoja_20_30",
{'path': root_path / "2021-06-09-13-14-51_e2e_rec_ss2", 'start': 125, 'end': 49725},
{'path': root_path / "2021-06-09-13-55-03_e2e_rec_ss2_backwards", 'start': 150, 'end': 53625},
{'path': root_path / "2021-06-09-14-58-11_e2e_rec_ss3", 'start': 175, 'end': 43775},
{'path': root_path / "2021-06-09-15-42-05_e2e_rec_ss3_backwards", 'start': 100, 'end': 40625},
root_path / "2021-06-09-16-24-59_e2e_rec_ss13",
root_path / "2021-06-09-16-50-22_e2e_rec_ss13_backwards",
root_path / "2021-06-10-12-59-59_e2e_ss4",
root_path / "2021-06-10-13-19-22_e2e_ss4_backwards",
root_path / "2021-06-10-13-51-34_e2e_ss12",
root_path / "2021-06-10-14-02-24_e2e_ss12_backwards",
root_path / "2021-06-10-14-44-24_e2e_ss3_backwards",
root_path / "2021-06-10-15-03-16_e2e_ss3_backwards",
root_path / "2021-06-14-11-08-19_e2e_rec_ss14",
root_path / "2021-06-14-11-22-05_e2e_rec_ss14",
root_path / "2021-06-14-11-43-48_e2e_rec_ss14_backwards",
{'path': root_path / "2021-09-24-11-19-25_e2e_rec_ss10", 'start': 400, 'end': 34550},
{'path': root_path / "2021-09-24-11-40-24_e2e_rec_ss10_2", 'start': 150, 'end': 16000},
{'path': root_path / "2021-09-24-12-02-32_e2e_rec_ss10_3", 'start': 350, 'end': 8050},
root_path / "2021-09-24-12-21-20_e2e_rec_ss10_backwards",
root_path / "2021-09-24-13-39-38_e2e_rec_ss11",
{'path': root_path / "2021-09-30-13-57-00_e2e_rec_ss14", 'start': 100, 'end': 3200},
root_path / "2021-09-30-15-03-37_e2e_ss14_from_half_way",
root_path / "2021-09-30-15-20-14_e2e_ss14_backwards",
{'path': root_path / "2021-09-30-15-56-59_e2e_ss14_attempt_2", 'start': 80, 'end': 54600},
root_path / "2021-10-07-11-05-13_e2e_rec_ss3",
root_path / "2021-10-07-11-44-52_e2e_rec_ss3_backwards",
root_path / "2021-10-07-12-54-17_e2e_rec_ss4",
root_path / "2021-10-07-13-22-35_e2e_rec_ss4_backwards",
root_path / "2021-10-11-16-06-44_e2e_rec_ss2",
root_path / "2021-10-11-17-10-23_e2e_rec_last_part",
root_path / "2021-10-11-17-14-40_e2e_rec_backwards",
root_path / "2021-10-11-17-20-12_e2e_rec_backwards",
root_path / "2021-10-20-14-55-47_e2e_rec_vastse_ss13_17",
root_path / "2021-10-20-13-57-51_e2e_rec_neeruti_ss19_22",
root_path / "2021-10-20-14-15-07_e2e_rec_neeruti_ss19_22_back",
root_path / "2021-10-25-17-31-48_e2e_rec_ss2_arula",
root_path / "2021-10-25-17-06-34_e2e_rec_ss2_arula_back"
# '2021-11-08-11-24-44_e2e_rec_ss12_raanitsa.bag' \
# '2021-11-08-12-08-40_e2e_rec_ss12_raanitsa_backward.bag' \
]
tr = transforms.Compose([AugmentImage(augment_config=augment_conf), Normalize()])
super().__init__(self.dataset_paths, tr, camera=camera, output_modality=output_modality, n_branches=n_branches,
n_waypoints=n_waypoints, metadata_file=metadata_file)
class NvidiaValidationDataset(NvidiaDataset):
# todo: remove default parameters
def __init__(self, root_path, output_modality="steering_angle", n_branches=3, n_waypoints=6, camera="front_wide",
metadata_file="nvidia_frames.csv"):
self.dataset_paths = [
root_path / "2021-05-28-15-19-48_e2e_sulaoja_20_30",
root_path / "2021-06-07-14-20-07_e2e_rec_ss6",
root_path / "2021-06-07-14-06-31_e2e_rec_ss6",
root_path / "2021-06-07-14-09-18_e2e_rec_ss6",
root_path / "2021-06-07-14-36-16_e2e_rec_ss6",
root_path / "2021-09-24-14-03-45_e2e_rec_ss11_backwards",
root_path / "2021-10-26-10-49-06_e2e_rec_ss20_elva",
root_path / "2021-10-26-11-08-59_e2e_rec_ss20_elva_back",
root_path / "2021-10-20-15-11-29_e2e_rec_vastse_ss13_17_back",
{'path': root_path / "2021-10-11-14-50-59_e2e_rec_vahi", 'start': 100, 'end': 15000},
{'path': root_path / "2021-10-14-13-08-51_e2e_rec_vahi_backwards", 'start': 80, 'end': 13420}
]
tr = transforms.Compose([Normalize()])
super().__init__(self.dataset_paths, tr, camera=camera, output_modality=output_modality, n_branches=n_branches,
n_waypoints=n_waypoints, metadata_file=metadata_file)
class NvidiaWinterTrainDataset(NvidiaDataset):
def __init__(self, root_path, output_modality="steering_angle",
n_branches=3, n_waypoints=6, augment_conf=AugmentationConfig()):
train_paths = [
root_path / "2022-01-28-10-21-14_e2e_rec_peipsiaare_forward",
root_path / "2022-01-28-12-46-59_e2e_rec_peipsiaare_backward",
root_path / "2022-01-14-10-05-16_e2e_rec_raanitsa_forward",
root_path / "2022-01-14-10-50-05_e2e_rec_raanitsa_backward",
root_path / "2022-01-14-11-54-33_e2e_rec_kambja_forward2",
root_path / "2022-01-14-12-21-40_e2e_rec_kambja_forward2_continue",
root_path / "2022-01-14-13-09-05_e2e_rec_kambja_backward",
root_path / "2022-01-14-13-18-36_e2e_rec_kambja_backward_continue",
root_path / "2022-01-14-12-35-13_e2e_rec_neeruti_forward",
root_path / "2022-01-14-12-45-51_e2e_rec_neeruti_backward",
root_path / "2022-01-18-13-03-03_e2e_rec_arula_backward",
root_path / "2022-01-18-13-43-33_e2e_rec_otepaa_forward",
root_path / "2022-01-18-13-52-35_e2e_rec_otepaa_forward",
root_path / "2022-01-18-13-56-22_e2e_rec_otepaa_forward",
root_path / "2022-01-18-14-12-14_e2e_rec_otepaa_backward",
root_path / "2022-01-18-15-20-35_e2e_rec_kanepi_forward",
root_path / "2022-01-18-15-49-26_e2e_rec_kanepi_backwards",
]
tr = transforms.Compose([AugmentImage(augment_config=augment_conf), Normalize()])
super().__init__(train_paths, tr, output_modality=output_modality, n_branches=n_branches, n_waypoints=n_waypoints)
class NvidiaWinterValidationDataset(NvidiaDataset):
def __init__(self, root_path, output_modality="steering_angle", n_branches=3, n_waypoints=6):
valid_paths = [
root_path / "2022-01-18-12-37-01_e2e_rec_arula_forward",
root_path / "2022-01-18-12-47-32_e2e_rec_arula_forward_continue",
root_path / "2022-01-28-14-47-23_e2e_rec_elva_forward",
root_path / "2022-01-28-15-09-01_e2e_rec_elva_backward",
root_path / "2022-01-25-15-25-15_e2e_rec_vahi_forward",
root_path / "2022-01-25-15-34-01_e2e_rec_vahi_backwards",
]
tr = transforms.Compose([Normalize()])
super().__init__(valid_paths, tr, output_modality=output_modality, n_branches=n_branches, n_waypoints=n_waypoints)
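# Illustrative smoke test (the dataset root is a hypothetical path; kept as
# comments so importing this module stays side-effect free):
#   from pathlib import Path
#   dataset = NvidiaValidationDataset(Path("/data/nvidia"))
#   data, target, conditional_mask = dataset[0]
#   print(data["image"].shape, target.shape, conditional_mask.shape)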
| 44.716418
| 122
| 0.613961
| 20,621
| 0.983263
| 0
| 0
| 0
| 0
| 0
| 0
| 6,970
| 0.332348
|
b0767a0c923e3d01ffc014eff24c6e6e510dea0d
| 118
|
py
|
Python
|
ilexconf/console/__init__.py
|
vduseev/holly-config
|
92b7f902d92a4bae0f9fef6241b5ef3e11b44082
|
[
"MIT"
] | 9
|
2020-10-04T15:55:08.000Z
|
2020-10-09T11:48:44.000Z
|
ilexconf/console/__init__.py
|
vduseev/holly-config
|
92b7f902d92a4bae0f9fef6241b5ef3e11b44082
|
[
"MIT"
] | 47
|
2020-10-10T23:19:15.000Z
|
2021-04-02T21:29:36.000Z
|
ilexconf/console/__init__.py
|
ilexconf/ilexconf
|
92b7f902d92a4bae0f9fef6241b5ef3e11b44082
|
[
"MIT"
] | null | null | null |
from ilexconf.console.application import Application
def main(): # pragma: no cover
return Application().run()
| 19.666667
| 52
| 0.737288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.152542
|
b076c6afbdbadd2bb1cb6f1a1f6c0c794d322733
| 7,206
|
py
|
Python
|
further/deployment.py
|
openfurther/further-open-fabric-deployment
|
d571d603dad43996c66d716c69a247547f6f0ba1
|
[
"Apache-2.0"
] | null | null | null |
further/deployment.py
|
openfurther/further-open-fabric-deployment
|
d571d603dad43996c66d716c69a247547f6f0ba1
|
[
"Apache-2.0"
] | null | null | null |
further/deployment.py
|
openfurther/further-open-fabric-deployment
|
d571d603dad43996c66d716c69a247547f6f0ba1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) [2013] [The FURTHeR Project]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from fabric.api import local, lcd, prompt
from os import walk
from os.path import join
from ConfigParser import ConfigParser
import fileinput
import sys
import string
import random
import re
def deployFurtherCore(environment):
"""Deploy further-core to a given environment where environment is represented as a folder with configuration. This command is meant to be run locally"""
version = prompt("FURTHeR version to deploy?")
config = _load_configuration(environment, 'further-core.cfg')
config['version'] = version
_replace_tokens('further-core/' + environment, config)
_deploy_further_configuration(environment)
def deployFurtherI2b2(environment):
"""Deploy further-core to a given environment where environment is represented as a folder with configuration. This command is meant to be run locally"""
config = _load_configuration(environment, 'further-i2b2.cfg')
_replace_tokens('further-i2b2/' + environment, config)
_deploy_i2b2_configuration(environment)
_deploy_further_i2b2_hook(environment)
_deploy_jboss_configuration(environment)
def _deploy_i2b2_configuration(environment):
"""Deploys the i2b2 configuration to the i2b2 server environment. This function is meant to be run locally and relies on $JBOSS_HOME, $TOMCAT_HOME, and SRC_HOME being configured"""
with lcd('further-i2b2'):
with lcd(environment):
with lcd('edu.harvard.i2b2.crc'):
local('cp *-ds.xml $JBOSS_HOME/server/default/deploy')
local('cp CRCApplicationContext.xml $JBOSS_HOME/server/default/conf/crcapp')
with lcd('edu.harvard.i2b2.crc.loader'):
local('cp CRCLoaderApplicationContext.xml $JBOSS_HOME/server/default/conf/crcloaderapp')
with lcd('edu.harvard.i2b2.ontology'):
local('cp *-ds.xml $JBOSS_HOME/server/default/deploy')
with lcd('edu.harvard.i2b2.pm'):
with lcd('database'):
local('cp hibernate.properties $TOMCAT_HOME/webapps/gridsphere/WEB-INF/CustomPortal/database')
with lcd('persistence'):
local('cp hibernate.properties $TOMCAT_HOME/webapps/default/WEB-INF/persistence')
local('cp secret.properties $TOMCAT_HOME/webapps/axis2/WEB-INF/classes/')
with lcd('edu.harvard.i2b2.workplace'):
local('cp *-ds.xml $JBOSS_HOME/server/default/deploy')
with lcd('i2b2-webclient'):
local('rm -rf /var/www/html/i2b2')
local('cp -R $SRC_HOME/i2b2-webclient/src/main/webapp/i2b2 /var/www/html')
local('cp i2b2config.ini.php /var/www/html/i2b2/includes')
def _deploy_further_i2b2_hook(environment):
"""Deploys the further-i2b2-hook that is responsible for sending i2b2 queries to be processed by FURTHeR. Relies on $JBOSS_HOME being configured"""
with lcd('further-i2b2'):
with lcd(environment):
with lcd('i2b2-hook'):
local('cp further.properties $JBOSS_HOME/server/default/deploy/i2b2.war/WEB-INF/classes')
# Remove old jars
with lcd('$JBOSS_HOME/server/default/deploy/i2b2.war/WEB-INF'):
local('rm -rf lib/core-*')
local('rm -rf lib/i2b2-hook-further*')
local('rm -rf lib/slf4j-*')
local('rm -rf lib/fqe-ds-api*')
with lcd('$SRC_HOME/i2b2-hook/i2b2-hook-further/target'):
tmp_dir = 'hook-tmp'
local('rm -rf ' + tmp_dir)
local('mkdir ' + tmp_dir)
local('cp i2b2-hook-further-bin.zip ' + tmp_dir)
with lcd(tmp_dir):
local('unzip i2b2-hook-further-bin.zip')
with lcd('i2b2-hook-further'):
local('mv *.jar $JBOSS_HOME/server/default/deploy/i2b2.war/WEB-INF/lib')
local('mv web.xml.further $JBOSS_HOME/server/default/deploy/i2b2.war/WEB-INF/web.xml')
def _deploy_jboss_configuration(environment):
with lcd('further-i2b2'):
with lcd(environment):
with lcd('jboss'):
with lcd('jmx-console'):
local('cp *.xml $JBOSS_HOME/server/default/deploy/jmx-console.war/WEB-INF')
with lcd('props'):
local('cp *.properties $JBOSS_HOME/server/default/conf/props')
with lcd('web-console'):
local('cp *.xml $JBOSS_HOME/server/default/deploy/management/console-mgr.sar/web-console.war/WEB-INF')
local('cp *.properties $JBOSS_HOME/server/default/deploy/management/console-mgr.sar/web-console.war/WEB-INF/classes')
def _deploy_further_configuration(environment):
"""Deploy the further-core configuration. Relies on $ESB_HOME being configured"""
with lcd('further-core'):
with lcd(environment):
local('cp *.cfg $ESB_HOME/etc')
local('cp *.properties $ESB_HOME/etc')
def _load_configuration(environment, path):
"""Loads a given configuration file specified by path and environment header (ini file).
returns a key value representing the configuration. Values enclosed in {} are automatically
decrypted using the $FURTHER_PASSWORD variable. Values that equal [RND] will be replaced with
a random string."""
# Read configuration file
parser = ConfigParser()
parser.read(path)
config = {}
for option in parser.options(environment):
value = parser.get(environment, option)
# Handle encrypted configuration
if (re.match(r'^\{.*\}$', value)):
encrypted_value = re.match(r'^\{(.*)\}$', value).group(1)
value = (local('decrypt.sh input="' + encrypted_value + '" password=$FURTHER_PASSWORD algorithm="PBEWithSHA1AndDESede" verbose="false"', capture=True))
# Handle random values
if (re.match(r'\[RND\]', value)):
value = _random_string()
config[option] = value
return config
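# Illustrative further-core.cfg section (hypothetical keys and values) showing
# the three value forms this parser understands: plain text, {encrypted}
# payloads decrypted with $FURTHER_PASSWORD, and [RND] random placeholders:
#   [dev]
#   db.user = further
#   db.password = {V2VBcmVOb3RSZWFsQnl0ZXM=}
#   session.secret = [RND]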
def _replace_tokens(path, config):
"""Recursively walks the given path and replaces any tokens (@value@) with
given values within the configuration"""
replace_tokens = config.keys()
for dirname, dirnames, filenames in walk(path):
for filename in filenames:
for line in fileinput.input(join(dirname, filename), inplace=True):
newline = line
for token in replace_tokens:
replace = '@' + token.upper() + '@'
if replace in line:
newline = line.replace(replace, config.get(token))
break
sys.stdout.write(newline)
def _random_string(characters=string.ascii_uppercase + string.ascii_lowercase + string.digits, size=32):
"""Generates a random string from all upper, lower, and digits"""
return ''.join(random.choice(characters) for x in range(size))
| 45.0375
| 181
| 0.684707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,985
| 0.553011
|
b077615a398497bbf160df8bf076bda9f756c99d
| 337
|
py
|
Python
|
vendor/views/sign_in_views.py
|
hossainchisty/Multi-Vendor-eCommerce
|
42c5f62b8b098255cc9ea57858d3cc7de94bd76a
|
[
"MIT"
] | 16
|
2021-09-22T19:08:28.000Z
|
2022-03-18T18:57:02.000Z
|
vendor/views/sign_in_views.py
|
hossainchisty/Multi-Vendor-eCommerce
|
42c5f62b8b098255cc9ea57858d3cc7de94bd76a
|
[
"MIT"
] | 6
|
2021-09-30T12:36:02.000Z
|
2022-03-18T22:18:00.000Z
|
vendor/views/sign_in_views.py
|
hossainchisty/Multi-Vendor-eCommerce
|
42c5f62b8b098255cc9ea57858d3cc7de94bd76a
|
[
"MIT"
] | 6
|
2021-12-06T02:04:51.000Z
|
2022-03-13T14:38:14.000Z
|
from django.contrib.auth.views import LoginView
from django.contrib.auth.forms import AuthenticationForm
class SignInView(LoginView):
''' Sign in for vendor '''
form_class = AuthenticationForm
template_name = 'vendor/sign_in.html'
redirect_field_name = 'vendor:root_path'
success_url = 'vendor:root_path'
| 30.636364
| 57
| 0.735905
| 224
| 0.664688
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.246291
|
b078190b27e28c76e2f6312fda29c3c87dbe3e12
| 692
|
py
|
Python
|
django_excel_to_model/tests/test_openpyxl.py
|
weijia/django-excel-to-model
|
2bab354835e31133f1344bee2cb12cb3627eef3d
|
[
"BSD-3-Clause"
] | 2
|
2021-03-14T14:29:19.000Z
|
2021-05-02T10:36:47.000Z
|
django_excel_to_model/tests/test_openpyxl.py
|
weijia/django-excel-to-model
|
2bab354835e31133f1344bee2cb12cb3627eef3d
|
[
"BSD-3-Clause"
] | 1
|
2020-03-11T06:20:59.000Z
|
2020-04-22T02:17:35.000Z
|
django_excel_to_model/tests/test_openpyxl.py
|
weijia/django-excel-to-model
|
2bab354835e31133f1344bee2cb12cb3627eef3d
|
[
"BSD-3-Clause"
] | 4
|
2018-04-28T02:39:05.000Z
|
2021-07-27T02:04:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-excel-to-model
------------
Tests for `django-excel-to-model` models module.
"""
from django.test import TestCase
# from unittest import TestCase
from django_excel_to_model.openpyxl_reader import OpenpyxlExcelFile
from sap_asset_master_data20191224.models import mapping
class TestOpenpyxl(TestCase):
def setUp(self):
pass
def test_something(self):
x = OpenpyxlExcelFile(r"C:\N-PC0WN7R6-Data\q19420\Downloads\sapItems20191223-1.XLSx")
s = x.get_sheet(0)
s.set_header_row(0)
for i in s.enumerate_mapped(mapping, 2):
print(i)
def tearDown(self):
pass
| 21.625
| 93
| 0.679191
| 352
| 0.508671
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.33815
|
b079078e2de1f33efb9b000ddc8a7958596a0333
| 1,461
|
py
|
Python
|
Ene-Jun-2021/aguilar-cedillo-jonathan-ivan/Examen Especial/app/api.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 41
|
2017-09-26T09:36:32.000Z
|
2022-03-19T18:05:25.000Z
|
Ene-Jun-2021/aguilar-cedillo-jonathan-ivan/Examen Especial/app/api.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 67
|
2017-09-11T05:06:12.000Z
|
2022-02-14T04:44:04.000Z
|
Ene-Jun-2021/aguilar-cedillo-jonathan-ivan/Examen Especial/app/api.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
from flask import Flask, render_template, jsonify
from mongo import mongo
import json
# from persona import persona
import time
app = Flask(__name__, static_url_path='/static')
cliente = mongo()
peoples = cliente['mydb']['people'].find()
def parsePerson(person):
return {
'id':person['id'],
'first_name':person['first_name'],
'last_name':person['last_name'],
'company':person['company'],
'email':person['email'],
'ip_address':person['ip_address'],
'phone_number':person['phone_number']
}
def getPeoplesFromDB(peoples):
x = []
for p in peoples:
x.append(
parsePerson(p)
)
return x
def getPersonFromDB(id):
person=cliente['mydb']['people'].find_one({"id": str(id)})
return parsePerson(person)
x = getPeoplesFromDB(peoples)
@app.route('/')
def index():
return render_template('index.html',personas =x)
@app.route('/people')
def people():
return jsonify(x)
@app.route('/people/<string:idPerson>')
def personById(idPerson):
if idPerson.isdigit() and 0 < int(idPerson) < 1001:
try:
persona = getPersonFromDB(idPerson)
return persona
except Exception:
return jsonify({'status': 500})
return jsonify({'status': 500})
if __name__ == "__main__":
app.run(host="0.0.0.0", port=7777)
| 25.631579
| 63
| 0.576318
| 0
| 0
| 0
| 0
| 482
| 0.329911
| 0
| 0
| 295
| 0.201916
|
b07a58de9437362aaa9673805d54668fa6ee8b1a
| 1,592
|
py
|
Python
|
audio_to_video.py
|
skratchdot/media-tools
|
bca0c683fb637aeefda1c49454a118f809047d97
|
[
"MIT"
] | 13
|
2019-12-09T07:56:13.000Z
|
2021-08-03T01:45:53.000Z
|
audio_to_video.py
|
skratchdot/media-tools
|
bca0c683fb637aeefda1c49454a118f809047d97
|
[
"MIT"
] | 1
|
2020-04-29T00:00:14.000Z
|
2021-07-09T14:24:19.000Z
|
audio_to_video.py
|
skratchdot/media-tools
|
bca0c683fb637aeefda1c49454a118f809047d97
|
[
"MIT"
] | 3
|
2020-04-27T15:36:36.000Z
|
2021-03-29T17:52:35.000Z
|
# -*- coding: utf-8 -*-
# Attempt to convert audio to video by visualizing audio
import argparse
from lib.audio_utils import *
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
import librosa
import os
import numpy as np
from pprint import pprint
import sys
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="media/sample/bird.wav", help="Input file pattern; can be an audio file, a .csv file, or a glob string")
parser.add_argument('-dir', dest="MEDIA_DIRECTORY", default="media/downloads/", help="Input dir")
parser.add_argument('-width', dest="WIDTH", default=480, type=int, help="Video width")
parser.add_argument('-height', dest="HEIGHT", default=320, type=int, help="Video height")
parser.add_argument('-fps', dest="FPS", default=30, type=int, help="Output video frames per second")
parser.add_argument('-out', dest="OUTPUT_DIR", default="output/%s.mp4", help="Video output file pattern")
parser.add_argument('-threads', dest="THREADS", default=1, type=int, help="Amount of parallel processes")
parser.add_argument('-overwrite', dest="OVERWRITE", action="store_true", help="Overwrite existing data?")
a = parser.parse_args()
# Make sure output dirs exist
makeDirectories(a.OUTPUT_DIR)
# Read files
fieldNames, files, fileCount = getFilesFromString(a)
# Check for valid audio
if "duration" in fieldNames and "hasAudio" in fieldNames:
files = filterWhere(files, [("duration", 0, ">"), ("hasAudio", 0, ">")])
fileCount = len(files)
print("Found %s rows after filtering" % fileCount)
| 40.820513
| 158
| 0.736809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 664
| 0.417085
|
b07d0af1d5f8010ec7eddc7c33bb9c1c0be29019
| 982
|
py
|
Python
|
server/api/login.py
|
ONSdigital/nsdc
|
5117189c28f923adca2cd7173673d798fb9232c2
|
[
"MIT"
] | 4
|
2016-12-19T11:26:38.000Z
|
2017-01-27T09:51:48.000Z
|
server/api/login.py
|
ONSdigital/nsdc
|
5117189c28f923adca2cd7173673d798fb9232c2
|
[
"MIT"
] | 6
|
2017-01-04T08:21:49.000Z
|
2017-01-18T09:33:59.000Z
|
server/api/login.py
|
ONSdigital/nsdc
|
5117189c28f923adca2cd7173673d798fb9232c2
|
[
"MIT"
] | 2
|
2017-01-12T14:46:00.000Z
|
2021-04-11T08:20:45.000Z
|
from flask import jsonify, abort
from flask_restful import reqparse, Resource
from config import db
from data.user import UserData
from datetime import datetime, timedelta
from data.user_session import UserSessionData
parser = reqparse.RequestParser()
parser.add_argument('username')
parser.add_argument('password')
class Login(Resource):
def post(self):
request_json = parser.parse_args()
username = request_json['username']
password = request_json['password']
user = UserData.query.filter(
UserData.username == username,
UserData.password == password,
UserData.status == 'active'
).first()
if not user:
return abort(400)
# create new user session and return session_id
user_session = UserSessionData(user.id, datetime.now() + timedelta(days=10))
db.session.add(user_session)
db.session.commit()
return jsonify(user_session.serialize())
| 31.677419
| 84
| 0.680244
| 662
| 0.674134
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.096741
|
b07eaf9c823c7f4577dd939bb64e4ed246d82116
| 1,775
|
py
|
Python
|
tests/test_main.py
|
openalto/alto
|
294f4e1d45d5e3f0cc476a2f0cbb85164c7d32ae
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
openalto/alto
|
294f4e1d45d5e3f0cc476a2f0cbb85164c7d32ae
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
openalto/alto
|
294f4e1d45d5e3f0cc476a2f0cbb85164c7d32ae
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2021 OpenALTO Community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# - Jensen Zhang <jingxuan.n.zhang@gmail.com>
import pytest
from alto.main import fib, main
__author__ = "OpenALTO"
__copyright__ = "OpenALTO"
__license__ = "MIT"
def test_fib():
"""API Tests"""
assert fib(1) == 1
assert fib(2) == 1
assert fib(7) == 13
with pytest.raises(AssertionError):
fib(-10)
def test_main(capsys):
"""CLI Tests"""
# capsys is a pytest fixture that allows asserts against stdout/stderr
# https://docs.pytest.org/en/stable/capture.html
main(["7"])
captured = capsys.readouterr()
assert "The 7-th Fibonacci number is 13" in captured.out
| 34.134615
| 80
| 0.729014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,389
| 0.782535
|
b0808b108456f6e159e093505f69f5f9b21e00dc
| 2,501
|
py
|
Python
|
tradedate.py
|
Anton-Mu/finance_sentiment_analysis
|
e319073646f8b11a3f6b5140137a7f0205918c19
|
[
"MIT"
] | null | null | null |
tradedate.py
|
Anton-Mu/finance_sentiment_analysis
|
e319073646f8b11a3f6b5140137a7f0205918c19
|
[
"MIT"
] | null | null | null |
tradedate.py
|
Anton-Mu/finance_sentiment_analysis
|
e319073646f8b11a3f6b5140137a7f0205918c19
|
[
"MIT"
] | 1
|
2022-02-08T06:11:51.000Z
|
2022-02-08T06:11:51.000Z
|
import re
import requests
import json
# Fetch calendar data from Baidu's PHP API
def catch_url_from_baidu(calculation_year, month):
header = {
"Content-Type": "application/json;charset=UTF-8"
}
param = {
"query": str(calcultaion_year) + "年" + month + "月",
"resource_id": "39043",
"t": "1604395059555",
"ie": "utf8",
"oe": "gbk",
"format": "json",
"tn": "wisetpl",
"cb": ""
}
# Captured from the calendar API behind the calendar widget on Baidu's search page; results can be cross-checked against that page
r = requests.get(url="https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php",
headers=header, params=param).text
month_data = json.loads(r)["data"][0]["almanac"]
work_day = []
for one in month_data:
if (one["cnDay"] != '日' and one["cnDay"] != '六'
and ('status' not in one)):
work_day.append(one)
work_days = output_info(work_day)
return work_days
# Output formatting; could be changed to emit INSERT statements instead
def output_info(work_day):
work_days = []
for one in work_day:
date = one["year"] + '-' + one["month"] + '-' + one["day"]
work_days.append(date)
return work_days
# Fetch the full-year trading calendar first, then slice out the requested range
def trade_date(start_year, start_month, start_day, end_year, end_month, end_day):
# Only reliable up to the current year: the State Council publishes next year's holiday plan each December, so results for the following year are wrong.
# E.g. on 2020-11-04 the 2021 plan was not yet published, so querying 2021 New Year's Day did not mark it as a holiday.
tradedates = []
for year in range(start_year, end_year + 1):
calculation_year = year
# The API returns the previous, current, and next month for each query, so months 2, 5, 8 and 11 cover the whole year (e.g. querying May returns April, May, and June).
calculation_month = ["2", "5", "8", "11"]
for one_month in calculation_month:
work_days = catch_url_from_baidu(calculation_year, one_month)
for work_day in work_days:
tradedates.append(work_day)
start_date = str(start_year) + "-" + str(start_month) + "-" + str(start_day)
end_date = str(end_year) + "-" + str(end_month) + "-" + str(end_day)
for i in range(len(tradedates)):
if start_date == tradedates[i]:
start_num = i
elif end_date == tradedates[i]:
end_num = i
try:
date_get = tradedates[start_num:end_num]
return date_get
except:
print("输入的数字不合规或不在工作日范围内。")
print("起始日期应在如下日期中:", tradedates)
# Enter start and end working days as year, month, day; single digits need no leading zero
if __name__ == '__main__':
date_get = trade_date(2021, 11, 1, 2022, 1, 28)
print(date_get)
| 30.13253
| 84
| 0.587765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,237
| 0.401754
|
b080d8609691c7b1384b852df88e37fc68699acb
| 1,377
|
py
|
Python
|
config/trainer_configs/model_config/mnist_conv_auto.py
|
jwspaeth/FAA-Project
|
afa9d3bec10deead48c4b17dff69df2e02691e41
|
[
"MIT"
] | null | null | null |
config/trainer_configs/model_config/mnist_conv_auto.py
|
jwspaeth/FAA-Project
|
afa9d3bec10deead48c4b17dff69df2e02691e41
|
[
"MIT"
] | 2
|
2019-10-20T00:42:40.000Z
|
2019-10-30T18:06:11.000Z
|
config/trainer_configs/model_config/mnist_conv_auto.py
|
jwspaeth/FAA-Project
|
afa9d3bec10deead48c4b17dff69df2e02691e41
|
[
"MIT"
] | null | null | null |
from yacs.config import CfgNode as CN
from lib.classes.dataset_classes.SubjectDataset import SubjectDataset
from config.model_helper import get_size_input
_C = CN()
_C.framework = "Keras"
_C.model_type = "ConvAutoencoder"
_C.build_type = "subclass"
# Define encoder parameters
_C.Encoder = CN()
_C.Encoder.n_filters_list = [32, 32, 16, 16, 8, 8]
_C.Encoder.kernel_size_list = [(3, 3), (2, 2), (3, 3), (2, 2), (3, 3), (2, 2)]
_C.Encoder.activation_type_list = ["elu", "elu", "elu", "elu", "elu", "sigmoid"]
_C.Encoder.n_strides_list = [(2, 2), (1, 1), (2, 2), (1, 1), (2, 2), (1, 1)]
_C.Encoder.padding_list = ["same", "same", "same", "same", "same", "same"]
# Define encoder parameters
_C.Decoder = CN()
_C.Decoder.n_filters_list = [8, 16, 32, 1]
_C.Decoder.kernel_size_list = [(2, 2), (3, 3), (3, 3), (2, 2)]
_C.Decoder.activation_type_list = ["elu", "elu", "elu", "elu", "elu", "sigmoid"]
_C.Decoder.n_strides_list = [(2, 2), (2, 2), (2, 2), (1,1)]
_C.Decoder.padding_list = ["same", "same", "same", "same", "same", "same"]
_C.Decoder.output_padding = [(1, 1), None, None, None]
# Define noise parameters
_C.Noise = CN()
_C.Noise.dummy_val = 0
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
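# Minimal usage sketch (the override file name is hypothetical):
#   cfg = get_cfg_defaults()
#   cfg.merge_from_file("experiment.yaml")
#   cfg.freeze()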
| 35.307692
| 80
| 0.659405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.30719
|
b080e30a075631abb8736fd8c977482b5bcb3a76
| 577
|
py
|
Python
|
src/graphviz/graphviz_5.py
|
lrodrin/TFG
|
b4199b5970deb7b7394ecac1c950acaa3cfde695
|
[
"MIT"
] | 2
|
2017-01-16T12:08:34.000Z
|
2017-01-16T13:00:12.000Z
|
src/graphviz/graphviz_5.py
|
lrodrin/TFG
|
b4199b5970deb7b7394ecac1c950acaa3cfde695
|
[
"MIT"
] | null | null | null |
src/graphviz/graphviz_5.py
|
lrodrin/TFG
|
b4199b5970deb7b7394ecac1c950acaa3cfde695
|
[
"MIT"
] | null | null | null |
import pydot
callgraph = pydot.Dot(graph_type='digraph', fontname="Verdana", compound='true')
cluster_foo = pydot.Cluster('foo', label='foo')
callgraph.add_subgraph(cluster_foo)
node_foo = pydot.Node('foo_method_1', label='method_1')
cluster_foo.add_node(node_foo)
cluster_bar = pydot.Cluster('bar', label='Component1')
callgraph.add_subgraph(cluster_bar)
node_bar = pydot.Node('bar_method_a')
cluster_bar.add_node(node_bar)
callgraph.add_edge(pydot.Edge(node_foo, node_bar, ltail=cluster_foo.get_name(), lhead=cluster_bar.get_name()))
callgraph.write('graphviz_5.dot')
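# Optional follow-up (an assumption: the Graphviz `dot` binary must be installed and
# on the PATH): pydot can also render an image directly via its write_<format> helpers.
callgraph.write_png('graphviz_5.png')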
| 28.85
| 110
| 0.785095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 105
| 0.181976
|
b0833cf5f5755370727e1adbc3f27b3a23bd86c3
| 12,544
|
py
|
Python
|
lfa.py
|
jwbensley/pyFRR
|
7f5792586b528c9f7c0d84f263eaae5f1c718661
|
[
"MIT"
] | 2
|
2021-08-12T06:57:59.000Z
|
2021-09-09T19:28:04.000Z
|
lfa.py
|
jwbensley/pyFRR
|
7f5792586b528c9f7c0d84f263eaae5f1c718661
|
[
"MIT"
] | 1
|
2021-11-19T16:32:54.000Z
|
2021-11-19T16:32:54.000Z
|
lfa.py
|
jwbensley/pyFRR
|
7f5792586b528c9f7c0d84f263eaae5f1c718661
|
[
"MIT"
] | null | null | null |
import networkx as nx
import os
from diagram import Diagram
from spf import spf
class lfa:
"""This class provides RFC5286 lfa calculations"""
def __init__(self, debug=0):
"""
Init the lfa class.
:param int debug: debug level, 0 is disabled.
:return None: __init__ shouldn't return anything
:rtype: None
"""
self.debug = debug
self.diagram = Diagram(debug=self.debug)
self.path_types = ["lfas_dstream", "lfas_link", "lfas_node"]
self.spf = spf(debug=self.debug)
def draw(self, graph, outdir, topology):
"""
Loop over the generated topologies and render them as diagram files.
:param networkx.Graph graph: NetworkX graph object
:param str outdir: string of the root output directory path
:param dict topology: topology paths dict
        :return None:
        :rtype: None
"""
self.diagram.gen_sub_dirs(graph, outdir, self.path_types, topology)
for src, dst in [
(s, d) for d in graph.nodes for s in graph.nodes if s != d
]:
for path_type in self.path_types:
if path_type not in topology[src][dst]:
continue
if len(topology[src][dst][path_type]) > 0:
frr_graph = graph.copy()
# Highlight the failed first-hop link(s) as red
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_link(
"red",
frr_graph,
path,
)
# Highlight the failed first-hop node(s) as red
if path_type == "lfas_dstream":
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_node(
"red",
frr_graph,
path,
)
elif path_type == "lfas_node":
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_node(
"red",
frr_graph,
path,
)
for path in topology[src][dst][path_type]:
frr_graph = self.diagram.highlight_links(
"green",
frr_graph,
path,
)
frr_graph = self.diagram.highlight_nodes(
"green",
frr_graph,
path,
)
frr_graph = self.diagram.highlight_src_dst(
"lightblue", dst, frr_graph, src
)
# Add labels to links showing their cost
frr_graph = self.diagram.label_link_weights(frr_graph)
self.diagram.gen_diagram(
(src + "_" + dst + "_" + path_type),
frr_graph,
os.path.join(outdir, src, path_type),
)
def gen_metric_paths(self, dst, graph, src):
"""
Return all lfa paths between the src and dst nodes in graph, based on
link metric (not hop count), which provide link, downstream, or node
protection, and return all alternate paths in a dict of lists keyed by
lfa path protection type.
:param str dst: Destination node name in graph
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node name in graph
:return lfas: dict of lists keyed by lfa type
:rtype: dict
"""
lfas = {"lfas_link": [], "lfas_dstream": [], "lfas_node": []}
if self.debug > 0:
print(f"Calculating for lfa paths from {src} to {dst}")
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
# There are no paths between this src,dst pair
if not s_d_paths:
return lfas
# Loop over each neighbour to check if each one is an lfa candidate
for nei in graph.neighbors(src):
            # If dst is directly connected
if nei == dst:
continue
if self.debug > 1:
print(f"Checking for lfa paths via {nei}")
# This nei is the next-hop for the current best path(s)
if nei in [path[1] for path in s_d_paths]:
if self.debug > 1:
print(
f"Rejected lfas via next-hop {nei}, it is a next-hop "
f"in the current best path(s): {s_d_paths}"
)
continue
"""
ECMP may be used meaning src has multiple equal cost best paths to
dst. And/or, nei may have multiple equal cost best paths to dst.
        Regardless of the number of paths, they are the same cost, so only
check the cost of the first best path of src against the first best
path of nei.
"""
nh = s_d_paths[0][1]
try:
n_d_cost = nx.dijkstra_path_length(graph, source=nei, target=dst)
n_s_cost = nx.dijkstra_path_length(graph, source=nei, target=src)
s_d_cost = nx.dijkstra_path_length(graph, source=src, target=dst)
n_nh_cost = nx.dijkstra_path_length(graph, source=nei, target=nh)
nh_d_cost = nx.dijkstra_path_length(graph, source=nh, target=dst)
except nx.exception.NetworkXNoPath:
# There isn't connectivity between the nodes; src, dst, nh, nei
continue
if self.debug > 1:
print(
f"{nei} -> {dst}: {n_d_cost}\n"
f"{nei} -> {src}: {n_s_cost}\n"
f"{src} -> {dst}: {s_d_cost}\n"
f"{nei} -> {nh}: {n_nh_cost}\n"
f"{nh} -> {dst}: {nh_d_cost}"
)
link_prot = False
down_prot = False
node_prot = False
"""
RFC5286:
Inequality 1: Loop-Free Criterion
A neighbor N of source S can provide a loop-free alternate (lfa)
toward destination D, that is link protecting, iff:
Distance_opt(N, D) < Distance_opt(N, S) + Distance_opt(S, D)
In this scenario, N's cost to D is lower than N's cost to S + S's
cost to D, so N must have an alternative path to D not via S, but
S and N might be sharing the same next-hop router, and N simply
has another link to that shared next-hop router, so it is link
        protecting only, for S's link to its next-hop.
"""
if n_d_cost < (n_s_cost + s_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < ({nei} to {src} + {src} to {dst}), "
f"{n_d_cost} < {n_s_cost+s_d_cost}"
)
# nei protects src against link failure to next-hop toward dst
link_prot = True
"""
RFC5286:
Inequality 2: Downstream Path Criterion
A neighbor N of source S can provide a loop-free alternate (lfa)
to downstream paths of D, which could be link or node protecting,
iff:
Distance_opt(N, D) < Distance_opt(S, D)
In this scenario, N's cost to D is lower than S's so N won't route
        back to S. This guarantees basic loop avoidance but it doesn't
        restrict the lfa path to being link protecting or node protecting.
This scenario is usually used to provide protection for a specific
downstream prefix of node D rather than S's next-hop node or link
toward D.
"""
if n_d_cost < (s_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < {src} to {dst}: "
f"{n_d_cost} < {n_s_cost}"
)
# nei protects src against failure of link or node toward dst
down_prot = True
"""
RFC5286:
Inequality 3: Criteria for a Node-Protecting Loop-Free Alternate
For an alternate next-hop N to protect against node failure of a
primary neighbor E for destination D, N must be loop-free with
respect to both E and D.
Distance_opt(N, D) < Distance_opt(N, E) + Distance_opt(E, D)
        In this scenario, neighbour N of source router S uses a different
        next-hop router toward destination D than router E, which is S's
next-hop router toward D. This provides node protection against S's
next-hop router E.
"""
if n_d_cost < (n_nh_cost + nh_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < ({nei} to {nh} + {nh} to {dst}), "
f"{n_d_cost} < {n_nh_cost+nh_d_cost}"
)
# nei protects src against next-hop node failure toward dst
node_prot = True
# nei might have multiple equal-cost best paths to dst
n_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=graph, src=nei
)
for n_d_path in n_d_paths:
if link_prot:
# Append src to n_d_path because it starts from nei
if n_d_path[0] != src:
n_d_path.insert(0, src)
lfas["lfas_link"].append(n_d_path)
if self.debug > 1:
print(
f"New link protecting lfa from {src} to "
f"{dst} via {nei}, protects against link "
f"{src}-{nh}: {n_d_path}"
)
if down_prot:
# Append src to n_d_path because it starts from nei
if n_d_path[0] != src:
n_d_path.insert(0, src)
lfas["lfas_dstream"].append(n_d_path)
if self.debug > 1:
print(f"New downstream protecting lfa: {n_d_path}")
if node_prot:
"""
In order to protect pre-failure ECMP best-paths, check that
this node protecting path doesn't overlap with any of the
ECMP next-hop nodes
"""
s_d_fhs = [path[1] for path in s_d_paths]
overlap = [
fh
for fh in s_d_fhs
for n_d_path in n_d_paths
if fh in n_d_path
]
if overlap:
if self.debug > 1:
print(
f"lfa path {n_d_path} is not node protecting "
f"against {overlap} from {src} to {dst}"
)
continue
lfas["lfas_node"].append(n_d_path)
if self.debug > 1:
print(
f"New node protecting path from {src} to {dst} "
f"via {nei}, protects against node {nh}: "
f"{n_d_path}"
)
return lfas
def init_topo(self, graph, topo):
"""
Create empty dict keys for all possible paths this class can generate
:return None:
:rtype: None
"""
for src in graph.nodes:
for dst in graph.nodes:
if src == dst:
continue
for path_type in self.path_types:
if path_type not in topo[src][dst]:
topo[src][dst][path_type] = []
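# A minimal usage sketch (illustrative only; the square topology below is hypothetical
# and assumes the repo's diagram and spf modules are importable): compute lfa paths
# from A to D over two unequal-cost routes.
if __name__ == "__main__":
    g = nx.Graph()
    g.add_weighted_edges_from(
        [("A", "B", 1), ("B", "D", 1), ("A", "C", 2), ("C", "D", 2)]
    )
    lfas = lfa().gen_metric_paths(dst="D", graph=g, src="A")
    print(lfas)  # expect link- and node-protecting alternates via C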
| 39.949045
| 81
| 0.479034
| 12,461
| 0.993383
| 0
| 0
| 0
| 0
| 0
| 0
| 5,743
| 0.457828
|
b0836df3831634d69eba10f383e0ec13d3b01887
| 4,523
|
py
|
Python
|
acapy_client/models/presentation_definition.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | 4
|
2021-08-05T09:20:34.000Z
|
2021-08-08T19:37:29.000Z
|
acapy_client/models/presentation_definition.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | null | null | null |
acapy_client/models/presentation_definition.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | 2
|
2021-08-12T18:18:45.000Z
|
2021-08-14T13:22:28.000Z
|
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.claim_format import ClaimFormat
from ..models.input_descriptors import InputDescriptors
from ..models.submission_requirements import SubmissionRequirements
from ..types import UNSET, Unset
T = TypeVar("T", bound="PresentationDefinition")
@attr.s(auto_attribs=True)
class PresentationDefinition:
""" """
format_: Union[Unset, ClaimFormat] = UNSET
id: Union[Unset, str] = UNSET
input_descriptors: Union[Unset, List[InputDescriptors]] = UNSET
name: Union[Unset, str] = UNSET
purpose: Union[Unset, str] = UNSET
submission_requirements: Union[Unset, List[SubmissionRequirements]] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
format_: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.format_, Unset):
format_ = self.format_.to_dict()
id = self.id
input_descriptors: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.input_descriptors, Unset):
input_descriptors = []
for input_descriptors_item_data in self.input_descriptors:
input_descriptors_item = input_descriptors_item_data.to_dict()
input_descriptors.append(input_descriptors_item)
name = self.name
purpose = self.purpose
submission_requirements: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.submission_requirements, Unset):
submission_requirements = []
for submission_requirements_item_data in self.submission_requirements:
submission_requirements_item = submission_requirements_item_data.to_dict()
submission_requirements.append(submission_requirements_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if format_ is not UNSET:
field_dict["format"] = format_
if id is not UNSET:
field_dict["id"] = id
if input_descriptors is not UNSET:
field_dict["input_descriptors"] = input_descriptors
if name is not UNSET:
field_dict["name"] = name
if purpose is not UNSET:
field_dict["purpose"] = purpose
if submission_requirements is not UNSET:
field_dict["submission_requirements"] = submission_requirements
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
_format_ = d.pop("format", UNSET)
format_: Union[Unset, ClaimFormat]
if isinstance(_format_, Unset):
format_ = UNSET
else:
format_ = ClaimFormat.from_dict(_format_)
id = d.pop("id", UNSET)
input_descriptors = []
_input_descriptors = d.pop("input_descriptors", UNSET)
for input_descriptors_item_data in _input_descriptors or []:
input_descriptors_item = InputDescriptors.from_dict(input_descriptors_item_data)
input_descriptors.append(input_descriptors_item)
name = d.pop("name", UNSET)
purpose = d.pop("purpose", UNSET)
submission_requirements = []
_submission_requirements = d.pop("submission_requirements", UNSET)
for submission_requirements_item_data in _submission_requirements or []:
submission_requirements_item = SubmissionRequirements.from_dict(submission_requirements_item_data)
submission_requirements.append(submission_requirements_item)
presentation_definition = cls(
format_=format_,
id=id,
input_descriptors=input_descriptors,
name=name,
purpose=purpose,
submission_requirements=submission_requirements,
)
presentation_definition.additional_properties = d
return presentation_definition
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
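# A minimal round-trip sketch (the field values are hypothetical): keys the model does
# not know about survive via additional_properties, and to_dict() skips UNSET fields.
if __name__ == "__main__":
    source = {"id": "pd-1", "name": "degree check", "x-vendor-note": "kept as-is"}
    pd = PresentationDefinition.from_dict(source)
    assert pd["x-vendor-note"] == "kept as-is"
    print(pd.to_dict())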
| 36.475806
| 110
| 0.668804
| 4,169
| 0.921733
| 0
| 0
| 4,196
| 0.927703
| 0
| 0
| 176
| 0.038912
|
b085477d012b84ae2bc4a7c010866eb15378b909
| 12,344
|
py
|
Python
|
aiodata/server.py
|
Exahilosys/aiodata
|
b4e475eac055f5f391219ae8b893a3e508b51fd8
|
[
"MIT"
] | 1
|
2021-11-03T13:06:21.000Z
|
2021-11-03T13:06:21.000Z
|
aiodata/server.py
|
Exahilosys/aiodata
|
b4e475eac055f5f391219ae8b893a3e508b51fd8
|
[
"MIT"
] | null | null | null |
aiodata/server.py
|
Exahilosys/aiodata
|
b4e475eac055f5f391219ae8b893a3e508b51fd8
|
[
"MIT"
] | null | null | null |
"""
Launch a proxy for transforming `/paths/like/these` to PostgREST filters.
Usage:
aiodata -h | --help
aiodata <file> [--host=<str>] [--port=<int>] [--query=<str>] [--state=<str>]
aiodata [--db-uri=<uri>] [--pr-uri=<uri>] [--host=<str>] [--port=<int>] [--schema=<str>] [--secret=<str>] [--query=<str>] [--state=<str>]
Options:
-h --help Show this screen.
file Path to the `.conf` file for PostgREST.
--db-uri=<uri> Uri to the PostgreSQL database. [default: postgres://admin@localhost/postgres]
--pr-uri=<uri> Uri to the PostgREST server. [default: http://localhost:3000]
--host=<str> Host to launch the proxy at. [default: localhost]
--port=<int> Port to launch the proxy at. [default: 4000]
--schema=<str> The exposed schema to describe. [default: api]
--secret=<str> Authenticates websocket tokens (claims dont matter).
--query=<str> Routing path to expose queries at. [default: /query]
--state=<str> Routing path to expose websockets at if applicable. [default: /state]
Queries are proxy-adjusted requests whose paths get transformed to filters.
For example, `/table/val1/val2` turns into `/table?clm1=eq.val1&clm2=eq.val2`.
- There is no way to specify the returned columns.
- All responses use the `Prefer: return=representation` header.
- Binary values are not supported. Convert to base64 in the database.
Websocket connections are initiated through `/state`.
Authorization is only enforced if `secret` is present. Claims are irrelevant.
Json data is sent upon any successful POST, PATCH or DELETE.
The payload itself is a 4-item array:
1: Name of the request method.
2: Name of the affected table.
3: Query used for this operation, eg {"clm1": "val1", "clm2": "val2"}.
4: The entries returned from the PostgREST response.
Send a `SIGUSR1` signal to reload the schema upon changes.
"""
import asyncio
import asyncpg
import aiohttp
import aiohttp.web
import yarl
import os
import aiofiles
import collections
import itertools
import jwt
import signal
import json
import warnings
import sys
import docopt
import configparser
import io
__all__ = ()
def connect(uri):
return asyncpg.create_pool(
host = uri.host,
port = uri.port,
user = uri.user,
password = uri.password,
database = uri.parts[1]
)
_NOTIFY = {'POST', 'PATCH', 'DELETE'}
_HDRS_PASS = {'Authorization', 'Range', 'Content-Type'}
_HDRS_SKIP = {'Content-Type'}
_anon = object()
class Server:
"""
Main means of launching the server proxy.
:param asyncpg.pool.Pool pool:
The connection pool.
:param str origin:
The PostgreSQL database uri.
:param str target:
The address to connect to.
:param str schema:
The schema exposed by PostgREST.
"""
__slots__ = ('_pool', '_session', '_origin', '_schema', '_script',
'_details', '_primaries', '_secret', '_websockets', '_ready')
path = '/{steps:.+}'
def __init__(self, pool, origin, schema, secret = None):
self._pool = pool
self._session = None
self._origin = origin
self._schema = schema
self._script = None
self._details = None
self._primaries = None
self._secret = secret
self._websockets = collections.defaultdict(list)
self._ready = asyncio.Event()
@property
def details(self):
return self._details
@property
def ready(self):
return self._ready
def _resolve_path(self, path):
"""
Get query and tables.
"""
(table, *values) = path.split('/')
names = self._primaries.get(table, ())
query = tuple(zip(names, values))
return (table, query)
def _resolve_query(self, query):
"""
Get PostgREST filter.
"""
return {name: f'eq.{value}' for (name, value) in query}
def _auth(self, headers):
token = headers.get('Authorization')
if self._secret and token:
            token = token.split(' ')[-1]  # strip the "Bearer " prefix
claims = jwt.decode(token, self._secret)
return claims['role']
return _anon
async def query(self, request):
"""
Handle requests to querying the database.
"""
await self._ready.wait()
method = request.method
headers = request.headers.copy()
for key in tuple(headers.keys()):
if key in _HDRS_PASS:
continue
del headers[key]
headers['Prefer'] = 'return=representation'
path = request.match_info['steps']
(table, query) = self._resolve_path(path)
params = self._resolve_query(query)
uri = self._origin.with_path(table)
data = request.content
response = await self._session.request(
method,
uri,
params = params,
headers = headers,
data = data
)
if 200 <= response.status <= 201 and method in _NOTIFY:
entries = await response.json()
try:
(names, values) = zip(*query)
except ValueError:
values = ()
payload = json.dumps((method, table, values, entries))
apply = lambda websocket: websocket.send_str(payload)
try:
role = self._auth(headers)
except jwt.InvalidSignatureError:
warnings.warn('Secret could not validate accepted token.')
else:
websockets = self._websockets[role]
await asyncio.gather(*map(apply, websockets))
data = json.dumps(entries).encode()
else:
data = response.content
response = aiohttp.web.Response(
body = data,
headers = response.headers,
status = response.status,
)
response.enable_compression()
response.enable_chunked_encoding()
return response
async def state(self, request, id = None):
"""
Handle requests for connecting to the database.
"""
try:
role = self._auth(request.headers)
except jwt.InvalidSignatureError:
raise aiohttp.web.HTTPUnauthorized(reason = 'Invalid token.')
websockets = self._websockets[role]
websocket = aiohttp.web.WebSocketResponse(heartbeat = 30)
await websocket.prepare(request)
websockets.append(websocket)
try:
async for message in websocket:
pass # receiving does nothing
finally:
websockets.remove(websocket)
return websocket
async def describe(self):
"""
Create the schema description.
"""
self._ready.clear()
entries = await self._pool.fetch(self._script)
details = collections.defaultdict(dict)
primaries = collections.defaultdict(list)
for entry in map(dict, entries):
table = entry.pop('table')
field = entry.pop('field')
details[table][field] = entry
if entry['main']:
primaries[table].append(field)
self._details = dict(details)
self._primaries = dict(primaries)
self._ready.set()
async def _load(self, name = 'schema.psql'):
"""
Get the description script.
"""
path = os.path.realpath(__file__)
directory = os.path.dirname(path)
path = os.path.join(directory, name)
async with aiofiles.open(path) as file:
template = await file.read()
self._script = template.format(self._schema)
async def _setup(self):
self._session = aiohttp.ClientSession(skip_auto_headers = _HDRS_SKIP)
async def start(self):
"""
Start the client.
"""
await self._load()
await self._setup()
await self.describe()
async def stop(self):
"""
Stop the client.
"""
await self._session.close()
apply = lambda websocket: websocket.close()
websockets = itertools.chain.from_iterable(self._websockets.values())
await asyncio.gather(*map(apply, websockets))
self._websockets.clear()
async def make(pool,
uri ,
schema = 'api',
secret = None,
query = '/query',
state = '/state'):
routes = aiohttp.web.RouteTableDef()
server = Server(pool, uri, schema, secret = secret)
path = query + server.path
for verb in ('GET', 'POST', 'PATCH', 'DELETE'):
routes.route(verb, path)(server.query)
async def handle(request):
await server.ready.wait()
return aiohttp.web.json_response(server.details)
routes.route('GET', '/')(handle)
routes.route('GET', state)(server.state)
return (routes, server)
async def main(app, db_uri, pr_uri, host, port, **options):
"""
Start the proxy.
:param str db_uri:
URL for the PostgreSQL database.
:param str pr_uri:
URL for the PostgREST server.
:param str host:
Host to launch the proxy at.
:param int port:
Port to launch the proxy at.
:param str schema:
The exposed schema.
:param str secret:
Used for authenticating websocket tokens and use their ``role`` claim.
:param str query:
The path to expose queries at.
:param str state:
The path to expose websockets at if applicable.
"""
loop = asyncio.get_event_loop()
db_uri = yarl.URL(db_uri)
pool = await connect(db_uri)
pr_uri = yarl.URL(pr_uri)
(routes, server) = await make(pool, pr_uri, **options)
app.router.add_routes(routes)
reload = lambda: asyncio.ensure_future(server.describe())
loop.add_signal_handler(signal.SIGUSR1, reload)
await server.start()
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, host, port)
await site.start()
try:
await loop.create_future()
except asyncio.CancelledError:
pass
await server.stop()
await site.stop()
await runner.cleanup()
def serve(env_prefix = 'AIODT_'):
"""
Console functionality.
"""
args = docopt.docopt(__doc__, argv = sys.argv[1:])
def geta(key):
try:
conkey = key.lstrip('-').replace('-', '_').upper()
return os.environ[env_prefix + conkey]
except KeyError:
pass
return args[key]
pr_uri = yarl.URL(geta('--pr-uri'))
path = args['<file>']
if path:
config = configparser.ConfigParser()
with open(path) as file:
data = file.read()
head = '_'
data = f'[{head}]\n{data}'
config.read_string(data)
config = config[head]
def getf(key, default = None):
try:
value = config[key]
except KeyError:
return default
return value.strip('"')
db_uri = getf('db-uri')
schema = getf('db-schema')
secret = getf('jwt-secret')
host = getf('server-host', None)
if host:
pr_uri = pr_uri.with_host(host)
port = getf('server-port', None)
if port:
pr_uri = pr_uri.with_port(int(port))
else:
db_uri = geta('--db-uri')
schema = geta('--schema')
secret = geta('--secret')
host = geta('--host')
port = geta('--port')
port = int(port)
query = geta('--query')
state = geta('--state')
loop = asyncio.get_event_loop()
app = aiohttp.web.Application()
task = loop.create_task(
main(
app, db_uri, pr_uri, host, port,
schema = schema, secret = secret,
query = query, state = state
)
)
try:
loop.run_until_complete(task)
except KeyboardInterrupt:
pass
task.cancel()
try:
loop.run_until_complete(task)
except asyncio.CancelledError:
pass
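# A minimal client sketch (hypothetical host/port; assumes the proxy above is already
# serving): listen on the /state websocket for the 4-item change payloads documented
# in the module docstring.
async def _example_client():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect('http://localhost:4000/state') as ws:
            async for message in ws:
                print('change event:', message.data)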
| 26.660907
| 141
| 0.577041
| 5,842
| 0.473266
| 0
| 0
| 118
| 0.009559
| 6,031
| 0.488577
| 4,018
| 0.325502
|
b08769fd169d1bb3f57d9800c62bb6c63602cc3c
| 3,685
|
py
|
Python
|
kappmax_prediction_scripts/media.py
|
coltonlloyd/keff_mapping
|
eea8450561232d32c049455948d88917bf56ddd8
|
[
"MIT"
] | null | null | null |
kappmax_prediction_scripts/media.py
|
coltonlloyd/keff_mapping
|
eea8450561232d32c049455948d88917bf56ddd8
|
[
"MIT"
] | null | null | null |
kappmax_prediction_scripts/media.py
|
coltonlloyd/keff_mapping
|
eea8450561232d32c049455948d88917bf56ddd8
|
[
"MIT"
] | 2
|
2019-02-05T15:24:29.000Z
|
2020-04-16T11:06:06.000Z
|
LB_media = {
"EX_ni2_e": -1000,
"EX_dcyt_e": -1000,
"EX_hg2_e": -1000,
"EX_ins_e": -1000,
"EX_cd2_e": -1000,
"EX_so4_e": -1000,
"EX_uri_e": -1000,
"EX_tungs_e": -1000,
"EX_glu__L_e": -1000,
"EX_slnt_e": -1000,
"EX_trp__L_e": -1000,
"EX_dad__2_e": -1000,
"EX_mobd_e": -1000,
"EX_val__L_e": -1000,
"EX_cobalt2_e": -1000,
"EX_gln__L_e": -1000,
"EX_co2_e": -1000,
"EX_k_e": -1000,
"EX_cu2_e": -1000,
"EX_sel_e": -1000,
"EX_na1_e": -1000,
"EX_cl_e": -1000,
"EX_fe3_e": -1000,
"EX_arg__L_e": -1000,
"EX_pnto__R_e": -1000,
"EX_lys__L_e": -1000,
"EX_ala__L_e": -1000,
"EX_gal_e": -1000,
"EX_cbl1_e": -1000,
"EX_ser__L_e": -1000,
"EX_adn_e": -1000,
"EX_thr__L_e": -1000,
"EX_pi_e": -1000,
"EX_thymd_e": -1000,
"EX_mn2_e": -1000,
"EX_phe__L_e": -1000,
"EX_leu__L_e": -1000,
"EX_ura_e": -1000,
"EX_h_e": -100,
"EX_h2o_e": -100,
"EX_aso3_e": -1000,
"EX_hxan_e": -1000,
"EX_glc__D_e": -1000,
"EX_nac_e": -1000,
"EX_his__L_e": -1000,
"EX_o2_e": -1000,
"EX_pro__L_e": -1000,
"EX_mg2_e": -1000,
"EX_asp__L_e": -1000,
"EX_gly_e": -1000,
"EX_cys__L_e": -1000,
"EX_fe2_e": -1000,
"EX_ca2_e": -1000,
"EX_tyr__L_e": -1000,
"EX_zn2_e": -1000,
"EX_fru_e": -1000,
"EX_met__L_e": -1000,
"EX_ile__L_e": -1000
}
aas = {"EX_glyc_e": -1000,
"EX_asp__L_e": -1000,
"EX_gly_e": -1000,
"EX_cys__L_e": -1000,
"EX_met__L_e": -1000,
"EX_ile__L_e": -1000,
"EX_tyr__L_e": -1000,
"EX_pro__L_e": -1000,
"EX_his__L_e": -1000,
"EX_phe__L_e": -1000,
"EX_leu__L_e": -1000,
"EX_ser__L_e": -1000,
"EX_arg__L_e": -1000,
"EX_lys__L_e": -1000,
"EX_ala__L_e": -1000,
"EX_gln__L_e": -1000,
"EX_glu__L_e": -1000,
"EX_trp__L_e": -1000,
"EX_val__L_e": -1000,
"EX_thr__L_e": -1000,
"EX_asn__L_e": -1000
}
# Mapping of Aerbersold media conditions to exchange reaction
media_dict = {'Glucose': 'EX_glc__D_e', 'Acetate': 'EX_ac_e',
'Pyruvate': 'EX_pyr_e', 'Glycerol': 'EX_glyc_e',
'Fumarate': 'EX_fum_e', 'Succinate': 'EX_succ_e',
'LB': '', 'Glucosamine': 'EX_gam_e',
'Mannose': 'EX_man_e', 'Xylose': 'EX_xyl__D_e',
'Fructose': 'EX_fru_e', 'Glycerol + AA': '',
'Galactose': 'EX_gal_e', 'Gluconate': 'EX_glcn_e'}
map_media_to_old_me_df = {
'Glucose': 'base', 'Acetate': 'Acetate', 'Fumarate': 'Fumarate',
'Glycerol': 'Glycerol', 'Pyruvate': 'Pyruvate', 'Succinate': 'Succinate'
}
def set_media(model, name, value=-1000):
model.reactions.EX_glc__D_e.lower_bound = 0
reactions_changed = []
if name in model.reactions:
model.reactions.get_by_id(name).lower_bound = value
reactions_changed.append(name)
elif name == 'Glycerol + AA':
for r, v in aas.items():
model_rxn = model.reactions.get_by_id(r)
if model_rxn.lower_bound == 0:
model_rxn.lower_bound = v
reactions_changed.append(r)
elif name == 'LB':
for r, v in LB_media.items():
model_rxn = model.reactions.get_by_id(r)
if model_rxn.lower_bound == 0:
model_rxn.lower_bound = v
reactions_changed.append(r)
elif name in media_dict:
model.reactions.get_by_id(media_dict[name]).lower_bound = value
reactions_changed.append(media_dict[name])
else:
        raise UserWarning('Media %s not valid' % name)
return reactions_changed
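# A minimal usage sketch (hedged: `model` is a hypothetical COBRA-style model object
# exposing the exchange reactions named above):
# changed = set_media(model, 'Glycerol + AA')
# print('opened %d exchange reactions' % len(changed))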
| 29.246032
| 76
| 0.566079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,407
| 0.381818
|
b0889dcb4a9fc459f520ba21dc747165ea106830
| 838
|
py
|
Python
|
urldownload/DataOutput.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | 1
|
2019-07-09T09:59:39.000Z
|
2019-07-09T09:59:39.000Z
|
urldownload/DataOutput.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | null | null | null |
urldownload/DataOutput.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
import codecs
class DataOutput(object):
def __init__(self):
self.datas = []
def store_data(self, data):
if data is None:
return
self.datas.append(data)
def output_html(self, path, data):
fout = codecs.open(path, 'w+', encoding='utf-8')
fout.write('<html>')
fout.write('<body>')
for t_data in data:
fout.write(str(t_data))
fout.write('</body>')
fout.write('</html>')
fout.close()
    # def output_html(self, **kwargs):
    #     fout = codecs.open('baike.html', 'w', encoding='utf-8')
    #     fout.write('<html>')
    #     fout.write('<body>')
    #     for data in self.datas:
    #         fout.write(data)
    #     fout.write('</body>')
    #     fout.write('</html>')
    #     fout.close()
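# A minimal usage sketch (the stored record is hypothetical):
if __name__ == '__main__':
    out = DataOutput()
    out.store_data('<p>example entry</p>')
    out.output_html('baike.html', out.datas)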
| 26.1875
| 65
| 0.516706
| 808
| 0.9642
| 0
| 0
| 0
| 0
| 0
| 0
| 338
| 0.403341
|
b089aca6a94743358087e5f0bbece897c4de4d3c
| 2,600
|
py
|
Python
|
ga4gh/htsget/compliance/schema_validator.py
|
jmtcsngr/htsget-compliance
|
35521f0b1361307f887b22c559823f1ba9dd8052
|
[
"Apache-2.0"
] | 1
|
2020-01-22T17:11:59.000Z
|
2020-01-22T17:11:59.000Z
|
ga4gh/htsget/compliance/schema_validator.py
|
jmtcsngr/htsget-compliance
|
35521f0b1361307f887b22c559823f1ba9dd8052
|
[
"Apache-2.0"
] | null | null | null |
ga4gh/htsget/compliance/schema_validator.py
|
jmtcsngr/htsget-compliance
|
35521f0b1361307f887b22c559823f1ba9dd8052
|
[
"Apache-2.0"
] | 2
|
2020-02-06T10:29:16.000Z
|
2020-02-10T09:59:54.000Z
|
# -*- coding: utf-8 -*-
"""Validates htsget response matches JSON schema"""
import inspect
import json
import os
from jsonschema import validate
from jsonschema import RefResolver
from jsonschema.exceptions import ValidationError
from ga4gh.htsget.compliance.config import constants as c
class SchemaValidator(object):
"""Validates htsget response matches JSON schema
Attributes:
SUCCESS (int): constant. indicates successful validation
FAILURE (int): constant. indicates unsuccessful validation
schema_file (str): filename containing JSON schema
schema_dir (str): path to local dir containing htsget JSON schemas
schema_path (str): full path to htsget response JSON schema file
resolver (RefResolver): resolves external references to the schema dir
schema_json (dict): loaded htsget response JSON schema
"""
SUCCESS = 1
FAILURE = -1
def __init__(self):
"""Instantiates a SchemaValidator object"""
self.schema_file = c.SCHEMA_HTSGET_RESPONSE
self.schema_dir = os.path.join(
os.path.dirname(
os.path.dirname(inspect.getmodule(self).__file__)
),
"schemas"
)
self.schema_path = os.path.join(self.schema_dir, self.schema_file)
self.resolver = RefResolver('file://{}/'.format(self.schema_dir), None)
        with open(self.schema_path, 'r') as schema_fp:
            self.schema_json = json.load(schema_fp)
def validate_instance(self, instance_json):
"""Validate a JSON object/response against the htsget response schema
Args:
instance_json (dict): loaded JSON object to validate
Returns:
dict: contains success/failure of validation, and message
"""
# setup validation object
# test status initialized as passing
validation_result = {
"status": SchemaValidator.SUCCESS,
"exception_class": "",
"message": ""
}
try:
# api method to compare json instance to the schema
validate(instance=instance_json, schema=self.schema_json,
resolver=self.resolver)
except ValidationError as e:
# if the api method raises an error, the result dictionary set
# to include failure status and error message
validation_result["status"] = SchemaValidator.FAILURE
validation_result["exception_class"] = str(e.__class__.__name__)
validation_result["message"] = e.message
return validation_result
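# A minimal usage sketch (the instance below is hypothetical and may well fail
# validation against the bundled htsget schema; the point is the result dict shape):
if __name__ == '__main__':
    validator = SchemaValidator()
    result = validator.validate_instance({"htsget": {"format": "BAM", "urls": []}})
    print(result["status"], result["exception_class"], result["message"])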
| 35.616438
| 79
| 0.65
| 2,309
| 0.888077
| 0
| 0
| 0
| 0
| 0
| 0
| 1,243
| 0.478077
|
b08a24bef63250d06b5557da066625b5924bd251
| 178
|
py
|
Python
|
17. Chapter_/xmlrpc_client.py
|
Mikma03/Python_Bill_Lubanovic_BookCodes
|
8b5b228bb500a08af645a1db6f7c5f33ef5f0512
|
[
"MIT"
] | null | null | null |
17. Chapter_/xmlrpc_client.py
|
Mikma03/Python_Bill_Lubanovic_BookCodes
|
8b5b228bb500a08af645a1db6f7c5f33ef5f0512
|
[
"MIT"
] | null | null | null |
17. Chapter_/xmlrpc_client.py
|
Mikma03/Python_Bill_Lubanovic_BookCodes
|
8b5b228bb500a08af645a1db6f7c5f33ef5f0512
|
[
"MIT"
] | null | null | null |
import xmlrpc.client
proxy = xmlrpc.client.ServerProxy("http://localhost:6789/")
num = 7
result = proxy.double(num)
print("Dwukrotność liczby %s jest równa %s" % (num, result))
| 25.428571
| 60
| 0.724719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.353591
|
b08a5207d720024f5a7b224624903d987ddda152
| 172
|
py
|
Python
|
sails/scripts/s4ilsd.py
|
metrasynth/solar-sails
|
3a10774dad29d85834d3acb38171741b3a11ef91
|
[
"MIT"
] | 6
|
2016-11-22T14:32:55.000Z
|
2021-08-15T01:35:33.000Z
|
sails/scripts/s4ilsd.py
|
metrasynth/s4ils
|
efc061993d15ebe662b72ab8b3127f7f7ce2f66b
|
[
"MIT"
] | 2
|
2022-03-18T16:47:43.000Z
|
2022-03-18T16:47:44.000Z
|
sails/scripts/s4ilsd.py
|
metrasynth/s4ils
|
efc061993d15ebe662b72ab8b3127f7f7ce2f66b
|
[
"MIT"
] | 2
|
2019-07-09T23:44:08.000Z
|
2021-08-15T01:35:37.000Z
|
import begin
from sails.server import Server
@begin.start
def main():
"""Start a sailsd server."""
s = Server()
s.start_loop()
print('Stopping sailsd.')
| 14.333333
| 32
| 0.639535
| 0
| 0
| 0
| 0
| 123
| 0.715116
| 0
| 0
| 46
| 0.267442
|
b08a93c2485c9207b162b22abf4d7cb7a8947024
| 173
|
py
|
Python
|
bitwise/logic/__init__.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
bitwise/logic/__init__.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
bitwise/logic/__init__.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
from .AND import *
from .COMP import *
from .NAND import *
from .NOR import *
from .NOT import *
from .OR import *
from .PAR import *
from .XNOR import *
from .XOR import *
| 17.3
| 19
| 0.687861
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b08bef0d9eac560231bad41dab1d173a448b34ce
| 843
|
py
|
Python
|
Chapter03/function_enforcement.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 13
|
2018-06-21T01:44:49.000Z
|
2021-12-01T10:49:53.000Z
|
Chapter03/function_enforcement.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | null | null | null |
Chapter03/function_enforcement.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 6
|
2018-10-05T08:29:24.000Z
|
2022-01-11T14:49:50.000Z
|
def accepts(*types):
    def check_accepts(f):
        # Python 3: the code object lives on __code__ (func_code was Python 2 only)
        assert len(types) == f.__code__.co_argcount
        def new_f(*args, **kwds):
            for (a, t) in zip(args, types):
                assert isinstance(a, t), \
                       "arg %r does not match %s" % (a, t)
            return f(*args, **kwds)
        new_f.__name__ = f.__name__
        return new_f
    return check_accepts
def returns(rtype):
    def check_returns(f):
        def new_f(*args, **kwds):
            result = f(*args, **kwds)
            assert isinstance(result, rtype), \
                   "return value %r does not match %s" % (result, rtype)
            return result
        new_f.__name__ = f.__name__
        return new_f
    return check_returns
# Apply @accepts innermost so its argument-count assertion sees the real signature
@returns((int,float))
@accepts(int, (int,float))
def func(arg1, arg2):
return arg1 * arg2
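# Usage sketch: a valid call passes through; a mismatched argument type trips the
# decorator's assertion before the wrapped function runs.
if __name__ == '__main__':
    print(func(3, 2.5))  # 7.5
    try:
        func('3', 2)  # first argument is not an int
    except AssertionError as e:
        print('rejected:', e)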
| 30.107143
| 71
| 0.548043
| 0
| 0
| 0
| 0
| 93
| 0.11032
| 0
| 0
| 61
| 0.072361
|
b08c3106ba8836d69e6a9d54ebffc20973d03ab3
| 2,412
|
py
|
Python
|
tests/unit/test_CombineUtils.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_CombineUtils.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_CombineUtils.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | 1
|
2021-02-26T10:08:09.000Z
|
2021-02-26T10:08:09.000Z
|
"""Test classes defined within pbtranscript.CombineUtils."""
import unittest
import os.path as op
from pbcore.io import FastaReader, FastqReader
from pbtranscript.Utils import rmpath, mkdir
from pbtranscript.ClusterOptions import IceQuiverHQLQOptions
from pbtranscript.CombineUtils import CombineRunner
from test_setpath import DATA_DIR, OUT_DIR, SIV_DATA_DIR
class TEST_CombineUtils(unittest.TestCase):
"""Test functions of pbtranscript.CombineUtils."""
def setUp(self):
"""Define input and output file."""
pass
def test_runner(self):
"""Test CombineRunner."""
ipq_opts = IceQuiverHQLQOptions(qv_trim_5=100, qv_trim_3=30)
d = op.join(SIV_DATA_DIR, "test_tool_contract_chunks")
split_dirs = [op.join(d, b, "cluster_out") for b in
("0to1kb_part0", "1to2kb_part0", "2to3kb_part0", "3to4kb_part0", "4to5kb_part0")]
        print(split_dirs)
out_combined_dir = op.join(OUT_DIR, "test_CombineUtils", "combined_dir")
rmpath(out_combined_dir)
mkdir(out_combined_dir)
obj = CombineRunner(combined_dir=out_combined_dir,
sample_name="mysample",
split_dirs=split_dirs,
ipq_opts=ipq_opts)
obj.run()
expected_out_fns = (obj.all_hq_fa, obj.all_hq_fq, obj.all_lq_fa, obj.all_lq_fq,
obj.all_consensus_isoforms_fa,
obj.all_cluster_report_fn, obj.all_cluster_summary_fn)
self.assertTrue(all([op.exists(f) for f in expected_out_fns]))
expected_hq_isoforms = ['i1_HQ_mysample|c0/f2p16/1826', 'i2_HQ_mysample|c2/f9p14/2470',
'i2_HQ_mysample|c5/f7p19/2472', 'i2_HQ_mysample|c10/f8p16/2457',
'i2_HQ_mysample|c98/f2p10/2081', 'i2_HQ_mysample|c108/f23p28/2471']
self.assertEqual([r.name.split(' ')[0] for r in FastaReader(obj.all_hq_fa)], expected_hq_isoforms)
self.assertEqual([r.name.split(' ')[0] for r in FastqReader(obj.all_hq_fq)], expected_hq_isoforms)
expected_lq_isoforms_num = 73
self.assertEqual(len([r for r in FastaReader(obj.all_lq_fa)]), expected_lq_isoforms_num)
expected_consensus_isoforms_num = 79
self.assertEqual(len([r for r in FastaReader(obj.all_consensus_isoforms_fa)]), expected_consensus_isoforms_num)
| 44.666667
| 119
| 0.66791
| 2,046
| 0.848259
| 0
| 0
| 0
| 0
| 0
| 0
| 514
| 0.213101
|
b08de9297fed4fd2dc955f61769ed5126ab756ff
| 7,063
|
py
|
Python
|
rattlemediaplayer.py
|
simonjduff/rattle-media
|
10a15b47653fce1962a022f973cfb9adcfc8976d
|
[
"MIT"
] | null | null | null |
rattlemediaplayer.py
|
simonjduff/rattle-media
|
10a15b47653fce1962a022f973cfb9adcfc8976d
|
[
"MIT"
] | null | null | null |
rattlemediaplayer.py
|
simonjduff/rattle-media
|
10a15b47653fce1962a022f973cfb9adcfc8976d
|
[
"MIT"
] | null | null | null |
import config
from gmusicapi import Mobileclient
import logging
from gi.repository import Gst, GLib
from collections import deque
from gevent import Greenlet
import gevent
class PlayerStates:
Stopped = "Stopped"
Paused = "Paused"
Playing = "Playing"
class RattleMediaPlayer:
def __init__(self):
self._logger = logging.getLogger('rattlemedia')
Gst.init(None)
self._player = Gst.ElementFactory.make('playbin', None)
if not self._player:
raise Exception('Player is None')
self._player.set_state(Gst.State.NULL)
self._logger.info('Starting to watch for gstreamer signals')
Greenlet.spawn(self.watch_for_message)
def watch_for_message(self):
bus = self._player.get_bus()
if not bus:
raise Exception('Couldn\'t create bus')
# Ideally we'd be using signal_watch on bus to fire on an event basis
# but getting the GLib main loop to work with gevent has proved problematic
# Polling works, but isn't as elegant
while True:
message = bus.pop()
if message:
self._logger.debug('Message received: {0}'.format(message.type))
if message.type == Gst.MessageType.EOS:
self._logger.info('End of stream received')
self.end_of_stream_event_handler()
elif message.type == Gst.MessageType.STATE_CHANGED:
self._logger.debug('State changed {0}'.format(self._player.get_state(100)[1]))
if not message:
gevent.sleep(0.5)
def _set_state(self, state):
try:
if state == PlayerStates.Stopped:
self._player.set_state(Gst.State.NULL)
elif state == PlayerStates.Paused:
self._player.set_state(Gst.State.PAUSED)
elif state == PlayerStates.Playing:
self._player.set_state(Gst.State.PLAYING)
else:
raise Exception('Unknown state')
finally:
self.state_change_event_handler()
def get_state(self):
current_state = self._player.get_state(Gst.CLOCK_TIME_NONE)[1]
if current_state == Gst.State.NULL:
return PlayerStates.Stopped
elif current_state == Gst.State.PAUSED:
return PlayerStates.Paused
elif current_state == Gst.State.PLAYING:
return PlayerStates.Playing
else:
self._logger.error('GStreamer player in unknown state {0}'.format(current_state))
def play_track(self, track_url):
self._player.set_property('uri', track_url)
self._set_state(PlayerStates.Playing)
def stop(self):
self._set_state(PlayerStates.Stopped)
def pause(self):
self._set_state(PlayerStates.Paused)
def play(self):
self._set_state(PlayerStates.Playing)
# Override with function to call on end of stream
def end_of_stream_event_handler(self):
pass
# Override with function to call on state change
def state_change_event_handler(self):
pass
class ControllerState:
def __init__(self, controller, player):
self._player = player
self._controller = controller
self._logger = logging.getLogger('rattlemedia')
def __play_next_track(self):
self._logger.info('Playing')
try:
# This sucks a bit. Should state own the api?
track_url = self._controller._api.get_stream_url(self._controller._queue.popleft(), config.google_device_id)
self._player.play_track(track_url)
except IndexError:
self._logger.info('Queue empty. Stopping.')
self._player.stop()
finally:
self._controller.update_state()
def play(self):
self.__play_next_track()
def stop(self):
self._logger.info('Stopping')
self._player.stop()
def toggle(self):
pass
def next(self):
self.__play_next_track()
class ControllerStatePlaying(ControllerState):
def play(self):
pass
def toggle(self):
self._player.pause()
class ControllerStateStopped(ControllerState):
def stop(self):
pass
def toggle(self):
pass
class ControllerStatePaused(ControllerState):
def play(self):
self._player.play()
def toggle(self):
self.play()
class RattleMediaController:
_states = None
def __init__(self):
api = Mobileclient()
api.login(config.google_username, config.google_password, config.google_device_id)
self._api = api
self._logger = logging.getLogger('rattlemedia')
self._player = RattleMediaPlayer()
self._player.end_of_stream_event_handler = self.end_of_stream_event
self._player.state_change_event_handler = self.update_state
self._queue = deque([])
RattleMediaController._states = {PlayerStates.Paused: ControllerStatePaused(self, self._player),
PlayerStates.Stopped: ControllerStateStopped(self, self._player),
PlayerStates.Playing: ControllerStatePlaying(self, self._player),
'Unknown': ControllerState(self, self._player)}
self.state = ControllerState(self, self._player)
self.update_state()
def end_of_stream_event(self):
self._player.stop()
self.play()
def search(self, search_term):
self._logger.debug('Searching for {0}'.format(search_term))
return self._api.search_all_access(search_term)
def enqueue(self, song_id):
self._logger.info('Enqueuing {0}'.format(song_id))
self._queue.append(song_id)
def play(self):
self.state.play()
def stop(self):
self.state.stop()
self._queue.clear()
def toggle_playback(self):
self.state.toggle()
def next(self):
self.state.next()
def play_album(self, album_id):
self._logger.info('Playing album {0}'.format(album_id))
self.stop()
self.enqueue_album(album_id)
self.play()
def enqueue_album(self, album_id):
album = self._api.get_album_info(album_id)
tracks = album['tracks']
for track in tracks:
self._queue.append(track['nid'])
def update_state(self):
current_state = None
try:
current_state = self._player.get_state()
self._logger.debug('Switching state to {0}'.format(current_state))
self.state = self._states[current_state]
self._logger.info('Switched state to {0}'.format(self.state))
except KeyError:
            self._logger.warning('Switching to unknown state {0}'.format(current_state))
self.state = self._states['Unknown']
finally:
self.state_change_callback(current_state)
# Override with callback if required
def state_change_callback(self, new_state):
pass
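# A minimal wiring sketch (hedged: requires valid Google Music credentials in config
# and a working GStreamer install; the track id below is a placeholder):
# controller = RattleMediaController()
# controller.state_change_callback = lambda new_state: print('player is now', new_state)
# controller.enqueue('<track-nid>')
# controller.play()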
| 30.97807
| 120
| 0.626646
| 6,870
| 0.972675
| 0
| 0
| 0
| 0
| 0
| 0
| 843
| 0.119354
|
b08ef2d8e3033532d604bc5d0f9f77203768c894
| 16,116
|
py
|
Python
|
monitor_ed/ed_monitor.py
|
Andywang201605/astro_tools
|
e1ceaeaa17d1391cb2d7f1eb15f9bdeced42b534
|
[
"MIT"
] | null | null | null |
monitor_ed/ed_monitor.py
|
Andywang201605/astro_tools
|
e1ceaeaa17d1391cb2d7f1eb15f9bdeced42b534
|
[
"MIT"
] | null | null | null |
monitor_ed/ed_monitor.py
|
Andywang201605/astro_tools
|
e1ceaeaa17d1391cb2d7f1eb15f9bdeced42b534
|
[
"MIT"
] | 1
|
2021-02-15T06:00:47.000Z
|
2021-02-15T06:00:47.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 25 18:17:13 2021
@author: AndyWang
"""
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from datetime import datetime
from time import sleep
import pandas as pd
import numpy as np
import os
import subprocess
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
def _start_webdriver(browser, driverpath):
'''
Function to start a webdriver
Parameters
----------
browser : str
Type of the browser you want to open
driverpath : str
Path of the driver.
Returns
-------
selenium.webdriver
Webdriver object for further usage
'''
if browser.lower() == 'edge':
return webdriver.Edge(executable_path=driverpath)
elif browser.lower() == 'chrome':
return webdriver.Chrome(executable_path=driverpath)
else:
raise NotImplementedError(f'Code for {browser} is not implemented')
def _open_browser_cmd(port, cache_dir):
'''
Open chrome in debugging mode
'''
chrome_cmd = f'chrome.exe --remote-debugging-port={port} --user-data-dir="{cache_dir}"'
subprocess.Popen(chrome_cmd)
def _connect_selenium(driverpath, port, cache_dir):
'''
connect your browser to python
Returns
-------
driver: Selenium.webdriver object that is connected to your browser
'''
chrome_options = Options()
chrome_options.add_experimental_option("debuggerAddress", f"127.0.0.1:{port}")
driver = webdriver.Chrome(driverpath, options=chrome_options)
return driver
def _find_inputbox(driver, timeout=30):
'''
Find inputbox element in Ed analytics page
Parameters
----------
driver : selenium.webdriver
timeout : float/int, optional
        Timeout limit for finding the element. The default is 30.
Raises
------
TimeoutError
Returns
-------
inputbox : selenium.webdriver.remote.webelement.WebElement
Input box for searching.
'''
tstart = datetime.now()
while True:
# break out the loop if
# 1) find the element successfully
# 2) reach the time limit
try:
inputbox = driver.find_element_by_tag_name('input')
return inputbox
except:
tnow = datetime.now()
if (tnow - tstart).total_seconds() > timeout:
raise TimeoutError('Check out your connection!')
sleep(0.3)
def _search_tut(inputbox, tutcode):
'''
Searching tut in Ed analytics page
Parameters
----------
inputbox : selenium.webdriver.remote.webelement.WebElement
Webelement for input box
tutcode : str
tutorial for searching.
Returns
-------
None.
'''
inputbox.clear()
inputbox.send_keys(tutcode)
def _get_header_use(thtag):
'''
Get header attribute from usetag
Parameters
----------
thtag : bs4.element.Tag
Table header tag.
Returns
-------
str
header attribute.
'''
usetag = thtag.findAll('use')
if len(usetag) == 0:
return '#'
return usetag[0].attrs['xlink:href']
def _get_tdstatus(tdtag):
'''
Get table cell content or status (for questions)
Parameters
----------
tdtag : bs4.element.Tag
table cell tag.
Returns
-------
str
table cell content or status.
'''
text = tdtag.text
if text:
if text != '\u200b':
return text
if 'class' in tdtag.attrs:
cellclass = tdtag.attrs['class']
if len(cellclass) > 1:
return cellclass[1].split('-')[-1]
return ''
def _get_tdlink(tdtag):
atags = tdtag.findAll('a')
if len(atags) > 0:
return 'https://edstem.org{}'.format(atags[0].attrs['href'])
return 'N/A'
def _get_analytics_table(driver):
'''
Get analytics table from driver
Parameters
----------
driver : selenium.webdriver
Driver that opens Ed analytics page.
Returns
-------
analytics_df : pandas.DataFrame
DataFrame for analytics table.
colattrs : list
A list of column's attribute.
'''
soup = BeautifulSoup(driver.page_source, 'lxml')
table = soup.findAll('table', attrs={'class':"lesson-analytics-table"})[0]
### get header and body tag
thead = table.findAll('thead')[0]
tbody = table.findAll('tbody')[0]
### extract info from html to list
### (Note: pandas.read_html doesn't work for this case)
# header
header = []
colattrs = []
for thtag in thead.findAll('th'):
header.append(thtag.text.strip())
colattrs.append(_get_header_use(thtag))
# body
tablecells = []
tablehtmls = []
trtags = tbody.findAll('tr')
for trtag in trtags:
rowcells = []
rowhtmls = []
tdtags = trtag.findAll('td')
for tdtag in tdtags:
rowcells.append(_get_tdstatus(tdtag))
rowhtmls.append(_get_tdlink(tdtag))
tablecells.append(rowcells)
tablehtmls.append(rowhtmls)
analytics_df = pd.DataFrame(tablecells, columns=header)
analytics_html = pd.DataFrame(tablehtmls, columns=header)
return analytics_df, analytics_html, colattrs
def _check_search_loaded(driver, tutcode):
df, _, _ = _get_analytics_table(driver)
tutcol = df['Tutorial'].apply(lambda x:x.lower())
if (tutcol != tutcode.lower()).sum() > 0:
return False
return True
def _get_online_students(analytics_df):
'''
Get students that are online
'''
opened_count = (analytics_df.iloc[:, 3:] != 'unopened').sum(axis=1)
return opened_count > 0
def _get_code_cols(colattrs):
'''
Get columns for code only
'''
code_check = []
for attr in colattrs:
if attr == '#lesson-slide-code' or attr == '#lesson-slide-postgres':
code_check.append(True)
else:
code_check.append(False)
return code_check
def _prepare_code_plotting(analytics_df, colattrs):
good_stu = _get_online_students(analytics_df)
code_check = _get_code_cols(colattrs)
cleaned_df = analytics_df.loc[good_stu, code_check]
### preparing statistics
### We use .iloc here to avoid same question in one week
stats = {'completed':[],
'attempted':[],
'opened':[],
'unopened':[],
}
for colidx in range(cleaned_df.shape[1]):
colseries = cleaned_df.iloc[:,colidx]
for status in stats:
stats[status].append((colseries == status).sum())
colnames = cleaned_df.columns.tolist()
### return values
return stats, colnames
def _plot_code_status(stats, colnames):
fig = plt.figure(figsize=(12, len(colnames)/2))
ax = fig.add_subplot(111)
ypos = range(len(colnames),0,-1)
left = np.zeros(len(colnames))
statuses = ['completed', 'attempted', 'opened', 'unopened']
barcolor = {'completed':'green',
'attempted':'orange',
'opened':'yellow',
'unopened':'white'
}
for status in statuses:
ax.barh(ypos, stats[status], left=left,
color=barcolor[status],
label=status,
edgecolor='black'
)
left = np.add(left, stats[status])
ax.set_yticks(ypos)
ax.set_yticklabels(colnames, fontsize=15)
ax.set_ylim(0.5, len(colnames)+0.5)
xlim_max = 5 * ((int(left[0]) // 5) + 1)
ax.set_xticks(range(0, xlim_max+1, 5))
ax.set_xlim(0, xlim_max)
ax.grid(axis='x', linestyle='--')
fig.savefig('Class_status.png', bbox_inches='tight', dpi=100)
plt.close()
### for printing
def _get_value_rowcol(df, value):
rowcols = []
for i in range(df.shape[0]):
for j in range(df.shape[1]):
if df.iloc[i, j] == value:
rowcols.append((i, j))
return rowcols
def _print_new_attempted(analytics_df, analytics_html, rowcols):
print('NEW ATTEMPTS'.center(70, '*'))
for row, col in rowcols:
print('{} attempted {}!\n{}\n'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col],
analytics_html.iloc[row, col]
))
print('*'*70)
def _print_gone_attempted(analytics_df, rowcols):
print('THESE ATTEMPTS ARE SOLVED'.center(70, '*'))
for row, col in rowcols:
print('{} finished {}!'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col]))
print('*'*70)
def _print_old_attempted(analytics_df, analytics_html, rowcols):
print('OLD ATTEMPTS'.center(70, '*'))
for row, col in rowcols:
print('{} is still trying {}!\n{}\n'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col],
analytics_html.iloc[row, col]
))
print('*'*70)
def _compare_analytics_dfs(analytics_df, analytics_html, oldpath='./old_analytics_df.pickle'):
if not os.path.exists(oldpath):
rowcols = _get_value_rowcol(analytics_df, 'attempted')
_print_gone_attempted(analytics_df, [])
_print_old_attempted(analytics_df, analytics_html, [])
_print_new_attempted(analytics_df, analytics_html, rowcols)
else:
old_analytics_df = pd.read_pickle(oldpath)
oldatttab = old_analytics_df == 'attempted'
changetab = analytics_df != old_analytics_df
newatttab = analytics_df == 'attempted'
### attempts gone
goneatt_ = (oldatttab & changetab)
rowcols = _get_value_rowcol(goneatt_, True)
_print_gone_attempted(analytics_df, rowcols)
### old attempts
oldatt_ = (oldatttab & newatttab)
rowcols = _get_value_rowcol(oldatt_, True)
_print_old_attempted(analytics_df, analytics_html, rowcols)
### new attempts
newatt_ = (newatttab & changetab)
rowcols = _get_value_rowcol(newatt_, True)
_print_new_attempted(analytics_df, analytics_html, rowcols)
analytics_df.to_pickle(oldpath)
def _get_html_table(analytics_df, analytics_html, rowcols):
html_table = []
for row, col in rowcols:
name = analytics_df.iloc[row, 0]
question_name = analytics_df.columns[col]
url = analytics_html.iloc[row, col]
url = f'<a href="{url}" target="_blank">{url}</a>'
html_table.append([name, question_name, url])
return pd.DataFrame(html_table, columns=['NAME', 'QUESTION', 'WORKSPACE'])
def _make_html(analytics_df, analytics_html, oldpath):
html_content = ''
time_update = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
### The basic information for the course
tut_info = f'''<h2>TUTCODE {TUTCODE} UPDATED @ {time_update}</h2><hr>\n'''
html_content += tut_info
# if there is no old pickle
if not os.path.exists(oldpath):
### new attempts
html_content += '<h3>NEW ATTEMPTS</h3>\n'
rowcols = _get_value_rowcol(analytics_df, 'attempted')
if len(rowcols) != 0:
newatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += newatt_.to_html(escape=False)
else:
html_content += '<p> no new attempts</p>\n'
###
html_content += '<hr><h3>OLD ATTEMPTS</h3>\n'
html_content += '<p> no old attempts</p>\n'
### attempts are gone
html_content += '<hr><h3>ATTEMPTS SOLVED</h3>\n'
html_content += '<p> no old attempts solved</p>\n'
else:
old_analytics_df = pd.read_pickle(oldpath)
oldatttab = old_analytics_df == 'attempted'
changetab = analytics_df != old_analytics_df
newatttab = analytics_df == 'attempted'
### new attempts
html_content += '<h3>NEW ATTEMPTS</h3>\n'
newatt_ = (newatttab & changetab)
rowcols = _get_value_rowcol(newatt_, True)
if len(rowcols) != 0:
newatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += newatt_.to_html(escape=False)
else:
html_content += '<p> no new attempts</p>\n'
###
html_content += '<hr><h3>OLD ATTEMPTS</h3>\n'
oldatt_ = (oldatttab & newatttab)
rowcols = _get_value_rowcol(oldatt_, True)
if len(rowcols) != 0:
oldatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += oldatt_.to_html(escape=False)
else:
html_content += '<p> no old attempts</p>\n'
### attempts are gone
html_content += '<hr><h3>ATTEMPTS SOLVED</h3>\n'
goneatt_ = (oldatttab & changetab)
rowcols = _get_value_rowcol(goneatt_, True)
if len(rowcols) != 0:
goneatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += goneatt_.to_html(escape=False)
else:
html_content += '<p> no old attempts solved</p>\n'
html_content += '<hr>\n'
html_content += '<h3>CLASS MONITORING</h3>\n'
    html_content += '<a href="./Class_status.png"><img src="Class_status.png" width="1000"></a>'
with open('monitor.html', 'w', encoding='utf-8') as fp:
fp.write(html_content)
def _check_login(driver):
if 'Log in to continue' in driver.page_source:
return True
return False
def _manually_check():
### read settings
with open('./setup.py') as fp:
code = fp.read()
exec(code, globals())
if os.path.exists(OLDPICKLEPATH):
os.remove(OLDPICKLEPATH)
### start!
if not OPEN_WITH_CACHE:
driver = _start_webdriver(BROWSER, DRIVERPATH)
elif BROWSER.lower() == 'chrome':
_open_browser_cmd(PORT, CACHE_DIR)
driver = _connect_selenium(DRIVERPATH, PORT, CACHE_DIR)
else:
raise NotImplementedError('NOT IMPLEMENTED')
driver.get(EDURL)
wait = input('Please wait till the webpage responds!')
while _check_login(driver):
status_code = input('Please Log in Ed first!!!'.center(70, '+'))
print(f'The Tutorial Code is {TUTCODE}')
# tutnew = input("Input the new TUTCODE if it is not correct, or press enter")
# if tutnew:
# TUTCODE = tutnew
### starting the loop!
break_sign = ''
while break_sign != 'q':
driver.refresh()
inputbox = _find_inputbox(driver)
_search_tut(inputbox, TUTCODE)
### get analytics dataframe
while not _check_search_loaded(driver, TUTCODE):
sleep(0.3)
analytics_df, analytics_html, colattrs = _get_analytics_table(driver)
stats, colnames = _prepare_code_plotting(analytics_df, colattrs)
_plot_code_status(stats, colnames)
_make_html(analytics_df, analytics_html, OLDPICKLEPATH)
_compare_analytics_dfs(analytics_df, analytics_html, OLDPICKLEPATH)
print("Please check './monitor.html' for a webpage version!")
break_sign = input('Type "q" to quit! Press Enter to continue! ')
print('\n\n')
driver.quit()
if CLEAN:
os.remove(OLDPICKLEPATH)
os.remove('./Class_status.png')
print('Thanks for using!'.center(70, '-'))
if __name__ == '__main__':
# pass
_manually_check()
| 32.491935
| 97
| 0.584636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,674
| 0.290022
|
b08f4780d97da42bde288fca67a55a22f887a6e4
| 796
|
py
|
Python
|
Matrix/Leetcode 1219. Path with Maximum Gold.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 31
|
2020-06-23T00:40:04.000Z
|
2022-01-08T11:06:24.000Z
|
Matrix/Leetcode 1219. Path with Maximum Gold.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | null | null | null |
Matrix/Leetcode 1219. Path with Maximum Gold.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 7
|
2020-04-30T08:46:03.000Z
|
2021-08-28T16:25:54.000Z
|
from typing import List
class Solution:
def getMaximumGold(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
res = 0
visited = set()
for i in range(m):
for j in range(n):
if grid[i][j]!=0:
x = self.helper(grid, i, j, visited)
res = max(res, x)
return res
def helper(self, grid, x, y, visited):
m, n = len(grid), len(grid[0])
res = 0
if 0<=x<m and 0<=y<n and (x,y) not in visited and grid[x][y]!=0:
visited.add((x,y))
res = grid[x][y] + max(self.helper(grid, x-1,y,visited), self.helper(grid, x,y-1,visited), self.helper(grid, x+1,y,visited), self.helper(grid, x,y+1,visited))
visited.remove((x,y))
return res
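# A quick usage sketch (grid from the problem's first example; expected 24):
if __name__ == '__main__':
    print(Solution().getMaximumGold([[0, 6, 0], [5, 8, 7], [0, 9, 0]]))  # -> 24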
| 37.904762
| 170
| 0.487437
| 796
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b09026b586af08d54afda4ec0f2c75827cc35592
| 419
|
py
|
Python
|
Random-Programs/optimization/simonSays/simonSays.py
|
naumoff0/Archive
|
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
|
[
"MIT"
] | null | null | null |
Random-Programs/optimization/simonSays/simonSays.py
|
naumoff0/Archive
|
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
|
[
"MIT"
] | null | null | null |
Random-Programs/optimization/simonSays/simonSays.py
|
naumoff0/Archive
|
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
|
[
"MIT"
] | null | null | null |
import random, time, os
def m():
    # lives, per-digit display time, and current difficulty (digit count)
    l, t, d = 3, 0.5, 1
    o = input("Opp\n0)Yes\n1)No\n:")  # opponent prompt; the answer is never used
    while l > 0:
        S = r(d, "")
        print("Lives:" + str(l) + "\nSimon Says:" + str(S))
        time.sleep(t * d)
        os.system("clear")
        if int(input("RPT\n>> ")) != S:
            l = l - 1  # a wrong repeat costs a life
        else:
            d += 1  # a correct repeat raises the difficulty
    print("PTS:" + str(d - 3))
def r(d, n):
    # append d random digits to n, then convert the string to an int
    for _ in range(d):
        n = n + str(random.randint(0, 9))
    return int(n)
m()
| 26.1875
| 57
| 0.46778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.164678
|
b090822888b5d41d4c5adb4d5a180180c92743de
| 1,572
|
py
|
Python
|
quantarhei/symbolic/lang.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 14
|
2016-10-16T13:26:05.000Z
|
2021-11-09T11:40:52.000Z
|
quantarhei/symbolic/lang.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 61
|
2016-09-19T10:45:56.000Z
|
2021-11-10T13:53:06.000Z
|
quantarhei/symbolic/lang.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 21
|
2016-08-30T09:09:28.000Z
|
2022-03-30T03:16:35.000Z
|
# -*- coding: utf-8 -*-
def rename_function(ss, oldname, newname):
"""Replaces all occurences of a name by a new name
"""
#return ss.replace("conjugate","numpy.conj")
return ss.replace(oldname, newname)
def fce2array(sr, pat):
"""Converts functions into arrays
"""
se = "".join(sr)
so = ""
ln = len(se)
while ln > 0:
# find pattern
pos = se.find(pat)
if pos < 0:
break
# position just behind the pattern
pos += len(pat)
sl = list(se)
# exchange ( for [
if sl[pos] == "(":
sl[pos] = "["
se = "".join(sl)
# save everything in front of the pattern
so += se[0:pos]
se = se[pos:ln]
        # find closing bracket
pos2 = se.find(")")
        # exchange ) for ]
sl = list(se)
if sl[pos2] == ")":
sl[pos2] = "]"
se = "".join(sl)
ln = len(se)
so += se
return so
def python_code(ss, arrays=None):
"""Generate Python code with numpy functions
"""
sr = rename_function(ss,"conjugate","numpy.conj")
sr = rename_function(sr,"exp","numpy.exp")
if arrays is not None:
for ar in arrays:
sr = fce2array(sr,ar)
return sr
def fortran_code(ss, arrays=None):
"""Generate Fortran code with numpy functions
"""
sr = rename_function(ss,"conjugate","conjg")
#sr = rename_function(sr,"exp","numpy.exp")
#for ar in arrays:
# sr = fce2array(sr,ar)
return sr
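# A minimal usage sketch (the expression below is hypothetical, not from the
# package): symbolic output is renamed to numpy calls and function-style
# indexing is rewritten to array indexing.
if __name__ == '__main__':
    expr = "conjugate(A(i))*exp(x)"
    print(python_code(expr, arrays=["A"]))
    # -> numpy.conj(A[i])*numpy.exp(x)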
| 24.952381
| 57
| 0.512087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 609
| 0.387405
|
b091329df5d41c9910c06cb7d5946e735531a835
| 345
|
py
|
Python
|
python/1920.py
|
zheedong/BaekJoon
|
7f9e00085276a337d18ee3bb90c98126f7af4d3a
|
[
"MIT"
] | null | null | null |
python/1920.py
|
zheedong/BaekJoon
|
7f9e00085276a337d18ee3bb90c98126f7af4d3a
|
[
"MIT"
] | null | null | null |
python/1920.py
|
zheedong/BaekJoon
|
7f9e00085276a337d18ee3bb90c98126f7af4d3a
|
[
"MIT"
] | null | null | null |
test_case_1 = """
5 # N
4 1 5 2 3
5 # M
1 3 7 9 5
"""
'''
result
1
1
0
0
1
'''
N = int(input())
A = list(map(int, input().split()))
A_set = set(A)  # a set makes the membership checks O(1), solving it in one shot
M = int(input())
B = list(map(int, input().split()))
for i in range(0, M):
if B[i] in A_set:
print(1)
else:
print(0)
| 12.321429
| 46
| 0.46087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 127
| 0.349862
|
b093e8d1dc7ce6bd823606fd89573b9ddd010409
| 5,246
|
py
|
Python
|
fashion_segmentation/code/main.py
|
TayaPenskaya/Diploma
|
dee4e13eccdd0d0ddc4f667d2eb94260a7ed3847
|
[
"MIT"
] | null | null | null |
fashion_segmentation/code/main.py
|
TayaPenskaya/Diploma
|
dee4e13eccdd0d0ddc4f667d2eb94260a7ed3847
|
[
"MIT"
] | null | null | null |
fashion_segmentation/code/main.py
|
TayaPenskaya/Diploma
|
dee4e13eccdd0d0ddc4f667d2eb94260a7ed3847
|
[
"MIT"
] | null | null | null |
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
import yaml
import cv2
from PIL import Image, ImageFilter
from base64 import b64decode, b64encode
import io
from predictors.predictor import Predictor
from src.models.modnet import MODNet
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
class Segmentation:
def __init__(self):
config_path = './configs/config.yml'
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config['network']['use_cuda'] = config['network']['use_cuda'] and torch.cuda.is_available()
self.predictor = Predictor(config, checkpoint_path='./experiments/checkpoint_last.pth.tar')
self.modnet = MODNet(backbone_pretrained=False)
self.modnet = nn.DataParallel(self.modnet)
self.modnet.load_state_dict(torch.load('./pretrained/modnet_photographic_portrait_matting.ckpt', map_location=torch.device('cpu')))
self.modnet.eval()
def get_matte(self, im):
ref_size = 512
im_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
)
im = np.asarray(im)
if len(im.shape) == 2:
im = im[:, :, None]
if im.shape[2] == 1:
im = np.repeat(im, 3, axis=2)
elif im.shape[2] == 4:
im = im[:, :, 0:3]
# convert image to PyTorch tensor
im = Image.fromarray(im)
im = im_transform(im)
# add mini-batch dim
im = im[None, :, :, :]
# resize image for input
im_b, im_c, im_h, im_w = im.shape
if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
if im_w >= im_h:
im_rh = ref_size
im_rw = int(im_w / im_h * ref_size)
elif im_w < im_h:
im_rw = ref_size
im_rh = int(im_h / im_w * ref_size)
else:
im_rh = im_h
im_rw = im_w
im_rw = im_rw - im_rw % 32
im_rh = im_rh - im_rh % 32
im = F.interpolate(im, size=(im_rh, im_rw), mode='area')
# inference
_, _, matte = self.modnet(im, True)
# resize and save matte
matte = F.interpolate(matte, size=(im_h, im_w), mode='area')
matte = matte[0][0].data.cpu().numpy()
Image.fromarray(((matte * 255).astype('uint8')), mode='L').save('./results/matte.jpg')
return matte * 255
def get_image(self, image, matte):
image = np.asarray(image)
if len(image.shape) == 2:
image = image[:, :, None]
if image.shape[2] == 1:
image = np.repeat(image, 3, axis=2)
elif image.shape[2] == 4:
image = image[:, :, 0:3]
matte = np.repeat(np.asarray(matte)[:, :, None], 3, axis=2)
mt = Image.fromarray(np.uint8(matte)).convert("RGBA")
mt = mt.filter(ImageFilter.ModeFilter(size=30))
matte_blur = np.array(mt.getdata()) / 255
matte_blur = matte_blur[:, :3]
matte = matte / 255
foreground = image * matte + np.full(image.shape, 255) * (1 - matte)
img = Image.fromarray(np.uint8(foreground)).convert("RGBA")
datas = img.getdata()
newData = []
width, height, _ = foreground.shape
for x in range(width):
for y in range(height):
newData.append(
(255, 255, 255, 0) if np.all(matte_blur[x * height + y] < 0.1) else datas[x * height + y])
if img.mode in ("RGBA", "P"):
img = img.convert("RGB")
img.putdata(newData)
img.save('./results/segm.jpg')
return img
def get_segmentation(self, img):
imgdata = b64decode(str(img))
img = Image.open(io.BytesIO(imgdata))
#img = Image.open(img)
image, prediction = self.predictor.segment_image(img)
my_cm = plt.get_cmap('nipy_spectral')
plt.imsave('./results/tmp.jpg', prediction, cmap=my_cm)
prediction = cv2.imread('./results/tmp.jpg')
added_image = cv2.addWeighted(image.astype(int),0.5,prediction.astype(int),0.3,0)
added_image = cv2.cvtColor(np.uint8(added_image), cv2.COLOR_BGR2RGB)
cv2.imwrite('./results/res.jpg', added_image)
matte = self.get_matte(Image.open('./results/res.jpg'))
segm = self.get_image(Image.open('./results/res.jpg'), matte)
is_success, buffer = cv2.imencode(".jpg", cv2.imread('./results/segm.jpg'))
io_buf = io.BytesIO(buffer)
#return "ku"
return b64encode(io_buf.getvalue()).decode("utf-8")
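# A standalone sketch of the sizing rule in get_matte above: pull the image
# toward ref_size on its shorter side, then snap both sides down to multiples
# of 32 (the sample values below are hypothetical).
def _matting_input_size(im_h, im_w, ref_size=512):
    if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
        if im_w >= im_h:
            im_rh, im_rw = ref_size, int(im_w / im_h * ref_size)
        else:
            im_rw, im_rh = ref_size, int(im_h / im_w * ref_size)
    else:
        im_rh, im_rw = im_h, im_w
    return im_rh - im_rh % 32, im_rw - im_rw % 32
# e.g. _matting_input_size(1080, 1920) -> (512, 896)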
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This is a program that gets image detailed segmentation.')
parser.add_argument('-i', '--image', help='image in base64')
args = parser.parse_args()
if args.image is None:
raise Exception('missing --image IMAGE')
else:
s = Segmentation()
print(s.get_segmentation(args.image))
| 32.993711
| 139
| 0.573199
| 4,451
| 0.848456
| 0
| 0
| 0
| 0
| 0
| 0
| 658
| 0.125429
|
b09420355cd78009585e40b4e31a9c435f3442c4
| 1,853
|
py
|
Python
|
motor-blog/motor_blog/text/__init__.py
|
gaoyuankidult/SocialRoboics
|
c363089e220ae0d2780b26693a154ad55fa64132
|
[
"MIT"
] | null | null | null |
motor-blog/motor_blog/text/__init__.py
|
gaoyuankidult/SocialRoboics
|
c363089e220ae0d2780b26693a154ad55fa64132
|
[
"MIT"
] | null | null | null |
motor-blog/motor_blog/text/__init__.py
|
gaoyuankidult/SocialRoboics
|
c363089e220ae0d2780b26693a154ad55fa64132
|
[
"MIT"
] | null | null | null |
import re
from HTMLParser import HTMLParser
class HTMLPassThrough(HTMLParser):
"""Maintains a stack of tags and returns the same HTML it parses.
Base class for more interesting parsers in markup.py.
"""
def reset(self):
HTMLParser.reset(self)
self.stack = []
self.out = []
def emit(self, data):
self.out.append(data)
def close(self):
HTMLParser.close(self)
return ''.join(self.out)
def handle_endtag(self, tag):
assert self.stack, "Unmatched closing tag %s" % tag
if self.stack[-1] != tag:
raise AssertionError(
"Unmatched closing tag %s, expected %s.\nLine %s" % (
tag, self.stack[-1], self.lineno))
self.stack.pop()
self.out.append('</%s>' % tag)
def handle_starttag(self, tag, attrs):
self.stack.append(tag)
if attrs:
self.out.append(
"<%s %s>" % (
tag, ' '.join('%s="%s"' % (k, v) for k, v in attrs)))
else:
self.out.append("<%s>" % tag)
def handle_data(self, data):
self.out.append(data)
def handle_entityref(self, name):
self.out.append('&%s;' % name)
def handle_charref(self, name):
return self.handle_entityref('#' + name)
whitespace = re.compile(r'\s+')
class HTMLStripTags(HTMLParser):
"""Strip tags
"""
def __init__(self, *args, **kwargs):
HTMLParser.__init__(self, *args, **kwargs)
self.out = ""
def handle_data(self, data):
self.out += data
def handle_entityref(self, name):
self.out += '&%s;' % name
def handle_charref(self, name):
return self.handle_entityref('#' + name)
def value(self):
# Collapse whitespace.
return whitespace.sub(' ', self.out).strip()
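# A quick sanity check of HTMLStripTags (the input is illustrative, not from
# the project): tags are dropped and runs of whitespace collapse to one space.
if __name__ == '__main__':
    stripper = HTMLStripTags()
    stripper.feed('<p>Hello   <b>world</b></p>')
    print(stripper.value())  # -> 'Hello world'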
| 25.736111
| 73
| 0.557474
| 1,770
| 0.955208
| 0
| 0
| 0
| 0
| 0
| 0
| 314
| 0.169455
|
b09631a3441037754162b7e57cc1cff16fb106f6
| 1,452
|
py
|
Python
|
src/api/access_module.py
|
GuilhermeVieira/mac0350-database
|
1b3b344cda09187cc5f7186d86bea385f77509dc
|
[
"MIT"
] | null | null | null |
src/api/access_module.py
|
GuilhermeVieira/mac0350-database
|
1b3b344cda09187cc5f7186d86bea385f77509dc
|
[
"MIT"
] | 1
|
2019-06-02T22:40:24.000Z
|
2019-06-02T22:40:24.000Z
|
src/api/access_module.py
|
GuilhermeVieira/mac0350-database
|
1b3b344cda09187cc5f7186d86bea385f77509dc
|
[
"MIT"
] | null | null | null |
import databases
from database_handler import load_session, func
from flask_login import LoginManager, UserMixin
class AccessModule:
session, Base = load_session(databases.urls['DATABASE_ACCESS_URL'])
def __init__(self):
'''
for item in self.session.query(User.us_id):
print(item.first())
'''
return
def create_user(self, email, password):
try:
self.session.execute(func.cria_usuario(email, password))
self.session.commit()
return True
except Exception as e:
return str(e)
def authenticate_user(self, email, password):
try:
return self.session.execute(func.verifica_senha(email, password)).first()[0]
except Exception as e:
print('Erro: ' + str(e))
return None
def get_user_by_id(self, us_id):
return self.session.query(User).get(us_id)
def get_user_by_email(self, us_email):
return self.session.query(User).filter_by(us_email=str(us_email)).first()
def is_allowed(self, us_id, service):
try:
return self.session.execute(func.tem_acesso(us_id, service)).first()[0]
except Exception as e:
print('Error: ' + str(e))
return False
class User(UserMixin, AccessModule.Base):
__tablename__ = 'users'
__table_args__ = { 'autoload': True }
def get_id(self):
return self.us_id
| 29.632653
| 88
| 0.623967
| 1,335
| 0.919421
| 0
| 0
| 0
| 0
| 0
| 0
| 154
| 0.106061
|
b0970f0b88a768e2c8fb0cd991053af1b46afa1b
| 2,675
|
py
|
Python
|
.leetcode/75.sort-colors.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/75.sort-colors.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/75.sort-colors.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
# @lc app=leetcode id=75 lang=python3
#
# [75] Sort Colors
#
# https://leetcode.com/problems/sort-colors/description/
#
# algorithms
# Medium (49.90%)
# Likes: 5201
# Dislikes: 298
# Total Accepted: 663.5K
# Total Submissions: 1.3M
# Testcase Example: '[2,0,2,1,1,0]'
#
# Given an array nums with n objects colored red, white, or blue, sort them
# in-place so that objects of the same color are adjacent, with the colors in
# the order red, white, and blue.
#
# We will use the integers 0, 1, and 2 to represent the color red, white, and
# blue, respectively.
#
#
# Example 1:
# Input: nums = [2,0,2,1,1,0]
# Output: [0,0,1,1,2,2]
# Example 2:
# Input: nums = [2,0,1]
# Output: [0,1,2]
# Example 3:
# Input: nums = [0]
# Output: [0]
# Example 4:
# Input: nums = [1]
# Output: [1]
#
#
# Constraints:
#
#
# n == nums.length
# 1 <= n <= 300
# nums[i] is 0, 1, or 2.
#
#
#
# Follow up:
#
#
# Could you solve this problem without using the library's sort function?
# Could you come up with a one-pass algorithm using only O(1) constant space?
#
#
#
# @lc tags=array;two-pointers;sort
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# In-place sort of an array whose elements take only three values.
# One pass and constant extra space are required.
# Use two pointers so that everything outside each pointer already has the target color.
#
# @lc idea=end
# @lc group=two-pointers
# @lc rank=6
# @lc code=start
class Solution:
    def sortColors(self, nums: List[int]) -> None:
        l, r = 0, len(nums) - 1
        p = 0
        while p <= r:
            if nums[p] == 0:
                # the value swapped in from the left region is always a 1
                nums[p], nums[l] = nums[l], nums[p]
                l += 1
                p += 1
            elif nums[p] == 2:
                nums[p], nums[r] = nums[r], nums[p]
                r -= 1
            else:
                p += 1
        return nums
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('nums = [2,0,2,1,1,0]')
print('Output :')
print(str(Solution().sortColors([2,0,2,1,1,0])))
    print('Expected :')
print('[0,0,1,1,2,2]')
print()
print('Example 2:')
print('Input : ')
print('nums = [2,0,1]')
print('Output :')
print(str(Solution().sortColors([2,0,1])))
    print('Expected :')
print('[0,1,2]')
print()
print('Example 3:')
print('Input : ')
print('nums = [0]')
print('Output :')
print(str(Solution().sortColors([0])))
    print('Expected :')
print('[0]')
print()
print('Example 4:')
print('Input : ')
print('nums = [1]')
print('Output :')
print(str(Solution().sortColors([1])))
    print('Expected :')
print('[1]')
print()
pass
# @lc main=end
| 20.265152
| 77
| 0.542804
| 471
| 0.168878
| 0
| 0
| 0
| 0
| 0
| 0
| 1,679
| 0.602008
|
b0975e691b95560eeb7040b6b852833062f50974
| 2,245
|
py
|
Python
|
mmdet3d/ops/dgcnn_modules/dgcnn_fa_module.py
|
maskjp/mmdetection3d
|
98f332372b1a4c82bc2d57588a5d764f4176c869
|
[
"Apache-2.0"
] | 5
|
2022-01-26T13:03:12.000Z
|
2022-01-27T03:59:09.000Z
|
mmdet3d/ops/dgcnn_modules/dgcnn_fa_module.py
|
maskjp/mmdetection3d
|
98f332372b1a4c82bc2d57588a5d764f4176c869
|
[
"Apache-2.0"
] | 1
|
2022-03-31T08:33:12.000Z
|
2022-03-31T08:35:55.000Z
|
mmdet3d/ops/dgcnn_modules/dgcnn_fa_module.py
|
maskjp/mmdetection3d
|
98f332372b1a4c82bc2d57588a5d764f4176c869
|
[
"Apache-2.0"
] | 1
|
2022-03-30T04:08:39.000Z
|
2022-03-30T04:08:39.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, force_fp32
from torch import nn as nn
class DGCNNFAModule(BaseModule):
"""Point feature aggregation module used in DGCNN.
Aggregate all the features of points.
Args:
mlp_channels (list[int]): List of mlp channels.
norm_cfg (dict, optional): Type of normalization method.
Defaults to dict(type='BN1d').
act_cfg (dict, optional): Type of activation method.
Defaults to dict(type='ReLU').
init_cfg (dict, optional): Initialization config. Defaults to None.
"""
def __init__(self,
mlp_channels,
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'),
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.fp16_enabled = False
self.mlps = nn.Sequential()
for i in range(len(mlp_channels) - 1):
self.mlps.add_module(
f'layer{i}',
ConvModule(
mlp_channels[i],
mlp_channels[i + 1],
kernel_size=(1, ),
stride=(1, ),
conv_cfg=dict(type='Conv1d'),
norm_cfg=norm_cfg,
act_cfg=act_cfg))
@force_fp32()
def forward(self, points):
"""forward.
Args:
points (List[Tensor]): tensor of the features to be aggregated.
Returns:
Tensor: (B, N, M) M = mlp[-1], tensor of the output points.
"""
if len(points) > 1:
new_points = torch.cat(points[1:], dim=-1)
new_points = new_points.transpose(1, 2).contiguous() # (B, C, N)
new_points_copy = new_points
new_points = self.mlps(new_points)
new_fa_points = new_points.max(dim=-1, keepdim=True)[0]
new_fa_points = new_fa_points.repeat(1, 1, new_points.shape[-1])
new_points = torch.cat([new_fa_points, new_points_copy], dim=1)
new_points = new_points.transpose(1, 2).contiguous()
else:
new_points = points
return new_points
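# A minimal usage sketch (shapes are illustrative; assumes torch and mmcv are
# installed): of the three (B, N, C) feature tensors, the last two are
# concatenated to 16 channels, run through an MLP to 32, max-pooled, and
# re-attached to the inputs to give (B, N, 48).
if __name__ == '__main__':
    feats = [torch.rand(2, 64, 8), torch.rand(2, 64, 8), torch.rand(2, 64, 8)]
    module = DGCNNFAModule(mlp_channels=[16, 32])
    print(module(feats).shape)  # torch.Size([2, 64, 48])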
| 32.536232
| 77
| 0.559911
| 2,075
| 0.924276
| 0
| 0
| 869
| 0.387082
| 0
| 0
| 749
| 0.33363
|
b098b19f23b70a7dc6b13df3898c87a35d698d26
| 158
|
py
|
Python
|
tests/test_flake8.py
|
ROZ-MOFUMOFU-ME/bitnodes
|
47f6c1cf886cd522b5fb4ea921dcb4cf63d1eaa4
|
[
"MIT"
] | 2
|
2021-09-06T04:42:08.000Z
|
2021-11-12T12:05:14.000Z
|
tests/test_flake8.py
|
ROZ-MOFUMOFU-ME/bitnodes
|
47f6c1cf886cd522b5fb4ea921dcb4cf63d1eaa4
|
[
"MIT"
] | 1
|
2020-03-19T09:00:18.000Z
|
2020-03-19T09:00:18.000Z
|
tests/test_flake8.py
|
ROZ-MOFUMOFU-ME/bitnodes
|
47f6c1cf886cd522b5fb4ea921dcb4cf63d1eaa4
|
[
"MIT"
] | 3
|
2019-02-22T22:57:34.000Z
|
2021-05-22T10:58:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from subprocess import call
def test_flake8():
return_code = call(['flake8'])
assert return_code == 0
| 17.555556
| 34
| 0.64557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.329114
|
b098cf5dfcf4280fa0a7565a9bca4ad07ebf52e7
| 1,653
|
py
|
Python
|
dir_monch/dir_monch.py
|
shivaghose/nifty_tools
|
587ed6403222ee959f8e0789e66e61f6f187f80b
|
[
"MIT"
] | 1
|
2016-08-11T18:49:44.000Z
|
2016-08-11T18:49:44.000Z
|
dir_monch/dir_monch.py
|
shivaghose/nifty_tools
|
587ed6403222ee959f8e0789e66e61f6f187f80b
|
[
"MIT"
] | null | null | null |
dir_monch/dir_monch.py
|
shivaghose/nifty_tools
|
587ed6403222ee959f8e0789e66e61f6f187f80b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
dir_monch: a utility to help shorten paths.
'''
from __future__ import print_function
from os.path import expanduser
from os import sep as os_sep
from sys import exit
import sys
def dir_monch(path):
# Substitute ~ for the home dir path when possible
HOME = expanduser("~")
if path.startswith(HOME):
path = path.replace(HOME, '~', 1)
MAX_PATH_LENGTH = 20
if len(path) < MAX_PATH_LENGTH:
return path
split_path = path.split(os_sep)
shortened_path = []
for directory in split_path:
end_idx = 1
short_name = directory[0:end_idx]
while (short_name in shortened_path) and end_idx < len(directory):
end_idx += 1
short_name = directory[0:end_idx]
shortened_path.append(short_name)
final_path = ''
for short_dir in shortened_path[0:-1]:
final_path += short_dir + os_sep
final_path += split_path[-1]
return final_path
def run_tests():
test_paths = ['/Users/shiva', '/Users/shiva/git', '/Users/shiva/anaconda2/bin/Assistant.app', \
'/etc/apache2/extra', '/bin', '/', '/A/A/A/A/A/A/A/A', 'aaa/aaa/aaa/aaa/aaa/', \
'/Users/shiva/this\ folder\ has\ spaces/folder']
expected_outputs = ['~', '~/git', '~/a/b/Assistant.app', '/etc/apache2/extra', '/bin', \
'/', '/A/A/A/A/A/A/A/A', 'a/aa/aaa/aaa/aaa/', '~/t/folder']
for input_str, expected_str in zip (test_paths, expected_outputs):
output = dir_monch(input_str)
assert output == expected_str
if __name__ == '__main__':
print(dir_monch(sys.argv[1]))
exit(0)
| 30.054545
| 99
| 0.61585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 439
| 0.265578
|
b099b5932476f4ea71fffb1250e8c27e0a4ecda3
| 7,302
|
py
|
Python
|
coherence/extern/simple_config.py
|
crass/coherence
|
d1cf150f5fa4a4bd75c7ec682ef2a2783ccf50ca
|
[
"MIT"
] | 4
|
2016-01-23T11:06:37.000Z
|
2017-06-03T18:05:53.000Z
|
coherence/extern/simple_config.py
|
sreichholf/python-coherence
|
f39fbd2bd0ae749d2ca161f807a2efebc493492f
|
[
"MIT"
] | null | null | null |
coherence/extern/simple_config.py
|
sreichholf/python-coherence
|
f39fbd2bd0ae749d2ca161f807a2efebc493492f
|
[
"MIT"
] | 3
|
2017-02-02T18:58:35.000Z
|
2019-02-15T03:07:19.000Z
|
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# based on: http://code.activestate.com/recipes/573463/
# Modified by Philippe Normand
# Copyright 2008, Frank Scholz <coherence@beebits.net>
from coherence.extern.et import ET as ElementTree, indent, parse_xml
class ConfigItem(object):
""" the base """
class Config(ConfigItem):
def __init__(self,filename,root=None,preamble=False,element2attr_mappings=None):
self.filename = filename
self.element2attr_mappings = element2attr_mappings or {}
self.db = parse_xml(open(self.filename).read())
self.config = self.db = ConvertXmlToDict(self.db.getroot())
self.preamble = ''
if preamble == True:
self.preamble = """<?xml version="1.0" encoding="utf-8"?>\n"""
if root != None:
try:
self.config = self.config[root]
except KeyError:
pass
self.config.save = self.save
def tostring(self):
root = ConvertDictToXml(self.db,self.element2attr_mappings)
tree = ElementTree.ElementTree(root).getroot()
indent(tree,0)
xml = self.preamble + ElementTree.tostring(tree, encoding='utf-8')
return xml
def save(self, new_filename=None):
if new_filename != None:
self.filename = new_filename
xml = self.tostring()
f = open(self.filename, 'wb')
f.write(xml)
f.close()
def get(self,key,default=None):
if key in self.config:
item = self.config[key]
try:
if item['active'] == 'no':
return default
return item
except (TypeError,KeyError):
return item
return default
def set(self,key,value):
self.config[key] = value
class XmlDictObject(dict,ConfigItem):
def __init__(self, initdict=None):
if initdict is None:
initdict = {}
dict.__init__(self, initdict)
self._attrs = {}
def __getattr__(self, item):
value = self.__getitem__(item)
try:
if value['active'] == 'no':
raise KeyError
except (TypeError,KeyError):
return value
return value
def __setattr__(self, item, value):
if item == '_attrs':
object.__setattr__(self, item, value)
else:
self.__setitem__(item, value)
def get(self,key,default=None):
try:
item = self[key]
try:
if item['active'] == 'no':
return default
return item
except (TypeError,KeyError):
return item
except KeyError:
pass
return default
def set(self,key,value):
self[key] = value
def __str__(self):
if self.has_key('_text'):
return self.__getitem__('_text')
else:
return ''
def __repr__(self):
return repr(dict(self))
@staticmethod
def Wrap(x):
if isinstance(x, dict):
return XmlDictObject((k, XmlDictObject.Wrap(v)) for (k, v) in x.iteritems())
elif isinstance(x, list):
return [XmlDictObject.Wrap(v) for v in x]
else:
return x
@staticmethod
def _UnWrap(x):
if isinstance(x, dict):
return dict((k, XmlDictObject._UnWrap(v)) for (k, v) in x.iteritems())
elif isinstance(x, list):
return [XmlDictObject._UnWrap(v) for v in x]
else:
return x
def UnWrap(self):
return XmlDictObject._UnWrap(self)
def _ConvertDictToXmlRecurse(parent, dictitem,element2attr_mappings=None):
assert type(dictitem) is not type([])
if isinstance(dictitem, dict):
for (tag, child) in dictitem.iteritems():
if str(tag) == '_text':
parent.text = str(child)
## elif str(tag) == '_attrs':
## for key, value in child.iteritems():
## parent.set(key, value)
elif element2attr_mappings != None and tag in element2attr_mappings:
parent.set(element2attr_mappings[tag],child)
elif type(child) is type([]):
for listchild in child:
elem = ElementTree.Element(tag)
parent.append(elem)
_ConvertDictToXmlRecurse(elem, listchild,element2attr_mappings=element2attr_mappings)
else:
if(not isinstance(dictitem, XmlDictObject) and
not callable(dictitem)):
attrs = dictitem
dictitem = XmlDictObject()
dictitem._attrs = attrs
if tag in dictitem._attrs:
parent.set(tag, child)
elif not callable(tag) and not callable(child):
elem = ElementTree.Element(tag)
parent.append(elem)
_ConvertDictToXmlRecurse(elem, child,element2attr_mappings=element2attr_mappings)
else:
if not callable(dictitem):
parent.text = str(dictitem)
def ConvertDictToXml(xmldict,element2attr_mappings=None):
roottag = xmldict.keys()[0]
root = ElementTree.Element(roottag)
_ConvertDictToXmlRecurse(root, xmldict[roottag],element2attr_mappings=element2attr_mappings)
return root
def _ConvertXmlToDictRecurse(node, dictclass):
nodedict = dictclass()
## if node.items():
## nodedict.update({'_attrs': dict(node.items())})
if len(node.items()) > 0:
# if we have attributes, set them
attrs = dict(node.items())
nodedict.update(attrs)
nodedict._attrs = attrs
for child in node:
# recursively add the element's children
newitem = _ConvertXmlToDictRecurse(child, dictclass)
if nodedict.has_key(child.tag):
# found duplicate tag, force a list
if type(nodedict[child.tag]) is type([]):
# append to existing list
nodedict[child.tag].append(newitem)
else:
# convert to list
nodedict[child.tag] = [nodedict[child.tag], newitem]
else:
# only one, directly set the dictionary
nodedict[child.tag] = newitem
if node.text is None:
text = ''
else:
text = node.text.strip()
if len(nodedict) > 0:
# if we have a dictionary add the text as a dictionary value (if there is any)
if len(text) > 0:
nodedict['_text'] = text
else:
# if we don't have child nodes or attributes, just set the text
if node.text is not None:
nodedict = node.text.strip()
return nodedict
def ConvertXmlToDict(root,dictclass=XmlDictObject):
return dictclass({root.tag: _ConvertXmlToDictRecurse(root, dictclass)})
def main():
c = Config('config.xml',root='config')
#print '%r' % c.config
#c.save(new_filename='config.new.xml')
print c.config['interface']
#for plugin in c.config.pluginlist.plugin:
# if plugin.active != 'no':
# print '%r' % plugin
if __name__ == '__main__':
main()
| 31.474138
| 105
| 0.571898
| 3,380
| 0.462887
| 0
| 0
| 548
| 0.075048
| 0
| 0
| 1,146
| 0.156943
|
b099d05e9015241447612382401295c1e700fde0
| 8,519
|
py
|
Python
|
ball_catching/easy_catch/planner.py
|
shoefer/ball_catching
|
46b2e95894659347b563123c1c23742437755993
|
[
"MIT"
] | 1
|
2017-07-22T11:36:02.000Z
|
2017-07-22T11:36:02.000Z
|
ball_catching/easy_catch/planner.py
|
shoefer/ball_catching
|
46b2e95894659347b563123c1c23742437755993
|
[
"MIT"
] | null | null | null |
ball_catching/easy_catch/planner.py
|
shoefer/ball_catching
|
46b2e95894659347b563123c1c23742437755993
|
[
"MIT"
] | null | null | null |
import casadi as ca
import casadi.tools as cat
__author__ = 'belousov'
class Planner:
# ========================================================================
# Simple planning
# ========================================================================
@classmethod
def create_plan(cls, model, warm_start=False,
x0=0, lam_x0=0, lam_g0=0):
# Degrees of freedom for the optimizer
V = cat.struct_symSX([
(
cat.entry('X', repeat=model.n+1, struct=model.x),
cat.entry('U', repeat=model.n, struct=model.u)
)
])
# Box constraints
[lbx, ubx] = cls._create_box_constraints(model, V)
# Force the catcher to always look forward
# lbx['U', :, 'theta'] = ubx['U', :, 'theta'] = 0
# Non-linear constraints
[g, lbg, ubg] = cls._create_nonlinear_constraints(model, V)
# Objective function
J = cls._create_objective_function(model, V, warm_start)
# Formulate non-linear problem
nlp = ca.SXFunction('nlp', ca.nlpIn(x=V), ca.nlpOut(f=J, g=g))
op = {# Linear solver
#'linear_solver': 'ma57',
# Acceptable termination
'acceptable_iter': 5}
if warm_start:
op['warm_start_init_point'] = 'yes'
op['fixed_variable_treatment'] = 'make_constraint'
# Initialize solver
solver = ca.NlpSolver('solver', 'ipopt', nlp, op)
# Solve
if warm_start:
sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg,
lam_x0=lam_x0, lam_g0=lam_g0)
else:
sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
return V(sol['x']), sol['lam_x'], sol['lam_g']
# sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
# return V(sol['x']), sol['lam_x'], sol['lam_g']
@staticmethod
def _create_nonlinear_constraints(model, V):
g, lbg, ubg = [], [], []
for k in range(model.n):
# Multiple shooting
[xk_next] = model.F([V['X', k], V['U', k]])
g.append(xk_next - V['X', k+1])
lbg.append(ca.DMatrix.zeros(model.nx))
ubg.append(ca.DMatrix.zeros(model.nx))
# Control constraints
constraint_k = model._set_constraint(V, k)
g.append(constraint_k)
lbg.append(-ca.inf)
ubg.append(0)
g = ca.veccat(g)
lbg = ca.veccat(lbg)
ubg = ca.veccat(ubg)
return [g, lbg, ubg]
@staticmethod
def _create_objective_function(model, V, warm_start):
[final_cost] = model.cl([V['X', model.n]])
running_cost = 0
for k in range(model.n):
[stage_cost] = model.c([V['X', k], V['U', k]])
# Encourage looking at the ball
d = ca.veccat([ca.cos(V['X', k, 'psi'])*ca.cos(V['X', k, 'phi']),
ca.cos(V['X', k, 'psi'])*ca.sin(V['X', k, 'phi']),
ca.sin(V['X', k, 'psi'])])
r = ca.veccat([V['X', k, 'x_b'] - V['X', k, 'x_c'],
V['X', k, 'y_b'] - V['X', k, 'y_c'],
V['X', k, 'z_b']])
r_cos_omega = ca.mul(d.T, r)
if warm_start:
cos_omega = r_cos_omega / (ca.norm_2(r) + 1e-6)
stage_cost += 1e-1 * (1 - cos_omega)
else:
stage_cost -= 1e-1 * r_cos_omega * model.dt
running_cost += stage_cost
return final_cost + running_cost
# ========================================================================
# Common functions
# ========================================================================
@staticmethod
def _create_box_constraints(model, V):
lbx = V(-ca.inf)
ubx = V(ca.inf)
# Control limits
model._set_control_limits(lbx, ubx)
# State limits
model._set_state_limits(lbx, ubx)
# Initial state
lbx['X', 0] = ubx['X', 0] = model.m0
return [lbx, ubx]
# ========================================================================
# Belief space planning
# ========================================================================
@classmethod
def create_belief_plan(cls, model, warm_start=False,
x0=0, lam_x0=0, lam_g0=0):
# Degrees of freedom for the optimizer
V = cat.struct_symSX([
(
cat.entry('X', repeat=model.n+1, struct=model.x),
cat.entry('U', repeat=model.n, struct=model.u)
)
])
# Box constraints
[lbx, ubx] = cls._create_box_constraints(model, V)
# Non-linear constraints
[g, lbg, ubg] = cls._create_belief_nonlinear_constraints(model, V)
# Objective function
J = cls._create_belief_objective_function(model, V)
# Formulate non-linear problem
nlp = ca.SXFunction('nlp', ca.nlpIn(x=V), ca.nlpOut(f=J, g=g))
op = {# Linear solver
#'linear_solver': 'ma57',
# Warm start
# 'warm_start_init_point': 'yes',
# Termination
'max_iter': 1500,
'tol': 1e-6,
'constr_viol_tol': 1e-5,
'compl_inf_tol': 1e-4,
# Acceptable termination
'acceptable_tol': 1e-3,
'acceptable_iter': 5,
'acceptable_obj_change_tol': 1e-2,
# NLP
# 'fixed_variable_treatment': 'make_constraint',
# Quasi-Newton
'hessian_approximation': 'limited-memory',
'limited_memory_max_history': 5,
'limited_memory_max_skipping': 1}
if warm_start:
op['warm_start_init_point'] = 'yes'
op['fixed_variable_treatment'] = 'make_constraint'
# Initialize solver
solver = ca.NlpSolver('solver', 'ipopt', nlp, op)
# Solve
if warm_start:
sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg,
lam_x0=lam_x0, lam_g0=lam_g0)
else:
sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
return V(sol['x']), sol['lam_x'], sol['lam_g']
@staticmethod
def _create_belief_nonlinear_constraints(model, V):
"""Non-linear constraints for planning"""
bk = cat.struct_SX(model.b)
bk['S'] = model.b0['S']
g, lbg, ubg = [], [], []
for k in range(model.n):
# Belief propagation
bk['m'] = V['X', k]
[bk_next] = model.BF([bk, V['U', k]])
bk_next = model.b(bk_next)
# Multiple shooting
g.append(bk_next['m'] - V['X', k+1])
lbg.append(ca.DMatrix.zeros(model.nx))
ubg.append(ca.DMatrix.zeros(model.nx))
# Control constraints
constraint_k = model._set_constraint(V, k)
g.append(constraint_k)
lbg.append(-ca.inf)
ubg.append(0)
# Advance time
bk = bk_next
g = ca.veccat(g)
lbg = ca.veccat(lbg)
ubg = ca.veccat(ubg)
return [g, lbg, ubg]
@staticmethod
def _create_belief_objective_function(model, V):
# Simple cost
running_cost = 0
for k in range(model.n):
[stage_cost] = model.c([V['X', k], V['U', k]])
running_cost += stage_cost
[final_cost] = model.cl([V['X', model.n]])
# Uncertainty cost
running_uncertainty_cost = 0
bk = cat.struct_SX(model.b)
bk['S'] = model.b0['S']
for k in range(model.n):
# Belief propagation
bk['m'] = V['X', k]
[bk_next] = model.BF([bk, V['U', k]])
bk_next = model.b(bk_next)
# Accumulate cost
[stage_uncertainty_cost] = model.cS([bk_next])
running_uncertainty_cost += stage_uncertainty_cost
# Advance time
bk = bk_next
[final_uncertainty_cost] = model.cSl([bk_next])
return running_cost + final_cost +\
running_uncertainty_cost + final_uncertainty_cost
| 32.515267
| 78
| 0.471182
| 8,424
| 0.988848
| 0
| 0
| 7,740
| 0.908557
| 0
| 0
| 2,293
| 0.269163
|
b09a552321fe21418a22b65a3d6582f20e8c9eea
| 4,526
|
py
|
Python
|
commtrack/links/gerrit.py
|
bregman-arie/commtrack
|
5359958dd07d1b9e868ec5276da6dde22f982d07
|
[
"Apache-2.0"
] | 3
|
2020-01-14T10:15:40.000Z
|
2020-12-01T14:32:01.000Z
|
commtrack/links/gerrit.py
|
bregman-arie/commit-tracker
|
5359958dd07d1b9e868ec5276da6dde22f982d07
|
[
"Apache-2.0"
] | 2
|
2018-12-24T12:16:58.000Z
|
2019-02-18T07:16:42.000Z
|
commtrack/links/gerrit.py
|
bregman-arie/commit-tracker
|
5359958dd07d1b9e868ec5276da6dde22f982d07
|
[
"Apache-2.0"
] | 1
|
2019-07-15T08:27:36.000Z
|
2019-07-15T08:27:36.000Z
|
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import crayons
import json
import logging
import subprocess
import sys
from commtrack.gerrit import constants as const
from commtrack.gerrit import exceptions as exc
from commtrack.link import Link
LOG = logging.getLogger(__name__)
class Gerrit(Link):
"""Managing operations on Gerrit Code review system."""
def __init__(self, name, address, parameters):
super(Gerrit, self).__init__(name, address,
const.LINK_TYPE, parameters)
def get_basic_query_cmd(self, address):
"""Returns a very basic query command which extended based
on provided input from the user.
"""
return ['ssh', '-p', '29418',
address.strip('\"'),
'gerrit', 'query',
'limit:5',
'--format JSON']
def query(self):
"""Returns query result"""
query_cmd = self.get_basic_query_cmd(self.address)
if 'change_id' in self.params:
query_cmd.append('change:{}'.format(self.params['change_id']))
if 'project' in self.chain_params['global'] and self.chain_params[
'global']['project']:
query_cmd.append('project:{}'.format(self.chain_params['global']['project']))
if 'subject' in self.params:
query_cmd.append(self.params['subject'])
output = subprocess.check_output(query_cmd)
decoded_output = output.decode('utf-8')
query_result_li = decoded_output.split('\n')
# Handle multiple matches
if len(query_result_li) > 1 and self.chain_params['global']['commit']:
LOG.info(exc.multiple_matches())
sys.exit(2)
# return json.loads(query_result_li)
return query_result_li
def search(self, params=None, same_project=True):
"""Returns the result of searching the given change."""
self.verify_and_set_reqs(const.REQUIRED_PARAMS)
raw_result_li = self.query()
# Check if there is at least one result
if len(raw_result_li) < 3:
self.results.append("{} find such change.".format(crayons.red("Couldn't")))
else:
self.params['found'] = True
json_result_li = []
for res in raw_result_li:
if '"type":' not in res and res != '':
json_result_li.append(json.loads(res))
if len(json_result_li) > 1:
same_project = self.verify_same_project(json_result_li)
if same_project:
for result in json_result_li:
self.update_link_params(result)
self.results.append(self.process_result(result))
else:
LOG.error(exc.multiple_projects())
sys.exit(2)
return self.params
def verify_same_project(self, changes):
"""Returns true if all the changes belong to the same project."""
project = changes[0]['project']
for change in changes[1:]:
if change['project'] != project:
return False
return True
def update_link_params(self, data):
"""Update link parameters using data discovered during the query."""
for param in const.SINGLE_PROVIDED_PARAMS:
if param in data:
self.params[param] = data[param]
for param in const.MULTI_PROVIDED_PARAMS:
if param in data:
if param not in self.params:
self.params[param] = list()
self.params[param].append(data[param])
def process_result(self, data):
"""Returns adjusted result with only the relevant information."""
result_str = "Status in project {} branch {} is {}".format(
data['project'],
data['branch'],
self.colorize_result(data['status']))
return result_str
def colorize_result(self, status):
return const.COLORED_STATS[status]
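# A small sketch of the query command this link assembles (the address and
# change id are hypothetical; nothing is executed over ssh here):
if __name__ == '__main__':
    cmd = Gerrit.get_basic_query_cmd(None, '"review.example.org"')
    cmd.append('change:12345')  # as query() does when a change id is given
    print(' '.join(cmd))
    # -> ssh -p 29418 review.example.org gerrit query limit:5 --format JSON change:12345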
| 35.085271
| 89
| 0.617543
| 3,685
| 0.814185
| 0
| 0
| 0
| 0
| 0
| 0
| 1,468
| 0.324348
|
b09a66e328205a7eed665f95fa47dd121d3d6481
| 844
|
py
|
Python
|
test/test_contact.py
|
hawkins/Shawk
|
c5be1165d8f6c0471544d06f7da07156df1dcc10
|
[
"MIT"
] | 13
|
2016-05-05T11:16:31.000Z
|
2021-02-25T11:23:14.000Z
|
test/test_contact.py
|
hawkins/Shawk
|
c5be1165d8f6c0471544d06f7da07156df1dcc10
|
[
"MIT"
] | 18
|
2016-05-05T20:23:51.000Z
|
2020-12-25T16:34:05.000Z
|
test/test_contact.py
|
hawkins/Shawk
|
c5be1165d8f6c0471544d06f7da07156df1dcc10
|
[
"MIT"
] | 5
|
2017-07-25T23:50:43.000Z
|
2021-12-04T11:05:13.000Z
|
# Import shawk
from shawk import Contact
# Prepare contacts used throughout tests
mini_contact = Contact(12345678, 'Verizon')
name_contact = Contact(12345678, 'Verizon', 'Somebody')
def test_repr_minimal():
assert(repr(mini_contact) == "<shawk.Contact('12345678', 'Verizon', '<No name>')>")
def test_repr_with_name():
assert(repr(name_contact) == "<shawk.Contact('12345678', 'Verizon', 'Somebody')>")
def test_string_minimal():
assert(str(mini_contact) == '<No name>: 12345678 (Verizon)')
def test_string_with_name():
assert(str(name_contact) == 'Somebody: 12345678 (Verizon)')
def test_get_address_verizon():
assert(name_contact.get_address() == '12345678@vtext.com')
def test_get_number():
assert(name_contact.get_number() == '12345678')
def test_get_name():
assert(name_contact.get_name() == 'Somebody')
| 29.103448
| 87
| 0.716825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 288
| 0.341232
|
b09aee01c7ad46563e06e418ba66c3d71fe3908d
| 1,090
|
py
|
Python
|
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/api/ci/views/ci_task_parameter_view.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | 6
|
2018-11-26T08:42:52.000Z
|
2020-06-01T08:33:48.000Z
|
teamcat_service/doraemon/doraemon/api/ci/views/ci_task_parameter_view.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | null | null | null |
teamcat_service/doraemon/doraemon/api/ci/views/ci_task_parameter_view.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | 1
|
2019-01-22T06:45:36.000Z
|
2019-01-22T06:45:36.000Z
|
# coding=utf-8
'''
Created on 2014-1-5
@author: ETHAN
'''
from rest_framework import generics
from doraemon.api.ci.serializer import ci_serializer
from rest_framework.permissions import AllowAny
from doraemon.ci.mongo_models import CITaskParameterGroup
from business.ci.ci_task_parameter_service import CITaskParameterService
class CITaskParameterGroupView(generics.RetrieveUpdateDestroyAPIView):
"""
An endpoint for users to view and update their profile information.
"""
serializer_class = ci_serializer.TaskParameterGroupSerializer
permission_classes=[AllowAny]
def get_object(self):
group_id =self.kwargs['id']
return CITaskParameterService.task_parameter(group_id)
class CITaskParameterGroupListView(generics.ListCreateAPIView):
"""
Parameter id: TaskID
"""
serializer_class = ci_serializer.TaskParameterGroupSerializer
permission_classes=[AllowAny]
def get_queryset(self):
task_id=self.kwargs['task_id']
return CITaskParameterService.task_parameter_list(task_id)
| 27.25
| 72
| 0.763303
| 734
| 0.673394
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.186239
|
b09e1dcc4651bbea3e5a05f15eded482f3e5b822
| 2,097
|
py
|
Python
|
src/leetcode_106_construct_binary_tree_from_inorder_and_postorder_traversal.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_106_construct_binary_tree_from_inorder_and_postorder_traversal.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_106_construct_binary_tree_from_inorder_and_postorder_traversal.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
# @l2g 106 python3
# [106] Construct Binary Tree from Inorder and Postorder Traversal
# Difficulty: Medium
# https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal
#
# Given two integer arrays inorder and postorder where inorder is the inorder traversal of a binary tree and postorder is the postorder traversal of the same tree,
# construct and return the binary tree.
#
# Example 1:
#
#
# Input: inorder = [9,3,15,20,7], postorder = [9,15,7,20,3]
# Output: [3,9,20,null,null,15,7]
#
# Example 2:
#
# Input: inorder = [-1], postorder = [-1]
# Output: [-1]
#
#
# Constraints:
#
# 1 <= inorder.length <= 3000
# postorder.length == inorder.length
# -3000 <= inorder[i], postorder[i] <= 3000
# inorder and postorder consist of unique values.
# Each value of postorder also appears in inorder.
# inorder is guaranteed to be the inorder traversal of the tree.
# postorder is guaranteed to be the postorder traversal of the tree.
#
#
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
from collections import defaultdict
from typing import List, Optional
class Solution:
def buildTree(self, inorder: List[int], postorder: List[int]) -> Optional[TreeNode]:
inorder_map = defaultdict(list)
for i, val in enumerate(inorder):
inorder_map[val] = i
self.post_idx = len(postorder) - 1
def traverse(l, r):
# base case
if l > r:
return
cur_value = postorder[self.post_idx]
self.post_idx -= 1
cur_node = TreeNode(val=cur_value)
inorder_idx = inorder_map[cur_value]
# right subtree
cur_node.right = traverse(inorder_idx + 1, r)
# left subtree
cur_node.left = traverse(l, inorder_idx - 1)
return cur_node
return traverse(0, len(inorder) - 1)
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_106.py")])
| 26.884615
| 163
| 0.637577
| 784
| 0.373867
| 0
| 0
| 0
| 0
| 0
| 0
| 1,187
| 0.566047
|
b0a05dc2fcad4bbe3156161354e99b4fa56b5535
| 666
|
py
|
Python
|
Symmetric Tree.py
|
ChyavanKoushik/-My-Solutions-to-Leetcode-problems-using-Python-3
|
b78779bd3f5313ab4752f9e9a23cb4a93805aff6
|
[
"MIT"
] | 1
|
2020-07-11T15:10:19.000Z
|
2020-07-11T15:10:19.000Z
|
Symmetric Tree.py
|
ChyavanKoushik/-My-Solutions-to-Leetcode-problems-using-Python-3
|
b78779bd3f5313ab4752f9e9a23cb4a93805aff6
|
[
"MIT"
] | null | null | null |
Symmetric Tree.py
|
ChyavanKoushik/-My-Solutions-to-Leetcode-problems-using-Python-3
|
b78779bd3f5313ab4752f9e9a23cb4a93805aff6
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
        ptr = root
        if ptr is None:
            return True
        return self.CheckSym(ptr, ptr)
    def CheckSym(self, x, y):
        if x is None and y is None:
            return True
        elif x is not None and y is not None:
            if x.val == y.val:
                return self.CheckSym(x.left, y.right) and self.CheckSym(x.right, y.left)
        return False
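# A minimal usage sketch; this TreeNode mirrors the commented definition above.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(2)
    print(Solution().isSymmetric(root))  # -> True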
| 24.666667
| 86
| 0.503003
| 503
| 0.755255
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.331832
|
b0a07ddea1fef76e6d524883928e67b18be43ea3
| 2,695
|
py
|
Python
|
tornado/models/Session.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
tornado/models/Session.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
tornado/models/Session.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from conf.dbconfig import TB_SESSION
from core.err_code import DB_ERR, OCT_SUCCESS
from core.log import WARNING,DEBUG
from utils.commonUtil import getUuid, transToStr, transToObj
from utils.timeUtil import get_current_time, getStrTime
SESSION_EXPIRE_TIME = 86400 * 30 * 1000 # one month
class Session:
db = None
username = ""
myId = 0
cookie = { }
createTime = 0
expireTime = 0
dbObj = None
def __init__(self, db=None, myId=None, dbObj=None):
self.db = db
self.myId = myId
self.userId = ""
self.username = ""
self.role = 3
self.dbObj = dbObj
if (self.dbObj):
self.loadFromObj()
def init(self):
cond = "WHERE ID='%s' AND S_ExpireTime > %ld " % (self.myId, get_current_time())
dbObj = self.db.fetchone(TB_SESSION, cond)
if (not dbObj):
return -1
self.dbObj = dbObj
self.loadFromObj()
return 0
def add(self):
self.myId = getUuid()
self.createTime = get_current_time()
self.expireTime = get_current_time() + SESSION_EXPIRE_TIME
obj = {
"ID": self.myId,
"S_UserId": self.userId,
"S_UserName": self.username,
"S_UserType": self.role,
"S_Cookie": transToStr(self.cookie),
"S_CreateTime": self.createTime,
"S_ExpireTime": self.expireTime,
}
ret = self.db.insert(TB_SESSION, obj)
if ret == -1:
WARNING("add session %s error for db operation" % self.myId)
return DB_ERR
return OCT_SUCCESS
def delete(self):
cond = "WHERE ID='%s'" % self.myId
DEBUG("to delete session %s" % (self.myId))
ret = self.db.delete(TB_SESSION, cond=cond)
if ret == -1:
WARNING("delete session %s error for db operation" % self.myId)
return DB_ERR
return 0
def update(self):
obj = {
"S_ExpireTime": get_current_time() + SESSION_EXPIRE_TIME,
}
cond = "WHERE ID='%s'" % self.myId
ret = self.db.update(TB_SESSION, obj, cond=cond)
if ret == -1:
WARNING("update session %s error for db operation" % self.myId)
return DB_ERR
return 0
def loadFromObj(self):
self.myId = self.dbObj["ID"]
self.username = self.dbObj["S_UserName"]
self.role = self.dbObj["S_UserType"]
self.userId = self.dbObj["S_UserId"]
self.cookie = transToObj(self.dbObj["S_Cookie"])
self.createTime = self.dbObj["S_CreateTime"]
self.expireTime = self.dbObj["S_ExpireTime"]
return 0
def toObj(self):
return {
"id": self.myId,
"user": self.username,
"userId": self.userId,
"userRole":self.role,
"cookie": self.cookie,
"creatTime": getStrTime(self.createTime),
"expireTime": getStrTime(self.expireTime)
}
| 21.733871
| 83
| 0.640445
| 2,347
| 0.870872
| 0
| 0
| 0
| 0
| 0
| 0
| 499
| 0.185158
|
b0a17272de31b01f9feb7f36322ad30ad949bc47
| 1,379
|
py
|
Python
|
python/equalSubstring.py
|
l0latgithub/codediary
|
a0327d2ee1137a542886d0af85129692711cd68a
|
[
"MIT"
] | null | null | null |
python/equalSubstring.py
|
l0latgithub/codediary
|
a0327d2ee1137a542886d0af85129692711cd68a
|
[
"MIT"
] | null | null | null |
python/equalSubstring.py
|
l0latgithub/codediary
|
a0327d2ee1137a542886d0af85129692711cd68a
|
[
"MIT"
] | null | null | null |
class Solution:
def equalSubstring(self, s: str, t: str, maxCost: int) -> int:
"""
You are given two strings s and t of the same length.
You want to change s to t. Changing the i-th character
of s to i-th character of t costs |s[i] - t[i]| that is,
the absolute difference between the ASCII values of the characters.
You are also given an integer maxCost.
Return the maximum length of a substring of s that
can be changed to be the same as the corresponding
substring of twith a cost less than or equal to maxCost.
If there is no substring from s that can be changed to
its corresponding substring from t, return 0.
"""
# runcost, lo, ans = 0, 0, 0
# for hi in range(len(s)):
# runcost += abs( ord(s[hi]) -ord(t[hi]) )
# while runcost>maxCost:
# runcost -= abs( ord(s[lo]) -ord(t[lo]) )
# lo+=1
# ans = max(ans, hi-lo+1)
# return ans
runcost, lo, ans = 0, 0, 0
for hi in range(len(s)):
runcost += abs( ord(s[hi]) -ord(t[hi]) )
if runcost>maxCost:
runcost -= abs( ord(s[lo]) -ord(t[lo]) )
lo+=1
return len(s)-lo
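# A quick usage sketch (the first example from the problem statement):
if __name__ == '__main__':
    print(Solution().equalSubstring("abcd", "bcdf", 3))  # -> 3 ("abc" -> "bcd" costs 3)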
| 33.634146
| 75
| 0.503263
| 1,361
| 0.986947
| 0
| 0
| 0
| 0
| 0
| 0
| 934
| 0.677302
|
b0a210478770d270da6e717e23c836b0dee9d3ef
| 180
|
py
|
Python
|
jp.atcoder/abc102/abc102_b/9248681.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc102/abc102_b/9248681.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc102/abc102_b/9248681.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
n, *a = map(int, sys.stdin.read().split())
def main():
a.sort()
return a[-1] - a[0]
if __name__ == "__main__":
ans = main()
print(ans)
| 12.857143
| 43
| 0.494444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.055556
|
b0a2caf9d2447dd1aa1058366507843de22c310d
| 143
|
py
|
Python
|
coding/learn_celery/config.py
|
yatao91/learning_road
|
e88dc43de98e35922bfc71c222ec71766851e618
|
[
"MIT"
] | 3
|
2021-05-25T16:58:52.000Z
|
2022-02-05T09:37:17.000Z
|
coding/learn_celery/config.py
|
yataosu/learning_road
|
e88dc43de98e35922bfc71c222ec71766851e618
|
[
"MIT"
] | null | null | null |
coding/learn_celery/config.py
|
yataosu/learning_road
|
e88dc43de98e35922bfc71c222ec71766851e618
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
broker_url = 'redis://192.168.33.10/0'
result_backend = 'redis://192.168.33.10/1'
imports = ('learn_celery.tasks',)
| 17.875
| 42
| 0.629371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.65035
|
b0a5e221bf9ef4d660262d2ed08fec2672de5ce6
| 1,044
|
py
|
Python
|
seqwise_cont_skillspace/networks/classifier_seq_step_cont_choose_dims.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | null | null | null |
seqwise_cont_skillspace/networks/classifier_seq_step_cont_choose_dims.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | 6
|
2021-02-02T23:00:02.000Z
|
2022-01-13T03:13:51.000Z
|
seqwise_cont_skillspace/networks/classifier_seq_step_cont_choose_dims.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | null | null | null |
from typing import Union
from seqwise_cont_skillspace.networks.rnn_vae_classifier import \
RnnVaeClassifierContSkills
class RnnStepwiseSeqwiseClassifierObsDimSelect(RnnVaeClassifierContSkills):
def __init__(self,
*args,
input_size,
obs_dims_selected: Union[tuple, list]=None,
**kwargs,
):
if obs_dims_selected is not None:
num_obs_selected = len(obs_dims_selected)
input_size = num_obs_selected
# Sanity check
if 'obs_dims_selected' in kwargs.keys():
raise ValueError('Double dim selected does not make sense')
self.obs_dims_selected = obs_dims_selected
super().__init__(
*args,
input_size=input_size,
**kwargs,
)
def _process_seq(self, seq_batch):
if self.obs_dims_selected is not None:
seq_batch = seq_batch[..., self.obs_dims_selected]
return super()._process_seq(seq_batch)
| 29
| 75
| 0.612069
| 918
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0
| 74
| 0.070881
|
b0a5f7e6a00c493dc8a6dfec6c9d4ed52c54782b
| 1,165
|
py
|
Python
|
prog_vae/cfg_parser/parser_demo.py
|
Hanjun-Dai/sdvae
|
bd26ea949c496419634fd2cf4802fc8e19a9194c
|
[
"MIT"
] | 70
|
2018-02-24T07:50:59.000Z
|
2021-12-27T02:42:37.000Z
|
prog_vae/cfg_parser/parser_demo.py
|
Hanjun-Dai/sdvae
|
bd26ea949c496419634fd2cf4802fc8e19a9194c
|
[
"MIT"
] | 7
|
2018-05-31T00:50:19.000Z
|
2021-09-28T11:58:22.000Z
|
prog_vae/cfg_parser/parser_demo.py
|
Hanjun-Dai/sdvae
|
bd26ea949c496419634fd2cf4802fc8e19a9194c
|
[
"MIT"
] | 19
|
2019-01-11T10:56:00.000Z
|
2022-03-23T23:09:39.000Z
|
#!/usr/bin/env python2
import os
import nltk
import cfg_parser as parser
def main():
cfg_grammar_file = '../../dropbox/context_free_grammars/prog_leftskew.grammar'
grammar = parser.Grammar(cfg_grammar_file)
ts = parser.parse(
'v1=sin(v0);v2=v0*4;v3=v1/v2;v4=cos(v0);v5=v0*3;v6=sin(v1);v7=v3-v6;v8=v7+v5;v9=v8+v4;return:v9', grammar
)
t = ts[0]
print('(ugly) tree:')
print(t)
print()
print('for root:')
print(
        'symbol is %s, is it non-terminal = %s, its value is %s (of type %s)' %
(t.symbol, isinstance(t, parser.Nonterminal), t.symbol.symbol(), type(t.symbol.symbol()))
)
print(
'rule is %s, its left side is %s (of type %s), its right side is %s, a tuple '
'which each element can be either str (for terminal) or Nonterminal (for nonterminal)' % (
t.rule,
t.rule.lhs(),
type(t.rule.lhs()),
t.rule.rhs(),
)
)
import pdb, traceback, sys, code
if __name__ == '__main__':
try:
main()
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
| 24.270833
| 113
| 0.572532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 446
| 0.382833
|
b0a772eb4ccf5983bb0eef911f8bafb1f815a766
| 161
|
py
|
Python
|
scenarios/settlement_show/executable.py
|
timgates42/balanced-python
|
1df86b45c36a97ec2e214480c6806c4df3c79860
|
[
"MIT"
] | 12
|
2015-04-12T06:18:33.000Z
|
2021-03-03T23:54:19.000Z
|
scenarios/settlement_show/executable.py
|
timgates42/balanced-python
|
1df86b45c36a97ec2e214480c6806c4df3c79860
|
[
"MIT"
] | 1
|
2021-11-24T20:10:19.000Z
|
2021-11-24T20:10:19.000Z
|
scenarios/settlement_show/executable.py
|
timgates42/balanced-python
|
1df86b45c36a97ec2e214480c6806c4df3c79860
|
[
"MIT"
] | 14
|
2015-03-23T17:52:06.000Z
|
2021-11-24T11:04:15.000Z
|
import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
settlement = balanced.Settlement.fetch('/settlements/ST6HmBuLJSEa82oUwId1AShW')
| 32.2
| 79
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.509317
|
b0a7fba1d79fe97862c63c08c78298201ee28e87
| 5,857
|
py
|
Python
|
cpf/python/training/auto_trade/action_suggester.py
|
nQuantums/tips
|
cdea2a0e778d2de14cd69882f6273c872c366f03
|
[
"MIT"
] | 1
|
2018-12-11T14:51:43.000Z
|
2018-12-11T14:51:43.000Z
|
cpf/python/training/auto_trade/action_suggester.py
|
nQuantums/tips
|
cdea2a0e778d2de14cd69882f6273c872c366f03
|
[
"MIT"
] | null | null | null |
cpf/python/training/auto_trade/action_suggester.py
|
nQuantums/tips
|
cdea2a0e778d2de14cd69882f6273c872c366f03
|
[
"MIT"
] | null | null | null |
import typing
import numpy as np
import trade_environment
def detect_turning_points(values: np.ndarray, gap: int) -> typing.Tuple[np.ndarray, np.ndarray]:
"""指定数列の折返しポイントの地点を検出する.
Args:
values: 数列.
gap: 折返し判定閾値、この値を超えて反転したら折返しと判断する.
Returns:
(折返しインデックス, 検出途中に生成した一定値以上距離を保って付いてくる値の数列) のタプル.
"""
indices = []
stalkers = np.empty((len(values),), dtype=np.int32)
last_value = int(values[0])
stalker = last_value
stalkers[0] = stalker
last_i = 0
for i in range(1, len(values)):
v = int(values[i])
up = last_value < stalker and stalker <= v
down = stalker < last_value and v <= stalker
if up or down:
delta_array = values[last_i:i + 1]
tpi = last_i + int(np.argmin(delta_array) if up else np.argmax(delta_array))
tpv = int(values[tpi])
indices.append(tpi)
last_i = i
stalker = tpv - gap if up else tpv + gap
# indices.append(i - 1)
# stalker = v - gap if up else v + gap
else:
d = v - stalker
if d < -gap:
stalker = v + gap
elif gap < d:
stalker = v - gap
stalkers[i] = stalker
last_value = v
return np.array(indices, dtype=np.int32), stalkers
class TpActionSuggester:
    """Searches for the turning points in advance and uses them to suggest actions from states of the given environment.
    """
    def __init__(self, env: trade_environment.TradeEnvironment, spread_adj: int = 1) -> None:
        self.env = env  # trading environment
        self.threshould = int(np.rint(env.spread * spread_adj).item())  # entry threshold: do not enter when the gap between the current value and the turning-point value is at or below this
        self.tp_indices = np.empty((0,), dtype=np.int32)  # indices of turning points within the episode
        self.tp_values = np.empty((0,), dtype=np.int32)  # values at the turning points
    def start_episode(self) -> None:
        """Must be called right after an episode of the trading environment starts."""
        values = self.env.episode_values
        c = values[:, 3]
        self.tp_indices, _ = detect_turning_points(c, self.threshould)
        self.tp_values = c[self.tp_indices]
    def get_next_turning_index(self) -> int:
        """Get the index of the next turning point."""
        i1 = np.where(self.env.index_in_episode <= self.tp_indices)[0][:1]
        return i1.item() if i1.size else -1
    def get_suggested_action(self) -> int:
        """Get the suggested action for the current state."""
        tp_indices = self.tp_indices  # turning-point index sequence
        tp_values = self.tp_values  # turning-point value sequence
        value = self.env.get_value()  # current value
        tp_idx = self.get_next_turning_index()  # index of the nearest future turning point
        tp_delta = None  # delta from the current value to the next turning-point value
        on_tp = False  # whether we are currently on a turning point
        if 0 <= tp_idx:
            # if we are exactly on a turning point, the next one becomes the target
            if tp_indices[tp_idx] == self.env.index_in_episode:
                on_tp = True
                tp_idx += 1
            # if turning points remain ahead, compute the delta from the current value
            if tp_idx < len(tp_values):
                tp_delta = tp_values[tp_idx] - value
        threshould = self.threshould
        suggested_action = 0  # do nothing by default
        if self.env.position_type == 0:
            # no position held: buy or sell when the gap to the next turning-point value exceeds the threshold
            if tp_delta is not None:
                if threshould < tp_delta:
                    suggested_action = 1
                elif tp_delta < -threshould:
                    suggested_action = 2
        else:
            # handling while a position is already held
            if on_tp:
                # exactly on a turning point: prepare for the next turn
                suggested_action = 3
                if tp_delta is not None:
                    if threshould < tp_delta:
                        suggested_action = 1
                    elif tp_delta < -threshould:
                        suggested_action = 2
            else:
                # between turning points: adjust the position as needed
                suggested_action = 0
                if tp_delta is not None:
                    if threshould < tp_delta and self.env.position_type != 1:
                        suggested_action = 1
                    elif tp_delta < -threshould and self.env.position_type != -1:
                        suggested_action = 2
                    # elif tp_delta * self.env.position_type < 0:
                    #     suggested_action = 3
        return suggested_action
class TpRewardAdjuster:
    """Class that computes a reward adjustment from a given action, for use with TpActionSuggester.
    Args:
        action_suggester: the action suggestion object.
        adj_rate: factor converting the expected profit/loss into a reward adjustment.
        loss_cut_check: whether to check that losses are cut appropriately.
        securing_profit_check: whether to check that profits are taken appropriately.
    """
    def __init__(self,
                 action_suggester: TpActionSuggester,
                 adj_rate: float = 0.01,
                 loss_cut_check: bool = False,
                 securing_profit_check: bool = False):
        self.action_suggester = action_suggester
        self.adj_rate = adj_rate
        self.loss_cut_check = loss_cut_check
        self.securing_profit_check = securing_profit_check
        self.env = action_suggester.env
        self.threshould = action_suggester.threshould
    def adjust_reward(self, action: int) -> float:
        """Get the reward adjustment for taking the given action in the current state."""
        tp_indices = self.action_suggester.tp_indices  # turning-point index sequence
        tp_values = self.action_suggester.tp_values  # turning-point value sequence
        value = self.env.get_value()  # current value
        tp_idx = self.action_suggester.get_next_turning_index()  # index of the nearest future turning point
        tp_delta = None  # delta from the current value to the next turning-point value
        on_tp = False  # whether we are currently on a turning point
        if 0 <= tp_idx:
            # if we are exactly on a turning point, the next one becomes the target
            if tp_indices[tp_idx] == self.env.index_in_episode:
                on_tp = True
                tp_idx += 1
            # if turning points remain ahead, compute the delta from the current value
            if tp_idx < len(tp_values):
                tp_delta = tp_values[tp_idx] - value
        reward = 0.0
        # drop actions that would be ignored given the current position
        if self.env.is_action_ignored(action):
            action = 0
        if 1 <= action and action <= 3 and self.env.position_type != 0:
            # when closing, adjust the reward by the profit/loss still remaining
            if tp_delta is not None and not on_tp:
                reward -= self.adj_rate * self.env.position_type * tp_delta
        if action == 0:
            if tp_delta is not None:
                if self.env.position_type == 0:
                    # decay the reward for doing nothing while an opportunity exists
                    if self.threshould < abs(tp_delta):
                        reward -= self.adj_rate * abs(tp_delta)
                else:
                    pr = self.env.calc_positional_reward()
                    miss_position = tp_delta * self.env.position_type < 0
                    # keep decaying the reward while holding the wrong position
                    if self.loss_cut_check and miss_position and pr < 0:
                        reward += pr * self.adj_rate
                    # with the right position, penalize the moment a take-profit opportunity is missed
                    if self.securing_profit_check and on_tp and miss_position and 0 < pr:
                        reward -= pr * self.adj_rate
        elif action == 1 or action == 2:
            # adjust the reward by the trade direction and the delta to the next turning point
            if tp_delta is not None:
                reward += self.adj_rate * (1.0 if action == 1 else -1.0) * tp_delta
        return reward
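# A minimal usage sketch of detect_turning_points on synthetic data; it needs
# only numpy, not the trade_environment dependency:
if __name__ == '__main__':
    t = np.arange(200)
    series = np.rint(100.0 * np.sin(t / 10.0) + 1000.0).astype(np.int32)  # synthetic price-like series
    tp_idx, _stalkers = detect_turning_points(series, 5)
    print('turning points at indices:', tp_idx, 'values:', series[tp_idx])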
| 30.035897
| 111
| 0.701383
| 6,353
| 0.829482
| 0
| 0
| 0
| 0
| 0
| 0
| 3,107
| 0.405667
|
b0a816443b29ee3dd3e6c72a76f0961276f90425
| 1,284
|
py
|
Python
|
meregistro/apps/validez_nacional/FSM.py
|
MERegistro/meregistro
|
6cde3cab2bd1a8e3084fa38147de377d229391e3
|
[
"BSD-3-Clause"
] | null | null | null |
meregistro/apps/validez_nacional/FSM.py
|
MERegistro/meregistro
|
6cde3cab2bd1a8e3084fa38147de377d229391e3
|
[
"BSD-3-Clause"
] | null | null | null |
meregistro/apps/validez_nacional/FSM.py
|
MERegistro/meregistro
|
6cde3cab2bd1a8e3084fa38147de377d229391e3
|
[
"BSD-3-Clause"
] | null | null | null |
from models.EstadoSolicitud import EstadoSolicitud
"""
De Pendiente puede pasar a: controlado, retenido.
De controlado puede pasar a: retenido, evaluado, numerado.
De Retenido puede pasar a:controlado, evaluado
De Evaluado puedo pasar a: numerado
"""
class FSMSolicitud:
def __init__(self):
self.estados = {}
self._estadoDesde = {}
for e in EstadoSolicitud.objects.all():
self.estados[e.nombre] = e
self._estadoDesde[EstadoSolicitud.PENDIENTE] = [self.estados[EstadoSolicitud.PENDIENTE], self.estados[EstadoSolicitud.CONTROLADO], self.estados[EstadoSolicitud.RETENIDO],]
self._estadoDesde[EstadoSolicitud.CONTROLADO] = [self.estados[EstadoSolicitud.CONTROLADO], self.estados[EstadoSolicitud.PENDIENTE], self.estados[EstadoSolicitud.RETENIDO], self.estados[EstadoSolicitud.EVALUADO],]
self._estadoDesde[EstadoSolicitud.RETENIDO] = [self.estados[EstadoSolicitud.RETENIDO], self.estados[EstadoSolicitud.PENDIENTE], self.estados[EstadoSolicitud.CONTROLADO], self.estados[EstadoSolicitud.EVALUADO],]
self._estadoDesde[EstadoSolicitud.EVALUADO] = [self.estados[EstadoSolicitud.EVALUADO], self.estados[EstadoSolicitud.PENDIENTE],]
def estadosDesde(self, estado):
return self._estadoDesde[estado.nombre]
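# A minimal usage sketch (hypothetical; assumes the EstadoSolicitud rows exist):
# fsm = FSMSolicitud()
# pendiente = fsm.estados[EstadoSolicitud.PENDIENTE]
# destinos = fsm.estadosDesde(pendiente)  # [PENDIENTE, CONTROLADO, RETENIDO]
# puede_evaluar = fsm.estados[EstadoSolicitud.EVALUADO] in destinos  # False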
| 53.5
| 220
| 0.76324
| 1,030
| 0.802181
| 0
| 0
| 0
| 0
| 0
| 0
| 199
| 0.154984
|
b0a8727d927e47b98409e6272a4aa283bdbce8d5
| 405
|
py
|
Python
|
application/csc648-03-fa21-team04/project/com/vo/MessageVO.py
|
seelapant13/Gator-Learner
|
a10583daf91d76d1a6610418c0f70715b2a1c811
|
[
"MIT"
] | null | null | null |
application/csc648-03-fa21-team04/project/com/vo/MessageVO.py
|
seelapant13/Gator-Learner
|
a10583daf91d76d1a6610418c0f70715b2a1c811
|
[
"MIT"
] | null | null | null |
application/csc648-03-fa21-team04/project/com/vo/MessageVO.py
|
seelapant13/Gator-Learner
|
a10583daf91d76d1a6610418c0f70715b2a1c811
|
[
"MIT"
] | null | null | null |
# Class: CSC-648-848 Fall 2021
# Author: Manali Seth
# Description: Contains columns and datatypes of Messaging Table of database
from wtforms import *
class MessageVO:
msgId = IntegerField
msgTo_loginId = IntegerField
msgFrom_loginId = IntegerField
msg_majorId = IntegerField
msg_courseNo = IntegerField
msgDate = StringField
msgTime = StringField
msgDesc = StringField
| 25.3125
| 76
| 0.748148
| 251
| 0.619753
| 0
| 0
| 0
| 0
| 0
| 0
| 127
| 0.31358
|
b0a9f0bcb8836537939df03b6e758465264c1d2b
| 330
|
py
|
Python
|
mesyparams.py
|
jkrueger1/stm_ethernet
|
f9fc87222a6a12f64d033c79a3858e16405ea524
|
[
"Apache-2.0"
] | 2
|
2019-08-02T10:00:46.000Z
|
2020-07-27T02:25:23.000Z
|
mesyparams.py
|
jkrueger1/stm_ethernet
|
f9fc87222a6a12f64d033c79a3858e16405ea524
|
[
"Apache-2.0"
] | null | null | null |
mesyparams.py
|
jkrueger1/stm_ethernet
|
f9fc87222a6a12f64d033c79a3858e16405ea524
|
[
"Apache-2.0"
] | 1
|
2019-12-21T11:40:30.000Z
|
2019-12-21T11:40:30.000Z
|
import sys
import socket
from mesylib import send_cmd
try:
addr = sys.argv[1]
rate = int(sys.argv[2])
except (ValueError, IndexError):
print('usage: mesyparams.py ipaddr rate')
sys.exit(1)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
send_cmd(sock, addr, 0xF1F0, 'IH', rate, 0)
print('configure ok')
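# A sketch of the packing a send_cmd like the one above implies, assuming the
# command id and the 'IH' format map straight onto a little-endian UDP payload;
# MESY_PORT is a placeholder, and the real framing lives in mesylib:
import struct
MESY_PORT = 54321  # hypothetical port
def send_cmd_sketch(sock, addr, cmd, fmt, *args):
    payload = struct.pack('<H' + fmt, cmd, *args)  # command id, then the arguments
    sock.sendto(payload, (addr, MESY_PORT))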
| 20.625
| 55
| 0.70303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.157576
|
b0aa64db3850f67c0060437e85aa0d6cf6487d84
| 11,751
|
py
|
Python
|
Data/Stocks/Stock.py
|
yangtx2009/finance
|
5ba428f0495a16de89ea852e04e71bde8a00f9ba
|
[
"MIT"
] | null | null | null |
Data/Stocks/Stock.py
|
yangtx2009/finance
|
5ba428f0495a16de89ea852e04e71bde8a00f9ba
|
[
"MIT"
] | null | null | null |
Data/Stocks/Stock.py
|
yangtx2009/finance
|
5ba428f0495a16de89ea852e04e71bde8a00f9ba
|
[
"MIT"
] | null | null | null |
# from bin.x64.iFinDPy import *
import urllib.request
import json
from abc import ABC
import matplotlib.pyplot as plt
import pandas as pd
import os
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from datetime import datetime
import time
import itertools
import copy
from sklearn import preprocessing
import random
import tqdm
from Data.Stocks.Loader import LoadFinishCondition, LoadThread
from Data.SqlClient import DatabaseClient
pd.set_option('display.max_columns', 20)
class Stock(ABC):
# https://www.jianshu.com/p/2f45fcb44771
def __init__(self):
super(Stock, self).__init__()
self.localDir = os.path.dirname(os.path.realpath(__file__))
self.collection = {}
self.selected_data = None
self.client = DatabaseClient()
self.m_industryList = pd.DataFrame(columns=["industry", "link"])
self.loadIndustryListFromDB()
self.m_stockList = pd.DataFrame(columns=["industry", "name", "symbol"])
self.loadStockListFromDB()
self.loadStocks()
def loadIndustryListFromDB(self):
df = self.client.readTableNames()
if "industry" in df.values:
self.m_industryList = self.client.readTable("industry")
print("industries\n", self.m_industryList)
else:
self.readIndustryList()
self.client.createTable("industry", ["industry VARCHAR(255)", "link VARCHAR(255)"], "industry")
self.client.storeData("industry", self.m_industryList, "append")
self.client.showTable("industry")
def loadStockListFromDB(self):
df = self.client.readTableNames()
if "stock" in df.values:
print("Stock already exists in database")
self.m_stockList = self.client.readTable("stock")
print("stocks\n", self.m_stockList)
else:
print("Cannot find 'stock' in database. Creating it ...")
self.client.createTable("stock", ["industry VARCHAR(255)", "name VARCHAR(255)", "symbol VARCHAR(255)"],
"symbol")
self.readStockList()
self.m_stockList = self.m_stockList.drop_duplicates(subset=['symbol'])
self.client.storeData("stock", self.m_stockList, "append")
self.client.showTable("stock")
def readHSIndex(self, p_draw=False):
url = "http://img1.money.126.net/data/hs/kline/day/times/1399001.json"
with urllib.request.urlopen(url) as url_file:
l_jsonData = json.loads(url_file.read().decode())
self.m_hsIndexTotal = pd.DataFrame(data={"closes": l_jsonData["closes"], "times": l_jsonData["times"]})
print("hsIndex total", self.m_hsIndexTotal.head(5))
url = "http://img1.money.126.net/data/hs/time/today/1399001.json"
with urllib.request.urlopen(url) as url_file:
l_jsonData = json.loads(url_file.read().decode())
print(l_jsonData.keys())
# self.m_hsIndexToday = pd.DataFrame(data={"data": data["closes"], "times": data["times"]})
# print("hsIndex today", self.m_hsIndexToday.head(5))
if p_draw:
self.m_hsIndexTotal.plot(x="times", y="closes", title="HS index", figsize=(10, 4))
plt.title("HS index", fontproperties='SimHei', fontsize='large')
plt.show()
def readIndustryList(self):
print("Reading industry list ...")
url = "http://stock.eastmoney.com/hangye.html"
r = requests.get(url)
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html.parser')
hangye_div = soup.find('div', {'class': 'hot-hy-list'})
children = hangye_div.findChildren("a", recursive=True)
for child in children:
original_link = child.get("href")
code = int(original_link.split(".")[0].split("hy")[1])
link = "http://quote.eastmoney.com/center/boardlist.html#boards-BK{:04d}1".format(code)
self.m_industryList = self.m_industryList.append({"industry": child.get("title"), "link": link},
ignore_index=True)
print("Created new industry list")
# self.m_industryList.to_csv("IndustryList.csv")
# print(self.m_industryList["industry"], "\n")
def xpath_soup(self, element):
"""
Generate xpath of soup element
:param element: bs4 text or node
:return: xpath as string
"""
components = []
child = element if element.name else element.parent
for parent in child.parents:
"""
@type parent: bs4.element.Tag
"""
previous = itertools.islice(parent.children, 0, parent.contents.index(child))
xpath_tag = child.name
xpath_index = sum(1 for i in previous if i.name == xpath_tag) + 1
components.append(xpath_tag if xpath_index == 1 else '%s[%d]' % (xpath_tag, xpath_index))
child = parent
components.reverse()
return '/%s' % '/'.join(components)
def readStockList(self):
"""
use selenium to wait for javascript in webpage loading data
:return:
"""
print("Reading stock list ...")
startTime = time.time()
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.add_argument("--headless")
fireFoxOptions.add_argument('--disable-gpu')
fireFoxOptions.add_argument('--no-sandbox')
browser = webdriver.Firefox(firefox_options=fireFoxOptions, executable_path=r"geckodriver.exe")
for index, row in tqdm.tqdm(self.m_industryList.iterrows()):
print("{}/{}: Getting {} information ({})".format(index, len(self.m_industryList), row["industry"],
row['link']))
industry_url = row['link']
browser.get(industry_url)
# time.sleep(5)
WebDriverWait(browser, timeout=10).until(LoadFinishCondition()) # , poll_frequency=5
html = browser.page_source
soup = BeautifulSoup(html, 'html.parser')
while True:
next_button_soup = None
self.findStocks(soup, row["industry"])
next_button_soup = soup.find("a", {"class", "next paginate_button"})
if next_button_soup:
xpath = self.xpath_soup(next_button_soup)
next_button = browser.find_element_by_xpath(xpath)
if next_button:
next_button.click()
print("To next page")
WebDriverWait(browser, timeout=10).until(LoadFinishCondition())
html = browser.page_source
soup = BeautifulSoup(html, 'html.parser')
else:
print("Cannot find button component!")
break
else:
print("Cannot find next page button!")
break
self.m_stockList.to_csv("StockList.csv")
browser.quit()
print("Created new industry list")
# self.m_stockList.to_csv("StockList.csv")
print(self.m_stockList.head(5))
timeElapsed = (time.time() - startTime)
print("The loading of stock list takes {} seconds".format(timeElapsed))
def findStocks(self, soup, key):
table = soup.find('table', {'id': 'table_wrapper-table'})
stocks = table.findChild("tbody", recursive=True).findChildren("tr", recursive=True)
for stock in stocks:
values = stock.findChildren("td", recursive=True)
temp = {"industry": key}
for idx, value in enumerate(values):
if idx == 1:
temp["symbol"] = value.string
elif idx == 2:
temp["name"] = value.string
# print("adding stock:", temp)
self.m_stockList = self.m_stockList.append(temp, ignore_index=True)
def correctTimes(self):
industries = self.m_stockList.groupby("industry")
for name, industry in industries:
filename = os.path.join("industries", "{}.csv".format(name))
data = pd.read_csv(filename)
data["times"] = ["{}.{}.{}".format(str(t)[:4], str(t)[4:6], str(t)[6:8]) for t in data["times"].tolist()]
data.to_csv(filename)
print(data.head(10))
def chunkIt(self, seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
def loadStocks(self, threadNum=30):
industries = self.m_stockList.groupby("industry")
if not os.path.exists("industries"):
os.makedirs("industries")
industryNames = list(industries.groups.keys())
        temp = list(industryNames)  # iterate over a copy: the loop below mutates industryNames
        for industryName in temp:
path = os.path.join("industries", "{}.csv".format(industryName))
if os.path.exists(path):
industryNames.remove(industryName)
self.collection[industryName] = pd.read_csv(path)
if len(industryNames) > 0:
grouped = self.chunkIt(industryNames, threadNum)
threads = list()
for n in range(threadNum):
threads.append(LoadThread(n, self, grouped[n]))
threads[n].start()
print("Waiting for reading stocks ...")
for n in range(threadNum):
threads[n].join()
else:
print("Already read all stocks ...")
def calculateIndustryPerformance(self, showRows=100):
print("Calculating industry performance ...")
industries = self.m_stockList.groupby("industry")
if os.path.exists(os.path.join(self.localDir, "joined.csv")):
joined = pd.read_csv(os.path.join(self.localDir, "joined.csv"))
else:
joined = None
for idx, (name, data) in enumerate(self.collection.items()):
averaged_industry = pd.DataFrame(columns=["times", name])
averaged_industry["times"] = data["times"].tolist()
data = data.fillna(0)
temp = copy.deepcopy(data).drop("times", axis=1)
nonZeroNum = temp.gt(0).sum(axis=1)
if name == "珠宝首饰":
print("珠宝首饰", nonZeroNum)
temp = temp.sum(axis=1) / nonZeroNum
averaged_industry[name] = temp
if joined is None:
joined = averaged_industry
else:
joined = pd.merge(joined, averaged_industry, on="times", how='outer')
joined = joined.sort_values(by="times")
joined.to_csv(os.path.join(self.localDir, "joined.csv"), index=False)
self.selected_data = joined.tail(showRows)
def getRandomStock(self):
industries = self.m_stockList.groupby("industry")
industryNames = list(industries.groups.keys())
industryName = random.sample(industryNames, 1)[0]
filename = os.path.join(self.localDir, "industries", "{}.csv".format(industryName))
if os.path.exists(filename):
data = pd.read_csv(filename)
titles = list(data.columns)
titles.remove("times")
return data[["times", titles[0]]]
else:
print("Cannot find {} in industries directory".format(filename))
return None
if __name__ == '__main__':
stock = Stock()
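    # A quick, self-contained exercise of xpath_soup, independent of the
    # Selenium/DB setup the rest of the class needs:
    _soup = BeautifulSoup('<div><p>a</p><p id="x">b</p></div>', 'html.parser')
    _target = _soup.find('p', {'id': 'x'})
    print(stock.xpath_soup(_target))  # prints /div/p[2]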
| 40.6609
| 117
| 0.580631
| 11,154
| 0.947905
| 0
| 0
| 0
| 0
| 0
| 0
| 2,455
| 0.208634
|
b0ad7ea0b53d5e610e64b17510e844460d79cd69
| 947
|
py
|
Python
|
daily_learn/basic_pdf.py
|
thc128/limudScripts
|
4d44f82b00b9cd7bca58b56cbc0fba08914c89a1
|
[
"MIT"
] | null | null | null |
daily_learn/basic_pdf.py
|
thc128/limudScripts
|
4d44f82b00b9cd7bca58b56cbc0fba08914c89a1
|
[
"MIT"
] | null | null | null |
daily_learn/basic_pdf.py
|
thc128/limudScripts
|
4d44f82b00b9cd7bca58b56cbc0fba08914c89a1
|
[
"MIT"
] | null | null | null |
from PyPDF2 import PdfFileWriter, PdfFileReader
import daily_learn
def read_book(path):
input1 = PdfFileReader(open(path, "rb"))
my_book=[]
for i in xrange(input1.getNumPages()):
my_book.append(input1.getPage(i))
return my_book
def write_files(output_file,folder,name):
j=0
output = []
for wok in output_file:
output.append(PdfFileWriter())
for day in wok:
for page in day:
output[j].addPage(page)
outputStream = file(folder + "\\week_" + str(j) + "_" + name, "wb")
output[j].write(outputStream)
j = j + 1
book1=read_book("korban.pdf")
book2=read_book("har_habait.pdf")
book3=read_book("RIF.pdf")
my_book1=daily_learn.Mybook(book1,2,5,17)
my_book2=daily_learn.Mybook(book2,1,5,0,2)
my_book3=daily_learn.Mybook(book3,2,7,205)
my_file = daily_learn.books_into_weeks([my_book1,my_book2,my_book3],4)
write_files(my_file,'splitted','torah.pdf')
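# The module above is Python 2 (xrange, file()). A rough modern equivalent of
# read_book, assuming a recent PyPDF2 where PdfReader exposes a .pages list:
def read_book_py3(path):
    from PyPDF2 import PdfReader  # PyPDF2 >= 2.x
    return list(PdfReader(path).pages)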
| 30.548387
| 75
| 0.674762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.082365
|
b0ad9a13b69f0d5d58e42c1013c83345a35544ed
| 592
|
py
|
Python
|
firmware/polyfit.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 14
|
2020-02-16T15:36:31.000Z
|
2022-03-27T02:24:40.000Z
|
firmware/polyfit.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 1
|
2020-11-23T16:16:33.000Z
|
2020-11-23T16:16:33.000Z
|
firmware/polyfit.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 4
|
2021-03-29T16:55:03.000Z
|
2022-01-23T16:43:59.000Z
|
import numpy as np
from matplotlib import pyplot as plt
def function_to_approximate(x):
return 128*(np.sin((np.pi*x/256)-(0.5*np.pi))+1)
def calculate_poly(z, x):
    # Horner evaluation of the degree-4 fit: np.polyfit returns the highest-order
    # coefficient first, so all five coefficients z[0]..z[4] must be used
    return np.round(z[4]+x*(z[3]+x*(z[2]+x*(z[1]+z[0]*x))))
def quantize(z, fraction_bits):
q = 2.0**fraction_bits
z = z * q
z = np.round(z)
z /= q
return z
a = np.arange(256)
z = np.polyfit(a, function_to_approximate(a), 4)
p = np.poly1d(z)
z = quantize(z, 18)
q = np.poly1d(z)
print(z * 2**18)
plt.plot(function_to_approximate(a))
plt.plot(p(a))
plt.plot(q(a))
plt.plot(calculate_poly(z, a))
plt.show()
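# Worst-case absolute error of the quantized polynomial against the reference
# curve (pure numpy, no extra assumptions):
print('max abs error after 18-bit quantization:',
      np.max(np.abs(q(a) - function_to_approximate(a))))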
| 17.939394
| 52
| 0.633446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b0af71c36645d20ea3374d900810326b1c95631d
| 11,718
|
py
|
Python
|
apps/integration_tests/selenium_cases.py
|
CMSgov/bluebutton-web-server
|
3e7bfb049a2b6bd64fdc4eeae7512b461ccbe682
|
[
"Apache-2.0"
] | 25
|
2017-12-10T00:48:31.000Z
|
2022-03-25T01:29:13.000Z
|
apps/integration_tests/selenium_cases.py
|
CMSgov/bluebutton-web-server
|
3e7bfb049a2b6bd64fdc4eeae7512b461ccbe682
|
[
"Apache-2.0"
] | 298
|
2017-12-05T05:53:32.000Z
|
2022-03-21T19:29:03.000Z
|
apps/integration_tests/selenium_cases.py
|
CMSgov/bluebutton-web-server
|
3e7bfb049a2b6bd64fdc4eeae7512b461ccbe682
|
[
"Apache-2.0"
] | 31
|
2017-12-04T16:01:12.000Z
|
2021-09-26T22:34:55.000Z
|
from django.conf import settings
from enum import Enum
from selenium.webdriver.common.by import By
class Action(Enum):
LOAD_PAGE = 1
FIND_CLICK = 2
FIND = 3
FIND_SEND_KEY = 4
CHECK = 5
BACK = 6
LOGIN = 7
CONTAIN_TEXT = 8
GET_SAMPLE_TOKEN_START = 9
SLEEP = 10
TESTCLIENT_BUNDLE_LABEL_FMT = "Response (Bundle of {}), API version: {}"
TESTCLIENT_RESOURCE_LABEL_FMT = "Response ({}), API version: {}"
MESSAGE_NO_PERMISSION = "You do not have permission to perform this action."
TESTCASE_BANNER_FMT = "** {} TEST: {}, API: {}, STEP: {}, {}"
'''
UI Widget text: texts on e.g. buttons, links, labels etc.
'''
LNK_TXT_TESTCLIENT = "Test Client"
LNK_TXT_GET_TOKEN_V1 = "Get a Sample Authorization Token"
LNK_TXT_GET_TOKEN_V2 = "Get a Sample Authorization Token for v2"
LNK_TXT_AUTH_AS_BENE = "Authorize as a Beneficiary"
LNK_TXT_RESTART_TESTCLIENT = "restart testclient"
# FHIR search result bundle pagination
LNK_TXT_NAV_FIRST = "first"
LNK_TXT_NAV_NEXT = "next"
LNK_TXT_NAV_PREV = "previous"
LNK_TXT_NAV_LAST = "last"
LNK_TXT_NAV_SELF = "self"
# FHIR resources query page
LNK_TXT_PATIENT = "Patient"
LNK_TXT_EOB = "ExplanationOfBenefit"
LNK_TXT_COVERAGE = "Coverage"
LNK_TXT_PROFILE = "Profile"
LNK_TXT_METADATA = "FHIR Metadata"
LNK_TXT_OIDC_DISCOVERY = "OIDC Discovery"
# FHIR result page label H2
LAB_FHIR_RESULTPAGE_H2 = "h2"
CONTENT_FHIR_RESULTPAGE_PRE = "pre"
# MSLSX login form
TXT_FLD_SUB_MSLSX = "username"
TXT_FLD_HICN_MSLSX = "hicn"
TXT_FLD_MBI_MSLSX = "mbi"
TXT_FLD_VAL_SUB_MSLSX = "fred"
MSLSX_TXT_FLD_HICN_VAL = "1000044680"
MSLSX_TXT_FLD_MBI_VAL = "2SW4N00AA00"
MSLSX_CSS_BUTTON = "button"
# SLSX login form
SLSX_TXT_FLD_USERNAME = "username-textbox"
SLSX_TXT_FLD_PASSWORD = "password-textbox"
SLSX_TXT_FLD_USERNAME_VAL = "BBUser00000"
SLSX_TXT_FLD_PASSWORD_VAL = "PW00000!"
SLSX_CSS_BUTTON = "login-button"
# Demographic info access grant form
BTN_ID_GRANT_DEMO_ACCESS = "approve"
BTN_ID_DENY_DEMO_ACCESS = "deny"
BTN_ID_RADIO_NOT_SHARE = "label:nth-child(5)"
# API versions
API_V2 = "v2"
API_V1 = "v1"
BROWSERBACK = {
"display": "Back to FHIR resource page",
"action": Action.BACK,
}
WAIT_SECONDS = {
"display": "Sleep seconds...",
"action": Action.SLEEP,
"params": [3],
}
CHECK_TESTCLIENT_START_PAGE = {
"display": "Check it's on 'Test Client' start page",
"action": Action.FIND,
"params": [30, By.LINK_TEXT, LNK_TXT_GET_TOKEN_V1]
}
CLICK_TESTCLIENT = {
"display": "Click link 'Test Client'",
"action": Action.FIND_CLICK,
"params": [30, By.LINK_TEXT, LNK_TXT_TESTCLIENT]
}
CLICK_RADIO_NOT_SHARE = {
"display": "Click 'Share healthcare data, but not your personal info' on DEMO info grant form",
"action": Action.FIND_CLICK,
"params": [20, By.CSS_SELECTOR, BTN_ID_RADIO_NOT_SHARE]
}
CLICK_AGREE_ACCESS = {
"display": "Click 'Agree' on DEMO info grant form",
"action": Action.FIND_CLICK,
"params": [20, By.ID, BTN_ID_GRANT_DEMO_ACCESS]
}
CLICK_DENY_ACCESS = {
"display": "Click 'Deny' on DEMO info grant form",
"action": Action.FIND_CLICK,
"params": [20, By.ID, BTN_ID_DENY_DEMO_ACCESS]
}
CALL_LOGIN = {
"display": "Start login ...",
"action": Action.LOGIN,
}
SEQ_LOGIN_MSLSX = [
{
"display": "Input SUB(username)",
"action": Action.FIND_SEND_KEY,
"params": [20, By.NAME, TXT_FLD_SUB_MSLSX, TXT_FLD_VAL_SUB_MSLSX]
},
{
"display": "Input hicn",
"action": Action.FIND_SEND_KEY,
"params": [20, By.NAME, TXT_FLD_HICN_MSLSX, MSLSX_TXT_FLD_HICN_VAL]
},
{
"display": "Input mbi",
"action": Action.FIND_SEND_KEY,
"params": [20, By.NAME, TXT_FLD_MBI_MSLSX, MSLSX_TXT_FLD_MBI_VAL]
},
{
"display": "Click 'submit' on MSLSX login form",
"action": Action.FIND_CLICK,
"params": [20, By.CSS_SELECTOR, MSLSX_CSS_BUTTON]
},
]
SEQ_LOGIN_SLSX = [
{
"display": "Medicare.gov login username",
"action": Action.FIND_SEND_KEY,
"params": [20, By.ID, SLSX_TXT_FLD_USERNAME, SLSX_TXT_FLD_USERNAME_VAL]
},
{
"display": "Medicare.gov login password",
"action": Action.FIND_SEND_KEY,
"params": [20, By.ID, SLSX_TXT_FLD_PASSWORD, SLSX_TXT_FLD_PASSWORD_VAL]
},
{
"display": "Click 'submit' on SLSX login form",
"action": Action.FIND_CLICK,
"params": [20, By.ID, SLSX_CSS_BUTTON]
},
]
SEQ_AUTHORIZE_START = [
{
"display": "Load BB2 Landing Page ...",
"action": Action.LOAD_PAGE,
"params": [settings.HOSTNAME_URL]
},
CLICK_TESTCLIENT,
{
"display": "Click link to get sample token v1/v2",
"action": Action.GET_SAMPLE_TOKEN_START,
},
{
"display": "Click link 'Authorize as a Beneficiary' - start authorization",
"action": Action.FIND_CLICK,
"params": [30, By.LINK_TEXT, LNK_TXT_AUTH_AS_BENE]
},
]
SEQ_QUERY_FHIR_RESOURCES = [
{
"display": "Click 'Patient' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_PATIENT]
},
{
"display": "Check Patient result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_PATIENT]
},
BROWSERBACK,
{
"display": "Click 'Coverage' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_COVERAGE]
},
{
"display": "Check Coverage result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_BUNDLE_LABEL_FMT, LNK_TXT_COVERAGE]
},
{
"display": "Check and click Coverage result page navigation links 'last'",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_NAV_LAST]
},
CLICK_TESTCLIENT,
{
"display": "Click 'ExplanationOfBenefit' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_EOB]
},
{
"display": "Check ExplanationOfBenefit result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_BUNDLE_LABEL_FMT, LNK_TXT_EOB]
},
{
"display": "Check and click ExplanationOfBenefit result page navigation links 'last'",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_NAV_LAST]
},
WAIT_SECONDS,
CLICK_TESTCLIENT,
WAIT_SECONDS,
{
"display": "Click 'Profile' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_PROFILE]
},
WAIT_SECONDS,
{
"display": "Check Profile result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT,
"{} (OIDC Userinfo)".format(LNK_TXT_PROFILE)]
},
BROWSERBACK,
{
"display": "Click 'FHIR Metadata' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_METADATA]
},
{
"display": "Check FHIR Metadata result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_METADATA]
},
BROWSERBACK,
{
"display": "Click 'OIDC Discovery' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_OIDC_DISCOVERY]
},
{
"display": "Check OIDC Discovery result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_OIDC_DISCOVERY]
},
BROWSERBACK,
]
SEQ_QUERY_FHIR_RESOURCES_NO_DEMO = [
{
"display": "Click 'Patient' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_PATIENT]
},
{
"display": "Check Patient result page content (<pre>) expect no permission message",
"action": Action.CONTAIN_TEXT,
"params": [20, By.TAG_NAME, CONTENT_FHIR_RESULTPAGE_PRE, MESSAGE_NO_PERMISSION]
},
BROWSERBACK,
{
"display": "Click 'Coverage' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_COVERAGE]
},
{
"display": "Check Coverage result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_BUNDLE_LABEL_FMT, LNK_TXT_COVERAGE]
},
{
"display": "Check and click Coverage result page navigation links 'last'",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_NAV_LAST]
},
CLICK_TESTCLIENT,
{
"display": "Click 'ExplanationOfBenefit' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_EOB]
},
{
"display": "Check ExplanationOfBenefit result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_BUNDLE_LABEL_FMT, LNK_TXT_EOB]
},
{
"display": "Check and click ExplanationOfBenefit result page navigation links 'last'",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_NAV_LAST]
},
WAIT_SECONDS,
CLICK_TESTCLIENT,
WAIT_SECONDS,
{
"display": "Click 'Profile' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_PROFILE]
},
WAIT_SECONDS,
{
"display": "Check Profile result page content (<pre>) expect no permission message",
"action": Action.CONTAIN_TEXT,
"params": [20, By.TAG_NAME, CONTENT_FHIR_RESULTPAGE_PRE, MESSAGE_NO_PERMISSION]
},
BROWSERBACK,
{
"display": "Click 'FHIR Metadata' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_METADATA]
},
{
"display": "Check FHIR Metadata result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_METADATA]
},
BROWSERBACK,
{
"display": "Click 'OIDC Discovery' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_OIDC_DISCOVERY]
},
{
"display": "Check OIDC Discovery result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_OIDC_DISCOVERY]
},
BROWSERBACK,
]
TESTS = {
"auth_grant_fhir_calls": [
{"sequence": SEQ_AUTHORIZE_START},
CALL_LOGIN,
CLICK_AGREE_ACCESS,
{"sequence": SEQ_QUERY_FHIR_RESOURCES}
],
"auth_deny_fhir_calls": [
{"sequence": SEQ_AUTHORIZE_START},
CALL_LOGIN,
CLICK_DENY_ACCESS,
CHECK_TESTCLIENT_START_PAGE
],
"auth_grant_w_no_demo": [
{"sequence": SEQ_AUTHORIZE_START},
CALL_LOGIN,
CLICK_RADIO_NOT_SHARE,
CLICK_AGREE_ACCESS,
{"sequence": SEQ_QUERY_FHIR_RESOURCES_NO_DEMO}
]
}
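# A minimal sketch of the dispatcher these step dicts imply; the real runner
# lives elsewhere in the test suite, and `driver` is a hypothetical webdriver:
from selenium.webdriver.support.ui import WebDriverWait
def run_step(driver, step):
    if step["action"] == Action.LOAD_PAGE:
        driver.get(step["params"][0])
    elif step["action"] == Action.FIND_CLICK:
        timeout, by, value = step["params"][:3]
        WebDriverWait(driver, timeout).until(
            lambda d: d.find_element(by, value)).click()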
| 32.55
| 115
| 0.628179
| 209
| 0.017836
| 0
| 0
| 0
| 0
| 0
| 0
| 4,040
| 0.344769
|
b0b1e3d43f16b67a5ff4e9b9b0c971aecd3e9976
| 4,854
|
py
|
Python
|
Zhuanlan.py
|
rainmanzj/smartspider
|
e0b9ec2fe6ef93b026e2773f342774263aa66f2d
|
[
"MIT"
] | 340
|
2016-07-17T08:28:39.000Z
|
2022-03-30T15:23:27.000Z
|
Zhuanlan.py
|
rainmanzj/smartspider
|
e0b9ec2fe6ef93b026e2773f342774263aa66f2d
|
[
"MIT"
] | 10
|
2016-08-10T00:19:31.000Z
|
2020-04-22T08:24:25.000Z
|
Zhuanlan.py
|
rainmanzj/smartspider
|
e0b9ec2fe6ef93b026e2773f342774263aa66f2d
|
[
"MIT"
] | 76
|
2016-07-17T12:18:31.000Z
|
2021-12-21T09:01:03.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@author: lizheming
@contact: nkdudu@126.com
@site: lizheming.top
@file: Zhuanlan.py
"""
from zhihu import headers, clear, error, session
from bs4 import BeautifulSoup
import re
import webbrowser
import termcolor
import requests
import json
import sys
class Zhuanlan:
url = None
zhuanlan = None
soup = None
originalurl = None
def __init__(self, url):
#https://zhuanlan.zhihu.com/p/20825292
self.originalurl = url
number = re.findall(r"(\d+)", url)[0]
self.url = "http://zhuanlan.zhihu.com/api/posts/" + str(number)
self.headers = headers.copy()
self.headers["Host"] = "zhuanlan.zhihu.com"
def parse(self):
self.se = requests.Session()
for cookie in session.cookies:
self.se.cookies.set_cookie(cookie)
n = 3
res = None
while n > 0:
try:
res = self.se.get(self.url, headers=self.headers, timeout=30)
break
            except:
                n -= 1  # keep retrying; res stays None if every attempt fails
if not res:
print termcolor.colored("网络故障,请检查您的网络设置", "red")
sys.exit()
self.zhuanlan = dict(res.json())
self.soup = BeautifulSoup(self.zhuanlan["content"], "html.parser")
return True
def open_in_browser(self):
webbrowser.open_new(self.originalurl)
def check(self):
if not self.soup:
self.parse()
def get_title(self):
self.check()
return termcolor.colored(self.zhuanlan["title"], "blue")
def get_content(self):
self.check()
from Answer import print_content
print_content(self.soup.contents)
def get_author_info(self):
self.check()
author = dict(self.zhuanlan["author"])
return author["profileUrl"]
def vote(self, type=1):
self.check()
url = self.url + "/rating"
data = {}
if type == 1:
data["value"] = "none"
try:
self.se.put(url, json.dumps(data), headers=self.headers, timeout=15)
except:
print termcolor.colored("网络故障,请检查您的网络设置", "yellow")
return
data["value"] = "like"
else:
data["value"] = "none"
self.headers['Content-Type'] = "application/json;charset=UTF-8"
self.headers["Referer"] = self.originalurl
self.headers["Origin"] = "https://zhuanlan.zhihu.com"
self.headers['X-XSRF-TOKEN'] = self.se.cookies['XSRF-TOKEN']
try:
res = self.se.put(url, json.dumps(data), headers=self.headers, timeout=15)
except:
print termcolor.colored("网络故障,请检查您的网络设置", "yellow")
return
if res.status_code == 204:
s = "取消赞同成功" if data["value"] == "none" else "赞同成功"
print termcolor.colored(s, "blue")
elif res.status_code == 404:
s = "还没有赞同过" if data["value"] == "none" else "已经赞同过了"
print termcolor.colored(s, "blue")
def operate(self):
if not self.parse():
return True
print self.get_title()
while True:
op = raw_input("zhuanlan$ ")
if op == "content":
self.get_content()
elif op == "author":
url = self.get_author_info()
if not url:
print termcolor.colored("当前用户为匿名用户", "red")
else:
from User import User
user = User(url)
if user.operate():
return True
elif op == "voteup":
self.vote(type=1)
elif op == "votecancle":
self.vote(type=2)
elif op == "pwd":
print self.get_title()
elif op == "browser":
self.open_in_browser()
elif op == "clear":
clear()
elif op == "break":
break
elif op == "help":
self.help()
elif op == "quit":
return True
else:
error()
def help(self):
info = "\n" \
"**********************************************************\n" \
"**\n" \
"** content: 查看内容\n" \
"** author: 查看作者\n" \
"** voteup: 赞同\n" \
"** votecancle: 取消赞同\n" \
"** pwd: 显示当前专栏\n" \
"** browser: 在默认浏览器中查看\n" \
"** break: 返回上级操作目录\n" \
"** clear: 清屏\n" \
"** quit: 退出系统\n" \
"**\n" \
"**********************************************************\n"
print termcolor.colored(info, "green")
| 30.528302
| 86
| 0.479192
| 4,767
| 0.938386
| 0
| 0
| 0
| 0
| 0
| 0
| 1,320
| 0.259843
|
b0b389c0a24e47982d3180a11aacaf35518b9f2f
| 2,202
|
py
|
Python
|
extraction/extract.py
|
ejuarezg/manga109-demos
|
667da247683d467047a4bd03c171cb885a27c858
|
[
"MIT"
] | 7
|
2021-03-17T04:26:27.000Z
|
2021-04-21T16:48:40.000Z
|
extraction/extract.py
|
ejuarezg/manga109-demos
|
667da247683d467047a4bd03c171cb885a27c858
|
[
"MIT"
] | 4
|
2021-03-17T06:23:44.000Z
|
2021-11-20T13:49:56.000Z
|
extraction/extract.py
|
ejuarezg/manga109-demos
|
667da247683d467047a4bd03c171cb885a27c858
|
[
"MIT"
] | 3
|
2021-03-17T05:50:40.000Z
|
2021-04-26T02:12:12.000Z
|
import manga109api
import argparse
import os
import glob
from PIL import Image
def args_parser():
"""
    :return: Parsed command-line arguments: book, annotation type, page count, and preprocessing options.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--book', type=str, help='Name of book to annotate from.')
parser.add_argument('--annotation', type=str, help='Type of annotation: "body", "face", "frame", "text".')
parser.add_argument('--pages', type=int, default=1, help='Number of pages to annotate.')
parser.add_argument('--preprocess', action='store_true', help='Preprocess the extracted images to have a uniform size.')
parser.add_argument('--size', type=int, default=128, help='The uniform size if using preprocessing.')
args = parser.parse_args()
return args
if __name__ == "__main__":
ap = args_parser()
manga109_root_dir = "manga109extracted"
if not os.path.exists(manga109_root_dir):
os.makedirs(manga109_root_dir)
book = ap.book
page_count = ap.pages
file_count = [glob.glob(os.path.join(manga109_root_dir, '**', '*.*'), recursive=True)]
count = len(file_count[0])
for page_index in range(page_count):
tracker = 0
p = manga109api.Parser(root_dir="Manga109s_data")
annotation = p.get_annotation(book=book)
img = Image.open(p.img_path(book=book, index=page_index))
for annotation_type in [ap.annotation]:
rois = annotation["page"][page_index][annotation_type]
for roi in rois:
cropped = img.crop((roi["@xmin"], roi["@ymin"], roi["@xmax"], roi["@ymax"]))
image_x_dim, image_y_dim = cropped.size
if ap.preprocess:
cropped = cropped.resize((ap.size, ap.size), Image.ANTIALIAS)
if image_x_dim >= (ap.size / 2) and image_y_dim >= (ap.size / 2):
cropped.save("manga109extracted/%s_%d.jpg" % (ap.book, count))
count += 1
tracker += 1
print("Extracted %d %s images from page %d of %s's book." % (tracker, ap.annotation, page_index + 1, ap.book))
| 44.938776
| 125
| 0.616712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 555
| 0.252044
|
b0b507ab20f27a7bafa822e29f3aeb4ce2fdbbe0
| 5,565
|
py
|
Python
|
libraries/botframework-streaming/botframework/streaming/payloads/header_serializer.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 388
|
2019-05-07T15:53:21.000Z
|
2022-03-28T20:29:46.000Z
|
libraries/botframework-streaming/botframework/streaming/payloads/header_serializer.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 1,286
|
2019-05-07T23:38:19.000Z
|
2022-03-31T10:44:16.000Z
|
libraries/botframework-streaming/botframework/streaming/payloads/header_serializer.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 168
|
2019-05-14T20:23:25.000Z
|
2022-03-16T06:49:14.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from uuid import UUID
from typing import List
from botframework.streaming.transport import TransportConstants
from .models import Header
_CHAR_TO_BINARY_INT = {val.decode(): list(val)[0] for val in [b".", b"\n", b"1", b"0"]}
# TODO: consider abstracting the binary int list logic into a class for easier handling
class HeaderSerializer:
DELIMITER = _CHAR_TO_BINARY_INT["."]
TERMINATOR = _CHAR_TO_BINARY_INT["\n"]
END = _CHAR_TO_BINARY_INT["1"]
NOT_END = _CHAR_TO_BINARY_INT["0"]
TYPE_OFFSET = 0
TYPE_DELIMITER_OFFSET = 1
LENGTH_OFFSET = 2
LENGTH_LENGTH = 6
LENGTH_DELIMETER_OFFSET = 8
ID_OFFSET = 9
ID_LENGTH = 36
ID_DELIMETER_OFFSET = 45
END_OFFSET = 46
TERMINATOR_OFFSET = 47
@staticmethod
def serialize(
header: Header,
buffer: List[int],
offset: int, # pylint: disable=unused-argument
) -> int:
# write type
buffer[HeaderSerializer.TYPE_OFFSET] = HeaderSerializer._char_to_binary_int(
header.type
)
buffer[HeaderSerializer.TYPE_DELIMITER_OFFSET] = HeaderSerializer.DELIMITER
# write length
length_binary_array: List[int] = list(
HeaderSerializer._int_to_formatted_encoded_str(
header.payload_length, "{:06d}"
)
)
HeaderSerializer._write_in_buffer(
length_binary_array, buffer, HeaderSerializer.LENGTH_OFFSET
)
buffer[HeaderSerializer.LENGTH_DELIMETER_OFFSET] = HeaderSerializer.DELIMITER
# write id
id_binary_array: List[int] = list(
HeaderSerializer._uuid_to_numeric_encoded_str(header.id)
)
HeaderSerializer._write_in_buffer(
id_binary_array, buffer, HeaderSerializer.ID_OFFSET
)
buffer[HeaderSerializer.ID_DELIMETER_OFFSET] = HeaderSerializer.DELIMITER
# write terminator
buffer[HeaderSerializer.END_OFFSET] = (
HeaderSerializer.END if header.end else HeaderSerializer.NOT_END
)
buffer[HeaderSerializer.TERMINATOR_OFFSET] = HeaderSerializer.TERMINATOR
return TransportConstants.MAX_HEADER_LENGTH
@staticmethod
def deserialize(
buffer: List[int], offset: int, count: int # pylint: disable=unused-argument
) -> Header:
if count != TransportConstants.MAX_HEADER_LENGTH:
raise ValueError("Cannot deserialize header, incorrect length")
header = Header(
type=HeaderSerializer._binary_int_to_char(
buffer[HeaderSerializer.TYPE_OFFSET]
)
)
if buffer[HeaderSerializer.TYPE_DELIMITER_OFFSET] != HeaderSerializer.DELIMITER:
raise ValueError("Header type delimeter is malformed")
length_str = HeaderSerializer._binary_array_to_str(
buffer[
HeaderSerializer.LENGTH_OFFSET : HeaderSerializer.LENGTH_OFFSET
+ HeaderSerializer.LENGTH_LENGTH
]
)
try:
length = int(length_str)
except Exception:
raise ValueError("Header length is malformed")
header.payload_length = length
if (
buffer[HeaderSerializer.LENGTH_DELIMETER_OFFSET]
!= HeaderSerializer.DELIMITER
):
raise ValueError("Header length delimeter is malformed")
identifier_str = HeaderSerializer._binary_array_to_str(
buffer[
HeaderSerializer.ID_OFFSET : HeaderSerializer.ID_OFFSET
+ HeaderSerializer.ID_LENGTH
]
)
try:
identifier = UUID(identifier_str)
except Exception:
raise ValueError("Header id is malformed")
header.id = identifier
if buffer[HeaderSerializer.ID_DELIMETER_OFFSET] != HeaderSerializer.DELIMITER:
raise ValueError("Header id delimeter is malformed")
if buffer[HeaderSerializer.END_OFFSET] not in [
HeaderSerializer.END,
HeaderSerializer.NOT_END,
]:
raise ValueError("Header end is malformed")
header.end = buffer[HeaderSerializer.END_OFFSET] == HeaderSerializer.END
if buffer[HeaderSerializer.TERMINATOR_OFFSET] != HeaderSerializer.TERMINATOR:
raise ValueError("Header terminator is malformed")
return header
@staticmethod
def _char_to_binary_int(char: str) -> int:
if len(char) != 1:
raise ValueError("Char to cast should be a str of exactly length 1")
unicode_list = list(char.encode())
if len(unicode_list) != 1:
raise ValueError("Char to cast should be in the ASCII domain")
return unicode_list[0]
@staticmethod
def _int_to_formatted_encoded_str(value: int, str_format: str) -> bytes:
return str_format.format(value).encode("ascii")
@staticmethod
def _uuid_to_numeric_encoded_str(value: UUID) -> bytes:
return str(value).encode("ascii")
@staticmethod
def _binary_int_to_char(binary_int: int) -> str:
return bytes([binary_int]).decode("ascii")
@staticmethod
def _binary_array_to_str(binary_array: List[int]) -> str:
return bytes(binary_array).decode("ascii")
@staticmethod
def _write_in_buffer(data: List[int], buffer: List[int], insert_index: int):
for byte_int in data:
buffer[insert_index] = byte_int
insert_index += 1
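# A round-trip sketch (assumes TransportConstants.MAX_HEADER_LENGTH == 48,
# which the offsets above imply: the terminator sits at index 47):
def _round_trip_demo():
    from uuid import uuid4
    buffer = [0] * TransportConstants.MAX_HEADER_LENGTH
    header = Header(type="A")
    header.payload_length = 168
    header.id = uuid4()
    header.end = True
    HeaderSerializer.serialize(header, buffer, 0)
    parsed = HeaderSerializer.deserialize(buffer, 0, len(buffer))
    assert parsed.payload_length == 168 and parsed.id == header.id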
| 32.54386
| 88
| 0.65319
| 5,151
| 0.925606
| 0
| 0
| 4,683
| 0.841509
| 0
| 0
| 721
| 0.12956
|
b0b80838d88129627ee88f66e99dd63a8662a687
| 6,849
|
py
|
Python
|
modules/tools/vehicle_calibration/preprocess.py
|
jzjonah/apollo
|
bc534789dc0548bf2d27f8d72fe255d5c5e4f951
|
[
"Apache-2.0"
] | 22,688
|
2017-07-04T23:17:19.000Z
|
2022-03-31T18:56:48.000Z
|
modules/tools/vehicle_calibration/preprocess.py
|
WJY-Mark/apollo
|
463fb82f9e979d02dcb25044e60931293ab2dba0
|
[
"Apache-2.0"
] | 4,804
|
2017-07-04T22:30:12.000Z
|
2022-03-31T12:58:21.000Z
|
modules/tools/vehicle_calibration/preprocess.py
|
WJY-Mark/apollo
|
463fb82f9e979d02dcb25044e60931293ab2dba0
|
[
"Apache-2.0"
] | 9,985
|
2017-07-04T22:01:17.000Z
|
2022-03-31T14:18:16.000Z
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2020 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This module provides the preprocessing function of vehicle calibration data
"""
import os
import re
import shutil
import time
from absl import app
from absl import flags
from absl import logging
from datetime import datetime
from cyber.python.cyber_py3 import cyber
from modules.dreamview.proto import preprocess_table_pb2
from modules.tools.vehicle_calibration.sanity_check import sanity_check
flags.DEFINE_string('vehicle_type', '', 'The vehicle type to be calibrated')
flags.DEFINE_string('data_path', '/apollo/output', 'Default output data path')
flags.DEFINE_string('calibration_data_path',
'/apollo/modules/calibration/data',
'Default vehicle configuration file directory')
flags.DEFINE_string('config_file_name', 'vehicle_param.pb.txt',
'Default vehicle configuration file name')
flags.DEFINE_string('record_root_path', '/apollo/data/bag',
'Default record root path')
flags.DEFINE_integer(
'record_num', 1, 'The number of record folders '
'required for this calibration task')
FLAGS = flags.FLAGS
def main(argv):
cyber.init("Preprocessor")
preprocessor = Preprocessor()
task_dir = preprocessor.create_tree()
preprocessor.sanity_check_path(task_dir)
cyber.shutdown()
class Preprocessor(object):
def __init__(self):
self.record_num = FLAGS.record_num
self.vehicle_type = self.folder_case(FLAGS.vehicle_type)
self.config_file = self.get_config_path()
self.node = cyber.Node("vehicle_calibration_preprocessor")
self.writer = self.node.create_writer("/apollo/dreamview/progress",
preprocess_table_pb2.Progress,
10)
self.progress = preprocess_table_pb2.Progress()
self.progress.percentage = 0.0
self.progress.log_string = "Press the button to start preprocessing"
@staticmethod
def folder_case(str):
"""Convert a string from title case to folder case"""
return "_".join(str.lower().split(" "))
def create_if_not_exists(self, path):
"""Create dir if path does not exists"""
try:
if not os.path.exists(path):
os.makedirs(path)
            self.log_and_publish(f'Successfully created {path}')
except OSError:
self.log_and_publish(f'Failed to create: {path}', 'error')
return path
def get_config_path(self):
"""Get the configuration file of the specified vehicle type"""
return os.path.join(FLAGS.calibration_data_path, self.vehicle_type,
FLAGS.config_file_name)
def get_records_info(self):
"""Get records required for calibration"""
res = []
for dir in os.listdir(FLAGS.record_root_path):
match = re.match(r'(^\d{4}-\d{2}-\d{2})-(\d{2}-\d{2}-\d{2}_s$)',
dir)
if match is not None:
record_info = {}
record_info['rel_path'] = match.group()
record_info['abs_path'] = os.path.join(FLAGS.record_root_path,
match.group())
record_info['prefix'] = match.group(1)
res.append(record_info)
if len(res) < self.record_num:
self.log_and_publish(
f'The number of records in {FLAGS.record_root_path} '
f'is less than {self.record_num}', 'error')
res = sorted(res, key=lambda record: record['rel_path'],
reverse=True)[:self.record_num]
return res
def log_and_publish(self,
str,
logging_level="info",
status=preprocess_table_pb2.Status.UNKNOWN):
"""Publish the str by cyber writer"""
if logging_level == 'info':
logging.info(str)
elif logging_level == 'warn':
logging.warn(str)
elif logging_level == 'error':
logging.error(str)
elif logging_level == 'fatal':
logging.fatal(str)
else:
logging.info(str)
self.progress.log_string = str
self.progress.status = status
self.writer.write(self.progress)
time.sleep(0.5)
def create_tree(self):
"""Create file tree according to a specific order"""
task_dir = self.create_if_not_exists(
os.path.join(FLAGS.data_path,
'task' + datetime.now().strftime("-%Y-%m-%d-%H-%M")))
vehicle_dir = self.create_if_not_exists(
os.path.join(task_dir, self.vehicle_type))
records_dir = self.create_if_not_exists(
os.path.join(vehicle_dir, "Records"))
shutil.copy(self.config_file, vehicle_dir)
records_info = self.get_records_info()
finished_records = 0
self.progress.log_string = 'Start preprocessing...'
for iter in records_info:
sub_dir = self.create_if_not_exists(
os.path.join(records_dir, iter['prefix']))
shutil.copytree(iter['abs_path'],
os.path.join(sub_dir, iter['rel_path']))
finished_records += 1
self.progress.percentage = (
finished_records / self.record_num) * 80.0
self.writer.write(self.progress)
self.log_and_publish(
f'The file tree has been successfully created at {task_dir}.')
return task_dir
def sanity_check_path(self, path):
"""Sanity check wrapper"""
result, log_str = sanity_check(path)
if result is True:
self.progress.percentage = 100.0
self.progress.status = preprocess_table_pb2.Status.SUCCESS
else:
self.progress.status = preprocess_table_pb2.Status.FAIL
self.progress.log_string = log_str
self.writer.write(self.progress)
time.sleep(0.5)
if __name__ == "__main__":
app.run(main)
| 38.26257
| 79
| 0.604614
| 4,724
| 0.689736
| 0
| 0
| 149
| 0.021755
| 0
| 0
| 2,141
| 0.3126
|
b0b866e5edd6d67d39733549a96e6a2c9c924cf6
| 1,273
|
py
|
Python
|
cblearn/datasets/tests/test_musician_similarity.py
|
JFHoelscher/cblearn
|
18e3ac24f4d4fdb1e649bea201b18abe27862ef7
|
[
"MIT"
] | 7
|
2021-11-19T13:53:56.000Z
|
2022-03-28T18:39:04.000Z
|
cblearn/datasets/tests/test_musician_similarity.py
|
JFHoelscher/cblearn
|
18e3ac24f4d4fdb1e649bea201b18abe27862ef7
|
[
"MIT"
] | 20
|
2021-09-24T11:39:06.000Z
|
2022-03-17T15:50:06.000Z
|
cblearn/datasets/tests/test_musician_similarity.py
|
JFHoelscher/cblearn
|
18e3ac24f4d4fdb1e649bea201b18abe27862ef7
|
[
"MIT"
] | 3
|
2021-11-24T13:23:17.000Z
|
2022-03-24T08:57:20.000Z
|
import numpy as np
import pytest
from cblearn.datasets import fetch_musician_similarity
@pytest.mark.remote_data
def test_fetch_musician_similarity(tmp_path):
data_home = tmp_path / 'cblearn_datasets'
bunch = fetch_musician_similarity(data_home=data_home, shuffle=False)
assert bunch.data.shape == (131_970, 3)
assert bunch.judgement_id.shape == (131_970, )
assert bunch.user.shape == (131_970, )
assert bunch.survey_or_game.shape == (131_970, )
assert bunch.artist_name.shape == (448, )
assert bunch.artist_id.shape == (448, )
assert bunch.artist_name[bunch.data][0, 0] == 'queen'
assert tuple(bunch.artist_id[bunch.data][0]) == (4325, 1735, 3295)
assert tuple(bunch.artist_id[bunch.data][-1]) == (3603, 4913, 4948)
triplets = fetch_musician_similarity(data_home=data_home, shuffle=False, return_triplets=True)
np.testing.assert_equal(bunch.data, triplets)
np.testing.assert_equal(bunch.artist_name[triplets], bunch.artist_name[bunch.data])
shuffle_bunch = fetch_musician_similarity(data_home=data_home, random_state=42)
assert not np.all(shuffle_bunch.data == bunch.data)
assert not np.all(shuffle_bunch.user == bunch.user)
np.testing.assert_equal(shuffle_bunch.user.sort(), bunch.user.sort())
| 43.896552
| 98
| 0.741555
| 0
| 0
| 0
| 0
| 1,182
| 0.928515
| 0
| 0
| 25
| 0.019639
|
b0b9d81b27101a0308d3e4f404ef228daa426733
| 2,847
|
py
|
Python
|
app/api/views.py
|
Olexsai2020/todo_app
|
f93faffefaaa78292930061867d4ecf772fa0add
|
[
"MIT"
] | null | null | null |
app/api/views.py
|
Olexsai2020/todo_app
|
f93faffefaaa78292930061867d4ecf772fa0add
|
[
"MIT"
] | null | null | null |
app/api/views.py
|
Olexsai2020/todo_app
|
f93faffefaaa78292930061867d4ecf772fa0add
|
[
"MIT"
] | null | null | null |
from django.utils.decorators import method_decorator
from rest_framework import viewsets, status, generics
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from drf_yasg.utils import swagger_auto_schema
from .models import Todo
from .serializers import UserSignupSerializer, UserLoginSerializer, \
TodoSerializer
class UserSignupView(generics.CreateAPIView):
'''
User Signup
    Endpoint for registering a new user
'''
serializer_class = UserSignupSerializer
permission_classes = (AllowAny, )
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
response = {'message': 'User signup successfully',
'result': 'New user created: ' + request.data['email']}
return Response(response, status=status.HTTP_201_CREATED)
class UserLoginView(generics.CreateAPIView):
'''
User Login
Endpoint for JWT Authorization
'''
serializer_class = UserLoginSerializer
permission_classes = (AllowAny, )
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {'message': 'User logged in successfully',
'result': 'User logged in: ' + request.data['email'],
'token': serializer.data['token']}
return Response(response, status=status.HTTP_200_OK)
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_description="Endpoint for viewing todo list",
operation_summary='ToDo List',
))
@method_decorator(name='create', decorator=swagger_auto_schema(
operation_description="Endpoint for creation a new task",
operation_summary='Create New Task',
))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
operation_description="Endpoint for reading a task",
operation_summary='Read Task',
))
@method_decorator(name='update', decorator=swagger_auto_schema(
operation_description="Endpoint for updating a task",
operation_summary='Update Task',
))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_description="Endpoint for partial updating a task",
operation_summary='Partial Update Task',
))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_description="Endpoint to delete a task",
operation_summary='Delete Task',
))
class TodoViewSet(viewsets.ModelViewSet):
queryset = Todo.objects.all()
serializer_class = TodoSerializer
permission_classes = (IsAuthenticated, )
authentication_class = JSONWebTokenAuthentication
| 36.5
| 75
| 0.736916
| 1,366
| 0.479803
| 0
| 0
| 1,211
| 0.42536
| 0
| 0
| 617
| 0.216719
|
b0ba70e5498a44a15d39de5effec074f2599f013
| 112
|
py
|
Python
|
tests/test_mail_bug.py
|
FunTimeCoding/mail-bug
|
185ed77ce92be44c9b37ebf380a1b80a1385c263
|
[
"MIT"
] | null | null | null |
tests/test_mail_bug.py
|
FunTimeCoding/mail-bug
|
185ed77ce92be44c9b37ebf380a1b80a1385c263
|
[
"MIT"
] | null | null | null |
tests/test_mail_bug.py
|
FunTimeCoding/mail-bug
|
185ed77ce92be44c9b37ebf380a1b80a1385c263
|
[
"MIT"
] | null | null | null |
from mail_bug.mail_bug import MailBug
def test_return_code():
bug = MailBug([])
assert bug.run() == 0
| 16
| 37
| 0.669643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b0bb6683aa9dcadc69b57d035b577b7055890820
| 8,244
|
py
|
Python
|
django_sqltools/common.py
|
yang0/django_sqltools
|
44e7208337815d2fe8361824d223ba518bf38221
|
[
"MIT"
] | null | null | null |
django_sqltools/common.py
|
yang0/django_sqltools
|
44e7208337815d2fe8361824d223ba518bf38221
|
[
"MIT"
] | null | null | null |
django_sqltools/common.py
|
yang0/django_sqltools
|
44e7208337815d2fe8361824d223ba518bf38221
|
[
"MIT"
] | null | null | null |
# _*_coding:utf-8_*_
from django.conf import settings
import uuid, os, json, logging, time, shutil
from datetime import datetime, date
from PIL import Image, ImageFile
import mimetypes
import re
logger = logging.getLogger(__name__)
def get_file_path(instance, filename):
folder = instance.__class__.__name__.lower() + datetime.now().strftime("/%Y/%m/%d")
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return os.path.join(folder, filename)
def getFileUri(fileInfo):
    """ Relative path of the uploaded file """
    if fileInfo is None or fileInfo == "":
        return ""
    fileList = fileInfo.split(",")
    if len(fileList) == 3:
        return fileList[0]
    else:
        return ""
def getFileName(fileInfo):
    """ File name extracted from the uploaded file info """
    return re.findall(r'\w+\.\w+', fileInfo)[0]
def getPath(fileInfo):
    """ Directory part of the path """
    return re.findall(r'[/\w]+[/]', fileInfo)[0]
def moveFile(fileUri, folder):
    """ Move the file referenced by fileUri into the given folder """
    if not os.path.exists(settings.MEDIA_ROOT + fileUri):
        return
    # extract the date part of the path
    datePath = re.findall(r'[/][/\w]+[/]', fileUri)[0]
    path = settings.MEDIA_ROOT + folder + datePath
    if not os.path.exists(path):
        os.makedirs(path)
    # move the file
    os.rename(settings.MEDIA_ROOT + fileUri, path + getFileName(fileUri))
    return folder + datePath + getFileName(fileUri)
def getUploadImageSize(fileInfo):
    """ Original size of the uploaded image. @return width, height """
    if fileInfo is None or fileInfo == "":
        return None, None
    fileList = fileInfo.split(",")
    if len(fileList) == 3:
        sizeList = fileList[1].split("_")
        return sizeList[0], sizeList[1]
    else:
        return None, None
def getFileSize(fileInfo):
    """ Size of the uploaded file """
    if fileInfo is None or fileInfo == "":
        return None
    fileList = fileInfo.split(",")
    if len(fileList) == 3:
        return fileList[2]
    else:
        return None
def uploadFile(upload_file, domain, extType=('png', 'jpeg', 'gif', 'bmp', 'jpg')):
if upload_file:
datePath = date.strftime(date.today(), "%Y/%m/%d")
        # 32-bit numeric id: the time_low field of a random uuid4
        uid = uuid.UUID.time_low.fget(uuid.uuid4())
folder = domain + "/" + str(datePath)
        # Derive the extension from the file name; on Python 3 the name needs
        # no utf-8 encoding, and compound suffixes such as tar.gz are not
        # handled yet
        ext = upload_file.name.split(".")[-1]
if ext in extType:
# file_name = image.name.encode('utf-8')
file_uid = str(uid)
path_root = settings.MEDIA_ROOT
path_folder = path_root + folder
            # Path where the file is saved on the server
file_upload = path_folder + "/" + file_uid + "." + ext
            # File info stored in the DB: file path, image dimensions (if any), file size
fileInfo = folder + "/" + file_uid + "." + ext
# path_save = path_folder + "/" + file_uid + ".jpg"
# save_50 = path_folder + "/" + 'snap_50X50_' + file_uid + '.jpg'
# save_60 = path_folder + "/" + 'snap_60X60_' + file_uid + '.jpg'
# avatar_info = 'folder='+ folder + ',uid=' + file_uid + ',ext=jpg' + ',swidth=50,sheight=50' + ',name=' +file_name +',size=' + file_size
if not os.path.exists(path_folder):
os.makedirs(path_folder)
try:
if ext in ('png', 'jpeg', 'gif', 'bmp', 'jpg'):
parser = ImageFile.Parser()
for chunk in upload_file.chunks():
parser.feed(chunk)
img = parser.close()
img.save(file_upload, format="JPEG", quality=85)
else:
with open(file_upload, 'wb') as fd:
for chunk in upload_file.chunks():
fd.write(chunk)
except Exception as e:
logger.error(u"上传失败!%s", e)
return 3, "上传失败!"
            # Get the file size
if ext in ('png', 'jpeg', 'gif', 'bmp', 'jpg'):
image = Image.open(file_upload)
srcW, srcH = image.size
fileInfo += "," + str(srcW) + "_" + str(srcH)
else:
fileInfo += ",0_0"
file_size = os.path.getsize(file_upload)
fileInfo += "," + str(file_size)
return 1, fileInfo
else:
return 2, """不是支持的文件类型!"""
else:
return 0, """未上传文件"""
def resizeImage(imgPath, thumbPath, width, height, pathRoot=settings.MEDIA_ROOT):
"""等比压缩生成缩略图 @param imgPath 原图(相对路径) @param thumbPath 缩略图"""
img = pathRoot + imgPath
resizeImg = pathRoot + thumbPath
if os.path.exists(img):
image = Image.open(img)
        # Get the image width and height
newWidth = 0
newHeight = 0
srcWidth, srcHeight = image.size
if srcWidth <= width and srcHeight <= height:
newWidth = srcWidth
newHeight = srcHeight
else:
ratioH = 1.0 * srcHeight / height
ratioW = 1.0 * srcWidth / width
if ratioH >= ratioW:
newHeight = height
newWidth = int(1.0 * height / srcHeight * srcWidth)
else:
newWidth = width
newHeight = int(1.0 * width / srcWidth * srcHeight)
if image.format == 'GIF':
image = image.convert('RGB')
image.resize((newWidth, newHeight), Image.ANTIALIAS).save(resizeImg, format=image.format, quality=95)
if os.path.exists(resizeImg):
return True
return False
def isImageSize(img, width, height):
"""判断图片尺寸 @param img 图片的绝对路径"""
image = Image.open(img)
srcWidth, srcHeight = image.size
if srcWidth == width and srcHeight == height:
return True
return False
def getImageSize(img):
"""获取图片尺寸 @param img 图片的绝对路径"""
image = Image.open(img)
srcWidth, srcHeight = image.size
return srcWidth, srcHeight
def cropImageCenter(img, newImg, width, height, pathRoot=settings.MEDIA_ROOT):
"""最大范围裁切图片的中间部分"""
img = pathRoot + img
newImg = pathRoot + newImg
image = Image.open(img)
srcWidth, srcHeight = image.size
ratioH = 1.0 * srcHeight / height
ratioW = 1.0 * srcWidth / width
x1 = 0
y1 = 0
x2 = 0
y2 = 0
if ratioW <= 1 or ratioH <= 1:
# if ratioW<=1:
# x1=0
# else:
# x1=int(1.0*(srcWidth-width)/2)
# if ratioH<=1:
# y1=0
# else:
# y1=int(1.0*(srcHeight-height)/2)
x = int(1.0 * (srcWidth - width) / 2)
x1 = x if x > 0 else 0
y = int(1.0 * (srcHeight - height) / 2)
y1 = y if y > 0 else 0
x2 = x1 + width
y2 = y1 + height
x2 = x2 if x2 <= srcWidth else srcWidth
y2 = y2 if y2 <= srcHeight else srcHeight
box = (x1, y1, x2, y2)
image.crop(box).save(newImg)
else:
        # First shrink proportionally to the closest crop ratio, then crop
newWidth = 0
newHeight = 0
if ratioW <= ratioH:
newWidth = width
newHeight = int(srcHeight / ratioW)
else:
newHeight = height
newWidth = int(srcWidth / ratioH)
if image.format == 'GIF':
image = image.convert('RGB')
image.resize((newWidth, newHeight), Image.ANTIALIAS).save(newImg, format=image.format, quality=95)
x = int(1.0 * (newWidth - width) / 2)
y = int(1.0 * (newHeight - height) / 2)
x1 = x if x > 0 else 0
y1 = y if y > 0 else 0
x2 = x1 + width
y2 = y1 + height
x2 = x2 if x2 <= newWidth else newWidth
y2 = y2 if y2 <= newHeight else newHeight
box = (x1, y1, x2, y2)
image = Image.open(newImg)
image.crop(box).save(newImg)
if os.path.exists(newImg):
return True
return False
def delFile(filePath, pathRoot=settings.MEDIA_ROOT):
if filePath:
fullPath = pathRoot + filePath
if os.path.exists(fullPath):
os.remove(fullPath)
def renameFile(srcFile, newFile, pathRoot=settings.MEDIA_ROOT):
if srcFile and newFile:
fullPath = pathRoot + srcFile
newFilePath = pathRoot + newFile
if os.path.exists(fullPath):
os.rename(fullPath, newFilePath)
if os.path.exists(newFilePath):
return True
return False
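# Usage sketch (an addition, not part of the original module): the paths
# below are assumptions. pathRoot defaults to settings.MEDIA_ROOT, so the
# arguments are media-relative paths.
def _demo_resize_and_crop():
    resizeImage('photo/2020/01/01/a.jpg', 'photo/2020/01/01/a_thumb.jpg', 200, 200)
    cropImageCenter('photo/2020/01/01/a.jpg', 'photo/2020/01/01/a_square.jpg', 100, 100)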
| 31.346008
| 149
| 0.548156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,676
| 0.192555
|
b0bbdeb9ad8ad6836124106f4fb1414f2a3afe49
| 1,853
|
py
|
Python
|
csn_searcher/loader.py
|
box-key/shanet
|
6a2679a2dcb98dc4447af8eb453e297cd7585c79
|
[
"Apache-2.0"
] | 2
|
2020-05-07T00:46:36.000Z
|
2020-05-26T10:17:36.000Z
|
csn_searcher/loader.py
|
box-key/shanet
|
6a2679a2dcb98dc4447af8eb453e297cd7585c79
|
[
"Apache-2.0"
] | 2
|
2022-02-27T20:43:49.000Z
|
2022-03-02T12:28:26.000Z
|
csn_searcher/loader.py
|
box-key/shanet
|
6a2679a2dcb98dc4447af8eb453e297cd7585c79
|
[
"Apache-2.0"
] | 1
|
2020-05-08T23:32:03.000Z
|
2020-05-08T23:32:03.000Z
|
import requests
from tqdm import tqdm
from time import sleep
import os
class DataLoader():
"""
    This class is used to download a pre-trained neural network model from a URL.
    It stores the model name and the link to the model.
"""
def __init__(self):
# url where model is stored
self.model_url = 'https://www.dropbox.com/s/z82x8xtofzwgae8/siamense-lstm.pt?dl=1'
self.field_url = 'https://www.dropbox.com/s/99ogf071ncl47ut/TEXT.Field?dl=1'
# output file name
# model_path = os.path.join('csn_searcher', 'data', 'siamese-lstm.pt')
self.model_path = 'siamese-lstm.pt'
self.text_field_path = 'TEXT.Field'
# chunk size for progress bar
self.chunk_size = 2**20
self._load()
def _load(self):
# load nn model
# if a model exists skip this process
if not os.path.exists(self.model_path):
# get model
r = requests.get(self.model_url, stream=True)
# output model
with open(self.model_path, 'wb') as f:
size = int(r.headers.get('content-length'))
task = 'Download NN model'
# print progress bar
with tqdm(total=size, unit=' data', desc=task) as pbar:
for chunk in r.iter_content(chunk_size=self.chunk_size):
if chunk:
f.write(chunk)
f.flush()
pbar.update(len(chunk))
# loads field object
if not os.path.exists(self.text_field_path):
print('Loading Field object...')
r = requests.get(self.field_url)
with open(self.text_field_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=self.chunk_size):
f.write(chunk)
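# Usage sketch (an addition, not part of the original file): constructing
# DataLoader immediately downloads both files when they are missing, so the
# call is guarded behind a script entry point.
if __name__ == '__main__':
    DataLoader()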
| 38.604167
| 90
| 0.563411
| 1,779
| 0.960065
| 0
| 0
| 0
| 0
| 0
| 0
| 630
| 0.339989
|
b0bf332f22eca1ba57686a0f75bc10bbc8f9a9d8
| 75
|
py
|
Python
|
components/server/src/shared/routes/plugins/__init__.py
|
ICTU/quality-time
|
88d80ea30e35bd5f0bf5cce7cb43dc9f439e91f5
|
[
"Apache-2.0"
] | 33
|
2016-01-20T07:35:48.000Z
|
2022-03-14T09:20:51.000Z
|
components/server/src/shared/routes/plugins/__init__.py
|
ICTU/quality-time
|
88d80ea30e35bd5f0bf5cce7cb43dc9f439e91f5
|
[
"Apache-2.0"
] | 2,410
|
2016-01-22T18:13:01.000Z
|
2022-03-31T16:57:34.000Z
|
components/server/src/shared/routes/plugins/__init__.py
|
ICTU/quality-time
|
88d80ea30e35bd5f0bf5cce7cb43dc9f439e91f5
|
[
"Apache-2.0"
] | 21
|
2016-01-16T11:49:23.000Z
|
2022-01-14T21:53:22.000Z
|
"""Bottle route plugins."""
from .injection_plugin import InjectionPlugin
| 18.75
| 45
| 0.786667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.36
|
b0c2140b35a3bb72f96b2eb82a9fe58420a9a0cf
| 4,419
|
py
|
Python
|
airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py
|
CarlosAdp/airflow-providers-tesouro-gerencial
|
f48ba321a5152dfd9b72107f640c66b217d59b9d
|
[
"MIT"
] | null | null | null |
airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py
|
CarlosAdp/airflow-providers-tesouro-gerencial
|
f48ba321a5152dfd9b72107f640c66b217d59b9d
|
[
"MIT"
] | null | null | null |
airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py
|
CarlosAdp/airflow-providers-tesouro-gerencial
|
f48ba321a5152dfd9b72107f640c66b217d59b9d
|
[
"MIT"
] | null | null | null |
from enum import Enum
from typing import List, Union
from urllib.parse import urljoin
import warnings
from airflow.exceptions import AirflowException
from airflow.providers.siafi.hooks.siafi import SIAFIHook
import requests
warnings.filterwarnings('ignore', message='Unverified HTTPS request')
class TesouroGerencialHook(SIAFIHook):
    '''Hook for interacting with Tesouro Gerencial.
    Class inherited from :class:`airflow.providers.siafi.hooks.siafi.SIAFIHook`
    '''
class FORMATO(Enum):
PDF = 'pdf'
CSV = 'csv'
EXCEL = 'excel'
def __str__(self) -> str:
return self.value
URL = 'https://tesourogerencial.tesouro.gov.br/'
string_sessao: str
def __enter__(self) -> 'TesouroGerencialHook':
        '''Starts the session.'''
super().__enter__()
cpf = self.cpf
senha = self.senha
        self.log.info('Starting session for user "%s"', self.cpf)
url = urljoin(self.URL, 'tg/servlet/taskAdmin')
params = {
'taskId': 'senhaMstrSSOTask',
'taskEnv': 'xhr',
'taskContentType': 'json',
'cpf': cpf,
'token': '',
'server': '',
'project': 'TESOURO%20GERENCIAL%20-%20DES',
'senha': senha,
'novaSenha': '',
}
resposta = requests.get(url, params=params, verify=False)
try:
resposta_json = resposta.json()
self.string_sessao = resposta_json['sessionState']
except Exception:
raise AirflowException(resposta)
        self.log.info('Session started successfully')
return self
def __exit__(self, *args, **kwargs) -> None:
        '''Ends the session.'''
url = urljoin(self.URL, 'tg/servlet/taskAdmin')
params = {'taskId': 'logout', 'sessionState': self.string_sessao}
requests.get(url, params=params, verify=False)
        self.log.info('Session ended successfully')
def retorna_relatorio(
self,
id_relatorio: str,
formato: Union[str, FORMATO] = FORMATO.CSV,
respostas_prompts_valor: List[str] = None,
) -> bytes:
        '''Returns a report from Tesouro Gerencial.
        :param id_relatorio: report ID
        :type id_relatorio: str
        :param formato: format of the report to fetch from Tesouro
            Gerencial; may be "csv", "excel" or "pdf". The attribute
            :attr:`~TesouroGerencialHook.FORMATO` can also be used.
        :type formato: Union[str, TesouroGerencialHook.FORMATO]
        :param respostas_prompts_valor: list of value-prompt answers, in the
            order in which the prompts appear in the report
        :type respostas_prompts_valor: List[str]
        :return: report content, as a binary string
        :rtype: bytes
        '''
self.log.info(
            'Requesting report "%s" in format "%s" with the following '
            'prompt answers: "%s"',
id_relatorio, formato, respostas_prompts_valor
)
url = urljoin(self.URL, 'tg/servlet/taskAdmin')
params = {
'taskId': 'exportReport',
'taskEnv': 'juil_iframe',
'taskContent': 'json',
'expandPageBy': True,
}
params.update({
'sessionState': self.string_sessao,
'reportID': id_relatorio,
'valuePromptAnswers': '^'.join(respostas_prompts_valor or [])
})
try:
formato = self.FORMATO(formato)
except ValueError:
            raise AirflowException(f'"{formato}" is not a valid format')
if formato == self.FORMATO.CSV:
params.update({'executionMode': 4, 'plainTextDelimiter': ','})
elif formato == self.FORMATO.EXCEL:
params.update({'executionMode': 3, 'excelVersion': 4})
elif formato == self.FORMATO.PDF:
params.update({'executionMode': 2})
requisicao = requests.Request('GET', url, params=params)
requisicao_preparada = requisicao.prepare()
        self.log.info('Requesting URL "%s"', requisicao_preparada.url)
resposta = requests.get(requisicao_preparada.url, verify=False)
if resposta.ok:
            self.log.info('Report generated successfully')
return resposta.content
else:
raise AirflowException(resposta)
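# Usage sketch (an addition, not part of the original file): the connection
# id and report id are assumptions, and SIAFIHook is assumed to take an
# Airflow connection id in its constructor.
def _demo_retorna_relatorio():
    with TesouroGerencialHook('siafi_default') as hook:
        return hook.retorna_relatorio('12345', formato=TesouroGerencialHook.FORMATO.CSV)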
| 32.977612
| 76
| 0.605114
| 4,141
| 0.932658
| 0
| 0
| 0
| 0
| 0
| 0
| 1,745
| 0.393018
|
b0c27fbbd572c04635510356965faf39003f2e6a
| 843
|
py
|
Python
|
offensive_nn/models/offensive_lstm_model.py
|
TharinduDR/OffensiveNN
|
336b377c44a7067d2e23ca4a8d331ce7f99157cc
|
[
"Apache-2.0"
] | null | null | null |
offensive_nn/models/offensive_lstm_model.py
|
TharinduDR/OffensiveNN
|
336b377c44a7067d2e23ca4a8d331ce7f99157cc
|
[
"Apache-2.0"
] | null | null | null |
offensive_nn/models/offensive_lstm_model.py
|
TharinduDR/OffensiveNN
|
336b377c44a7067d2e23ca4a8d331ce7f99157cc
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class OffensiveLSTMModel:
def __init__(self, args, embedding_matrix):
emb = layers.Embedding(args.max_features, args.embed_size, trainable=False,
name="embedding_layer")
inp = tf.keras.Input(shape=(None,), dtype="int64", name="input")
x = emb(inp)
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True, name="lstm_1"))(x)
x = layers.Bidirectional(layers.LSTM(64, name="lstm_2"))(x)
x = layers.Dense(256, activation="relu", name="dense_1")(x)
x = layers.Dense(args.num_classes, activation="softmax", name="dense_predictions")(x)
emb.set_weights([embedding_matrix])
self.model = tf.keras.Model(inputs=inp, outputs=x, name="lstm_model")
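# Usage sketch (an addition, not part of the original file): the args object
# is an assumption mirroring the attributes the constructor reads
# (max_features, embed_size, num_classes); the zero matrix stands in for
# real pretrained embeddings.
def _demo_build_model():
    import numpy as np
    from types import SimpleNamespace
    args = SimpleNamespace(max_features=20000, embed_size=300, num_classes=2)
    matrix = np.zeros((args.max_features, args.embed_size), dtype="float32")
    model = OffensiveLSTMModel(args, matrix).model
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
    return model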
| 42.15
| 93
| 0.664294
| 751
| 0.890866
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.120996
|
b0c2a699dca314b9665d88e6e73956104ae6f215
| 2,852
|
py
|
Python
|
st_library/utils/databases/postgres.py
|
shortesttrack/dataprovider-py
|
3ecbcb5e09da09614a708e4ef990acdd3443c6ed
|
[
"Apache-2.0"
] | null | null | null |
st_library/utils/databases/postgres.py
|
shortesttrack/dataprovider-py
|
3ecbcb5e09da09614a708e4ef990acdd3443c6ed
|
[
"Apache-2.0"
] | 2
|
2018-03-27T11:06:46.000Z
|
2020-10-27T20:48:51.000Z
|
st_library/utils/databases/postgres.py
|
shortesttrack/dataprovider-py
|
3ecbcb5e09da09614a708e4ef990acdd3443c6ed
|
[
"Apache-2.0"
] | 4
|
2018-02-26T08:12:39.000Z
|
2018-05-18T06:01:01.000Z
|
import backoff
import psycopg2
from psycopg2.extras import DictCursor, NamedTupleCursor
from st_library.utils.generics.connectors import ConnectorContainer
_disconnect_errors = (psycopg2.InterfaceError, psycopg2.OperationalError,)
_backoff = backoff.on_exception(backoff.expo, _disconnect_errors, max_time=30, max_tries=30)
class Postgres(object):
DictCursor = DictCursor
NamedTupleCursor = NamedTupleCursor
def __init__(self, name, host, port, username, password):
self._name = name
self._host = host
self._port = port
self._username = username
self._password = password
self._conn = None
self._cursor = None
self._cursor_type = None
def __repr__(self):
return '<Postgres db "{}">'.format(self._name)
def set_cursor_type(self, cursor_type):
self._cursor_type = cursor_type
@property
def name(self):
return self._name
@_backoff
def _get_connection(self):
if self._conn and not self._conn.closed:
return self._conn
db_connection = self._do_get_connection()
self._conn = db_connection
return self._conn
def _do_get_connection(self):
return psycopg2.connect(database=self._name,
user=self._username,
password=self._password,
host=self._host, port=self._port)
@_backoff
def execute(self, *args, **kwargs):
if self._cursor is None or self._cursor.closed:
self._cursor = self._get_connection().cursor(cursor_factory=self._cursor_type)
return self._cursor.execute(*args, **kwargs)
def fetchall(self):
return self._cursor.fetchall()
def fetchmany(self, *args, **kwargs):
return self._cursor.fetchmany(*args, **kwargs)
def fetchone(self):
return self._cursor.fetchone()
@_backoff
def commit(self):
self._get_connection().commit()
@_backoff
def cancel(self):
self._get_connection().cancel()
@_backoff
def close(self):
self._get_connection().close()
@_backoff
def rollback(self):
self._get_connection().rollback()
class PostgresContainer(ConnectorContainer):
def _do_initialize_data(self):
assert not len(self._list)
params = self._fetch_param_dict([
'psql_host', 'psql_name', 'psql_username', 'psql_password', 'psql_port'
])
list_of_servers_params = [params]
for server in list_of_servers_params:
obj = Postgres(server['psql_name'], server['psql_host'],
int(server['psql_port']), server['psql_username'],
server['psql_password'])
self._list.append(obj)
self._dict[obj.name] = obj
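# Usage sketch (an addition, not part of the original module): connection
# parameters are assumptions. execute() opens the connection and cursor
# lazily, and the backoff decorator retries on disconnect errors.
def _demo_query():
    db = Postgres('mydb', 'localhost', 5432, 'user', 'secret')
    db.set_cursor_type(Postgres.DictCursor)
    db.execute('SELECT 1 AS one')
    return db.fetchone()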
| 29.102041
| 92
| 0.630435
| 2,519
| 0.88324
| 0
| 0
| 820
| 0.287518
| 0
| 0
| 146
| 0.051192
|
b0c2f5602738aeed19e70471739de83515468bde
| 566
|
py
|
Python
|
examples/date-format.py
|
adamantonio/gooeypie
|
a60416a6a317f83e89541d6fdcac3559ace87cac
|
[
"MIT"
] | 1
|
2021-11-20T16:28:47.000Z
|
2021-11-20T16:28:47.000Z
|
examples/date-format.py
|
adamantonio/gooeypie
|
a60416a6a317f83e89541d6fdcac3559ace87cac
|
[
"MIT"
] | null | null | null |
examples/date-format.py
|
adamantonio/gooeypie
|
a60416a6a317f83e89541d6fdcac3559ace87cac
|
[
"MIT"
] | null | null | null |
import gooeypie as gp
date_formats = ['28/8/20', '8/28/20', '28/08/2020', '08/28/2020', '2020-08-28',
'28-Aug-2020', 'Friday, August 28, 2020', 'Friday, 28 August, 2020',
'August 28, 2020', '28 August, 2020']
app = gp.GooeyPieApp('Time and date')
app.width = 250
label = gp.Label(app, 'Available formats:')
date_options = gp.Listbox(app, date_formats)
date_options.height = 8
ok = gp.Button(app, 'OK', None)
ok.width = 10
app.set_grid(3, 1)
app.add(label, 1, 1)
app.add(date_options, 2, 1, fill=True)
app.add(ok, 3, 1)
app.run()
| 25.727273
| 84
| 0.627208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 190
| 0.335689
|
b0c3406b4cbc0b4a885af800bf5115dce13d7dd2
| 1,131
|
py
|
Python
|
color_contrast_calc/converters/grayscale.py
|
nico-hn/color_contrast_calc_py
|
92cf9eecbd8e5d000f284ec786103cb719df6026
|
[
"MIT"
] | 2
|
2020-10-01T11:50:30.000Z
|
2020-10-11T20:59:06.000Z
|
color_contrast_calc/converters/grayscale.py
|
nico-hn/color_contrast_calc_py
|
92cf9eecbd8e5d000f284ec786103cb719df6026
|
[
"MIT"
] | null | null | null |
color_contrast_calc/converters/grayscale.py
|
nico-hn/color_contrast_calc_py
|
92cf9eecbd8e5d000f284ec786103cb719df6026
|
[
"MIT"
] | 1
|
2019-06-18T02:08:06.000Z
|
2019-06-18T02:08:06.000Z
|
# https://www.w3.org/TR/filter-effects/#funcdef-grayscale
# https://www.w3.org/TR/filter-effects/#grayscaleEquivalent
# https://www.w3.org/TR/SVG/filters.html#feColorMatrixElement
import numpy as np
from . import rgb_clamp
_CONST_PART = np.array([[0.2126, 0.7152, 0.0722],
[0.2126, 0.7152, 0.0722],
[0.2126, 0.7152, 0.0722]])
_RATIO_PART = np.array([[0.7874, -0.7152, -0.0722],
[-0.2126, 0.2848, -0.0722],
[-0.2126, -0.7152, 0.9278]])
def calc_rgb(rgb, s):
"""Convert passed a passed color to grayscale.
The calculation is based on the definition found at
https://www.w3.org/TR/filter-effects/#funcdef-grayscale
    :param rgb: The original RGB value before the conversion.
:type rgb: (int, int, int)
:param s: Conversion ratio in percentage
:type s: float
:return: RGB value of grayscale color
:rtype: (int, int, int)
"""
return rgb_clamp((_calc_grayscale(s) * np.array(rgb)).sum(1))
def _calc_grayscale(s):
r = 1 - min((100, s)) / 100.0
return _CONST_PART + _RATIO_PART * r
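# Usage sketch (an addition, not part of the original module): a 50%
# grayscale of pure red; with the matrices above this comes out near
# (155, 27, 27) after clamping.
def _demo_half_gray_red():
    return calc_rgb((255, 0, 0), 50)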
| 29.763158
| 65
| 0.612732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 575
| 0.5084
|
b0c414717803420c137b57f5097d0149ee8b7ac9
| 2,285
|
py
|
Python
|
Coursera/Google_IT_Automation_with_Python/01_Crash_Course_on_Python/Week_5/wk5_mod3_ex2.py
|
ssolomon2020/Self_Study_Python_Training
|
b253093b185f4a0d98cb8565f5fcf2b0e4a99556
|
[
"MIT"
] | null | null | null |
Coursera/Google_IT_Automation_with_Python/01_Crash_Course_on_Python/Week_5/wk5_mod3_ex2.py
|
ssolomon2020/Self_Study_Python_Training
|
b253093b185f4a0d98cb8565f5fcf2b0e4a99556
|
[
"MIT"
] | null | null | null |
Coursera/Google_IT_Automation_with_Python/01_Crash_Course_on_Python/Week_5/wk5_mod3_ex2.py
|
ssolomon2020/Self_Study_Python_Training
|
b253093b185f4a0d98cb8565f5fcf2b0e4a99556
|
[
"MIT"
] | null | null | null |
# Specialization: Google IT Automation with Python
# Course 01: Crash Course with Python
# Week 5 Module Part 3 Exercise 02
# Student: Shawn Solomon
# Learning Platform: Coursera.org
# Let’s expand a bit on our Clothing classes from the previous in-video question. Your mission:
# Finish the "Stock_by_Material" method and iterate over the amount of each item of a given
# material that is in stock. When you’re finished, the script should add up to 10 cotton Polo shirts.
# class Clothing:
# stock={ 'name': [],'material' :[], 'amount':[]}
# def __init__(self,name):
# material = ""
# self.name = name
# def add_item(self, name, material, amount):
# Clothing.stock['name'].append(self.name)
# Clothing.stock['material'].append(self.material)
# Clothing.stock['amount'].append(amount)
# def Stock_by_Material(self, material):
# count=0
# n=0
# for item in Clothing.stock['___']:
# if item == material:
# count += Clothing.___['amount'][n]
# n+=1
# return count
#
# class shirt(Clothing):
# material="Cotton"
# class pants(Clothing):
# material="Cotton"
#
# polo = shirt("Polo")
# sweatpants = pants("Sweatpants")
# polo.add_item(polo.name, polo.material, 4)
# sweatpants.add_item(sweatpants.name, sweatpants.material, 6)
# current_stock = polo.Stock_by_Material("Cotton")
# print(current_stock)
class Clothing:
    stock = {'name': [], 'material': [], 'amount': []}
def __init__(self,name):
material = ""
self.name = name
def add_item(self, name, material, amount):
Clothing.stock['name'].append(self.name)
Clothing.stock['material'].append(self.material)
Clothing.stock['amount'].append(amount)
def Stock_by_Material(self, material):
count=0
n=0
for item in Clothing.stock['material']:
if item == material:
count += Clothing.stock['amount'][n]
n+=1
return count
class shirt(Clothing):
material="Cotton"
class pants(Clothing):
material="Cotton"
polo = shirt("Polo")
sweatpants = pants("Sweatpants")
polo.add_item(polo.name, polo.material, 4)
sweatpants.add_item(sweatpants.name, sweatpants.material, 6)
current_stock = polo.Stock_by_Material("Cotton")
print(current_stock)
| 33.115942
| 102
| 0.661707
| 629
| 0.274792
| 0
| 0
| 0
| 0
| 0
| 0
| 1,483
| 0.647881
|
b0c4a12b9a13aa11699c0f68047498ec12883a9b
| 845
|
py
|
Python
|
dictionary.py
|
LordGhostX/ECXBotsMastery
|
199000f8c24b30734869e8bbdebb5901604cd0fa
|
[
"MIT"
] | 2
|
2021-07-17T20:09:24.000Z
|
2021-08-09T13:48:38.000Z
|
dictionary.py
|
LordGhostX/ECXBotsMastery
|
199000f8c24b30734869e8bbdebb5901604cd0fa
|
[
"MIT"
] | null | null | null |
dictionary.py
|
LordGhostX/ECXBotsMastery
|
199000f8c24b30734869e8bbdebb5901604cd0fa
|
[
"MIT"
] | 1
|
2021-07-17T00:12:07.000Z
|
2021-07-17T00:12:07.000Z
|
import requests
from bs4 import BeautifulSoup
def find_word_meaning(word):
r = requests.get(f"https://www.dictionary.com/browse/{word}")
if r.status_code == 200:
page = BeautifulSoup(r.text, "html.parser")
luna_pos = page.find("span", {"class": "luna-pos"}).text
word_meaning = f"{word} - {luna_pos}\n\n"
meanings = page.find(
"div", {"class": "css-1uqerbd e1hk9ate0"}).find_all("div", {"class": "e1q3nk1v2"})
for i, meaning in enumerate(meanings):
word_meaning += f"{i + 1} - {meaning.find('span').text}\n\n"
return word_meaning.strip()
elif r.status_code == 404:
return "the specified word does not exist!"
else:
return "an error occured while finding word meaning!"
if __name__ == "__main__":
print(find_word_meaning("intense"))
| 33.8
| 94
| 0.620118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 308
| 0.364497
|