Dataset schema (one row per source file):

| column | dtype | stats |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–116k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| content | string | lengths 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |
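Each record below pairs repository metadata with the raw file text in `content`. As a minimal, hedged sketch of how rows with this schema might be consumed, the snippet below uses the Hugging Face `datasets` library in streaming mode; the dataset identifier `"path/to/this-dataset"` is a placeholder, not the real name of this dataset.

```python
# Sketch only: assumes the `datasets` library is installed and that
# "path/to/this-dataset" is replaced with the real dataset identifier.
from datasets import load_dataset

ds = load_dataset("path/to/this-dataset", split="train", streaming=True)

for row in ds:
    # Repository metadata lives alongside the raw source file in `content`.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    source_code = row["content"]  # the full Python file as one string
    break  # only inspect the first record in this sketch
```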
hexsha: e3ef462471404137a56b1266f197564ddb0cd8c7 | size: 3,745 | ext: py | lang: Python
max_stars_repo_path: intersight/models/connector_event_channel_control_ref.py | max_stars_repo_name: ategaw-cisco/intersight-python | max_stars_repo_head_hexsha: 9d6476620507281b1dc358e29ac452d56081bbb0 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: intersight/models/connector_event_channel_control_ref.py | max_issues_repo_name: ategaw-cisco/intersight-python | max_issues_repo_head_hexsha: 9d6476620507281b1dc358e29ac452d56081bbb0 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: intersight/models/connector_event_channel_control_ref.py | max_forks_repo_name: ategaw-cisco/intersight-python | max_forks_repo_head_hexsha: 9d6476620507281b1dc358e29ac452d56081bbb0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ConnectorEventChannelControlRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'moid': 'str',
'object_type': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType'
}
def __init__(self, moid=None, object_type=None):
"""
ConnectorEventChannelControlRef - a model defined in Swagger
"""
self._moid = None
self._object_type = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
@property
def moid(self):
"""
Gets the moid of this ConnectorEventChannelControlRef.
:return: The moid of this ConnectorEventChannelControlRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this ConnectorEventChannelControlRef.
:param moid: The moid of this ConnectorEventChannelControlRef.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this ConnectorEventChannelControlRef.
:return: The object_type of this ConnectorEventChannelControlRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this ConnectorEventChannelControlRef.
:param object_type: The object_type of this ConnectorEventChannelControlRef.
:type: str
"""
self._object_type = object_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ConnectorEventChannelControlRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
avg_line_length: 24.966667 | max_line_length: 84 | alphanum_fraction: 0.558077
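To illustrate the generated Swagger model in the record above, here is a short, hedged usage sketch. The import path mirrors the repo path in the metadata and is assumed to be installable; the `moid` and `object_type` values are made up for demonstration.

```python
# Usage sketch for the auto-generated model shown above (requires the
# intersight package and six); the field values are hypothetical.
from intersight.models.connector_event_channel_control_ref import (
    ConnectorEventChannelControlRef,
)

ref = ConnectorEventChannelControlRef(
    moid="5f1e7a6c2d3e4f5a6b7c8d9e",              # hypothetical Moid
    object_type="connector.EventChannelControl",  # hypothetical ObjectType
)
print(ref.to_dict())  # {'moid': '5f1e...', 'object_type': 'connector.EventChannelControl'}
print(ref)            # __repr__ delegates to pformat(to_dict())
```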
hexsha: 2afa5a3b4e6a140c8e15930787dc5e676f7a0cc1 | size: 1,469 | ext: py | lang: Python
max_stars_repo_path: lib/layer_utils/test_proposal_layer_tf.py | max_stars_repo_name: sonnguyen64/horus-tf-faster-rcnn | max_stars_repo_head_hexsha: 1322cb413932ef4d911e8ebc4ae93b24d58eb56b | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: lib/layer_utils/test_proposal_layer_tf.py | max_issues_repo_name: sonnguyen64/horus-tf-faster-rcnn | max_issues_repo_head_hexsha: 1322cb413932ef4d911e8ebc4ae93b24d58eb56b | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2018-10-17T08:00:17.000Z | max_issues_repo_issues_event_max_datetime: 2018-10-23T07:39:38.000Z
max_forks_repo_path: lib/layer_utils/test_proposal_layer_tf.py | max_forks_repo_name: sonnguyen64/horus-tf-faster-rcnn | max_forks_repo_head_hexsha: 1322cb413932ef4d911e8ebc4ae93b24d58eb56b | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import sys
sys.path.insert(0, 'lib')
import numpy as np
import tensorflow as tf
from proposal_layer_tf import proposal_layer_tf
test_file = 'proposal_layer_test/test1'
npz_file = np.load(test_file + '_input.npz')
rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, anchors, num_anchors = \
npz_file['rpn_cls_prob'], npz_file['rpn_bbox_pred'], npz_file['im_info'], \
npz_file['cfg_key'].tolist(), npz_file['anchors'], npz_file['num_anchors']
npz_file = np.load(test_file + '_output.npz')
exp_blob, exp_scores = npz_file['blob'], npz_file['scores']
# Define input variables
tf_rpn_cls_prob = tf.constant(rpn_cls_prob)
tf_rpn_bbox_pred = tf.constant(rpn_bbox_pred)
tf_im_info = tf.constant(im_info)
tf_anchors = tf.constant(anchors)
tf_num_anchors = tf.constant(num_anchors)
res_full = proposal_layer_tf(rpn_cls_prob=tf_rpn_cls_prob,
rpn_bbox_pred=tf_rpn_bbox_pred,
im_info=tf_im_info,
cfg_key=cfg_key,
anchors=tf_anchors,
num_anchors=num_anchors)
sess = tf.Session()
tf_blob_res, tf_scores_res = sess.run(res_full)
print '\nAll bounding boxes and scores correct?'
print np.allclose(tf_blob_res, exp_blob),
print np.allclose(tf_scores_res, exp_scores)
print '\nTop 50 bounding boxes and scores correct?'
print np.allclose(tf_blob_res[:50], exp_blob[:50]),
print np.allclose(tf_scores_res[:50], exp_scores[:50])
avg_line_length: 32.644444 | max_line_length: 79 | alphanum_fraction: 0.70388
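The test in the record above follows a golden-file pattern: inputs and expected outputs live in `.npz` archives, and the freshly computed tensors are compared with `np.allclose`. The sketch below shows that pattern in isolation with plain NumPy and made-up file and array names (no TensorFlow required).

```python
# Golden-file comparison pattern, shown standalone with hypothetical names.
import numpy as np

# Produce a fixture once (normally written by a trusted reference run).
np.savez("fixture_output.npz",
         blob=np.arange(12.0).reshape(3, 4),
         scores=np.linspace(0.0, 1.0, 3))

# Later: load the fixture and compare against newly computed results.
expected = np.load("fixture_output.npz")
computed_blob = np.arange(12.0).reshape(3, 4)
computed_scores = np.linspace(0.0, 1.0, 3)

print(np.allclose(computed_blob, expected["blob"]))      # True
print(np.allclose(computed_scores, expected["scores"]))  # True
```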
hexsha: 60d2612d2f0e0468af74131dbfbcf91760f7cc81 | size: 121 | ext: py | lang: Python
max_stars_repo_path: influxdb/__init__.py | max_stars_repo_name: bbinet/influxdb-python | max_stars_repo_head_hexsha: 81793fa8fcb44edf2a753d93c4ba7bb23f88d2d5 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: influxdb/__init__.py | max_issues_repo_name: bbinet/influxdb-python | max_issues_repo_head_hexsha: 81793fa8fcb44edf2a753d93c4ba7bb23f88d2d5 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: influxdb/__init__.py | max_forks_repo_name: bbinet/influxdb-python | max_forks_repo_head_hexsha: 81793fa8fcb44edf2a753d93c4ba7bb23f88d2d5 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
from influxdb.client import InfluxDBClient
__all__ = ['InfluxDBClient']
__version__ = '0.1.4'
avg_line_length: 15.125 | max_line_length: 42 | alphanum_fraction: 0.68595
hexsha: 6660e5132ca3e94dea620d369cf22de02068d18f | size: 4,031 | ext: py | lang: Python
max_stars_repo_path: pytmpdir/DirectoryTest.py | max_stars_repo_name: Synerty/pytmpdir | max_stars_repo_head_hexsha: 0c393f35b4aa3f5368527dcf1f9932dfc967c730 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pytmpdir/DirectoryTest.py | max_issues_repo_name: Synerty/pytmpdir | max_issues_repo_head_hexsha: 0c393f35b4aa3f5368527dcf1f9932dfc967c730 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2016-12-14T18:11:59.000Z | max_issues_repo_issues_event_max_datetime: 2016-12-15T02:33:20.000Z
max_forks_repo_path: pytmpdir/DirectoryTest.py | max_forks_repo_name: Synerty/pytmpdir | max_forks_repo_head_hexsha: 0c393f35b4aa3f5368527dcf1f9932dfc967c730 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# Created by Synerty Pty Ltd
# Copyright (C) 2013-2017 Synerty Pty Ltd (Australia)
#
# This software is open source, the MIT license applies.
#
# Website : http://www.synerty.com
# Support : support@synerty.com
import os
import random
import string
import unittest
from tempfile import mkstemp
from pytmpdir.Directory import Directory, FileClobberError, isWindows
class DirectoryTest(unittest.TestCase):
@classmethod
def makeRandomContents(cls, size=100):
return ''.join([random.choice(string.ascii_uppercase + string.digits)
for _ in range(size)])
@classmethod
def makeRandomDirectory(cls):
directory = Directory()
dirs = ['']
def addRecursiveDirs(path):
if len(dirs) > 20:
return
for d in range(5):
newPath = os.path.join(path, cls.makeRandomContents(10))
# print "Creating new path %s" % newPath
dirs.append(newPath)
addRecursiveDirs(newPath)
for x in range(10):
f = directory.createFile(path=newPath,
name=cls.makeRandomContents(10))
with f.open(write=True) as fobj:
fobj.write(cls.makeRandomContents(4000))
addRecursiveDirs('')
return directory
# Create files with bad paths
@unittest.skipUnless(isWindows,
"Not Windows detected, skipping "
"testCreateWindowsBadPaths.")
def testCreateWindowsBadPaths(self):
d = Directory()
self.assertEqual(d.createFile(pathName="\\abspath\\name1").pathName,
'abspath\\name1')
self.assertEqual(d.createFile(pathName="relpath\\name2").pathName,
"relpath\\name2")
self.assertRaises(AssertionError, d.createFile,
pathName="\\abspath\\dir1\\")
self.assertRaises(AssertionError, d.createFile,
pathName="relpath\\dir2\\")
self.assertEqual(2, len(d.files))
print("COMPLETED testCreateWindowsBadPaths")
@unittest.skipIf(isWindows,
"Windows detected, skipping testCreateLinuxBadPaths.")
def testCreateLinuxBadPaths(self):
d = Directory()
self.assertEqual(d.createFile(pathName="/abspath/name1").pathName,
'abspath/name1')
self.assertEqual(d.createFile(pathName="relpath/name2").pathName,
"relpath/name2")
self.assertRaises(AssertionError, d.createFile,
pathName="/abspath/dir1/")
self.assertRaises(AssertionError, d.createFile,
pathName="relpath/dir2/")
self.assertEqual(2, len(d.files))
print("COMPLETED testCreateLinuxBadPaths")
def testDir(self):
d = Directory()
assert (os.path.isdir(d._path))
num = 10
for x in range(num):
(fd, name) = mkstemp(dir=d._path)
with os.fdopen(fd, 'w') as f:
f.write(self.makeRandomContents())
d.scan()
self.assertEqual(num, len(d.files))
for x in range(num):
d.createFile(name=self.makeRandomContents(10))
d.createFile(path=self.makeRandomContents(10),
name=self.makeRandomContents(10))
# Create a file that already exists
d.createFile(pathName="clobber1")
self.assertRaises(FileClobberError, d.createFile,
pathName="clobber1")
self.assertEqual(num * 3 + 1, len(d.files))
files = d.files[:]
removeIndexes = list(range(0, len(files), 3))
[files[i].delete() for i in removeIndexes]
self.assertEqual(len(d.files), len(files) - len(removeIndexes))
dirPath = d._path
d = None
self.assertFalse(os.path.isdir(dirPath))
print("COMPLETED makeRandomContents")
avg_line_length: 34.452991 | max_line_length: 77 | alphanum_fraction: 0.57802
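The test in the record above exercises a small `pytmpdir.Directory` API: `createFile`, `open(write=True)`, `scan`, `files`, and `delete`. A brief usage sketch based only on the calls visible in that test (the file names here are invented):

```python
# Sketch inferred from DirectoryTest above; requires the pytmpdir package.
from pytmpdir.Directory import Directory

d = Directory()                                # backed by a real temp directory
f = d.createFile(pathName="notes/hello.txt")   # relative path + file name
with f.open(write=True) as fobj:
    fobj.write("hello world")

d.scan()                                       # pick up files created outside the API
print(len(d.files))                            # files currently tracked
d.files[0].delete()                            # remove one tracked file
```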
hexsha: 69604d4d412e7887da8cf83e7a98060b79bde148 | size: 617 | ext: py | lang: Python
max_stars_repo_path: tools.py | max_stars_repo_name: zhenv5/atp | max_stars_repo_head_hexsha: 9b9a19283f13a9ab8f25688894a6ec23ccb73e9c | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 7 | max_stars_repo_stars_event_min_datetime: 2019-04-15T17:19:03.000Z | max_stars_repo_stars_event_max_datetime: 2022-01-06T03:14:28.000Z
max_issues_repo_path: tools.py | max_issues_repo_name: DavidMcDonald1993/atp | max_issues_repo_head_hexsha: baa7ba751b7be7306f624043989bf3ecde3148d4 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tools.py | max_forks_repo_name: DavidMcDonald1993/atp | max_forks_repo_head_hexsha: baa7ba751b7be7306f624043989bf3ecde3148d4 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2019-05-03T14:47:17.000Z | max_forks_repo_forks_event_max_datetime: 2019-11-18T13:54:31.000Z
content:
import os.path
def sprint(dir_name,file_name,line):
print line
with open(os.path.join(dir_name,file_name),"a") as f:
f.write("%s \n" % line)
def dir_tail_name(file_name):
import os.path
dir_name = os.path.dirname(os.path.abspath(file_name))
#dir_name = os.path.dirname(file_name)
head, tail = os.path.split(file_name)
print("dir name: %s, file_name: %s" % (dir_name,tail))
return dir_name,tail
def run_command(command,is_print = True):
import subprocess
print("command: %s" % command)
p = subprocess.Popen(command,shell = True, stdout = subprocess.PIPE)
o = p.communicate()
if is_print:
print o[0]
avg_line_length: 29.380952 | max_line_length: 69 | alphanum_fraction: 0.719611
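`run_command` in the record above shells out via `subprocess.Popen` and prints the captured stdout; note the file itself is Python 2 (bare `print` statements). A Python 3 sketch of the same pattern, with an arbitrary `echo` command standing in for a real one:

```python
# Python 3 rendition of the run_command pattern above; the command is a stand-in.
import subprocess

command = "echo hello from a subprocess"
print("command: %s" % command)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
out, _err = p.communicate()
print(out.decode())  # communicate() returns bytes; decode for printing
```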
hexsha: 8235b2539443e778356dd25a4d99fc79becce705 | size: 2,401 | ext: py | lang: Python
max_stars_repo_path: archive/madison.py | max_stars_repo_name: jayktee/scrapers-us-municipal | max_stars_repo_head_hexsha: ff52a331e91cb590a3eda7db6c688d75b77acacb | max_stars_repo_licenses: ["MIT"] | max_stars_count: 67 | max_stars_repo_stars_event_min_datetime: 2015-04-28T19:28:18.000Z | max_stars_repo_stars_event_max_datetime: 2022-01-31T03:27:17.000Z
max_issues_repo_path: archive/madison.py | max_issues_repo_name: jayktee/scrapers-us-municipal | max_issues_repo_head_hexsha: ff52a331e91cb590a3eda7db6c688d75b77acacb | max_issues_repo_licenses: ["MIT"] | max_issues_count: 202 | max_issues_repo_issues_event_min_datetime: 2015-01-15T18:43:12.000Z | max_issues_repo_issues_event_max_datetime: 2021-11-23T15:09:10.000Z
max_forks_repo_path: archive/madison.py | max_forks_repo_name: jayktee/scrapers-us-municipal | max_forks_repo_head_hexsha: ff52a331e91cb590a3eda7db6c688d75b77acacb | max_forks_repo_licenses: ["MIT"] | max_forks_count: 54 | max_forks_repo_forks_event_min_datetime: 2015-01-27T03:15:45.000Z | max_forks_repo_forks_event_max_datetime: 2021-09-10T19:35:32.000Z
content:
from pupa.scrape import Jurisdiction, Organization
from legistar.people import LegistarPersonScraper
class MadisonPersonScraper(LegistarPersonScraper):
EXTRA_FIELDS = ('notes',)
DATE_FORMATS = ('%m/%d/%Y', '%m/%d/%Y*',)
def skip_item(self, item):
#return item['name'] in ('VACANCIES', 'Al Matano')
# TODO: this skips all non-city councilors, check to make sure it doesn't skip other
# interesting people?
return 'district' not in item['url']
class Madison(Jurisdiction):
division_id = 'ocd-division/country:us/state:wi/place:madison'
classification = 'government'
timezone = 'America/Chicago'
name = 'Madison'
url = 'http://www.cityofmadison.com/'
scrapers = {'people': MadisonPersonScraper}
# HTTPS is vital here, without it pagination doesn't work!
LEGISTAR_ROOT_URL = 'https://madison.legistar.com/'
def get_organizations(self):
council = Organization('City of Madison Common Council', classification='legislature')
for x in range(1,21):
council.add_post(str(x), role='Alder')
yield council
#ORG_CLASSIFICATIONS = {
# 'ALLIED AREA TASK FORCE': 'commission',
# 'TRANSPORT 2020 IMPLEMENTATION TASK FORCE': 'commission',
# 'COMMON COUNCIL': 'legislature',
# 'COMMON COUNCIL - DISCUSSION': 'commission',
# 'COMMUNITY ACTION COALITION FOR SOUTH CENTRAL WISCONSIN INC': 'commission',
# 'COMMUNITY DEVELOPMENT AUTHORITY': 'commission',
# 'MADISON COMMUNITY FOUNDATION': 'commission',
# 'MADISON FOOD POLICY COUNCIL': 'commission',
# 'MADISON HOUSING AUTHORITY': 'commission',
# 'PARKING COUNCIL FOR PEOPLE WITH DISABILITIES': 'commission',
#}
#def person_district(self, data):
# '''This corresponds to the label field on organizations posts.
# '''
# # First try to get it from bio.
# dist = re.findall(r'District\s+\d+', data['notes'])
# if dist:
# return dist.pop()
# # Then try website.
# dist = re.findall(r'/district(\d+)/', data['website'])
# if dist:
# return dist.pop()
# # Then email.
# dist = re.findall(r'district(\d+)', data['email'])
# if dist:
# return dist.pop()
avg_line_length: 37.515625 | max_line_length: 94 | alphanum_fraction: 0.597668
hexsha: 51d25c0d66527fa15757400559a65406e23e5014 | size: 691 | ext: py | lang: Python
max_stars_repo_path: pipelines/programs/metaphlan.py | max_stars_repo_name: cdeitrick/workflows | max_stars_repo_head_hexsha: 8edd2a08078144a2445af3903eb13b71abb96538 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pipelines/programs/metaphlan.py | max_issues_repo_name: cdeitrick/workflows | max_issues_repo_head_hexsha: 8edd2a08078144a2445af3903eb13b71abb96538 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pipelines/programs/metaphlan.py | max_forks_repo_name: cdeitrick/workflows | max_forks_repo_head_hexsha: 8edd2a08078144a2445af3903eb13b71abb96538 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from typing import List, Any
from pipelines import sampleio
from pathlib import Path
def run_metaphlan(samples: List[sampleio.SampleReads], output_folder: Path, bash: Path = None):
"""
Runs metaphlan over a series of samples.
Parameters
----------
samples
output_folder: Where to save the output
bash:Path Will write the command to this file
"""
commands: List[List[Any]] = list()
for sample in samples:
command = ["metaphlan2.py", "--input_type", "fastq", "-o", output_folder, sample.forward]
commands.append(command)
with bash.open('w') as file1:
for command in commands:
string_command = " ".join([str(i) for i in command])
file1.write(string_command + '\n\n')
avg_line_length: 28.791667 | max_line_length: 95 | alphanum_fraction: 0.709117
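`run_metaphlan` in the record above only builds `metaphlan2.py` command lines and writes them to the `bash` file; it never executes them. A hedged usage sketch follows: constructing `sampleio.SampleReads` is not shown in the record, so a minimal stand-in exposing the one attribute the function reads (`.forward`) is used instead, and all paths are invented.

```python
# Sketch only: assumes the cdeitrick/workflows package from the record is
# importable; SampleReads is replaced by a stand-in with a .forward attribute.
from pathlib import Path
from types import SimpleNamespace

from pipelines.programs.metaphlan import run_metaphlan  # path from the record metadata

samples = [SimpleNamespace(forward=Path("reads/sampleA_R1.fastq.gz"))]

run_metaphlan(
    samples,
    output_folder=Path("results/metaphlan"),
    bash=Path("metaphlan_commands.sh"),  # the generated commands are written here
)
# metaphlan_commands.sh now holds one "metaphlan2.py --input_type fastq ..." line per sample.
```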
hexsha: e651b30d870b3a4c628637e915626f5e0d754957 | size: 1,398 | ext: py | lang: Python
max_stars_repo_path: nasa/client.py | max_stars_repo_name: quiktea/nasa.py | max_stars_repo_head_hexsha: b5dce1a5c8930e6e3a95de41e3e5a5b3be36aa31 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-04-24T22:30:25.000Z | max_stars_repo_stars_event_max_datetime: 2021-04-24T22:30:25.000Z
max_issues_repo_path: nasa/client.py | max_issues_repo_name: quiktea/nasa.py | max_issues_repo_head_hexsha: b5dce1a5c8930e6e3a95de41e3e5a5b3be36aa31 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: nasa/client.py | max_forks_repo_name: quiktea/nasa.py | max_forks_repo_head_hexsha: b5dce1a5c8930e6e3a95de41e3e5a5b3be36aa31 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from .http import *
from .models import *
from typing import Union, List
class Client:
def __init__(self, api_key : str = ""):
self._key = api_key
self.http = HTTPClient()
self.auth_params = {"api_key" : api_key}
self.http.login(api_key)
async def apod(self, date : str = None, start_date : str = None, end_date : str = None, count : int = None, thumbs : bool = False) -> Union[APODResult, List[APODResult]]:
route = Route("/planetary/apod")
params = {}
params.update(self.auth_params)
params.update({
"date" : date if date is not None else "",
"start_date" : start_date if start_date is not None else "",
"end_date" : end_date if end_date is not None else "",
"count" : count if count is not None else "",
"thumbs" : str(thumbs).lower()
})
route.params = params
response = await self.http.request(route)
if count is not None:
json = await response.json()
results = []
for result in json:
apod_response = APODResult(result)
results.append(apod_response)
return results
apod_response = APODResult(await response.json())
return apod_response
async def close(self) -> None:
return await self.http.session.close()
avg_line_length: 36.789474 | max_line_length: 174 | alphanum_fraction: 0.575823
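The `Client.apod` coroutine in the record above returns a single `APODResult` when `count` is not given and a list of them when it is. Below is a hedged async usage sketch; the import path mirrors the record's file path, `"DEMO_KEY"` is NASA's public demo key, and the attributes available on `APODResult` are not shown in the record, so only the objects themselves are printed.

```python
# Sketch only: requires the quiktea/nasa.py package from the record above.
import asyncio

from nasa.client import Client  # import path mirrors nasa/client.py

async def main():
    client = Client(api_key="DEMO_KEY")
    single = await client.apod(date="2021-04-24")  # one APODResult
    print(single)
    several = await client.apod(count=3)           # list of APODResult objects
    print(len(several))
    await client.close()

asyncio.run(main())
```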
hexsha: 80db824b1a39414e44aa8a38584b2e59d965c97c | size: 830 | ext: py | lang: Python
max_stars_repo_path: src/mold/plugins/face/github.py | max_stars_repo_name: felix-hilden/mold | max_stars_repo_head_hexsha: f29b689673a158094196e3d853e61812c9957cb5 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-04-13T11:08:59.000Z | max_stars_repo_stars_event_max_datetime: 2021-04-13T11:08:59.000Z
max_issues_repo_path: src/mold/plugins/face/github.py | max_issues_repo_name: felix-hilden/mold | max_issues_repo_head_hexsha: f29b689673a158094196e3d853e61812c9957cb5 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-08-10T10:39:14.000Z | max_issues_repo_issues_event_max_datetime: 2021-08-10T10:39:14.000Z
max_forks_repo_path: src/mold/plugins/face/github.py | max_forks_repo_name: felix-hilden/mold | max_forks_repo_head_hexsha: f29b689673a158094196e3d853e61812c9957cb5 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from mold import Interface, Question, hook
from .vcs_host import interface as vcs_host, Provides as VcsHostNeeds
from ..domains import module
class Provides:
github_user: str = ''
github_repo: str = ''
class Accepts:
pass
questions = [
Question('github_user', 'GitHub user name', prefill=True),
Question('github_repo', 'GitHub repository (leave empty for project slug)'),
]
def post_dialog():
user = questions[0].response
repo = questions[1].response or hook.Provides.project_slug
Provides.github_user = user
Provides.github_repo = repo
VcsHostNeeds.vcs_host_url = f'https://github.com/{user}/{repo}'
interface = Interface(
module,
'github',
'GitHub VCS host',
Provides,
Accepts,
parents=[vcs_host],
questions=questions,
post_dialog=post_dialog,
)
avg_line_length: 21.282051 | max_line_length: 80 | alphanum_fraction: 0.693976
hexsha: dcd6c15954e1ca9997cf48f67f1ed803f1a30ef3 | size: 94,385 | ext: py | lang: Python
max_stars_repo_path: src/olympia/blocklist/tests/test_admin.py | max_stars_repo_name: renuacpro/addons-server | max_stars_repo_head_hexsha: 7435857d7fd709929caf6f260a580961a0643f4e | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-07-27T11:22:17.000Z | max_stars_repo_stars_event_max_datetime: 2020-07-27T11:22:17.000Z
max_issues_repo_path: src/olympia/blocklist/tests/test_admin.py | max_issues_repo_name: renuacpro/addons-server | max_issues_repo_head_hexsha: 7435857d7fd709929caf6f260a580961a0643f4e | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/olympia/blocklist/tests/test_admin.py | max_forks_repo_name: renuacpro/addons-server | max_forks_repo_head_hexsha: 7435857d7fd709929caf6f260a580961a0643f4e | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import datetime
import json
from unittest import mock
from django.conf import settings
from django.contrib.admin.models import LogEntry, ADDITION
from django.contrib.contenttypes.models import ContentType
from pyquery import PyQuery as pq
from waffle.testutils import override_switch
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
TestCase, addon_factory, user_factory, version_factory)
from olympia.amo.urlresolvers import reverse
from ..models import Block, BlocklistSubmission
class TestBlockAdmin(TestCase):
def setUp(self):
self.admin_home_url = reverse('admin:index')
self.list_url = reverse('admin:blocklist_block_changelist')
self.add_url = reverse('admin:blocklist_block_add')
self.submission_url = reverse(
'admin:blocklist_blocklistsubmission_add')
def test_can_see_addon_module_in_admin_with_review_admin(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
assert modules == ['Blocklist']
def test_can_not_see_addon_module_in_admin_without_permissions(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.client.login(email=user.email)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
assert modules == []
def test_can_list(self):
addon = addon_factory()
Block.objects.create(guid=addon.guid, updated_by=user_factory())
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 200
assert addon.guid in response.content.decode('utf-8')
def test_can_not_list_without_permission(self):
addon = addon_factory()
Block.objects.create(guid=addon.guid, updated_by=user_factory())
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 403
assert addon.guid not in response.content.decode('utf-8')
def test_add(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.add_url, follow=True)
assert b'Add-on GUIDs (one per line)' in response.content
# Submit an empty list of guids should redirect back to the page
response = self.client.post(
self.add_url, {'guids': ''}, follow=False)
assert b'Add-on GUIDs (one per line)' in response.content
# A single invalid guid should redirect back to the page too (for now)
response = self.client.post(
self.add_url, {'guids': 'guid@'}, follow=False)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'Addon with GUID guid@ does not exist' in response.content
addon = addon_factory(guid='guid@')
# But should continue to the django admin add page if it exists
response = self.client.post(
self.add_url, {'guids': 'guid@'}, follow=True)
self.assertRedirects(response, self.submission_url, status_code=307)
# Multiple guids are redirected to the multiple guid view
response = self.client.post(
self.add_url, {'guids': 'guid@\nfoo@baa'}, follow=True)
self.assertRedirects(response, self.submission_url, status_code=307)
# An existing block will redirect to change view instead
block = Block.objects.create(
guid=addon.guid, updated_by=user_factory())
response = self.client.post(
self.add_url, {'guids': 'guid@'}, follow=True)
self.assertRedirects(
response,
reverse('admin:blocklist_block_change', args=(block.pk,))
)
def test_add_restrictions(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
post_data = {'guids': 'guid@\nfoo@baa'}
# If the guid already exists in a pending BlocklistSubmission the guid
# is invalid also
addon = addon_factory(guid='guid@')
submission = BlocklistSubmission.objects.create(input_guids='guid@')
response = self.client.post(
self.add_url, post_data, follow=True)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'GUID guid@ is in a pending Submission' in (
response.content)
# It's okay if the submission isn't pending (rejected, etc) though.
submission.update(signoff_state=BlocklistSubmission.SIGNOFF_REJECTED)
# But should continue to the django admin add page if it exists
response = self.client.post(
self.add_url, post_data, follow=True)
self.assertRedirects(response, self.submission_url, status_code=307)
# same if one of the guids exists as a block
block = Block.objects.create(
guid=addon.guid, updated_by=user_factory())
response = self.client.post(
self.add_url, post_data, follow=True)
self.assertRedirects(response, self.submission_url, status_code=307)
# but not if it's imported from a legacy record
block.update(legacy_id='343545')
response = self.client.post(
self.add_url, post_data, follow=True)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'The block for GUID guid@ is readonly - it must be edited' in (
response.content)
# unless the `blocklist_legacy_submit` waffle switch is on
with override_switch('blocklist_legacy_submit', active=True):
response = self.client.post(
self.add_url, post_data, follow=True)
self.assertRedirects(
response, self.submission_url, status_code=307)
def test_add_from_addon_pk_view(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
addon = addon_factory()
url = reverse('admin:blocklist_block_addaddon', args=(addon.id,))
response = self.client.post(url, follow=True)
self.assertRedirects(
response, self.submission_url + f'?guids={addon.guid}')
# if (for some reason) we're passed a previous, deleted, addon
# instance, we still correctly passed along the guid.
deleted_addon = addon_factory(status=amo.STATUS_DELETED)
deleted_addon.addonguid.update(guid=addon.guid)
url = reverse(
'admin:blocklist_block_addaddon', args=(deleted_addon.id,))
response = self.client.post(url, follow=True)
self.assertRedirects(
response, self.submission_url + f'?guids={addon.guid}')
# GET params are passed along
version = addon.current_version
response = self.client.post(
url + f'?min_version={version.version}', follow=True)
self.assertRedirects(
response,
self.submission_url +
f'?guids={addon.guid}&min_version={version.version}')
# And version ids as short params are expanded and passed along
response = self.client.post(
url + f'?max={version.pk}', follow=True)
self.assertRedirects(
response,
self.submission_url +
f'?guids={addon.guid}&max_version={version.version}')
assert not response.context['messages']
# Existing blocks are redirected to the change view instead
block = Block.objects.create(addon=addon, updated_by=user_factory())
response = self.client.post(
url + f'?max={version.pk}', follow=True)
self.assertRedirects(
response,
reverse('admin:blocklist_block_change', args=(block.pk,)))
# with a message warning the versions were ignored
assert [msg.message for msg in response.context['messages']] == [
f'The versions 0 to {version.version} could not be pre-selected '
'because some versions have been blocked already']
# Pending blocksubmissions are redirected to the submission view
submission = BlocklistSubmission.objects.create(input_guids=addon.guid)
response = self.client.post(
url + f'?max={version.pk}', follow=True)
self.assertRedirects(
response,
reverse(
'admin:blocklist_blocklistsubmission_change',
args=(submission.pk,)))
# with a message warning the versions were ignored
assert [msg.message for msg in response.context['messages']] == [
f'The versions 0 to {version.version} could not be pre-selected '
'because this addon is part of a pending submission']
class TestBlocklistSubmissionAdmin(TestCase):
def setUp(self):
self.submission_url = reverse(
'admin:blocklist_blocklistsubmission_add')
self.submission_list_url = reverse(
'admin:blocklist_blocklistsubmission_changelist')
def test_add_single(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
deleted_addon = addon_factory(version_kw={'version': '1.2.5'})
deleted_addon_version = deleted_addon.current_version
deleted_addon.delete()
deleted_addon.addonguid.update(guid='guid@')
addon = addon_factory(
guid='guid@', name='Danger Danger', version_kw={'version': '1.2a'})
first_version = addon.current_version
second_version = version_factory(addon=addon, version='3')
pending_version = version_factory(
addon=addon, version='5.999',
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
response = self.client.get(
self.submission_url + '?guids=guid@', follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert str(addon.average_daily_users) in content
assert Block.objects.count() == 0 # Check we didn't create it already
assert 'Block History' in content
# Create the block
response = self.client.post(
self.submission_url, {
'input_guids': 'guid@',
'action': '0',
'min_version': '0',
'max_version': addon.current_version.version,
'existing_min_version': '0',
'existing_max_version': addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
assert Block.objects.count() == 1
block = Block.objects.first()
assert block.addon == addon
log = ActivityLog.objects.for_addons(addon).last()
assert log.action == amo.LOG.BLOCKLIST_BLOCK_ADDED.id
assert log.arguments == [addon, addon.guid, block]
assert log.details['min_version'] == '0'
assert log.details['max_version'] == addon.current_version.version
assert log.details['reason'] == 'some reason'
block_log = ActivityLog.objects.for_block(block).filter(
action=log.action).last()
assert block_log == log
block_log_by_guid = ActivityLog.objects.for_guidblock('guid@').filter(
action=log.action).last()
assert block_log_by_guid == log
assert log == ActivityLog.objects.for_version(first_version).last()
assert log == ActivityLog.objects.for_version(second_version).last()
assert log == ActivityLog.objects.for_version(
deleted_addon_version).last()
assert not ActivityLog.objects.for_version(pending_version).exists()
assert [msg.message for msg in response.context['messages']] == [
'The blocklist submission "No Sign-off: guid@; dfd; some reason" '
'was added successfully.']
response = self.client.get(
reverse('admin:blocklist_block_change', args=(block.pk,)))
content = response.content.decode('utf-8')
todaysdate = datetime.datetime.now().date()
assert f'<a href="dfd">{todaysdate}</a>' in content
assert f'Block added by {user.name}:\n guid@' in content
assert f'versions 0 - {addon.current_version.version}' in content
assert f'Included in legacy blocklist' not in content
@override_switch('blocklist_legacy_submit', active=False)
def test_legacy_id_property_readonly(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
addon = addon_factory()
response = self.client.get(
self.submission_url + f'?guids={addon.guid}', follow=True)
assert not pq(response.content)('#id_legacy_id')
assert b'_save' in response.content
# Try to set legacy_id
response = self.client.post(
self.submission_url, {
'input_guids': addon.guid,
'action': '0',
'min_version': addon.current_version.version,
'max_version': addon.current_version.version,
'existing_min_version': addon.current_version.version,
'existing_max_version': addon.current_version.version,
'url': '',
'legacy_id': True,
'reason': 'Added!',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
assert BlocklistSubmission.objects.exists()
assert Block.objects.exists()
block = Block.objects.get()
assert block.reason == 'Added!'
assert block.in_legacy_blocklist is False
assert BlocklistSubmission.objects.get().in_legacy_blocklist is False
@override_switch('blocklist_legacy_submit', active=True)
@mock.patch('olympia.blocklist.models.legacy_delete_blocks')
@mock.patch('olympia.blocklist.models.legacy_publish_blocks')
def test_legacy_id_enabled_with_legacy_submit_waffle_on(self, publish_mock,
delete_mock):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
addon = addon_factory()
response = self.client.get(
self.submission_url + f'?guids={addon.guid}', follow=True)
assert pq(response.content)('#id_legacy_id')
assert b'_save' in response.content
# Try to set legacy_id
response = self.client.post(
self.submission_url, {
'input_guids': addon.guid,
'action': '0',
'min_version': addon.current_version.version,
'max_version': addon.current_version.version,
'existing_min_version': addon.current_version.version,
'existing_max_version': addon.current_version.version,
'url': '',
'legacy_id': True,
'reason': 'Added!',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
assert BlocklistSubmission.objects.exists()
assert Block.objects.exists()
block = Block.objects.get()
assert block.reason == 'Added!'
publish_mock.assert_called_once()
delete_mock.assert_not_called()
# And again with the opposite
publish_mock.reset_mock()
addon = addon_factory()
response = self.client.post(
self.submission_url, {
'input_guids': addon.guid,
'action': '0',
'min_version': addon.current_version.version,
'max_version': addon.current_version.version,
'url': '',
'reason': 'Added again!',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
block = Block.objects.latest()
assert block.reason == 'Added again!'
publish_mock.assert_not_called()
delete_mock.assert_called_once()
def _test_add_multiple_submit(self, addon_adu):
"""addon_adu is important because whether dual signoff is needed is
based on what the average_daily_users is."""
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
new_addon = addon_factory(
guid='any@new', name='New Danger', average_daily_users=addon_adu)
existing_and_full = Block.objects.create(
addon=addon_factory(guid='full@existing', name='Full Danger'),
min_version='0',
max_version='*',
updated_by=user_factory())
partial_addon = addon_factory(
guid='partial@existing', name='Partial Danger',
average_daily_users=(addon_adu - 1))
existing_and_partial = Block.objects.create(
addon=partial_addon,
min_version='1',
max_version='99',
updated_by=user_factory())
response = self.client.post(
self.submission_url,
{'guids': 'any@new\npartial@existing\nfull@existing\ninvalid@'},
follow=True)
content = response.content.decode('utf-8')
# meta data for new blocks and existing ones needing update:
assert 'Add-on GUIDs (one per line)' not in content
assert 'any@new' in content
assert 'New Danger' in content
assert str(new_addon.average_daily_users) in content
assert 'partial@existing' in content
assert 'Partial Danger' in content
assert str(partial_addon.average_daily_users) in content
# but not for existing blocks already 0 - *
assert 'full@existing' in content
assert 'Full Danger' not in content
assert str(existing_and_full.addon.average_daily_users) not in content
# no metadata for an invalid guid but it should be shown
assert 'invalid@' in content
# Check we didn't create the block already
assert Block.objects.count() == 2
assert BlocklistSubmission.objects.count() == 0
# Create the block submission
response = self.client.post(
self.submission_url, {
'input_guids': (
'any@new\npartial@existing\nfull@existing\ninvalid@'),
'action': '0',
'min_version': '0',
'max_version': '*',
'existing_min_version': '0',
'existing_max_version': '*',
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
return (
new_addon, existing_and_full, partial_addon, existing_and_partial)
def _test_add_multiple_verify_blocks(self, new_addon, existing_and_full,
partial_addon, existing_and_partial,
has_signoff=True):
assert Block.objects.count() == 3
assert BlocklistSubmission.objects.count() == 1
submission = BlocklistSubmission.objects.get()
all_blocks = Block.objects.all()
new_block = all_blocks[2]
assert new_block.addon == new_addon
add_log = ActivityLog.objects.for_addons(new_addon).last()
assert add_log.action == amo.LOG.BLOCKLIST_BLOCK_ADDED.id
assert add_log.arguments == [new_addon, new_addon.guid, new_block]
assert add_log.details['min_version'] == '0'
assert add_log.details['max_version'] == '*'
assert add_log.details['reason'] == 'some reason'
if has_signoff:
assert add_log.details['signoff_state'] == 'Approved'
assert add_log.details['signoff_by'] == submission.signoff_by.id
else:
assert add_log.details['signoff_state'] == 'No Sign-off'
assert 'signoff_by' not in add_log.details
block_log = ActivityLog.objects.for_block(new_block).filter(
action=add_log.action).last()
assert block_log == add_log
vlog = ActivityLog.objects.for_version(
new_addon.current_version).last()
assert vlog == add_log
existing_and_partial = existing_and_partial.reload()
assert all_blocks[1] == existing_and_partial
# confirm properties were updated
assert existing_and_partial.min_version == '0'
assert existing_and_partial.max_version == '*'
assert existing_and_partial.reason == 'some reason'
assert existing_and_partial.url == 'dfd'
assert existing_and_partial.in_legacy_blocklist is False
edit_log = ActivityLog.objects.for_addons(partial_addon).last()
assert edit_log.action == amo.LOG.BLOCKLIST_BLOCK_EDITED.id
assert edit_log.arguments == [
partial_addon, partial_addon.guid, existing_and_partial]
assert edit_log.details['min_version'] == '0'
assert edit_log.details['max_version'] == '*'
assert edit_log.details['reason'] == 'some reason'
if has_signoff:
assert edit_log.details['signoff_state'] == 'Approved'
assert edit_log.details['signoff_by'] == submission.signoff_by.id
else:
assert edit_log.details['signoff_state'] == 'No Sign-off'
assert 'signoff_by' not in edit_log.details
block_log = ActivityLog.objects.for_block(existing_and_partial).filter(
action=edit_log.action).last()
assert block_log == edit_log
vlog = ActivityLog.objects.for_version(
partial_addon.current_version).last()
assert vlog == edit_log
existing_and_full = existing_and_full.reload()
assert all_blocks[0] == existing_and_full
# confirm properties *were not* updated.
assert existing_and_full.reason != 'some reason'
assert existing_and_full.url != 'dfd'
assert not ActivityLog.objects.for_addons(
existing_and_full.addon).exists()
assert not ActivityLog.objects.for_version(
existing_and_full.addon.current_version).exists()
assert submission.input_guids == (
'any@new\npartial@existing\nfull@existing\ninvalid@')
assert submission.min_version == new_block.min_version
assert submission.max_version == new_block.max_version
assert submission.url == new_block.url
assert submission.reason == new_block.reason
assert submission.to_block == [
{'guid': 'any@new', 'id': None,
'average_daily_users': new_addon.average_daily_users},
{'guid': 'partial@existing', 'id': existing_and_partial.id,
'average_daily_users': partial_addon.average_daily_users}
]
assert set(submission.block_set.all()) == {
new_block, existing_and_partial}
def test_submit_no_dual_signoff(self):
addon_adu = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD
new_addon, existing_and_full, partial_addon, existing_and_partial = (
self._test_add_multiple_submit(addon_adu=addon_adu))
self._test_add_multiple_verify_blocks(
new_addon, existing_and_full, partial_addon, existing_and_partial,
has_signoff=False)
def test_submit_dual_signoff(self):
addon_adu = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD + 1
new_addon, existing_and_full, partial_addon, existing_and_partial = (
self._test_add_multiple_submit(addon_adu=addon_adu))
# no new Block objects yet
assert Block.objects.count() == 2
# and existing block wasn't updated
multi = BlocklistSubmission.objects.get()
multi.update(
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED,
signoff_by=user_factory())
assert multi.is_submission_ready
multi.save_to_block_objects()
self._test_add_multiple_verify_blocks(
new_addon, existing_and_full, partial_addon, existing_and_partial)
@override_switch('blocklist_admin_dualsignoff_disabled', active=True)
def test_add_and_edit_with_different_min_max_versions(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
new_addon = addon_factory(
guid='any@new', average_daily_users=100,
version_kw={'version': '5.56'})
existing_one_to_ten = Block.objects.create(
addon=addon_factory(guid='partial@existing'),
min_version='1',
max_version='10',
updated_by=user_factory())
existing_zero_to_max = Block.objects.create(
addon=addon_factory(
guid='full@existing', average_daily_users=99,
version_kw={'version': '10'}),
min_version='0',
max_version='*',
updated_by=user_factory())
response = self.client.post(
self.submission_url,
{'guids': 'any@new\npartial@existing\nfull@existing'},
follow=True)
# Check we've processed the guids correctly.
doc = pq(response.content)
assert 'full@existing' in doc('.field-existing-guids').text()
assert 'partial@existing' in doc('.field-blocks-to-add').text()
assert 'any@new' in doc('.field-blocks-to-add').text()
# Check we didn't create the block already
assert Block.objects.count() == 2
assert BlocklistSubmission.objects.count() == 0
# Change the min/max versions
response = self.client.post(
self.submission_url, {
'input_guids': (
'any@new\npartial@existing\nfull@existing'),
'action': '0',
'min_version': '1', # this is the field we can change
'max_version': '10', # this is the field we can change
'existing_min_version': '0', # this is a hidden field
'existing_max_version': '*', # this is a hidden field
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
assert b'Blocks to be updated are different' in response.content
# No Block should have been changed or added
assert Block.objects.count() == 2
assert BlocklistSubmission.objects.count() == 0
# The guids should have been processed differently now
doc = pq(response.content)
assert 'partial@existing' in doc('.field-existing-guids').text()
assert 'full@existing' in doc('.field-blocks-to-add').text()
assert 'any@new' in doc('.field-blocks-to-add').text()
# We're submitting again, but now existing_min|max_version is the same
response = self.client.post(
self.submission_url, {
'input_guids': (
'any@new\npartial@existing\nfull@existing'),
'action': '0',
'min_version': '1', # this is the field we can change
'max_version': '10', # this is the field we can change
'existing_min_version': '1', # this is a hidden field
'existing_max_version': '10', # this is a hidden field
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert [msg.message for msg in response.context['messages']] == [
'The blocklist submission '
'"No Sign-off: any@new, partial@existing, full@exist...; dfd; '
'some reason" was added successfully.']
# This time the blocks are updated
assert Block.objects.count() == 3
assert BlocklistSubmission.objects.count() == 1
all_blocks = Block.objects.all()
new_block = all_blocks[2]
assert new_block.addon == new_addon
log = ActivityLog.objects.for_addons(new_addon).get()
assert log.action == amo.LOG.BLOCKLIST_BLOCK_ADDED.id
assert log.arguments == [new_addon, new_addon.guid, new_block]
assert log.details['min_version'] == '1'
assert log.details['max_version'] == '10'
assert log.details['reason'] == 'some reason'
block_log = ActivityLog.objects.for_block(new_block).filter(
action=log.action).last()
assert block_log == log
vlog = ActivityLog.objects.for_version(
new_addon.current_version).last()
assert vlog == log
existing_zero_to_max = existing_zero_to_max.reload()
assert all_blocks[1] == existing_zero_to_max
# confirm properties were updated
assert existing_zero_to_max.min_version == '1'
assert existing_zero_to_max.max_version == '10'
assert existing_zero_to_max.reason == 'some reason'
assert existing_zero_to_max.url == 'dfd'
assert existing_zero_to_max.in_legacy_blocklist is False
log = ActivityLog.objects.for_addons(existing_zero_to_max.addon).get()
assert log.action == amo.LOG.BLOCKLIST_BLOCK_EDITED.id
assert log.arguments == [
existing_zero_to_max.addon, existing_zero_to_max.guid,
existing_zero_to_max]
assert log.details['min_version'] == '1'
assert log.details['max_version'] == '10'
assert log.details['reason'] == 'some reason'
block_log = ActivityLog.objects.for_block(existing_zero_to_max).filter(
action=log.action).last()
assert block_log == log
vlog = ActivityLog.objects.for_version(
existing_zero_to_max.addon.current_version).last()
assert vlog == log
existing_one_to_ten = existing_one_to_ten.reload()
assert all_blocks[0] == existing_one_to_ten
# confirm properties *were not* updated.
assert existing_one_to_ten.reason != 'some reason'
assert existing_one_to_ten.url != 'dfd'
assert existing_one_to_ten.in_legacy_blocklist is False
assert not ActivityLog.objects.for_addons(
existing_one_to_ten.addon).exists()
assert not ActivityLog.objects.for_version(
existing_one_to_ten.addon.current_version).exists()
submission = BlocklistSubmission.objects.get()
assert submission.input_guids == (
'any@new\npartial@existing\nfull@existing')
assert submission.min_version == new_block.min_version
assert submission.max_version == new_block.max_version
assert submission.url == new_block.url
assert submission.reason == new_block.reason
assert submission.to_block == [
{'guid': 'any@new', 'id': None,
'average_daily_users': new_addon.average_daily_users},
{'guid': 'full@existing', 'id': existing_zero_to_max.id,
'average_daily_users':
existing_zero_to_max.addon.average_daily_users}
]
assert set(submission.block_set.all()) == {
new_block, existing_zero_to_max}
@mock.patch('olympia.blocklist.admin.GUID_FULL_LOAD_LIMIT', 1)
def test_add_multiple_bulk_so_fake_block_objects(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
new_addon = addon_factory(guid='any@new', name='New Danger')
Block.objects.create(
addon=addon_factory(guid='full@existing', name='Full Danger'),
min_version='0',
max_version='*',
legacy_id='34345',
updated_by=user_factory())
partial_addon = addon_factory(
guid='partial@existing', name='Partial Danger')
Block.objects.create(
addon=partial_addon,
min_version='1',
max_version='99',
legacy_id='75456',
updated_by=user_factory())
Block.objects.create(
addon=addon_factory(guid='regex@legacy'),
min_version='23',
max_version='567',
legacy_id='*regexlegacy',
updated_by=user_factory())
response = self.client.post(
self.submission_url,
{'guids': 'any@new\npartial@existing\nfull@existing\ninvalid@\n'
'regex@legacy'},
follow=True)
content = response.content.decode('utf-8')
# This metadata should exist
assert new_addon.guid in content
assert str(new_addon.average_daily_users) in content
assert partial_addon.guid in content
assert str(partial_addon.average_daily_users) in content
assert 'full@existing' in content
assert 'invalid@' in content
assert 'regex@legacy' in content
assert 'imported from a regex based legacy' in content
assert 'regex@legacy' in pq(response.content)(
'.regexlegacywarning').text()
assert 'full@existing' not in pq(response.content)(
'.regexlegacywarning').text()
# But Addon names or review links shouldn't have been loaded
assert 'New Danger' not in content
assert 'Partial Danger' not in content
assert 'Full Danger' not in content
assert 'Review Listed' not in content
assert 'Review Unlisted' not in content
def test_legacy_regex_warning(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
new_addon = addon_factory(guid='any@new', name='New Danger')
Block.objects.create(
addon=addon_factory(guid='full@existing', name='Full Danger'),
min_version='0',
max_version='*',
legacy_id='5656',
updated_by=user_factory())
partial_addon = addon_factory(
guid='partial@existing', name='Partial Danger')
Block.objects.create(
addon=partial_addon,
min_version='1',
max_version='99',
legacy_id='74356',
updated_by=user_factory())
Block.objects.create(
addon=addon_factory(guid='regex@legacy'),
min_version='23',
max_version='567',
legacy_id='*regexlegacy',
updated_by=user_factory())
response = self.client.post(
self.submission_url,
{'guids': 'any@new\npartial@existing\nfull@existing\ninvalid@\n'
'regex@legacy'},
follow=True)
content = response.content.decode('utf-8')
# This metadata should exist
assert new_addon.guid in content
assert str(new_addon.average_daily_users) in content
assert partial_addon.guid in content
assert str(partial_addon.average_daily_users) in content
assert 'full@existing' in content
assert 'invalid@' in content
assert 'regex@legacy' in content
assert 'imported from a regex based legacy' in content
assert 'regex@legacy' in pq(response.content)(
'.regexlegacywarning').text()
assert 'full@existing' not in pq(response.content)(
'.regexlegacywarning').text()
def test_review_links(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
post_kwargs = {
'path': self.submission_url,
'data': {'guids': 'guid@\nfoo@baa\ninvalid@'},
'follow': True}
# An addon with only listed versions should have listed link
addon = addon_factory(
guid='guid@', name='Danger Danger', version_kw={'version': '0.1'})
# This is irrelevant because a complete block doesn't have links
Block.objects.create(
addon=addon_factory(guid='foo@baa'),
min_version="0",
max_version="*",
updated_by=user_factory())
response = self.client.post(**post_kwargs)
assert b'Review Listed' in response.content
assert b'Review Unlisted' not in response.content
assert b'Edit Block' not in response.content
assert not pq(response.content)('.existing_block')
# Should work the same if partial block (exists but needs updating)
existing_block = Block.objects.create(
guid=addon.guid, min_version='8', updated_by=user_factory())
response = self.client.post(**post_kwargs)
assert b'Review Listed' in response.content
assert b'Review Unlisted' not in response.content
assert pq(response.content)('.existing_block a').attr('href') == (
reverse('admin:blocklist_block_change', args=(existing_block.pk,)))
assert pq(response.content)('.existing_block').text() == (
'[Edit Block: %s - %s]' % (existing_block.min_version, '*'))
# And an unlisted version
version_factory(
addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED, version='0.2')
response = self.client.post(**post_kwargs)
assert b'Review Listed' in response.content
assert b'Review Unlisted' in response.content
assert pq(response.content)('.existing_block a').attr('href') == (
reverse('admin:blocklist_block_change', args=(existing_block.pk,)))
assert pq(response.content)('.existing_block').text() == (
'[Edit Block: %s - %s]' % (existing_block.min_version, '*'))
# And delete the block again
existing_block.delete()
response = self.client.post(**post_kwargs)
assert b'Review Listed' in response.content
assert b'Review Unlisted' in response.content
assert b'Edit Block' not in response.content
assert not pq(response.content)('.existing_block')
addon.current_version.delete(hard=True)
response = self.client.post(**post_kwargs)
assert b'Review Listed' not in response.content
assert b'Review Unlisted' in response.content
def test_can_not_set_min_version_above_max_version(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
addon_factory(guid='any@new', name='New Danger')
partial_addon = addon_factory(
guid='partial@existing', name='Partial Danger')
Block.objects.create(
addon=partial_addon,
min_version='1',
max_version='99',
updated_by=user_factory())
response = self.client.post(
self.submission_url, {
'input_guids': 'any@new\npartial@existing\ninvalid@',
'action': '0',
'min_version': '5',
'max_version': '3',
'existing_min_version': '5',
'existing_max_version': '3',
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
assert b'Min version can not be greater than Max' in response.content
assert Block.objects.count() == 1
def test_can_not_add_without_create_permission(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
# The signoff permission shouldn't be sufficient
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
addon_factory(guid='guid@', name='Danger Danger')
existing = Block.objects.create(
addon=addon_factory(guid='foo@baa'),
min_version="1",
max_version="99",
updated_by=user_factory())
response = self.client.post(
self.submission_url,
{'guids': 'guid@\nfoo@baa\ninvalid@'},
follow=True)
assert response.status_code == 403
assert b'Danger Danger' not in response.content
# Try to create the block anyway
response = self.client.post(
self.submission_url, {
'input_guids': 'guid@\nfoo@baa\ninvalid@',
'action': '0',
'min_version': '0',
'max_version': '*',
'existing_min_version': '0',
'existing_max_version': '*',
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 403
assert Block.objects.count() == 1
existing = existing.reload()
assert existing.min_version == '1' # check the values didn't update.
def _test_can_list_with_permission(self, permission):
# add some guids to the multi block to test out the counts in the list
addon = addon_factory(guid='guid@', name='Danger Danger')
block = Block.objects.create(
addon=addon_factory(
guid='block@', name='High Voltage', average_daily_users=1),
updated_by=user_factory(),
)
add_change_subm = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@\nblock@',
updated_by=user_factory(display_name='Bób'),
min_version='123',
action=BlocklistSubmission.ACTION_ADDCHANGE)
delete_subm = BlocklistSubmission.objects.create(
input_guids='block@',
updated_by=user_factory(display_name='Sué'),
action=BlocklistSubmission.ACTION_DELETE)
add_change_subm.save()
delete_subm.save()
assert add_change_subm.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users},
{'guid': 'block@',
'id': block.id,
'average_daily_users': block.addon.average_daily_users},
]
assert delete_subm.to_block == [
{'guid': 'block@',
'id': block.id,
'average_daily_users': block.addon.average_daily_users},
]
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, permission)
self.client.login(email=user.email)
response = self.client.get(self.submission_list_url, follow=True)
assert response.status_code == 200
assert 'Bób' in response.content.decode('utf-8')
assert 'Sué' in response.content.decode('utf-8')
doc = pq(response.content)
assert doc('th.field-blocks_count').text() == '1 add-ons 2 add-ons'
assert doc('.field-action').text() == (
'Delete Add/Change')
assert doc('.field-signoff_state').text() == 'Pending Pending'
def test_can_list_with_blocklist_create(self):
self._test_can_list_with_permission('Blocklist:Create')
def test_can_list_with_blocklist_signoff(self):
self._test_can_list_with_permission('Blocklist:Signoff')
def test_can_not_list_without_permission(self):
BlocklistSubmission.objects.create(
updated_by=user_factory(display_name='Bób'))
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.client.login(email=user.email)
response = self.client.get(self.submission_list_url, follow=True)
assert response.status_code == 403
assert 'Bób' not in response.content.decode('utf-8')
def test_edit_with_blocklist_create(self):
threshold = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD
addon = addon_factory(
guid='guid@', name='Danger Danger',
average_daily_users=threshold + 1)
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@\nsecond@invalid',
updated_by=user_factory())
assert mbs.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users}]
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,))
response = self.client.get(multi_url, follow=True)
assert response.status_code == 200
assert b'guid@<br>invalid@<br>second@invalid' in response.content
doc = pq(response.content)
buttons = doc('.submit-row input')
assert buttons[0].attrib['value'] == 'Update'
assert len(buttons) == 1
assert b'Reject Submission' not in response.content
assert b'Approve Submission' not in response.content
response = self.client.post(
multi_url, {
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url',
'reason': 'a new reason thats longer than 40 charactors',
'_save': 'Update',
},
follow=True)
assert response.status_code == 200
mbs = mbs.reload()
# the read-only values above weren't changed.
assert mbs.input_guids == 'guid@\ninvalid@\nsecond@invalid'
assert mbs.min_version == '0'
assert mbs.max_version == '*'
# but the other details were
assert mbs.url == 'new.url'
assert mbs.reason == 'a new reason thats longer than 40 charactors'
# The blocklistsubmission wasn't approved or rejected though
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_PENDING
assert Block.objects.count() == 0
log_entry = LogEntry.objects.get()
assert log_entry.user == user
assert log_entry.object_id == str(mbs.id)
assert log_entry.change_message == json.dumps(
[{'changed': {'fields': ['url', 'reason']}}])
response = self.client.get(multi_url, follow=True)
assert (
b'Changed "Pending: guid@, invalid@, second@invalid; '
b'new.url; a new reason thats longer than 40 cha...' in
response.content)
def test_edit_page_with_blocklist_signoff(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@\nsecond@invalid',
updated_by=user_factory())
assert mbs.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users}]
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,))
response = self.client.get(multi_url, follow=True)
assert response.status_code == 200
assert b'guid@<br>invalid@<br>second@invalid' in response.content
doc = pq(response.content)
buttons = doc('.submit-row input')
assert len(buttons) == 2
assert buttons[0].attrib['value'] == 'Reject Submission'
assert buttons[1].attrib['value'] == 'Approve Submission'
# Try to submit an update - no signoff approve or reject
response = self.client.post(
multi_url, {
'input_guids': 'guid2@\nfoo@baa',
'action': '1',
'min_version': '1',
'max_version': '99',
'url': 'new.url',
'reason': 'a reason',
'_save': 'Update',
},
follow=True)
assert response.status_code == 403
mbs = mbs.reload()
# none of the values above were changed because they're all read-only.
assert mbs.input_guids == 'guid@\ninvalid@\nsecond@invalid'
assert mbs.action == 0
assert mbs.min_version == '0'
assert mbs.max_version == '*'
assert mbs.url != 'new.url'
assert mbs.reason != 'a reason'
# The blocklistsubmission wasn't approved or rejected either
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_PENDING
assert Block.objects.count() == 0
assert LogEntry.objects.count() == 0
@override_switch('blocklist_legacy_submit', active=True)
@mock.patch('olympia.blocklist.models.legacy_publish_blocks')
def test_signoff_approve(self, legacy_publish_blocks_mock):
addon = addon_factory(guid='guid@', name='Danger Danger')
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@',
updated_by=user_factory(),
legacy_id=True)
assert mbs.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users}]
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,))
response = self.client.post(
multi_url, {
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # should be ignored
'reason': 'a reason', # should be ignored
'_approve': 'Approve Submission',
},
follow=True)
assert response.status_code == 200
mbs = mbs.reload()
assert mbs.signoff_by == user
# the read-only values above weren't changed.
assert mbs.input_guids == 'guid@\ninvalid@'
assert mbs.min_version == '0'
assert mbs.max_version == '*'
assert mbs.url != 'new.url'
assert mbs.reason != 'a reason'
# As it was signed off, the block should have been created
assert Block.objects.count() == 1
new_block = Block.objects.get()
assert new_block.addon == addon
logs = ActivityLog.objects.for_addons(addon)
add_log = logs[1]
signoff_log = logs[0]
assert add_log.action == amo.LOG.BLOCKLIST_BLOCK_ADDED.id
assert add_log.arguments == [addon, addon.guid, new_block]
assert add_log.details['min_version'] == '0'
assert add_log.details['max_version'] == '*'
assert add_log.details['reason'] == ''
assert add_log.details['signoff_state'] == 'Approved'
assert add_log.details['signoff_by'] == user.id
block_log = ActivityLog.objects.for_block(new_block).filter(
action=add_log.action).last()
assert block_log == add_log
vlog = ActivityLog.objects.for_version(addon.current_version).last()
assert vlog == add_log
assert signoff_log.action == amo.LOG.BLOCKLIST_SIGNOFF.id
assert signoff_log.arguments == [addon, addon.guid, 'add', new_block]
assert signoff_log.user == user
# blocks would have been submitted to remote settings legacy collection
legacy_publish_blocks_mock.assert_called()
legacy_publish_blocks_mock.assert_called_with([new_block])
assert mbs.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users}]
assert list(mbs.block_set.all()) == [new_block]
log_entry = LogEntry.objects.last()
assert log_entry.user == user
assert log_entry.object_id == str(mbs.id)
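        # Create an unrelated LogEntry with the same object_id but a different content
        # type, to check it is not picked up in the submission history rendered below.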
other_obj = addon_factory(id=mbs.id)
LogEntry.objects.log_action(
user_factory().id, ContentType.objects.get_for_model(other_obj).pk,
other_obj.id, repr(other_obj), ADDITION, 'not a Block!')
response = self.client.get(multi_url, follow=True)
assert (
b'Changed "Approved: guid@, invalid@'
b'" - Sign-off Approval' in
response.content)
assert b'not a Block!' not in response.content
@override_switch('blocklist_legacy_submit', active=True)
@mock.patch('olympia.blocklist.models.legacy_publish_blocks')
def test_signoff_reject(self, legacy_publish_blocks_mock):
addon = addon_factory(guid='guid@', name='Danger Danger')
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@',
updated_by=user_factory())
assert mbs.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users}]
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,))
response = self.client.post(
multi_url, {
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # should be ignored
'reason': 'a reason', # should be ignored
'_reject': 'Reject Submission',
},
follow=True)
assert response.status_code == 200
mbs = mbs.reload()
# the read-only values above weren't changed.
assert mbs.input_guids == 'guid@\ninvalid@'
assert mbs.min_version == '0'
assert mbs.max_version == '*'
assert mbs.url != 'new.url'
assert mbs.reason != 'a reason'
# blocks would not have been submitted to remote settings legacy
# collection
legacy_publish_blocks_mock.assert_not_called()
# And the blocklistsubmission was rejected, so no Blocks created
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_REJECTED
assert Block.objects.count() == 0
assert not mbs.is_submission_ready
log_entry = LogEntry.objects.last()
assert log_entry.user == user
assert log_entry.object_id == str(mbs.id)
other_obj = addon_factory(id=mbs.id)
LogEntry.objects.log_action(
user_factory().id, ContentType.objects.get_for_model(other_obj).pk,
other_obj.id, repr(other_obj), ADDITION, 'not a Block!')
response = self.client.get(multi_url, follow=True)
assert (
b'Changed "Rejected: guid@, invalid@'
b'" - Sign-off Rejection' in
response.content)
assert b'not a Block!' not in response.content
def test_cannot_approve_with_only_block_create_permission(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@',
updated_by=user_factory())
assert mbs.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users}]
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,))
response = self.client.post(
multi_url, {
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # could be updated with this permission
'reason': 'a reason', # could be updated with this permission
'_approve': 'Approve Submission',
},
follow=True)
assert response.status_code == 403
mbs = mbs.reload()
# It wasn't signed off
assert not mbs.signoff_by
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_PENDING
# And the details weren't updated either
assert mbs.url != 'new.url'
assert mbs.reason != 'a reason'
def test_can_only_reject_your_own_with_only_block_create_permission(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
submission = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@',
updated_by=user_factory())
assert submission.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users}]
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
change_url = reverse(
'admin:blocklist_blocklistsubmission_change',
args=(submission.id,))
response = self.client.post(
change_url, {
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # could be updated with this permission
'reason': 'a reason', # could be updated with this permission
'_reject': 'Reject Submission',
},
follow=True)
assert response.status_code == 403
submission = submission.reload()
# It wasn't signed off
assert not submission.signoff_by
assert submission.signoff_state == BlocklistSubmission.SIGNOFF_PENDING
# And the details weren't updated either
assert submission.url != 'new.url'
assert submission.reason != 'a reason'
# except if it's your own submission
submission.update(updated_by=user)
response = self.client.get(change_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
buttons = doc('.submit-row input')
assert buttons[0].attrib['value'] == 'Update'
assert buttons[1].attrib['value'] == 'Reject Submission'
assert len(buttons) == 2
assert b'Approve Submission' not in response.content
response = self.client.post(
change_url, {
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # could be updated with this permission
'reason': 'a reason', # could be updated with this permission
'_reject': 'Reject Submission',
},
follow=True)
assert response.status_code == 200
submission = submission.reload()
assert submission.signoff_state == BlocklistSubmission.SIGNOFF_REJECTED
assert not submission.signoff_by
assert submission.url == 'new.url'
assert submission.reason == 'a reason'
def test_signed_off_view(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@\nsecond@invalid',
updated_by=user_factory(),
signoff_by=user_factory(),
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED)
assert mbs.to_block == [
{'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users}]
mbs.save_to_block_objects()
block = Block.objects.get()
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
multi_view_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,))
response = self.client.get(multi_view_url, follow=True)
assert response.status_code == 200
assert b'guid@<br>invalid@<br>second@invalid' in response.content
doc = pq(response.content)
review_link = doc('div.field-blocks div div a')[0]
assert review_link.attrib['href'] == absolutify(reverse(
'reviewers.review', args=(addon.pk,)))
guid_link = doc('div.field-blocks div div a')[1]
assert guid_link.attrib['href'] == reverse(
'admin:blocklist_block_change', args=(block.pk,))
assert not doc('submit-row input')
def test_list_filters(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
addon_factory(guid='pending1@')
addon_factory(guid='pending2@')
addon_factory(guid='published@')
BlocklistSubmission.objects.create(
input_guids='pending1@\npending2@',
signoff_state=BlocklistSubmission.SIGNOFF_PENDING)
BlocklistSubmission.objects.create(
input_guids='missing@',
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED)
BlocklistSubmission.objects.create(
input_guids='published@',
signoff_state=BlocklistSubmission.SIGNOFF_PUBLISHED)
response = self.client.get(self.submission_list_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
# default is to only show Pending (signoff_state=0)
assert doc('#result_list tbody tr').length == 1
assert doc('.field-blocks_count').text() == '2 add-ons'
expected_filters = [
('All', '?signoff_state=all'),
('Pending', '?signoff_state=0'),
('Approved', '?signoff_state=1'),
('Rejected', '?signoff_state=2'),
('No Sign-off', '?signoff_state=3'),
('Published to Blocks', '?signoff_state=4'),
]
filters = [
(x.text, x.attrib['href']) for x in doc('#changelist-filter a')
]
assert filters == expected_filters
# Should be shown as selected too
assert doc('#changelist-filter li.selected a').text() == 'Pending'
        # Repeat with the Pending filter explicitly selected
response = self.client.get(self.submission_list_url, {
'signoff_state': 0,
})
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody tr').length == 1
assert doc('.field-blocks_count').text() == '2 add-ons'
assert doc('#changelist-filter li.selected a').text() == 'Pending'
# And then lastly with all submissions showing
response = self.client.get(self.submission_list_url, {
'signoff_state': 'all'
})
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody tr').length == 3
assert doc('#changelist-filter li.selected a').text() == 'All'
class TestBlockAdminEdit(TestCase):
def setUp(self):
self.addon = addon_factory(guid='guid@', name='Danger Danger')
self.block = Block.objects.create(
guid=self.addon.guid, updated_by=user_factory())
self.change_url = reverse(
'admin:blocklist_block_change', args=(self.block.pk,))
self.submission_url = reverse(
'admin:blocklist_blocklistsubmission_add')
def _test_edit(self, user, signoff_state):
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert str(self.addon.average_daily_users) in content
assert 'Block History' in content
assert 'imported from a regex based legacy' not in content
# Change the block
response = self.client.post(
self.change_url, {
'addon_id': addon_factory().id, # new addon should be ignored
'input_guids': self.block.guid,
'action': '0',
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'https://foo.baa',
'reason': 'some other reason',
'_continue': 'Save and continue editing',
},
follow=True)
assert response.status_code == 200
assert BlocklistSubmission.objects.exists()
submission = BlocklistSubmission.objects.get(
input_guids=self.block.guid)
assert submission.signoff_state == signoff_state
def _test_post_edit_logging(self, user):
assert Block.objects.count() == 1 # check we didn't create another
assert Block.objects.first().addon == self.addon # wasn't changed
log = ActivityLog.objects.for_addons(self.addon).last()
assert log.action == amo.LOG.BLOCKLIST_BLOCK_EDITED.id
assert log.arguments == [self.addon, self.addon.guid, self.block]
assert log.details['min_version'] == '0'
assert log.details['max_version'] == self.addon.current_version.version
assert log.details['reason'] == 'some other reason'
block_log = ActivityLog.objects.for_block(self.block).filter(
action=log.action).last()
assert block_log == log
block_log_by_guid = ActivityLog.objects.for_guidblock('guid@').filter(
action=log.action).last()
assert block_log_by_guid == log
vlog = ActivityLog.objects.for_version(
self.addon.current_version).last()
assert vlog == log
# Check the block history contains the edit just made.
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
todaysdate = datetime.datetime.now().date()
assert f'<a href="https://foo.baa">{todaysdate}</a>' in content
assert f'Block edited by {user.name}:\n {self.block.guid}' in (
content)
assert f'versions 0 - {self.addon.current_version.version}' in content
        assert 'Included in legacy blocklist' not in content
def test_edit_low_adu(self):
user = user_factory()
self.addon.update(
average_daily_users=(
settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD))
self._test_edit(user, BlocklistSubmission.SIGNOFF_PUBLISHED)
self._test_post_edit_logging(user)
def test_edit_high_adu(self):
user = user_factory()
self.addon.update(
average_daily_users=(
settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD + 1))
self._test_edit(user, BlocklistSubmission.SIGNOFF_PENDING)
submission = BlocklistSubmission.objects.get(
input_guids=self.block.guid)
submission.update(
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED,
signoff_by=user_factory())
submission.save_to_block_objects()
self._test_post_edit_logging(user)
def test_edit_high_adu_only_metadata(self):
user = user_factory()
self.addon.update(
average_daily_users=(
settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD + 1))
self.block.update(max_version=self.addon.current_version.version)
self._test_edit(user, BlocklistSubmission.SIGNOFF_PUBLISHED)
self._test_post_edit_logging(user)
def test_invalid_versions_not_accepted(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
deleted_addon = addon_factory(version_kw={'version': '345.34a'})
deleted_addon.delete()
deleted_addon.addonguid.update(guid=self.addon.guid)
self.addon.current_version.update(version='123.4b5')
version_factory(addon=self.addon, version='678')
# Update min_version in self.block to a version that doesn't exist
self.block.update(min_version='444.4a')
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
doc = pq(content)
ver_list = doc('#id_min_version option')
assert len(ver_list) == 5
assert ver_list.eq(0).attr['value'] == '444.4a'
assert ver_list.eq(0).text() == '(invalid)'
assert ver_list.eq(1).attr['value'] == '0'
assert ver_list.eq(2).attr['value'] == '123.4b5'
assert ver_list.eq(3).attr['value'] == '345.34a'
assert ver_list.eq(4).attr['value'] == '678'
ver_list = doc('#id_max_version option')
assert len(ver_list) == 4
assert ver_list.eq(0).attr['value'] == '*'
assert ver_list.eq(1).attr['value'] == '123.4b5'
assert ver_list.eq(2).attr['value'] == '345.34a'
assert ver_list.eq(3).attr['value'] == '678'
data = {
'input_guids': self.block.guid,
'action': '0',
'url': 'https://foo.baa',
'reason': 'some other reason',
'_save': 'Update',
}
# Try saving the form with the same min_version
response = self.client.post(
self.change_url, dict(
min_version='444.4a', # current value, but not a version.
max_version=self.addon.current_version.version, # valid
**data),
follow=True)
assert response.status_code == 200
assert b'Invalid version' in response.content
self.block = self.block.reload()
assert self.block.min_version == '444.4a' # not changed
assert self.block.max_version == '*' # not changed either.
assert not ActivityLog.objects.for_addons(self.addon).exists()
doc = pq(content)
assert doc('#id_min_version option').eq(0).attr['value'] == '444.4a'
# Change to a version that exists
response = self.client.post(
self.change_url, dict(
min_version='345.34a',
max_version='*',
**data),
follow=True)
assert response.status_code == 200
assert b'Invalid version' not in response.content
self.block = self.block.reload()
assert self.block.min_version == '345.34a' # changed
assert self.block.max_version == '*'
assert ActivityLog.objects.for_addons(self.addon).exists()
        # the invalid value should no longer appear in the list of versions either.
assert b'444.4a' not in response.content
def test_can_not_edit_without_permission(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.client.login(email=user.email)
response = self.client.get(self.change_url, follow=True)
assert response.status_code == 403
assert b'Danger Danger' not in response.content
# Try to edit the block anyway
response = self.client.post(
self.change_url, {
'input_guids': self.block.guid,
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 403
assert Block.objects.count() == 1
def test_cannot_edit_when_guid_in_blocklistsubmission_change(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
blocksubm = BlocklistSubmission.objects.create(
input_guids=self.block.guid,
min_version='123.45')
assert blocksubm.to_block == [{
'id': self.block.id,
'guid': self.block.guid,
'average_daily_users': self.block.addon.average_daily_users}]
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert 'Add/Change submission pending' in content
submission_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(blocksubm.id,))
assert 'min_version: "0" to "123.45"' in content
assert submission_url in content
assert 'Close' in content
assert '_save' not in content
assert 'deletelink' not in content
# Try to edit the block anyway
response = self.client.post(
self.change_url, {
'input_guids': self.block.guid,
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 403
assert self.block.max_version == '*' # not changed
def test_cannot_edit_when_guid_in_blocklistsubmission_delete(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
blocksubm = BlocklistSubmission.objects.create(
input_guids=self.block.guid,
action=BlocklistSubmission.ACTION_DELETE)
assert blocksubm.to_block == [{
'id': self.block.id,
'guid': self.block.guid,
'average_daily_users': self.block.addon.average_daily_users}]
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert 'Delete submission pending' in content
submission_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(blocksubm.id,))
assert submission_url in content
assert 'Close' in content
assert '_save' not in content
assert 'deletelink' not in content
# Try to edit the block anyway
response = self.client.post(
self.change_url, {
'input_guids': self.block.guid,
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 403
assert self.block.max_version == '*' # not changed
def test_imported_regex_block(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
self.block.update(legacy_id='*foo@baa')
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert str(self.addon.average_daily_users) in content
assert 'Block History' in content
assert 'imported from a regex based legacy' in content
@override_switch('blocklist_legacy_submit', active=False)
def test_cannot_edit_when_imported_block(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
self.block.update(legacy_id='123456')
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert 'Close' in content
assert '_save' not in content
assert 'deletelink' not in content
# Try to edit the block anyway
response = self.client.post(
self.change_url, {
'input_guids': self.block.guid,
'action': '0',
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True)
assert response.status_code == 403
assert self.block.max_version == '*' # not changed
@override_switch('blocklist_legacy_submit', active=True)
@mock.patch('olympia.blocklist.models.legacy_publish_blocks')
def test_can_edit_imported_block_if_legacy_submit_waffle_on(self, pub_mck):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
self.block.update(legacy_id='123456')
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert 'Close' not in content
assert '_save' in content
assert 'deletelink' in content
assert self.block.in_legacy_blocklist is True
# We can edit the block
assert not BlocklistSubmission.objects.exists()
response = self.client.post(
self.change_url, {
'input_guids': self.block.guid,
'action': '0',
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'legacy_id': '123456',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
assert BlocklistSubmission.objects.exists()
BlocklistSubmission.objects.get(
input_guids=self.block.guid)
pub_mck.assert_called_with([self.block])
self.block.reload()
assert self.block.in_legacy_blocklist is True
@override_switch('blocklist_legacy_submit', active=False)
def test_legacy_id_property_is_readonly(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
self.block.update(legacy_id='')
response = self.client.get(self.change_url, follow=True)
assert pq(response.content)('.field-legacy_id .readonly')
assert b'_save' in response.content
assert self.block.in_legacy_blocklist is False
# Try to edit the block
response = self.client.post(
self.change_url, {
'input_guids': self.block.guid,
'action': '0',
'min_version': self.block.min_version,
'max_version': self.block.max_version,
'url': '',
'reason': 'Changed!',
'legacy_id': '34344545',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
self.block.reload()
assert self.block.reason == 'Changed!'
assert self.block.in_legacy_blocklist is False
@override_switch('blocklist_legacy_submit', active=True)
@mock.patch('olympia.blocklist.models.legacy_delete_blocks')
def test_legacy_id_is_enabled_with_legacy_submit_waffle_on(self, del_mock):
del_mock.side_effect = lambda blocks: blocks[0].update(legacy_id='')
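        # The mock clears the block's legacy_id, standing in for the legacy delete call
        # so the in_legacy_blocklist assertions below can be checked afterwards.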
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
self.block.update(legacy_id='3467635')
response = self.client.get(self.change_url, follow=True)
assert pq(response.content)('.field-legacy_id input')
assert b'_save' in response.content
# Try to edit the block
response = self.client.post(
self.change_url, {
'input_guids': self.block.guid,
'action': '0',
'min_version': self.block.min_version,
'max_version': self.block.max_version,
'url': '',
'reason': 'Changed!',
# no legacy_id so clearing it
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
assert BlocklistSubmission.objects.exists()
self.block.reload()
assert self.block.reason == 'Changed!'
del_mock.assert_called_with([self.block])
assert self.block.in_legacy_blocklist is False
class TestBlockAdminDelete(TestCase):
def setUp(self):
self.delete_url = reverse('admin:blocklist_block_delete_multiple')
self.submission_url = reverse(
'admin:blocklist_blocklistsubmission_add')
def test_delete_input(self):
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.delete_url, follow=True)
assert b'Add-on GUIDs (one per line)' in response.content
        # Submitting an empty list of guids should redirect back to the page
response = self.client.post(
self.delete_url, {'guids': ''}, follow=False)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'This field is required' in response.content
# Any invalid guids should redirect back to the page too, with an error
Block.objects.create(
addon=addon_factory(guid='guid@'), updated_by=user_factory())
response = self.client.post(
self.delete_url, {'guids': 'guid@\n{12345-6789}'}, follow=False)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'Block with GUID {12345-6789} not found' in response.content
# Valid blocks are redirected to the multiple guid view
# We're purposely not creating the add-on here to test the edge-case
# where the addon has been hard-deleted or otherwise doesn't exist.
Block.objects.create(
guid='{12345-6789}', updated_by=user_factory())
assert Block.objects.count() == 2
response = self.client.post(
self.delete_url, {'guids': 'guid@\n{12345-6789}'}, follow=True)
self.assertRedirects(response, self.submission_url, status_code=307)
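        # A 307 redirect preserves the POST, so the guids are re-submitted to the
        # submission view rather than being dropped.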
# If a block is already present in a submission though, we error
BlocklistSubmission.objects.create(
input_guids='guid@', min_version='1').save()
response = self.client.post(
self.delete_url, {'guids': 'guid@\n{12345-6789}'}, follow=False)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'GUID guid@ is in a pending Submission' in response.content
def _test_delete_multiple_submit(self, addon_adu):
"""addon_adu is important because whether dual signoff is needed is
based on what the average_daily_users is."""
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
block_normal = Block.objects.create(
addon=addon_factory(
guid='guid@', name='Normal', average_daily_users=addon_adu),
updated_by=user_factory())
block_no_addon = Block.objects.create(
guid='{12345-6789}',
updated_by=user_factory())
block_legacy = Block.objects.create(
addon=addon_factory(guid='legacy@'),
legacy_id='123456', updated_by=user_factory())
response = self.client.post(
self.submission_url, {
'guids': 'guid@\n{12345-6789}\nlegacy@',
'action': '1',
},
follow=True)
content = response.content.decode('utf-8')
# meta data for block:
assert 'Add-on GUIDs (one per line)' not in content
assert 'Delete Blocks' in content
assert 'guid@' in content
assert 'Normal' in content
assert str(block_normal.addon.average_daily_users) in content
assert '{12345-6789}' in content
# The fields only used for Add/Change submissions shouldn't be shown
assert '"min_version"' not in content
assert '"max_version"' not in content
assert 'reason' not in content
assert 'legacy_id' not in content
# Check we didn't delete the blocks already
assert Block.objects.count() == 3
assert BlocklistSubmission.objects.count() == 0
# Create the block submission
response = self.client.post(
self.submission_url, {
'input_guids': (
'guid@\n{12345-6789}\nlegacy@'),
'action': '1',
'_save': 'Save',
},
follow=True)
assert response.status_code == 200
return block_normal, block_no_addon, block_legacy
def _test_delete_verify(self, block_with_addon, block_no_addon,
block_legacy, has_signoff=True):
block_from_addon = block_with_addon.addon
assert Block.objects.count() == 0
assert BlocklistSubmission.objects.count() == 1
submission = BlocklistSubmission.objects.get()
add_log = ActivityLog.objects.for_addons(block_from_addon).last()
assert add_log.action == amo.LOG.BLOCKLIST_BLOCK_DELETED.id
assert add_log.arguments == [
block_from_addon, block_from_addon.guid, None]
if has_signoff:
assert add_log.details['signoff_state'] == 'Approved'
assert add_log.details['signoff_by'] == submission.signoff_by.id
else:
assert add_log.details['signoff_state'] == 'No Sign-off'
assert 'signoff_by' not in add_log.details
vlog = ActivityLog.objects.for_version(
block_from_addon.current_version).last()
assert vlog == add_log
assert submission.input_guids == (
'guid@\n{12345-6789}\nlegacy@')
assert submission.to_block == [
{'guid': 'guid@', 'id': block_with_addon.id,
'average_daily_users': block_from_addon.average_daily_users},
{'guid': 'legacy@', 'id': block_legacy.id,
'average_daily_users': block_legacy.addon.average_daily_users},
{'guid': '{12345-6789}', 'id': block_no_addon.id,
'average_daily_users': -1},
]
assert not submission.block_set.all().exists()
@override_switch('blocklist_legacy_submit', active=True)
@mock.patch('olympia.blocklist.models.legacy_delete_blocks')
def test_submit_no_dual_signoff(self, legacy_delete_blocks_mock):
addon_adu = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD
block_with_addon, block_no_addon, block_legacy = (
self._test_delete_multiple_submit(addon_adu=addon_adu))
self._test_delete_verify(
block_with_addon,
block_no_addon,
block_legacy,
has_signoff=False)
legacy_delete_blocks_mock.assert_called_with([
block_with_addon,
block_no_addon,
block_legacy])
@override_switch('blocklist_legacy_submit', active=True)
@mock.patch('olympia.blocklist.models.legacy_delete_blocks')
def test_submit_dual_signoff(self, legacy_delete_blocks_mock):
addon_adu = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD + 1
block_with_addon, block_no_addon, block_legacy = (
self._test_delete_multiple_submit(addon_adu=addon_adu))
# Blocks shouldn't have been deleted yet
assert Block.objects.count() == 3, Block.objects.all()
submission = BlocklistSubmission.objects.get()
submission.update(
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED,
signoff_by=user_factory())
assert submission.is_submission_ready
submission.delete_block_objects()
self._test_delete_verify(
block_with_addon,
block_no_addon,
block_legacy,
has_signoff=True)
legacy_delete_blocks_mock.assert_called_with([
block_with_addon,
block_no_addon,
block_legacy])
def test_edit_with_delete_submission(self):
threshold = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD
block = Block.objects.create(
addon=addon_factory(
guid='guid@', name='Danger Danger',
average_daily_users=threshold + 1),
updated_by=user_factory())
mbs = BlocklistSubmission.objects.create(
input_guids='guid@',
updated_by=user_factory(),
action=BlocklistSubmission.ACTION_DELETE)
assert mbs.to_block == [
{'guid': 'guid@',
'id': block.id,
'average_daily_users': block.addon.average_daily_users}]
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,))
response = self.client.get(multi_url, follow=True)
assert response.status_code == 200
assert b'guid@' in response.content
doc = pq(response.content)
buttons = doc('.submit-row input')
assert len(buttons) == 0
assert b'Reject Submission' not in response.content
assert b'Approve Submission' not in response.content
def test_django_delete_redirects_to_bulk(self):
block = Block.objects.create(
addon=addon_factory(guid='foo@baa', name='Danger Danger'),
updated_by=user_factory())
django_delete_url = reverse(
'admin:blocklist_block_delete', args=(block.pk,))
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
assert Block.objects.count() == 1
response = self.client.get(django_delete_url, follow=True)
self.assertRedirects(
response,
self.submission_url + '?guids=foo@baa&action=1',
target_status_code=200)
# No immediate delete.
assert Block.objects.count() == 1
assert not ActivityLog.objects.for_addons(block.addon).filter(
action=amo.LOG.BLOCKLIST_BLOCK_DELETED.id).exists()
assert not ActivityLog.objects.for_block(block).filter(
action=amo.LOG.BLOCKLIST_BLOCK_DELETED.id).exists()
def test_can_not_delete_without_permission(self):
block = Block.objects.create(
addon=addon_factory(guid='foo@baa', name='Danger Danger'),
updated_by=user_factory())
django_delete_url = reverse(
'admin:blocklist_block_delete', args=(block.pk,))
user = user_factory()
self.grant_permission(user, 'Admin:Tools')
self.client.login(email=user.email)
assert Block.objects.count() == 1
# Can't access delete confirmation page.
response = self.client.get(django_delete_url, follow=True)
assert response.status_code == 403
| 43.216575
| 79
| 0.617333
|
56cd833164f1101869b75cb9347944c31e1612af
| 2,443
|
py
|
Python
|
RoloDataset.py
|
JunwookHeo/YOLO-OT
|
7004f25ce858acb7253bfcbc6fabeb915d8747a3
|
[
"MIT"
] | null | null | null |
RoloDataset.py
|
JunwookHeo/YOLO-OT
|
7004f25ce858acb7253bfcbc6fabeb915d8747a3
|
[
"MIT"
] | null | null | null |
RoloDataset.py
|
JunwookHeo/YOLO-OT
|
7004f25ce858acb7253bfcbc6fabeb915d8747a3
|
[
"MIT"
] | null | null | null |
import glob
import os
import torch
import numpy as np
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from coord_utils import *
class RoloDataset(Dataset):
""" Loading frames in a video file """
def __init__(self, path, label, seq_num, img_size, mode):
self.path = path
self.label = label
self.seq_num = seq_num
self.img_size = img_size
self.mode = mode
self.frames = sorted(glob.glob("%s/*.*" % os.path.join(path, 'img')))
        self.images = sorted(glob.glob("%s/*.*" % os.path.join(path, 'yolo_out')))
with open(label, "r") as file:
self.labels = file.readlines()
self.num_frames = len(self.images)
self.start_pos = 0
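        # Split the available frames 60/20/20 into train/validate/test partitions
        # depending on the requested mode.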
        if self.mode == 'train':
            self.num_frames = int(self.num_frames*0.6)
        elif self.mode == 'validate':
            self.start_pos = int(self.num_frames*0.6)
            end_pos = int(self.num_frames*0.8)
            self.num_frames = end_pos - self.start_pos
        elif self.mode == 'test':
            self.start_pos = int(self.num_frames*0.8)
            self.num_frames = self.num_frames - self.start_pos
def __len__(self):
return self.num_frames - (self.seq_num - 1)
def __getitem__(self, idx):
frames = []
fis = []
locs = []
labels = []
for i in range(self.seq_num):
pos = idx + i + self.start_pos
frame = np.array(Image.open(self.frames[pos]))
frame = torch.from_numpy(frame)
image = np.load(self.images[pos])
image = torch.from_numpy(image[0]).float()
fi = image
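            # The stored YOLO output is used directly as the visual feature `fi`; the tail
            # slice from index 4097 onwards is assumed to hold the predicted box location.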
loc = image[4097:]
label = self.labels[pos].split('\t') # for gt type 2
if len(label) < 4:
label = self.labels[pos].split(',') # for gt type 1
# Convert (x1, y1, w, h) into (cx, cy, w, h)
label = np.array(label, dtype=float)
label[0] += label[2]/2.
label[1] += label[3]/2.
label = torch.as_tensor(label, dtype=torch.float32)
frames.append(frame)
fis.append(fi)
locs.append(loc)
labels.append(label)
return torch.stack(frames, dim=0), torch.stack(fis, dim=0), torch.stack(locs, dim=0), torch.stack(labels, dim=0)
| 33.013514
| 120
| 0.537863
|
763ee8b071afc36baaac3753be790860623594d0
| 4,390
|
py
|
Python
|
apps/search/src/search/search_controller.py
|
lumanjiao/XLS_BigData
|
2c4c37872b8636df1c8b0e005bc12a635a753c7a
|
[
"Apache-2.0"
] | 11
|
2019-03-20T07:38:35.000Z
|
2021-06-18T09:42:46.000Z
|
apps/search/src/search/search_controller.py
|
lumanjiao/XLS_BigData
|
2c4c37872b8636df1c8b0e005bc12a635a753c7a
|
[
"Apache-2.0"
] | null | null | null |
apps/search/src/search/search_controller.py
|
lumanjiao/XLS_BigData
|
2c4c37872b8636df1c8b0e005bc12a635a753c7a
|
[
"Apache-2.0"
] | 5
|
2019-06-29T03:13:02.000Z
|
2020-04-23T04:47:11.000Z
|
#!/usr/bin/env python
# -- coding: utf-8 --
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.db.models import Q
from desktop.models import Document2, Document, SAMPLE_USERNAME
from libsolr.api import SolrApi
from search.conf import SOLR_URL
from search.models import Collection2
LOG = logging.getLogger(__name__)
class SearchController(object):
"""
Glue the models to the views.
"""
def __init__(self, user):
self.user = user
def get_search_collections(self):
return [d.content_object for d in Document.objects.get_docs(self.user, Document2, extra='search-dashboard').order_by('-id')]
def get_shared_search_collections(self):
# Those are the ones appearing in the menu
docs = Document.objects.filter(Q(owner=self.user) | Q(owner__username=SAMPLE_USERNAME), extra='search-dashboard')
return [d.content_object for d in docs.order_by('-id')]
def get_owner_search_collections(self):
if self.user.is_superuser:
docs = Document.objects.filter(extra='search-dashboard')
else:
docs = Document.objects.filter(extra='search-dashboard', owner=self.user)
return [d.content_object for d in docs.order_by('-id')]
def get_icon(self, name):
if name == 'Twitter':
return 'search/art/icon_twitter_48.png'
elif name == 'Yelp Reviews':
return 'search/art/icon_yelp_48.png'
elif name == 'Web Logs':
return 'search/art/icon_logs_48.png'
else:
return 'search/art/icon_search_48.png'
def delete_collections(self, collection_ids):
result = {'status': -1, 'message': ''}
try:
for doc2 in self.get_owner_search_collections():
if doc2.id in collection_ids:
doc = doc2.doc.get()
doc.delete()
doc2.delete()
result['status'] = 0
except Exception, e:
LOG.warn('Error deleting collection: %s' % e)
result['message'] = unicode(str(e), "utf8")
return result
def copy_collections(self, collection_ids):
result = {'status': -1, 'message': ''}
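    # Duplicate both the Document2 and its Document wrapper, give the copy a '-copy'
    # suffix and hand ownership to the current user before saving the dashboard data.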
try:
for doc2 in self.get_shared_search_collections():
if doc2.id in collection_ids:
doc2 = Document2.objects.get(uuid=doc2.uuid)
doc = doc2.doc.get()
name = doc2.name + '-copy'
doc2 = doc2.copy(name=name, owner=self.user)
doc.copy(content_object=doc2, name=name, owner=self.user)
collection = Collection2(self.user, document=doc2)
collection.data['collection']['label'] = name
doc2.update_data({'collection': collection.data['collection']})
doc2.save()
result['status'] = 0
except Exception, e:
LOG.exception('Error copying collection')
result['message'] = unicode(str(e), "utf8")
return result
def is_collection(self, collection_name):
solr_collections = SolrApi(SOLR_URL.get(), self.user).collections()
return collection_name in solr_collections
def is_core(self, core_name):
solr_cores = SolrApi(SOLR_URL.get(), self.user).cores()
return core_name in solr_cores
def get_solr_collection(self):
return SolrApi(SOLR_URL.get(), self.user).collections()
def get_all_indexes(self, show_all=False):
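    # Gather Solr collection names, then alias names; fall back to (or, with show_all,
    # extend with) the plain core names.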
indexes = []
try:
indexes = self.get_solr_collection().keys()
except:
LOG.exception('failed to get indexes')
try:
indexes += SolrApi(SOLR_URL.get(), self.user).aliases().keys()
except:
LOG.exception('failed to get index aliases')
if show_all or not indexes:
return indexes + SolrApi(SOLR_URL.get(), self.user).cores().keys()
else:
return indexes
def can_edit_index(user):
return user.is_superuser
| 31.811594
| 128
| 0.687016
|
ba771970c2d7733bc06b9fd83c9056d431c41c25
| 2,543
|
py
|
Python
|
sdk/python/pulumi_aws/ses/receipt_filter.py
|
lemonade-hq/pulumi-aws
|
9ee22c65c7bad42d38b16879ccd56526d856a01a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ses/receipt_filter.py
|
lemonade-hq/pulumi-aws
|
9ee22c65c7bad42d38b16879ccd56526d856a01a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ses/receipt_filter.py
|
lemonade-hq/pulumi-aws
|
9ee22c65c7bad42d38b16879ccd56526d856a01a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-03-08T15:05:29.000Z
|
2021-03-08T15:05:29.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class ReceiptFilter(pulumi.CustomResource):
cidr: pulumi.Output[str]
"""
The IP address or address range to filter, in CIDR notation
"""
name: pulumi.Output[str]
"""
The name of the filter
"""
policy: pulumi.Output[str]
"""
Block or Allow
"""
def __init__(__self__, resource_name, opts=None, cidr=None, name=None, policy=None, __name__=None, __opts__=None):
"""
Provides an SES receipt filter resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cidr: The IP address or address range to filter, in CIDR notation
:param pulumi.Input[str] name: The name of the filter
:param pulumi.Input[str] policy: Block or Allow
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if cidr is None:
raise TypeError('Missing required property cidr')
__props__['cidr'] = cidr
__props__['name'] = name
if policy is None:
raise TypeError('Missing required property policy')
__props__['policy'] = policy
super(ReceiptFilter, __self__).__init__(
'aws:ses/receiptFilter:ReceiptFilter',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 35.319444
| 118
| 0.653559
|
bfce8c7ffb20e934ee55807d12345860284cfd16
| 1,546
|
py
|
Python
|
CURSO PYTHON UDEMY/Curso Udemy/Mundo 4 (POO)/98. Classes - Python Orientado a Objetos.py
|
nihilboy1455/CURSO-PYTHON-UDEMY
|
ef27b9bba12672f5426ddeeb3fd70304073a56e2
|
[
"MIT"
] | null | null | null |
CURSO PYTHON UDEMY/Curso Udemy/Mundo 4 (POO)/98. Classes - Python Orientado a Objetos.py
|
nihilboy1455/CURSO-PYTHON-UDEMY
|
ef27b9bba12672f5426ddeeb3fd70304073a56e2
|
[
"MIT"
] | null | null | null |
CURSO PYTHON UDEMY/Curso Udemy/Mundo 4 (POO)/98. Classes - Python Orientado a Objetos.py
|
nihilboy1455/CURSO-PYTHON-UDEMY
|
ef27b9bba12672f5426ddeeb3fd70304073a56e2
|
[
"MIT"
] | null | null | null |
from CLASSES98 import Pessoa, Pessoas1
'''
When I am creating a class, I should really think of it as a real object
capable of performing actions, and only then turn that into code.
'''
a = Pessoa()
a.falar()
'''
I create a class, and then the class attributes.
Afterwards I can assign the class to a variable and call the class attributes as methods of that variable.
'''
bo = Pessoas1('Bolsonaro', 66)
lu = Pessoas1('Lula', 76)
print(bo.nome)
'''
Here, for example, when I call "bo.nome" I am referring to the class's "nome" attribute.
'''
bo.outro_metodo()
'''
Here I am calling the variable that was assigned in the method above from the method below.
'''
bo.comer('uva') # Bolsonaro está comendo um(a) uva
bo.falar('POO') # Bolsonaro não pode falar comendo...
bo.parar_de_comer() # Bolsonaro parou de comer!
bo.parar_de_comer() # Bolsonaro não está comendo!
bo.falar('POO') # Bolsonaro está falando POO
bo.falar('POO') # Bolsonaro já está falando...
lu.falar('bolinho') # Lula está falando bolinho
bo.comer('uva') # Bolsonaro não consegue comer falando
bo.parar_de_falar() # Bolsonaro parou de falar!
bo.comer('uva') # Bolsonaro está comendo um(a) uva
bo.comer('uva') # Bolsonaro já comeu!
bo.parar_de_comer() # Bolsonaro parou de comer!
bo.falar('POO') # Bolsonaro está falando POO
'''
The instances are independent: if one of them cannot do something, the other one may still be able to.
'''
print(bo.anoatual)
print(bo.get_ano_de_nasc())
'''
Here I am using a fixed (class-level) variable and an instance variable to do a calculation.
'''
| 33.608696
| 113
| 0.729625
|
0f97fab38cff023ab701eb3e7e51f9f286b78777
| 498
|
py
|
Python
|
modules/cbflib/dREL-ply-0.5/drel_prep.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | null | null | null |
modules/cbflib/dREL-ply-0.5/drel_prep.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | null | null | null |
modules/cbflib/dREL-ply-0.5/drel_prep.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | 1
|
2020-02-04T15:39:06.000Z
|
2020-02-04T15:39:06.000Z
|
#!/usr/bin/python
import sys
class Process:
def execute_method(self):
valuename = sys.argv[1]
editname = valuename + '_local'
print valuename
print editname
f = open("cbf_data_debug", "r")
data = f.readline()
str_list = []
while data:
data = f.readline()
print data
str = data.replace(valuename, editname)
print str
str_list.append(str)
str = "".join(str_list)
fout = open("cbf_data_debug_changed", "w")
print>>fout, str
p = Process()
p.execute_method()
| 18.444444
| 44
| 0.662651
|
5a700c1668d9dc20b736df38a547204a75d5e56e
| 47,855
|
py
|
Python
|
blink/biencoder/train_biencoder_mst.py
|
parin1995/claim2fact
|
9f928a5789b3dc85cfa69395e0ba02b0b84276a8
|
[
"MIT"
] | null | null | null |
blink/biencoder/train_biencoder_mst.py
|
parin1995/claim2fact
|
9f928a5789b3dc85cfa69395e0ba02b0b84276a8
|
[
"MIT"
] | null | null | null |
blink/biencoder/train_biencoder_mst.py
|
parin1995/claim2fact
|
9f928a5789b3dc85cfa69395e0ba02b0b84276a8
|
[
"MIT"
] | 1
|
2021-12-05T21:20:49.000Z
|
2021-12-05T21:20:49.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2021 Dhruv Agarwal and authors of arboEL.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import random
import time
import pickle5 as pickle
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler)
from pytorch_transformers.optimization import WarmupLinearSchedule
from tqdm import tqdm, trange
from special_partition.special_partition import cluster_linking_partition
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse import csr_matrix
from collections import Counter
import blink.biencoder.data_process_mult as data_process
import blink.biencoder.eval_cluster_linking as eval_cluster_linking
import blink.candidate_ranking.utils as utils
from blink.biencoder.biencoder import BiEncoderRanker
from blink.common.optimizer import get_bert_optimizer
from blink.common.params import BlinkParser
from IPython import embed
logger = None
def evaluate(reranker, valid_dict_vecs, valid_men_vecs, device, logger, knn, n_gpu, entity_data, query_data,
silent=False, use_types=False, embed_batch_size=768, force_exact_search=False, probe_mult_factor=1,
within_doc=False, context_doc_ids=None):
torch.cuda.empty_cache()
reranker.model.eval()
n_entities = len(valid_dict_vecs)
n_mentions = len(valid_men_vecs)
joint_graphs = {}
max_knn = 8
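    # Build a joint mention-entity graph for several values of k, where k is the number
    # of nearest-mention edges added per mention (k=0 keeps only mention-entity edges).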
for k in [0, 1, 2, 4, 8]:
joint_graphs[k] = {
'rows': np.array([]),
'cols': np.array([]),
'data': np.array([]),
'shape': (n_entities+n_mentions, n_entities+n_mentions)
}
if use_types:
logger.info("Eval: Dictionary: Embedding and building index")
dict_embeds, dict_indexes, dict_idxs_by_type = data_process.embed_and_index(reranker, valid_dict_vecs, encoder_type="candidate", n_gpu=n_gpu, corpus=entity_data, force_exact_search=force_exact_search, batch_size=embed_batch_size, probe_mult_factor=probe_mult_factor)
logger.info("Eval: Queries: Embedding and building index")
men_embeds, men_indexes, men_idxs_by_type = data_process.embed_and_index(reranker, valid_men_vecs, encoder_type="context", n_gpu=n_gpu, corpus=query_data, force_exact_search=force_exact_search, batch_size=embed_batch_size, probe_mult_factor=probe_mult_factor)
else:
logger.info("Eval: Dictionary: Embedding and building index")
dict_embeds, dict_index = data_process.embed_and_index(
reranker, valid_dict_vecs, 'candidate', n_gpu=n_gpu, force_exact_search=force_exact_search, batch_size=embed_batch_size, probe_mult_factor=probe_mult_factor)
logger.info("Eval: Queries: Embedding and building index")
men_embeds, men_index = data_process.embed_and_index(
reranker, valid_men_vecs, 'context', n_gpu=n_gpu, force_exact_search=force_exact_search, batch_size=embed_batch_size, probe_mult_factor=probe_mult_factor)
logger.info("Eval: Starting KNN search...")
    # Fetch the single nearest entity candidate for every mention
    # Fetch (k+1) NN mention candidates; fetching all mentions for within_doc to filter down later
n_men_to_fetch = len(men_embeds) if within_doc else max_knn + 1
if not use_types:
nn_ent_dists, nn_ent_idxs = dict_index.search(men_embeds, 1)
nn_men_dists, nn_men_idxs = men_index.search(men_embeds, n_men_to_fetch)
else:
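        # Type-partitioned search: start with -1 sentinels, then fill per entity type
        # using the per-type indexes and map the local indices back to global ones.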
nn_ent_idxs = -1 * np.ones((len(men_embeds), 1), dtype=int)
nn_ent_dists = -1 * np.ones((len(men_embeds), 1), dtype='float64')
nn_men_idxs = -1 * np.ones((len(men_embeds), n_men_to_fetch), dtype=int)
nn_men_dists = -1 * np.ones((len(men_embeds), n_men_to_fetch), dtype='float64')
for entity_type in men_indexes:
men_embeds_by_type = men_embeds[men_idxs_by_type[entity_type]]
nn_ent_dists_by_type, nn_ent_idxs_by_type = dict_indexes[entity_type].search(men_embeds_by_type, 1)
nn_ent_idxs_by_type = np.array(list(map(lambda x: dict_idxs_by_type[entity_type][x], nn_ent_idxs_by_type)))
nn_men_dists_by_type, nn_men_idxs_by_type = men_indexes[entity_type].search(men_embeds_by_type, min(n_men_to_fetch, len(men_embeds_by_type)))
nn_men_idxs_by_type = np.array(list(map(lambda x: men_idxs_by_type[entity_type][x], nn_men_idxs_by_type)))
for i, idx in enumerate(men_idxs_by_type[entity_type]):
nn_ent_idxs[idx] = nn_ent_idxs_by_type[i]
nn_ent_dists[idx] = nn_ent_dists_by_type[i]
nn_men_idxs[idx][:len(nn_men_idxs_by_type[i])] = nn_men_idxs_by_type[i]
nn_men_dists[idx][:len(nn_men_dists_by_type[i])] = nn_men_dists_by_type[i]
logger.info("Eval: Search finished")
logger.info('Eval: Building graphs')
for men_query_idx, men_embed in enumerate(tqdm(men_embeds, total=len(men_embeds), desc="Eval: Building graphs")):
# Get nearest entity candidate
dict_cand_idx = nn_ent_idxs[men_query_idx][0]
dict_cand_score = nn_ent_dists[men_query_idx][0]
# Filter candidates to remove -1s, mention query, within doc (if reqd.), and keep only the top k candidates
filter_mask_neg1 = nn_men_idxs[men_query_idx] != -1
men_cand_idxs = nn_men_idxs[men_query_idx][filter_mask_neg1]
men_cand_scores = nn_men_dists[men_query_idx][filter_mask_neg1]
if within_doc:
men_cand_idxs, wd_mask = filter_by_context_doc_id(men_cand_idxs,
context_doc_ids[men_query_idx],
context_doc_ids, return_numpy=True)
men_cand_scores = men_cand_scores[wd_mask]
filter_mask = men_cand_idxs != men_query_idx
men_cand_idxs, men_cand_scores = men_cand_idxs[filter_mask][:max_knn], men_cand_scores[filter_mask][:max_knn]
# Add edges to the graphs
for k in joint_graphs:
joint_graph = joint_graphs[k]
# Add mention-entity edge
joint_graph['rows'] = np.append(
joint_graph['rows'], [n_entities+men_query_idx]) # Mentions added at an offset of maximum entities
joint_graph['cols'] = np.append(
joint_graph['cols'], dict_cand_idx)
joint_graph['data'] = np.append(
joint_graph['data'], dict_cand_score)
if k > 0:
# Add mention-mention edges
joint_graph['rows'] = np.append(
joint_graph['rows'], [n_entities+men_query_idx]*len(men_cand_idxs[:k]))
joint_graph['cols'] = np.append(
joint_graph['cols'], n_entities+men_cand_idxs[:k])
joint_graph['data'] = np.append(
joint_graph['data'], men_cand_scores[:k])
max_eval_acc = -1.
for k in joint_graphs:
logger.info(f"\nEval: Graph (k={k}):")
# Partition graph based on cluster-linking constraints
partitioned_graph, clusters = eval_cluster_linking.partition_graph(
joint_graphs[k], n_entities, directed=True, return_clusters=True)
# Infer predictions from clusters
result = eval_cluster_linking.analyzeClusters(clusters, entity_data, query_data, k)
acc = float(result['accuracy'].split(' ')[0])
max_eval_acc = max(acc, max_eval_acc)
logger.info(f"Eval: accuracy for graph@k={k}: {acc}%")
logger.info(f"Eval: Best accuracy: {max_eval_acc}%")
return max_eval_acc, {'dict_embeds': dict_embeds, 'dict_indexes': dict_indexes, 'dict_idxs_by_type': dict_idxs_by_type} if use_types else {'dict_embeds': dict_embeds, 'dict_index': dict_index}
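# Illustrative note (added for clarity; values are hypothetical): each `joint_graphs[k]`
# entry is a flat edge list over a node space in which entities occupy ids [0, n_entities)
# and mention m is node n_entities + m. With 3 entities and 2 mentions, a graph where
# mention 0 links to entity 1 (score 0.9) and mention 1 links back to mention 0 (score 0.7)
# would accumulate roughly:
#     rows = [3, 4], cols = [1, 3], data = [0.9, 0.7]
# which is the format eval_cluster_linking.partition_graph consumes above.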
def get_optimizer(model, params):
return get_bert_optimizer(
[model],
params["type_optimization"],
params["learning_rate"],
fp16=params.get("fp16"),
correct_bias=params["opt_bias_correction"],
)
def get_scheduler(params, optimizer, len_train_data, logger):
batch_size = params["train_batch_size"]
grad_acc = params["gradient_accumulation_steps"]
epochs = params["num_train_epochs"]
num_train_steps = int(len_train_data / batch_size / grad_acc) * epochs
num_warmup_steps = int(num_train_steps * params["warmup_proportion"])
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=num_warmup_steps, t_total=num_train_steps,
)
logger.info(" Num optimization steps = %d" % num_train_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return scheduler
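# Worked example (hypothetical numbers): with len_train_data=10000, a per-step
# train_batch_size of 32, gradient_accumulation_steps=4, num_train_epochs=5 and
# warmup_proportion=0.1, get_scheduler computes
#     num_train_steps  = int(10000 / 32 / 4) * 5 = 78 * 5 = 390
#     num_warmup_steps = int(390 * 0.1)           = 39
# so the learning rate warms up linearly for 39 optimizer steps and then decays to 0 by step 390.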
def load_optimizer_scheduler(params, logger):
optim_sched = None
model_path = params["path_to_model"]
if model_path is not None:
model_dir = os.path.dirname(model_path)
optim_sched_fpath = os.path.join(model_dir, utils.OPTIM_SCHED_FNAME)
if os.path.isfile(optim_sched_fpath):
logger.info(f'Loading stored optimizer and scheduler from {optim_sched_fpath}')
optim_sched = torch.load(optim_sched_fpath)
return optim_sched
def read_data(split, params, logger):
samples = utils.read_dataset(split, params["data_path"])
# Check if dataset has multiple ground-truth labels
has_mult_labels = "labels" in samples[0].keys()
if params["filter_unlabeled"]:
# Filter samples without gold entities
samples = list(
filter(lambda sample: (len(sample["labels"]) > 0) if has_mult_labels else (sample["label"] is not None),
samples))
logger.info("Read %d train samples." % len(samples))
return samples, has_mult_labels
def filter_by_context_doc_id(mention_idxs, doc_id, doc_id_list, return_numpy=False):
mask = [doc_id_list[i] == doc_id for i in mention_idxs]
if isinstance(mention_idxs, list):
mention_idxs = np.array(mention_idxs)
mention_idxs = mention_idxs[mask]
if not return_numpy:
mention_idxs = list(mention_idxs)
return mention_idxs, mask
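# Usage sketch (hypothetical values): with doc_id_list = [7, 7, 9],
#     filter_by_context_doc_id([0, 1, 2], 7, [7, 7, 9], return_numpy=True)
# builds mask = [True, True, False] and returns (np.array([0, 1]), mask), i.e. only the
# mention indices whose context document matches the query document are kept.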
def main(params):
model_output_path = params["output_path"]
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
logger = utils.get_logger(params["output_path"])
pickle_src_path = params["pickle_src_path"]
if pickle_src_path is None or not os.path.exists(pickle_src_path):
pickle_src_path = model_output_path
knn = params["knn"]
use_types = params["use_types"]
gold_arbo_knn = params["gold_arbo_knn"]
within_doc = params["within_doc"]
use_rand_negs = params["use_rand_negs"]
# Init model
reranker = BiEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = 1 if reranker.n_gpu == 0 else reranker.n_gpu
if params["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
params["gradient_accumulation_steps"]
)
)
    # An effective batch size of `x`, when accumulating gradients across `y` batches, is achieved with a per-step batch size of `z = x / y`
params["train_batch_size"] = (
params["train_batch_size"] // params["gradient_accumulation_steps"]
)
train_batch_size = params["train_batch_size"]
grad_acc_steps = params["gradient_accumulation_steps"]
# Fix the random seeds
seed = params["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
entity_dictionary_loaded = False
entity_dictionary_pkl_path = os.path.join(pickle_src_path, 'entity_dictionary.pickle')
train_samples = valid_samples = None
if os.path.isfile(entity_dictionary_pkl_path):
print("Loading stored processed entity dictionary...")
with open(entity_dictionary_pkl_path, 'rb') as read_handle:
entity_dictionary = pickle.load(read_handle)
entity_dictionary_loaded = True
if not entity_dictionary_loaded or not params["only_evaluate"]:
# Load train data
train_tensor_data_pkl_path = os.path.join(pickle_src_path, 'train_tensor_data.pickle')
train_processed_data_pkl_path = os.path.join(pickle_src_path, 'train_processed_data.pickle')
if entity_dictionary_loaded and os.path.isfile(train_tensor_data_pkl_path) and os.path.isfile(train_processed_data_pkl_path):
print("Loading stored processed train data...")
with open(train_tensor_data_pkl_path, 'rb') as read_handle:
train_tensor_data = pickle.load(read_handle)
with open(train_processed_data_pkl_path, 'rb') as read_handle:
train_processed_data = pickle.load(read_handle)
else:
if not entity_dictionary_loaded:
with open(os.path.join(params["data_path"], 'dictionary.pickle'), 'rb') as read_handle:
entity_dictionary = pickle.load(read_handle)
train_samples, mult_labels = read_data("train", params, logger)
# For discovery experiment: Drop entities used in training that were dropped randomly from dev/test set
if params["drop_entities"]:
assert entity_dictionary_loaded
                # Load either test_processed_data.pickle or valid_processed_data.pickle to first compute the unique
                # entities referenced by those mentions, and then drop 10% of those entities from the dictionary
drop_set_path = params["drop_set"] if params["drop_set"] is not None else os.path.join(pickle_src_path, 'drop_set_mention_data.pickle')
if not os.path.isfile(drop_set_path):
raise ValueError("Invalid or no --drop_set path provided to dev/test mention data")
with open(drop_set_path, 'rb') as read_handle:
drop_set_data = pickle.load(read_handle)
drop_set_mention_gold_cui_idxs = list(map(lambda x: x['label_idxs'][0], drop_set_data))
ents_in_data = np.unique(drop_set_mention_gold_cui_idxs)
ent_drop_prop = 0.1
logger.info(f"Dropping {ent_drop_prop*100}% of {len(ents_in_data)} entities found in drop set")
# Get entity indices to drop
n_ents_dropped = int(ent_drop_prop*len(ents_in_data))
rng = np.random.default_rng(seed=17)
dropped_ent_idxs = rng.choice(ents_in_data, size=n_ents_dropped, replace=False)
# Drop entities from dictionary (subsequent processing will automatically drop corresponding mentions)
keep_mask = np.ones(len(entity_dictionary), dtype='bool')
keep_mask[dropped_ent_idxs] = False
entity_dictionary = np.array(entity_dictionary)[keep_mask]
train_processed_data, entity_dictionary, train_tensor_data = data_process.process_mention_data(
train_samples,
entity_dictionary,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
context_key=params["context_key"],
multi_label_key="labels" if mult_labels else None,
silent=params["silent"],
logger=logger,
debug=params["debug"],
knn=knn,
dictionary_processed=entity_dictionary_loaded,
use_desc_summaries=params["use_desc_summaries"],
)
print("Saving processed train data...")
if not entity_dictionary_loaded:
with open(entity_dictionary_pkl_path, 'wb') as write_handle:
pickle.dump(entity_dictionary, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
with open(train_tensor_data_pkl_path, 'wb') as write_handle:
pickle.dump(train_tensor_data, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
with open(train_processed_data_pkl_path, 'wb') as write_handle:
pickle.dump(train_processed_data, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
# Store the query mention vectors
train_men_vecs = train_tensor_data[:][0]
if params["shuffle"]:
train_sampler = RandomSampler(train_tensor_data)
else:
train_sampler = SequentialSampler(train_tensor_data)
train_dataloader = DataLoader(
train_tensor_data, sampler=train_sampler, batch_size=train_batch_size
)
# Store the entity dictionary vectors
entity_dict_vecs = torch.tensor(list(map(lambda x: x['ids'], entity_dictionary)), dtype=torch.long)
# Load eval data
valid_tensor_data_pkl_path = os.path.join(pickle_src_path, 'valid_tensor_data.pickle')
valid_processed_data_pkl_path = os.path.join(pickle_src_path, 'valid_processed_data.pickle')
if os.path.isfile(valid_tensor_data_pkl_path) and os.path.isfile(valid_processed_data_pkl_path):
print("Loading stored processed valid data...")
with open(valid_tensor_data_pkl_path, 'rb') as read_handle:
valid_tensor_data = pickle.load(read_handle)
with open(valid_processed_data_pkl_path, 'rb') as read_handle:
valid_processed_data = pickle.load(read_handle)
else:
valid_samples, mult_labels = read_data("valid", params, logger)
valid_processed_data, _, valid_tensor_data = data_process.process_mention_data(
valid_samples,
entity_dictionary,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
context_key=params["context_key"],
multi_label_key="labels" if mult_labels else None,
silent=params["silent"],
logger=logger,
debug=params["debug"],
knn=knn,
dictionary_processed=True,
use_desc_summaries=params["use_desc_summaries"]
)
print("Saving processed valid data...")
with open(valid_tensor_data_pkl_path, 'wb') as write_handle:
pickle.dump(valid_tensor_data, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
with open(valid_processed_data_pkl_path, 'wb') as write_handle:
pickle.dump(valid_processed_data, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
# Store the query mention vectors
valid_men_vecs = valid_tensor_data[:][0]
train_context_doc_ids = valid_context_doc_ids = None
if within_doc:
# Store the context_doc_id for every mention in the train and valid sets
if train_samples is None:
train_samples, _ = read_data("train", params, logger)
train_context_doc_ids = [s['context_doc_id'] for s in train_samples]
if valid_samples is None:
valid_samples, _ = read_data("valid", params, logger)
        valid_context_doc_ids = [s['context_doc_id'] for s in valid_samples]
if params["only_evaluate"]:
evaluate(
reranker, entity_dict_vecs, valid_men_vecs, device=device, logger=logger, knn=knn, n_gpu=n_gpu,
entity_data=entity_dictionary, query_data=valid_processed_data, silent=params["silent"],
use_types=use_types or params["use_types_for_eval"], embed_batch_size=params["embed_batch_size"],
force_exact_search=use_types or params["use_types_for_eval"] or params["force_exact_search"],
probe_mult_factor=params['probe_mult_factor'], within_doc=within_doc, context_doc_ids=valid_context_doc_ids
)
exit()
# Get clusters of mentions that map to a gold entity
train_gold_clusters = data_process.compute_gold_clusters(train_processed_data)
max_gold_cluster_len = 0
for ent in train_gold_clusters:
if len(train_gold_clusters[ent]) > max_gold_cluster_len:
max_gold_cluster_len = len(train_gold_clusters[ent])
n_entities = len(entity_dictionary)
n_mentions = len(train_processed_data)
time_start = time.time()
utils.write_to_file(
os.path.join(model_output_path, "training_params.txt"), str(params)
)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, data_parallel: {}".format(device, n_gpu, params["data_parallel"])
)
# Set model to training mode
optim_sched, optimizer, scheduler = load_optimizer_scheduler(params, logger), None, None
if optim_sched is None:
optimizer = get_optimizer(model, params)
scheduler = get_scheduler(params, optimizer, len(train_tensor_data), logger)
else:
optimizer = optim_sched['optimizer']
scheduler = optim_sched['scheduler']
best_epoch_idx = -1
best_score = -1
best_during_training, best_during_training_epoch, best_during_training_pctg = -1, -1, -1
num_train_epochs = params["num_train_epochs"]
    init_base_model_run = params.get("path_to_model", None) is None
init_run_pkl_path = os.path.join(pickle_src_path, f'init_run_{"type" if use_types else "notype"}.t7')
dict_embed_data = None
# Do an initial eval for baseline in order to determine if during-training models should be saved or not
if params["save_interval"] != -1:
best_during_training, _ = evaluate(
reranker, entity_dict_vecs, valid_men_vecs, device=device, logger=logger, knn=knn, n_gpu=n_gpu,
entity_data=entity_dictionary, query_data=valid_processed_data, silent=params["silent"],
use_types=use_types or params["use_types_for_eval"], embed_batch_size=params["embed_batch_size"],
force_exact_search=use_types or params["use_types_for_eval"] or params["force_exact_search"],
probe_mult_factor=params['probe_mult_factor'], within_doc=within_doc, context_doc_ids=valid_context_doc_ids
)
logger.info(f"Baseline evaluation: {best_during_training} %")
for epoch_idx in trange(int(num_train_epochs), desc="Epoch"):
model.train()
torch.cuda.empty_cache()
tr_loss = 0
# Check if embeddings and index can be loaded
init_run_data_loaded = False
if init_base_model_run:
if os.path.isfile(init_run_pkl_path):
logger.info('Loading init run data')
init_run_data = torch.load(init_run_pkl_path)
init_run_data_loaded = True
load_stored_data = init_base_model_run and init_run_data_loaded
# Compute mention and entity embeddings at the start of each epoch
if use_types:
if load_stored_data:
train_dict_embeddings, dict_idxs_by_type = init_run_data['train_dict_embeddings'], init_run_data['dict_idxs_by_type']
train_dict_indexes = data_process.get_index_from_embeds(train_dict_embeddings, dict_idxs_by_type, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
train_men_embeddings, men_idxs_by_type = init_run_data['train_men_embeddings'], init_run_data['men_idxs_by_type']
train_men_indexes = data_process.get_index_from_embeds(train_men_embeddings, men_idxs_by_type, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
else:
logger.info('Embedding and indexing')
if dict_embed_data is not None:
train_dict_embeddings, train_dict_indexes, dict_idxs_by_type = dict_embed_data['dict_embeds'], dict_embed_data['dict_indexes'], dict_embed_data['dict_idxs_by_type']
else:
train_dict_embeddings, train_dict_indexes, dict_idxs_by_type = data_process.embed_and_index(reranker, entity_dict_vecs, encoder_type="candidate", n_gpu=n_gpu, corpus=entity_dictionary, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
train_men_embeddings, train_men_indexes, men_idxs_by_type = data_process.embed_and_index(reranker, train_men_vecs, encoder_type="context", n_gpu=n_gpu, corpus=train_processed_data, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
else:
if load_stored_data:
train_dict_embeddings = init_run_data['train_dict_embeddings']
train_dict_index = data_process.get_index_from_embeds(train_dict_embeddings, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
train_men_embeddings = init_run_data['train_men_embeddings']
train_men_index = data_process.get_index_from_embeds(train_men_embeddings, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
else:
logger.info('Embedding and indexing')
if dict_embed_data is not None:
train_dict_embeddings, train_dict_index = dict_embed_data['dict_embeds'], dict_embed_data['dict_index']
else:
train_dict_embeddings, train_dict_index = data_process.embed_and_index(reranker, entity_dict_vecs, encoder_type="candidate", n_gpu=n_gpu, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
train_men_embeddings, train_men_index = data_process.embed_and_index(reranker, train_men_vecs, encoder_type="context", n_gpu=n_gpu, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
# Save the initial embeddings and index if this is the first run and data isn't persistent
if init_base_model_run and not load_stored_data:
init_run_data = {}
init_run_data['train_dict_embeddings'] = train_dict_embeddings
init_run_data['train_men_embeddings'] = train_men_embeddings
if use_types:
init_run_data['dict_idxs_by_type'] = dict_idxs_by_type
init_run_data['men_idxs_by_type'] = men_idxs_by_type
# NOTE: Cannot pickle faiss index because it is a SwigPyObject
torch.save(init_run_data, init_run_pkl_path, pickle_protocol=4)
init_base_model_run = False
if params["silent"]:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader, desc="Batch")
# Store golden MST links
gold_links = {}
# Calculate the number of negative entities and mentions to fetch
knn_dict = knn//2
knn_men = knn - knn_dict
logger.info("Starting KNN search...")
# INFO: Fetching all sorted mentions to be able to filter to within-doc later
n_men_to_fetch = len(train_men_embeddings) if within_doc else knn_men + max_gold_cluster_len
n_ent_to_fetch = knn_dict + 1
if not use_types:
_, dict_nns = train_dict_index.search(train_men_embeddings, n_ent_to_fetch)
_, men_nns = train_men_index.search(train_men_embeddings, n_men_to_fetch)
else:
dict_nns = -1 * np.ones((len(train_men_embeddings), n_ent_to_fetch))
men_nns = -1 * np.ones((len(train_men_embeddings), n_men_to_fetch))
for entity_type in train_men_indexes:
men_embeds_by_type = train_men_embeddings[men_idxs_by_type[entity_type]]
_, dict_nns_by_type = train_dict_indexes[entity_type].search(men_embeds_by_type, n_ent_to_fetch)
_, men_nns_by_type = train_men_indexes[entity_type].search(men_embeds_by_type, min(n_men_to_fetch, len(men_embeds_by_type)))
dict_nns_idxs = np.array(list(map(lambda x: dict_idxs_by_type[entity_type][x], dict_nns_by_type)))
men_nns_idxs = np.array(list(map(lambda x: men_idxs_by_type[entity_type][x], men_nns_by_type)))
for i, idx in enumerate(men_idxs_by_type[entity_type]):
dict_nns[idx] = dict_nns_idxs[i]
men_nns[idx][:len(men_nns_idxs[i])] = men_nns_idxs[i]
logger.info("Search finished")
total_skipped = total_knn_men_negs = 0
for step, batch in enumerate(iter_):
knn_men = knn - knn_dict
batch = tuple(t.to(device) for t in batch)
batch_context_inputs, candidate_idxs, n_gold, mention_idxs = batch
mention_embeddings = train_men_embeddings[mention_idxs.cpu()]
if len(mention_embeddings.shape) == 1:
mention_embeddings = np.expand_dims(mention_embeddings, axis=0)
# batch_context_inputs: Shape: batch x token_len
# candidate_inputs = []
# candidate_inputs = np.array([], dtype=np.long) # Shape: (batch*knn) x token_len
# label_inputs = (candidate_idxs >= 0).type(torch.float32) # Shape: batch x knn
positive_idxs = []
negative_dict_inputs = []
negative_men_inputs = []
skipped_positive_idxs = []
skipped_negative_dict_inputs = []
min_neg_mens = float('inf')
skipped = 0
context_inputs_mask = [True]*len(batch_context_inputs)
for m_embed_idx, m_embed in enumerate(mention_embeddings):
mention_idx = int(mention_idxs[m_embed_idx])
gold_idxs = set(train_processed_data[mention_idx]['label_idxs'][:n_gold[m_embed_idx]])
                # TEMPORARY: Assumes there is exactly one gold label per mention; TODO: handle the multi-label case
assert n_gold[m_embed_idx] == 1
if mention_idx in gold_links:
gold_link_idx = gold_links[mention_idx]
else:
# Run MST on mention clusters of all the gold entities of the current query mention to find its
# positive edge
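                    # Toy sketch of the construction below (hypothetical scores): for a gold
                    # cluster with entity 0 and mentions {5, 6} (graph nodes n_entities+5 and
                    # n_entities+6), the edge lists end up as
                    #     rows = [n_entities+5, n_entities+5, n_entities+6]
                    #     cols = [0,            n_entities+6, 0           ]
                    #     data = [-0.9,         -0.8,         -0.7        ]
                    # which feed csr_matrix and minimum_spanning_tree; negating the similarity
                    # scores makes SciPy's minimum spanning tree act as a maximum spanning tree
                    # over similarities.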
rows, cols, data, shape = [], [], [], (n_entities+n_mentions, n_entities+n_mentions)
seen = set()
for cluster_ent in gold_idxs:
cluster_mens = train_gold_clusters[cluster_ent]
if within_doc:
# Filter the gold cluster to within-doc
cluster_mens, _ = filter_by_context_doc_id(cluster_mens,
train_context_doc_ids[mention_idx],
train_context_doc_ids)
to_ent_data = train_men_embeddings[cluster_mens] @ train_dict_embeddings[cluster_ent].T
to_men_data = train_men_embeddings[cluster_mens] @ train_men_embeddings[cluster_mens].T
if gold_arbo_knn is not None:
sorti = np.argsort(-to_men_data, axis=1)
sortv = np.take_along_axis(to_men_data, sorti, axis=1)
if params["rand_gold_arbo"]:
randperm = np.random.permutation(sorti.shape[1])
sortv, sorti = sortv[:, randperm], sorti[:, randperm]
for i in range(len(cluster_mens)):
from_node = n_entities + cluster_mens[i]
to_node = cluster_ent
# Add mention-entity link
rows.append(from_node)
cols.append(to_node)
data.append(-1 * to_ent_data[i])
if gold_arbo_knn is None:
# Add forward and reverse mention-mention links over the entire MST
for j in range(i+1, len(cluster_mens)):
to_node = n_entities + cluster_mens[j]
if (from_node, to_node) not in seen:
score = to_men_data[i,j]
rows.append(from_node)
cols.append(to_node)
data.append(-1 * score) # Negatives needed for SciPy's Minimum Spanning Tree computation
seen.add((from_node, to_node))
seen.add((to_node, from_node))
else:
# Approximate the MST using <gold_arbo_knn> nearest mentions from the gold cluster
added = 0
approx_k = min(gold_arbo_knn+1, len(cluster_mens))
for j in range(approx_k):
if added == approx_k - 1:
break
to_node = n_entities + cluster_mens[sorti[i, j]]
if to_node == from_node:
continue
added += 1
if (from_node, to_node) not in seen:
score = sortv[i, j]
rows.append(from_node)
cols.append(to_node)
data.append(
-1 * score) # Negatives needed for SciPy's Minimum Spanning Tree computation
seen.add((from_node, to_node))
# Find MST with entity constraint
csr = csr_matrix((data, (rows, cols)), shape=shape)
mst = minimum_spanning_tree(csr).tocoo()
rows = []
if len(mst.row) != 0:
rows, cols, data = cluster_linking_partition(np.concatenate((mst.row, mst.col)),
np.concatenate((mst.col,mst.row)),
np.concatenate((-mst.data, -mst.data)),
n_entities,
directed=True,
silent=True)
# assert np.array_equal(rows - n_entities, cluster_mens)
for i in range(len(rows)):
men_idx = rows[i] - n_entities
if men_idx in gold_links:
continue
assert men_idx >= 0
add_link = True
# Store the computed positive edges for the mentions in the clusters only if they have the same gold entities as the query mention
for l in train_processed_data[men_idx]['label_idxs'][:train_processed_data[men_idx]['n_labels']]:
if l not in gold_idxs:
add_link = False
break
if add_link:
gold_links[men_idx] = cols[i]
# FIX: Add mention-to-entity edge for those mentions skipped during MST call
for i in range(len(cluster_mens)):
men_idx = cluster_mens[i]
if men_idx in gold_links:
continue
gold_links[men_idx] = cluster_ent
gold_link_idx = gold_links[mention_idx]
# Add the positive example
positive_idxs.append(gold_link_idx)
if not use_rand_negs:
# Retrieve the pre-computed nearest neighbours
knn_dict_idxs = dict_nns[mention_idx]
knn_dict_idxs = knn_dict_idxs.astype(np.int64).flatten()
knn_men_idxs = men_nns[mention_idx][men_nns[mention_idx] != -1]
knn_men_idxs = knn_men_idxs.astype(np.int64).flatten()
if within_doc:
knn_men_idxs, _ = filter_by_context_doc_id(knn_men_idxs,
train_context_doc_ids[mention_idx],
train_context_doc_ids, return_numpy=True)
# Add the negative examples
neg_mens = list(knn_men_idxs[~np.isin(knn_men_idxs, np.concatenate([train_gold_clusters[gi] for gi in gold_idxs]))][:knn_men])
# Track queries with no valid mention negatives
if len(neg_mens) == 0:
context_inputs_mask[m_embed_idx] = False
skipped_negative_dict_inputs += list(knn_dict_idxs[~np.isin(knn_dict_idxs, list(gold_idxs))][:knn_dict])
skipped_positive_idxs.append(gold_link_idx)
skipped += 1
continue
else:
min_neg_mens = min(min_neg_mens, len(neg_mens))
negative_men_inputs.append(knn_men_idxs[~np.isin(knn_men_idxs, np.concatenate([train_gold_clusters[gi] for gi in gold_idxs]))][:knn_men])
negative_dict_inputs += list(knn_dict_idxs[~np.isin(knn_dict_idxs, list(gold_idxs))][:knn_dict])
positive_embeds = []
for pos_idx in positive_idxs:
if pos_idx < n_entities:
pos_embed = reranker.encode_candidate(entity_dict_vecs[pos_idx:pos_idx + 1].cuda(), requires_grad=True)
else:
pos_embed = reranker.encode_context(train_men_vecs[pos_idx - n_entities:pos_idx - n_entities + 1].cuda(), requires_grad=True)
positive_embeds.append(pos_embed)
positive_embeds = torch.cat(positive_embeds)
context_inputs = batch_context_inputs[context_inputs_mask]
context_inputs = context_inputs.cuda()
if use_rand_negs:
loss, _ = reranker(context_inputs, mst_data={'positive_embeds': positive_embeds.cuda()}, rand_negs=True)
else:
if len(negative_men_inputs) == 0:
continue
knn_men = min_neg_mens
filtered_negative_men_inputs = []
for row in negative_men_inputs:
filtered_negative_men_inputs += list(row[:knn_men])
negative_men_inputs = filtered_negative_men_inputs
assert len(negative_dict_inputs) == (len(mention_embeddings) - skipped) * knn_dict
assert len(negative_men_inputs) == (len(mention_embeddings) - skipped) * knn_men
total_skipped += skipped
total_knn_men_negs += knn_men
negative_dict_inputs = torch.tensor(list(map(lambda x: entity_dict_vecs[x].numpy(), negative_dict_inputs)))
negative_men_inputs = torch.tensor(list(map(lambda x: train_men_vecs[x].numpy(), negative_men_inputs)))
label_inputs = torch.tensor([[1]+[0]*(knn_dict+knn_men)]*len(context_inputs), dtype=torch.float32).cuda()
loss_dual_negs = loss_ent_negs = 0
            # FIX: guard against the error case of having fewer examples than GPUs when using DataParallel
data_parallel_batch_size_check = negative_men_inputs.shape[0] >= n_gpu and negative_dict_inputs.shape[0] >= n_gpu
if data_parallel_batch_size_check:
loss_dual_negs, _ = reranker(context_inputs, label_input=label_inputs, mst_data={
'positive_embeds': positive_embeds.cuda(),
'negative_dict_inputs': negative_dict_inputs.cuda(),
'negative_men_inputs': negative_men_inputs.cuda()
}, pos_neg_loss=params["pos_neg_loss"])
skipped_context_inputs = []
if skipped > 0 and not params["within_doc_skip_strategy"]:
skipped_negative_dict_inputs = torch.tensor(
list(map(lambda x: entity_dict_vecs[x].numpy(), skipped_negative_dict_inputs)))
skipped_positive_embeds = []
for pos_idx in skipped_positive_idxs:
if pos_idx < n_entities:
pos_embed = reranker.encode_candidate(entity_dict_vecs[pos_idx:pos_idx + 1].cuda(),
requires_grad=True)
else:
pos_embed = reranker.encode_context(
train_men_vecs[pos_idx - n_entities:pos_idx - n_entities + 1].cuda(), requires_grad=True)
skipped_positive_embeds.append(pos_embed)
skipped_positive_embeds = torch.cat(skipped_positive_embeds)
skipped_context_inputs = batch_context_inputs[~np.array(context_inputs_mask)]
skipped_context_inputs = skipped_context_inputs.cuda()
skipped_label_inputs = torch.tensor([[1] + [0] * (knn_dict)] * len(skipped_context_inputs),
dtype=torch.float32).cuda()
data_parallel_batch_size_check = skipped_negative_dict_inputs.shape[0] >= n_gpu
if data_parallel_batch_size_check:
loss_ent_negs, _ = reranker(skipped_context_inputs, label_input=skipped_label_inputs, mst_data={
'positive_embeds': skipped_positive_embeds.cuda(),
'negative_dict_inputs': skipped_negative_dict_inputs.cuda(),
'negative_men_inputs': None
}, pos_neg_loss=params["pos_neg_loss"])
loss = ((loss_dual_negs * len(context_inputs) + loss_ent_negs * len(skipped_context_inputs)) / (len(context_inputs) + len(skipped_context_inputs))) / grad_acc_steps
if isinstance(loss, torch.Tensor):
tr_loss += loss.item()
loss.backward()
n_print_iters = params["print_interval"] * grad_acc_steps
if (step + 1) % n_print_iters == 0:
logger.info(
"Step {} - epoch {} average loss: {}".format(
step,
epoch_idx,
tr_loss / n_print_iters,
)
)
if total_skipped > 0:
logger.info(
f"Queries per batch w/o mention negs={total_skipped / n_print_iters}/{len(mention_embeddings)}; Negative mentions per query per batch={total_knn_men_negs / n_print_iters} ")
total_skipped = 0
total_knn_men_negs = 0
tr_loss = 0
if (step + 1) % grad_acc_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), params["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if params["eval_interval"] != -1:
if (step + 1) % (params["eval_interval"] * grad_acc_steps) == 0:
logger.info("Evaluation on the development dataset")
eval_accuracy, _ = evaluate(
reranker, entity_dict_vecs, valid_men_vecs, device=device, logger=logger, knn=knn, n_gpu=n_gpu,
entity_data=entity_dictionary, query_data=valid_processed_data, silent=params["silent"],
use_types=use_types or params["use_types_for_eval"], embed_batch_size=params["embed_batch_size"],
force_exact_search=use_types or params["use_types_for_eval"] or params["force_exact_search"],
probe_mult_factor=params['probe_mult_factor'], within_doc=within_doc,
context_doc_ids=valid_context_doc_ids
)
if params["save_interval"] != -1:
if eval_accuracy > best_during_training:
best_during_training = eval_accuracy
best_during_training_epoch = epoch_idx
best_during_training_pctg = (step+1)/len(train_dataloader) * 100
logger.info(f"New best accuracy on the development dataset: {best_during_training} %")
intermediate_output_path = os.path.join(model_output_path, "best_model")
utils.save_model(model, tokenizer, intermediate_output_path)
logger.info(f"Model saved at {intermediate_output_path}")
model.train()
logger.info("\n")
logger.info("***** Saving fine-tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
logger.info(f"Model saved at {epoch_output_folder_path}")
eval_accuracy, dict_embed_data = evaluate(
reranker, entity_dict_vecs, valid_men_vecs, device=device, logger=logger, knn=knn, n_gpu=n_gpu,
entity_data=entity_dictionary, query_data=valid_processed_data, silent=params["silent"],
use_types=use_types or params["use_types_for_eval"], embed_batch_size=params["embed_batch_size"],
force_exact_search=use_types or params["use_types_for_eval"] or params["force_exact_search"],
probe_mult_factor=params['probe_mult_factor'], within_doc=within_doc, context_doc_ids=valid_context_doc_ids
)
ls = [best_score, eval_accuracy]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
if best_score > best_during_training:
logger.info(f"Best performance in epoch: {best_epoch_idx} - {best_score} %")
logger.info(f"Best model saved at {os.path.join(model_output_path, f'epoch_{best_epoch_idx}')}")
else:
logger.info(f"Best performance in epoch: {best_during_training_epoch} ({best_during_training_pctg:.1f}%) - {best_during_training} %")
logger.info(f"Best model saved at {os.path.join(model_output_path, 'best_model')}")
# params["path_to_model"] = os.path.join(
# model_output_path, "epoch_{}".format(best_epoch_idx)
# )
# utils.save_model(reranker.model, tokenizer, model_output_path)
# logger.info(f"Best model saved at {model_output_path}")
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_training_args()
args = parser.parse_args()
print(args)
main(args.__dict__)
| 55.132488
| 339
| 0.623738
|
b6ab59ed3564ca3a7ad61edeee95de68a4bf5444
| 21,002
|
py
|
Python
|
mrjob/parse.py
|
Milstein/mrjob
|
226a741548cf125ecfb549b7c50d52cda932d045
|
[
"Apache-2.0"
] | 1
|
2015-02-22T00:12:25.000Z
|
2015-02-22T00:12:25.000Z
|
mrjob/parse.py
|
kod3r/mrjob
|
226a741548cf125ecfb549b7c50d52cda932d045
|
[
"Apache-2.0"
] | 1
|
2021-03-26T00:47:44.000Z
|
2021-03-26T00:47:44.000Z
|
mrjob/parse.py
|
Milstein/mrjob
|
226a741548cf125ecfb549b7c50d52cda932d045
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009-2012 Yelp
# Copyright 2013 Steve Johnson and David Marin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for parsing errors, counters, and status messages."""
from datetime import datetime
from functools import wraps
import logging
import re
import time
from urlparse import ParseResult
from urlparse import urlparse as urlparse_buggy
try:
from cStringIO import StringIO
StringIO # quiet "redefinition of unused ..." warning from pyflakes
except ImportError:
from StringIO import StringIO
from mrjob.compat import uses_020_counters
try:
import boto.utils
except ImportError:
# don't require boto; MRJobs don't actually need it when running
# inside hadoop streaming
boto = None
# match the filename of a hadoop streaming jar
HADOOP_STREAMING_JAR_RE = re.compile(r'^hadoop.*streaming.*(?<!-sources)\.jar$')
# match an mrjob job name (these are used to name EMR job flows)
JOB_NAME_RE = re.compile(r'^(.*)\.(.*)\.(\d+)\.(\d+)\.(\d+)$')
# match an mrjob step name (these are used to name steps in EMR)
STEP_NAME_RE = re.compile(
r'^(.*)\.(.*)\.(\d+)\.(\d+)\.(\d+): Step (\d+) of (\d+)$')
log = logging.getLogger(__name__)
### URI PARSING ###
# Used to parse the real netloc out of a malformed path from Python 2.5
# urlparse()
NETLOC_RE = re.compile(r'//(.*?)((/.*?)?)$')
# Used to check if the candidate uri is actually a local windows path.
WINPATH_RE = re.compile(r"^[a-zA-Z]:\\")
def is_windows_path(uri):
"""Return True if *uri* is a windows path."""
if WINPATH_RE.match(uri):
return True
else:
return False
def is_uri(uri):
"""Return True if *uri* is any sort of URI."""
if is_windows_path(uri):
return False
return bool(urlparse(uri).scheme)
def is_s3_uri(uri):
"""Return True if *uri* can be parsed into an S3 URI, False otherwise."""
try:
parse_s3_uri(uri)
return True
except ValueError:
return False
def parse_s3_uri(uri):
"""Parse an S3 URI into (bucket, key)
>>> parse_s3_uri('s3://walrus/tmp/')
('walrus', 'tmp/')
If ``uri`` is not an S3 URI, raise a ValueError
"""
components = urlparse(uri)
if (components.scheme not in ('s3', 's3n')
or '/' not in components.path):
raise ValueError('Invalid S3 URI: %s' % uri)
return components.netloc, components.path[1:]
@wraps(urlparse_buggy)
def urlparse(urlstring, scheme='', allow_fragments=True, *args, **kwargs):
"""A wrapper for :py:func:`urlparse.urlparse` with the following
differences:
* Handles buckets in S3 URIs correctly. (:py:func:`~urlparse.urlparse`
does this correctly sometime after 2.6.1; this is just a patch for older
Python versions.)
* Splits the fragment correctly in all URIs, not just Web-related ones.
This behavior was fixed in the Python 2.7.4 standard library but we have
to back-port it for previous versions.
"""
# we're probably going to mess with at least one of these values and
# re-pack the whole thing before we return it.
# NB: urlparse_buggy()'s second argument changes names from
# 'default_scheme' to 'scheme' in Python 2.6, so urlparse_buggy() should
# be called with positional arguments.
(scheme, netloc, path, params, query, fragment) = (
urlparse_buggy(urlstring, scheme, allow_fragments, *args, **kwargs))
if netloc == '' and path.startswith('//'):
m = NETLOC_RE.match(path)
netloc = m.group(1)
path = m.group(2)
if allow_fragments and '#' in path and not fragment:
path, fragment = path.split('#', 1)
return ParseResult(scheme, netloc, path, params, query, fragment)
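# Illustrative example (hypothetical URI): with this wrapper,
#     urlparse('s3://walrus/tmp/logs#part-00000')
# yields scheme 's3', netloc 'walrus', path '/tmp/logs' and fragment 'part-00000',
# even on older Pythons whose stock urlparse mishandles the bucket or the fragment.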
### OPTION PARSING ###
def parse_port_range_list(range_list_str):
"""Parse a port range list of the form (start[:end])(,(start[:end]))*"""
all_ranges = []
for range_str in range_list_str.split(','):
if ':' in range_str:
a, b = [int(x) for x in range_str.split(':')]
all_ranges.extend(xrange(a, b + 1))
else:
all_ranges.append(int(range_str))
return all_ranges
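# Illustrative example (hypothetical input):
#     parse_port_range_list('40001:40003,40005') -> [40001, 40002, 40003, 40005]
# A bare number adds a single port, while 'start:end' expands to the inclusive range.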
def parse_key_value_list(kv_string_list, error_fmt, error_func):
"""Parse a list of strings like ``KEY=VALUE`` into a dictionary.
    :param kv_string_list: the list of ``KEY=VALUE`` strings to parse
:type kv_string_list: [str]
:param error_fmt: Format string accepting one ``%s`` argument which is the
malformed (i.e. not ``KEY=VALUE``) string
:type error_fmt: str
:param error_func: Function to call when a malformed string is encountered.
:type error_func: function(str)
"""
ret = {}
for value in kv_string_list:
try:
k, v = value.split('=', 1)
ret[k] = v
except ValueError:
error_func(error_fmt % (value,))
return ret
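# Illustrative example (hypothetical input): calling
#     parse_key_value_list(['FOO=bar', 'BAZ=1', 'oops'], 'Malformed option: %s', log.warn)
# returns {'FOO': 'bar', 'BAZ': '1'} and reports 'Malformed option: oops' via log.warn,
# since 'oops' has no '=' separator.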
### LOG PARSING ###
_HADOOP_0_20_ESCAPED_CHARS_RE = re.compile(r'\\([.(){}[\]"\\])')
def counter_unescape(escaped_string):
"""Fix names of counters and groups emitted by Hadoop 0.20+ logs, which
use escape sequences for more characters than most decoders know about
(e.g. ``().``).
:param escaped_string: string from a counter log line
:type escaped_string: str
"""
escaped_string = escaped_string.decode('string_escape')
escaped_string = _HADOOP_0_20_ESCAPED_CHARS_RE.sub(r'\1', escaped_string)
return escaped_string
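# Illustrative example (hypothetical input):
#     counter_unescape(r'Map-Reduce Framework \(2009\)') -> 'Map-Reduce Framework (2009)'
# i.e. the Hadoop-specific backslash escapes around characters like parentheses are removed.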
def find_python_traceback(lines):
"""Scan a log file or other iterable for a Python traceback,
and return it as a list of lines.
In logs from EMR, we find python tracebacks in ``task-attempts/*/stderr``
"""
# Lines to pass back representing entire error found
all_tb_lines = []
# This is used to store a working list of lines in a single traceback
tb_lines = []
# This is used to store a working list of non-traceback lines between the
# current traceback and the previous one
non_tb_lines = []
# Track whether or not we are in a traceback rather than consuming the
# iterator
in_traceback = False
for line in lines:
if in_traceback:
tb_lines.append(line)
# If no indentation, this is the last line of the traceback
if line.lstrip() == line:
in_traceback = False
if line.startswith('subprocess.CalledProcessError'):
# CalledProcessError may mean that the subprocess printed
# errors to stderr which we can show the user
all_tb_lines += non_tb_lines
all_tb_lines += tb_lines
# Reset all working lists
tb_lines = []
non_tb_lines = []
else:
if line.startswith('Traceback (most recent call last):'):
tb_lines.append(line)
in_traceback = True
else:
non_tb_lines.append(line)
if all_tb_lines:
return all_tb_lines
else:
return None
def find_hadoop_java_stack_trace(lines):
"""Scan a log file or other iterable for a java stack trace from Hadoop,
and return it as a list of lines.
In logs from EMR, we find java stack traces in ``task-attempts/*/syslog``
Sample stack trace::
2010-07-27 18:25:48,397 WARN org.apache.hadoop.mapred.TaskTracker (main): Error running child
java.lang.OutOfMemoryError: Java heap space
at org.apache.hadoop.mapred.IFile$Reader.readNextBlock(IFile.java:270)
at org.apache.hadoop.mapred.IFile$Reader.next(IFile.java:332)
at org.apache.hadoop.mapred.Merger$Segment.next(Merger.java:147)
at org.apache.hadoop.mapred.Merger$MergeQueue.adjustPriorityQueue(Merger.java:238)
at org.apache.hadoop.mapred.Merger$MergeQueue.next(Merger.java:255)
at org.apache.hadoop.mapred.Merger.writeFile(Merger.java:86)
at org.apache.hadoop.mapred.Merger$MergeQueue.merge(Merger.java:377)
at org.apache.hadoop.mapred.Merger.merge(Merger.java:58)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:277)
at org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2216)
(We omit the "Error running child" line from the results)
"""
for line in lines:
if line.rstrip('\r\n').endswith("Error running child"):
st_lines = []
            for line in lines:
                st_lines.append(line)
                break
for line in lines:
if not line.startswith(' at '):
break
st_lines.append(line)
return st_lines
else:
return None
_OPENING_FOR_READING_RE = re.compile("^.*: Opening '(.*)' for reading$")
def find_input_uri_for_mapper(lines):
"""Scan a log file or other iterable for the path of an input file
for the first mapper on Hadoop. Just returns the path, or None if
no match.
    In logs from EMR, we find these lines in ``task-attempts/*/syslog``
Matching log lines look like::
2010-07-27 17:54:54,344 INFO org.apache.hadoop.fs.s3native.NativeS3FileSystem (main): Opening 's3://yourbucket/logs/2010/07/23/log2-00077.gz' for reading
"""
val = None
for line in lines:
match = _OPENING_FOR_READING_RE.match(line)
if match:
val = match.group(1)
return val
_HADOOP_STREAMING_ERROR_RE = re.compile(
r'^.*ERROR org\.apache\.hadoop\.streaming\.StreamJob \(main\): (.*)$')
_HADOOP_STREAMING_ERROR_RE_2 = re.compile(r'^(.*does not exist.*)$')
def find_interesting_hadoop_streaming_error(lines):
"""Scan a log file or other iterable for a hadoop streaming error
other than "Job not Successful!". Return the error as a string, or None
if nothing found.
In logs from EMR, we find java stack traces in ``steps/*/syslog``
Example line::
2010-07-27 19:53:35,451 ERROR org.apache.hadoop.streaming.StreamJob (main): Error launching job , Output path already exists : Output directory s3://yourbucket/logs/2010/07/23/ already exists and is not empty
"""
for line in lines:
match = (
_HADOOP_STREAMING_ERROR_RE.match(line) or
_HADOOP_STREAMING_ERROR_RE_2.match(line))
if match:
msg = match.group(1)
if msg != 'Job not Successful!':
return msg
return None
_MULTILINE_JOB_LOG_ERROR_RE = re.compile(
r'^\w+Attempt.*?TASK_STATUS="FAILED".*?ERROR="(?P<first_line>[^"]*)$')
def find_job_log_multiline_error(lines):
"""Scan a log file for an arbitrary multi-line error. Return it as a list
    of lines, or None if nothing was found.
Here is an example error::
MapAttempt TASK_TYPE="MAP" TASKID="task_201106280040_0001_m_000218" TASK_ATTEMPT_ID="attempt_201106280040_0001_m_000218_5" TASK_STATUS="FAILED" FINISH_TIME="1309246900665" HOSTNAME="/default-rack/ip-10-166-239-133.us-west-1.compute.internal" ERROR="Error initializing attempt_201106280040_0001_m_000218_5:
java.io.IOException: Cannot run program "bash": java.io.IOException: error=12, Cannot allocate memory
at java.lang.ProcessBuilder.start(ProcessBuilder.java:460)
at org.apache.hadoop.util.Shell.runCommand(Shell.java:149)
at org.apache.hadoop.util.Shell.run(Shell.java:134)
at org.apache.hadoop.fs.DF.getAvailable(DF.java:73)
at org.apache.hadoop.fs.LocalDirAllocator$AllocatorPerContext.getLocalPathForWrite(LocalDirAllocator.java:296)
at org.apache.hadoop.fs.LocalDirAllocator.getLocalPathForWrite(LocalDirAllocator.java:124)
at org.apache.hadoop.mapred.TaskTracker.localizeJob(TaskTracker.java:648)
at org.apache.hadoop.mapred.TaskTracker.startNewTask(TaskTracker.java:1320)
at org.apache.hadoop.mapred.TaskTracker.offerService(TaskTracker.java:956)
at org.apache.hadoop.mapred.TaskTracker.run(TaskTracker.java:1357)
at org.apache.hadoop.mapred.TaskTracker.main(TaskTracker.java:2361)
Caused by: java.io.IOException: java.io.IOException: error=12, Cannot allocate memory
at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
at java.lang.ProcessImpl.start(ProcessImpl.java:65)
at java.lang.ProcessBuilder.start(ProcessBuilder.java:453)
... 10 more
"
The first line returned will only include the text after ``ERROR="``, and
discard the final line with just ``"``.
These errors are parsed from jobs/\*.jar.
"""
for line in lines:
m = _MULTILINE_JOB_LOG_ERROR_RE.match(line)
if m:
st_lines = []
if m.group('first_line'):
st_lines.append(m.group('first_line'))
            for line in lines:
                st_lines.append(line)
                break
for line in lines:
if line.strip() == '"':
break
st_lines.append(line)
return st_lines
return None
_TIMEOUT_ERROR_RE = re.compile(
r'.*?TASK_STATUS="FAILED".*?ERROR=".*?failed to report status for (\d+)'
r' seconds.*?"')
def find_timeout_error(lines):
"""Scan a log file or other iterable for a timeout error from Hadoop.
Return the number of seconds the job ran for before timing out, or None if
nothing found.
    In logs from EMR, we find timeout errors in ``jobs/*.jar``
Example line::
Task TASKID="task_201010202309_0001_m_000153" TASK_TYPE="MAP" TASK_STATUS="FAILED" FINISH_TIME="1287618918658" ERROR="Task attempt_201010202309_0001_m_000153_3 failed to report status for 602 seconds. Killing!"
"""
result = None
for line in lines:
match = _TIMEOUT_ERROR_RE.match(line)
if match:
result = match.group(1)
if result is None:
return None
else:
return int(result)
# recognize hadoop streaming output
_COUNTER_RE = re.compile(r'^reporter:counter:([^,]*),([^,]*),(-?\d+)$')
_STATUS_RE = re.compile(r'^reporter:status:(.*)$')
def parse_mr_job_stderr(stderr, counters=None):
"""Parse counters and status messages out of MRJob output.
:param stderr: a filehandle, a list of lines, or a str containing data
:param counters: Counters so far, to update; a map from group to counter
name to count.
Returns a dictionary with the keys *counters*, *statuses*, *other*:
- *counters*: counters so far; same format as above
- *statuses*: a list of status messages encountered
- *other*: lines that aren't either counters or status messages
"""
# For the corresponding code in Hadoop Streaming, see ``incrCounter()`` in
# http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java?view=markup
if isinstance(stderr, str):
stderr = StringIO(stderr)
if counters is None:
counters = {}
statuses = []
other = []
for line in stderr:
m = _COUNTER_RE.match(line.rstrip('\r\n'))
if m:
group, counter, amount_str = m.groups()
counters.setdefault(group, {})
counters[group].setdefault(counter, 0)
counters[group][counter] += int(amount_str)
continue
m = _STATUS_RE.match(line.rstrip('\r\n'))
if m:
statuses.append(m.group(1))
continue
other.append(line)
return {'counters': counters, 'statuses': statuses, 'other': other}
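# Illustrative example (hypothetical stderr lines): calling parse_mr_job_stderr with
#     ['reporter:counter:commas,lines_with_commas,1\n',
#      'reporter:status:splitting line\n',
#      'some stray output\n']
# returns counters {'commas': {'lines_with_commas': 1}}, statuses ['splitting line'] and
# other ['some stray output\n'].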
# Match a job output line containing counter data.
# The line is of the form
# "Job KEY="value" KEY2="value2" ... COUNTERS="<counter_string>"
# We just want to pull out the counter string, which varies between
# Hadoop versions.
_KV_EXPR = r'\s+\w+=".*?"' # this matches KEY="VALUE"
_COUNTER_LINE_EXPR = r'^.*?JOBID=".*?_%s".*?\bCOUNTERS="%s".*?$' % \
('(?P<step_num>\d+)', r'(?P<counters>.*?)')
_COUNTER_LINE_RE = re.compile(_COUNTER_LINE_EXPR)
# 0.18-specific
# see _parse_counters_0_18 for format
# A counter looks like this: groupname.countername:countervalue
_COUNTER_EXPR_0_18 = r'(,|^)(?P<group>[^,]+?)[.](?P<name>[^,]+):(?P<value>\d+)'
_COUNTER_RE_0_18 = re.compile(_COUNTER_EXPR_0_18)
# 0.20-specific
# capture one group including sub-counters
# these look like: {(gid)(gname)[...][...][...]...}
_COUNTER_LIST_EXPR = r'(?P<counter_list_str>\[.*?\])'
_GROUP_RE_0_20 = re.compile(r'{\(%s\)\(%s\)%s}' % (r'(?P<group_id>.*?)',
r'(?P<group_name>.*?)',
_COUNTER_LIST_EXPR))
# capture a single counter from a group
# this is what the ... is in _COUNTER_LIST_EXPR (incl. the brackets).
# it looks like: [(cid)(cname)(value)]
_COUNTER_0_20_EXPR = r'\[\(%s\)\(%s\)\(%s\)\]' % (r'(?P<counter_id>.*?)',
r'(?P<counter_name>.*?)',
r'(?P<counter_value>\d+)')
_COUNTER_RE_0_20 = re.compile(_COUNTER_0_20_EXPR)
def _parse_counters_0_18(counter_string):
# 0.18 counters look like this:
# GroupName.CounterName:Value,Group1.Crackers:3,Group2.Nerf:243,...
    groups = list(_COUNTER_RE_0_18.finditer(counter_string))
    if not groups:
        log.warn('Cannot parse Hadoop counter string: %s' % counter_string)
for m in groups:
yield m.group('group'), m.group('name'), int(m.group('value'))
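# Illustrative example (hypothetical input): for the 0.18-style string
#     'FileSystemCounters.FILE_BYTES_READ:1086,Job Counters.Launched map tasks:2'
# the generator yields ('FileSystemCounters', 'FILE_BYTES_READ', 1086) and then
# ('Job Counters', 'Launched map tasks', 2).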
def _parse_counters_0_20(counter_string):
# 0.20 counters look like this:
# {(groupid)(groupname)[(counterid)(countername)(countervalue)][...]...}
groups = _GROUP_RE_0_20.findall(counter_string)
if not groups:
log.warn('Cannot parse Hadoop counter string: %s' % counter_string)
for group_id, group_name, counter_str in groups:
matches = _COUNTER_RE_0_20.findall(counter_str)
for counter_id, counter_name, counter_value in matches:
try:
group_name = counter_unescape(group_name)
except ValueError:
log.warn("Could not decode group name %s" % group_name)
try:
counter_name = counter_unescape(counter_name)
except ValueError:
log.warn("Could not decode counter name %s" % counter_name)
yield group_name, counter_name, int(counter_value)
def parse_hadoop_counters_from_line(line, hadoop_version=None):
"""Parse Hadoop counter values from a log line.
The counter log line format changed significantly between Hadoop 0.18 and
0.20, so this function switches between parsers for them.
:param line: log line containing counter data
:type line: str
:return: (counter_dict, step_num) or (None, None)
"""
m = _COUNTER_LINE_RE.match(line)
if not m:
return None, None
if hadoop_version is None:
# try both if hadoop_version not specified
counters_1, step_num_1 = parse_hadoop_counters_from_line(line, '0.20')
if counters_1:
return (counters_1, step_num_1)
else:
return parse_hadoop_counters_from_line(line, '0.18')
if uses_020_counters(hadoop_version):
parse_func = _parse_counters_0_20
else:
parse_func = _parse_counters_0_18
counter_substring = m.group('counters')
counters = {}
for group, counter, value in parse_func(counter_substring):
counters.setdefault(group, {})
counters[group].setdefault(counter, 0)
counters[group][counter] += int(value)
return counters, int(m.group('step_num'))
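# Illustrative example (hypothetical log line in the 0.20 counter format):
#     line = ('Job JOBID="job_201106092314_0001" '
#             'COUNTERS="{(org.example)(My Group)[(c1)(My Counter)(42)]}"')
#     parse_hadoop_counters_from_line(line, '0.20') -> ({'My Group': {'My Counter': 42}}, 1)
# The counter string is pulled out of the COUNTERS="..." field and the step number comes
# from the trailing digits of the job id.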
### AWS Date-time parsing ###
# sometimes AWS gives us seconds as a decimal, which we can't parse
# with boto.utils.ISO8601
SUBSECOND_RE = re.compile('\.[0-9]+')
# Thu, 29 Mar 2012 04:55:44 GMT
RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
def iso8601_to_timestamp(iso8601_time):
iso8601_time = SUBSECOND_RE.sub('', iso8601_time)
try:
return time.mktime(time.strptime(iso8601_time, boto.utils.ISO8601))
except ValueError:
return time.mktime(time.strptime(iso8601_time, RFC1123))
def iso8601_to_datetime(iso8601_time):
iso8601_time = SUBSECOND_RE.sub('', iso8601_time)
try:
return datetime.strptime(iso8601_time, boto.utils.ISO8601)
except ValueError:
return datetime.strptime(iso8601_time, RFC1123)
| 36.33564
| 313
| 0.647415
|
9b56d744011924eb4fad38203efc188143920323
| 107
|
py
|
Python
|
Homework Examples/exam 2 figure 1.py
|
tonysulfaro/CSE-231
|
0e3ff5422fe42624a90a17d7f33174346662a6fc
|
[
"MIT"
] | 2
|
2021-09-23T19:17:24.000Z
|
2021-11-29T09:03:56.000Z
|
Homework Examples/exam 2 figure 1.py
|
tonysulfaro/CSE-231
|
0e3ff5422fe42624a90a17d7f33174346662a6fc
|
[
"MIT"
] | null | null | null |
Homework Examples/exam 2 figure 1.py
|
tonysulfaro/CSE-231
|
0e3ff5422fe42624a90a17d7f33174346662a6fc
|
[
"MIT"
] | 1
|
2020-10-25T13:03:18.000Z
|
2020-10-25T13:03:18.000Z
|
import operator
x = {1: 2, 3: 4, 4: 3, 2: 1, 0: 0}
sorted_x = sorted(x.items(), key=operator.itemgetter(1))
| 35.666667
| 56
| 0.626168
|
60f9b66a69ab280ff7ad591e748aa57b3b83ae51
| 3,465
|
py
|
Python
|
sfeprapy/dat/steel_section.py
|
fsepy/sfeprapy
|
0b1c11b30034793e1231f599cf41e496a9ec56aa
|
[
"MIT"
] | 4
|
2021-04-11T00:57:43.000Z
|
2022-03-10T05:28:48.000Z
|
sfeprapy/dat/steel_section.py
|
fsepy/sfeprapy
|
0b1c11b30034793e1231f599cf41e496a9ec56aa
|
[
"MIT"
] | 3
|
2019-10-26T11:31:58.000Z
|
2019-11-23T11:27:11.000Z
|
sfeprapy/dat/steel_section.py
|
fsepy/sfeprapy
|
0b1c11b30034793e1231f599cf41e496a9ec56aa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# AUTHOR: YAN FU
# DATE: 23/01/2018
# FILE NAME: steel_carbon.py
# DESCRIPTION: This python script file contains SteelSection object which transforms data stored within section_UB.csv
# to Python readable format. Details see description under the object.
import os
import pandas as pd
import numpy as np
class SteelSection(object):
"""
DESCRIPTION: SteelSection aims to transform the data stored in .csv files (i.e. section_UB.csv and section_UC.csv)
into a pythonic format, i.e. an object with properties.
"""
    def __init__(self, section_type, section_designation):
# Load data file, i.e. a .csv file, and make it in DataFrame format
dict_type_to_directory = {"ub": "sections_UB.csv", "uc": "sections_UC.csv"}
file_name_enquired_property = dict_type_to_directory[section_type]
dir_this_folder = os.path.dirname(os.path.abspath(__file__))
dir_file = "/".join([dir_this_folder, file_name_enquired_property])
        # Check that the enquired data file exists before attempting to read it
if not os.path.isfile(dir_file):
            raise FileNotFoundError("File does not exist: {}".format(dir_file))
self.__data_all = pd.read_csv(
filepath_or_buffer=dir_file, header=2, index_col=0, dtype={"id": np.str}
)
self.__data_selected = self.__data_all[
            self.__data_all.index == section_designation
]
"""
depth of section,width of section,web thickness,flange thickness,root radius,depth between fillets,ratios for local buckling (web),
ratios for local buckling (flange),
dimensions for detailing (end clearance),dimensions for detailing (notch),dimensions for detailing (notch),
"""
def __extract_col_data(self, parameter):
result = self.__data_selected[parameter].values
if len(result) == 1:
result = result[0]
return result
def mass_per_metre(self):
pass
def depth(self):
pass
def width(self):
pass
def thickness_web(self):
pass
def thickness_flange(self):
pass
def root_radius(self):
pass
def depth_between_fillets(self):
pass
def ratios_local_buckling_web(self):
pass
def ratios_local_buckling_flange(self):
pass
"""
surface area per metre,second moment of area (y-y),second moment of area (z-z),radius of gyration (y-y),radius of gyration (z-z),elastic modulus (y-y),"""
def surface_area_per_metre(self):
pass
def second_moment_of_area_yy(self):
pass
def second_moment_of_area_zz(self):
pass
def radius_of_gyration_yy(self):
pass
def elastic_modulus_yy(self):
pass
"""
elastic modulus (z-z),plastic modulus (y-y),plastic modulus (z-z),buckling parameter,torsional index,warping constant,torsional constant,area of section
"""
def elastic_modulus_zz(self):
pass
def plastic_modulus_yy(self):
pass
def plastic_modulus_zz(self):
pass
def buckling_parameter(self):
pass
def torsional_index(self):
pass
def torsional_constant(self):
pass
@property
def SECTION_TYPE_UB(self):
return "ub"
@property
def SECTION_TYPE_UC(self):
return "uc"
if __name__ == "__main__":
ss = SteelSection
my_section = ss("ub", "h")
| 26.052632
| 158
| 0.648196
|
8f59fc2b94df0666cb9a0248e835579c8c25b19e
| 2,100
|
py
|
Python
|
examples/two_file_example.py
|
ajnelson-nist/pySHACL
|
eebb546c981d5e7de72c8e51b0d9eaf14960e807
|
[
"Apache-2.0"
] | 167
|
2018-09-05T11:28:28.000Z
|
2022-03-29T13:31:02.000Z
|
examples/two_file_example.py
|
ajnelson-nist/pySHACL
|
eebb546c981d5e7de72c8e51b0d9eaf14960e807
|
[
"Apache-2.0"
] | 123
|
2018-09-07T04:27:05.000Z
|
2022-03-25T15:07:56.000Z
|
examples/two_file_example.py
|
ajnelson-nist/pySHACL
|
eebb546c981d5e7de72c8e51b0d9eaf14960e807
|
[
"Apache-2.0"
] | 50
|
2018-09-14T11:12:31.000Z
|
2022-03-25T15:00:21.000Z
|
from pyshacl import validate
shapes_file = '''
@prefix dash: <http://datashapes.org/dash#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix schema: <http://schema.org/> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
schema:PersonShape
a sh:NodeShape ;
sh:targetClass schema:Person ;
sh:property [
sh:path schema:givenName ;
sh:datatype xsd:string ;
sh:name "given name" ;
] ;
sh:property [
sh:path schema:birthDate ;
sh:lessThan schema:deathDate ;
sh:maxCount 1 ;
] ;
sh:property [
sh:path schema:gender ;
sh:in ( "female" "male" ) ;
] ;
sh:property [
sh:path schema:address ;
sh:node schema:AddressShape ;
] .
schema:AddressShape
a sh:NodeShape ;
sh:closed true ;
sh:property [
sh:path schema:streetAddress ;
sh:datatype xsd:string ;
] ;
sh:property [
sh:path schema:postalCode ;
sh:or ( [ sh:datatype xsd:string ] [ sh:datatype xsd:integer ] ) ;
sh:minInclusive 10000 ;
sh:maxInclusive 99999 ;
] .
'''
shapes_file_format = 'turtle'
data_file = '''
{
"@context": { "@vocab": "http://schema.org/" },
"@id": "http://example.org/ns#Bob",
"@type": "Person",
"givenName": "Robert",
"familyName": "Junior",
"birthDate": "1971-07-07",
"deathDate": "1968-09-10",
"address": {
"@id": "http://example.org/ns#BobsAddress",
"streetAddress": "1600 Amphitheatre Pkway",
"postalCode": 9404
}
}
'''
data_file_format = 'json-ld'
conforms, v_graph, v_text = validate(data_file, shacl_graph=shapes_file,
data_graph_format=data_file_format,
shacl_graph_format=shapes_file_format,
inference='rdfs', debug=True,
serialize_report_graph=True)
print(conforms)
print(v_graph)
print(v_text)
| 27.272727
| 75
| 0.568095
|
0aaf230a562cc087bf9b972208dd61337f9326d9
| 1,955
|
py
|
Python
|
tests/adapters/model/sqlalchemy_model/sqlite/conftest.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 6
|
2018-09-26T04:54:09.000Z
|
2022-03-30T01:01:45.000Z
|
tests/adapters/model/sqlalchemy_model/sqlite/conftest.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 261
|
2018-09-20T09:53:33.000Z
|
2022-03-08T17:43:04.000Z
|
tests/adapters/model/sqlalchemy_model/sqlite/conftest.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 6
|
2018-07-22T07:09:15.000Z
|
2021-02-02T05:17:23.000Z
|
import os
import pytest
def initialize_domain():
from protean.domain import Domain
domain = Domain("SQLAlchemy Test - SQLite")
# Construct relative path to config file
current_path = os.path.abspath(os.path.dirname(__file__))
config_path = os.path.join(current_path, "./config.py")
if os.path.exists(config_path):
domain.config.from_pyfile(config_path)
return domain
domain = initialize_domain()
@pytest.fixture(autouse=True)
def test_domain():
with domain.domain_context():
yield domain
@pytest.fixture(scope="session", autouse=True)
def setup_db():
with domain.domain_context():
# Create all associated tables
from .elements import ComplexUser, Person, Provider, ProviderCustomModel, User
from .test_array_datatype import ArrayUser, IntegerArrayUser
from .test_json_datatype import Event
domain.register(ArrayUser)
domain.register(ComplexUser)
domain.register(Event)
domain.register(IntegerArrayUser)
domain.register(Person)
domain.register(Provider)
domain.register(User)
domain.register_model(ProviderCustomModel, entity_cls=Provider)
domain.repository_for(ArrayUser)._dao
domain.repository_for(ComplexUser)._dao
domain.repository_for(Event)._dao
domain.repository_for(IntegerArrayUser)._dao
domain.repository_for(Person)._dao
domain.repository_for(Provider)._dao
domain.repository_for(User)._dao
for _, provider in domain.providers.items():
provider._metadata.create_all()
yield
# Drop all tables at the end of test suite
for _, provider in domain.providers.items():
provider._metadata.drop_all()
@pytest.fixture(autouse=True)
def run_around_tests(test_domain):
yield
if "default" in test_domain.providers:
test_domain.providers["default"]._data_reset()
| 27.928571
| 86
| 0.697187
|
2ee56e6ca01659646130cca08967c1cf57e1b16a
| 377
|
py
|
Python
|
app/api/all_countries.py
|
lucasmcast/COVID-19-api
|
9de261b039d7eb9f46dad7aa140d2e3d4d77494a
|
[
"MIT"
] | 2
|
2020-03-27T22:59:47.000Z
|
2021-01-05T21:16:24.000Z
|
app/api/all_countries.py
|
lucasmcast/COVID-19-api
|
9de261b039d7eb9f46dad7aa140d2e3d4d77494a
|
[
"MIT"
] | null | null | null |
app/api/all_countries.py
|
lucasmcast/COVID-19-api
|
9de261b039d7eb9f46dad7aa140d2e3d4d77494a
|
[
"MIT"
] | 1
|
2021-01-06T13:40:13.000Z
|
2021-01-06T13:40:13.000Z
|
from . import api
from flask import jsonify
from datetime import date
from ..models import GeneralsDatas, CountryCases
@api.route('/all_countries', methods=["GET"])
def all_countries():
date_now = str(date.today())
all_data = CountryCases.query.filter_by(date_data=date_now)
list_json = [data.to_json() for data in all_data]
return jsonify(list_json), 200
| 31.416667
| 63
| 0.737401
|
af13768eea0bb177fdd4f3caeb93ac5a2f3926a7
| 12,702
|
py
|
Python
|
2.PDF-ext/core.py
|
Coder-Pham/KPMG-Challenge
|
18eea384ca046a59b2700c1b3b90325c442e5606
|
[
"MIT"
] | null | null | null |
2.PDF-ext/core.py
|
Coder-Pham/KPMG-Challenge
|
18eea384ca046a59b2700c1b3b90325c442e5606
|
[
"MIT"
] | null | null | null |
2.PDF-ext/core.py
|
Coder-Pham/KPMG-Challenge
|
18eea384ca046a59b2700c1b3b90325c442e5606
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# # Text Summarization in Python
#
# ## Approach:
# Extractive text summarization is about finding the most important sentences in a document and using them as a summary of that document.
# Our approach uses the PageRank algorithm to find these 'important' sentences.
# ## Implementation
# ### 1. Importing important libraries
# numpy library helps in working with arrays: array creation and manipulation
# this implementation uses array for storing the matrices generated as 2-D arrays
# PyPDF2 is a library used for reading the PDF files
# sys library has been used for printing the size of data structures used in the program
import numpy as np
import PyPDF2
import fpdf # to print PDF summary
import fitz # to highlight sentence in PDF
import sys
# matplotlib is a library that is used to visualize the data by drawing graphs of matrix inputs
# we will use it for drawing the matrices generated later in the program
# %matplotlib inline is a command used to show the graphs in the jupyter notebook
import matplotlib.pyplot as plt
#get_ipython().magic('matplotlib inline')
# networkx library helps in working with graphs
# and later performing the PageRank algorithm
# which is the crux of this implementation to find
# the importance of each sentence using their 'rank' as a metric
# rank, the output of the method pagerank, is a measure of importance of sentences
# this library has been used in the cell no. ()
import networkx as nx
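# A tiny, commented-out illustration (added, not part of the original notebook) of what
# nx.pagerank returns on a hand-made weighted graph; the numbers are only indicative.
'''
g_demo = nx.Graph()
g_demo.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 2.0), (0, 2, 0.5)])
ranks_demo = nx.pagerank(g_demo)
print(ranks_demo)  # dict mapping node -> PageRank score; the scores sum to 1
'''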
# the PunktSentenceTokenizer library is being imported from the file punkt.py contained in package nltk.tokenize
# this is used to tokenize the document into sentences
# Tokenization: Tokenization is the process of demarcating and possibly classifying..
# sections of a string of input characters.
# The resulting tokens are then passed on to some other form of processing.
from nltk.tokenize.punkt import PunktSentenceTokenizer
# TfidfTransformer and CountVectorizer libraries are being imported
# CountVectorizer: In this implementation, a CountVectorizer object is being created that ..
# will be used for creating the document-term matrix
# TfidfTransformer: in this implementation, TfidfTransformer is used for executing the method fit_transform()...
# which provides the output as a document-term matrix normalized (value 0-1) according to the TF-IDF
# TF(Term Frequency): the no. of times a term(a word here) appears in the current document(single sentence here)
# IDF(Inverse Document Frequency): the no. of times a term(a word here) appears in the entire corpus
# Corpus: set of all sentences
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
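# A small, commented-out sketch (added for illustration, not part of the original flow) of how
# CountVectorizer counts are turned into normalized TF-IDF weights; the sentences are made up.
'''
cv_tfidf_demo = CountVectorizer()
counts_demo = cv_tfidf_demo.fit_transform(["the cat sat", "the cat ran", "dogs ran fast"])
tfidf_demo = TfidfTransformer().fit_transform(counts_demo)
print(tfidf_demo.toarray())  # rows are sentences, columns are words, values lie in [0, 1]
'''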
# ### 2. Function to read the document from user
# Supported formats: .txt, .pdf
#
# Input: Takes the name of the file as input.
#
# Output: Returns a string output containing the contents of the file.
# we are going to show an example of how the method is working
# first let's take the document as an input
def readDoc():
global name
# name = input('Please input a file name: ')
name = str(sys.argv[1])
print('You have asked for the document {}'.format(name))
# now read the type of document
if name.lower().endswith('.txt'):
choice = 1
elif name.lower().endswith('.pdf'):
choice = 2
else:
choice = 3
# print(name)
print(choice)
document = ''
# Case 1: if it is a .txt file
if choice == 1:
f = open(name, 'r')
document = f.read()
f.close()
# Case 2: if it is a .pdf file
elif choice == 2:
pdfFileObj = open(name, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
for i in range(pdfReader.getNumPages()):
pageObj = pdfReader.getPage(i)
document += pageObj.extractText()
pdfFileObj.close()
# Case 3: none of the format
else:
print('Failed to load a valid file')
print('Returning an empty string')
document = ''
print(type(document))
return document
# ### 3. Function to tokenize the document
# Input: String of text document
#
# Output: A list containing sentences as its elements
# the function used for tokenizing the sentences
# tokenization of a sentence: '''provided in cell() above'''
def tokenize(document):
# We are tokenizing using the PunktSentenceTokenizer
    # we create an instance of this class called doc_tokenizer
doc_tokenizer = PunktSentenceTokenizer()
# tokenize() method: takes our document as input and returns a list of all the sentences in the document
# sentences is a list containing each sentence of the document as an element
sentences_list = doc_tokenizer.tokenize(document)
return sentences_list
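# Commented-out illustration (added, not part of the original pipeline) of what tokenize()
# produces on a tiny hand-written string.
'''
demo_sentences = tokenize("This is the first sentence. And here is the second one!")
print(demo_sentences)  # expected: a list with the two sentences as separate elements
'''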
# ### 4. Read the document
# reading a file and
# printing the size of the file
document = readDoc()
print('The length of the file is:', end=' ')
print(len(document))
# ### 5. Generate a list of sentences in the document
# we want to tokenize the document for further processing
# tokenizing the document means that we are creating a list of all the sentences of the document.
# Need of tokenizing the document: Initially the document is in just a string format.
# if we want to process the document, we need to store it in a data structure.
# Tokenization of document into words is also possible, but we will go with the tokenizing with the sentences
# Since we want to choose the most relevant sentences, we need to generate tokens of sentences only
sentences_list = tokenize(document)
# let us print the size of memory used by the list sentences
print('The size of the list in Bytes is: {}'.format(sys.getsizeof(sentences_list)))
# the size of one of the element of the list
print('The size of the item 0 in Bytes is: {}'.format(sys.getsizeof(sentences_list[0])))
# let us see the data type of sentences_list
# It will be list
print(type(sentences_list))
# let us analyse the elements of the sentences
# len() method applies on the list and provides the number of elements in the list
print('The size of the list "sentences" is: {}'.format(len(sentences_list)))
# print the elements of the list
# If the input document is long (which it realistically will be), printing the entire document is not ideal; it is done here only for demonstration
for i in sentences_list:
print(i)
# ### 6. Generate term-document matrix (TD matrix) of the data
# Convert a collection of text documents to a matrix of token counts
# fit_transform method of CountVectorizer() class
# Learn the vocabulary dictionary and return term-document matrix.
# I/p: An iterable which yields either str, unicode or file objects.
# O/p: The term-document matrix named cv_matrix
cv = CountVectorizer()
cv_matrix = cv.fit_transform(sentences_list)
# **So what does CountVectorizer.fit_transform() do?**
'''
# a demo of what CountVectorizer().fit_transform(text) does
cv_demo = CountVectorizer() # a demo object of class CountVectorizer
# I have repeated the words to make a non-ambiguous array of the document text matrix
text_demo = ["Ashish is good, you are bad", "I am not bad"]
res_demo = cv_demo.fit_transform(text_demo)
print('Result demo array is {}'.format(res_demo.toarray()))
# Result is 2-d matrix containing document text matrix
# Each row corresponds to one sentence and each column to one word of the vocabulary;
# the entry is how many times that word occurs in that sentence
# (e.g. the column for the word 'bad' has a count in both rows)
print('Feature list: {}'.format(cv_demo.get_feature_names()))
'''
# printing the cv_matrix type
# and how it is being stored in memory?
# it is stored in the compressed row format
# compressed row format:
print('The data type of bow matrix {}'.format(type(cv_matrix)))
print('Shape of the matrix {}'.format(cv_matrix.get_shape()))
print('Size of the matrix is: {}'.format(sys.getsizeof(cv_matrix)))
print(cv.get_feature_names())
print(cv_matrix.toarray())
# Tnormalized: document-term matrix normalized (value 0-1) according to the TF-IDF
# TF(Term Frequency): the no. of times a term(a word here) appears in the current document(single sentence here)
# IDF(Inverse Document Frequency): the no. of times a term(a word here) appears in the entire corpus
# Corpus: set of all sentences
normal_matrix = TfidfTransformer().fit_transform(cv_matrix)
print(normal_matrix.toarray())
print(normal_matrix.T.toarray())
res_graph = normal_matrix * normal_matrix.T
# plt.spy(res_graph)
# ### 7. Generate a graph for the document to apply PageRank algorithm
# drawing a graph to proceed for the textrank algorithm
# nx_graph is a graph developed using the networkx library
# each node represents a sentence
# an edge represents that they have words in common
# the edge weight is the number of words that are common in both of the sentences(nodes)
# nx.draw() method is used to draw the graph created
nx_graph = nx.from_scipy_sparse_matrix(res_graph)
nx.draw_circular(nx_graph)
print('Number of edges {}'.format(nx_graph.number_of_edges()))
print('Number of vertices {}'.format(nx_graph.number_of_nodes()))
# plt.show()
print('The memory used by the graph in Bytes is: {}'.format(sys.getsizeof(nx_graph)))
# ### 8. Getting the rank of every sentence using pagerank
# ranks is a dictionary with key=node(sentences) and value=textrank (the rank of each of the sentences)
ranks = nx.pagerank(nx_graph)
# analyse the data type of ranks
print(type(ranks))
print('The size used by the dictionary in Bytes is: {}'.format(sys.getsizeof(ranks)))
# print the dictionary
for i in ranks:
print(i, ranks[i])
# ### 9. Finding important sentences and generating summary
# enumerate method: returns an enumerate object
# Use of list Comprehensions
# O/p: sentence_array is the sorted(descending order w.r.t. score value) 2-d array of ranks[sentence] and sentence
# For example, if there are two sentences: S1 (with a score of S1 = s1) and S2 with score s2, with s2>s1
# then sentence_array is [[s2, S2], [s1, S1]]
sentence_array = sorted(((ranks[i], s) for i, s in enumerate(sentences_list)), reverse=True)
sentence_array = np.asarray(sentence_array)
# as sentence_array is in descending order wrt score value
# fmax is the largest score value(the score of first element)
# fmin is the smallest score value(the score of last element)
rank_max = float(sentence_array[0][0])
rank_min = float(sentence_array[len(sentence_array) - 1][0])
# print the largest and smallest value of scores of the sentence
print(rank_max)
print(rank_min)
# Normalization of the scores
# so that it comes out in the range 0-1
# fmax becomes 1
# fmin becomes 0
# store the normalized values in the list temp_array
temp_array = []
# if all sentences have equal ranks, means they are all the same
# taking any sentence will give the summary, say the first sentence
flag = 0
if rank_max - rank_min == 0:
temp_array.append(0)
flag = 1
# If the sentence has different ranks
if flag != 1:
for i in range(0, len(sentence_array)):
temp_array.append((float(sentence_array[i][0]) - rank_min) / (rank_max - rank_min))
print(len(temp_array))
# Calculation of threshold:
# We take the mean value of normalized scores
# any sentence with a normalized score more than 0.2 above the mean value is considered important enough for the summary
threshold = (sum(temp_array) / len(temp_array)) + 0.2
# Separate out the sentences that satisfy the criteria of having a score above the threshold
sentence_list = []
if len(temp_array) > 1:
for i in range(0, len(temp_array)):
if temp_array[i] > threshold:
sentence_list.append(sentence_array[i][1])
else:
sentence_list.append(sentence_array[0][1])
model = sentence_list
# ### 10. Writing the summary to a new file
# print(sentence_list)
'''
summary = " ".join(str(x) for x in sentence_list)
summary = str.encode(summary).replace(b'\n', b'')
summary = summary.decode()
print(summary)
'''
# with open('sum.txt', 'w') as file:
# file.write(summary)
output_pdf = fpdf.FPDF(format='letter')
output_pdf.add_page()
output_pdf.set_font("Arial", size = 12)
final = []
for lines in sentence_list:
line = str(lines)
line = str.encode(line).replace(b'\n', b'')
line = line.decode()
line = str(line.encode(encoding = 'utf-8', errors = 'ignore'))
# print(line[1:])
output_pdf.write(5, line[2:-1])
final.append(line[2:-1])
output_pdf.ln(10)
output_pdf.output(name.replace(".pdf", "_") + "summary.pdf")
highlight = fitz.open(name)
for page in highlight:
for line in final:
sentence = page.searchFor(line)
for sen in sentence:
page.addHighlightAnnot(sen)
highlight.save(name.replace(".pdf", "_") + "highlight.pdf", garbage = 4, deflate = True, clean = True)
# End of the notebook
| 33.514512
| 128
| 0.728389
|
2078fa93e263ffe8364bd83bf16475f397e1ef29
| 625
|
py
|
Python
|
blog/wsgi.py
|
atlednolispe/blog
|
d3926e424d544f3e9a3805b16a15072ac6c6a780
|
[
"MIT"
] | null | null | null |
blog/wsgi.py
|
atlednolispe/blog
|
d3926e424d544f3e9a3805b16a15072ac6c6a780
|
[
"MIT"
] | 7
|
2020-06-05T16:37:46.000Z
|
2022-03-11T23:11:36.000Z
|
blog/wsgi.py
|
atlednolispe/blog
|
d3926e424d544f3e9a3805b16a15072ac6c6a780
|
[
"MIT"
] | null | null | null |
"""
WSGI config for blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
try:
    import MySQLdb  # the mysqlclient package is imported under this module name
except ModuleNotFoundError:
    import pymysql
    pymysql.install_as_MySQLdb()
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog.settings")
profile = os.environ.get('BLOG_PROFILE', 'product')
os.environ.update({"DJANGO_SETTINGS_MODULE": "blog.settings.%s" % profile})
application = get_wsgi_application()
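# Example of pointing a WSGI server at the callable above (added for reference; the profile
# name is an assumption and must match a module under blog/settings/):
#   BLOG_PROFILE=develop gunicorn blog.wsgi:application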
| 24.038462
| 78
| 0.7664
|
c1594134a6d3d112e0cfab2731289ef28eaaf127
| 2,091
|
py
|
Python
|
app/main/views.py
|
YomZsamora/Watchlist-Default
|
a9077abec95c15d8c9b9d17280fb9647e3573891
|
[
"Unlicense"
] | null | null | null |
app/main/views.py
|
YomZsamora/Watchlist-Default
|
a9077abec95c15d8c9b9d17280fb9647e3573891
|
[
"Unlicense"
] | null | null | null |
app/main/views.py
|
YomZsamora/Watchlist-Default
|
a9077abec95c15d8c9b9d17280fb9647e3573891
|
[
"Unlicense"
] | null | null | null |
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_movies,get_movie,search_movie
from .forms import ReviewForm
from ..models import Review
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
# Getting popular movie
popular_movies = get_movies('popular')
upcoming_movie = get_movies('upcoming')
now_showing_movie = get_movies('now_playing')
title = 'Home - Welcome to The best Movie Review Website Online'
search_movie = request.args.get('movie_query')
if search_movie:
return redirect(url_for('search',movie_name=search_movie))
else:
return render_template('index.html', title = title, popular = popular_movies, upcoming = upcoming_movie, now_showing = now_showing_movie )
@main.route('/movie/<int:id>')
def movie(id):
'''
View movie page function that returns the movie details page and its data
'''
movie = get_movie(id)
title = f'{movie.title}'
reviews = Review.get_reviews(movie.id)
return render_template('movie.html',title = title,movie = movie,reviews = reviews)
@main.route('/search/<movie_name>')
def search(movie_name):
'''
View function to display the search results
'''
movie_name_list = movie_name.split(" ")
movie_name_format = "+".join(movie_name_list)
searched_movies = search_movie(movie_name_format)
title = f'search results for {movie_name}'
return render_template('search.html',movies = searched_movies)
@main.route('/movie/review/new/<int:id>', methods = ['GET','POST'])
def new_review(id):
form = ReviewForm()
movie = get_movie(id)
if form.validate_on_submit():
title = form.title.data
review = form.review.data
new_review = Review(movie.id,title,movie.poster,review)
new_review.save_review()
return redirect(url_for('movie',id = movie.id ))
title = f'{movie.title} review'
return render_template('new_review.html',title = title, review_form=form, movie=movie)
| 30.75
| 146
| 0.695839
|
aa1a659a57f27145c009a93aaf806738cf03fa1b
| 4,223
|
py
|
Python
|
tests/test_select.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 266
|
2015-01-03T04:18:48.000Z
|
2022-02-16T03:08:38.000Z
|
tests/test_select.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 19
|
2015-03-06T11:04:53.000Z
|
2021-06-09T15:08:57.000Z
|
tests/test_select.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 20
|
2015-01-03T03:45:08.000Z
|
2022-03-05T06:05:32.000Z
|
import goless
from goless.backends import current as be
from . import BaseTests
class RecvCaseTests(BaseTests):
chansize = 1
def setUp(self):
BaseTests.setUp(self)
self.ch = goless.chan(self.chansize)
self.ca = goless.rcase(self.ch)
def test_ready(self):
self.assertFalse(self.ca.ready())
be.run(self.ch.send, 1)
self.assertTrue(self.ca.ready())
be.run(self.ch.recv)
self.assertFalse(self.ca.ready())
def test_executes(self):
be.run(self.ch.send, 'a')
x = self.ca.exec_()
self.assertEqual(x, 'a')
def test_exec_with_no_body(self):
be.run(self.ch.send, 'a')
ca = goless.rcase(self.ch)
self.assertEqual(ca.exec_(), 'a')
class RecvCaseUnbufferedTests(RecvCaseTests):
chansize = 0
class SendCaseTests(BaseTests):
chansize = 1
def setUp(self):
BaseTests.setUp(self)
self.ch = goless.chan(self.chansize)
self.sendval = 1
self.ca = goless.scase(self.ch, self.sendval)
def test_ready(self):
def assert_default_readiness():
self.assertEquals(self.ca.ready(), self.chansize > 0)
assert_default_readiness()
be.run(self.ch.send)
self.assertFalse(self.ca.ready())
be.run(self.ch.recv)
assert_default_readiness()
be.run(self.ch.send)
self.assertFalse(self.ca.ready())
be.run(self.ch.recv)
assert_default_readiness()
def test_executes(self):
def recv():
a.append(self.ch.recv())
a = []
be.run(recv)
self.ca.exec_()
self.assertEqual(a, [self.sendval])
def test_exec_no_onselected(self):
be.run(self.ch.recv)
self.ca.exec_()
class SendCaseUnbufferedTests(SendCaseTests):
chansize = 0
class SelectTests(BaseTests):
def setUp(self):
BaseTests.setUp(self)
self.chan1 = goless.chan()
def test_select_uses_default(self):
cases = [goless.rcase(self.chan1), goless.dcase()]
result, val = goless.select(cases)
self.assertIs(result, cases[1])
self.assertIsNone(val)
def test_select_chooses_ready_selection(self):
readychan = goless.chan(1)
notreadychan = goless.chan(1)
readychan.send(3)
cases = [goless.rcase(notreadychan), goless.rcase(readychan), goless.dcase()]
result, val = goless.select(cases)
self.assertIs(result, cases[1])
self.assertEqual(val, 3)
def test_select_no_default_no_ready_blocks(self):
chan1 = goless.chan()
chan2 = goless.chan()
a = []
cases = [goless.rcase(chan2), goless.rcase(chan1)]
def sel():
a.append(goless.select(cases))
be.run(sel)
self.assertEqual(a, [])
chan1.send(5)
be.yield_()
self.assertEqual(len(a), 1)
chosen, val = a[0]
self.assertEqual(chosen, cases[1])
self.assertEqual(val, 5)
def test_main_tasklet_can_select(self):
chan1 = goless.chan(1)
cases = [goless.scase(chan1, 3)]
chosen, val = goless.select(cases)
self.assertIs(chosen, cases[0])
self.assertIsNone(val)
def test_raises_if_multiple_default_cases(self):
with self.assertRaises(AssertionError):
goless.select([goless.dcase(), goless.dcase()])
def test_select_accepts_args(self):
chan1 = goless.chan(1)
scase = goless.scase(chan1, 1)
chosen, val = goless.select(scase)
self.assertIs(chosen, scase)
self.assertIsNone(val)
def test_select_raises_for_list_and_args(self):
chan1 = goless.chan(1)
chan2 = goless.chan(1)
chan3 = goless.chan(1)
cases = [goless.scase(chan1, 1), goless.scase(chan2, 2)]
with self.assertRaises(TypeError):
goless.select(cases, chan3)
def test_select_with_no_args_should_do_nothing(self):
goless.select()
goless.select([])
def test_raises_deadlock_if_no_goroutines(self):
with self.assertRaises(goless.Deadlock):
goless.select(goless.rcase(goless.chan()))
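# A small usage sketch (added for illustration, not one of the test cases) of the select API
# exercised above: with a value already buffered, select returns the ready rcase and the value.
def _select_usage_sketch():
    ch = goless.chan(1)
    ch.send('hello')
    case = goless.rcase(ch)
    chosen, value = goless.select([case, goless.dcase()])
    assert chosen is case
    assert value == 'hello'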
| 28.727891
| 85
| 0.608809
|
fd3a160794559ee44ca04d6f744f8bbaf8972812
| 3,980
|
py
|
Python
|
lib/surface/spanner/instances/update.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/surface/spanner/instances/update.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/spanner/instances/update.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for spanner instances update."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.spanner import instance_operations
from googlecloudsdk.api_lib.spanner import instances
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.spanner import flags
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Update(base.Command):
"""Update a Cloud Spanner instance."""
detailed_help = {
'EXAMPLES':
textwrap.dedent("""\
To update the display name of a Cloud Spanner instance, run:
$ {command} my-instance-id --description=my-new-display-name
To update the node count of a Cloud Spanner instance, run:
$ {command} my-instance-id --nodes=1
"""),
}
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
flags.Instance().AddToParser(parser)
flags.Description(required=False).AddToParser(parser)
flags.Nodes(required=False).AddToParser(parser)
base.ASYNC_FLAG.AddToParser(parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
op = instances.Patch(
args.instance, description=args.description, nodes=args.nodes)
if args.async_:
return op
instance_operations.Await(op, 'Updating instance')
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class BetaUpdate(Update):
"""Update a Cloud Spanner instance."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go on
the command line after this command. Positional arguments are allowed.
"""
flags.Instance().AddToParser(parser)
flags.Description(required=False).AddToParser(parser)
base.ASYNC_FLAG.AddToParser(parser)
group_parser = parser.add_argument_group(mutex=True)
flags.Nodes(required=False).AddToParser(group_parser)
flags.ProcessingUnits(required=False).AddToParser(group_parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
op = instances.Patch(
args.instance,
description=args.description,
nodes=args.nodes,
processing_units=args.processing_units)
if args.async_:
return op
instance_operations.Await(op, 'Updating instance')
| 33.728814
| 79
| 0.719095
|
99f4596d8c3b13cacdb7814de6e9da5b1ead1e1b
| 1,150
|
py
|
Python
|
add-sources.py
|
eugk/sumo-logic-scripts
|
bbdb92ce91a1bda0ef4fab94c61fafbc288073d2
|
[
"MIT"
] | null | null | null |
add-sources.py
|
eugk/sumo-logic-scripts
|
bbdb92ce91a1bda0ef4fab94c61fafbc288073d2
|
[
"MIT"
] | null | null | null |
add-sources.py
|
eugk/sumo-logic-scripts
|
bbdb92ce91a1bda0ef4fab94c61fafbc288073d2
|
[
"MIT"
] | null | null | null |
import json
import requests
import socket
import sys
import os
# List of API endpoints:
# https://service.us2.sumologic.com/help/Default.htm#Sumo_Logic_Endpoints.htm%3FTocPath%3DAPIs%7C_____1
API_URL = 'https://api.us2.sumologic.com/api/v1/collectors'
USER = os.environ['SL_ACCESSID']
PASS = os.environ['SL_ACCESSKEY']
sources_file = sys.argv[1]
def post_source(collector_id):
with open(sources_file) as json_file:
json_data = json.load(json_file)
for source in json_data['sources']:
source_json = json.dumps(source)
r = requests.post(API_URL + '/' + collector_id + '/sources', data=source_json, auth=(USER, PASS), headers={'content-type': 'application/json'}, verify=True)
r.raise_for_status()
r = requests.get(API_URL, auth=(USER, PASS), verify=True)
r.raise_for_status()
collectors = r.json()
for collector in collectors['collectors']:
collector_name = str(collector['name'])
if '_aws_' in collector_name: continue
if 'a0-' not in collector_name: continue
    print('Adding sources to ' + collector_name)
collector_id = str(collector['id'])
post_source(collector_id)
| 33.823529
| 168
| 0.709565
|
8f5b62d63ce5471550fc90f2dbfc85f8193dfb51
| 1,408
|
py
|
Python
|
z3/magic_sequence.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 279
|
2015-01-10T09:55:35.000Z
|
2022-03-28T02:34:03.000Z
|
z3/magic_sequence.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 10
|
2017-10-05T15:48:50.000Z
|
2021-09-20T12:06:52.000Z
|
z3/magic_sequence.py
|
Wikunia/hakank
|
030bc928d2efe8dcbc5118bda3f8ae9575d0fd13
|
[
"MIT"
] | 83
|
2015-01-20T03:44:00.000Z
|
2022-03-13T23:53:06.000Z
|
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Magic sequence problem in Z3
# http://www.dcs.st-and.ac.uk/~ianm/CSPLib/prob/prob019/spec.html
# """
# A magic sequence of length n is a sequence of integers x0 . . xn-1 between
# 0 and n-1, such that for all i in 0 to n-1, the number i occurs exactly xi
# times in the sequence. For instance, 6,2,1,0,0,0,1,0,0,0 is a magic sequence
# since 0 occurs 6 times in it, 1 occurs twice, ...
# """
#
# Time for n=1..19:
# - just global_cardinality_count or loop of count/4: 9.6s
# - gcc + Sum: 4.3s
# - gcc + scalar_product: 1.6s
# - gcc + Sum + scalar_product: 1.4s
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
#
from z3_utils_hakank import *
def magic_sequence(n):
sol = Solver()
x = makeIntVector(sol,"x",n, 0,n)
# This is the only constraint that is really needed
global_cardinality_count(sol,[i for i in range(n)],x,x)
# for i in range(n): count(sol,i,x,x[i]) # slower
# some extras for speed up (see above)
sol.add(Sum([x[i] for i in range(n)]) == n)
scalar_product(sol,[i for i in range(n)],x,n)
if sol.check() == sat:
mod = sol.model()
print([mod.eval(x[i]) for i in range(n) ])
else:
print("No solution!")
for n in range(1,20):
print("Testing ", n)
magic_sequence(n)
print()
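# A small pure-Python sanity check (added for illustration, independent of the Z3 model):
# a sequence x of length n is magic iff x[i] equals the number of occurrences of i in x.
def is_magic_sequence(seq):
    return all(seq.count(i) == seq[i] for i in range(len(seq)))
# The example sequence from the header comment above:
print(is_magic_sequence([6, 2, 1, 0, 0, 0, 1, 0, 0, 0])) # True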
| 28.16
| 79
| 0.620739
|
4498398b861b5900b4b3947e76701bf9b51e3eb8
| 678
|
py
|
Python
|
examples/a5_error_handling.py
|
leonh/pyelements
|
eb8f6fd948bf2272219d69aa8e43a86df79bb946
|
[
"MIT"
] | null | null | null |
examples/a5_error_handling.py
|
leonh/pyelements
|
eb8f6fd948bf2272219d69aa8e43a86df79bb946
|
[
"MIT"
] | null | null | null |
examples/a5_error_handling.py
|
leonh/pyelements
|
eb8f6fd948bf2272219d69aa8e43a86df79bb946
|
[
"MIT"
] | null | null | null |
try:
# Write your operations here
print(numbers)
except:
# If there is an error, the code in this block will be executed
# For example, here you can log the error details
print("An exception occured")
print("-----------------------------------")
try:
print(numbers)
except NameError:
print("An NameError occured")
print("-----------------------------------")
#a = "one"
try:
print(int(a))
except NameError:
print("A NameError occured")
except TypeError:
print("A TypeError occured")
except ValueError:
print("A ValueError occured")
except:
print("Another type of error occured")
print("-----------------------------------")
| 21.1875
| 67
| 0.569322
|
1ffad90bfc44b19c30a903c5888fbc2ea0d9e2e4
| 1,823
|
py
|
Python
|
module3/matrix_massive_d.py
|
aspadm/labworks
|
8b04a40656f0791b191e7e6a980c10afc77cd041
|
[
"MIT"
] | null | null | null |
module3/matrix_massive_d.py
|
aspadm/labworks
|
8b04a40656f0791b191e7e6a980c10afc77cd041
|
[
"MIT"
] | null | null | null |
module3/matrix_massive_d.py
|
aspadm/labworks
|
8b04a40656f0791b191e7e6a980c10afc77cd041
|
[
"MIT"
] | null | null | null |
#Shibanova Darya IU7-12
#Find the first negative element of each row of the matrix
#and store them in an array. Add the matrix elements to the index of the
#maximum element of the array.
#Print the matrix, the array and the index of the maximum element.
#B - matrix
#A - array
A = []
Str = int(input('Enter the number of rows (no more than 7): '))
El = int(input('Enter the number of elements per row (no more than 9): '))
B = []
b = 0
print('Enter the values of each row on a single line: ')
for i in range(Str):
    B.append([int(El) for El in input().split()])
print()
print('Original matrix: \n')
for i in range(Str):
    for j in range(El):
        print(B[i][j],end=" ")
    print()
print()
for i in range(Str):
    for j in range(El):
        if B[i][j] < 0:
            A.append(B[i][j])
            break
a = len(A)
if a > 0:
    print()
    print('Array of the first negative elements of each row: ')
    for i in range(a):
        print(A[i],end=' ')
    #Find the index of the maximum element of the resulting array
    #(the running maximum must be initialised once, before the loop)
    AMin = A[0]
    n = 0
    for i in range(a):
        if A[i] > AMin:
            AMin = A[i]
            n = i
    print()
    print('Index of the maximum element of the resulting one-dimensional array (Python indexing): ',n)
    print('Position of the maximum element of the resulting one-dimensional array: ',n+1)
    for i in range(Str):
        for j in range(El):
            b += B[i][j]
    summa = b + n
    print('The sum of the matrix elements and the index of the maximum element of the array is: ',summa)
    print('The sum of the matrix elements and the position of the maximum element of the array is: ',summa+1)
else:
    print()
    print('Since the array is empty, the index of the maximum element cannot be determined')
    print('Since the array is empty, the matrix elements cannot be added to the index')
| 30.383333
| 80
| 0.61492
|
43aeff7560761a53e690a2e55527d988a5193a00
| 3,829
|
py
|
Python
|
pystol-ui/tests/test_base.py
|
pystol/pystol
|
95dbff08a0a1448a9b3bcc847623b59761916086
|
[
"Apache-2.0"
] | 17
|
2020-03-18T13:49:23.000Z
|
2022-02-17T04:38:19.000Z
|
pystol-ui/tests/test_base.py
|
pystol/pystol
|
95dbff08a0a1448a9b3bcc847623b59761916086
|
[
"Apache-2.0"
] | 2
|
2021-05-18T07:44:34.000Z
|
2021-10-07T13:49:57.000Z
|
pystol-ui/tests/test_base.py
|
pystol/pystol
|
95dbff08a0a1448a9b3bcc847623b59761916086
|
[
"Apache-2.0"
] | 44
|
2019-10-23T20:26:31.000Z
|
2021-11-12T12:19:41.000Z
|
#!/usr/bin/env python
"""
Copyright 2019 Pystol (pystol.org).
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from logging import CRITICAL, disable
disable(CRITICAL)
urls = {
'': (
'/fixed_sidebar',
'/fixed_footer',
'/plain_page',
'/page_403',
'/page_404',
'/page_500'
),
'/home': (
'/index',
'/index2',
'/index3'
),
'/forms': (
'/form',
'/form_advanced',
'/form_validation',
'/form_wizards',
'/form_upload',
'/form_buttons'
),
'/ui': (
'/general_elements',
'/media_gallery',
'/typography',
'/icons',
'/glyphicons',
'/widgets',
'/invoice',
'/inbox',
'/calendar'
),
'/tables': (
'/tables',
'/tables_dynamic'
),
'/data': (
'/chartjs',
'/chartjs2',
'/morisjs',
'/echarts',
'/other_charts'
),
'/additional': (
'/ecommerce',
'/projects',
'/project_detail',
'/contacts',
'/profile',
'/pricing'
)
}
free_access = {'/', '/login', '/page_403', '/page_404', '/page_500'}
def check_pages(*pages):
"""
Test the base app.
    Decorator that requests each of the given pages after running the wrapped test.
"""
def decorator(function):
def wrapper(user_client):
function(user_client)
for page in pages:
r = user_client.get(page, follow_redirects=True)
print(r)
# assert r.status_code == 200
assert True
return wrapper
return decorator
def check_blueprints(*blueprints):
"""
Test the base app.
    Decorator that requests every page of the given blueprints after running the wrapped test.
"""
def decorator(function):
def wrapper(user_client):
function(user_client)
for blueprint in blueprints:
for page in urls[blueprint]:
r = user_client.get(blueprint + page,
follow_redirects=True)
print(r)
# assert r.status_code == 200
assert True
return wrapper
return decorator
# Base test
# test the login system: login, user creation, logout
# test that all pages respond with HTTP 403 if not logged in, 200 otherwise
def test_authentication(base_client):
"""
Test the base app.
    Checks that pages outside free_access are protected when not logged in.
"""
for blueprint, pages in urls.items():
for page in pages:
page_url = blueprint + page
expected_code = 200 if page_url in free_access else 403
r = base_client.get(page_url, follow_redirects=True)
print(expected_code)
print(r)
# assert r.status_code == expected_code
assert True
def test_urls(user_client):
"""
Test the base app.
    Checks that all pages respond for a logged-in user, then logs out.
"""
for blueprint, pages in urls.items():
for page in pages:
page_url = blueprint + page
r = user_client.get(page_url, follow_redirects=True)
print(r)
# assert r.status_code == 200
assert True
# logout and test that we cannot access anything anymore
r = user_client.get('/logout', follow_redirects=True)
test_authentication(user_client)
| 24.703226
| 75
| 0.559154
|
83cee280dfb1dfd41965c8504e6a0275a0e6d103
| 119
|
py
|
Python
|
django/tests/templates/base.html.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
django/tests/templates/base.html.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
django/tests/templates/base.html.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
XXXXXX
XXXXXXXXXXXXX
XXXXXX
XXXXXXXXXX XXXXXXXX XXXXXX BBBBB BBBBBBBBBBBBBXXXXX
BBBBB BBBBBBB
BBBBBBBB
XXXXXXX
XXXXXXX
| 13.222222
| 51
| 0.890756
|
0e106e25a7d0ce5296dcfd541465797a8f621962
| 2,574
|
py
|
Python
|
TwitchGraph/TwitchBot.py
|
dmegahan/TwitchStats
|
0f3964b74d4c4eb564d97c55984e52fe409254e7
|
[
"Apache-2.0"
] | 1
|
2019-03-04T18:05:45.000Z
|
2019-03-04T18:05:45.000Z
|
TwitchGraph/TwitchBot.py
|
dmegahan/TwitchStats
|
0f3964b74d4c4eb564d97c55984e52fe409254e7
|
[
"Apache-2.0"
] | null | null | null |
TwitchGraph/TwitchBot.py
|
dmegahan/TwitchStats
|
0f3964b74d4c4eb564d97c55984e52fe409254e7
|
[
"Apache-2.0"
] | null | null | null |
import requests
import csv
import threading
import time
import datetime
import os
import logging
import TwitchAPI
import config
logging.getLogger("requests").setLevel(logging.WARNING)
"""
This class tracks and records the twitch data from a stream (viewer numbers, game being played) and puts it into
a CSV file
"""
class TwitchThread(threading.Thread):
def __init__(self, stream, csvPath, config):
threading.Thread.__init__(self)
#stream name
#initialize everything we are going to use
self.stream = stream
self.CSVfp = csvPath
self._stopevent = threading.Event( )
self.directory = csvPath
self.config = config
def toCSV(self, streamer_name, num_viewers, game):
#get current time, format: Year-Month-Day Hour:Minute:Second
exact_time = datetime.datetime.utcnow().strftime(self.config["DATE_TIME_FORMAT"])
#check if directory exists
if not os.path.exists(os.path.dirname(self.directory)):
os.makedirs(os.path.dirname(self.directory))
with open(self.directory, 'a') as fp:
f_write = csv.writer(fp)
try:
f_write.writerow([num_viewers, game, exact_time])
except(UnicodeEncodeError):
print "UnicodeDecodeError from writerow: " + str(num_viewers) + ", " + game + ", " + str(exact_time)
game = game.decode('utf-8')
f_write.writerow([num_viewers, game, exact_time])
def run(self):
#do the operations
        print(self.stream + " thread started")
logging.info(self.stream + " thread started")
while not self._stopevent.isSet():
[viewerNum, game] = TwitchAPI.getStreamInfo(self.stream)
            #stream is likely offline or an error occurred
            if game == config.STR_STREAM_OFFLINE:
                #wait some time and try again
                time.sleep(5)
            elif viewerNum is not None and game is not None:
                #everything went OK, add data to CSV
                #if a None is received, something broke so don't do anything
timeout_checks = 0
self.toCSV(self.stream, viewerNum, game)
self._stopevent.wait(0.5)
return
def join(self, timeout=None):
""" Stop the thread and wait for it to end. """
self._stopevent.set( )
threading.Thread.join(self, timeout)
"""
def main():
parent = ParentThread()
parent.setDaemon(False)
parent.start()
TwitchIRCBot.main()
main()"""
| 34.32
| 116
| 0.623543
|
9440313aa20cafe1c8d94fd226832cfd0a42bd7a
| 32,696
|
py
|
Python
|
crfnet/data_processing/generator/nuscenes_generator_modified.py
|
jovialio/CameraRadarFusionNet
|
31b2550694fa80ac2a30bd81875a089eff684c9c
|
[
"Apache-2.0"
] | null | null | null |
crfnet/data_processing/generator/nuscenes_generator_modified.py
|
jovialio/CameraRadarFusionNet
|
31b2550694fa80ac2a30bd81875a089eff684c9c
|
[
"Apache-2.0"
] | null | null | null |
crfnet/data_processing/generator/nuscenes_generator_modified.py
|
jovialio/CameraRadarFusionNet
|
31b2550694fa80ac2a30bd81875a089eff684c9c
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017-2018 Fellfalla (https://github.com/Fellfalla/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Original Source: https://github.com/fizyr/keras-retinanet
"""
# Standard Libraries
import csv
import os
import sys
import math
# 3rd Party Libraries
import numpy as np
from PIL import Image
import cv2
import tensorflow as tf
import progressbar
# Local Libraries
# Allow relative imports when being executed as script.
if __name__ == "__main__" and not __package__:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..'))
import crfnet.data_processing.generator # noqa: F401
__package__ = "crfnet.data_processing.generator"
# Local imports
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import RadarPointCloud, Box
from nuscenes.utils.geometry_utils import box_in_image, view_points, BoxVisibility, points_in_box
from .generator import Generator
from ...utils import radar
from ...utils.nuscenes_helper import get_sensor_sample_data, calc_mask
from ...data_processing.fusion.fusion_projection_lines import create_imagep_visualization
from ...utils.noise_img import noisy
class NuscenesGenerator(Generator):
""" Generate data for a nuScenes dataset.
See www.nuscenes.org for more information.
"""
DATATYPE = np.float32
def __init__(
self,
nusc,
scene_indices=None,
channels=[0,1,2],
category_mapping=None,
radar_input_name=None,
radar_width=None,
image_radar_fusion=True,
camera_dropout=0.0,
radar_dropout=0.0,
normalize_radar=False,
sample_selection=False,
only_radar_annotated=False,
n_sweeps=1,
noise_filter=None,
noise_filter_threshold=0.5,
noisy_image_method=None,
noise_factor=0,
perfect_noise_filter=False,
noise_category_selection=None,
inference=False,
**kwargs
):
""" Initialize a nuScenes data generator.
: use param config for giving the arguments to this class
:param nusc: Object pointing at a nuscenes database
:param sample_indices: <int> Which samples to take from nusc database
:param channels: Which image and radar channels to use. Only in combination with
image_radar_fusion=True will return image_plus data. Otherwise the given channels
are split into radar_infeed and image_infeed.
:param category_mapping: <dict> dictionary between original classes and target classes.
Only classes given by this dict will be used for annotations. None for all categories.
:param radar_input_name: <str> name of the input_tensor for radar infeed into the nn
:param radar_width: width of the radar-data-array
:param image_radar_fusion: <bool> Determines if the data_generator performs the
default image_plus fusion.
"""
# Parameters
self.nusc = nusc
self.dropout_chance = 0.0
self.radar_sensors = ['RADAR_FRONT']
self.camera_sensors = ['CAM_FRONT']
self.labels = {}
self.image_data = dict()
self.classes, self.labels = self._get_class_label_mapping([c['name'] for c in nusc.category], category_mapping)
self.channels = channels
self.radar_channels = [ch for ch in channels if ch >= 3]
self.image_channels = [ch for ch in channels if ch < 3]
self.normalize_bbox = False # True for normalizing the bbox to [0,1]
self.radar_input_name = radar_input_name
self.radar_width = radar_width
self.radar_dropout = radar_dropout
self.camera_dropout = camera_dropout
self.sample_selection = sample_selection
self.only_radar_annotated = only_radar_annotated
self.n_sweeps = n_sweeps
self.noisy_image_method = noisy_image_method
self.noise_factor = noise_factor
self.cartesian_uncertainty = (0, 0, 0) # meters
self.angular_uncertainty = math.radians(0) # degree
self.inference = inference
#todo we cannot initialize the parent class first, because it depends on size()
self.image_min_side = kwargs['image_min_side']
self.image_max_side = kwargs['image_max_side']
# assign functions
self.image_radar_fusion = image_radar_fusion
self.normalize_radar = normalize_radar
# Optional imports
self.radar_array_creation = None
if self._is_image_plus_enabled() or self.camera_dropout > 0.0:
# Installing vizdom is required
from crfnet.data_processing.fusion.fusion_projection_lines import imageplus_creation, create_spatial_point_array
self.image_plus_creation = imageplus_creation
self.radar_array_creation = create_spatial_point_array
self.noise_filter_threshold = noise_filter_threshold
self.perfect_noise_filter = perfect_noise_filter
self.noise_category_selection = noise_category_selection
# TEST: Create immediately
if noise_filter and not isinstance(noise_filter, NfDockerClient):
raise NotImplementedError('Neural Filter not in opensource repository ')
else:
self.noise_filter = None
# Create all sample tokens
self.sample_tokens = {}
prog = 0
progbar = progressbar.ProgressBar(prefix='Initializing data generator: ')
skip_count = 0
# Resolve sample indexing
if scene_indices is None:
# We are using all scenes
scene_indices = range(len(nusc.scene))
assert hasattr(scene_indices, '__iter__'), "Iterable object containing sample indices expected"
for scene_index in scene_indices:
first_sample_token = nusc.scene[scene_index]['first_sample_token']
nbr_samples = nusc.scene[scene_index]['nbr_samples']
curr_sample = nusc.get('sample', first_sample_token)
for _ in range(nbr_samples):
self.sample_tokens[prog] = curr_sample['token']
if curr_sample['next']:
next_token = curr_sample['next']
curr_sample = nusc.get('sample', next_token)
prog += 1
progbar.update(prog)
if self.sample_selection: print("\nSkipped {} samples due to zero annotations".format(skip_count))
# Create all annotations and put into image_data
self.image_data = {image_index:None for image_index in self.sample_tokens}
# Finalize
super(NuscenesGenerator, self).__init__(**kwargs)
@staticmethod
def _get_class_label_mapping(category_names, category_mapping):
"""
:param category_mapping: [dict] Map from original name to target name. Subsets of names are supported.
e.g. {'pedestrian' : 'pedestrian'} will map all pedestrian types to the same label
:returns:
[0]: [dict of (str, int)] mapping from category name to the corresponding index-number
[1]: [dict of (int, str)] mapping from index number to category name
"""
# Initialize local variables
original_name_to_label = {}
original_category_names = category_names.copy()
original_category_names.append('bg')
if category_mapping is None:
# Create identity mapping and ignore no class
category_mapping = dict()
for cat_name in category_names:
category_mapping[cat_name] = cat_name
# List of unique class_names
selected_category_names = set(category_mapping.values()) # unordered
selected_category_names = list(selected_category_names)
selected_category_names.sort() # ordered
# Create the label to class_name mapping
label_to_name = { label:name for label, name in enumerate(selected_category_names)}
label_to_name[len(label_to_name)] = 'bg' # Add the background class
# Create original class name to label mapping
for label, label_name in label_to_name.items():
            # Looking for all the original names that are addressed by label name
            targets = [original_name for original_name in original_category_names if label_name in original_name]
            # Assigning the same label for all addressed targets
            for target in targets:
                # Check for ambiguity
                assert target not in original_name_to_label.keys(), 'ambiguous mapping found for (%s->%s)'%(target, label_name)
# Assign label to original name
# Some label_names will have the same label, which is totally fine
original_name_to_label[target] = label
# Check for correctness
actual_labels = original_name_to_label.values()
expected_labels = range(0, max(actual_labels)+1) # we want to start labels at 0
assert all([label in actual_labels for label in expected_labels]), 'Expected labels do not match actual labels'
return original_name_to_label, label_to_name
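    # Hand-worked illustration of the mapping above (added for reference, not executed):
    #   category_names   = ['human.pedestrian.adult', 'vehicle.car']
    #   category_mapping = {'human.pedestrian': 'pedestrian', 'vehicle.car': 'car'}
    #   first return value  -> {'vehicle.car': 0, 'human.pedestrian.adult': 1, 'bg': 2}
    #   second return value -> {0: 'car', 1: 'pedestrian', 2: 'bg'}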
def _is_image_plus_enabled(self):
"""
True if image radar fusion is enabled and
radar channels are requested.
"""
r = 0 in self.channels
g = 1 in self.channels
b = 2 in self.channels
return self.image_radar_fusion and len(self.channels) > r+g+b
def size(self):
""" Size of the dataset.
"""
return len(self.sample_tokens)
def num_classes(self):
""" Number of classes in the dataset.
"""
return len(self.labels)
def has_label(self, label):
""" Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
""" Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labels[label]
def inv_label_to_name(self, name):
""" Map name to label.
"""
class_dict = {y:x for x,y in self.labels.items()}
return class_dict[name]
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
All images of nuscenes dataset have the same aspect ratio which is 16/9
"""
# All images of nuscenes dataset have the same aspect ratio
return 16/9
# sample_token = self.sample_tokens[image_index]
# sample = self.nusc.get('sample', sample_token)
# image_sample = self.load_sample_data(sample, camera_name)
# return float(image_sample.shape[1]) / float(image_sample.shape[0])
def load_radar_array(self, sample_index, target_width):
# Initialize local variables
if not self.radar_array_creation:
from ..raw_data_fusion.fusion_projection_lines import create_spatial_point_array
self.radar_array_creation = create_spatial_point_array
radar_name = self.radar_sensors[0]
camera_name = self.camera_sensors[0]
        # Getting data from the nuscenes database
sample_token = self.sample_tokens[sample_index]
sample = self.nusc.get('sample', sample_token)
# Grab the front camera and the radar sensor.
radar_token = sample['data'][radar_name]
camera_token = sample['data'][camera_name]
image_target_shape = (self.image_min_side, self.image_max_side)
# Create the array
radar_sample = self.load_sample_data(sample, radar_name) # Load samples from disk
radar_array = self.radar_array_creation(self.nusc, radar_sample, radar_token, camera_token, target_width=target_width)
return radar_array
def set_noise_factor(self, noise_factor):
"""
This function turns off the noise factor: It is useful for rendering.
"""
self.noise_factor = noise_factor
def load_image(self, image_index):
"""
Returns the image plus from given image and radar samples.
It takes the requested channels into account.
:param sample_token: [str] the token pointing to a certain sample
:returns: imageplus
"""
# Initialize local variables
radar_name = self.radar_sensors[0]
camera_name = self.camera_sensors[0]
        # Getting data from the nuscenes database
sample_token = self.sample_tokens[image_index]
sample = self.nusc.get('sample', sample_token)
# Grab the front camera and the radar sensor.
radar_token = sample['data'][radar_name]
camera_token = sample['data'][camera_name]
image_target_shape = (self.image_min_side, self.image_max_side)
# Load the image
image_sample = self.load_sample_data(sample, camera_name)
# Add noise to the image if enabled
if self.noisy_image_method is not None and self.noise_factor>0:
image_sample = noisy(self.noisy_image_method, image_sample, self.noise_factor)
if self._is_image_plus_enabled() or self.camera_dropout > 0.0:
# Parameters
kwargs = {
'pointsensor_token': radar_token,
'camera_token': camera_token,
'height': (0, self.radar_projection_height),
'image_target_shape': image_target_shape,
'clear_radar': np.random.rand() < self.radar_dropout,
'clear_image': np.random.rand() < self.camera_dropout,
}
# Create image plus
# radar_sample = self.load_sample_data(sample, radar_name) # Load samples from disk
# Get filepath
if self.noise_filter:
required_sweep_count = self.n_sweeps + self.noise_filter.num_sweeps_required -1
else:
required_sweep_count = self.n_sweeps
# sd_rec = self.nusc.get('sample_data', sample['data'][sensor_channel])
sensor_channel = radar_name
pcs, times = RadarPointCloud.from_file_multisweep(self.nusc, sample, sensor_channel, \
sensor_channel, nsweeps=required_sweep_count, min_distance=0.0, merge=False)
if self.noise_filter:
# fill up with zero sweeps
for _ in range(required_sweep_count - len(pcs)):
pcs.insert(0, RadarPointCloud(np.zeros(shape=(RadarPointCloud.nbr_dims(), 0))))
radar_sample = [radar.enrich_radar_data(pc.points) for pc in pcs]
if self.noise_filter:
##### Filter the pcs #####
radar_sample = list(self.noise_filter.denoise(radar_sample, self.n_sweeps))
if len(radar_sample) == 0:
radar_sample = np.zeros(shape=(len(radar.channel_map),0))
else:
##### merge pcs into single radar samples array #####
radar_sample = np.concatenate(radar_sample, axis=-1)
radar_sample = radar_sample.astype(dtype=np.float32)
if self.perfect_noise_filter:
cartesian_uncertainty = 0.5 # meters
angular_uncertainty = math.radians(1.7) # degree
category_selection = self.noise_category_selection
nusc_sample_data = self.nusc.get('sample_data', radar_token)
radar_gt_mask = calc_mask(nusc=self.nusc, nusc_sample_data=nusc_sample_data, points3d=radar_sample[0:3,:], \
tolerance=cartesian_uncertainty, angle_tolerance=angular_uncertainty, \
category_selection=category_selection)
# radar_sample = radar_sample[:, radar_gt_mask.astype(np.bool)]
radar_sample = np.compress(radar_gt_mask, radar_sample, axis=-1)
if self.normalize_radar:
                # we need to normalize
                # : use a preprocess method analogous to image preprocessing
sigma_factor = int(self.normalize_radar)
for ch in range(3, radar_sample.shape[0]): # neural fusion requires x y and z to be not normalized
norm_interval = (-127.5,127.5) # caffee mode is default and has these norm interval for img
radar_sample[ch,:] = radar.normalize(ch, radar_sample[ch,:], normalization_interval=norm_interval, sigma_factor=sigma_factor)
img_p_full = self.image_plus_creation(self.nusc, image_data=image_sample, radar_data=radar_sample, **kwargs)
# reduce to requested channels
            #self.channels = [ch - 1 for ch in self.channels] # Shift channels by 1, because we have a weird convention starting at 1
input_data = img_p_full[:,:,self.channels]
else: # We are not in image_plus mode
# Only resize, because in the other case this is contained in image_plus_creation
input_data = cv2.resize(image_sample, image_target_shape[::-1])
return input_data
def load_sample_data(self, sample, sensor_channel):
"""
This function takes the token of a sample and a sensor sensor_channel and returns the according data
Radar format: <np.array>
- Shape: 18 x n
- Semantics: x y z dyn_prop id rcs vx vy vx_comp vy_comp is_quality_valid ambig_state x_rms y_rms invalid_state pdh0
Image format: <np.array>
- Shape: h x w x 3
- Values: [0,255]
- Channels: RGB
"""
return get_sensor_sample_data(self.nusc, sample, sensor_channel, dtype=np.float32, size=None)
def create_annotations(self, sample_token, sensor_channels):
"""
Create annotations for the the given sample token.
1 bounding box vector contains:
:param sample_token: the sample_token to get the annotation for
:param sensor_channels: list of channels for cropping the labels, e.g. ['CAM_FRONT', 'RADAR_FRONT']
This works only for CAMERA atm
:returns:
annotations dictionary:
{
'labels': [] # <list of n int>
'bboxes': [] # <list of n x 4 float> [xmin, ymin, xmax, ymax]
'distances': [] # <list of n float> Center of box given as x, y, z.
'visibilities': [] # <list of n float> Visibility of annotated object
}
"""
if any([s for s in sensor_channels if 'RADAR' in s]):
print("[WARNING] Cropping to RADAR is not supported atm")
sensor_channels = [c for c in sensor_channels if 'CAM' in sensor_channels]
sample = self.nusc.get('sample', sample_token)
annotations_count = 0
annotations = {
'labels': [], # <list of n int>
'bboxes': [], # <list of n x 4 float> [xmin, ymin, xmax, ymax]
'distances': [], # <list of n float> Center of box given as x, y, z.
'visibilities': [],
'num_radar_pts':[] #<list of n int> number of radar points that cover that annotation
}
# Camera parameters
for selected_sensor_channel in sensor_channels:
sd_rec = self.nusc.get('sample_data', sample['data'][selected_sensor_channel])
# Create Boxes:
_, boxes, camera_intrinsic = self.nusc.get_sample_data(sd_rec['token'], box_vis_level=BoxVisibility.ANY)
imsize_src = (sd_rec['width'], sd_rec['height']) # nuscenes has (width, height) convention
bbox_resize = [ 1. / sd_rec['height'], 1. / sd_rec['width'] ]
if not self.normalize_bbox:
bbox_resize[0] *= float(self.image_min_side)
bbox_resize[1] *= float(self.image_max_side)
# Create labels for all boxes that are visible
for box in boxes:
# Add labels to boxes
if box.name in self.classes:
box.label = self.classes[box.name]
# Check if box is visible and transform box to 1D vector
if box_in_image(box=box, intrinsic=camera_intrinsic, imsize=imsize_src, vis_level=BoxVisibility.ANY):
                    ## Points-in-box method for annotation filters
                    # check if the bounding box contains at least one radar point
if self.only_radar_annotated == 2:
pcs, times = RadarPointCloud.from_file_multisweep(self.nusc, sample, self.radar_sensors[0], \
selected_sensor_channel, nsweeps=self.n_sweeps, min_distance=0.0, merge=False)
for pc in pcs:
pc.points = radar.enrich_radar_data(pc.points)
if len(pcs) > 0:
radar_sample = np.concatenate([pc.points for pc in pcs], axis=-1)
else:
print("[WARNING] only_radar_annotated=2 and sweeps=0 removes all annotations")
radar_sample = np.zeros(shape=(len(radar.channel_map), 0))
radar_sample = radar_sample.astype(dtype=np.float32)
mask = points_in_box(box, radar_sample[0:3,:])
if True not in mask:
continue
# If visible, we create the corresponding label
box2d = box.box2d(camera_intrinsic) # returns [xmin, ymin, xmax, ymax]
box2d[0] *= bbox_resize[1]
box2d[1] *= bbox_resize[0]
box2d[2] *= bbox_resize[1]
box2d[3] *= bbox_resize[0]
annotations['bboxes'].insert(annotations_count, box2d)
annotations['labels'].insert(annotations_count, box.label)
annotations['num_radar_pts'].insert(annotations_count, self.nusc.get('sample_annotation', box.token)['num_radar_pts'])
distance = (box.center[0]**2 + box.center[1]**2 + box.center[2]**2)**0.5
annotations['distances'].insert(annotations_count, distance)
annotations['visibilities'].insert(annotations_count, int(self.nusc.get('sample_annotation', box.token)['visibility_token']))
annotations_count += 1
else:
# The current name has been ignored
pass
annotations['labels'] = np.array(annotations['labels'])
annotations['bboxes'] = np.array(annotations['bboxes'])
annotations['distances'] = np.array(annotations['distances'])
annotations['num_radar_pts'] = np.array(annotations['num_radar_pts'])
annotations['visibilities'] = np.array(annotations['visibilities'])
        # num_radar_pts method for annotation filter
if self.only_radar_annotated == 1:
anns_to_keep = np.where(annotations['num_radar_pts'])[0]
for key in annotations:
annotations[key] = annotations[key][anns_to_keep]
return annotations
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
annotations = self.image_data[image_index]
if annotations is None:
sample_token = self.sample_tokens[image_index]
annotations = self.create_annotations(sample_token, self.camera_sensors)
self.image_data[image_index] = annotations
return annotations
def compute_input_output(self, group, inference=False):
"""
Extends the basic function with the capability to
add radar input data to the input batch.
"""
inputs, targets = super(NuscenesGenerator, self).compute_input_output(group)
if self.radar_input_name:
# Load radar data
radar_input_batch = []
for sample_index in group:
radar_array = self.load_radar_array(sample_index, target_width=self.radar_width)
radar_input_batch.append(radar_array)
radar_input_batch = np.array(radar_input_batch)
inputs = {
'input_1': inputs,
self.radar_input_name: radar_input_batch
}
return inputs, targets
if __name__ == "__main__":
import cv2
import argparse
# Allow relative imports
from ...utils.anchor_calc import anchor_targets_bbox
from ...utils.anchor import guess_shapes, anchors_for_shape, compute_gt_annotations
from ...utils.anchor_parameters import AnchorParameters
from ...utils.image import preprocess_image, preprocess_image_inverted
from ...utils.config import get_config
from ...utils.visualization import draw_boxes
from ...utils.transform import random_transform_generator
from ...model import architectures
FILE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default=os.path.join(FILE_DIRECTORY, "../../configs/GeneratorTest.cfg"))
parser.add_argument('--debug', default=False, action='store_true')
parser.add_argument('--sample', type=int, default=0)
parser.add_argument('--bboxes', dest='bboxes', action='store_true')
parser.add_argument('--no-bboxes', dest='bboxes', action='store_false')
parser.set_defaults(bboxes=True)
args = parser.parse_args()
if not os.path.exists(args.config):
raise FileNotFoundError("ERROR: Config file \"%s\" not found"%(args.config))
else:
cfg = get_config(args.config)
if cfg.anchor_params:
if 'small' in cfg.anchor_params:
anchor_params = AnchorParameters.small
else:
anchor_params = None
else:
anchor_params = None
backbone = architectures.backbone(cfg.network)
common_args = {
'batch_size' : cfg.batchsize,
'config' : None,
'image_min_side' : cfg.image_size[0],
'image_max_side' : cfg.image_size[1],
'filter_annotations_enabled': False,
'preprocess_image' : backbone.preprocess_image,
'normalize_radar' : cfg.normalize_radar,
'camera_dropout' : cfg.dropout_image,
'radar_dropout' : cfg.dropout_radar,
'channels' : cfg.channels,
'distance' : cfg.distance_detection,
'sample_selection' : cfg.sample_selection,
'only_radar_annotated' : cfg.only_radar_annotated,
'n_sweeps' : cfg.n_sweeps,
'noise_filter' : cfg.noise_filter_cfg,
'noise_filter_threshold' : cfg.noise_filter_threshold,
'noisy_image_method' : cfg.noisy_image_method,
'noise_factor' : cfg.noise_factor,
'perfect_noise_filter' : cfg.noise_filter_perfect,
'radar_projection_height' : cfg.radar_projection_height,
'noise_category_selection' : None if cfg.class_weights is None else cfg.class_weights.keys(),
'inference' : cfg.inference,
'anchor_params' : anchor_params,
}
class_to_color = {
'bg': np.array([0, 0, 0])/255,
'human.pedestrian.adult': np.array([34, 114, 227]) / 255,
'vehicle.bicycle': np.array([0, 182, 0])/255,
'vehicle.bus': np.array([84, 1, 71])/255,
'vehicle.car': np.array([189, 101, 0]) / 255,
'vehicle.motorcycle': np.array([159, 157,156])/255,
'vehicle.trailer': np.array([0, 173, 162])/255,
'vehicle.truck': np.array([89, 51, 0])/255,
}
if 'mini' in cfg.data_set:
nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)
else:
try:
nusc = NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True)
except:
nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)
## Data Augmentation
transform_generator = random_transform_generator(
min_rotation=-0.1,
max_rotation=0.1,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
min_shear=-0.1,
max_shear=0.1,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
flip_y_chance=0.0,
)
data_generator = NuscenesGenerator(nusc,
scene_indices=None,
category_mapping=cfg.category_mapping,
#transform_generator=transform_generator,
shuffle_groups=False,
compute_anchor_targets=anchor_targets_bbox,
compute_shapes=guess_shapes,
**common_args)
if data_generator.noise_filter:
data_generator.noise_filter.render = True
i = args.sample
while i < len(data_generator):
print("Sample ", i)
# Get the data
inputs, targets = data_generator[i]
print(inputs.shape)
img = inputs[0]
img = preprocess_image_inverted(img)
ann = data_generator.load_annotations(i)
assert img.shape[0] == common_args['image_min_side']
assert img.shape[1] == common_args['image_max_side']
# assert img.shape[2] == len(common_args['channels'])
        # Turn data into visualizable format
viz = create_imagep_visualization(img, draw_circles=True, cfg=cfg, radar_lines_opacity=0.0)
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.4
lineType = 1
if args.debug:
## Positive Anchor Visualization
anchors = anchors_for_shape(viz.shape, anchor_params=anchor_params)
positive_indices, _, max_indices = compute_gt_annotations(anchors, ann['bboxes'])
draw_boxes(viz, anchors[positive_indices], (255, 255, 0), thickness=1)
## Data Augmentation
viz, ann = data_generator.random_transform_group_entry(viz, ann)
if args.bboxes:
for a in range(len(ann['bboxes'])):
label_name = data_generator.label_to_name(ann['labels'][a])
dist = ann['distances'][a]
if label_name in class_to_color:
color = class_to_color[label_name] * 255
else:
color = class_to_color['bg']
p1 = (int(ann['bboxes'][a][0]), int(ann['bboxes'][a][1])) # Top left
p2 = (int(ann['bboxes'][a][2]), int(ann['bboxes'][a][3])) # Bottom right
cv2.rectangle(viz,p1, p2, color,1)
textLabel = '{0}: {1:3.1f} {2}'.format(label_name.split('.', 1)[-1], dist, 'm')
(retval,baseLine) = cv2.getTextSize(textLabel, font, fontScale,1)
textOrg = p1
cv2.rectangle(viz, (textOrg[0] - 1,textOrg[1]+baseLine - 1), (textOrg[0]+retval[0] + 1, textOrg[1]-retval[1] - 1), color, -1)
cv2.putText(viz, textLabel, textOrg, cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255,255,255), 1)
# Visualize data
cv2.imshow("Nuscenes Data Visualization",viz)
# cv2.imwrite('./ground_truth_selected/' + str(i).zfill(4) +'.png', viz*255)
key = cv2.waitKey(0)
if key == ord('p'): #previous image
i = i-1
elif key == ord('s'):
print("saving image")
cv2.imwrite("saved_img.png", viz)
elif key == ord('n'):
print("%c -> jump to next scene"%key)
i = i+40
elif key == ord('m'):
print("%c -> jump to previous scene"%key)
i = i-40
elif key == ord('q'):
break
else:
i = i+1
i = max(i, 0)
| 40.972431
| 149
| 0.617354
|
68acccebfeb4ab6bb28cc2f928ce5deeaac989ab
| 2,044
|
py
|
Python
|
apps/site/tests/models/__init__.py
|
LocalGround/localground
|
aa5a956afe7a84a7763a3b23d62a9fd925831cd7
|
[
"Apache-2.0"
] | 9
|
2015-05-29T22:22:20.000Z
|
2022-02-01T20:39:00.000Z
|
apps/site/tests/models/__init__.py
|
LocalGround/localground
|
aa5a956afe7a84a7763a3b23d62a9fd925831cd7
|
[
"Apache-2.0"
] | 143
|
2015-01-22T15:03:40.000Z
|
2020-06-27T01:55:29.000Z
|
apps/site/tests/models/__init__.py
|
LocalGround/localground
|
aa5a956afe7a84a7763a3b23d62a9fd925831cd7
|
[
"Apache-2.0"
] | 5
|
2015-03-16T20:51:49.000Z
|
2017-02-07T20:48:49.000Z
|
# Abstract Classes:
from localground.apps.site.tests.models.abstract_base_tests import *
from localground.apps.site.tests.models.abstract_base_audit_tests import *
# Mixin Classes:
from localground.apps.site.tests.models.mixin_extents_tests import *
from localground.apps.site.tests.models.mixin_extras_tests import *
from localground.apps.site.tests.models.mixin_genericrelation_tests import *
from localground.apps.site.tests.models.mixin_media_tests import *
from localground.apps.site.tests.models.mixin_named_tests import *
from localground.apps.site.tests.models.mixin_objectpermissions_tests import *
from localground.apps.site.tests.models.mixin_point_tests import *
from localground.apps.site.tests.models.mixin_project_tests import *
# Model Classes:
from localground.apps.site.tests.models.audio_tests import *
from localground.apps.site.tests.models.datatype_tests import *
from localground.apps.site.tests.models.field_tests import *
from localground.apps.site.tests.models.dataset_tests import *
from localground.apps.site.tests.models.generic_association_tests import *
from localground.apps.site.tests.models.imageopts_tests import *
from localground.apps.site.tests.models.layer_tests import *
from localground.apps.site.tests.models.layout_tests import *
from localground.apps.site.tests.models.lookup_tests import *
from localground.apps.site.tests.models.mapimage_tests import *
from localground.apps.site.tests.models.marker_tests import *
from localground.apps.site.tests.models.overlay_source_tests import *
from localground.apps.site.tests.models.overlay_type_tests import *
from localground.apps.site.tests.models.permissions_tests import *
from localground.apps.site.tests.models.photo_tests import *
from localground.apps.site.tests.models.prints_tests import *
from localground.apps.site.tests.models.project_tests import *
from localground.apps.site.tests.models.styled_map_tests import *
from localground.apps.site.tests.models.tileset_tests import *
from localground.apps.site.tests.models.video_tests import *
| 56.777778
| 78
| 0.844912
|
d77efe902ecf1a21594cb9af8304703a8ae3ceab
| 1,834
|
py
|
Python
|
src/ihcWrappers/odtcParamsXml.py
|
F9R/ihcpmslib-wrappers
|
4a5e37ab2ecc8c8c1a8437992e45b9271ec18826
|
[
"BSD-2-Clause"
] | 1
|
2022-02-09T06:41:20.000Z
|
2022-02-09T06:41:20.000Z
|
src/ihcWrappers/odtcParamsXml.py
|
F9R/ihcpmslib-wrappers
|
4a5e37ab2ecc8c8c1a8437992e45b9271ec18826
|
[
"BSD-2-Clause"
] | null | null | null |
src/ihcWrappers/odtcParamsXml.py
|
F9R/ihcpmslib-wrappers
|
4a5e37ab2ecc8c8c1a8437992e45b9271ec18826
|
[
"BSD-2-Clause"
] | null | null | null |
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
if config['IHC']['ODTC'] != "True":
pass
import clr
clr.AddReference("IHC_PMS_Lib.Odtc")
from IHC_PMS_Lib.Odtc import OdtcParamsXml
from IHC_PMS_Lib.Odtc.MethodXML import MethodSet
class OdtcParamsXmlWrapper:
def __init__(self) -> None:
self.__px = OdtcParamsXml()
@property
def CSVSeparatorCharacter(self) -> chr:
return self.__px.CSVSeparatorCharacter
@CSVSeparatorCharacter.setter
def CSVSeparatorCharacter(self, value: chr):
self.__px.CSVSeparatorCharacter = value
@property
def DynamicPreMethodDuration(self) -> bool:
return self.__px.DynamicPreMethodDuration
@DynamicPreMethodDuration.setter
def DynamicPreMethodDuration(self, value: bool):
self.__px.DynamicPreMethodDuration = value
@property
def ExecuteMethodDataEvent(self) -> bool:
return self.__px.ExecuteMethodDataEvent
@ExecuteMethodDataEvent.setter
def ExecuteMethodDataEvent(self, value: bool):
self.__px.ExecuteMethodDataEvent = value
@property
def MatchLidTemperatures(self) -> bool:
return self.__px.MatchLidTemperatures
@MatchLidTemperatures.setter
def MatchLidTemperatures(self, value: bool):
self.__px.MatchLidTemperatures = value
@property
def MethodsXML(self) -> str:
ms = self.__px.MethodsXML
        if ms is not None:
return ms.GetXml()
else:
return None
@MethodsXML.setter
def MethodsXML(self, methodSet: str):
self.__px.MethodsXML = MethodSet.Deserialize(methodSet)
def GetParamsXml(self) -> str:
return self.__px.GetParamsXml()
def ImportMethodSetXml(self, path: str) -> None:
self.__px.MethodsXML = MethodSet.GetMethodSet(path)
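# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Shows how the wrapper above might be driven. It only uses properties and
# methods defined in this file and assumes pythonnet (clr) plus the
# IHC_PMS_Lib assemblies are installed and loadable; "methods.xml" is a
# hypothetical placeholder path. Kept as comments so nothing runs on import.
#
# params = OdtcParamsXmlWrapper()
# params.CSVSeparatorCharacter = ";"
# params.DynamicPreMethodDuration = True
# params.ImportMethodSetXml("methods.xml")   # hypothetical path
# print(params.GetParamsXml())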
| 27.787879
| 63
| 0.699564
|
36779b99e6f0e475db2a852c7beb25fd406b6236
| 786
|
py
|
Python
|
historical_exercises/api-scripts.exercises/tools.py
|
mvdbeek/bioblend-tutorial
|
4b5a254e05bbdc1c376eb4840167796374efce0e
|
[
"MIT"
] | 12
|
2015-07-07T13:24:55.000Z
|
2020-07-18T19:03:45.000Z
|
historical_exercises/api-scripts.exercises/tools.py
|
mvdbeek/bioblend-tutorial
|
4b5a254e05bbdc1c376eb4840167796374efce0e
|
[
"MIT"
] | 3
|
2015-11-01T20:54:05.000Z
|
2020-07-15T18:48:16.000Z
|
historical_exercises/api-scripts.exercises/tools.py
|
mvdbeek/bioblend-tutorial
|
4b5a254e05bbdc1c376eb4840167796374efce0e
|
[
"MIT"
] | 13
|
2015-07-06T08:27:22.000Z
|
2021-05-28T13:08:04.000Z
|
#!/usr/bin/env python
import sys
import json
import output
import requests
BASE_URL = 'http://localhost:8080'
# -----------------------------------------------------------------------------
def list_tools():
#EXERCISE: build params and include 'in_panel' : False
return requests.get( full_url, params=params )
def show_tool( tool_id ):
#EXERCISE: build params, including 'io_details' : True, and request
# -----------------------------------------------------------------------------
if __name__ == '__main__':
response = None
# if passed an id, show details for that; if no id: return info for all tools
if len( sys.argv ) == 1:
response = list_tools()
else:
response = show_tool( sys.argv[1] )
output.output_response( response )
| 28.071429
| 81
| 0.53944
|
2907bba8f353e903b570ef54ff810a4d42343ccf
| 1,277
|
py
|
Python
|
build/lib/simpleimagededupe/deduper.py
|
RubberClucky/SIDD
|
51e693946b5e71e315c4ca7dae0d8a9b8444fd21
|
[
"MIT"
] | null | null | null |
build/lib/simpleimagededupe/deduper.py
|
RubberClucky/SIDD
|
51e693946b5e71e315c4ca7dae0d8a9b8444fd21
|
[
"MIT"
] | 1
|
2022-03-07T00:13:01.000Z
|
2022-03-07T00:13:01.000Z
|
build/lib/simpleimagededupe/deduper.py
|
RubberClucky/SIDD
|
51e693946b5e71e315c4ca7dae0d8a9b8444fd21
|
[
"MIT"
] | null | null | null |
import cv2
import os
from alive_progress import alive_bar
def CompareImage(original, dupe) -> bool:
originalImg = cv2.imread(original)
dupeImg = cv2.imread(dupe)
    if originalImg.shape[:2] == dupeImg.shape[:2]:
        difference = cv2.subtract(originalImg, dupeImg)
        b, g, r = cv2.split(difference)
        # identical only if every colour channel of the difference is all zeros
        if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
            return True
        else:
            return False
    # images with different dimensions cannot be pixel-wise duplicates
    return False
def DedupeImageDir(directory):
dupeList = []
dirList = os.listdir(directory)
i = 1
for current in dirList:
print(f"Processing: {current} .. {i}/{len(dirList) - len(dupeList)} (Minus Duplicates)")
othersList = [x for x in dirList if x != current]
        with alive_bar(len(othersList)) as bar:
if current in dupeList:
print("Skipping, this is a duplicate.")
continue
for suspect in othersList:
currentPath = os.path.join(directory,current)
suspectPath = os.path.join(directory,suspect)
result = CompareImage(currentPath, suspectPath)
if result == True:
dupeList.append(suspect)
bar()
i=i+1
return dupeList
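# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Calls DedupeImageDir on a folder of images; the directory name is a
# hypothetical placeholder. Only functions defined in this file are used.
if __name__ == "__main__":
    duplicates = DedupeImageDir("./images")  # hypothetical directory
    print(f"Found {len(duplicates)} duplicate image(s): {duplicates}")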
| 35.472222
| 96
| 0.584965
|
55a8224b96922d317963eb09e19c0d4868f43353
| 4,156
|
py
|
Python
|
s4_random_forest.py
|
BillChan226/In-Field-Crop-Disease-Regnition-via-Domain-Adaptation
|
4500e7149a51eab66778471750b84b09d415e578
|
[
"MIT"
] | 1
|
2021-12-23T17:56:22.000Z
|
2021-12-23T17:56:22.000Z
|
s4_random_forest.py
|
BillChan226/In-Field-Crop-Disease-Regnition-via-Domain-Adaptation
|
4500e7149a51eab66778471750b84b09d415e578
|
[
"MIT"
] | null | null | null |
s4_random_forest.py
|
BillChan226/In-Field-Crop-Disease-Regnition-via-Domain-Adaptation
|
4500e7149a51eab66778471750b84b09d415e578
|
[
"MIT"
] | null | null | null |
import numpy as np
import skimage.io
from sklearn import svm
import os
# read data
train_path = '.\\data\\train'
test_path = '.\\data\\test'
#%% train data
train_data = []
train_label = []
for x in os.listdir(os.path.join(train_path, 'img')):
img_path = os.path.join(train_path, 'img', x)
ano_path = os.path.join(train_path, 'imgAno', x)
img = skimage.io.imread(img_path)
img_ano = skimage.io.imread(ano_path, 1)
    # normalize to [0, 1]
img = img.astype(np.float)/255
img_ano = img_ano.astype(np.float)/255
img_ano[img_ano > 0] = 1
for i in range(0, 512):
for j in range(0, 512):
train_data.append(img[i, j, :])
train_label.append(img_ano[i, j])
train_data = np.array(train_data)
train_label = np.array(train_label)
# shuffle data
train = np.hstack((train_data, train_label.reshape(-1, 1)))
np.random.shuffle(train)
train_data = train[:, 0:3]
train_label = train[:, 3]
#%% test data
test_data = []
test_label = []
for i in range(20):
img_path = os.path.join(test_path, 'img', str(i) + '.png')
ano_path = os.path.join(test_path, 'imgAno', str(i) + '.png')
img = skimage.io.imread(img_path)
img_ano = skimage.io.imread(ano_path, 1)
    # normalize to [0, 1]
img = img.astype(np.float)/255
img_ano = img_ano.astype(np.float)/255
img_ano[img_ano > 0] = 1
for i in range(0, 512):
for j in range(0, 512):
test_data.append(img[i, j, :])
test_label.append(img_ano[i, j])
test_data = np.array(test_data)
test_label = np.array(test_label)
#%% segmentation using random forest
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=10, verbose = True)
clf.fit(train_data, train_label)
#%%
y_pred = clf.predict(test_data)
#%%
acc = np.sum((y_pred == test_label))/test_label.size
print(acc)
#%%
for i in range(0, 5242880, 262144):
print(np.sum((y_pred[i:i+262144] == test_label[i:i+262144]))/262144)
#%%
# three accuracy metrics: IoU, Dice and pixel accuracy
def iu_acc(y_true, y_pred, th = 0.5):
smooth = 1e-12
y_ = y_pred.copy()
y_[y_ <= th] = 0
y_[y_ > th] = 1
inter = np.sum(y_ * y_true)
sum_ = np.sum(y_true + y_)
return inter / (sum_ - inter + smooth)
def dice_acc(y_true, y_pred, th = 0.5):
smooth = 1e-12
y_ = y_pred.copy()
y_[y_ <= th] = 0
y_[y_ > th] = 1
inter = np.sum(y_ * y_true)
sum_ = np.sum(y_true + y_)
return 2 * inter / (sum_ + smooth)
def pixel_acc(y_true, y_pred, th = 0.5):
y_ = y_pred.copy()
y_[y_ <= th] = 0
y_[y_ > th] = 1
inter = np.sum(y_ * y_true)
inter2 = np.sum((1 - y_) * (1 - y_true))
return (inter + inter2) / (np.size(y_, 0) * np.size(y_, 1))
def pre_recall(y_true, y_pred, th = 0.5):
y_ = y_pred.copy()
y_[y_ <= th] = 0
y_[y_ > th] = 1
TP = np.sum(y_ * y_true)
FP = np.sum(y_ * (1 - y_true))
FN = np.sum(y_true * (1 - y_))
P = TP/(TP+FP)
R = TP/(TP+FN)
return P, R
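#%% quick sanity check of the metrics above (added illustration, not in the
# original script). With y_true = [1, 1, 0, 0] and y_pred = [1, 0, 0, 0] the
# intersection is 1 and the union is 2, so IoU ~= 0.5, Dice = 2*1/3 ~= 0.67,
# precision = 1.0 and recall = 0.5. numpy is already imported as np above.
_demo_true = np.array([1., 1., 0., 0.])
_demo_pred = np.array([1., 0., 0., 0.])
print('demo iu   :', iu_acc(_demo_true, _demo_pred))      # ~0.5
print('demo dice :', dice_acc(_demo_true, _demo_pred))    # ~0.667
print('demo P, R :', pre_recall(_demo_true, _demo_pred))  # (1.0, 0.5)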
#%%
# iu
print('iu:')
for i in range(0, 5242880, 262144):
yy_pred = y_pred[i:i+262144]
yy_label = test_label[i:i+262144]
print(iu_acc(yy_label, yy_pred))
# dice
print('dice:')
for i in range(0, 5242880, 262144):
yy_pred = y_pred[i:i+262144]
yy_label = test_label[i:i+262144]
print(dice_acc(yy_label, yy_pred))
# pixel
print('pixel:')
for i in range(0, 5242880, 262144):
print(np.sum((y_pred[i:i+262144] == test_label[i:i+262144]))/262144)
#%%
print('pre')
# pre recall
for i in range(0, 5242880, 262144):
yy_pred = y_pred[i:i+262144]
yy_label = test_label[i:i+262144]
ans = pre_recall(yy_label, yy_pred)
print(ans[0])
#%%
print('recall')
# pre recall
for i in range(0, 5242880, 262144):
yy_pred = y_pred[i:i+262144]
yy_label = test_label[i:i+262144]
ans = pre_recall(yy_label, yy_pred)
print(ans[1])
#%% save the predicted segmentation images
import matplotlib.pyplot as plt
test_result = y_pred.reshape(20, 512, 512)
for i in range(20):
plt.figure()
plt.imshow(test_result[i, :, :], cmap='gray')
plt.imsave('.\\RF_image\\' + str(i) + '.png', test_result[i, :, :], cmap='gray')
#%%
for i in range(20):
plt.figure()
plt.imshow(test_data.reshape(20, 512,512,3)[19 - i,:,:,:])
| 25.813665
| 84
| 0.615014
|
d7936dc8974beb7400be232170976c12da20fea8
| 10,223
|
py
|
Python
|
montlake/plotting/flasso.py
|
sjkoelle/montlake
|
b908a43e0c00763bd1cf86120eaa6bdf7d8d1196
|
[
"Apache-2.0"
] | 8
|
2021-11-24T19:39:24.000Z
|
2021-12-03T01:30:14.000Z
|
montlake/plotting/flasso.py
|
sjkoelle/montlake
|
b908a43e0c00763bd1cf86120eaa6bdf7d8d1196
|
[
"Apache-2.0"
] | null | null | null |
montlake/plotting/flasso.py
|
sjkoelle/montlake
|
b908a43e0c00763bd1cf86120eaa6bdf7d8d1196
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/plotting.flasso.ipynb (unless otherwise specified).
__all__ = ['width', 'plot_cos_boxes', 'plot_reg_path_ax_lambdasearch_customcolors_names',
'plot_reg_path_ax_lambdasearch_customcolors_tslasso', 'plot_reg_path_ax_lambdasearch_customcolors_norm',
'plot_watch_custom', 'plot_watch', 'plot_reg_path_ax_lambdasearch_customcolors']
# Cell
import numpy as np
from matplotlib import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt
import math
import seaborn as sns
from collections import OrderedDict
from matplotlib.patches import Rectangle
rcParams['figure.figsize'] = 25, 10
def width(p,w):
if p > 1.:
output = 10**(np.log10(p)+w/2.)-10**(np.log10(p)-w/2.)
else:
output = w
return(output)
def plot_cos_boxes(sup_sel, names, col, sel, d , nreps, axarr):
sns.heatmap(col, yticklabels = names, xticklabels = names, ax = axarr, vmin = 0., vmax = 1.)
cbar = axarr.collections[0].colorbar
cbar.ax.tick_params(labelsize=40)
axarr.set_xticklabels(axarr.get_xmajorticklabels(), fontsize = 60, rotation = 90)
axarr.set_yticklabels(axarr.get_ymajorticklabels(), fontsize = 60, rotation = 90)
if d == 2:
for r in range(nreps):
pos1 = np.where(sel == sup_sel[r,1])[0]
pos2 = np.where(sel == sup_sel[r,0])[0]
axarr.add_patch(Rectangle((pos1, pos2), 1, 1,facecolor = [0,0,1,0.], hatch = '/',fill= True, edgecolor='green', lw=5))
axarr.add_patch(Rectangle((pos2, pos1), 1, 1,facecolor = [0,0,1,0.], hatch = '/',fill= True, edgecolor='green', lw=5))
def plot_reg_path_ax_lambdasearch_customcolors_names(axes, coeffs, xaxis, fig, colors, names):
p = coeffs.shape[3]
q = coeffs.shape[1]
gnames = np.asarray(list(range(p)), dtype=str)
rcParams['axes.titlesize'] = 30
plt.rc('text', usetex=True)
normax = np.sqrt(np.sum(np.sum(np.sum(coeffs ** 2, axis=1), axis=1), axis=1).max())
for k in range(q):
for j in range(p):
toplot = np.linalg.norm(coeffs[:, k, :, j], axis=1)
w = .15
widths = np.asarray([width(xaxis[i], w) for i in range(len(xaxis))])
axes[k + 1].plot(xaxis, toplot, 'go--', linewidth=10, markersize=0, alpha=1.,
color=colors[j], label=gnames[j])
for j in range(p):
toplot = np.linalg.norm(np.linalg.norm(coeffs[:, :, :, j], axis=2), axis=1)
axes[0].plot(xaxis, toplot, 'go--', linewidth=10, markersize=0, alpha=.5,
color=colors[j], label=gnames[j])
# xax = xaxis.copy()
# xax.sort()
for k in range(1 + q):
axes[k].tick_params(labelsize=80)
axes[k].set_yscale('symlog')
axes[k].set_ylim(bottom=0, top=normax)
if (k == 0):
tixx = np.hstack(
[np.asarray([0]), 10 ** np.linspace(math.floor(np.log10(normax)), math.floor(np.log10(normax)) + 1, 2)])
if k != 0:
axes[k].set_yticklabels([])
if k != q:
axes[k + 1].set_title(names[k], fontsize=100)
if k == 0:
axes[k].set_title("Combined", fontdict={'fontsize': 140})
for k in range(1 + q):
axes[k].grid(True, which="both", alpha=True)
axes[k].set_xlabel(r"$\lambda$", fontsize=140)
axes[0].set_ylabel(r"$||\beta_j||$", fontsize=140)
def plot_reg_path_ax_lambdasearch_customcolors_tslasso(axes, coeffs, xaxis, fig, colors, names):
p = coeffs.shape[3]
q = coeffs.shape[1]
gnames = np.asarray(list(range(p)), dtype=str)
rcParams['axes.titlesize'] = 30
plt.rc('text', usetex=True)
normax = np.sqrt(np.sum(np.sum(np.sum(coeffs ** 2, axis=1), axis=1), axis=1).max())
for j in range(p):
toplot = np.linalg.norm(np.linalg.norm(coeffs[:, :, :, j], axis=2), axis=1)
axes.plot(xaxis, toplot, 'go--', linewidth=10, markersize=0, alpha=.5,
color=colors[j], label=gnames[j])
xax = xaxis.copy()
xax.sort()
axes.tick_params(labelsize=50)
axes.set_yscale('symlog')
axes.set_ylim(bottom=0, top=normax)
tixx = np.hstack(
[np.asarray([0]), 10 ** np.linspace(math.floor(np.log10(normax)), math.floor(np.log10(normax)) + 1, 2)])
axes.set_title("Combined", fontdict={'fontsize': 70})
axes.grid(True, which="both", alpha=True)
axes.set_xlabel(r"$\lambda$", fontsize=50)
axes.set_ylabel(r"$||\beta_j||$", fontsize=50)
def plot_reg_path_ax_lambdasearch_customcolors_norm(ax, coeffs, xaxis, fig, colors):
p = coeffs.shape[3]
q = coeffs.shape[1]
gnames = np.asarray(list(range(p)), dtype=str)
rcParams['axes.titlesize'] = 30
plt.rc('text', usetex=True)
normax = np.sqrt(np.sum(np.sum(np.sum(coeffs ** 2, axis=1), axis=1), axis=1).max())
for j in range(p):
toplot = np.linalg.norm(np.linalg.norm(coeffs[:, :, :, j], axis=2), axis=1)
ax.plot(xaxis, toplot, 'go--', linewidth=5, markersize=0, alpha=1.,
color=colors[j], label=gnames[j])
xax = xaxis.copy()
xax.sort()
ax.tick_params(labelsize=80)
ax.set_yscale('symlog')
ax.set_ylim(bottom=0, top=normax)
tixx = np.hstack(
[np.asarray([0]), 10 ** np.linspace(math.floor(np.log10(normax)), math.floor(np.log10(normax)) + 1, 2)])
ax.grid(True, which="both", alpha=True)
def plot_watch_custom(to_plot, p, ax, colors,nreps, names = None, s=.1, fontsize = 100):
if names is None:
names = np.asarray(list(range(p)), dtype = str)
theta = np.linspace(0, 2 * np.pi, 10000)
cmap = plt.get_cmap('twilight_shifted', p)
angles = np.linspace(0, 2 * np.pi, p + 1)
radius = 1.
a = radius * np.cos(theta)
b = radius * np.sin(theta)
ax.scatter(a, b, color='gray', s = .2, alpha=.1)
if len(to_plot.shape) > 1:
totes = np.sum(to_plot, axis=0)
else:
totes = to_plot
for j in range(p):
nm = names[j]
ax.scatter(np.cos(angles[j]), np.sin(angles[j]), color=cmap.colors[j], marker='x')
ax.text(x=1.15 * np.cos(angles[j]),
y=1.15 * np.sin(angles[j]),
s=r"$g_{{{}}}$".format(nm), color=colors[j], # cmap.colors[j],
fontdict={'fontsize': fontsize},
horizontalalignment='center',
verticalalignment='center')
for j in range(p):
ax.scatter(np.cos(angles[j]), np.sin(angles[j]), color=colors[j], marker='o', s= s* 1500 * totes[j])
if len(to_plot.shape) > 1:
for i in range(p):
for j in range(p):
x_values = [np.cos(angles[j]), np.cos(angles[i])]
y_values = [np.sin(angles[j]), np.sin(angles[i])]
ax.plot(x_values, y_values, linewidth=to_plot[i, j] * 8*s, color='black')
ax.set_aspect(1)
ax.set_axis_off()
def plot_watch(to_plot, names, colors, ax,nreps):
p = to_plot.shape[0]
theta = np.linspace(0, 2 * np.pi, 10000)
angles = np.linspace(0, 2 * np.pi, p + 1)
radius = 1.
a = radius * np.cos(theta)
b = radius * np.sin(theta)
ax.scatter(a, b, color='gray', s=.2,
alpha=.1)
if len(to_plot.shape) > 1:
totes = np.sum(to_plot, axis=0)
else:
totes = to_plot
for j in range(p):
ax.scatter(np.cos(angles[j]), np.sin(angles[j]), color=colors[j], marker='x')
ax.text(x=1.2 * np.cos(angles[j]),
y=1.2 * np.sin(angles[j]),
s=names[j], color=colors[j],
fontdict={'fontsize': 100},
horizontalalignment='center',
verticalalignment='center')
for j in range(p):
ax.scatter(np.cos(angles[j]), np.sin(angles[j]), color=colors[j], marker='o', s=100 * totes[j])
if len(to_plot.shape) > 1:
for i in range(p):
for j in range(p):
x_values = [np.cos(angles[j]), np.cos(angles[i])]
y_values = [np.sin(angles[j]), np.sin(angles[i])]
ax.plot(x_values, y_values, linewidth=to_plot[i, j], color='black')
ax.set_aspect(1)
ax.set_axis_off()
def plot_reg_path_ax_lambdasearch_customcolors(axes, coeffs, xaxis,fig, colors,gnames):
p = coeffs.shape[3]
q = coeffs.shape[1]
rcParams['axes.titlesize'] = 30
plt.rc('text', usetex=True)
normax = np.sqrt(np.sum(np.sum(np.sum(coeffs ** 2, axis=1), axis=1), axis=1).max())
for k in range(q):
for j in range(p):
toplot = np.linalg.norm(coeffs[:, k, :, j], axis=1)
w = .15
widths = np.asarray([width(xaxis[i], w) for i in range(len(xaxis))])
axes[k + 1].plot(xaxis, toplot, 'go--', linewidth=5, markersize=0, alpha=1.,
color=colors[j], label=gnames[j])
for j in range(p):
toplot = np.linalg.norm(np.linalg.norm(coeffs[:, :, :, j], axis=2), axis=1)
axes[0].plot(xaxis, toplot, 'go--', linewidth=5, markersize=0, alpha=1.,
color=colors[j], label=gnames[j])
xax = xaxis.copy()
xax.sort()
for k in range(1 + q):
axes[k].tick_params(labelsize=50)
axes[k].set_yscale('symlog')
axes[k].set_ylim(bottom=0, top=normax)
if (k == 0):
tixx = np.hstack(
[np.asarray([0]), 10 ** np.linspace(math.floor(np.log10(normax)), math.floor(np.log10(normax)) + 1, 2)])
if k != 0:
axes[k].set_yticklabels([])
if k != q:
axes[k+1].set_title(r"$\phi_{{{}}}$".format(k+1), fontsize = 100)
if k == 0:
axes[k].set_title("Combined", fontdict={'fontsize': 100})
for k in range(1 + q):
axes[k].grid(True, which="both", alpha=True)
axes[k].set_xlabel(r"$\lambda$", fontsize = 100)
axes[0].set_ylabel(r"$\|\beta\|$", fontsize = 50)
handles, labels = axes[0].get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
fig.subplots_adjust(right=0.85)
leg_ax = fig.add_axes([.9, 0.15, 0.05, 0.7])
leg_ax.axis('off')
leg = leg_ax.legend(by_label.values(), gnames, prop={'size': 600 / p})
for l in leg.get_lines():
l.set_alpha(1)
leg_ax.set_title("$g_{j}$", fontsize = 1500/p)
| 36.641577
| 130
| 0.579575
|
b2de00cacc8ea90e298aa46982bdf1a4bab4b5ae
| 3,125
|
py
|
Python
|
src/mp_api/core/models.py
|
munrojm/api
|
478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
src/mp_api/core/models.py
|
munrojm/api
|
478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
src/mp_api/core/models.py
|
munrojm/api
|
478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from typing import Dict, List, Optional, Union, Tuple
from pydantic import BaseModel, Field
from pymatgen import Element
Vector3D = Tuple[float, float, float]
class Lattice(BaseModel):
"""
A lattice object represented as a 3x3 matrix of floats in Angstroms
"""
a: float = Field(..., title="*a* lattice parameter")
    alpha: int = Field(..., title="Angle between b and c lattice vectors")
b: float = Field(..., title="b lattice parameter")
beta: int = Field(..., title="Angle between a and c lattice vectors")
c: float = Field(..., title="c lattice parameter")
    gamma: int = Field(..., title="Angle between a and b lattice vectors")
volume: float = Field(..., title="Lattice volume")
matrix: Tuple[Vector3D, Vector3D, Vector3D] = Field(
..., description="Matrix representation of this lattice"
)
class Specie(BaseModel):
"""
An extension of Element with an oxidation state and other optional
properties. Properties associated with Specie should be "idealized"
values, not calculated values. For example, high-spin Fe2+ may be
assigned an idealized spin of +5, but an actual Fe2+ site may be
calculated to have a magmom of +4.5. Calculated properties should be
assigned to Site objects, and not Specie.
"""
symbol: str = Field(..., title="Element Symbol")
oxidation_state: float = Field(..., title="Oxidation State")
properties: Optional[Dict] = Field(..., title="Species Properties")
Composition = Dict[Element, float]
class SiteSpecie(Specie):
"""
Adds site occupation to Species
"""
occu: float = Field(..., title="Occupation")
class SiteElement(BaseModel):
"""
Elements defined on site with an occupation
"""
element: Element = Field(..., title="Element")
occu: float = Field(..., title="Occupation")
class Site(BaseModel):
"""
A generalized *non-periodic* site. This is essentially a composition
at a point in space, with some optional properties associated with it. A
Composition is used to represent the atoms and occupancy, which allows for
disordered site representation. Coords are given in standard cartesian
coordinates.
"""
species: List[Union[SiteElement, SiteSpecie]] = Field(..., title="Species")
xyz: Tuple[float, float, float] = Field(..., title="Cartesian Coordinates")
label: str = Field(..., title="Label")
properties: Optional[Dict] = Field(None, title="Properties")
class PeriodicSite(Site):
"""
A generalized *periodic* site. This adds on fractional coordinates within the
lattice to the generalized Site model
"""
abc: Tuple[float, float, float] = Field(..., title="Fractional Coordinates")
class Structure(BaseModel):
"""
Basic Structure object with periodicity. Essentially a sequence
of Sites having a common lattice and a total charge.
"""
charge: Optional[float] = Field(None, title="Total charge")
lattice: Lattice = Field(..., title="Lattice for this structure")
sites: List[PeriodicSite] = Field(..., title="List of sites in this structure")
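# --- Hedged example (added for illustration, not part of the original module) ---
# Builds a minimal cubic lattice with the Lattice model above. The numbers are
# made-up placeholder values chosen only to satisfy the field definitions, and
# the sketch assumes the module's own imports (pydantic v1, pymatgen) resolve.
if __name__ == "__main__":
    cubic = Lattice(
        a=4.0, b=4.0, c=4.0,
        alpha=90, beta=90, gamma=90,
        volume=64.0,
        matrix=((4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (0.0, 0.0, 4.0)),
    )
    print(cubic.json())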
| 32.216495
| 83
| 0.6784
|
1e7dec77fd6ca6c217f4558ba020e0cf37099357
| 3,124
|
py
|
Python
|
Lib/site-packages/prompt_toolkit/input/posix_pipe.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 4,028
|
2015-01-02T16:31:38.000Z
|
2018-10-25T14:51:02.000Z
|
Lib/site-packages/prompt_toolkit/input/posix_pipe.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 688
|
2015-01-02T18:02:49.000Z
|
2018-10-25T19:57:02.000Z
|
Lib/site-packages/prompt_toolkit/input/posix_pipe.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 375
|
2015-01-02T17:57:09.000Z
|
2018-10-23T09:52:01.000Z
|
import sys
assert sys.platform != "win32"
import os
from contextlib import contextmanager
from typing import ContextManager, Iterator, TextIO, cast
from ..utils import DummyContext
from .base import PipeInput
from .vt100 import Vt100Input
__all__ = [
"PosixPipeInput",
]
class _Pipe:
"Wrapper around os.pipe, that ensures we don't double close any end."
def __init__(self) -> None:
self.read_fd, self.write_fd = os.pipe()
self._read_closed = False
self._write_closed = False
def close_read(self) -> None:
"Close read-end if not yet closed."
if self._read_closed:
return
os.close(self.read_fd)
self._read_closed = True
def close_write(self) -> None:
"Close write-end if not yet closed."
if self._write_closed:
return
os.close(self.write_fd)
self._write_closed = True
def close(self) -> None:
"Close both read and write ends."
self.close_read()
self.close_write()
class PosixPipeInput(Vt100Input, PipeInput):
"""
    Input that is sent through a pipe.
This is useful if we want to send the input programmatically into the
application. Mostly useful for unit testing.
Usage::
with PosixPipeInput.create() as input:
input.send_text('inputdata')
"""
_id = 0
def __init__(self, _pipe: _Pipe, _text: str = "") -> None:
# Private constructor. Users should use the public `.create()` method.
self.pipe = _pipe
class Stdin:
encoding = "utf-8"
def isatty(stdin) -> bool:
return True
def fileno(stdin) -> int:
return self.pipe.read_fd
super().__init__(cast(TextIO, Stdin()))
self.send_text(_text)
# Identifier for every PipeInput for the hash.
self.__class__._id += 1
self._id = self.__class__._id
@classmethod
@contextmanager
def create(cls, text: str = "") -> Iterator["PosixPipeInput"]:
pipe = _Pipe()
try:
yield PosixPipeInput(_pipe=pipe, _text=text)
finally:
pipe.close()
def send_bytes(self, data: bytes) -> None:
os.write(self.pipe.write_fd, data)
def send_text(self, data: str) -> None:
"Send text to the input."
os.write(self.pipe.write_fd, data.encode("utf-8"))
def raw_mode(self) -> ContextManager[None]:
return DummyContext()
def cooked_mode(self) -> ContextManager[None]:
return DummyContext()
def close(self) -> None:
"Close pipe fds."
# Only close the write-end of the pipe. This will unblock the reader
# callback (in vt100.py > _attached_input), which eventually will raise
# `EOFError`. If we'd also close the read-end, then the event loop
# won't wake up the corresponding callback because of this.
self.pipe.close_write()
def typeahead_hash(self) -> str:
"""
This needs to be unique for every `PipeInput`.
"""
return f"pipe-input-{self._id}"
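# --- Hedged usage sketch (added; mirrors the class docstring above) ---------
# Typical test usage: create the pipe input, push scripted keystrokes, and hand
# it to the application or prompt under test via its `input=` argument. Kept as
# comments because wiring up a real prompt_toolkit session is outside the scope
# of this module.
#
# with PosixPipeInput.create(text="first line\n") as pipe_input:
#     pipe_input.send_text("second line\n")
#     # ... pass `pipe_input` as the input of the session/application under test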
| 26.700855
| 79
| 0.611076
|
789899f8b411ad8b89428f46a7f558963db5746f
| 4,048
|
py
|
Python
|
third_party/pyjson5/src/json5/fakes/host_fake.py
|
frontend-arch/devtools-frontend
|
907a14259687036ceac26b301d4fc9965327ec11
|
[
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
third_party/pyjson5/src/json5/fakes/host_fake.py
|
frontend-arch/devtools-frontend
|
907a14259687036ceac26b301d4fc9965327ec11
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
third_party/pyjson5/src/json5/fakes/host_fake.py
|
frontend-arch/devtools-frontend
|
907a14259687036ceac26b301d4fc9965327ec11
|
[
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import sys
if sys.version_info[0] < 3:
# pylint: disable=redefined-builtin
str = unicode
class FakeHost(object):
# "too many instance attributes" pylint: disable=R0902
# "redefining built-in" pylint: disable=W0622
# "unused arg" pylint: disable=W0613
python_interpreter = 'python'
def __init__(self):
self.stdin = io.StringIO()
self.stdout = io.StringIO()
self.stderr = io.StringIO()
self.platform = 'linux2'
self.sep = '/'
self.dirs = set([])
self.files = {}
self.written_files = {}
self.last_tmpdir = None
self.current_tmpno = 0
self.cwd = '/tmp'
def abspath(self, *comps):
relpath = self.join(*comps)
if relpath.startswith('/'):
return relpath
return self.join(self.cwd, relpath)
def chdir(self, *comps): # pragma: no cover
path = self.join(*comps)
if not path.startswith('/'):
path = self.join(self.cwd, path)
self.cwd = path
def dirname(self, path):
return '/'.join(path.split('/')[:-1])
def fileinput(self, files=None):
if files:
for f in files:
for l in self.read_text_file(f).splitlines():
yield l
else:
for l in self.stdin.readlines():
yield l
def getcwd(self):
return self.cwd
def join(self, *comps): # pragma: no cover
p = ''
for c in comps:
if c in ('', '.'):
continue
elif c.startswith('/'):
p = c
elif p:
p += '/' + c
else:
p = c
# Handle ./
p = p.replace('/./', '/')
# Handle ../
while '/..' in p:
comps = p.split('/')
idx = comps.index('..')
comps = comps[:idx-1] + comps[idx+1:]
p = '/'.join(comps)
return p
def maybe_mkdir(self, *comps): # pragma: no cover
path = self.abspath(self.join(*comps))
if path not in self.dirs:
self.dirs.add(path)
def mkdtemp(self, suffix='', prefix='tmp', dir=None, **_kwargs):
if dir is None:
dir = self.sep + '__im_tmp'
curno = self.current_tmpno
self.current_tmpno += 1
self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
self.dirs.add(self.last_tmpdir)
return self.last_tmpdir
def print_(self, msg=u'', end=u'\n', stream=None):
stream = stream or self.stdout
stream.write(str(msg) + str(end))
stream.flush()
def read_text_file(self, *comps):
return self._read(comps)
def _read(self, comps):
return self.files[self.abspath(*comps)]
def remove(self, *comps):
path = self.abspath(*comps)
self.files[path] = None
self.written_files[path] = None
def rmtree(self, *comps):
path = self.abspath(*comps)
for f in self.files:
if f.startswith(path):
self.remove(f)
self.dirs.remove(path)
def write_text_file(self, path, contents):
self._write(path, contents)
def _write(self, path, contents):
full_path = self.abspath(path)
self.maybe_mkdir(self.dirname(full_path))
self.files[full_path] = contents
self.written_files[full_path] = contents
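# --- Hedged example (added for illustration, not part of the original module) ---
# Exercises only methods defined on FakeHost above; the path components are
# arbitrary placeholders.
if __name__ == '__main__':
    host = FakeHost()
    path = host.join('/tmp', 'demo', 'hello.txt')   # -> '/tmp/demo/hello.txt'
    host.write_text_file(path, 'hello fake filesystem')
    assert host.read_text_file(path) == 'hello fake filesystem'
    print(host.written_files)   # {'/tmp/demo/hello.txt': 'hello fake filesystem'}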
| 29.333333
| 79
| 0.565217
|
b51708ec1baadd97e53fc844fdb9e95af9d04a21
| 1,819
|
py
|
Python
|
TelegramEDT/__init__.py
|
flifloo/TelegramEDT
|
299dc340af31c4f7e6c601f133a5c10d6396225f
|
[
"MIT"
] | 1
|
2019-09-19T08:06:13.000Z
|
2019-09-19T08:06:13.000Z
|
TelegramEDT/__init__.py
|
flifloo/TelegramEDT
|
299dc340af31c4f7e6c601f133a5c10d6396225f
|
[
"MIT"
] | 11
|
2019-09-20T10:27:30.000Z
|
2021-09-08T01:19:58.000Z
|
TelegramEDT/__init__.py
|
flifloo/TelegramEDT
|
299dc340af31c4f7e6c601f133a5c10d6396225f
|
[
"MIT"
] | null | null | null |
from os.path import isfile
from threading import RLock
from aiogram import Bot, Dispatcher, types
from aiogram.types import reply_keyboard
from aiogram.utils.callback_data import CallbackData
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from TelegramEDT.EDTcalendar import Calendar
from TelegramEDT.base import Base, User
from TelegramEDT.lang import lang
from TelegramEDT.logger import logger
from TelegramEDT.EDTscoped_session import scoped_session
if not isfile("token.ini"):
logger.critical("No token specified, impossible to start the bot !")
exit(1)
API_TOKEN = open("token.ini").readline().replace("\n", "")
ADMIN_ID = 148441652
TIMES = ["", "day", "next", "week", "next week"]
bot = Bot(token=API_TOKEN)
posts_cb = CallbackData("post", "id", "action")
dp = Dispatcher(bot)
engine = create_engine("sqlite:///edt.db")
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
if not isfile("edt.db"):
Base.metadata.create_all(engine)
key = reply_keyboard.ReplyKeyboardMarkup()
for k in ["Edt", "Kfet", "Setkfet", "Setedt", "Notif", "Settomuss"]:
key.add(reply_keyboard.KeyboardButton(k))
modules_active = list()
def check_id(user: types.User):
with Session as session:
if (user.id,) not in session.query(User.id).all():
logger.info(f"{user.username} add to the db")
if user.locale and user.locale.language:
lg = user.locale.language
else:
lg = ""
session.add(User(id=user.id, language=lg))
session.commit()
logger.info("Start loading modules")
from TelegramEDT.modules import load_module
for m in ["modules", "basic", "edt", "kfet", "tomuss", "edt_notif", "tools"]:
load_module(m)
logger.info("Modules loading finish")
| 32.482143
| 77
| 0.707532
|
5bf77d901db74a275741c2c8d2f296f1d96f4ba9
| 10,025
|
py
|
Python
|
spikeinterface/sortingcomponents/motion_correction.py
|
vncntprvst/spikeinterface
|
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
|
[
"MIT"
] | null | null | null |
spikeinterface/sortingcomponents/motion_correction.py
|
vncntprvst/spikeinterface
|
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
|
[
"MIT"
] | null | null | null |
spikeinterface/sortingcomponents/motion_correction.py
|
vncntprvst/spikeinterface
|
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.interpolate
import sklearn
from tqdm import tqdm
import sklearn.metrics
from spikeinterface.toolkit.preprocessing.basepreprocessor import BasePreprocessor, BasePreprocessorSegment
try:
import numba
HAVE_NUMBA = True
except ImportError:
HAVE_NUMBA = False
def correct_motion_on_peaks(peaks, peak_locations, times,
motion, temporal_bins, spatial_bins,
direction='y', progress_bar=False):
"""
Given the output of estimate_motion(), apply inverse motion on peak location.
Parameters
----------
peaks: np.array
peaks vector
peak_locations: np.array
peaks location vector
times: np.array
times vector of recording
motion: np.array 2D
        motion.shape[0] equals temporal_bins.shape[0]
        motion.shape[1] equals 1 for "rigid" motion
            and spatial_bins.shape[0] for non-rigid motion
temporal_bins: np.array
Temporal bins in second.
spatial_bins: None or np.array
Bins for non-rigid motion. If None, rigid motion is used
Returns
-------
corrected_peak_locations: np.array
Motion-corrected peak locations
"""
corrected_peak_locations = peak_locations.copy()
if spatial_bins is None:
# rigid motion interpolation 1D
sample_bins = np.searchsorted(times, temporal_bins)
f = scipy.interpolate.interp1d(sample_bins, motion[:, 0], bounds_error=False, fill_value="extrapolate")
shift = f(peaks['sample_ind'])
corrected_peak_locations[direction] -= shift
else:
# non rigid motion = interpolation 2D
f = scipy.interpolate.RegularGridInterpolator((temporal_bins, spatial_bins), motion,
method='linear', bounds_error=False, fill_value=None)
shift = f(np.c_[times, peak_locations[direction]])
corrected_peak_locations[direction] -= shift
return corrected_peak_locations
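# --- Hedged usage sketch (added; not part of the original module) ------------
# Rigid-motion case: `motion` has shape (n_temporal_bins, 1) and `spatial_bins`
# is None, so every peak gets the same time-dependent shift. The structured
# dtypes below are illustrative assumptions about the peak vectors, not the
# exact spikeinterface dtypes. Kept as comments so nothing runs on import.
#
# times = np.arange(0, 10, 0.001)                       # 10 s sampled at 1 kHz
# temporal_bins = np.array([0.0, 5.0, 10.0])
# motion = np.array([[0.0], [5.0], [10.0]])             # rigid drift in um
# peaks = np.zeros(3, dtype=[('sample_ind', 'int64')])
# peaks['sample_ind'] = [0, 5000, 9999]
# peak_locations = np.zeros(3, dtype=[('x', 'float64'), ('y', 'float64')])
# corrected = correct_motion_on_peaks(peaks, peak_locations, times,
#                                     motion, temporal_bins, None)
# # corrected['y'] is roughly [0, -5, -10]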
def correct_motion_on_traces(traces, times, channel_locations, motion, temporal_bins, spatial_bins, direction=1,):
"""
Apply inverse motion with spatial interpolation on traces.
    Traces can be full traces, but also waveform snippets.
Parameters
----------
traces : np.array
Trace snippet (num_samples, num_channels)
channel_location: np.array 2d
Channel location with shape (n, 2) or (n, 3)
motion: np.array 2D
        motion.shape[0] equals temporal_bins.shape[0]
        motion.shape[1] equals 1 for "rigid" motion
            and spatial_bins.shape[0] for non-rigid motion
temporal_bins: np.array
Temporal bins in second.
spatial_bins: None or np.array
Bins for non-rigid motion. If None, rigid motion is used
direction: int in (0, 1, 2)
Dimension of shift in channel_locations.
Returns
-------
    traces_corrected: np.array
        Motion-corrected traces, same shape as the input traces
"""
assert HAVE_NUMBA
assert times.shape[0] == traces.shape[0]
traces_corrected = np.zeros_like(traces)
# print(traces_corrected.shape)
if spatial_bins is None:
# rigid motion interpolation 1D
raise NotImplementedError
else:
# non rigid motion = interpolation 2D
        # regroup times by closest temporal_bins
bin_inds = _get_closest_ind(temporal_bins, times)
        # interpolation kernel will be the same per temporal bin
for bin_ind in np.unique(bin_inds):
# Step 1 : interpolation channel motion for this temporal bin
f = scipy.interpolate.interp1d(spatial_bins, motion[bin_ind, :], kind='linear',
axis=0, bounds_error=False, fill_value="extrapolate")
locs = channel_locations[:, direction]
channel_motions = f(locs)
channel_locations_moved = channel_locations.copy()
channel_locations_moved[:, direction] += channel_motions
# Step 2 : interpolate trace
# interpolation is done with Inverse Distance Weighted
# because it is simple to implement
            # Instead we should use the convex hull / Delaunay triangulation (http://www.qhull.org/)
# scipy.interpolate.LinearNDInterpolator and qhull.Delaunay should help for this
distances = sklearn.metrics.pairwise_distances(channel_locations_moved, channel_locations,
metric='euclidean')
num_chans = channel_locations.shape[0]
num_closest = 3
closest_chans = np.zeros((num_chans, num_closest), dtype='int64')
weights = np.zeros((num_chans, num_closest), dtype='float32')
for c in range(num_chans):
ind_sorted = np.argsort(distances[c, ])
closest_chans[c, :] = ind_sorted[:num_closest]
dists = distances[c, ind_sorted[:num_closest]]
if dists[0] == 0.:
                    # no interpolation: the closest channel has zero distance
weights[c, :] = 0
weights[c, 0] = 1
else:
# Inverse Distance Weighted
w = 1 / dists
w /= np.sum(w)
weights[c, :] = w
my_inverse_weighted_distance_interpolation(traces, traces_corrected, closest_chans, weights)
return traces_corrected
if HAVE_NUMBA:
@numba.jit(parallel=False)
def my_inverse_weighted_distance_interpolation(traces, traces_corrected, closest_chans, weights):
num_sample = traces.shape[0]
num_chan = traces.shape[1]
num_closest = closest_chans.shape[1]
for sample_ind in range(num_sample):
for chan_ind in range(num_chan):
v = 0
for i in range(num_closest):
other_chan = closest_chans[chan_ind, i]
v += weights[chan_ind, i] * traces[sample_ind, other_chan]
traces_corrected[sample_ind, chan_ind] = v
def _get_closest_ind(array, values):
# https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
# get insert positions
idxs = np.searchsorted(array, values, side="left")
# find indexes where previous index is closer
prev_idx_is_less = ((idxs == len(array)) | (np.fabs(values - array[np.maximum(idxs-1, 0)]) <
np.fabs(values - array[np.minimum(idxs, len(array)-1)])))
idxs[prev_idx_is_less] -= 1
return idxs
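# Added worked example (comment only, illustrative values):
# _get_closest_ind(np.array([0., 1., 2., 3.]), np.array([0.4, 2.6])) returns
# [0, 3], since 0.4 is closer to 0 than to 1 and 2.6 is closer to 3 than to 2.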
class CorrectMotionRecording(BasePreprocessor):
"""
Recording that corrects motion on-the-fly given a rigid or non-rigid
motion vector estimation.
    For every time bin, this internally applies an inverse-distance-weighted interpolation
    on the original traces after reversing the estimated motion.
    `estimate_motion()` must be called beforehand to obtain the motion vector.
Parameters
----------
recording: Recording
The parent recording.
motion: np.array 2D
        motion.shape[0] equals temporal_bins.shape[0]
        motion.shape[1] equals 1 for "rigid" motion
            and spatial_bins.shape[0] for non-rigid motion
temporal_bins: np.array
Temporal bins in second.
spatial_bins: None or np.array
Bins for non-rigid motion. If None, rigid motion is used
direction: int in (0, 1, 2)
Dimension of shift in channel_locations.
Returns
-------
Corrected_recording: CorrectMotionRecording
Recording after motion correction
"""
name = 'correct_motion'
def __init__(self, recording, motion, temporal_bins, spatial_bins, direction=1):
        assert recording.get_num_segments() == 1, 'motion correction currently handles only single-segment recordings'
BasePreprocessor.__init__(self, recording)
channel_locations = recording.get_channel_locations()
for parent_segment in recording._recording_segments:
rec_segment = CorrectMotionRecordingSegment(parent_segment, channel_locations,
motion, temporal_bins, spatial_bins, direction)
self.add_recording_segment(rec_segment)
self._kwargs = dict(recording=recording.to_dict(), motion=motion, temporal_bins=temporal_bins,
spatial_bins=spatial_bins, direction=direction)
# self.is_dumpable= False
class CorrectMotionRecordingSegment(BasePreprocessorSegment):
def __init__(self, parent_recording_segment, channel_locations, motion, temporal_bins, spatial_bins, direction):
BasePreprocessorSegment.__init__(self, parent_recording_segment)
self.channel_locations = channel_locations
self.motion = motion
self.temporal_bins = temporal_bins
self.spatial_bins = spatial_bins
self.direction = direction
def get_traces(self, start_frame, end_frame, channel_indices):
if self.time_vector is not None:
times = np.asarray(self.time_vector[start_frame:end_frame])
else:
times = np.arange(end_frame - start_frame, dtype='float64')
times /= self.sampling_frequency
t0 = start_frame / self.sampling_frequency
if self.t_start is not None:
t0 = t0 + self.t_start
times += t0
traces = self.parent_recording_segment.get_traces(start_frame, end_frame, channel_indices=None)
# print(traces.shape, times.shape, self.channel_locations, self.motion, self.temporal_bins, self.spatial_bins)
trace2 = correct_motion_on_traces(traces, times, self.channel_locations, self.motion,
self.temporal_bins, self.spatial_bins, direction=self.direction)
if trace2 is not None:
trace2 = trace2[:, channel_indices]
return trace2
| 39.940239
| 118
| 0.641895
|
5af480448f276f73f5317c140796b3de50fa1a25
| 1,927
|
py
|
Python
|
var/spack/repos/builtin/packages/openfdtd/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/openfdtd/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/openfdtd/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Openfdtd(MakefilePackage):
"""OpenFDTD is general purpose FDTD simulator applicable to a wide range
    of applications. The FDTD (Finite-Difference Time-Domain) method numerically
    solves Maxwell's equations, the governing equations of the electromagnetic
    field, using finite differences."""
homepage = "http://www.e-em.co.jp/OpenFDTD/"
url = "http://www.e-em.co.jp/OpenFDTD/OpenFDTD.zip"
version('2.3.0', sha256='10ac70f2ed7160da87dd9222a5a17ca7b72365ee886235359afc48c4fb7b4be4')
variant('mpi', default=False, description='Build with MPI Support')
depends_on('mpi', when='+mpi')
def edit(self, spec, prefix):
filter_file('gcc', spack_cc, './src/Makefile_gcc')
if '+mpi' in self.spec:
filter_file('mpicc', spec['mpi'].mpicc, './mpi/Makefile_gcc')
# Openfdtd has "Makefile" and "Makefile_gcc".
# "Makefile" is used only in Windows development environment.
# The build in Windows development environment is currently unsupported.
def build(self, spec, prefix):
with working_dir('src'):
make('-f', 'Makefile_gcc')
# To make an executable file for mpi needs object files
# which are made for an executable file not for mpi.
# Therefore, the build in the "src" directory is necessary
# for to make an executable file for mpi.
if '+mpi' in self.spec:
with working_dir('mpi'):
make('-f', 'Makefile_gcc')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('ofd', prefix.bin)
if '+mpi' in self.spec:
install('ofd_mpi', prefix.bin)
| 38.54
| 95
| 0.662688
|
c3ed351e0ec67471c6f4b0f7b878d2fd282b074d
| 5,658
|
py
|
Python
|
src/datadog_api_client/v1/model/widget_message_display.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 32
|
2021-01-07T15:09:56.000Z
|
2022-01-30T05:49:23.000Z
|
src/datadog_api_client/v1/model/widget_message_display.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 228
|
2020-09-03T14:03:54.000Z
|
2022-03-31T20:16:12.000Z
|
src/datadog_api_client/v1/model/widget_message_display.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 12
|
2020-09-15T21:36:03.000Z
|
2022-03-31T17:13:17.000Z
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
class WidgetMessageDisplay(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("value",): {
"INLINE": "inline",
"EXPANDED_MEDIUM": "expanded-md",
"EXPANDED_LARGE": "expanded-lg",
},
}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"value": (str,),
}
discriminator = None
attribute_map = {}
_composed_schemas = None
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""WidgetMessageDisplay - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): Amount of log lines to display., must be one of ["inline", "expanded-md", "expanded-lg", ] # noqa: E501
Keyword Args:
value (str): Amount of log lines to display., must be one of ["inline", "expanded-md", "expanded-lg", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
super().__init__(kwargs)
if "value" in kwargs:
value = kwargs.pop("value")
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=self._path_to_item,
valid_classes=(self.__class__,),
)
self._check_pos_args(args)
self.value = value
self._check_kw_args(kwargs)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""Helper creating a new instance from a response."""
return cls(*args, **kwargs)
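# Illustrative usage sketch (not part of the generated client): the model wraps
# a single enum string, so construction looks roughly like
#     display = WidgetMessageDisplay("inline")
#     display.value   # -> "inline"
# Passing a string outside the allowed_values above ("inline", "expanded-md",
# "expanded-lg") is expected to be rejected by the model's validation.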
| 39.84507
| 131
| 0.574585
|
9a5ac0ca820454f3b00e9c7e0c414878f182d202
| 5,356
|
py
|
Python
|
idunn/datasources/wikipedia.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | 3
|
2021-10-07T20:07:57.000Z
|
2022-03-04T15:23:26.000Z
|
idunn/datasources/wikipedia.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | 16
|
2021-02-01T11:02:03.000Z
|
2022-03-23T14:44:50.000Z
|
idunn/datasources/wikipedia.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | null | null | null |
import logging
import requests
import pybreaker
from requests.exceptions import HTTPError, RequestException, Timeout
from redis import RedisError
from idunn import settings
from idunn.utils import prometheus
from idunn.utils.redis import RedisWrapper
from idunn.utils.circuit_breaker import IdunnCircuitBreaker
from idunn.utils.rate_limiter import IdunnRateLimiter, TooManyRequestsException
logger = logging.getLogger(__name__)
class WikipediaSession:
TIMEOUT = 1.0 # seconds
API_V1_BASE_PATTERN = "https://{lang}.wikipedia.org/api/rest_v1"
API_PHP_BASE_PATTERN = "https://{lang}.wikipedia.org/w/api.php"
REDIS_GET_SUMMARY_PREFIX = "get_summary"
REDIS_TITLE_IN_LANG_PREFIX = "get_title_in_language"
circuit_breaker = IdunnCircuitBreaker(
"wikipedia_api_breaker",
int(settings["WIKI_BREAKER_MAXFAIL"]),
int(settings["WIKI_BREAKER_TIMEOUT"]),
)
class Helpers:
_rate_limiter = None
@classmethod
def get_rate_limiter(cls):
if cls._rate_limiter is None:
cls._rate_limiter = IdunnRateLimiter(
resource="WikipediaAPI",
max_requests=int(settings["WIKI_API_RL_MAX_CALLS"]),
expire=int(settings["WIKI_API_RL_PERIOD"]),
)
return cls._rate_limiter
@classmethod
def handle_requests_error(cls, f):
"""
Helper function to catch exceptions and log them into Prometheus.
"""
def wrapped_f(*args, **kwargs):
try:
with cls.get_rate_limiter().limit(client="idunn"):
return f(*args, **kwargs)
except pybreaker.CircuitBreakerError:
prometheus.exception("CircuitBreakerError")
logger.error(
"Got CircuitBreakerError in %s",
f.__name__,
exc_info=True,
)
except HTTPError:
prometheus.exception("HTTPError")
logger.warning("Got HTTP error in %s", f.__name__, exc_info=True)
except Timeout:
prometheus.exception("RequestsTimeout")
logger.warning("External API timed out in %s", f.__name__, exc_info=True)
except RequestException:
prometheus.exception("RequestException")
logger.error("Got Request exception in %s", f.__name__, exc_info=True)
except TooManyRequestsException:
prometheus.exception("TooManyRequests")
logger.warning("Got TooManyRequests in %s", f.__name__, exc_info=True)
except RedisError:
prometheus.exception("RedisError")
logger.warning("Got redis ConnectionError in %s", f.__name__, exc_info=True)
return None
return wrapped_f
def __init__(self):
self.session = requests.Session()
self.session.headers.update({"User-Agent": settings["WIKI_USER_AGENT"]})
def get_summary(self, title, lang):
@self.Helpers.handle_requests_error
@self.circuit_breaker
def fetch_data():
url = f"{self.API_V1_BASE_PATTERN.format(lang=lang)}/page/summary/{title}"
with prometheus.wiki_request_duration("wiki_api", "get_summary"):
resp = self.session.get(url=url, params={"redirect": True}, timeout=self.TIMEOUT)
resp.raise_for_status()
return resp.json()
key = self.REDIS_GET_SUMMARY_PREFIX + "_" + title + "_" + lang
fetch_data_cached = RedisWrapper.cache_it(key, fetch_data)
return fetch_data_cached()
def get_title_in_language(self, title, source_lang, dest_lang):
@self.Helpers.handle_requests_error
@self.circuit_breaker
def fetch_data():
url = self.API_PHP_BASE_PATTERN.format(lang=source_lang)
with prometheus.wiki_request_duration("wiki_api", "get_title"):
resp = self.session.get(
url=url,
params={
"action": "query",
"prop": "langlinks",
"titles": title,
"lllang": dest_lang,
"formatversion": 2,
"format": "json",
},
timeout=self.TIMEOUT,
)
resp.raise_for_status()
resp_data = resp.json()
resp_pages = resp_data.get("query", {}).get("pages", [])
if len(resp_pages) > 0:
if len(resp_pages) > 1:
logger.warning(
"Got multiple pages in wikipedia langlinks response: %s", resp_data
)
lang_links = resp_pages[0].get("langlinks", [])
if len(lang_links) > 0:
return lang_links[0].get("title")
return None
key = self.REDIS_TITLE_IN_LANG_PREFIX + "_" + title + "_" + source_lang + "_" + dest_lang
fetch_data_cached = RedisWrapper.cache_it(key, fetch_data)
return fetch_data_cached()
wikipedia_session = WikipediaSession()
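# Illustrative usage sketch (assumes Idunn settings, Redis and the rate limiter
# are configured; not part of the original module):
#     summary = wikipedia_session.get_summary("Tour Eiffel", lang="fr")
#     title_en = wikipedia_session.get_title_in_language(
#         "Tour Eiffel", source_lang="fr", dest_lang="en")
# Both calls return None when the circuit breaker, the rate limiter or the
# Wikipedia API rejects the request, so callers should handle a None result.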
| 37.454545
| 97
| 0.573936
|
87a0d10aa1f72e12a6c8456ea0745558653cdf70
| 126
|
py
|
Python
|
EixampleEnergy/tool.py
|
TugdualSarazin/eixample_energy
|
bcda0ba0d51eb229f00f6c557c6680cd416a62a4
|
[
"Apache-2.0"
] | null | null | null |
EixampleEnergy/tool.py
|
TugdualSarazin/eixample_energy
|
bcda0ba0d51eb229f00f6c557c6680cd416a62a4
|
[
"Apache-2.0"
] | null | null | null |
EixampleEnergy/tool.py
|
TugdualSarazin/eixample_energy
|
bcda0ba0d51eb229f00f6c557c6680cd416a62a4
|
[
"Apache-2.0"
] | null | null | null |
import geopandas
def load_shapefile(shape_file_path, rows=None):
return geopandas.read_file(shape_file_path, rows=rows)
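# Illustrative usage (the path is a placeholder): read only the first 100
# features of a shapefile into a GeoDataFrame.
#     gdf = load_shapefile("data/eixample_buildings.shp", rows=100)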
| 21
| 58
| 0.809524
|
cea6eaa332dc7b211cd3d554a10018990205bbb2
| 1,312
|
py
|
Python
|
ravens/environments/environment_test.py
|
EricCousineau-TRI/ravens
|
d7f9db3214ed730c6d16e5c248684688555c6d23
|
[
"Apache-2.0"
] | null | null | null |
ravens/environments/environment_test.py
|
EricCousineau-TRI/ravens
|
d7f9db3214ed730c6d16e5c248684688555c6d23
|
[
"Apache-2.0"
] | null | null | null |
ravens/environments/environment_test.py
|
EricCousineau-TRI/ravens
|
d7f9db3214ed730c6d16e5c248684688555c6d23
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Ravens Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ravens.environments.environment."""
from absl.testing import absltest
from ravens import tasks
from ravens.environments import environment
ASSETS_PATH = 'ravens/environments/assets/'
class EnvironmentTest(absltest.TestCase):
def test_environment_action(self):
env = environment.Environment(ASSETS_PATH)
task = tasks.BlockInsertion()
env.set_task(task)
env.seed(0)
agent = task.oracle(env)
obs = env.reset()
info = None
done = False
for _ in range(10):
act = agent.act(obs, info)
self.assertTrue(env.action_space.contains(act))
obs, _, done, info = env.step(act)
if done:
break
if __name__ == '__main__':
absltest.main()
| 27.914894
| 74
| 0.719512
|
fc6498d787c68f10dd37a30e4796911973e4bafc
| 4,956
|
py
|
Python
|
lib/utils/pvnet/pvnet_pose_utils.py
|
jonashein/pvnet_baseline
|
f919b5451051dbc5088f735271fc8779712b546c
|
[
"Apache-2.0"
] | null | null | null |
lib/utils/pvnet/pvnet_pose_utils.py
|
jonashein/pvnet_baseline
|
f919b5451051dbc5088f735271fc8779712b546c
|
[
"Apache-2.0"
] | null | null | null |
lib/utils/pvnet/pvnet_pose_utils.py
|
jonashein/pvnet_baseline
|
f919b5451051dbc5088f735271fc8779712b546c
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import cv2
from scipy.linalg import sqrtm
from scipy.optimize import leastsq
def pnp(points_3d, points_2d, camera_matrix, method=cv2.SOLVEPNP_EPNP):
try:
dist_coeffs = pnp.dist_coeffs
    except AttributeError:
dist_coeffs = np.zeros(shape=[8, 1], dtype='float64')
assert points_3d.shape[0] == points_2d.shape[0], 'points 3D and points 2D must have same number of vertices'
if method == cv2.SOLVEPNP_EPNP:
points_3d = np.expand_dims(points_3d, 0)
points_2d = np.expand_dims(points_2d, 0)
points_2d = np.ascontiguousarray(points_2d.astype(np.float64))
points_3d = np.ascontiguousarray(points_3d.astype(np.float64))
camera_matrix = camera_matrix.astype(np.float64)
_, R_exp, t = cv2.solvePnP(points_3d,
points_2d,
camera_matrix,
dist_coeffs,
flags=method)
# , None, None, False, cv2.SOLVEPNP_UPNP)
# R_exp, t, _ = cv2.solvePnPRansac(points_3D,
# points_2D,
# cameraMatrix,
# distCoeffs,
# reprojectionError=12.0)
R, _ = cv2.Rodrigues(R_exp)
# trans_3d=np.matmul(points_3d,R.transpose())+t.transpose()
# if np.max(trans_3d[:,2]<0):
# R=-R
# t=-t
return np.concatenate([R, t], axis=-1)
def uncertainty_pnp(points_3d, points_2d, var, camera_matrix, method=cv2.SOLVEPNP_EPNP):
# Compute weights
cov_invs = []
for vi in range(var.shape[0]):
if var[vi, 0, 0] < 1e-6 or np.sum(np.isnan(var)[vi]) > 0:
cov_invs.append(np.zeros([2, 2]).astype(np.float32))
else:
cov_inv = np.linalg.inv(sqrtm(var[vi]))
cov_invs.append(cov_inv)
cov_invs = np.asarray(cov_invs) # K,2,2
# Compute initialization with 4 best points
weights = cov_invs.reshape([-1, 4])
weights = weights[:, (0, 1, 3)]
idxs = np.argsort(weights[:, 0]+weights[:, 1])[-4:]
_, R_exp, t = cv2.solvePnP(np.expand_dims(points_3d[idxs, :], 0),
np.expand_dims(points_2d[idxs, :], 0),
camera_matrix, None, None, None, False, flags=cv2.SOLVEPNP_EPNP)
Rt_vec = np.concatenate([R_exp, t], axis=0)
# Return if we only have 4 points
if points_2d.shape[0] == 4:
R, _ = cv2.Rodrigues(Rt_vec[:3])
Rt = np.concatenate([R, Rt_vec[3:, None]], axis=-1)
return Rt
# Minimize Mahalanobis distance
Rt_vec, _ = leastsq(mahalanobis, Rt_vec, args=(points_3d, points_2d, cov_invs, camera_matrix))
R, _ = cv2.Rodrigues(Rt_vec[:3])
Rt = np.concatenate([R, Rt_vec[3:, None]], axis=-1)
return Rt
def mahalanobis(Rt_vec, points_3d, points_2d, var, camera_matrix):
# Rt_vec.shape: (6,)
# points_3d.shape: (K,3)
# points_2d.shape: (K,2)
# var.shape: (K,2,2)
# camera_matrix.shape: (3,3)
if np.any(np.iscomplex(var)):
var = np.real(var)
R, _ = cv2.Rodrigues(Rt_vec[:3])
Rt = np.concatenate([R, Rt_vec[3:, None]], axis=-1)
points_3d_hom = np.concatenate([points_3d, np.ones((points_3d.shape[0], 1))], axis=-1) # (K,4)
proj_2d_hom = camera_matrix @ Rt @ points_3d_hom.transpose() # (3, K)
proj_2d = proj_2d_hom[:2, :] / proj_2d_hom[2:, :] # (2,K)
err_2d = proj_2d.transpose() - points_2d # (K,2)
err_2d = np.expand_dims(err_2d, axis=1) # (K,1,2)
err = err_2d @ var @ err_2d.transpose((0,2,1)) # (K,1,2) x (K,2,2) x (K,2,1) = (K,1,1)
err = np.sqrt(err.squeeze())
return err
def project(xyz, K, RT):
"""
xyz: [N, 3]
K: [3, 3]
RT: [3, 4]
"""
xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T
xyz = np.dot(xyz, K.T)
xy = xyz[:, :2] / xyz[:, 2:]
return xy
def cm_degree_5(pose_pred, pose_targets):
translation_distance = np.linalg.norm(pose_pred[:, 3] - pose_targets[:, 3]) * 100
rotation_diff = np.dot(pose_pred[:, :3], pose_targets[:, :3].T)
trace = np.trace(rotation_diff)
trace = trace if trace <= 3 else 3
angular_distance = np.rad2deg(np.arccos((trace - 1.) / 2.))
return translation_distance, angular_distance
def transform(verts, trans, convert_to_homogeneous=False):
"""
verts: [N, 3]
trans: [3, 4]
"""
assert len(verts.shape) == 2, "Expected 2 dimensions for verts, got: {}.".format(len(verts.shape))
assert len(trans.shape) == 2, "Expected 2 dimensions for trans, got: {}.".format(len(trans.shape))
if convert_to_homogeneous:
        hom_verts = np.concatenate([verts, np.ones([verts.shape[0], 1])], axis=1)
else:
hom_verts = verts
assert trans.shape[1] == hom_verts.shape[1], \
"Incompatible shapes: verts.shape: {}, trans.shape: {}".format(verts.shape, trans.shape)
trans_verts = np.dot(trans, hom_verts.transpose()).transpose()
return trans_verts
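if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original module): project a few
    # synthetic 3D points with a known pose, then recover that pose with pnp().
    # The intrinsics and the ground-truth pose below are arbitrary example values.
    K = np.array([[600.0, 0.0, 320.0],
                  [0.0, 600.0, 240.0],
                  [0.0, 0.0, 1.0]])
    R_gt, _ = cv2.Rodrigues(np.array([[0.1], [-0.2], [0.05]]))
    pose_gt = np.concatenate([R_gt, np.array([[0.02], [-0.01], [0.6]])], axis=-1)
    pts_3d = np.random.RandomState(0).uniform(-0.05, 0.05, size=(8, 3))
    pts_2d = project(pts_3d, K, pose_gt)
    pose_est = pnp(pts_3d, pts_2d, K)
    print("max abs pose error:", np.max(np.abs(pose_est - pose_gt)))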
| 37.545455
| 112
| 0.589185
|
b5fe7d9fc360b6a75bd7f628323b896419931788
| 6,727
|
py
|
Python
|
server/external/youtube-dl/youtube_dl/extractor/adultswim.py
|
yycc179/urlp
|
d272b74c4aed18f03ccada8817ecf5c572a1bf71
|
[
"MIT"
] | null | null | null |
server/external/youtube-dl/youtube_dl/extractor/adultswim.py
|
yycc179/urlp
|
d272b74c4aed18f03ccada8817ecf5c572a1bf71
|
[
"MIT"
] | null | null | null |
server/external/youtube-dl/youtube_dl/extractor/adultswim.py
|
yycc179/urlp
|
d272b74c4aed18f03ccada8817ecf5c572a1bf71
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import re
from .turner import TurnerBaseIE
from ..utils import (
int_or_none,
strip_or_none,
)
class AdultSwimIE(TurnerBaseIE):
_VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<show_path>[^/?#]+)(?:/(?P<episode_path>[^/?#]+))?'
_TESTS = [{
'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow',
'ext': 'mp4',
'title': 'Rick and Morty - Pilot',
'description': 'Rick moves in with his daughter\'s family and establishes himself as a bad influence on his grandson, Morty.',
'timestamp': 1493267400,
'upload_date': '20170427',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'http://www.adultswim.com/videos/tim-and-eric-awesome-show-great-job/dr-steve-brule-for-your-wine/',
'info_dict': {
'id': 'sY3cMUR_TbuE4YmdjzbIcQ',
'ext': 'mp4',
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.',
'upload_date': '20080124',
'timestamp': 1201150800,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.adultswim.com/videos/decker/inside-decker-a-new-hero/',
'info_dict': {
'id': 'I0LQFQkaSUaFp8PnAWHhoQ',
'ext': 'mp4',
'title': 'Decker - Inside Decker: A New Hero',
'description': 'The guys recap the conclusion of the season. They announce a new hero, take a peek into the Victorville Film Archive and welcome back the talented James Dean.',
'timestamp': 1469480460,
'upload_date': '20160725',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'http://www.adultswim.com/videos/attack-on-titan',
'info_dict': {
'id': 'b7A69dzfRzuaXIECdxW8XQ',
'title': 'Attack on Titan',
'description': 'md5:6c8e003ea0777b47013e894767f5e114',
},
'playlist_mincount': 12,
}, {
'url': 'http://www.adultswim.com/videos/streams/williams-stream',
'info_dict': {
'id': 'd8DEBj7QRfetLsRgFnGEyg',
'ext': 'mp4',
'title': r're:^Williams Stream \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
'description': 'original programming',
},
'params': {
# m3u8 download
'skip_download': True,
},
}]
def _real_extract(self, url):
show_path, episode_path = re.match(self._VALID_URL, url).groups()
display_id = episode_path or show_path
webpage = self._download_webpage(url, display_id)
initial_data = self._parse_json(self._search_regex(
r'AS_INITIAL_DATA(?:__)?\s*=\s*({.+?});',
webpage, 'initial data'), display_id)
is_stream = show_path == 'streams'
if is_stream:
if not episode_path:
episode_path = 'live-stream'
video_data = next(stream for stream_path, stream in initial_data['streams'].items() if stream_path == episode_path)
video_id = video_data.get('stream')
if not video_id:
entries = []
for episode in video_data.get('archiveEpisodes', []):
episode_url = episode.get('url')
if not episode_url:
continue
entries.append(self.url_result(
episode_url, 'AdultSwim', episode.get('id')))
return self.playlist_result(
entries, video_data.get('id'), video_data.get('title'),
strip_or_none(video_data.get('description')))
else:
show_data = initial_data['show']
if not episode_path:
entries = []
for video in show_data.get('videos', []):
slug = video.get('slug')
if not slug:
continue
entries.append(self.url_result(
'http://adultswim.com/videos/%s/%s' % (show_path, slug),
'AdultSwim', video.get('id')))
return self.playlist_result(
entries, show_data.get('id'), show_data.get('title'),
strip_or_none(show_data.get('metadata', {}).get('description')))
video_data = show_data['sluggedVideo']
video_id = video_data['id']
info = self._extract_cvp_info(
'http://www.adultswim.com/videos/api/v0/assets?platform=desktop&id=' + video_id,
video_id, {
'secure': {
'media_src': 'http://androidhls-secure.cdn.turner.com/adultswim/big',
'tokenizer_src': 'http://www.adultswim.com/astv/mvpd/processors/services/token_ipadAdobe.do',
},
}, {
'url': url,
'site_name': 'AdultSwim',
'auth_required': video_data.get('auth'),
})
info.update({
'id': video_id,
'display_id': display_id,
'description': info.get('description') or strip_or_none(video_data.get('description')),
})
if not is_stream:
info.update({
'duration': info.get('duration') or int_or_none(video_data.get('duration')),
'timestamp': info.get('timestamp') or int_or_none(video_data.get('launch_date')),
'season_number': info.get('season_number') or int_or_none(video_data.get('season_number')),
'episode': info['title'],
'episode_number': info.get('episode_number') or int_or_none(video_data.get('episode_number')),
})
info['series'] = video_data.get('collection_title') or info.get('series')
if info['series'] and info['series'] != info['title']:
info['title'] = '%s - %s' % (info['series'], info['title'])
return info
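# Illustrative note (not part of the extractor): _VALID_URL splits the path into
# a show and an optional episode, e.g.
#     re.match(AdultSwimIE._VALID_URL,
#              'http://adultswim.com/videos/rick-and-morty/pilot').groups()
#     # -> ('rick-and-morty', 'pilot')
# while a show-only URL such as 'http://www.adultswim.com/videos/attack-on-titan'
# yields ('attack-on-titan', None) and takes the playlist branch above.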
| 42.04375
| 191
| 0.523562
|
3c88f641711919d27c353723f2056e83740be812
| 31,218
|
py
|
Python
|
theano/tensor/type.py
|
mdda/Theano
|
6ca7b2b65000e371f009b617d41bc5a90f022d38
|
[
"BSD-3-Clause"
] | 295
|
2015-09-25T21:15:04.000Z
|
2022-01-13T01:16:18.000Z
|
libs/Theano/theano/tensor/type.py
|
shenshenzhanzhan/attention-lvcsr
|
598d487c118e66875fdd625baa84ed29d283b800
|
[
"MIT"
] | 21
|
2015-10-28T19:06:32.000Z
|
2022-03-11T23:13:05.000Z
|
libs/Theano/theano/tensor/type.py
|
shenshenzhanzhan/attention-lvcsr
|
598d487c118e66875fdd625baa84ed29d283b800
|
[
"MIT"
] | 114
|
2015-09-26T21:23:02.000Z
|
2021-11-19T02:36:41.000Z
|
import logging
import warnings
import numpy
import theano
from theano import config
from theano.gof import hashtype, Type, Variable
from theano import scalar as scal
_logger = logging.getLogger("theano.tensor.type")
class TensorType(Type):
"""
Symbolic `Type` representing a numpy.ndarray value.
Initialize self.dtype and self.broadcastable.
Parameters
----------
dtype: str
Corresponding to numpy dtype (e.g., 'int64')
The value (ndarray) associated to a `Variable` of this `Type` will
have this dtype.
broadcastable: tuple, list, or array of boolean values
This argument serves two purposes. First, the True elements of this
list indicate the dimensions where the shape of an associated value
must be 1. Secondly, the length of this list is the number of
dimensions that an associated value must have. See
        :doc:`broadcasting` for an explanation of how this list is used.
name : str
Optional name for this type.
"""
filter_checks_isfinite = False
"""
When this is True, strict filtering rejects data containing NaN or
Inf entries. (Used in `DebugMode`)
"""
def __init__(self, dtype, broadcastable, name=None, sparse_grad=False):
self.dtype = str(dtype)
if self.dtype == 'floatX':
self.dtype = config.floatX
# broadcastable is immutable, and all elements are either
# True or False
self.broadcastable = tuple(bool(b) for b in broadcastable)
self.dtype_specs() # error checking is done there
self.name = name
self.numpy_dtype = numpy.dtype(self.dtype)
self.sparse_grad = sparse_grad
if sparse_grad:
warnings.warn(
"DEPRECATION WARNING: You use an old interface to"
" AdvancedSubtensor1 sparse_grad. Now use"
" theano.sparse_grad(a_tensor[an_int_vector]).")
def clone(self, dtype=None, broadcastable=None):
"""
Return a copy of the type optionally with a new dtype or
broadcastable pattern.
"""
if dtype is None:
dtype = self.dtype
if broadcastable is None:
broadcastable = self.broadcastable
return self.__class__(dtype, broadcastable, name=self.name,
sparse_grad=self.sparse_grad)
def filter(self, data, strict=False, allow_downcast=None):
"""
Convert `data` to something which can be associated to a
`TensorVariable`.
This function is not meant to be called in user code. It is for
`Linker` instances to use when running a compiled graph.
"""
# Explicit error message when one accidentally uses a Variable as
# input (typical mistake, especially with shared variables).
if isinstance(data, Variable):
raise TypeError(
'Expected an array-like object, but found a Variable: '
'maybe you are trying to call a function on a (possibly '
'shared) variable instead of a numeric array?')
if ((type(data) is numpy.ndarray) and
(data.dtype == self.numpy_dtype)):
if data.dtype.num != self.numpy_dtype.num:
data = theano._asarray(data, dtype=self.dtype)
# -- now fall through to ndim check
elif ((type(data) is numpy.memmap) and
(data.dtype == self.numpy_dtype)):
# numpy.memmap is a "safe" subclass of ndarray,
            # so we can use it wherever we expect a base ndarray.
# however, casting it would defeat the purpose of not
# loading the whole data into memory
pass
elif strict:
# If any of the two conditions above was not met,
# we raise a meaningful TypeError.
if not (type(data) is numpy.ndarray):
raise TypeError("%s expected a ndarray object." % self,
data, type(data))
if data.dtype != self.numpy_dtype:
raise TypeError(("%s expected a ndarray object with "
"dtype = %s (got %s).") %
(self, self.numpy_dtype, data.dtype))
assert False, "This point should never be reached."
else:
if allow_downcast:
# Convert to self.dtype, regardless of the type of data
data = theano._asarray(data, dtype=self.dtype)
# TODO: consider to pad shape with ones to make it consistent
# with self.broadcastable... like vector->row type thing
else:
if isinstance(data, numpy.ndarray):
# Check if self.dtype can accurately represent data
# (do not try to convert the data)
up_dtype = scal.upcast(self.dtype, data.dtype)
if up_dtype == self.dtype:
# Bug in the following line when data is a
# scalar array, see
# http://projects.scipy.org/numpy/ticket/1611
# data = data.astype(self.dtype)
data = theano._asarray(data, dtype=self.dtype)
if up_dtype != self.dtype:
err_msg = (
'%s cannot store a value of dtype %s without '
'risking loss of precision. If you do not mind '
'this loss, you can: '
'1) explicitly cast your data to %s, or '
'2) set "allow_input_downcast=True" when calling '
'"function".'
% (self, data.dtype, self.dtype))
raise TypeError(err_msg, data)
elif (allow_downcast is None and
type(data) is float and
self.dtype == theano.config.floatX):
# Special case where we allow downcasting of Python float
# literals to floatX, even when floatX=='float32'
data = theano._asarray(data, self.dtype)
else:
# data has to be converted.
# Check that this conversion is lossless
converted_data = theano._asarray(data, self.dtype)
# We use the `values_eq` static function from TensorType
# to handle NaN values.
if TensorType.values_eq(numpy.asarray(data),
converted_data,
force_same_dtype=False):
data = converted_data
else:
# Do not print a too long description of data
# (ndarray truncates it, but it's not sure for data)
str_data = str(data)
if len(str_data) > 80:
str_data = str_data[:75] + '(...)'
err_msg = (
'%s cannot store accurately value %s, '
'it would be represented as %s. '
'If you do not mind this precision loss, you can: '
'1) explicitly convert your data to a numpy array '
'of dtype %s, or '
'2) set "allow_input_downcast=True" when calling '
'"function".'
% (self, data, converted_data, self.dtype))
raise TypeError(err_msg, data)
if self.ndim != data.ndim:
raise TypeError("Wrong number of dimensions: expected %s,"
" got %s with shape %s." % (self.ndim, data.ndim,
data.shape))
if not data.flags.aligned:
try:
msg = "object buffer" + str(data.data)
except AttributeError:
msg = ""
raise TypeError("The numpy.ndarray object is not aligned."
" Theano C code does not support that.",
msg,
"object shape", data.shape,
"object strides", data.strides,
"object dtype", data.dtype)
i = 0
for b in self.broadcastable:
if b and data.shape[i] != 1:
raise TypeError("Non-unit value on shape on a broadcastable"
" dimension.", data.shape, self.broadcastable)
i += 1
if (self.filter_checks_isfinite and
not numpy.all(numpy.isfinite(data))):
raise ValueError("non-finite elements not allowed")
return data
def filter_variable(self, other, allow_convert=True):
"""
Convert a symbolic Variable into a TensorType, if compatible.
For the moment, only a TensorType or CudaNdarrayType will be
converted, provided they have the same number of dimensions,
broadcastable pattern, and dtype.
"""
if hasattr(other, '_as_TensorVariable'):
other = other._as_TensorVariable()
if not isinstance(other, Variable):
# The value is not a Variable: we cast it into
# a Constant of the appropriate Type.
other = self.Constant(type=self, data=other)
if other.type == self:
return other
if allow_convert:
# Attempt safe broadcast conversion.
other2 = self.convert_variable(other)
if other2 is not None and other2.type == self:
return other2
raise TypeError(
'Cannot convert Type %(othertype)s '
'(of Variable %(other)s) into Type %(self)s. '
'You can try to manually convert %(other)s into a %(self)s.' %
dict(othertype=other.type,
other=other,
self=self))
def value_validity_msg(self, a):
try:
self.filter(a, strict=True)
except Exception as e:
return str(e)
return "value is valid"
def dtype_specs(self):
"""
Return a tuple (python type, c type, numpy typenum) that corresponds
to self.dtype.
This function is used internally as part of C code generation.
"""
        # TODO: add more type correspondences for e.g. int32, int64, float32,
# complex64, etc.
try:
return {
'float16': (float, 'npy_float16', 'NPY_FLOAT16'),
'float32': (float, 'npy_float32', 'NPY_FLOAT32'),
'float64': (float, 'npy_float64', 'NPY_FLOAT64'),
'uint8': (int, 'npy_uint8', 'NPY_UINT8'),
'int8': (int, 'npy_int8', 'NPY_INT8'),
'uint16': (int, 'npy_uint16', 'NPY_UINT16'),
'int16': (int, 'npy_int16', 'NPY_INT16'),
'uint32': (int, 'npy_uint32', 'NPY_UINT32'),
'int32': (int, 'npy_int32', 'NPY_INT32'),
'uint64': (int, 'npy_uint64', 'NPY_UINT64'),
'int64': (int, 'npy_int64', 'NPY_INT64'),
'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),
'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')
}[self.dtype]
except KeyError:
raise TypeError("Unsupported dtype for %s: %s"
% (self.__class__.__name__, self.dtype))
def to_scalar_type(self):
return scal.get_scalar_type(dtype=self.dtype)
def __eq__(self, other):
"""
Compare True iff other is the same kind of TensorType.
"""
return type(self) == type(other) and other.dtype == self.dtype \
and other.broadcastable == self.broadcastable
def convert_variable(self, var):
if (type(self) == type(var.type) and # noqa
self.dtype == var.type.dtype and
self.ndim == var.type.ndim and
all(sb == ob or ob for sb, ob in zip(self.broadcastable,
var.type.broadcastable))):
return theano.tensor.patternbroadcast(var, self.broadcastable)
@staticmethod
def may_share_memory(a, b):
# This is a method of TensorType, so both a and b should be ndarrays
if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
return numpy.may_share_memory(a, b)
else:
return False
@staticmethod
def values_eq(a, b, force_same_dtype=True):
# TODO: check to see if the shapes must match
# for now, we err on safe side...
if a.shape != b.shape:
return False
if force_same_dtype and a.dtype != b.dtype:
return False
a_eq_b = (a == b)
r = numpy.all(a_eq_b)
if r:
return True
# maybe the trouble is that there are NaNs
a_missing = numpy.isnan(a)
if a_missing.any():
b_missing = numpy.isnan(b)
return numpy.all(a_eq_b + (a_missing == b_missing))
else:
return False
@staticmethod
def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
rtol=None, atol=None):
"""
Parameters
----------
        allow_remove_inf
            If True, when there is an inf in a, we allow any value in b in
            that position. Even -inf.
        allow_remove_nan
            If True, when there is a nan in a, we allow any value in b in
            that position. Even +-inf.
rtol
Relative tolerance, passed to _allclose.
atol
Absolute tolerance, passed to _allclose.
"""
if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray):
if a.shape != b.shape:
return False
if a.dtype != b.dtype:
return False
if 'int' in str(a.dtype):
return numpy.all(a == b)
else:
# work around a numpy.allclose bug:
# http://projects.scipy.org/numpy/ticket/1672
if a.ndim == 0 and numpy.isinf(a):
a = a.reshape(1)
b = b.reshape(1)
cmp = theano.tensor.basic._allclose(a, b, rtol=rtol, atol=atol)
if cmp:
# Numpy claims they are close, this is good enough for us.
return True
# Numpy is unhappy, but it does not necessarily mean that a and
# b are different. Indeed, Numpy does not like missing values
# and will return False whenever some are found in a or b.
# The proper way would be to use the MaskArray stuff available
# in Numpy. However, it looks like it has been added to Numpy's
# core recently, so it may not be available to everyone. Thus,
# for now we use a home-made recipe, that should probably be
# revisited in the future.
a_missing = numpy.isnan(a)
a_inf = numpy.isinf(a)
if not (a_missing.any() or (allow_remove_inf and a_inf.any())):
# There are no missing values in a, thus this is not the
# reason why numpy.allclose(a, b) returned False.
_logger.info(
'numpy allclose failed for abs_err %f and rel_err %f',
numpy.max(abs(a - b)),
numpy.max(abs(a - b) / (abs(a) + abs(b))))
return False
# The following line is what numpy.allclose bases its decision
# upon, according to its documentation.
rtol = 1.0000000000000001e-05
atol = 1e-8
cmp_elemwise = (numpy.absolute(a - b) <=
(atol + rtol * numpy.absolute(b)))
# Find places where both a and b have missing values.
both_missing = a_missing * numpy.isnan(b)
# Find places where both a and b have inf of the same sign.
both_inf = a_inf * numpy.isinf(b)
# cmp_elemwise is weird when we have inf and -inf.
# set it to False
cmp_elemwise = numpy.where(
both_inf & cmp_elemwise,
a == b,
cmp_elemwise)
# check the sign of the inf
both_inf = numpy.where(both_inf, (a == b), both_inf)
if allow_remove_inf:
both_inf += a_inf
if allow_remove_nan:
both_missing += a_missing
# Combine all information.
return (cmp_elemwise + both_missing + both_inf).all()
return False
def __hash__(self):
"""Hash equal for same kinds of TensorType"""
return hashtype(self) ^ hash(self.dtype) ^ hash(self.broadcastable)
ndim = property(lambda self: len(self.broadcastable),
doc="number of dimensions")
"""
Number of dimensions.
This read-only property is the preferred way to get the number of
dimensions of a `TensorType`.
"""
def make_variable(self, name=None):
"""
Return a `TensorVariable` of this type.
Parameters
----------
name : str
A pretty name to identify this `Variable` when printing and
debugging
"""
return self.Variable(self, name=name)
def __str__(self):
if self.name:
return self.name
else:
b = self.broadcastable
named_broadcastable = {(): 'scalar',
(False,): 'vector',
(False, True): 'col',
(True, False): 'row',
(False, False): 'matrix'}
if b in named_broadcastable:
bcast = named_broadcastable[b]
else:
if any(b):
bcast = str(b)
else:
bcast = '%iD' % len(b)
return "TensorType(%s, %s)" % (str(self.dtype), bcast)
def __repr__(self):
return str(self)
# "TensorType{%s, %s}" % (str(self.dtype), str(self.broadcastable))
def c_declare(self, name, sub, check_input=True):
"""
Override `CLinkerType.c_declare`.
"""
if(check_input):
check = """
typedef %(dtype)s dtype_%(name)s;
""" % dict(sub, name=name, dtype=self.dtype_specs()[1])
else:
check = ""
declaration = """
PyArrayObject* %(name)s;
""" % dict(sub, name=name, dtype=self.dtype_specs()[1])
return declaration + check
def c_init(self, name, sub):
"""
Override `CLinkerType.c_init`.
"""
return """
%(name)s = NULL;
""" % dict(sub, name=name, type_num=self.dtype_specs()[2])
def c_extract(self, name, sub, check_input=True):
"""
Override `CLinkerType.c_extract`.
"""
if(check_input):
check = """
%(name)s = NULL;
if (py_%(name)s == Py_None) {
// We can either fail here or set %(name)s to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
%(fail)s
}
if (!PyArray_Check(py_%(name)s)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
%(fail)s
}
// We expect %(type_num)s
if (!PyArray_ISALIGNED((PyArrayObject*) py_%(name)s)) {
PyArrayObject * tmp = (PyArrayObject*) py_%(name)s;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %%ld "
"(%(type_num)s), got non-aligned array of type %%ld"
" with %%ld dimensions, with 3 last dims "
"%%ld, %%ld, %%ld"
" and 3 last strides %%ld %%ld, %%ld.",
(long int) %(type_num)s,
(long int) PyArray_TYPE((PyArrayObject*) py_%(name)s),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
%(fail)s
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_%(name)s) != %(type_num)s) {
PyErr_Format(PyExc_TypeError,
"expected type_num %%d (%(type_num)s) got %%d",
%(type_num)s, PyArray_TYPE((PyArrayObject*) py_%(name)s));
%(fail)s
}
""" % dict(sub, name=name, type_num=self.dtype_specs()[2])
else:
check = ""
return check + """
%(name)s = (PyArrayObject*)(py_%(name)s);
Py_XINCREF(%(name)s);
""" % dict(sub, name=name, type_num=self.dtype_specs()[2])
def c_cleanup(self, name, sub):
"""
Override `CLinkerType.c_cleanup`.
"""
return """
if (%(name)s) {
Py_XDECREF(%(name)s);
}
""" % locals()
def c_sync(self, name, sub):
"""
Override `CLinkerType.c_sync`.
"""
fail = sub['fail']
type_num = self.dtype_specs()[2]
return """
{Py_XDECREF(py_%(name)s);}
if (!%(name)s) {
Py_INCREF(Py_None);
py_%(name)s = Py_None;
}
else if ((void*)py_%(name)s != (void*)%(name)s) {
py_%(name)s = (PyObject*)%(name)s;
}
{Py_XINCREF(py_%(name)s);}
if (%(name)s && !PyArray_ISALIGNED((PyArrayObject*) py_%(name)s)) {
PyErr_Format(PyExc_NotImplementedError,
"c_sync: expected an aligned array, got non-aligned array of type %%ld"
" with %%ld dimensions, with 3 last dims "
"%%ld, %%ld, %%ld"
" and 3 last strides %%ld %%ld, %%ld.",
(long int) PyArray_TYPE((PyArrayObject*) py_%(name)s),
(long int) PyArray_NDIM(%(name)s),
(long int) PyArray_NDIM(%(name)s) >= 3 ?
PyArray_DIMS(%(name)s)[PyArray_NDIM(%(name)s)-3] : -1,
(long int) PyArray_NDIM(%(name)s) >= 2 ?
PyArray_DIMS(%(name)s)[PyArray_NDIM(%(name)s)-2] : -1,
(long int) PyArray_NDIM(%(name)s) >= 1 ?
PyArray_DIMS(%(name)s)[PyArray_NDIM(%(name)s)-1] : -1,
(long int) PyArray_NDIM(%(name)s) >= 3 ?
PyArray_STRIDES(%(name)s)[PyArray_NDIM(%(name)s)-3] : -1,
(long int) PyArray_NDIM(%(name)s) >= 2 ?
PyArray_STRIDES(%(name)s)[PyArray_NDIM(%(name)s)-2] : -1,
(long int) PyArray_NDIM(%(name)s) >= 1 ?
PyArray_STRIDES(%(name)s)[PyArray_NDIM(%(name)s)-1] : -1
);
%(fail)s
}
""" % locals()
def c_headers(self, c_compiler):
"""
Override `CLinkerObject.c_headers`.
"""
return scal.get_scalar_type(self.dtype).c_headers(c_compiler)
def c_libraries(self, c_compiler):
return scal.get_scalar_type(self.dtype).c_libraries(c_compiler)
def c_compile_args(self, c_compiler):
return scal.get_scalar_type(self.dtype).c_compile_args(c_compiler)
def c_support_code(self):
"""
Override `CLinkerObject.c_support_code`.
"""
return scal.get_scalar_type(self.dtype).c_support_code()
def c_init_code(self):
return scal.get_scalar_type(self.dtype).c_init_code()
def c_code_cache_version(self):
scalar_version = scal.get_scalar_type(self.dtype).c_code_cache_version()
if scalar_version:
return (11,) + scalar_version
else:
return ()
def value_zeros(self, shape):
"""
Create an numpy ndarray full of 0 values.
"""
return numpy.zeros(shape, dtype=self.dtype)
def get_shape_info(self, obj):
"""
Return the information needed to compute the memory size of ``obj``.
The memory size is only the data, so this excludes the container.
For an ndarray, this is the data, but not the ndarray object and
other data structures such as shape and strides.
``get_shape_info()`` and ``get_size()`` work in tandem for the memory
profiler.
``get_shape_info()`` is called during the execution of the function.
So it is better that it is not too slow.
``get_size()`` will be called on the output of this function
when printing the memory profile.
Parameters
----------
obj
The object that this Type represents during execution.
Returns
-------
object
Python object that ``self.get_size()`` understands.
"""
return obj.shape
def get_size(self, shape_info):
"""
Number of bytes taken by the object represented by shape_info.
Parameters
----------
shape_info
The output of the call to get_shape_info().
Returns
-------
int
The number of bytes taken by the object described by ``shape_info``.
"""
if shape_info:
return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize
else: # a scalar
return numpy.dtype(self.dtype).itemsize
theano.compile.ops.expandable_types += (TensorType,)
def values_eq_approx_remove_inf(a, b):
return TensorType.values_eq_approx(a, b, True)
def values_eq_approx_remove_nan(a, b):
return TensorType.values_eq_approx(a, b, False, True)
def values_eq_approx_remove_inf_nan(a, b):
return TensorType.values_eq_approx(a, b, True, True)
def values_eq_approx_always_true(a, b):
return True
# Register TensorType C code for ViewOp.
theano.compile.register_view_op_c_code(
TensorType,
"""
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""",
version=1)
# Register TensorType C code for Shape Op.
theano.compile.register_shape_c_code(
TensorType,
"""
npy_intp shape[] = {PyArray_NDIM(%(iname)s)};
if(%(oname)s == NULL || (PyArray_DIMS(%(oname)s)[0] != shape[0]))
{
Py_XDECREF(%(oname)s);
%(oname)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, NPY_INT64);
}
for(int i=0;i<shape[0];i++)
{
((npy_int64*)PyArray_GETPTR1(%(oname)s, i))[0] = PyArray_DIMS(%(iname)s)[i];
}
""",
version=1)
# Register TensorType C code for ViewOp.
theano.compile.register_shape_i_c_code(
TensorType,
"""
if(!%(oname)s)
%(oname)s=(PyArrayObject*)PyArray_EMPTY(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(oname)s))[0]=PyArray_DIMS(%(iname)s)[%(i)s];
""",
"""
if (%(i)s>=PyArray_NDIM(%(iname)s)){
PyErr_SetString(PyExc_TypeError,
"Number of dimensions lower than expected");
%(fail)s
}
""",
version=3)
# Register TensorType C code for DeepCopyOp
theano.compile.register_deep_copy_op_c_code(
TensorType,
"""
int alloc = %(oname)s == NULL;
for(int i=0; !alloc && i<PyArray_NDIM(%(oname)s); i++) {
if(PyArray_DIMS(%(iname)s)[i] != PyArray_DIMS(%(oname)s)[i]) {
alloc = true;
break;
}
}
if(alloc) {
Py_XDECREF(%(oname)s);
%(oname)s = (PyArrayObject*)PyArray_NewCopy(%(iname)s,
NPY_ANYORDER);
if (!%(oname)s)
{
PyErr_SetString(PyExc_ValueError,
"DeepCopyOp: the copy failed!");
%(fail)s;
}
} else {
if(PyArray_CopyInto(%(oname)s, %(iname)s)){
PyErr_SetString(PyExc_ValueError,
"DeepCopyOp: the copy failed into already allocated space!");
%(fail)s;
}
}
""",
version=2)
theano.compile.register_rebroadcast_c_code(
TensorType,
"""
if(PyArray_DIMS(%(iname)s)[%(axis)s] != 1){
PyErr_Format(PyExc_ValueError,
"Dimension %(axis)s in Rebroadcast's input was"
" supposed to be 1 (got %%d instead)",
PyArray_DIMS(%(iname)s)[%(axis)s]);
%(fail)s
}
""",
version=1)
theano.compile.register_specify_shape_c_code(
TensorType,
"""
if (PyArray_NDIM(%(iname)s) != PyArray_DIMS(%(shape)s)[0]) {
PyErr_Format(PyExc_AssertionError,
"SpecifyShape: vector of shape has %%d elements,"
" but the input has %%d dimensions.",
PyArray_NDIM(%(iname)s),
PyArray_DIMS(%(shape)s)[0]);
%(fail)s;
}
for(int i = 0; i < PyArray_NDIM(%(iname)s); i++){
dtype_%(shape)s shp = ((dtype_%(shape)s*)PyArray_GETPTR1(%(shape)s,
i))[0];
if (PyArray_DIMS(%(iname)s)[i] != shp) {
PyErr_Format(PyExc_AssertionError,
"SpecifyShape: dim %%d of input has shape %%d,"
" expected %%d.",
i, PyArray_DIMS(%(iname)s)[i],
shp);
%(fail)s;
}
}
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""",
version=1)
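# Illustrative sketch (not part of Theano itself): the broadcastable pattern both
# fixes the number of dimensions and marks which of them must always be 1, e.g.
#     dmatrix = TensorType('float64', (False, False))   # prints as "matrix"
#     drow = TensorType('float64', (True, False))       # prints as "row"
#     x = dmatrix.make_variable('x')                     # symbolic variable
#     dmatrix.filter(numpy.ones((2, 3)))                 # accepted unchanged
#     drow.filter(numpy.ones((2, 3)))                    # raises TypeError (dim 0 must be 1)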
| 37.885922
| 96
| 0.519924
|
c283f877414182dbcd9c39ce1c4235bd841b845a
| 986
|
py
|
Python
|
Basic programs of Socket Programming/connecting_to_google.py
|
kampaitees/Accessing-other-s-systems-using-Socket-Programming
|
b17ddc440c0ff79b3cbb992f404d24d3f57c9513
|
[
"MIT"
] | 1
|
2019-10-04T22:46:39.000Z
|
2019-10-04T22:46:39.000Z
|
Basic programs of Socket Programming/connecting_to_google.py
|
kampaitees/Accessing-other-s-systems-using-Socket-Programming
|
b17ddc440c0ff79b3cbb992f404d24d3f57c9513
|
[
"MIT"
] | null | null | null |
Basic programs of Socket Programming/connecting_to_google.py
|
kampaitees/Accessing-other-s-systems-using-Socket-Programming
|
b17ddc440c0ff79b3cbb992f404d24d3f57c9513
|
[
"MIT"
] | null | null | null |
import sys
import socket
# AF_INET means that we are going to use the IPv4 version of the Internet Protocol
# SOCK_STREAM means that we are using TCP as the transport-layer protocol instead of UDP
try:
    # creating the socket with IPv4 addressing and TCP as the transport-layer protocol
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket successfully created')
# catching any error raised while creating the socket
except socket.error as msg:
print('Error in socket connection ', msg)
try:
    # getting the IP address of www.google.com via a DNS lookup
host = socket.gethostbyname('www.google.com')
# catching an error if the host name cannot be resolved to an IP address
except socket.gaierror as msg:
print('Error in getting host address')
sys.exit()
# using HTTP, whose well-known port number is 80
port = 80
#connecting to the google server
s.connect((host, port))
print('The socket is successfully connected to google on IP address: ', host)
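# A hedged continuation sketch (not part of the original script): once connected,
# a minimal HTTP request could be sent and the start of the reply printed, e.g.
#   s.sendall(b"GET / HTTP/1.1\r\nHost: www.google.com\r\nConnection: close\r\n\r\n")
#   print(s.recv(4096))
#   s.close()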
| 29.878788
| 85
| 0.729209
|
059cc0a0e54e47a29f9076d7e5992325aa6281d7
| 10,464
|
py
|
Python
|
data_processing/ssmi_proc/bytemaps.py
|
jkcm/mesoscale-morphology
|
7ee3f97d880878659ba2acb0418b53569b54ccb9
|
[
"MIT"
] | null | null | null |
data_processing/ssmi_proc/bytemaps.py
|
jkcm/mesoscale-morphology
|
7ee3f97d880878659ba2acb0418b53569b54ccb9
|
[
"MIT"
] | null | null | null |
data_processing/ssmi_proc/bytemaps.py
|
jkcm/mesoscale-morphology
|
7ee3f97d880878659ba2acb0418b53569b54ccb9
|
[
"MIT"
] | null | null | null |
""" Module for reading and verifying RSS gridded binary data files. """
import copy
import decimal
import gzip
import numpy as np
import sys
from collections import namedtuple
from collections import OrderedDict
from operator import mul
from functools import reduce
class Dataset:
""" Base class for bytemap datasets. """
"""
Public data:
filename = name of data file
missing = fill value used for missing data;
if None, then fill with byte codes (251-255)
dimensions = dictionary of dimensions for each coordinate
variables = dictionary of data for each variable
All classes derived from Dataset must implement the following:
_attributes() = returns list of attributes for each variable (list)
_coordinates() = returns coordinates (tuple)
_shape() = returns shape of raw data (tuple)
_variables() = returns list of all variables to get (list)
The derived class must provide "_get_" methods for the attributes.
If the derived class provides "_get_" methods for the variables,
those methods receive first priority.
The "_get_" methods in this module receive second priority.
The last priority is "_default_get", which requires:
_get_index(var) = returns bmap index for var
_get_scale(var) = returns bmap scale for var
_get_offset(var) = returns bmap offset for var
"""
def __init__(self):
self.dimensions = self._get_dimensions()
self.variables = self._get_variables()
def _default_get(self,var,bmap):
data = get_data(bmap,self._get_index(var))
acopy = copy.deepcopy(data)
bad = is_bad(data)
try: data *= self._get_scale(var)
except _NoValueFound: pass
try: data += self._get_offset(var)
except _NoValueFound: pass
        if self.missing is None: data[bad] = acopy[bad]
else: data[bad] = self.missing
return data
def _dtype(self): return np.uint8
def _get(self,var):
try: return _get_(var,_from_=self)
except _NoMethodFound: pass
try: return _get_(var,_from_=thismodule())
except _NoMethodFound: pass
return self._default_get
def _get_avariable(self,var,data):
variable = self._get(var)(var,data)
return Variable(var,variable,self)
def _get_coordinates(self,var=None):
if not var: return self._coordinates()
if var in self._coordinates(): return (var,)
return tuple([c for c in self._coordinates() if c != 'variable'])
def _get_dimensions(self):
dims = OrderedDict(zip(self._coordinates(),self._shape()))
del dims['variable']
return dims
def _get_variables(self):
data = OrderedDict()
try: stream = readgz(self.filename)
except: return data
bmap = unpack(stream, shape=self._shape(), dtype=self._dtype())
for var in self._variables():
data[var] = self._get_avariable(var,bmap)
return data
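# Illustrative sketch (not part of this module): a minimal derived Dataset would
# supply the hooks listed in the class docstring, roughly along these lines; the
# variable names, indices and scale/offset values are hypothetical.
#
#     class MyDataset(Dataset):
#         def __init__(self, filename, missing=-999.):
#             self.filename = filename
#             self.missing = missing
#             Dataset.__init__(self)
#         def _attributes(self): return ['coordinates']
#         def _coordinates(self): return ('variable', 'latitude', 'longitude')
#         def _shape(self): return (2, 720, 1440)
#         def _variables(self): return ['time', 'sst']
#         def _get_index(self, var): return {'time': 0, 'sst': 1}[var]
#         def _get_scale(self, var): return {'sst': 0.15}[var]
#         def _get_offset(self, var): return {'sst': -3.0}[var]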
def readgz(filename):
f = gzip.open(filename,'rb')
stream = f.read()
f.close()
return stream
def thismodule(): return sys.modules[__name__]
def unpack(stream,shape,dtype):
count = reduce(mul,shape)
return np.fromstring(stream,dtype=dtype,count=count).reshape(shape)
""" Library of Methods for _get_ Functions: """
def btest(ival,ipos):
"""Same usage as Fortran btest function."""
return ( ival & (1 << ipos) ) != 0
def cosd(x): return np.cos(np.radians(x))
def get_data(bmap,indx,dtype=np.float64):
"""Return numpy array of dytpe for variable in bmap given by indx."""
return np.array(np.squeeze(bmap[...,indx,:,:]),dtype=dtype)
def get_uv(speed,direction):
"""
Given speed and direction (degrees oceanographic),
return u (zonal) and v (meridional) components.
"""
u = speed * sind(direction)
v = speed * cosd(direction)
return u, v
def ibits(ival,ipos,ilen):
"""Same usage as Fortran ibits function."""
ones = ((1 << ilen)-1)
return ( ival & (ones << ipos) ) >> ipos
def is_bad(bmap,maxvalid=250):
"""Return mask where data are bad."""
return bmap > maxvalid
def sind(x): return np.sin(np.radians(x))
where = np.where
""" Library of Named Exceptions: """
_NoMethodFound = AttributeError
_NoValueFound = (AttributeError,KeyError)
_NotFound = AttributeError
""" Library of Named _get_ Functions: """
def _get_(var,_from_): return getattr(_from_,'_get_'+var)
def _get_ice(var,bmap,indx=0,icevalue=252):
return get_data(bmap,indx,dtype=bmap.dtype) == icevalue
def _get_land(var,bmap,indx=0,landvalue=255):
return get_data(bmap,indx,dtype=bmap.dtype) == landvalue
def _get_latitude(var,bmap,nlat=720,dlat=0.25,lat0=-89.875):
if np.shape(bmap)[-2] != nlat: sys.exit('Latitude mismatch')
return np.array([dlat*ilat + lat0 for ilat in range(nlat)])
def _get_longitude(var,bmap,nlon=1440,dlon=0.25,lon0=0.125):
if np.shape(bmap)[-1] != nlon: sys.exit('Longitude mismatch')
return np.array([dlon*ilon + lon0 for ilon in range(nlon)])
def _get_nodata(var,bmap,indx=0):
return is_bad(get_data(bmap,indx,dtype=bmap.dtype))
class Variable(np.ndarray):
""" Variable exists solely to subclass numpy array with attributes. """
def __new__(cls,var,data,dataset):
obj = np.asarray(data).view(cls)
for attr in dataset._attributes():
get = _get_(attr,_from_=dataset)
setattr(obj,attr,get(var))
return obj
class Verify:
""" Base class for bytemap read verification. """
"""
Public data:
data = OrderedDict of OneOb-namedtuple lists for each variable
success = True/False
The derived class must supply the following:
For all files:
filename = name of verify file
variables = list of variables to verify
The following indices (1-based):
ilon1 = longitude index
ilon2 = longitude index
ilat1 = latitude index
ilat2 = latitude index
iasc = asc/dsc index (daily only)
For files organized as a list:
startline = starting line number of data (integer)
columns = column numbers for each variable (dictionary)
For files organized as arrays:
startline = starting line number of data for each variable (dict)
The startline and columns are counting starting from 1.
"""
def __init__(self,dataset):
self._file = [tokenize(line) for line in readtext(self.filename)]
self.data = self._get_data()
self.success = verify(dataset,self)
def _asc(self):
try: return zerobased(self.iasc)
except _NotFound: return Ellipsis
def _get_avariable(self,var):
data = []
indices = np.ndindex(self._nlat(),self._nlon())
for ilat,ilon in indices:
data.append(self._get_oneob(var,ilon,ilat))
return data
def _get_data(self):
data = OrderedDict()
for var in self.variables:
data[var] = self._get_avariable(var)
return data
def _get_line_word(self,var,ilon,ilat):
if self._islist(): return self._get_line_word_list(var,ilon,ilat)
else: return self._get_line_word_array(var,ilon,ilat)
def _get_line_word_array(self,var,ilon,ilat):
iline = zerobased(self.startline[var]) + ilat
iword = ilon
return iline,iword
def _get_line_word_list(self,var,ilon,ilat):
iline = zerobased(self.startline) + ilat*self._nlon() + ilon
iword = zerobased(self.columns[var])
return iline,iword
def _get_oneob(self,var,ilon,ilat):
iline,iword = self._get_line_word(var,ilon,ilat)
avalue = self._file[iline][iword]
return OneOb(self._lon(ilon), self._lat(ilat), self._asc(),
float(avalue), places(avalue))
def _islist(self): return hasattr(self,'columns')
def _lat(self,ilat): return zerobased(self.ilat1) + ilat
def _lon(self,ilon): return zerobased(self.ilon1) + ilon
def _nlat(self): return self.ilat2 - self.ilat1 + 1
def _nlon(self): return self.ilon2 - self.ilon1 + 1
OneOb = namedtuple('OneOb','lon lat asc val ndp')
"""
OneOb corresponds to one observation from verify file with:
lon = longitude index
lat = latitude index
asc = ascending/descending index
val = verify value
ndp = number of decimal places given in verify
The (asc,lat,lon) indices are 0-based.
"""
def places(astring):
"""
Given a string representing a floating-point number,
return number of decimal places of precision (note: is negative).
"""
return decimal.Decimal(astring).as_tuple().exponent
def readtext(filename):
f = open(filename,'r')
lines = f.readlines()
f.close()
return lines
def tokenize(line): return [item.strip() for item in line.split()]
def verify(dataset,verify):
""" Verify data were read correctly. """
"""
Required arguments:
dataset = a read Dataset instance
verify = a Verify instance
Returns:
success = True or False
"""
success = True
for var in verify.variables:
for ob in verify.data[var]:
readval = dataset.variables[var][ob.asc, ob.lat, ob.lon]
diff = abs(ob.val - readval)
match = diff < pow(10,ob.ndp)
if not match: success = False
print( ' '.join([str(ob.lon), str(ob.lat), str(var),
str(ob.val), str(readval), str(diff), str(match)]) )
return success
def zerobased(indx): return indx-1
if __name__ == '__main__':
link = 'http://www.remss.com/terms_of_data_use/terms_of_data_use.html'
print('Remote Sensing Systems')
print('444 Tenth Street, Suite 200')
print('Santa Rosa, CA 95401, USA')
print('FTP: ftp://ftp.ssmi.com')
print('Web: http://www.remss.com')
print('Support: support@remss.com')
print('Terms of Data Use: '+link)
| 32.598131
| 77
| 0.620795
|
f33f1cb38f719519d070579791ae5b08cc179785
| 17,879
|
py
|
Python
|
pi_software/robot_libs/ukmarsey_commands.py
|
robzed/pizero_for_ukmarsbot
|
331998e245cfd01ce6a5c72b54b3438965642942
|
[
"MIT"
] | null | null | null |
pi_software/robot_libs/ukmarsey_commands.py
|
robzed/pizero_for_ukmarsbot
|
331998e245cfd01ce6a5c72b54b3438965642942
|
[
"MIT"
] | null | null | null |
pi_software/robot_libs/ukmarsey_commands.py
|
robzed/pizero_for_ukmarsbot
|
331998e245cfd01ce6a5c72b54b3438965642942
|
[
"MIT"
] | 1
|
2021-02-05T13:41:49.000Z
|
2021-02-05T13:41:49.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
""" Main Control file for UKMarsBot Robot with an Arduino Nano co-processor.
Intended to run on a Raspberry Pi on the actual robot.
"""
#
# Copyright (c) 2020/2021 Rob Probin.
# (Some items taken from Vison2/Dizzy platform 2016)
# All original work.
#
# This is licensed under the MIT License. Please see LICENSE.
#
# NOTES
# * Coding Convention PEP-8 https://www.python.org/dev/peps/pep-0008/
# * Docstrings PEP-257 https://www.python.org/dev/peps/pep-0257/
import serial
import time
import datetime
from robot_libs.serial_snooper import serial_snooper
from robot_libs.Raspberry_Pi_Lib import shutdown_raspberry_pi
from robot_settings import INHIBIT_LOW_BATTERY_SHUTDOWN
from robot_settings import USE_OK_FOR_ALL_COMMANDS
################################################################
#
# Constants - usually not changed
#
# if we don't get this version, then abort!
MINIMUM_UKMARSEY_ARDUINO_NANO_SOFTWARE_VERSION = 1.6
MAXIMUM_UKMARSEY_ARDUINO_NANO_SOFTWARE_VERSION = 1.6 # can be None
NEWLINE = b"\x0A" # could be "\n" ... but we know only one byte is required
NEWLINE_VALUE = NEWLINE[0]
UKMARSEY_CLI_ENCODING = 'utf8'
# sensor constants
LEFT_SENSOR_INDEX = 2
RIGHT_SENSOR_INDEX = 0
FRONT_SENSOR_INDEX = 1
################################################################
#
# List of Command Constants
#
RESET_STATE_COMMAND = b"^" + NEWLINE
SHOW_VERSION_COMMAND = b"v" + NEWLINE
VERBOSE_OFF_COMMAND = b"V0" + NEWLINE
VERBOSE_ON_COMMAND = b"V1" + NEWLINE
EXTRA_VERBOSE_ON_COMMAND = b"V2" + NEWLINE
ECHO_OFF_COMMAND = b"E0" + NEWLINE
ECHO_ON_COMMAND = b"E1" + NEWLINE
OK_COMMAND = b"?" + NEWLINE
HELP_COMMAND = b"h" + NEWLINE
SWITCH_READ_COMMAND = b"s" + NEWLINE
BATTERY_READ_COMMAND = b"b" + NEWLINE
MOTOR_ACTION_STOP_COMMAND = b"x" + NEWLINE
LED_COMMAND = b"l%i" + NEWLINE
READ_SENSORS_COMMAND = b"S" + NEWLINE
READ_SENSORS_HEX_COMMAND = b"Sh" + NEWLINE # use read Hex (faster)
PINMODE_COMMAND = b"P%i=%s" + NEWLINE
DIGITAL_WRITE_COMMAND = b"D%i=%i" + NEWLINE
DIGITAL_READ_COMMAND = b"D%i" + NEWLINE
#SET_SPEED_AND_ROTATION = b"T%i,%i" + NEWLINE
STOP_MOTORS_COMMANDS = b"x" + NEWLINE
SENSOR_DISABLE = b"*0" + NEWLINE
SENSOR_ENABLE = b"*1" + NEWLINE
CONTROL_C_ETX = b"\x03" # aborts line
CONTROL_X_CAN = b"\x18" # aborts line and resets interpreter
################################################################
#
# List of Response Constants
#
UNSOLICITED_PREFIX = b"@"
UNSOLICITED_PREFIX_VALUE = UNSOLICITED_PREFIX[0]
ERROR_PREFIX = b"@Error:"
RESET_STATE_RETURN = b"RST"
OK_RESULT_VERBOSE = b"OK"
OK_RESULT_NUMERIC = ERROR_PREFIX + b"0"
################################################################
#
# Exceptions
#
class SoftReset(Exception):
pass
class ShutdownRequest(Exception):
pass
class MajorError(Exception):
pass
class SerialSyncError(Exception):
pass
class InterpreterError(Exception):
pass
class UkmarseyCommands:
""" This is the main control class for commands.
We make this a class so that:
        * Method names are automatically associated with an instance.
* Variables are associated with the object
* It's easy to see what's a private member (starting with _)
    The alternative is passing the serial port into each command and having
    a long list of imports, or using 'from ukmarsey_commands import *', which
    is horrible because it pollutes the namespace - increasing the chance of
    name collisions and making debugging harder.
There will probably never be more than one.
"""
################################################################
#
    # General Command Helper Functions
#
def _do_command(self, command):
# TODO: Is this obsolete and can be deleted?
pass
def _process_error_code(self, data):
print(data)
raise InterpreterError("Interpreter Error code returned")
def _process_unsolicited_data(self, data):
""" This function handles any unsolicited data returns that are made.
These always start with an @ character
"""
if data.startswith(ERROR_PREFIX):
self._process_error_code(data)
else:
# TODO: Process unsolicited data
print("Unsolicited data unhandled", data)
def _blocking_process_reply(self, expected):
""" This is a generic reply handler, that handles the most common cases of
a single expected return"""
#print("Expecting", expected)
while True:
data = self.port.read_until(NEWLINE)
#print('_blocking_process_reply:', data)
            if data and data[-1] == NEWLINE_VALUE:
if data.startswith(expected):
return True
# check for "@Defaulting Params" type commands
elif data[0] == UNSOLICITED_PREFIX_VALUE:
self._process_unsolicited_data(data)
else:
# TODO: Probably need to handle errors here?
print(data)
raise SerialSyncError("Unexpected return data")
else:
# TODO: Get a better method than throwing an exception.
raise SerialSyncError("Newline not found - timeout")
return False
def _blocking_get_reply(self):
""" This is a generic reply handler, that handles the most common cases of
getting some result back"""
while True:
data = self.port.read_until(NEWLINE)
#print('_blocking_get_reply:', data)
            if data and data[-1] == NEWLINE_VALUE:
# check for "@Defaulting Params" type commands
if data[0] == UNSOLICITED_PREFIX_VALUE:
self._process_unsolicited_data(data)
else:
self._process_ok_reply()
return data # includes the NEWLINE
else:
# TODO: Get a better method than throwing an exception.
raise SerialSyncError("Newline not found - timeout")
return False
def _clear_replies(self):
""" This is a reply handler that ignores replies up to a timeout happens with no newline"""
while True:
data = self.port.read_until(NEWLINE)
#print("_clear_replies", data)
if NEWLINE in data:
                if data[0] == UNSOLICITED_PREFIX_VALUE:
self._process_unsolicited_data(data)
else:
break
def _process_ok_reply(self):
""" for commands that don't reply on success, enable a reply if super verbose is on"""
if(self.super_verbose):
self._blocking_process_reply(OK_RESULT_VERBOSE)
################################################################
#
# Blocking Command Functions
# These functions block until they receive an ok (if applicable).
#
    # These blocking commands work one command at a time (a half-duplex and/or
    # synchronous protocol) and are slower than necessary, because we never use
    # the receive and transmit wires at the same time.
    #
    # We could send and receive at the same time, because we have both a receive
    # and a transmit wire.
    #
    # This send-and-receive style is also called asynchronous or full-duplex.
    # However, while this second method is faster, it is harder because we have
    # to manage multiple commands at the same time and match the results to the
    # command that generated them.
#
# There are actually a few replies that happen asynchronously (unsolicited
# by command) but we handle these inside these commands.
#
# Generally blocking commands are much easier to work with - and should be how
# you start.
def do_ok_test(self):
""" do_ok_test is a very basic command that always get a reply. Used for connection testing"""
self.port.write(OK_COMMAND)
if(self.numeric_error_codes):
self._blocking_process_reply(OK_RESULT_NUMERIC)
else:
self._blocking_process_reply(OK_RESULT_VERBOSE)
def get_version(self):
""" get_version is a very basic command that gets the version. Used for getting the version"""
self.port.write(SHOW_VERSION_COMMAND)
reply = self._blocking_get_reply().rstrip()
if len(reply) < 2 or reply[:1] != b'v':
print("Version returned =", reply)
raise MajorError("Version return not correct")
version = float(reply[1:].decode(UKMARSEY_CLI_ENCODING))
if version < MINIMUM_UKMARSEY_ARDUINO_NANO_SOFTWARE_VERSION:
print("Minimum required", MINIMUM_UKMARSEY_ARDUINO_NANO_SOFTWARE_VERSION)
raise MajorError("Version too old for Pi Zero control program")
if MAXIMUM_UKMARSEY_ARDUINO_NANO_SOFTWARE_VERSION is not None:
if version > MAXIMUM_UKMARSEY_ARDUINO_NANO_SOFTWARE_VERSION:
print("Maximum required", MAXIMUM_UKMARSEY_ARDUINO_NANO_SOFTWARE_VERSION)
raise MajorError("Version too new for Pi Zero control program")
return version
def set_echo_off(self):
""" Send an echo off to supress echoing of the commands back to us.
This is a special command in that it doesn't care about any replys on purpose
"""
self.port.write(ECHO_OFF_COMMAND)
self._clear_replies()
self.echo_on = False
self._process_ok_reply()
def set_echo_on(self):
""" Send an echo on.
        This is a special command in that it deliberately ignores any replies.
"""
self.port.write(ECHO_ON_COMMAND)
self._clear_replies()
self.echo_on = True
self._process_ok_reply()
def set_numeric_error_codes(self):
self.port.write(VERBOSE_OFF_COMMAND)
# No reply expected
self.super_verbose = False
self.numeric_error_codes = True
self._process_ok_reply()
def set_text_error_codes(self):
self.port.write(VERBOSE_ON_COMMAND)
# No reply expected
self.super_verbose = False
self.numeric_error_codes = False
self._process_ok_reply()
def set_super_verbose_codes(self):
self.port.write(EXTRA_VERBOSE_ON_COMMAND)
self.super_verbose = True
self.numeric_error_codes = False
self._process_ok_reply()
def get_switches(self):
""" get_switches """
self.port.write(SWITCH_READ_COMMAND)
reply = self._blocking_get_reply().rstrip()
value = int(reply.decode(UKMARSEY_CLI_ENCODING))
return value
def change_arduino_led(self, state):
"""
        Turn on/off the Arduino LED
        :param state: 0 or 1 for off and on
:return: Nothing returned
"""
self.port.write(LED_COMMAND % state)
# No reply expected in non-V2 mode
self._process_ok_reply()
def configure_GPIO_pinmode(self, pin, mode):
        ''' Same as Arduino pinMode - set the modes of the GPIO pins on the
Arduino.
:param pin = pin number
:param mode = string "INPUT", "OUTPUT", or "INPUT_PULLUP"
:return None
'''
if mode == "INPUT":
mode = b"I"
elif mode == "OUTPUT":
mode = b"O"
elif mode == "INPUT_PULLUP":
mode = b"U"
else:
print("Error in configure_GPIO_pinmode - what is ", mode)
return
self.port.write(PINMODE_COMMAND % (pin, mode))
self._process_ok_reply()
def write_GPIO_output(self, pin, state):
''' Similar to digitalWrite on Arduino
:param pin = pin number
:param state = 0 or 1
:return None
'''
self.port.write(DIGITAL_WRITE_COMMAND % (pin, state))
self._process_ok_reply()
def read_GPIO_input(self, pin):
''' Similar to digitalRead on Arduino
:param pin = pin number
:return 0 or 1
'''
self.port.write(DIGITAL_READ_COMMAND % pin)
reply = self._blocking_get_reply().rstrip()
return int(reply.decode(UKMARSEY_CLI_ENCODING))
def change_sensor_led(self, led, state):
raise MajorError("Unimplemented")
def get_battery_voltage(self):
""" get battery voltage in volts"""
self.port.write(BATTERY_READ_COMMAND)
return float(self._blocking_get_reply().decode(UKMARSEY_CLI_ENCODING))
def get_sensors(self):
""" Read the sensors from the robot
Returns 6 sensor readings (light-dark) """
self.port.write(READ_SENSORS_COMMAND)
data = self._blocking_get_reply().decode(UKMARSEY_CLI_ENCODING)
data = data.strip()
data_list = [int(i) for i in data.split(',')]
return data_list
def get_sensors_faster(self):
""" Read the sensors from the robot
Returns 6 sensor readings (light-dark) """
self.port.write(READ_SENSORS_HEX_COMMAND)
data = self._blocking_get_reply().decode(UKMARSEY_CLI_ENCODING)
data = data.strip()
# multiply by 4 to make consistent with other forms
data_list = [x*4 for x in list(bytes.fromhex(data))]
return data_list
def emergency_all_stop_command(self):
# Control-X
self.port.write(CONTROL_X_CAN)
self.port.write(CONTROL_X_CAN)
self._clear_replies()
def stop_motors(self):
self.port.write(STOP_MOTORS_COMMANDS)
self._process_ok_reply()
#def set_speed_and_rotation(self, v, w):
# self.port.write(SET_SPEED_AND_ROTATION % (v, w))
def enable_sensors(self):
""" Send an echo on.
This is a special command in that it doesn't care about any replys on purpose
"""
self.port.write(SENSOR_ENABLE)
self._process_ok_reply()
def disable_sensors(self):
""" Send an echo on.
This is a special command in that it doesn't care about any replys on purpose
"""
self.port.write(SENSOR_DISABLE)
self._process_ok_reply()
################################################################
#
# Higher Level Functions
#
def low_battery_shutdown(self, bat_voltage):
if not INHIBIT_LOW_BATTERY_SHUTDOWN:
self.emergency_all_stop_command()
f = open('shutdown_log.txt', 'a')
f.write("%s Low Battery Shutdown - %fv\n" % (str(datetime.datetime.now()), bat_voltage))
f.close()
shutdown_raspberry_pi()
def reset_arduino(self):
"""
        reset_arduino() does the correct things for us to get the Arduino Nano
        back into a known state to run the robot.
        :return: Nothing returned
"""
self.port.write(CONTROL_C_ETX)
time.sleep(0.02) # wait 20ms
self.port.write(CONTROL_X_CAN)
time.sleep(0.02)
found = False
count = 50
while not found:
self.port.write(RESET_STATE_COMMAND)
time.sleep(0.20)
# we do simple processing here
lines = self.port.readlines()
# print(lines)
for line in lines:
if line.startswith(RESET_STATE_RETURN):
print("Reset arduino")
found = True
count -= 1
if(count <= 0):
print("Having problems resetting arduino")
count = 200
self._clear_replies()
self.set_echo_off()
# make sure echo is off! (Doing it twice just in case)
self.set_echo_off()
self.do_ok_test()
version = self.get_version()
print("Arduino Nano Software Version = ", version)
self._clear_replies() # clear ok from get version
# which type of replies do we want?
if USE_OK_FOR_ALL_COMMANDS:
self.set_super_verbose_codes()
else:
self.set_numeric_error_codes()
# final tests that things are working ok
self.do_ok_test()
def front_wall_sensor(self):
sensor_list = self.get_sensors_faster()
return sensor_list[FRONT_SENSOR_INDEX]
################################################################
#
# Set up Functions
#
def set_up_port(self):
"""
        set_up_port() opens the serial port to the Arduino Nano (optionally
        wrapping it in the serial snooper) and flushes any bytes already waiting.
        :return: the opened serial port
"""
port = serial.Serial(self.serial_port, baudrate=self.baud_rate, timeout=0.1)
if self.snoop_serial_data:
port = serial_snooper(port)
self.port = port
time.sleep(0.05)
bytes_waiting = self.port.in_waiting
if bytes_waiting != 0:
print("Bytes Waiting = ", bytes_waiting)
incoming = self.port.read(bytes_waiting)
print(incoming)
print("Flushed bytes")
return port
def __init__(self, serial_port = None, snoop_serial_data = False, baud_rate = 115200):
""" Init the instance. """
self.serial_port = serial_port
self.snoop_serial_data = snoop_serial_data
self.baud_rate = baud_rate
if serial_port is not None:
self.set_up_port()
else:
self.port = None
self.numeric_error_codes = False
self.super_verbose = False
self.echo_on = True
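################################################################
#
# Minimal usage sketch: open the port, reset the Arduino into a known state,
# then poll the battery and sensors. The device path '/dev/serial0' is only an
# assumption for illustration; substitute the port the Nano actually appears on.
if __name__ == "__main__":
    robot = UkmarseyCommands(serial_port='/dev/serial0')
    robot.reset_arduino()                       # sync, echo off, version check
    print("Battery voltage =", robot.get_battery_voltage())
    print("Sensor readings =", robot.get_sensors_faster())
    robot.stop_motors()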
| 33.048059
| 102
| 0.607752
|
f0582bc3db26e7b7d061153b310b0960a7a54436
| 1,063
|
py
|
Python
|
checkov/kubernetes/checks/MemoryRequests.py
|
niradler/checkov
|
2628c6f28a5604efe3877d6eacc3044d2b66b7b1
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
checkov/kubernetes/checks/MemoryRequests.py
|
niradler/checkov
|
2628c6f28a5604efe3877d6eacc3044d2b66b7b1
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
checkov/kubernetes/checks/MemoryRequests.py
|
niradler/checkov
|
2628c6f28a5604efe3877d6eacc3044d2b66b7b1
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.base_spec_check import BaseK8Check
class MemoryRequests(BaseK8Check):
def __init__(self):
name = "Memory requests should be set"
id = "CKV_K8S_12"
# Location: container .resources.requests.memory
supported_kind = ['containers', 'initContainers']
categories = [CheckCategories.KUBERNETES]
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)
def get_resource_id(self, conf):
return f'{conf["parent"]} - {conf["name"]}' if conf.get('name') else conf["parent"]
def scan_spec_conf(self, conf):
if conf.get("resources"):
if "requests" in conf["resources"]:
if "memory" not in conf["resources"]["requests"]:
return CheckResult.FAILED
else:
return CheckResult.FAILED
else:
return CheckResult.FAILED
return CheckResult.PASSED
check = MemoryRequests()
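# A small illustration of what the check above accepts and rejects. The two
# dictionaries are hand-written container specs for this sketch only; in real
# use checkov feeds parsed Kubernetes manifests into scan_spec_conf().
if __name__ == "__main__":
    with_memory = {"name": "app", "resources": {"requests": {"memory": "64Mi"}}}
    without_memory = {"name": "app", "resources": {"requests": {"cpu": "250m"}}}
    print(check.scan_spec_conf(with_memory))     # CheckResult.PASSED
    print(check.scan_spec_conf(without_memory))  # CheckResult.FAILED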
| 34.290323
| 100
| 0.647225
|
dda84532c342642fc023a85e1532102c3c07899c
| 1,737
|
py
|
Python
|
sinfo/perifericos/forms.py
|
webdesigncuba/Sinfo
|
15998b43057b0c0f13083a3017f27740c64239bf
|
[
"MIT"
] | null | null | null |
sinfo/perifericos/forms.py
|
webdesigncuba/Sinfo
|
15998b43057b0c0f13083a3017f27740c64239bf
|
[
"MIT"
] | null | null | null |
sinfo/perifericos/forms.py
|
webdesigncuba/Sinfo
|
15998b43057b0c0f13083a3017f27740c64239bf
|
[
"MIT"
] | null | null | null |
#
# Created on Sat Dec 25 2021
#
# The MIT License (MIT)
# Copyright (c) 2021 David Cordero Rosales
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Formularios del Modelo Perifericos
# Django
from django import forms
# Models
from .models import *
class ChasisForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'form-control mb-3'
class Meta:
model = Chasis
fields = '__all__'
labels = {
'name': 'Marca',
'name': 'Modelo',
'name': 'Numero de Serie',
'name': 'Numero de Inventario',
}
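# NOTE on the labels above: the dict literal repeats the key 'name', so only the
# last entry ('Numero de Inventario') actually takes effect. A sketch of the
# intended mapping, using purely hypothetical field names (the real Chasis field
# names are not visible from this file), would give each label its own key:
#
#     labels = {
#         'marca': 'Marca',
#         'modelo': 'Modelo',
#         'numero_serie': 'Numero de Serie',
#         'numero_inventario': 'Numero de Inventario',
#     }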
| 36.957447
| 122
| 0.701209
|
605b7b28579bafd1a73be6653b257c74c41c7b59
| 627
|
py
|
Python
|
backpack.py
|
tauhid61/pandas_basic
|
45c4b7a797a98b73319dfeda8a21f8d4725de382
|
[
"Apache-2.0"
] | null | null | null |
backpack.py
|
tauhid61/pandas_basic
|
45c4b7a797a98b73319dfeda8a21f8d4725de382
|
[
"Apache-2.0"
] | null | null | null |
backpack.py
|
tauhid61/pandas_basic
|
45c4b7a797a98b73319dfeda8a21f8d4725de382
|
[
"Apache-2.0"
] | null | null | null |
# Copied a dataframe from a lecture video,
# pasted the unformatted text into a text file,
# and used this script to format the text and write it into 'backpack.csv'.
import csv
list_word = [['Item', 'Category', 'Quantity', 'Weight (oz.)']]
with open('backpack.txt') as handler:
    for line in handler:
        word = line.rstrip().split()
        list_word.append(word)
# data to be written row-wise in csv file
data = list_word
print(data)
# opening the csv file in 'w+' mode
file = open('backpack.csv', 'w+', newline ='')
# writing the data into the file
with file:
write = csv.writer(file)
write.writerows(data)
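# Small follow-up sketch (assuming pandas is installed, since the repository is
# about pandas basics): read the freshly written CSV back into a DataFrame to
# confirm the formatting worked.
try:
    import pandas as pd
    print(pd.read_csv('backpack.csv'))
except ImportError:
    pass  # pandas not available; the CSV file has still been written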
| 28.5
| 81
| 0.676236
|
03ed96dc30f2ff4d2a5921225a50a9779f2095f1
| 418
|
py
|
Python
|
001.py
|
evdeev/Project-Euler-Solutions
|
7a5d359435b78bbf4745320186fbfd0f78f6e128
|
[
"MIT"
] | null | null | null |
001.py
|
evdeev/Project-Euler-Solutions
|
7a5d359435b78bbf4745320186fbfd0f78f6e128
|
[
"MIT"
] | null | null | null |
001.py
|
evdeev/Project-Euler-Solutions
|
7a5d359435b78bbf4745320186fbfd0f78f6e128
|
[
"MIT"
] | null | null | null |
# Problem 1
# Multiples of 3 and 5
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
# The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000.
count = 0
for x in range(1000):
if (not x % 3) or (not x % 5):
count += x
# Alternative solution using a list comprehension
count = sum([x for x in range(1000) if (not x % 3) or (not x % 5)])
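# A loop-free alternative as a sketch: the multiples of k below n sum to
# k * m * (m + 1) / 2 with m = (n - 1) // k, and inclusion-exclusion subtracts
# the multiples of 15 that were counted twice. The helper name sum_of_multiples
# is only used for this example.
def sum_of_multiples(k, n):
    m = (n - 1) // k
    return k * m * (m + 1) // 2

count = sum_of_multiples(3, 1000) + sum_of_multiples(5, 1000) - sum_of_multiples(15, 1000)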
| 34.833333
| 97
| 0.650718
|
3f554887adbad2e3252414008c7a17abb6477c33
| 44,830
|
py
|
Python
|
lib/ruyaml/representer.py
|
pycontribs/ruamel-yaml
|
ecbfde4ed6fa69017c7630b0983376c5ad1c2fd2
|
[
"MIT"
] | 7
|
2020-02-01T10:07:10.000Z
|
2020-07-19T14:33:36.000Z
|
lib/ruyaml/representer.py
|
pycontribs/ruamel-yaml
|
ecbfde4ed6fa69017c7630b0983376c5ad1c2fd2
|
[
"MIT"
] | 4
|
2020-02-01T10:15:06.000Z
|
2020-07-19T19:31:20.000Z
|
lib/ruyaml/representer.py
|
pycontribs/ruamel-yaml
|
ecbfde4ed6fa69017c7630b0983376c5ad1c2fd2
|
[
"MIT"
] | 3
|
2020-04-22T10:38:34.000Z
|
2020-07-19T13:56:25.000Z
|
# coding: utf-8
import base64
import copyreg
import datetime
import types
from collections import OrderedDict
from ruyaml.anchor import Anchor
from ruyaml.comments import (
CommentedKeyMap,
CommentedKeySeq,
CommentedMap,
CommentedOrderedMap,
CommentedSeq,
CommentedSet,
TaggedScalar,
comment_attrib,
merge_attrib,
)
from ruyaml.compat import ordereddict # NOQA; type: ignore
from ruyaml.compat import _F
from ruyaml.error import * # NOQA
from ruyaml.nodes import * # NOQA
from ruyaml.scalarbool import ScalarBoolean
from ruyaml.scalarfloat import ScalarFloat
from ruyaml.scalarint import BinaryInt, HexCapsInt, HexInt, OctalInt, ScalarInt
from ruyaml.scalarstring import (
DoubleQuotedScalarString,
FoldedScalarString,
LiteralScalarString,
PlainScalarString,
SingleQuotedScalarString,
)
from ruyaml.timestamp import TimeStamp
if False: # MYPY
from typing import Any, Dict, List, Optional, Text, Union # NOQA
# fmt: off
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError', 'RoundTripRepresenter']
# fmt: on
class RepresenterError(YAMLError):
pass
class BaseRepresenter:
yaml_representers = {} # type: Dict[Any, Any]
yaml_multi_representers = {} # type: Dict[Any, Any]
def __init__(self, default_style=None, default_flow_style=None, dumper=None):
# type: (Any, Any, Any, Any) -> None
self.dumper = dumper
if self.dumper is not None:
self.dumper._representer = self
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {} # type: Dict[Any, Any]
self.object_keeper = [] # type: List[Any]
self.alias_key = None # type: Optional[int]
self.sort_base_mapping_type_on_output = True
@property
def serializer(self):
# type: () -> Any
try:
if hasattr(self.dumper, 'typ'):
return self.dumper.serializer # type: ignore
return self.dumper._serializer # type: ignore
except AttributeError:
return self # cyaml
def represent(self, data):
# type: (Any) -> None
node = self.represent_data(data)
self.serializer.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent_data(self, data):
# type: (Any) -> Any
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
# if node is None:
# raise RepresenterError(
# f"recursive objects are not allowed: {data!r}")
return node
# self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, str(data))
# if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
def represent_key(self, data):
# type: (Any) -> Any
"""
David Fraser: Extract a method to represent keys in mappings, so that
a subclass can choose not to quote them (for example)
used in represent_mapping
https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
"""
return self.represent_data(data)
@classmethod
def add_representer(cls, data_type, representer):
# type: (Any, Any) -> None
if 'yaml_representers' not in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
@classmethod
def add_multi_representer(cls, data_type, representer):
# type: (Any, Any) -> None
if 'yaml_multi_representers' not in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
def represent_scalar(self, tag, value, style=None, anchor=None):
# type: (Any, Any, Any, Any) -> Any
if style is None:
style = self.default_style
comment = None
if style and style[0] in '|>':
comment = getattr(value, 'comment', None)
if comment:
comment = [None, [comment]]
node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_omap(self, tag, omap, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item_key in omap:
item_val = omap[item_key]
node_item = self.represent_data({item_key: item_val})
# if not (isinstance(node_item, ScalarNode) \
# and not node_item.style):
# best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = list(mapping.items())
if self.sort_base_mapping_type_on_output:
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_key(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
# type: (Any) -> bool
return False
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
# type: (Any) -> bool
# https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
# "i.e. two occurrences of the empty tuple may or may not yield the same object"
# so "data is ()" should not be used
if data is None or (isinstance(data, tuple) and data == ()):
return True
if isinstance(data, (bytes, str, bool, int, float)):
return True
return False
def represent_none(self, data):
# type: (Any) -> Any
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
def represent_str(self, data):
# type: (Any) -> Any
return self.represent_scalar('tag:yaml.org,2002:str', data)
def represent_binary(self, data):
# type: (Any) -> Any
data = base64.encodebytes(data).decode('ascii')
return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
def represent_bool(self, data, anchor=None):
# type: (Any, Optional[Any]) -> Any
try:
value = self.dumper.boolean_representation[bool(data)] # type: ignore
except AttributeError:
if data:
value = 'true'
else:
value = 'false'
return self.represent_scalar('tag:yaml.org,2002:bool', value, anchor=anchor)
def represent_int(self, data):
# type: (Any) -> Any
return self.represent_scalar('tag:yaml.org,2002:int', str(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value * inf_value):
inf_value *= inf_value
def represent_float(self, data):
# type: (Any) -> Any
if data != data or (data == 0.0 and data == 1.0):
value = '.nan'
elif data == self.inf_value:
value = '.inf'
elif data == -self.inf_value:
value = '-.inf'
else:
value = repr(data).lower()
if getattr(self.serializer, 'use_version', None) == (1, 1):
if '.' not in value and 'e' in value:
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag in YAML 1.1. We fix
# this by adding '.0' before the 'e' symbol.
value = value.replace('e', '.0e', 1)
return self.represent_scalar('tag:yaml.org,2002:float', value)
def represent_list(self, data):
# type: (Any) -> Any
# pairs = (len(data) > 0 and isinstance(data, list))
# if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
# if not pairs:
return self.represent_sequence('tag:yaml.org,2002:seq', data)
# value = []
# for item_key, item_value in data:
# value.append(self.represent_mapping('tag:yaml.org,2002:map',
# [(item_key, item_value)]))
# return SequenceNode('tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
# type: (Any) -> Any
return self.represent_mapping('tag:yaml.org,2002:map', data)
def represent_ordereddict(self, data):
# type: (Any) -> Any
return self.represent_omap('tag:yaml.org,2002:omap', data)
def represent_set(self, data):
# type: (Any) -> Any
value = {} # type: Dict[Any, None]
for key in data:
value[key] = None
return self.represent_mapping('tag:yaml.org,2002:set', value)
def represent_date(self, data):
# type: (Any) -> Any
value = data.isoformat()
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
# type: (Any) -> Any
value = data.isoformat(' ')
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
# type: (Any, Any, Any, Any) -> Any
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
# type: (Any) -> None
raise RepresenterError(_F('cannot represent an object: {data!s}', data=data))
SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)
SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)
SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)
SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)
SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)
SafeRepresenter.add_representer(OrderedDict, SafeRepresenter.represent_ordereddict)
SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_complex(self, data):
# type: (Any) -> Any
if data.imag == 0.0:
data = repr(data.real)
elif data.real == 0.0:
data = _F('{data_imag!r}j', data_imag=data.imag)
elif data.imag > 0:
data = _F(
'{data_real!r}+{data_imag!r}j', data_real=data.real, data_imag=data.imag
)
else:
data = _F(
'{data_real!r}{data_imag!r}j', data_real=data.real, data_imag=data.imag
)
return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
# type: (Any) -> Any
return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
# type: (Any) -> Any
try:
name = _F(
'{modname!s}.{qualname!s}',
modname=data.__module__,
qualname=data.__qualname__,
)
except AttributeError:
# ToDo: check if this can be reached in Py3
name = _F(
'{modname!s}.{name!s}', modname=data.__module__, name=data.__name__
)
return self.represent_scalar('tag:yaml.org,2002:python/name:' + name, "")
def represent_module(self, data):
# type: (Any) -> Any
return self.represent_scalar(
'tag:yaml.org,2002:python/module:' + data.__name__, ""
)
def represent_object(self, data):
# type: (Any) -> Any
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
        # For reconstructing, we call function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copyreg.dispatch_table: # type: ignore
reduce = copyreg.dispatch_table[cls](data) # type: ignore
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError(_F('cannot represent object: {data!r}', data=data))
reduce = (list(reduce) + [None] * 5)[:5] # type: ignore
function, args, state, listitems, dictitems = reduce # type: ignore
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = 'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = 'tag:yaml.org,2002:python/object/apply:'
newobj = False
try:
function_name = _F(
'{fun!s}.{qualname!s}',
fun=function.__module__,
qualname=function.__qualname__,
)
except AttributeError:
# ToDo: check if this can be reached in Py3
function_name = _F(
'{fun!s}.{name!s}', fun=function.__module__, name=function.__name__
)
if (
not args
and not listitems
and not dictitems
and isinstance(state, dict)
and newobj
):
return self.represent_mapping(
'tag:yaml.org,2002:python/object:' + function_name, state
)
if not listitems and not dictitems and isinstance(state, dict) and not state:
return self.represent_sequence(tag + function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag + function_name, value)
Representer.add_representer(complex, Representer.represent_complex)
Representer.add_representer(tuple, Representer.represent_tuple)
Representer.add_representer(type, Representer.represent_name)
Representer.add_representer(types.FunctionType, Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name)
Representer.add_representer(types.ModuleType, Representer.represent_module)
Representer.add_multi_representer(object, Representer.represent_object)
Representer.add_multi_representer(type, Representer.represent_name)
class RoundTripRepresenter(SafeRepresenter):
# need to add type here and write out the .comment
# in serializer and emitter
def __init__(self, default_style=None, default_flow_style=None, dumper=None):
# type: (Any, Any, Any) -> None
if not hasattr(dumper, 'typ') and default_flow_style is None:
default_flow_style = False
SafeRepresenter.__init__(
self,
default_style=default_style,
default_flow_style=default_flow_style,
dumper=dumper,
)
def ignore_aliases(self, data):
# type: (Any) -> bool
try:
if data.anchor is not None and data.anchor.value is not None:
return False
except AttributeError:
pass
return SafeRepresenter.ignore_aliases(self, data)
def represent_none(self, data):
# type: (Any) -> Any
if (
len(self.represented_objects) == 0
and not self.serializer.use_explicit_start
):
# this will be open ended (although it is not yet)
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
return self.represent_scalar('tag:yaml.org,2002:null', "")
def represent_literal_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = '|'
anchor = data.yaml_anchor(any=True)
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
represent_preserved_scalarstring = represent_literal_scalarstring
def represent_folded_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = '>'
anchor = data.yaml_anchor(any=True)
for fold_pos in reversed(getattr(data, 'fold_pos', [])):
if (
data[fold_pos] == ' '
and (fold_pos > 0 and not data[fold_pos - 1].isspace())
and (fold_pos < len(data) and not data[fold_pos + 1].isspace())
):
data = data[:fold_pos] + '\a' + data[fold_pos:]
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
def represent_single_quoted_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = "'"
anchor = data.yaml_anchor(any=True)
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
def represent_double_quoted_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = '"'
anchor = data.yaml_anchor(any=True)
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
def represent_plain_scalarstring(self, data):
# type: (Any) -> Any
tag = None
style = ''
anchor = data.yaml_anchor(any=True)
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style, anchor=anchor)
def insert_underscore(self, prefix, s, underscore, anchor=None):
# type: (Any, Any, Any, Any) -> Any
if underscore is None:
return self.represent_scalar(
'tag:yaml.org,2002:int', prefix + s, anchor=anchor
)
if underscore[0]:
sl = list(s)
pos = len(s) - underscore[0]
while pos > 0:
sl.insert(pos, '_')
pos -= underscore[0]
s = "".join(sl)
if underscore[1]:
s = '_' + s
if underscore[2]:
s += '_'
return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor)
def represent_scalar_int(self, data):
# type: (Any) -> Any
if data._width is not None:
s = '{:0{}d}'.format(data, data._width)
else:
s = format(data, 'd')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore("", s, data._underscore, anchor=anchor)
def represent_binary_int(self, data):
# type: (Any) -> Any
if data._width is not None:
# cannot use '{:#0{}b}', that strips the zeros
s = '{:0{}b}'.format(data, data._width)
else:
s = format(data, 'b')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore('0b', s, data._underscore, anchor=anchor)
def represent_octal_int(self, data):
# type: (Any) -> Any
if data._width is not None:
# cannot use '{:#0{}o}', that strips the zeros
s = '{:0{}o}'.format(data, data._width)
else:
s = format(data, 'o')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore('0o', s, data._underscore, anchor=anchor)
def represent_hex_int(self, data):
# type: (Any) -> Any
if data._width is not None:
# cannot use '{:#0{}x}', that strips the zeros
s = '{:0{}x}'.format(data, data._width)
else:
s = format(data, 'x')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
def represent_hex_caps_int(self, data):
# type: (Any) -> Any
if data._width is not None:
# cannot use '{:#0{}X}', that strips the zeros
s = '{:0{}X}'.format(data, data._width)
else:
s = format(data, 'X')
anchor = data.yaml_anchor(any=True)
return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
def represent_scalar_float(self, data):
# type: (Any) -> Any
"""this is way more complicated"""
value = None
anchor = data.yaml_anchor(any=True)
if data != data or (data == 0.0 and data == 1.0):
value = '.nan'
elif data == self.inf_value:
value = '.inf'
elif data == -self.inf_value:
value = '-.inf'
if value:
return self.represent_scalar(
'tag:yaml.org,2002:float', value, anchor=anchor
)
if data._exp is None and data._prec > 0 and data._prec == data._width - 1:
# no exponent, but trailing dot
value = '{}{:d}.'.format(
data._m_sign if data._m_sign else "", abs(int(data))
)
elif data._exp is None:
# no exponent, "normal" dot
prec = data._prec
ms = data._m_sign if data._m_sign else ""
# -1 for the dot
value = '{}{:0{}.{}f}'.format(
ms, abs(data), data._width - len(ms), data._width - prec - 1
)
if prec == 0 or (prec == 1 and ms != ""):
value = value.replace('0.', '.')
while len(value) < data._width:
value += '0'
else:
# exponent
m, es = '{:{}.{}e}'.format(
# data, data._width, data._width - data._prec + (1 if data._m_sign else 0)
data,
data._width,
data._width + (1 if data._m_sign else 0),
).split('e')
w = data._width if data._prec > 0 else (data._width + 1)
if data < 0:
w += 1
m = m[:w]
e = int(es)
m1, m2 = m.split('.') # always second?
while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0):
m2 += '0'
if data._m_sign and data > 0:
m1 = '+' + m1
esgn = '+' if data._e_sign else ""
if data._prec < 0: # mantissa without dot
if m2 != '0':
e -= len(m2)
else:
m2 = ""
while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width:
m2 += '0'
e -= 1
value = m1 + m2 + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width)
elif data._prec == 0: # mantissa with trailing dot
e -= len(m2)
value = (
m1
+ m2
+ '.'
+ data._exp
+ '{:{}0{}d}'.format(e, esgn, data._e_width)
)
else:
if data._m_lead0 > 0:
m2 = '0' * (data._m_lead0 - 1) + m1 + m2
m1 = '0'
m2 = m2[: -data._m_lead0] # these should be zeros
e += data._m_lead0
while len(m1) < data._prec:
m1 += m2[0]
m2 = m2[1:]
e -= 1
value = (
m1
+ '.'
+ m2
+ data._exp
+ '{:{}0{}d}'.format(e, esgn, data._e_width)
)
if value is None:
value = repr(data).lower()
return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor)
def represent_sequence(self, tag, sequence, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
# if the flow_style is None, the flow style tacked on to the object
# explicitly will be taken. If that is None as well the default flow
# style rules
try:
flow_style = sequence.fa.flow_style(flow_style)
except AttributeError:
flow_style = flow_style
try:
anchor = sequence.yaml_anchor()
except AttributeError:
anchor = None
node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
try:
comment = getattr(sequence, comment_attrib)
node.comment = comment.comment
# reset any comment already printed information
if node.comment and node.comment[1]:
for ct in node.comment[1]:
ct.reset()
item_comments = comment.items
for v in item_comments.values():
if v and v[1]:
for ct in v[1]:
ct.reset()
item_comments = comment.items
if node.comment is None:
node.comment = comment.comment
else:
# as we are potentially going to extend this, make a new list
node.comment = comment.comment[:]
try:
node.comment.append(comment.end)
except AttributeError:
pass
except AttributeError:
item_comments = {}
for idx, item in enumerate(sequence):
node_item = self.represent_data(item)
self.merge_comments(node_item, item_comments.get(idx))
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if len(sequence) != 0 and self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def merge_comments(self, node, comments):
# type: (Any, Any) -> Any
if comments is None:
assert hasattr(node, 'comment')
return node
if getattr(node, 'comment', None) is not None:
for idx, val in enumerate(comments):
if idx >= len(node.comment):
continue
nc = node.comment[idx]
if nc is not None:
assert val is None or val == nc
comments[idx] = nc
node.comment = comments
return node
def represent_key(self, data):
# type: (Any) -> Any
if isinstance(data, CommentedKeySeq):
self.alias_key = None
return self.represent_sequence(
'tag:yaml.org,2002:seq', data, flow_style=True
)
if isinstance(data, CommentedKeyMap):
self.alias_key = None
return self.represent_mapping(
'tag:yaml.org,2002:map', data, flow_style=True
)
return SafeRepresenter.represent_key(self, data)
def represent_mapping(self, tag, mapping, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
try:
flow_style = mapping.fa.flow_style(flow_style)
except AttributeError:
flow_style = flow_style
try:
anchor = mapping.yaml_anchor()
except AttributeError:
anchor = None
node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
# no sorting! !!
try:
comment = getattr(mapping, comment_attrib)
if node.comment is None:
node.comment = comment.comment
else:
# as we are potentially going to extend this, make a new list
node.comment = comment.comment[:]
if node.comment and node.comment[1]:
for ct in node.comment[1]:
ct.reset()
item_comments = comment.items
if self.dumper.comment_handling is None: # type: ignore
for v in item_comments.values():
if v and v[1]:
for ct in v[1]:
ct.reset()
try:
node.comment.append(comment.end)
except AttributeError:
pass
else:
# NEWCMNT
pass
except AttributeError:
item_comments = {}
merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
try:
merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0]
except IndexError:
merge_pos = 0
item_count = 0
if bool(merge_list):
items = mapping.non_merged_items()
else:
items = mapping.items()
for item_key, item_value in items:
item_count += 1
node_key = self.represent_key(item_key)
node_value = self.represent_data(item_value)
item_comment = item_comments.get(item_key)
if item_comment:
# assert getattr(node_key, 'comment', None) is None
# issue 351 did throw this because the comment from the list item was
# moved to the dict
node_key.comment = item_comment[:2]
nvc = getattr(node_value, 'comment', None)
if nvc is not None: # end comment already there
nvc[0] = item_comment[2]
nvc[1] = item_comment[3]
else:
node_value.comment = item_comment[2:]
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if (
(item_count != 0) or bool(merge_list)
) and self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
if bool(merge_list):
# because of the call to represent_data here, the anchors
# are marked as being used and thereby created
if len(merge_list) == 1:
arg = self.represent_data(merge_list[0])
else:
arg = self.represent_data(merge_list)
arg.flow_style = True
value.insert(merge_pos, (ScalarNode('tag:yaml.org,2002:merge', '<<'), arg))
return node
def represent_omap(self, tag, omap, flow_style=None):
# type: (Any, Any, Any) -> Any
value = [] # type: List[Any]
try:
flow_style = omap.fa.flow_style(flow_style)
except AttributeError:
flow_style = flow_style
try:
anchor = omap.yaml_anchor()
except AttributeError:
anchor = None
node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
try:
comment = getattr(omap, comment_attrib)
if node.comment is None:
node.comment = comment.comment
else:
# as we are potentially going to extend this, make a new list
node.comment = comment.comment[:]
if node.comment and node.comment[1]:
for ct in node.comment[1]:
ct.reset()
item_comments = comment.items
for v in item_comments.values():
if v and v[1]:
for ct in v[1]:
ct.reset()
try:
node.comment.append(comment.end)
except AttributeError:
pass
except AttributeError:
item_comments = {}
for item_key in omap:
item_val = omap[item_key]
node_item = self.represent_data({item_key: item_val})
# node_item.flow_style = False
# node item has two scalars in value: node_key and node_value
item_comment = item_comments.get(item_key)
if item_comment:
if item_comment[1]:
node_item.comment = [None, item_comment[1]]
assert getattr(node_item.value[0][0], 'comment', None) is None
node_item.value[0][0].comment = [item_comment[0], None]
nvc = getattr(node_item.value[0][1], 'comment', None)
if nvc is not None: # end comment already there
nvc[0] = item_comment[2]
nvc[1] = item_comment[3]
else:
node_item.value[0][1].comment = item_comment[2:]
# if not (isinstance(node_item, ScalarNode) \
# and not node_item.style):
# best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_set(self, setting):
# type: (Any) -> Any
flow_style = False
tag = 'tag:yaml.org,2002:set'
# return self.represent_mapping(tag, value)
value = [] # type: List[Any]
flow_style = setting.fa.flow_style(flow_style)
try:
anchor = setting.yaml_anchor()
except AttributeError:
anchor = None
node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
# no sorting! !!
try:
comment = getattr(setting, comment_attrib)
if node.comment is None:
node.comment = comment.comment
else:
# as we are potentially going to extend this, make a new list
node.comment = comment.comment[:]
if node.comment and node.comment[1]:
for ct in node.comment[1]:
ct.reset()
item_comments = comment.items
for v in item_comments.values():
if v and v[1]:
for ct in v[1]:
ct.reset()
try:
node.comment.append(comment.end)
except AttributeError:
pass
except AttributeError:
item_comments = {}
for item_key in setting.odict:
node_key = self.represent_key(item_key)
node_value = self.represent_data(None)
item_comment = item_comments.get(item_key)
if item_comment:
assert getattr(node_key, 'comment', None) is None
node_key.comment = item_comment[:2]
node_key.style = node_value.style = '?'
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
best_style = best_style
return node
def represent_dict(self, data):
# type: (Any) -> Any
"""write out tag if saved on loading"""
try:
t = data.tag.value
except AttributeError:
t = None
if t:
if t.startswith('!!'):
tag = 'tag:yaml.org,2002:' + t[2:]
else:
tag = t
else:
tag = 'tag:yaml.org,2002:map'
return self.represent_mapping(tag, data)
def represent_list(self, data):
# type: (Any) -> Any
try:
t = data.tag.value
except AttributeError:
t = None
if t:
if t.startswith('!!'):
tag = 'tag:yaml.org,2002:' + t[2:]
else:
tag = t
else:
tag = 'tag:yaml.org,2002:seq'
return self.represent_sequence(tag, data)
def represent_datetime(self, data):
# type: (Any) -> Any
inter = 'T' if data._yaml['t'] else ' '
_yaml = data._yaml
if _yaml['delta']:
data += _yaml['delta']
value = data.isoformat(inter)
else:
value = data.isoformat(inter)
if _yaml['tz']:
value += _yaml['tz']
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_tagged_scalar(self, data):
# type: (Any) -> Any
try:
tag = data.tag.value
except AttributeError:
tag = None
try:
anchor = data.yaml_anchor()
except AttributeError:
anchor = None
return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor)
def represent_scalar_bool(self, data):
# type: (Any) -> Any
try:
anchor = data.yaml_anchor()
except AttributeError:
anchor = None
return SafeRepresenter.represent_bool(self, data, anchor=anchor)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
anchor = state.pop(Anchor.attrib, None)
res = self.represent_mapping(tag, state, flow_style=flow_style)
if anchor is not None:
res.anchor = anchor
return res
RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none)
RoundTripRepresenter.add_representer(
LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring
)
RoundTripRepresenter.add_representer(
FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring
)
RoundTripRepresenter.add_representer(
SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring
)
RoundTripRepresenter.add_representer(
DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring
)
RoundTripRepresenter.add_representer(
PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring
)
# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple)
RoundTripRepresenter.add_representer(
ScalarInt, RoundTripRepresenter.represent_scalar_int
)
RoundTripRepresenter.add_representer(
BinaryInt, RoundTripRepresenter.represent_binary_int
)
RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int)
RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int)
RoundTripRepresenter.add_representer(
HexCapsInt, RoundTripRepresenter.represent_hex_caps_int
)
RoundTripRepresenter.add_representer(
ScalarFloat, RoundTripRepresenter.represent_scalar_float
)
RoundTripRepresenter.add_representer(
ScalarBoolean, RoundTripRepresenter.represent_scalar_bool
)
RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list)
RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict)
RoundTripRepresenter.add_representer(
CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict
)
RoundTripRepresenter.add_representer(
OrderedDict, RoundTripRepresenter.represent_ordereddict
)
RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set)
RoundTripRepresenter.add_representer(
TaggedScalar, RoundTripRepresenter.represent_tagged_scalar
)
RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime)
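################################################################
# Hedged usage sketch: the round-trip representer registered above is normally
# reached through the package's ruamel.yaml-style front end (assumed here to be
# importable as ruyaml.main.YAML) rather than instantiated directly.
if __name__ == '__main__':
    import sys

    from ruyaml.main import YAML  # assumption: same front end as ruamel.yaml

    yaml = YAML()  # default round-trip mode wires in RoundTripRepresenter
    data = yaml.load('a: 1  # keep me\nb: [2, 3]\n')
    data['c'] = 4
    yaml.dump(data, sys.stdout)  # comment and flow style survive the round trip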
| 37.420701
| 97
| 0.575954
|
eccc47124e3469aebabdff649ac02d5565d5b056
| 7,502
|
py
|
Python
|
numpy/distutils/tests/test_exec_command.py
|
chatcannon/numpy
|
f1b3f00f7abdd97d59dc5b1c0bb922a692452736
|
[
"BSD-3-Clause"
] | 1
|
2017-10-27T17:10:40.000Z
|
2017-10-27T17:10:40.000Z
|
numpy/distutils/tests/test_exec_command.py
|
chatcannon/numpy
|
f1b3f00f7abdd97d59dc5b1c0bb922a692452736
|
[
"BSD-3-Clause"
] | 1
|
2015-06-22T21:15:24.000Z
|
2015-06-22T21:23:53.000Z
|
numpy/distutils/tests/test_exec_command.py
|
chatcannon/numpy
|
f1b3f00f7abdd97d59dc5b1c0bb922a692452736
|
[
"BSD-3-Clause"
] | 1
|
2018-11-15T19:41:09.000Z
|
2018-11-15T19:41:09.000Z
|
from __future__ import division, absolute_import, print_function
import os
import sys
from tempfile import TemporaryFile
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
from numpy.testing import TestCase, run_module_suite, tempdir
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
class redirect_stdout(object):
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
def __enter__(self):
self.old_stdout = sys.stdout
sys.stdout = self._stdout
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush()
sys.stdout = self.old_stdout
# note: closing sys.stdout won't close it.
self._stdout.close()
class redirect_stderr(object):
"""Context manager to redirect stderr for exec_command test."""
def __init__(self, stderr=None):
self._stderr = stderr or sys.stderr
def __enter__(self):
self.old_stderr = sys.stderr
sys.stderr = self._stderr
def __exit__(self, exc_type, exc_value, traceback):
self._stderr.flush()
sys.stderr = self.old_stderr
# note: closing sys.stderr won't close it.
self._stderr.close()
class emulate_nonposix(object):
"""Context manager to emulate os.name != 'posix' """
def __init__(self, osname='non-posix'):
self._new_name = osname
def __enter__(self):
self._old_name = os.name
os.name = self._new_name
def __exit__(self, exc_type, exc_value, traceback):
os.name = self._old_name
def test_exec_command_stdout():
# Regression test for gh-2999 and gh-2915.
# There are several packages (nose, scipy.weave.inline, Sage inline
# Fortran) that replace stdout, in which case it doesn't have a fileno
# method. This is tested here, with a do-nothing command that fails if the
# presence of fileno() is assumed in exec_command.
# The code has a special case for posix systems, so if we are on posix test
# both that the special case works and that the generic code works.
# Test posix version:
with redirect_stdout(StringIO()):
with redirect_stderr(TemporaryFile()):
exec_command.exec_command("cd '.'")
if os.name == 'posix':
# Test general (non-posix) version:
with emulate_nonposix():
with redirect_stdout(StringIO()):
with redirect_stderr(TemporaryFile()):
exec_command.exec_command("cd '.'")
def test_exec_command_stderr():
# Test posix version:
with redirect_stdout(TemporaryFile(mode='w+')):
with redirect_stderr(StringIO()):
exec_command.exec_command("cd '.'")
if os.name == 'posix':
# Test general (non-posix) version:
with emulate_nonposix():
with redirect_stdout(TemporaryFile()):
with redirect_stderr(StringIO()):
exec_command.exec_command("cd '.'")
class TestExecCommand(TestCase):
def setUp(self):
self.pyexe = get_pythonexe()
def check_nt(self, **kws):
s, o = exec_command.exec_command('echo path=%path%')
self.assertEqual(s, 0)
self.assertNotEqual(o, '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
self.assertEqual(s, 0)
self.assertEqual(o, 'win32')
def check_posix(self, **kws):
s, o = exec_command.exec_command("echo Hello", **kws)
self.assertEqual(s, 0)
self.assertEqual(o, 'Hello')
s, o = exec_command.exec_command('echo $AAA', **kws)
self.assertEqual(s, 0)
self.assertEqual(o, '')
s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
self.assertEqual(s, 0)
self.assertEqual(o, 'Tere')
s, o = exec_command.exec_command('echo "$AAA"', **kws)
self.assertEqual(s, 0)
self.assertEqual(o, '')
if 'BBB' not in os.environ:
os.environ['BBB'] = 'Hi'
s, o = exec_command.exec_command('echo "$BBB"', **kws)
self.assertEqual(s, 0)
self.assertEqual(o, 'Hi')
s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
self.assertEqual(s, 0)
self.assertEqual(o, 'Hey')
s, o = exec_command.exec_command('echo "$BBB"', **kws)
self.assertEqual(s, 0)
self.assertEqual(o, 'Hi')
del os.environ['BBB']
s, o = exec_command.exec_command('echo "$BBB"', **kws)
self.assertEqual(s, 0)
self.assertEqual(o, '')
s, o = exec_command.exec_command('this_is_not_a_command', **kws)
self.assertNotEqual(s, 0)
self.assertNotEqual(o, '')
s, o = exec_command.exec_command('echo path=$PATH', **kws)
self.assertEqual(s, 0)
self.assertNotEqual(o, '')
s, o = exec_command.exec_command(
'"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
self.pyexe, **kws)
self.assertEqual(s, 0)
self.assertEqual(o, 'posix')
def check_basic(self, *kws):
s, o = exec_command.exec_command(
'"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
self.assertNotEqual(s, 0)
self.assertNotEqual(o, '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(\'0\');'
'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
self.pyexe, **kws)
self.assertEqual(s, 0)
self.assertEqual(o, '012')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
self.assertEqual(s, 15)
self.assertEqual(o, '')
s, o = exec_command.exec_command(
'"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
self.assertEqual(s, 0)
self.assertEqual(o, 'Heipa')
def check_execute_in(self, **kws):
with tempdir() as tmpdir:
fn = "file"
tmpfile = os.path.join(tmpdir, fn)
f = open(tmpfile, 'w')
f.write('Hello')
f.close()
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
(self.pyexe, fn), **kws)
self.assertNotEqual(s, 0)
self.assertNotEqual(o, '')
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
self.assertEqual(s, 0)
self.assertEqual(o, 'Hello')
def test_basic(self):
with redirect_stdout(StringIO()):
with redirect_stderr(StringIO()):
if os.name == "posix":
self.check_posix(use_tee=0)
self.check_posix(use_tee=1)
elif os.name == "nt":
self.check_nt(use_tee=0)
self.check_nt(use_tee=1)
self.check_execute_in(use_tee=0)
self.check_execute_in(use_tee=1)
if __name__ == "__main__":
run_module_suite()
| 34.1
| 79
| 0.580645
|
2cd52dafc50a4afbd1fe46317f4d382ab1297fa0
| 9,096
|
py
|
Python
|
lasagne/layers/dense.py
|
huangshunliang/Lasagne_h
|
359ea1b9f12678c3523c0cb100f646528d49df9e
|
[
"MIT"
] | 3,986
|
2015-04-09T17:00:42.000Z
|
2022-03-30T08:21:55.000Z
|
lasagne/layers/dense.py
|
huangshunliang/Lasagne_h
|
359ea1b9f12678c3523c0cb100f646528d49df9e
|
[
"MIT"
] | 736
|
2015-04-09T16:23:00.000Z
|
2021-01-02T01:35:45.000Z
|
lasagne/layers/dense.py
|
huangshunliang/Lasagne_h
|
359ea1b9f12678c3523c0cb100f646528d49df9e
|
[
"MIT"
] | 1,311
|
2015-04-09T17:05:38.000Z
|
2022-03-27T03:41:01.000Z
|
import numpy as np
import theano.tensor as T
from .. import init
from .. import nonlinearities
from .base import Layer
__all__ = [
"DenseLayer",
"NINLayer",
]
class DenseLayer(Layer):
"""
lasagne.layers.DenseLayer(incoming, num_units,
W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
nonlinearity=lasagne.nonlinearities.rectify, num_leading_axes=1, **kwargs)
A fully connected layer.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
num_units : int
The number of units of the layer
W : Theano shared variable, expression, numpy array or callable
Initial value, expression or initializer for the weights.
These should be a matrix with shape ``(num_inputs, num_units)``.
See :func:`lasagne.utils.create_param` for more information.
b : Theano shared variable, expression, numpy array, callable or ``None``
Initial value, expression or initializer for the biases. If set to
``None``, the layer will have no biases. Otherwise, biases should be
a 1D array with shape ``(num_units,)``.
See :func:`lasagne.utils.create_param` for more information.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
num_leading_axes : int
Number of leading axes to distribute the dot product over. These axes
will be kept in the output tensor, remaining axes will be collapsed and
multiplied against the weight matrix. A negative number gives the
(negated) number of trailing axes to involve in the dot product.
Examples
--------
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = DenseLayer(l_in, num_units=50)
If the input has more than two axes, by default, all trailing axes will be
flattened. This is useful when a dense layer follows a convolutional layer.
>>> l_in = InputLayer((None, 10, 20, 30))
>>> DenseLayer(l_in, num_units=50).output_shape
(None, 50)
Using the `num_leading_axes` argument, you can specify to keep more than
just the first axis. E.g., to apply the same dot product to each step of a
batch of time sequences, you would want to keep the first two axes.
>>> DenseLayer(l_in, num_units=50, num_leading_axes=2).output_shape
(None, 10, 50)
>>> DenseLayer(l_in, num_units=50, num_leading_axes=-1).output_shape
(None, 10, 20, 50)
"""
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
num_leading_axes=1, **kwargs):
super(DenseLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (nonlinearities.identity if nonlinearity is None
else nonlinearity)
self.num_units = num_units
if num_leading_axes >= len(self.input_shape):
raise ValueError(
"Got num_leading_axes=%d for a %d-dimensional input, "
"leaving no trailing axes for the dot product." %
(num_leading_axes, len(self.input_shape)))
elif num_leading_axes < -len(self.input_shape):
raise ValueError(
"Got num_leading_axes=%d for a %d-dimensional input, "
"requesting more trailing axes than there are input "
"dimensions." % (num_leading_axes, len(self.input_shape)))
self.num_leading_axes = num_leading_axes
if any(s is None for s in self.input_shape[num_leading_axes:]):
raise ValueError(
"A DenseLayer requires a fixed input shape (except for "
"the leading axes). Got %r for num_leading_axes=%d." %
(self.input_shape, self.num_leading_axes))
num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))
self.W = self.add_param(W, (num_inputs, num_units), name="W")
if b is None:
self.b = None
else:
self.b = self.add_param(b, (num_units,), name="b",
regularizable=False)
def get_output_shape_for(self, input_shape):
return input_shape[:self.num_leading_axes] + (self.num_units,)
def get_output_for(self, input, **kwargs):
num_leading_axes = self.num_leading_axes
if num_leading_axes < 0:
num_leading_axes += input.ndim
if input.ndim > num_leading_axes + 1:
# flatten trailing axes (into (n+1)-tensor for num_leading_axes=n)
input = input.flatten(num_leading_axes + 1)
activation = T.dot(input, self.W)
if self.b is not None:
activation = activation + self.b
return self.nonlinearity(activation)
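# --- Added note (not part of the original module) ---
# With the default num_leading_axes=1, an input of shape (32, 10, 20, 30) is
# flattened to (32, 10*20*30) = (32, 6000) and multiplied by a W of shape
# (6000, num_units), so the output has shape (32, num_units).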
class NINLayer(Layer):
"""
lasagne.layers.NINLayer(incoming, num_units, untie_biases=False,
W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
nonlinearity=lasagne.nonlinearities.rectify, **kwargs)
Network-in-network layer.
Like DenseLayer, but broadcasting across all trailing dimensions beyond the
2nd. This results in a convolution operation with filter size 1 on all
trailing dimensions. Any number of trailing dimensions is supported,
so NINLayer can be used to implement 1D, 2D, 3D, ... convolutions.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
num_units : int
The number of units of the layer
untie_biases : bool
If false the network has a single bias vector similar to a dense
layer. If true a separate bias vector is used for each trailing
dimension beyond the 2nd.
W : Theano shared variable, expression, numpy array or callable
Initial value, expression or initializer for the weights.
These should be a matrix with shape ``(num_inputs, num_units)``,
where ``num_inputs`` is the size of the second dimension of the input.
See :func:`lasagne.utils.create_param` for more information.
b : Theano shared variable, expression, numpy array, callable or ``None``
Initial value, expression or initializer for the biases. If set to
``None``, the layer will have no biases. Otherwise, biases should be
a 1D array with shape ``(num_units,)`` for ``untie_biases=False``, and
a tensor of shape ``(num_units, input_shape[2], ..., input_shape[-1])``
for ``untie_biases=True``.
See :func:`lasagne.utils.create_param` for more information.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
Examples
--------
>>> from lasagne.layers import InputLayer, NINLayer
>>> l_in = InputLayer((100, 20, 10, 3))
>>> l1 = NINLayer(l_in, num_units=5)
References
----------
.. [1] Lin, Min, Qiang Chen, and Shuicheng Yan (2013):
Network in network. arXiv preprint arXiv:1312.4400.
"""
def __init__(self, incoming, num_units, untie_biases=False,
W=init.GlorotUniform(), b=init.Constant(0.),
nonlinearity=nonlinearities.rectify, **kwargs):
super(NINLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (nonlinearities.identity if nonlinearity is None
else nonlinearity)
self.num_units = num_units
self.untie_biases = untie_biases
num_input_channels = self.input_shape[1]
self.W = self.add_param(W, (num_input_channels, num_units), name="W")
if b is None:
self.b = None
else:
if self.untie_biases:
biases_shape = (num_units,) + self.output_shape[2:]
else:
biases_shape = (num_units,)
self.b = self.add_param(b, biases_shape, name="b",
regularizable=False)
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.num_units) + input_shape[2:]
def get_output_for(self, input, **kwargs):
# cf * bc01... = fb01...
out_r = T.tensordot(self.W, input, axes=[[0], [1]])
# input dims to broadcast over
remaining_dims = range(2, input.ndim)
# bf01...
out = out_r.dimshuffle(1, 0, *remaining_dims)
if self.b is None:
activation = out
else:
if self.untie_biases:
# no broadcast
remaining_dims_biases = range(1, input.ndim - 1)
else:
remaining_dims_biases = ['x'] * (input.ndim - 2) # broadcast
b_shuffled = self.b.dimshuffle('x', 0, *remaining_dims_biases)
activation = out + b_shuffled
return self.nonlinearity(activation)
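# --- Added usage sketch (not part of the original module) ---
# NINLayer maps (batch, channels, d1, d2, ...) to (batch, num_units, d1, d2, ...),
# applying the same size-1 "convolution" over every trailing dimension:
#
# >>> from lasagne.layers import InputLayer, NINLayer
# >>> l_in = InputLayer((100, 20, 10, 3))
# >>> NINLayer(l_in, num_units=5).output_shape
# (100, 5, 10, 3)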
| 40.070485
| 79
| 0.633575
|
3b8a105f2a49bb0ecfafac09a6b536b8a1585d99
| 2,767
|
py
|
Python
|
macro.py
|
heegong/auto_mouse_python
|
bbef6b09783d952d72c1026b2c114675296e4683
|
[
"MIT"
] | 1
|
2020-12-09T12:36:25.000Z
|
2020-12-09T12:36:25.000Z
|
macro.py
|
heegong/auto_mouse_python
|
bbef6b09783d952d72c1026b2c114675296e4683
|
[
"MIT"
] | null | null | null |
macro.py
|
heegong/auto_mouse_python
|
bbef6b09783d952d72c1026b2c114675296e4683
|
[
"MIT"
] | null | null | null |
import pyautogui as pa
import os
import time
import keyboard  # used for the keyboard.is_pressed('q') check in the macro loop
ls2 = []
while True:
count = 0
ls = []
    choose = int(input("1. Register mouse position, 2. Check current mouse position, 3. Start macro, 4. View registered macros\n>>> "))
if choose == 1:
        position_x = int(input("x coordinate : "))
        position_y = int(input("y coordinate : "))
ls.append(position_x)
ls.append(position_y)
ls2.append(ls)
print(ls2)
time.sleep(1)
os.system('cls')
if choose == 2:
while True:
my_position = str(pa.position())
my_position = my_position.replace('Point','')
my_position = my_position.replace('(','')
my_position = my_position.replace(')','')
my_position = my_position.replace('x=','')
my_position = my_position.replace('y=','')
print(my_position)
count += 1
time.sleep(0.2)
if count == 10:
break
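        # Note (added): pyautogui.position() returns a Point named tuple, so the
        # replace() chain above could be avoided with: x, y = pa.position()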
        choosing = int(input("\n\nDo you want to register the last coordinate? \n\n1. Register, \n2. Do not register\n>>> "))
if choosing == 1:
            print(my_position, "has been registered.")
my_position = my_position.replace(' ','')
my_position_list = my_position.split(',')
ls.append(my_position_list[0])
ls.append(my_position_list[1])
ls2.append(ls)
print("\n"+str(ls2))
time.sleep(1)
os.system('cls')
if choosing == 2:
print("\n\n")
if choose == 3:
        num = int(input("How many times should it run? 0 means infinite\n>>> "))
flag = True
if num == 0:
while flag:
for i in range(len(ls2)):
pa.moveTo(int(ls2[i][0]),int(ls2[i][1]))
time.sleep(0.2)
pa.click()
time.sleep(0.2)
else:
a = 0
while num > a:
for i in range(len(ls2)):
pa.moveTo(int(ls2[i][0]),int(ls2[i][1]))
time.sleep(0.2)
pa.click()
if keyboard.is_pressed('q'):
break
time.sleep(0.2)
a += 1
os.system('cls')
if choose == 4:
st = str(ls2)
st = st.replace('[','')
st = st.replace(']','')
st_list = st.split(',')
st = []
x = []
y = []
Ls = []
for i in range(0,len(ls2) * 2,2):
x.append(st_list[i])
for i in range(1,len(ls2) * 2,2):
y.append(st_list[i])
for i in range(len(x)):
            st2 = "x coordinate :"+x[i]+"\ty coordinate :"+y[i]+"\n"
Ls.append(st2)
for i in range(len(Ls)):
print(Ls[i])
        print("Total count : %d\n\n"%len(Ls))
| 28.234694
| 87
| 0.428984
|
f344faaed6efdb55380a728e18b9884b9e5e2388
| 639
|
py
|
Python
|
cisco_umbrella_enforcement/setup.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
cisco_umbrella_enforcement/setup.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
cisco_umbrella_enforcement/setup.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name='cisco_umbrella_enforcement-rapid7-plugin',
version='1.0.0',
description='Cisco Umbrella Enforcement give technology partners the ability to send security events from their platform/service/appliance within a customer environment to the Cisco security cloud for enforcement',
author='rapid7',
author_email='',
url='',
packages=find_packages(),
install_requires=['komand'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/komand_cisco_umbrella_enforcement']
)
| 42.6
| 220
| 0.733959
|
dee1fca7f4eb4f62d617b8570c4c71d5577f3d76
| 2,250
|
py
|
Python
|
lib/exabgp/configuration/flow/then.py
|
earies/exabgp
|
dc11110e7e14f6a93dd62b94cd532d03ca730679
|
[
"BSD-3-Clause"
] | null | null | null |
lib/exabgp/configuration/flow/then.py
|
earies/exabgp
|
dc11110e7e14f6a93dd62b94cd532d03ca730679
|
[
"BSD-3-Clause"
] | null | null | null |
lib/exabgp/configuration/flow/then.py
|
earies/exabgp
|
dc11110e7e14f6a93dd62b94cd532d03ca730679
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
"""
parse_flow.py
Created by Thomas Mangin on 2015-06-22.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.configuration.core import Section
from exabgp.configuration.flow.parser import accept
from exabgp.configuration.flow.parser import discard
from exabgp.configuration.flow.parser import rate_limit
from exabgp.configuration.flow.parser import redirect
from exabgp.configuration.flow.parser import redirect_next_hop
from exabgp.configuration.flow.parser import copy
from exabgp.configuration.flow.parser import mark
from exabgp.configuration.flow.parser import action
from exabgp.configuration.static.parser import community
from exabgp.configuration.static.parser import extended_community
class ParseFlowThen (Section):
definition = [
'accept',
'discard',
'rate-limit 9600',
'redirect 30740:12345',
'redirect 1.2.3.4:5678',
'redirect 1.2.3.4',
'redirect-next-hop',
'copy 1.2.3.4',
'mark 123',
'action sample|terminal|sample-terminal',
]
syntax = \
'then {\n' \
' %s;\n' \
'}' % ';\n '.join(definition)
known = {
'accept': accept,
'discard': discard,
'rate-limit': rate_limit,
'redirect': redirect,
'redirect-to-nexthop': redirect_next_hop,
'copy': copy,
'mark': mark,
'action': action,
'community': community,
'extended-community': extended_community,
}
# 'community','extended-community'
action = {
'accept': 'nop',
'discard': 'attribute-add',
'rate-limit': 'attribute-add',
'redirect': 'nexthop-and-attribute',
'redirect-to-nexthop': 'attribute-add',
'copy': 'nexthop-and-attribute',
'mark': 'attribute-add',
'action': 'attribute-add',
'community': 'attribute-add',
'extended-community': 'attribute-add',
}
name = 'flow/then'
def __init__ (self, tokeniser, scope, error, logger):
Section.__init__(self,tokeniser,scope,error,logger)
def clear (self):
pass
def pre (self):
self.scope.set(self.name,self.scope.get('flow/route'))
return True
def post (self):
self.scope.pop(self.name)
return True
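# --- Added note (not part of the original module) ---
# The `definition` list above documents the actions accepted inside a flow
# route's `then { ... }` block, for example:
#
#   then {
#       rate-limit 9600;
#       redirect 30740:12345;
#   }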
| 26.162791
| 65
| 0.649778
|
965ce595cd4fb89ef3d08bdb4ccc8a99358ed050
| 6,433
|
py
|
Python
|
180712_DownloadCities.py
|
nateraluis/phd_server
|
0df6ffd9ce955718b6af908406374c93966b4c6c
|
[
"MIT"
] | null | null | null |
180712_DownloadCities.py
|
nateraluis/phd_server
|
0df6ffd9ce955718b6af908406374c93966b4c6c
|
[
"MIT"
] | null | null | null |
180712_DownloadCities.py
|
nateraluis/phd_server
|
0df6ffd9ce955718b6af908406374c93966b4c6c
|
[
"MIT"
] | null | null | null |
import time
import osmnx as ox
import os
import networkx as nx
#get_ipython().run_line_magic('matplotlib', 'inline')
useful_tags = ox.settings.useful_tags_path + ['cycleway']
ox.config(data_folder='Data', logs_folder='logs',
imgs_folder='imgs', cache_folder='cache',
use_cache=True, log_console=True, useful_tags_path=useful_tags)
def get_network(city, n_type='all', infrastructure='way["highway"]'):
try:
G = ox.graph_from_place(city, network_type=n_type, simplify=True,
which_result=1, infrastructure=infrastructure)
except:
G = ox.graph_from_place(city, network_type=n_type, simplify=True,
which_result=2, infrastructure=infrastructure)
return ox.project_graph(G)
def bike_network(city):
try:
G = ox.graph_from_place(city, network_type='bike', simplify=False, which_result=1)
except:
G = ox.graph_from_place(city, network_type='bike', simplify=False, which_result=2)
#non_cycleways = [(u, v, k) for u, v, k, d in G.edges(keys=True, data=True)
# if not ('cycleway' in d or d['highway'] == 'cycleway')]
#G.remove_edges_from(non_cycleways)
#G = ox.remove_isolated_nodes(G)
G = ox.simplify_graph(G)
return ox.project_graph(G)
def assure_path_exists(path):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
def simplify_graph(G):
G_simple = nx.Graph()
for i, j, data in G.edges(data=True):
w = data['weight'] if 'weight' in data else 1.0
if G_simple.has_edge(i, j):
G_simple[i][j]['weight'] += w
else:
G_simple.add_edge(i, j, weight=w)
return G_simple
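# --- Added note (not part of the original script) ---
# simplify_graph collapses the parallel edges of the (multi)graph returned by
# osmnx into a plain weighted nx.Graph, summing edge weights:
#
#   H = nx.MultiGraph()
#   H.add_edge(1, 2)
#   H.add_edge(1, 2)
#   simplify_graph(H)[1][2]['weight']   # -> 2.0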
def bike_walk_network(G):
"""
Filter the network to get only the cycleways or pedestrians
"""
#G = get_network(city)
cycle = []
remove = []
edges = dict(G.edges)
for k, v in edges.items():
        highway = v['highway']
        # collect every edge that is not tagged as a cycleway; these edges are
        # removed from the graph below
        if isinstance(highway, list):
            if 'cycleway' not in highway:
                cycle.append(k)
        elif highway != 'cycleway':
            cycle.append(k)
for c in cycle:
u, v, w = c
G.remove_edge(u, v)
degree = list(G.degree())
for k in degree:
n, v = k
if v == 0:
remove.append(n)
G.remove_nodes_from(remove)
return G
def area(city):
city_shape = ox.gdf_from_place(city)
    # fall back to the second Nominatim result if the first is not a polygon
    if not city_shape.geom_type.isin(['Polygon', 'MultiPolygon']).all():
        city_shape = ox.gdf_from_place(city, which_result=2)
return city_shape
cities = {'Phoenix': 'Phoenix, Arizona, USA',
'Amsterdam': 'Amsterdam, Netherlands',
'Detroit': 'Detroit, Michigan, USA',
'Manhattan': 'Manhattan, New York City, New York, USA',
'Mexico': 'DF, Mexico',
'London': 'London, England',
'Singapore': 'Singapore, Singapore',
'Budapest': 'Budapest, Hungary',
'Copenhagen': 'Copenhagen Municipality, Denmark',
'Barcelona': 'Barcelona, Catalunya, Spain',
'Portland': 'Portland, Oregon, USA',
'Bogota': 'Bogotá, Colombia',
'LA': 'Los Angeles, Los Angeles County, California, USA',
'Jakarta': 'Daerah Khusus Ibukota Jakarta, Indonesia'}
start = time.time()
for name, city in cities.items():
start_0 = time.time()
# Create and check the path
path = 'data/bikes_streets/{}/'.format(name)
assure_path_exists(path)
path_simple = 'data/bikes_streets/{}/simple/'.format(name)
assure_path_exists(path_simple)
print('Starting with: {}'.format(name))
# Download the shape
city_shape = area(city)
city_shape = ox.project_gdf(city_shape)
ox.save_gdf_shapefile(city_shape, filename='{}_shape'.format(name), folder=path)
print('Saved')
ox.plot_shape(city_shape)
# Drive
'''
G_drive = get_network(city, n_type='drive')
ox.save_graphml(G_drive, filename='{}_drive.graphml'.format(name), folder=path)
print('{} Drive downloaded and saved. Elapsed time {} s\nSimplifying the network...'.format(
name, round(time.time()-start_0, 2)))
G_simple = simplify_graph(G_drive)
nx.write_edgelist(G_simple, path=path_simple+'{}_drive_simple.txt'.format(name))
print('{} Drive simplified and saved. Elapsed time {} s'.format(
name, round(time.time()-start_0, 2)))
# Pedestrian
G = get_network(city, n_type='walk')
ox.save_graphml(G, filename='{}_pedestrian.graphml'.format(name), folder=path)
print('{} Pedestrian downloaded and saved. Elapsed time {} s\nSimplifying the network...'.format(
name, round(time.time()-start_0, 2)))
G_simple = simplify_graph(G)
nx.write_edgelist(G_simple, path=path_simple+'{}_pedestrian_simple.txt'.format(name))
print('{} Pedestrian simplified and saved. Elapsed time {} s'.format(
name, round(time.time()-start_0, 2)))
'''
# Bike
#if name == 'Beihai':
# G = bike_walk_network(G_drive)
#else:
# G = bike_network(city)
G = bike_network(city)
ox.save_graphml(G, filename='{}_bike.graphml'.format(name), folder=path)
print('{} Bike downloaded and saved. Elapsed time {} s\nSimplifying the network...'.format(
name, round(time.time()-start_0, 2)))
G_simple = simplify_graph(G)
nx.write_edgelist(G_simple, path=path_simple+'{}_bike_simple.txt'.format(name))
print('{} Bike simplified and saved. Elapsed time {} s'.format(name, round(time.time()-start_0, 2)))
# Rail
try:
G = get_network(city, n_type='none',
infrastructure='way["railway"~"subway|tram|light_rail"]')
except:
G = get_network(city, n_type='none', infrastructure='way["railway"]')
ox.save_graphml(G, filename='{}_rail.graphml'.format(name), folder=path)
print('{} Rail downloaded and saved. Elapsed time {} s\nSimplifying the network...'.format(
name, round(time.time()-start_0, 2)))
G_simple = simplify_graph(G)
nx.write_edgelist(G_simple, path=path_simple+'{}_rail_simple.txt'.format(name))
print('{} Bike simplified and saved. Elapsed time {} s'.format(name, round(time.time()-start_0, 2)))
print('{} done in {} s'.format(name, round(time.time()-start_0, 2)))
print('All cities done in {} min'.format((time.time()-start)/60))
| 35.153005
| 104
| 0.6291
|
2c096831e0fc48b81f46ed4ee8c3b853ed0b948d
| 1,044
|
py
|
Python
|
tests/test_pgdump.py
|
jshahbazi/pgbackup
|
6b1dce5290566c2db10b7732ee93e6e63c0861b2
|
[
"MIT"
] | null | null | null |
tests/test_pgdump.py
|
jshahbazi/pgbackup
|
6b1dce5290566c2db10b7732ee93e6e63c0861b2
|
[
"MIT"
] | null | null | null |
tests/test_pgdump.py
|
jshahbazi/pgbackup
|
6b1dce5290566c2db10b7732ee93e6e63c0861b2
|
[
"MIT"
] | null | null | null |
import pytest
import subprocess
from pgbackup import pgdump
url = "postgres://bob:password@example.com:5432/db_one"
def test_dump_calls_pg_dump(mocker):
"""
Utilize pg_dump with the database URL
"""
mocker.patch('subprocess.Popen')
assert pgdump.dump(url)
subprocess.Popen.assert_called_with(['pg_dump', url], stdout=subprocess.PIPE)
def test_dump_handles_oserror(mocker):
"""
pgdump.dump returns a reasonable error if pg_dump isn't installed.
"""
mocker.patch('subprocess.Popen', side_effect=OSError("no such file"))
with pytest.raises(SystemExit):
pgdump.dump(url)
def test_dump_file_name_without_timestamp():
"""
pgdump.db_file_name returns the name of the database
"""
assert pgdump.dump_file_name(url) == "db_one.sql"
def test_dump_file_name_with_timestamp():
"""
pgdump.dump_file_name returns the name of the database
"""
timestamp = "2017-12-03T13:14:10"
assert pgdump.dump_file_name(url, timestamp) == "db_one-2017-12-03T13:14:10.sql"
| 29.828571
| 84
| 0.70977
|
42f169de73207c41b84a753839f824ef23b60a4e
| 3,602
|
py
|
Python
|
leetcode_python/Depth-First-Search/strobogrammatic-number-ii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | 18
|
2019-08-01T07:45:02.000Z
|
2022-03-31T18:05:44.000Z
|
leetcode_python/Depth-First-Search/strobogrammatic-number-ii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
leetcode_python/Depth-First-Search/strobogrammatic-number-ii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | 15
|
2019-12-29T08:46:20.000Z
|
2022-03-08T14:14:05.000Z
|
# V0
class Solution:
"""
@param n: the length of strobogrammatic number
@return: All strobogrammatic numbers
"""
def findStrobogrammatic(self, n):
evenMidCandidate = ["11","69","88","96", "00"]
oddMidCandidate = ["0", "1", "8"]
if n == 0:
return [""]
if n == 1:
return oddMidCandidate
if n == 2:
return evenMidCandidate[:-1]
#if n % 2:
if n % 2 == 1:
pre, midCandidate = self.findStrobogrammatic(n-1), oddMidCandidate
else:
pre, midCandidate = self.findStrobogrammatic(n-2), evenMidCandidate
        premid = ( n - 1 ) // 2  # integer division keeps the slice index an int
return [p[:premid] + c + p[premid:] for c in midCandidate for p in pre]
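# --- Added example (not in the original solution) ---
# A strobogrammatic number reads the same when rotated 180 degrees; for n == 2
# the code above returns evenMidCandidate without "00":
#   Solution().findStrobogrammatic(2)  # -> ["11", "69", "88", "96"]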
# V1
# https://www.jiuzhang.com/solution/strobogrammatic-number-ii/#tag-highlight-lang-python
# IDEA : RECURSION
# EXAMPLE
# n = 1 -> ans = 0, 1, 8
# n = 2 -> ans = 11, 69, 88, 96, 00
# n = 3 -> ans = 1?1, 6?9, 8?8, 9?6, 0?0, for ? in [0,1,8]
# n = 4 -> ans = 11??11, 11??69, 11??88 .... for ?? in [11, 69, 88, 96, 00]
# .
# .
class Solution:
"""
@param n: the length of strobogrammatic number
@return: All strobogrammatic numbers
"""
def findStrobogrammatic(self, n):
evenMidCandidate = ["11","69","88","96", "00"]
oddMidCandidate = ["0", "1", "8"]
if n == 0:
return [""]
if n == 1:
return oddMidCandidate
if n == 2:
return evenMidCandidate[:-1]
#if n % 2:
if n % 2 == 1:
# n is odd
pre, midCandidate = self.findStrobogrammatic(n-1), oddMidCandidate
else:
# n is even
pre, midCandidate = self.findStrobogrammatic(n-2), evenMidCandidate
        premid = ( n - 1 ) // 2
return [p[:premid] + c + p[premid:] for c in midCandidate for p in pre]
# V1'
# https://www.cnblogs.com/grandyang/p/5200919.html
# http://www.voidcn.com/article/p-rltfcrmu-zo.html
from copy import copy
class Solution(object):
def findStrobogrammatic(self, n):
"""
:type n: int
:rtype: List[str]
"""
s = ['0', '1', '8', '6', '9']
d = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}
ret1, ret2 = [""], []
if n == 0:
return ret1
        mid = n // 2
i = 0
while i < mid:
if i == 0:
for j in range(1, len(s)):
ret2.append(s[j] + d[s[j]])
else:
for j in ret1:
for k in s:
ret2.append(j[:i] + k + d[k] + j[i:])
ret1 = copy(ret2)
ret2 = []
i += 1
if n % 2 != 0:
for j in ret1:
for k in range(3):
ret2.append(j[:i] + s[k] + j[i:])
ret1 = copy(ret2)
return ret1
# V2
# Time: O(n^2 * 5^(n/2))
# Space: O(n)
class Solution(object):
lookup = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'}
# @param {integer} n
# @return {string[]}
def findStrobogrammatic(self, n):
return self.findStrobogrammaticRecu(n, n)
def findStrobogrammaticRecu(self, n, k):
if k == 0:
return ['']
elif k == 1:
return ['0', '1', '8']
result = []
for num in self.findStrobogrammaticRecu(n, k - 2):
            for key, val in self.lookup.items():
if n != k or key != '0':
result.append(key + num + val)
return result
| 30.268908
| 88
| 0.463076
|
a2564476745cd953a7a6c19671e16b940a43c550
| 196
|
py
|
Python
|
wooey/tests/scripts/versioned_script/v3.py
|
fridmundklaus/wooey
|
4a2e31c282bfe86edf77b0ff8f58f4177eeab9dd
|
[
"BSD-3-Clause"
] | 1,572
|
2015-06-19T21:31:41.000Z
|
2022-03-30T23:37:13.000Z
|
wooey/tests/scripts/versioned_script/v3.py
|
fridmundklaus/wooey
|
4a2e31c282bfe86edf77b0ff8f58f4177eeab9dd
|
[
"BSD-3-Clause"
] | 309
|
2015-07-08T02:33:08.000Z
|
2022-02-08T00:37:11.000Z
|
wooey/tests/scripts/versioned_script/v3.py
|
fridmundklaus/wooey
|
4a2e31c282bfe86edf77b0ff8f58f4177eeab9dd
|
[
"BSD-3-Clause"
] | 220
|
2015-07-01T10:30:27.000Z
|
2022-02-05T04:10:54.000Z
|
import argparse
import os
parser = argparse.ArgumentParser(description="This is version 3")
parser.add_argument('--one')
if __name__ == '__main__':
args = parser.parse_args()
print(args)
| 21.777778
| 65
| 0.729592
|
875f0c7e9bfc78732417d7937f6abb2e8862b6ec
| 376
|
py
|
Python
|
tests/test_newsapi.py
|
plasticruler/newshound
|
c97ef09165eabb27ac65682e4893cf72dae7f3fb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_newsapi.py
|
plasticruler/newshound
|
c97ef09165eabb27ac65682e4893cf72dae7f3fb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_newsapi.py
|
plasticruler/newshound
|
c97ef09165eabb27ac65682e4893cf72dae7f3fb
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from sources.newsapi import NewsApi
class TestNewsApi(unittest.TestCase):
def test_get_newsapi(self):
        hn = NewsApi("c2d941c74c144421945618d97a458144")  # get a separate key?
articles = hn.get_articles(limit=2)
self.assertNotEqual(0,len(articles),"No articles returned")
if __name__=='__main__':
unittest.main()
| 31.333333
| 78
| 0.696809
|
7d608acb06b016d1a79b30fca8ee444b64b6b8f4
| 1,255
|
py
|
Python
|
Django2.0-Python-Full-Stack-Web-Developer/15-Django_Level_Two/first_project/populate_first_app.py
|
nobodysshadow/udemy_django
|
ff564d5d03f579be80ba03ae85230a1f19321e31
|
[
"MIT"
] | 6
|
2021-07-26T14:21:25.000Z
|
2021-07-26T14:32:01.000Z
|
Django2.0-Python-Full-Stack-Web-Developer/15-Django_Level_Two/first_project/populate_first_app.py
|
nobodysshadow/udemy_django
|
ff564d5d03f579be80ba03ae85230a1f19321e31
|
[
"MIT"
] | 2
|
2021-12-10T10:25:19.000Z
|
2021-12-10T10:27:15.000Z
|
Web-Dev/DJANGO_COURSE_1.xx/Django_Level_Two/first_project/populate_first_app.py
|
Advik-B/Learn-Python
|
66ac57259764e8f2c3c6513a8de6c106800d8abe
|
[
"MIT"
] | null | null | null |
import os
# Configure settings for project
# Need to run this before calling models from application!
os.environ.setdefault('DJANGO_SETTINGS_MODULE','first_project.settings')
import django
# Import settings
django.setup()
import random
from first_app.models import Topic,Webpage,AccessRecord
from faker import Faker
fakegen = Faker()
topics = ['Search','Social','Marketplace','News','Games']
def add_topic():
t = Topic.objects.get_or_create(top_name=random.choice(topics))[0]
t.save()
return t
def populate(N=5):
'''
Create N Entries of Dates Accessed
'''
for entry in range(N):
# Get Topic for Entry
top = add_topic()
# Create Fake Data for entry
fake_url = fakegen.url()
fake_date = fakegen.date()
fake_name = fakegen.company()
# Create new Webpage Entry
webpg = Webpage.objects.get_or_create(topic=top,url=fake_url,name=fake_name)[0]
# Create Fake Access Record for that page
# Could add more of these if you wanted...
accRec = AccessRecord.objects.get_or_create(name=webpg,date=fake_date)[0]
if __name__ == '__main__':
print("Populating the databases...Please Wait")
populate(20)
print('Populating Complete')
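# --- Added usage note (not part of the original script) ---
# Assuming the first_app migrations have been applied and Faker is installed,
# run this from the Django project root:
#   python populate_first_app.py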
| 24.607843
| 87
| 0.682072
|
023897b4a0122c21b93120eee711eb2d4f3d64b5
| 6,060
|
py
|
Python
|
netbox/secrets/forms.py
|
xcorp/netbox
|
48b9c9da932dc736710d9c14793067093f8f1bde
|
[
"Apache-2.0"
] | null | null | null |
netbox/secrets/forms.py
|
xcorp/netbox
|
48b9c9da932dc736710d9c14793067093f8f1bde
|
[
"Apache-2.0"
] | null | null | null |
netbox/secrets/forms.py
|
xcorp/netbox
|
48b9c9da932dc736710d9c14793067093f8f1bde
|
[
"Apache-2.0"
] | null | null | null |
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from django import forms
from taggit.forms import TagField
from dcim.models import Device
from extras.forms import AddRemoveTagsForm, CustomFieldBulkEditForm, CustomFieldFilterForm, CustomFieldForm
from utilities.forms import (
APISelect, APISelectMultiple, BootstrapMixin, FilterChoiceField, FlexibleModelChoiceField, SlugField,
StaticSelect2Multiple
)
from .models import Secret, SecretRole, UserKey
def validate_rsa_key(key, is_secret=True):
"""
Validate the format and type of an RSA key.
"""
try:
key = RSA.importKey(key)
except ValueError:
raise forms.ValidationError("Invalid RSA key. Please ensure that your key is in PEM (base64) format.")
except Exception as e:
raise forms.ValidationError("Invalid key detected: {}".format(e))
if is_secret and not key.has_private():
raise forms.ValidationError("This looks like a public key. Please provide your private RSA key.")
elif not is_secret and key.has_private():
raise forms.ValidationError("This looks like a private key. Please provide your public RSA key.")
try:
PKCS1_OAEP.new(key)
except Exception:
raise forms.ValidationError("Error validating RSA key. Please ensure that your key supports PKCS#1 OAEP.")
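# --- Added usage sketch (not part of the original module) ---
# Assuming pycryptodome (or pycrypto) is installed, a freshly generated key
# pair passes validation in both directions:
#
#   from Crypto.PublicKey import RSA
#   key = RSA.generate(2048)
#   validate_rsa_key(key.exportKey().decode(), is_secret=True)                # private key
#   validate_rsa_key(key.publickey().exportKey().decode(), is_secret=False)   # public key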
#
# Secret roles
#
class SecretRoleForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = SecretRole
fields = [
'name', 'slug', 'users', 'groups',
]
widgets = {
'users': StaticSelect2Multiple(),
'groups': StaticSelect2Multiple(),
}
class SecretRoleCSVForm(CustomFieldForm):
slug = SlugField()
class Meta:
model = SecretRole
fields = SecretRole.csv_headers
help_texts = {
'name': 'Name of secret role',
}
#
# Secrets
#
class SecretForm(BootstrapMixin, CustomFieldForm):
plaintext = forms.CharField(
max_length=65535,
required=False,
label='Plaintext',
widget=forms.PasswordInput(
attrs={
'class': 'requires-session-key',
}
)
)
plaintext2 = forms.CharField(
max_length=65535,
required=False,
label='Plaintext (verify)',
widget=forms.PasswordInput()
)
tags = TagField(
required=False
)
class Meta:
model = Secret
fields = [
'role', 'name', 'plaintext', 'plaintext2', 'tags',
]
widgets = {
'role': APISelect(
api_url="/api/secrets/secret-roles/"
)
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# A plaintext value is required when creating a new Secret
if not self.instance.pk:
self.fields['plaintext'].required = True
def clean(self):
# Verify that the provided plaintext values match
if self.cleaned_data['plaintext'] != self.cleaned_data['plaintext2']:
raise forms.ValidationError({
'plaintext2': "The two given plaintext values do not match. Please check your input."
})
class SecretCSVForm(CustomFieldForm):
device = FlexibleModelChoiceField(
queryset=Device.objects.all(),
to_field_name='name',
help_text='Device name or ID',
error_messages={
'invalid_choice': 'Device not found.',
}
)
role = forms.ModelChoiceField(
queryset=SecretRole.objects.all(),
to_field_name='name',
help_text='Name of assigned role',
error_messages={
'invalid_choice': 'Invalid secret role.',
}
)
plaintext = forms.CharField(
help_text='Plaintext secret data'
)
class Meta:
model = Secret
fields = Secret.csv_headers
help_texts = {
'name': 'Name or username',
}
def save(self, *args, **kwargs):
s = super().save(*args, **kwargs)
s.plaintext = str(self.cleaned_data['plaintext'])
return s
class SecretBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Secret.objects.all(),
widget=forms.MultipleHiddenInput()
)
role = forms.ModelChoiceField(
queryset=SecretRole.objects.all(),
required=False,
widget=APISelect(
api_url="/api/secrets/secret-roles/"
)
)
name = forms.CharField(
max_length=100,
required=False
)
class Meta:
nullable_fields = [
'name',
]
class SecretFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = Secret
q = forms.CharField(
required=False,
label='Search'
)
role = FilterChoiceField(
queryset=SecretRole.objects.all(),
to_field_name='slug',
widget=APISelectMultiple(
api_url="/api/secrets/secret-roles/",
value_field="slug",
)
)
#
# UserKeys
#
class UserKeyForm(BootstrapMixin, forms.ModelForm):
class Meta:
model = UserKey
fields = ['public_key']
help_texts = {
'public_key': "Enter your public RSA key. Keep the private one with you; you'll need it for decryption. "
"Please note that passphrase-protected keys are not supported.",
}
def clean_public_key(self):
key = self.cleaned_data['public_key']
# Validate the RSA key format.
validate_rsa_key(key, is_secret=False)
return key
class ActivateUserKeyForm(forms.Form):
_selected_action = forms.ModelMultipleChoiceField(
queryset=UserKey.objects.all(),
label='User Keys'
)
secret_key = forms.CharField(
widget=forms.Textarea(
attrs={
'class': 'vLargeTextField',
}
),
label='Your private key'
)
| 26.933333
| 117
| 0.610231
|
87a764208d82c167bc4fd595cf7148219513710a
| 635
|
py
|
Python
|
users/serializers.py
|
JonnatasCabral/todolist
|
c0dc602bf1911e72593507e87bd9ae7695f7f7fc
|
[
"MIT"
] | null | null | null |
users/serializers.py
|
JonnatasCabral/todolist
|
c0dc602bf1911e72593507e87bd9ae7695f7f7fc
|
[
"MIT"
] | null | null | null |
users/serializers.py
|
JonnatasCabral/todolist
|
c0dc602bf1911e72593507e87bd9ae7695f7f7fc
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import authenticate
from rest_framework import serializers
from rest_auth.serializers import LoginSerializer
from users.models import User
class UserSerializer(serializers.ModelSerializer):
def create(self, validated_data):
user = User.objects.create_user(
validated_data['username'],
validated_data['email'],
validated_data['password'],
)
return user
class Meta:
model = User
fields = ('id', 'email', 'username', 'name', 'password')
extra_kwargs = {
'password': {'write_only': True}
}
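# --- Added usage sketch (not part of the original module) ---
# Typical DRF usage with the custom users.models.User model; save() calls
# create() above, which hashes the password via create_user():
#
#   serializer = UserSerializer(data={'username': 'bob',
#                                     'email': 'bob@example.com',
#                                     'name': 'Bob',
#                                     'password': 's3cret'})
#   if serializer.is_valid():
#       user = serializer.save()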
| 24.423077
| 64
| 0.629921
|
782764fe330588502a9d9d4d72a799d674a8b600
| 5,417
|
py
|
Python
|
sim/sim-tracing.py
|
covid19-model/simulator
|
ab3eb808293e93343804e636764c1607f15f2b08
|
[
"MIT"
] | 70
|
2020-04-11T19:19:56.000Z
|
2022-03-14T05:24:33.000Z
|
sim/sim-tracing.py
|
Kipropa/simulator
|
ab3eb808293e93343804e636764c1607f15f2b08
|
[
"MIT"
] | 8
|
2020-04-22T08:25:04.000Z
|
2021-02-24T12:05:17.000Z
|
sim/sim-tracing.py
|
Kipropa/simulator
|
ab3eb808293e93343804e636764c1607f15f2b08
|
[
"MIT"
] | 37
|
2020-04-16T16:20:17.000Z
|
2021-10-01T17:05:50.000Z
|
import sys
if '..' not in sys.path:
sys.path.append('..')
import random as rd
import numpy as np
import pandas as pd
from lib.measures import *
from lib.experiment import Experiment, options_to_str, process_command_line
from lib.calibrationFunctions import get_calibrated_params, get_calibrated_params_from_path
from lib.distributions import CovidDistributions
from lib.calibrationSettings import calibration_lockdown_beta_multipliers
TO_HOURS = 24.0
if __name__ == '__main__':
# command line parsing
args = process_command_line()
country = args.country
area = args.area
cpu_count = args.cpu_count
continued_run = args.continued
name = 'tracing'
start_date = '2021-01-01'
end_date = '2021-05-01'
random_repeats = 100
full_scale = True
verbose = True
seed_summary_path = None
set_initial_seeds_to = {}
expected_daily_base_expo_per100k = 5 / 7
condensed_summary = True
# seed
c = 0
np.random.seed(c)
rd.seed(c)
if not args.calibration_state:
calibrated_params = get_calibrated_params(country=country, area=area)
else:
calibrated_params = get_calibrated_params_from_path(args.calibration_state)
print('Loaded non-standard calibration state.')
# contact tracing experiment parameters
min_contact_time = 0.25 # hours
if args.tracing_threshold is not None:
smart_tracing_thresholds = [args.tracing_threshold]
else:
distr = CovidDistributions(country=country)
smart_tracing_thresholds = [(min_contact_time * calibrated_params['beta_site']
* (1 - np.exp(distr.gamma * (- distr.delta))))]
if args.test_lag is not None:
test_lags = [args.test_lag]
else:
test_lags = [48.0, 24.0, 3.0, 0.5]
if args.p_adoption is not None:
ps_adoption = [args.p_adoption]
else:
ps_adoption = [1.0, 0.75, 0.5, 0.25, 0.1, 0.05, 0.0]
# ps_adoption = [1.0, 0.75, 0.5, 0.4, 0.3, 0.25, 0.2, 0.15, 0.1, 0.05]
if args.smoke_test:
start_date = '2021-01-01'
end_date = '2021-02-15'
random_repeats = 1
full_scale = False
ps_adoption = [1.0]
test_lags = [48.0]
# create experiment object
experiment_info = f'{name}-{country}-{area}'
experiment = Experiment(
experiment_info=experiment_info,
start_date=start_date,
end_date=end_date,
random_repeats=random_repeats,
cpu_count=cpu_count,
full_scale=full_scale,
verbose=verbose,
)
# contact tracing experiment for various options
for smart_tracing_threshold in smart_tracing_thresholds:
for test_lag in test_lags:
for p_adoption in ps_adoption:
# measures
max_days = (pd.to_datetime(end_date) - pd.to_datetime(start_date)).days
m = [
# standard tracing measures
ComplianceForAllMeasure(
t_window=Interval(0.0, TO_HOURS * max_days),
p_compliance=p_adoption),
SocialDistancingForSmartTracing(
t_window=Interval(0.0, TO_HOURS * max_days),
p_stay_home=1.0,
smart_tracing_isolation_duration=TO_HOURS * 14.0),
SocialDistancingForSmartTracingHousehold(
t_window=Interval(0.0, TO_HOURS * max_days),
p_isolate=1.0,
smart_tracing_isolation_duration=TO_HOURS * 14.0),
]
# set testing params via update function of standard testing parameters
def test_update(d):
d['smart_tracing_actions'] = ['isolate', 'test']
d['test_reporting_lag'] = test_lag
# isolation
d['smart_tracing_policy_isolate'] = 'advanced-threshold'
d['smart_tracing_isolation_threshold'] = smart_tracing_threshold
d['smart_tracing_isolated_contacts'] = 100000
                    d['smart_tracing_isolation_duration'] = 14 * TO_HOURS
# testing
d['smart_tracing_policy_test'] = 'advanced-threshold'
d['smart_tracing_testing_threshold'] = smart_tracing_threshold
d['smart_tracing_tested_contacts'] = 100000
d['trigger_tracing_after_posi_trace_test'] = False
return d
simulation_info = options_to_str(
p_adoption=p_adoption,
test_lag=test_lag,
tracing_threshold=smart_tracing_threshold,
)
experiment.add(
simulation_info=simulation_info,
country=country,
area=area,
measure_list=m,
test_update=test_update,
seed_summary_path=seed_summary_path,
set_initial_seeds_to=set_initial_seeds_to,
set_calibrated_params_to=calibrated_params,
full_scale=full_scale,
expected_daily_base_expo_per100k=expected_daily_base_expo_per100k)
print(f'{experiment_info} configuration done.')
# execute all simulations
experiment.run_all()
| 34.724359
| 91
| 0.598302
|
029cfc01d88b71cc6360712e08c7f07721e64450
| 1,049
|
py
|
Python
|
src/Logikanalysator/sigrok-integration/libsigrokdecode/decoders/midi/__init__.py
|
dm7h/fpga-event-recorder
|
4e53babbbb514ee375f4b5585b1d24e5b40f8df7
|
[
"0BSD"
] | null | null | null |
src/Logikanalysator/sigrok-integration/libsigrokdecode/decoders/midi/__init__.py
|
dm7h/fpga-event-recorder
|
4e53babbbb514ee375f4b5585b1d24e5b40f8df7
|
[
"0BSD"
] | null | null | null |
src/Logikanalysator/sigrok-integration/libsigrokdecode/decoders/midi/__init__.py
|
dm7h/fpga-event-recorder
|
4e53babbbb514ee375f4b5585b1d24e5b40f8df7
|
[
"0BSD"
] | null | null | null |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2013 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
MIDI protocol decoder.
The MIDI protocol is layered on top of the UART (async serial) protocol,
with a fixed baud rate of 31250 baud (+/- 1%) and 8n1 settings. Bytes are
sent LSB-first.
'''
from .pd import *
| 33.83871
| 76
| 0.734986
|
64d496a8f16eff8b26e241777df00ffb1fde44c6
| 55,548
|
py
|
Python
|
test/sql/test_resultset.py
|
rissikess/sqlalchemy-ceodbc
|
6f81f3f7a3c6e39843d478e11e010923a3bf7672
|
[
"MIT"
] | null | null | null |
test/sql/test_resultset.py
|
rissikess/sqlalchemy-ceodbc
|
6f81f3f7a3c6e39843d478e11e010923a3bf7672
|
[
"MIT"
] | null | null | null |
test/sql/test_resultset.py
|
rissikess/sqlalchemy-ceodbc
|
6f81f3f7a3c6e39843d478e11e010923a3bf7672
|
[
"MIT"
] | null | null | null |
from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, \
in_, not_in_, is_, ne_, le_
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, engines
from sqlalchemy import util
from sqlalchemy import (
exc, sql, func, select, String, Integer, MetaData, ForeignKey,
VARCHAR, INT, CHAR, text, type_coerce, literal_column,
TypeDecorator, table, column, literal)
from sqlalchemy.engine import result as _result
from sqlalchemy.testing.schema import Table, Column
import operator
from sqlalchemy.testing import assertions
from sqlalchemy import exc as sa_exc
from sqlalchemy.testing.mock import patch, Mock
from contextlib import contextmanager
from sqlalchemy.engine import default
class ResultProxyTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
'users', metadata,
Column(
'user_id', INT, primary_key=True,
test_needs_autoincrement=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True
)
Table(
'addresses', metadata,
Column(
'address_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('user_id', Integer, ForeignKey('users.user_id')),
Column('address', String(30)),
test_needs_acid=True
)
Table(
'users2', metadata,
Column('user_id', INT, primary_key=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True
)
def test_row_iteration(self):
users = self.tables.users
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
r = users.select().execute()
l = []
for row in r:
l.append(row)
eq_(len(l), 3)
@testing.requires.subqueries
def test_anonymous_rows(self):
users = self.tables.users
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
sel = select([users.c.user_id]).where(users.c.user_name == 'jack'). \
as_scalar()
for row in select([sel + 1, sel + 3], bind=users.bind).execute():
eq_(row['anon_1'], 8)
eq_(row['anon_2'], 10)
def test_row_comparison(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name='jack')
rp = users.select().execute().first()
eq_(rp, rp)
is_(not(rp != rp), True)
equal = (7, 'jack')
eq_(rp, equal)
eq_(equal, rp)
is_((not (rp != equal)), True)
is_(not (equal != equal), True)
def endless():
while True:
yield 1
ne_(rp, endless())
ne_(endless(), rp)
# test that everything compares the same
# as it would against a tuple
for compare in [False, 8, endless(), 'xyz', (7, 'jack')]:
for op in [
operator.eq, operator.ne, operator.gt,
operator.lt, operator.ge, operator.le
]:
try:
control = op(equal, compare)
except TypeError:
# Py3K raises TypeError for some invalid comparisons
assert_raises(TypeError, op, rp, compare)
else:
eq_(control, op(rp, compare))
try:
control = op(compare, equal)
except TypeError:
# Py3K raises TypeError for some invalid comparisons
assert_raises(TypeError, op, compare, rp)
else:
eq_(control, op(compare, rp))
@testing.provide_metadata
def test_column_label_overlap_fallback(self):
content = Table(
'content', self.metadata,
Column('type', String(30)),
)
bar = Table(
'bar', self.metadata,
Column('content_type', String(30))
)
self.metadata.create_all(testing.db)
testing.db.execute(content.insert().values(type="t1"))
row = testing.db.execute(content.select(use_labels=True)).first()
in_(content.c.type, row)
not_in_(bar.c.content_type, row)
in_(sql.column('content_type'), row)
row = testing.db.execute(
select([content.c.type.label("content_type")])).first()
in_(content.c.type, row)
not_in_(bar.c.content_type, row)
in_(sql.column('content_type'), row)
row = testing.db.execute(select([func.now().label("content_type")])). \
first()
not_in_(content.c.type, row)
not_in_(bar.c.content_type, row)
in_(sql.column('content_type'), row)
def test_pickled_rows(self):
users = self.tables.users
addresses = self.tables.addresses
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
for pickle in False, True:
for use_labels in False, True:
result = users.select(use_labels=use_labels).order_by(
users.c.user_id).execute().fetchall()
if pickle:
result = util.pickle.loads(util.pickle.dumps(result))
eq_(
result,
[(7, "jack"), (8, "ed"), (9, "fred")]
)
if use_labels:
eq_(result[0]['users_user_id'], 7)
eq_(
list(result[0].keys()),
["users_user_id", "users_user_name"])
else:
eq_(result[0]['user_id'], 7)
eq_(list(result[0].keys()), ["user_id", "user_name"])
eq_(result[0][0], 7)
eq_(result[0][users.c.user_id], 7)
eq_(result[0][users.c.user_name], 'jack')
if not pickle or use_labels:
assert_raises(
exc.NoSuchColumnError,
lambda: result[0][addresses.c.user_id])
else:
# test with a different table. name resolution is
# causing 'user_id' to match when use_labels wasn't used.
eq_(result[0][addresses.c.user_id], 7)
assert_raises(
exc.NoSuchColumnError, lambda: result[0]['fake key'])
assert_raises(
exc.NoSuchColumnError,
lambda: result[0][addresses.c.address_id])
def test_column_error_printing(self):
result = testing.db.execute(select([1]))
row = result.first()
class unprintable(object):
def __str__(self):
raise ValueError("nope")
msg = r"Could not locate column in row for column '%s'"
for accessor, repl in [
("x", "x"),
(Column("q", Integer), "q"),
(Column("q", Integer) + 12, r"q \+ :q_1"),
(unprintable(), "unprintable element.*"),
]:
assert_raises_message(
exc.NoSuchColumnError,
msg % repl,
result._getter, accessor
)
is_(result._getter(accessor, False), None)
assert_raises_message(
exc.NoSuchColumnError,
msg % repl,
lambda: row[accessor]
)
def test_fetchmany(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='ed')
users.insert().execute(user_id=9, user_name='fred')
r = users.select().execute()
l = []
for row in r.fetchmany(size=2):
l.append(row)
eq_(len(l), 2)
def test_column_slices(self):
users = self.tables.users
addresses = self.tables.addresses
users.insert().execute(user_id=1, user_name='john')
users.insert().execute(user_id=2, user_name='jack')
addresses.insert().execute(
address_id=1, user_id=2, address='foo@bar.com')
r = text(
"select * from addresses", bind=testing.db).execute().first()
eq_(r[0:1], (1,))
eq_(r[1:], (2, 'foo@bar.com'))
eq_(r[:-1], (1, 2))
def test_column_accessor_basic_compiled(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='john'),
dict(user_id=2, user_name='jack')
)
r = users.select(users.c.user_id == 2).execute().first()
eq_(r.user_id, 2)
eq_(r['user_id'], 2)
eq_(r[users.c.user_id], 2)
eq_(r.user_name, 'jack')
eq_(r['user_name'], 'jack')
eq_(r[users.c.user_name], 'jack')
def test_column_accessor_basic_text(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='john'),
dict(user_id=2, user_name='jack')
)
r = testing.db.execute(
text("select * from users where user_id=2")).first()
eq_(r.user_id, 2)
eq_(r['user_id'], 2)
eq_(r[users.c.user_id], 2)
eq_(r.user_name, 'jack')
eq_(r['user_name'], 'jack')
eq_(r[users.c.user_name], 'jack')
def test_column_accessor_textual_select(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='john'),
dict(user_id=2, user_name='jack')
)
# this will create column() objects inside
# the select(), these need to match on name anyway
r = testing.db.execute(
select([
column('user_id'), column('user_name')
]).select_from(table('users')).
where(text('user_id=2'))
).first()
eq_(r.user_id, 2)
eq_(r['user_id'], 2)
eq_(r[users.c.user_id], 2)
eq_(r.user_name, 'jack')
eq_(r['user_name'], 'jack')
eq_(r[users.c.user_name], 'jack')
def test_column_accessor_dotted_union(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='john'),
)
# test a little sqlite < 3.10.0 weirdness - with the UNION,
# cols come back as "users.user_id" in cursor.description
r = testing.db.execute(
text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users"
)
).first()
eq_(r['user_id'], 1)
eq_(r['user_name'], "john")
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_sqlite_raw(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='john'),
)
r = text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users",
bind=testing.db).execution_options(sqlite_raw_colnames=True). \
execute().first()
if testing.against("sqlite < 3.10.0"):
not_in_('user_id', r)
not_in_('user_name', r)
eq_(r['users.user_id'], 1)
eq_(r['users.user_name'], "john")
eq_(list(r.keys()), ["users.user_id", "users.user_name"])
else:
not_in_('users.user_id', r)
not_in_('users.user_name', r)
eq_(r['user_id'], 1)
eq_(r['user_name'], "john")
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_sqlite_translated(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='john'),
)
r = text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users",
bind=testing.db).execute().first()
eq_(r['user_id'], 1)
eq_(r['user_name'], "john")
if testing.against("sqlite < 3.10.0"):
eq_(r['users.user_id'], 1)
eq_(r['users.user_name'], "john")
else:
not_in_('users.user_id', r)
not_in_('users.user_name', r)
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_labels_w_dots(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='john'),
)
# test using literal tablename.colname
r = text(
'select users.user_id AS "users.user_id", '
'users.user_name AS "users.user_name" '
'from users', bind=testing.db).\
execution_options(sqlite_raw_colnames=True).execute().first()
eq_(r['users.user_id'], 1)
eq_(r['users.user_name'], "john")
not_in_("user_name", r)
eq_(list(r.keys()), ["users.user_id", "users.user_name"])
def test_column_accessor_unary(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='john'),
)
# unary expressions
r = select([users.c.user_name.distinct()]).order_by(
users.c.user_name).execute().first()
eq_(r[users.c.user_name], 'john')
eq_(r.user_name, 'john')
def test_column_accessor_err(self):
r = testing.db.execute(select([1])).first()
assert_raises_message(
AttributeError,
"Could not locate column in row for column 'foo'",
getattr, r, "foo"
)
assert_raises_message(
KeyError,
"Could not locate column in row for column 'foo'",
lambda: r['foo']
)
def test_graceful_fetch_on_non_rows(self):
"""test that calling fetchone() etc. on a result that doesn't
return rows fails gracefully.
"""
# these proxies don't work with no cursor.description present.
# so they don't apply to this test at the moment.
# result.FullyBufferedResultProxy,
# result.BufferedRowResultProxy,
# result.BufferedColumnResultProxy
users = self.tables.users
conn = testing.db.connect()
for meth in [
lambda r: r.fetchone(),
lambda r: r.fetchall(),
lambda r: r.first(),
lambda r: r.scalar(),
lambda r: r.fetchmany(),
lambda r: r._getter('user'),
lambda r: r._has_key('user'),
]:
trans = conn.begin()
result = conn.execute(users.insert(), user_id=1)
assert_raises_message(
exc.ResourceClosedError,
"This result object does not return rows. "
"It has been closed automatically.",
meth, result,
)
trans.rollback()
def test_fetchone_til_end(self):
result = testing.db.execute("select * from users")
eq_(result.fetchone(), None)
eq_(result.fetchone(), None)
eq_(result.fetchone(), None)
result.close()
assert_raises_message(
exc.ResourceClosedError,
"This result object is closed.",
result.fetchone
)
def test_row_case_sensitive(self):
row = testing.db.execute(
select([
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive")
])
).first()
eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
not_in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
assert_raises(
KeyError,
lambda: row["Case_insensitive"]
)
assert_raises(
KeyError,
lambda: row["casesensitive"]
)
def test_row_case_sensitive_unoptimized(self):
ins_db = engines.testing_engine(options={"case_sensitive": True})
row = ins_db.execute(
select([
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
text("3 AS screw_up_the_cols")
])
).first()
eq_(
list(row.keys()),
["case_insensitive", "CaseSensitive", "screw_up_the_cols"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
not_in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["screw_up_the_cols"], 3)
assert_raises(KeyError, lambda: row["Case_insensitive"])
assert_raises(KeyError, lambda: row["casesensitive"])
assert_raises(KeyError, lambda: row["screw_UP_the_cols"])
def test_row_case_insensitive(self):
ins_db = engines.testing_engine(options={"case_sensitive": False})
row = ins_db.execute(
select([
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive")
])
).first()
eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["Case_insensitive"], 1)
eq_(row["casesensitive"], 2)
def test_row_case_insensitive_unoptimized(self):
ins_db = engines.testing_engine(options={"case_sensitive": False})
row = ins_db.execute(
select([
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
text("3 AS screw_up_the_cols")
])
).first()
eq_(
list(row.keys()),
["case_insensitive", "CaseSensitive", "screw_up_the_cols"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["screw_up_the_cols"], 3)
eq_(row["Case_insensitive"], 1)
eq_(row["casesensitive"], 2)
eq_(row["screw_UP_the_cols"], 3)
def test_row_as_args(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name='john')
r = users.select(users.c.user_id == 1).execute().first()
users.delete().execute()
users.insert().execute(r)
eq_(users.select().execute().fetchall(), [(1, 'john')])
def test_result_as_args(self):
users = self.tables.users
users2 = self.tables.users2
users.insert().execute([
dict(user_id=1, user_name='john'),
dict(user_id=2, user_name='ed')])
r = users.select().execute()
users2.insert().execute(list(r))
eq_(
users2.select().order_by(users2.c.user_id).execute().fetchall(),
[(1, 'john'), (2, 'ed')]
)
users2.delete().execute()
r = users.select().execute()
users2.insert().execute(*list(r))
eq_(
users2.select().order_by(users2.c.user_id).execute().fetchall(),
[(1, 'john'), (2, 'ed')]
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column(self):
users = self.tables.users
addresses = self.tables.addresses
users.insert().execute(user_id=1, user_name='john')
result = users.outerjoin(addresses).select().execute()
r = result.first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r['user_id']
)
# pure positional targeting; users.c.user_id
# and addresses.c.user_id are known!
# works as of 1.1 issue #3501
eq_(r[users.c.user_id], 1)
eq_(r[addresses.c.user_id], None)
# try to trick it - fake_table isn't in the result!
# we get the correct error
fake_table = Table('fake', MetaData(), Column('user_id', Integer))
assert_raises_message(
exc.InvalidRequestError,
"Could not locate column in row for column 'fake.user_id'",
lambda: r[fake_table.c.user_id]
)
r = util.pickle.loads(util.pickle.dumps(r))
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r['user_id']
)
result = users.outerjoin(addresses).select().execute()
result = _result.BufferedColumnResultProxy(result.context)
r = result.first()
assert isinstance(r, _result.BufferedColumnRow)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r['user_id']
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_by_col(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name='john')
ua = users.alias()
u2 = users.alias()
result = select([users.c.user_id, ua.c.user_id]).execute()
row = result.first()
# as of 1.1 issue #3501, we use pure positional
# targeting for the column objects here
eq_(row[users.c.user_id], 1)
eq_(row[ua.c.user_id], 1)
# this now works as of 1.1 issue #3501;
# previously this was stuck on "ambiguous column name"
assert_raises_message(
exc.InvalidRequestError,
"Could not locate column in row",
lambda: row[u2.c.user_id]
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_case_sensitive(self):
eng = engines.testing_engine(options=dict(case_sensitive=False))
row = eng.execute(select([
literal_column('1').label('SOMECOL'),
literal_column('1').label('SOMECOL'),
])).first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row['somecol']
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_contains(self):
users = self.tables.users
addresses = self.tables.addresses
# ticket 2702. in 0.7 we'd get True, False.
# in 0.8, both columns are present so it's True;
# but when they're fetched you'll get the ambiguous error.
users.insert().execute(user_id=1, user_name='john')
result = select([users.c.user_id, addresses.c.user_id]).\
select_from(users.outerjoin(addresses)).execute()
row = result.first()
eq_(
set([users.c.user_id in row, addresses.c.user_id in row]),
set([True])
)
def test_ambiguous_column_by_col_plus_label(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name='john')
result = select(
[users.c.user_id,
type_coerce(users.c.user_id, Integer).label('foo')]).execute()
row = result.first()
eq_(
row[users.c.user_id], 1
)
eq_(
row[1], 1
)
def test_fetch_partial_result_map(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name='ed')
t = text("select * from users").columns(
user_name=String()
)
eq_(
testing.db.execute(t).fetchall(), [(7, 'ed')]
)
def test_fetch_unordered_result_map(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name='ed')
class Goofy1(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "a"
class Goofy2(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "b"
class Goofy3(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "c"
t = text(
"select user_name as a, user_name as b, "
"user_name as c from users").columns(
a=Goofy1(), b=Goofy2(), c=Goofy3()
)
eq_(
testing.db.execute(t).fetchall(), [
('eda', 'edb', 'edc')
]
)
@testing.requires.subqueries
def test_column_label_targeting(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name='ed')
for s in (
users.select().alias('foo'),
users.select().alias(users.name),
):
row = s.select(use_labels=True).execute().first()
eq_(row[s.c.user_id], 7)
eq_(row[s.c.user_name], 'ed')
def test_keys(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name='foo')
result = users.select().execute()
eq_(
result.keys(),
['user_id', 'user_name']
)
row = result.first()
eq_(
row.keys(),
['user_id', 'user_name']
)
def test_keys_anon_labels(self):
"""test [ticket:3483]"""
users = self.tables.users
users.insert().execute(user_id=1, user_name='foo')
result = testing.db.execute(
select([
users.c.user_id,
users.c.user_name.label(None),
func.count(literal_column('1'))]).
group_by(users.c.user_id, users.c.user_name)
)
eq_(
result.keys(),
['user_id', 'user_name_1', 'count_1']
)
row = result.first()
eq_(
row.keys(),
['user_id', 'user_name_1', 'count_1']
)
def test_items(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name='foo')
r = users.select().execute().first()
eq_(
[(x[0].lower(), x[1]) for x in list(r.items())],
[('user_id', 1), ('user_name', 'foo')])
def test_len(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name='foo')
r = users.select().execute().first()
eq_(len(r), 2)
r = testing.db.execute('select user_name, user_id from users'). \
first()
eq_(len(r), 2)
r = testing.db.execute('select user_name from users').first()
eq_(len(r), 1)
def test_sorting_in_python(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name='foo'),
dict(user_id=2, user_name='bar'),
dict(user_id=3, user_name='def'),
)
rows = users.select().order_by(users.c.user_name).execute().fetchall()
eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')])
eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')])
def test_column_order_with_simple_query(self):
# should return values in column definition order
users = self.tables.users
users.insert().execute(user_id=1, user_name='foo')
r = users.select(users.c.user_id == 1).execute().first()
eq_(r[0], 1)
eq_(r[1], 'foo')
eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
eq_(list(r.values()), [1, 'foo'])
def test_column_order_with_text_query(self):
# should return values in query order
users = self.tables.users
users.insert().execute(user_id=1, user_name='foo')
r = testing.db.execute('select user_name, user_id from users'). \
first()
eq_(r[0], 'foo')
eq_(r[1], 1)
eq_([x.lower() for x in list(r.keys())], ['user_name', 'user_id'])
eq_(list(r.values()), ['foo', 1])
@testing.crashes('oracle', 'FIXME: unknown, verify not fails_on()')
@testing.crashes('firebird', 'An identifier must begin with a letter')
@testing.provide_metadata
def test_column_accessor_shadow(self):
shadowed = Table(
'test_shadowed', self.metadata,
Column('shadow_id', INT, primary_key=True),
Column('shadow_name', VARCHAR(20)),
Column('parent', VARCHAR(20)),
Column('row', VARCHAR(40)),
Column('_parent', VARCHAR(20)),
Column('_row', VARCHAR(20)),
)
self.metadata.create_all()
shadowed.insert().execute(
shadow_id=1, shadow_name='The Shadow', parent='The Light',
row='Without light there is no shadow',
_parent='Hidden parent', _row='Hidden row')
r = shadowed.select(shadowed.c.shadow_id == 1).execute().first()
eq_(r.shadow_id, 1)
eq_(r['shadow_id'], 1)
eq_(r[shadowed.c.shadow_id], 1)
eq_(r.shadow_name, 'The Shadow')
eq_(r['shadow_name'], 'The Shadow')
eq_(r[shadowed.c.shadow_name], 'The Shadow')
eq_(r.parent, 'The Light')
eq_(r['parent'], 'The Light')
eq_(r[shadowed.c.parent], 'The Light')
eq_(r.row, 'Without light there is no shadow')
eq_(r['row'], 'Without light there is no shadow')
eq_(r[shadowed.c.row], 'Without light there is no shadow')
eq_(r['_parent'], 'Hidden parent')
eq_(r['_row'], 'Hidden row')
def test_nontuple_row(self):
"""ensure the C version of BaseRowProxy handles
duck-type-dependent rows."""
from sqlalchemy.engine import RowProxy
class MyList(object):
def __init__(self, l):
self.l = l
def __len__(self):
return len(self.l)
def __getitem__(self, i):
return list.__getitem__(self.l, i)
proxy = RowProxy(object(), MyList(['value']), [None], {
'key': (None, None, 0), 0: (None, None, 0)})
eq_(list(proxy), ['value'])
eq_(proxy[0], 'value')
eq_(proxy['key'], 'value')
@testing.provide_metadata
def test_no_rowcount_on_selects_inserts(self):
"""assert that rowcount is only called on deletes and updates.
This is because cursor.rowcount can be expensive on some dialects
such as Firebird; however, many dialects require that it be called
before the cursor is closed.
"""
metadata = self.metadata
engine = engines.testing_engine()
t = Table('t1', metadata,
Column('data', String(10))
)
metadata.create_all(engine)
with patch.object(
engine.dialect.execution_ctx_cls, "rowcount") as mock_rowcount:
mock_rowcount.__get__ = Mock()
engine.execute(t.insert(),
{'data': 'd1'},
{'data': 'd2'},
{'data': 'd3'})
eq_(len(mock_rowcount.__get__.mock_calls), 0)
eq_(
engine.execute(t.select()).fetchall(),
[('d1', ), ('d2', ), ('d3', )]
)
eq_(len(mock_rowcount.__get__.mock_calls), 0)
engine.execute(t.update(), {'data': 'd4'})
eq_(len(mock_rowcount.__get__.mock_calls), 1)
engine.execute(t.delete())
eq_(len(mock_rowcount.__get__.mock_calls), 2)
def test_rowproxy_is_sequence(self):
import collections
from sqlalchemy.engine import RowProxy
row = RowProxy(
object(), ['value'], [None],
{'key': (None, None, 0), 0: (None, None, 0)})
assert isinstance(row, collections.Sequence)
@testing.provide_metadata
def test_rowproxy_getitem_indexes_compiled(self):
values = Table('rp', self.metadata,
Column('key', String(10), primary_key=True),
Column('value', String(10)))
values.create()
testing.db.execute(values.insert(), dict(key='One', value='Uno'))
row = testing.db.execute(values.select()).first()
eq_(row['key'], 'One')
eq_(row['value'], 'Uno')
eq_(row[0], 'One')
eq_(row[1], 'Uno')
eq_(row[-2], 'One')
eq_(row[-1], 'Uno')
eq_(row[1:0:-1], ('Uno',))
@testing.only_on("sqlite")
def test_rowproxy_getitem_indexes_raw(self):
row = testing.db.execute("select 'One' as key, 'Uno' as value").first()
eq_(row['key'], 'One')
eq_(row['value'], 'Uno')
eq_(row[0], 'One')
eq_(row[1], 'Uno')
eq_(row[-2], 'One')
eq_(row[-1], 'Uno')
eq_(row[1:0:-1], ('Uno',))
@testing.requires.cextensions
def test_row_c_sequence_check(self):
import csv
metadata = MetaData()
metadata.bind = 'sqlite://'
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(40)),
)
users.create()
users.insert().execute(name='Test')
row = users.select().execute().fetchone()
s = util.StringIO()
writer = csv.writer(s)
# csv performs PySequenceCheck call
writer.writerow(row)
assert s.getvalue().strip() == '1,Test'
@testing.requires.selectone
def test_empty_accessors(self):
statements = [
(
"select 1",
[
lambda r: r.last_inserted_params(),
lambda r: r.last_updated_params(),
lambda r: r.prefetch_cols(),
lambda r: r.postfetch_cols(),
lambda r: r.inserted_primary_key
],
"Statement is not a compiled expression construct."
),
(
select([1]),
[
lambda r: r.last_inserted_params(),
lambda r: r.inserted_primary_key
],
r"Statement is not an insert\(\) expression construct."
),
(
select([1]),
[
lambda r: r.last_updated_params(),
],
r"Statement is not an update\(\) expression construct."
),
(
select([1]),
[
lambda r: r.prefetch_cols(),
lambda r: r.postfetch_cols()
],
r"Statement is not an insert\(\) "
r"or update\(\) expression construct."
),
]
for stmt, meths, msg in statements:
r = testing.db.execute(stmt)
try:
for meth in meths:
assert_raises_message(
sa_exc.InvalidRequestError,
msg,
meth, r
)
finally:
r.close()
class KeyTargetingTest(fixtures.TablesTest):
run_inserts = 'once'
run_deletes = None
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
'keyed1', metadata, Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q")
)
Table('keyed2', metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
Table('keyed3', metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
Table('keyed4', metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
Table('content', metadata, Column('t', String(30), key="type"))
Table('bar', metadata, Column('ctype', String(30), key="content_type"))
if testing.requires.schemas.enabled:
Table(
'wschema', metadata,
Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q"),
schema=testing.config.test_schema
)
@classmethod
def insert_data(cls):
cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
cls.tables.content.insert().execute(type="t1")
if testing.requires.schemas.enabled:
cls.tables[
'%s.wschema' % testing.config.test_schema].insert().execute(
dict(b="a1", q="c1"))
@testing.requires.schemas
def test_keyed_accessor_wschema(self):
keyed1 = self.tables['%s.wschema' % testing.config.test_schema]
row = testing.db.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single(self):
keyed1 = self.tables.keyed1
row = testing.db.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single_labeled(self):
keyed1 = self.tables.keyed1
row = testing.db.execute(keyed1.select().apply_labels()).first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_conflict_2(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(select([keyed1, keyed2])).first()
# row.b is unambiguous
eq_(row.b, "b2")
# row.a is ambiguous
assert_raises_message(
exc.InvalidRequestError,
"Ambig",
getattr, row, "a"
)
def test_keyed_accessor_composite_names_precedent(self):
keyed1 = self.tables.keyed1
keyed4 = self.tables.keyed4
row = testing.db.execute(select([keyed1, keyed4])).first()
eq_(row.b, "b4")
eq_(row.q, "q4")
eq_(row.a, "a1")
eq_(row.c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_keys_precedent(self):
keyed1 = self.tables.keyed1
keyed3 = self.tables.keyed3
row = testing.db.execute(select([keyed1, keyed3])).first()
eq_(row.q, "c1")
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'a'",
getattr, row, "b"
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'a'",
getattr, row, "a"
)
eq_(row.d, "d3")
def test_keyed_accessor_composite_labeled(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(select([keyed1, keyed2]).apply_labels()). \
first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_c, "c1")
eq_(row.keyed2_a, "a2")
eq_(row.keyed2_b, "b2")
assert_raises(KeyError, lambda: row['keyed2_c'])
assert_raises(KeyError, lambda: row['keyed2_q'])
def test_column_label_overlap_fallback(self):
content, bar = self.tables.content, self.tables.bar
row = testing.db.execute(
select([content.c.type.label("content_type")])).first()
not_in_(content.c.type, row)
not_in_(bar.c.content_type, row)
in_(sql.column('content_type'), row)
row = testing.db.execute(select([func.now().label("content_type")])). \
first()
not_in_(content.c.type, row)
not_in_(bar.c.content_type, row)
in_(sql.column('content_type'), row)
def test_column_label_overlap_fallback_2(self):
content, bar = self.tables.content, self.tables.bar
row = testing.db.execute(content.select(use_labels=True)).first()
in_(content.c.type, row)
not_in_(bar.c.content_type, row)
not_in_(sql.column('content_type'), row)
def test_columnclause_schema_column_one(self):
keyed2 = self.tables.keyed2
# this is addressed by [ticket:2932]
# ColumnClause._compare_name_for_result allows the
# columns which the statement is against to be lightweight
# cols, which results in a more liberal comparison scheme
a, b = sql.column('a'), sql.column('b')
stmt = select([a, b]).select_from(table("keyed2"))
row = testing.db.execute(stmt).first()
in_(keyed2.c.a, row)
in_(keyed2.c.b, row)
in_(a, row)
in_(b, row)
def test_columnclause_schema_column_two(self):
keyed2 = self.tables.keyed2
a, b = sql.column('a'), sql.column('b')
stmt = select([keyed2.c.a, keyed2.c.b])
row = testing.db.execute(stmt).first()
in_(keyed2.c.a, row)
in_(keyed2.c.b, row)
in_(a, row)
in_(b, row)
def test_columnclause_schema_column_three(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
a, b = sql.column('a'), sql.column('b')
stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
row = testing.db.execute(stmt).first()
in_(keyed2.c.a, row)
in_(keyed2.c.b, row)
in_(a, row)
in_(b, row)
in_(stmt.c.a, row)
in_(stmt.c.b, row)
def test_columnclause_schema_column_four(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
a, b = sql.column('keyed2_a'), sql.column('keyed2_b')
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
a, b)
row = testing.db.execute(stmt).first()
in_(keyed2.c.a, row)
in_(keyed2.c.b, row)
in_(a, row)
in_(b, row)
in_(stmt.c.keyed2_a, row)
in_(stmt.c.keyed2_b, row)
def test_columnclause_schema_column_five(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
keyed2_a=CHAR, keyed2_b=CHAR)
row = testing.db.execute(stmt).first()
in_(keyed2.c.a, row)
in_(keyed2.c.b, row)
in_(stmt.c.keyed2_a, row)
in_(stmt.c.keyed2_b, row)
class PositionalTextTest(fixtures.TablesTest):
run_inserts = 'once'
run_deletes = None
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
'text1',
metadata,
Column("a", CHAR(2)),
Column("b", CHAR(2)),
Column("c", CHAR(2)),
Column("d", CHAR(2))
)
@classmethod
def insert_data(cls):
cls.tables.text1.insert().execute([
dict(a="a1", b="b1", c="c1", d="d1"),
])
def test_via_column(self):
c1, c2, c3, c4 = column('q'), column('p'), column('r'), column('d')
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c2], "b1")
eq_(row[c4], "d1")
eq_(row[1], "b1")
eq_(row["b"], "b1")
eq_(row.keys(), ["a", "b", "c", "d"])
eq_(row["r"], "c1")
eq_(row["d"], "d1")
def test_fewer_cols_than_sql_positional(self):
c1, c2 = column('q'), column('p')
stmt = text("select a, b, c, d from text1").columns(c1, c2)
# no warning as this can be similar for non-positional
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row["c"], "c1")
def test_fewer_cols_than_sql_non_positional(self):
c1, c2 = column('a'), column('p')
stmt = text("select a, b, c, d from text1").columns(c2, c1, d=CHAR)
# no warning as this can be similar for non-positional
result = testing.db.execute(stmt)
row = result.first()
# c1 name matches, locates
eq_(row[c1], "a1")
eq_(row["c"], "c1")
# c2 name does not match, doesn't locate
assert_raises_message(
exc.NoSuchColumnError,
"in row for column 'p'",
lambda: row[c2]
)
def test_more_cols_than_sql(self):
c1, c2, c3, c4 = column('q'), column('p'), column('r'), column('d')
stmt = text("select a, b from text1").columns(c1, c2, c3, c4)
with assertions.expect_warnings(
r"Number of columns in textual SQL \(4\) is "
"smaller than number of columns requested \(2\)"):
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c2], "b1")
assert_raises_message(
exc.NoSuchColumnError,
"in row for column 'r'",
lambda: row[c3]
)
def test_dupe_col_obj(self):
c1, c2, c3 = column('q'), column('p'), column('r')
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c2)
assert_raises_message(
exc.InvalidRequestError,
"Duplicate column expression requested in "
"textual SQL: <.*.ColumnClause.*; p>",
testing.db.execute, stmt
)
def test_anon_aliased_unique(self):
text1 = self.tables.text1
c1 = text1.c.a.label(None)
c2 = text1.alias().c.c
c3 = text1.alias().c.b
c4 = text1.alias().c.d.label(None)
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
# key fallback rules still match this to a column
# unambiguously based on its name
eq_(row[text1.c.a], "a1")
# key fallback rules still match this to a column
# unambiguously based on its name
eq_(row[text1.c.d], "d1")
# text1.c.b goes nowhere, because we hit key fallback
# but text1.c.b doesn't derive from text1.c.c
assert_raises_message(
exc.NoSuchColumnError,
"Could not locate column in row for column 'text1.b'",
lambda: row[text1.c.b]
)
def test_anon_aliased_overlapping(self):
text1 = self.tables.text1
c1 = text1.c.a.label(None)
c2 = text1.alias().c.a
c3 = text1.alias().c.a.label(None)
c4 = text1.c.a.label(None)
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
# key fallback rules still match this to a column
# unambiguously based on its name
eq_(row[text1.c.a], "a1")
def test_anon_aliased_name_conflict(self):
text1 = self.tables.text1
c1 = text1.c.a.label("a")
c2 = text1.alias().c.a
c3 = text1.alias().c.a.label("a")
c4 = text1.c.a.label("a")
# all cols are named "a". if we are positional, we don't care.
# this is new logic in 1.1
stmt = text("select a, b as a, c as a, d as a from text1").columns(
c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
# fails, because we hit key fallback and find conflicts
# in columns that are present
assert_raises_message(
exc.NoSuchColumnError,
"Could not locate column in row for column 'text1.a'",
lambda: row[text1.c.a]
)
class AlternateResultProxyTest(fixtures.TablesTest):
__requires__ = ('sqlite', )
@classmethod
def setup_bind(cls):
cls.engine = engine = engines.testing_engine('sqlite://')
return engine
@classmethod
def define_tables(cls, metadata):
Table(
'test', metadata,
Column('x', Integer, primary_key=True),
Column('y', String(50, convert_unicode='force'))
)
@classmethod
def insert_data(cls):
cls.engine.execute(cls.tables.test.insert(), [
{'x': i, 'y': "t_%d" % i} for i in range(1, 12)
])
@contextmanager
def _proxy_fixture(self, cls):
self.table = self.tables.test
class ExcCtx(default.DefaultExecutionContext):
def get_result_proxy(self):
return cls(self)
self.patcher = patch.object(
self.engine.dialect, "execution_ctx_cls", ExcCtx)
with self.patcher:
yield
def _test_proxy(self, cls):
with self._proxy_fixture(cls):
rows = []
r = self.engine.execute(select([self.table]))
assert isinstance(r, cls)
for i in range(5):
rows.append(r.fetchone())
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
rows = r.fetchmany(3)
eq_(rows, [(i, "t_%d" % i) for i in range(6, 9)])
rows = r.fetchall()
eq_(rows, [(i, "t_%d" % i) for i in range(9, 12)])
r = self.engine.execute(select([self.table]))
rows = r.fetchmany(None)
eq_(rows[0], (1, "t_1"))
# number of rows here could be one, or the whole thing
assert len(rows) == 1 or len(rows) == 11
r = self.engine.execute(select([self.table]).limit(1))
r.fetchone()
eq_(r.fetchone(), None)
r = self.engine.execute(select([self.table]).limit(5))
rows = r.fetchmany(6)
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
# result keeps going just fine with blank results...
eq_(r.fetchmany(2), [])
eq_(r.fetchmany(2), [])
eq_(r.fetchall(), [])
eq_(r.fetchone(), None)
# until we close
r.close()
self._assert_result_closed(r)
r = self.engine.execute(select([self.table]).limit(5))
eq_(r.first(), (1, "t_1"))
self._assert_result_closed(r)
r = self.engine.execute(select([self.table]).limit(5))
eq_(r.scalar(), 1)
self._assert_result_closed(r)
def _assert_result_closed(self, r):
assert_raises_message(
sa_exc.ResourceClosedError,
"object is closed",
r.fetchone
)
assert_raises_message(
sa_exc.ResourceClosedError,
"object is closed",
r.fetchmany, 2
)
assert_raises_message(
sa_exc.ResourceClosedError,
"object is closed",
r.fetchall
)
def test_basic_plain(self):
self._test_proxy(_result.ResultProxy)
def test_basic_buffered_row_result_proxy(self):
self._test_proxy(_result.BufferedRowResultProxy)
def test_basic_fully_buffered_result_proxy(self):
self._test_proxy(_result.FullyBufferedResultProxy)
def test_basic_buffered_column_result_proxy(self):
self._test_proxy(_result.BufferedColumnResultProxy)
def test_resultprocessor_plain(self):
self._test_result_processor(_result.ResultProxy, False)
def test_resultprocessor_plain_cached(self):
self._test_result_processor(_result.ResultProxy, True)
def test_resultprocessor_buffered_column(self):
self._test_result_processor(_result.BufferedColumnResultProxy, False)
def test_resultprocessor_buffered_column_cached(self):
self._test_result_processor(_result.BufferedColumnResultProxy, True)
def test_resultprocessor_buffered_row(self):
self._test_result_processor(_result.BufferedRowResultProxy, False)
def test_resultprocessor_buffered_row_cached(self):
self._test_result_processor(_result.BufferedRowResultProxy, True)
def test_resultprocessor_fully_buffered(self):
self._test_result_processor(_result.FullyBufferedResultProxy, False)
def test_resultprocessor_fully_buffered_cached(self):
self._test_result_processor(_result.FullyBufferedResultProxy, True)
def _test_result_processor(self, cls, use_cache):
class MyType(TypeDecorator):
impl = String()
def process_result_value(self, value, dialect):
return "HI " + value
with self._proxy_fixture(cls):
with self.engine.connect() as conn:
if use_cache:
cache = {}
conn = conn.execution_options(compiled_cache=cache)
stmt = select([literal("THERE", type_=MyType())])
for i in range(2):
r = conn.execute(stmt)
eq_(r.scalar(), "HI THERE")
def test_buffered_row_growth(self):
with self._proxy_fixture(_result.BufferedRowResultProxy):
with self.engine.connect() as conn:
conn.execute(self.table.insert(), [
{'x': i, 'y': "t_%d" % i} for i in range(15, 1200)
])
result = conn.execute(self.table.select())
checks = {
0: 5, 1: 10, 9: 20, 135: 250, 274: 500,
1351: 1000
}
for idx, row in enumerate(result, 0):
if idx in checks:
eq_(result._bufsize, checks[idx])
le_(
len(result._BufferedRowResultProxy__rowbuffer),
1000
)
def test_max_row_buffer_option(self):
with self._proxy_fixture(_result.BufferedRowResultProxy):
with self.engine.connect() as conn:
conn.execute(self.table.insert(), [
{'x': i, 'y': "t_%d" % i} for i in range(15, 1200)
])
result = conn.execution_options(max_row_buffer=27).execute(
self.table.select()
)
for idx, row in enumerate(result, 0):
if idx in (16, 70, 150, 250):
eq_(result._bufsize, 27)
le_(
len(result._BufferedRowResultProxy__rowbuffer),
27
)
| 32.42732
| 79
| 0.544844
|
b3700103fee0d65a639749f910f1454dc55d1ec5
| 2,167
|
py
|
Python
|
share/qt/extract_strings_qt.py
|
Nugetzrul3/dogecash
|
f09a4ed70e9ea6f5599c8a20fe255cbe24373cc1
|
[
"MIT"
] | 52
|
2018-10-08T07:17:35.000Z
|
2021-11-29T22:53:08.000Z
|
share/qt/extract_strings_qt.py
|
Nugetzrul3/dogecash
|
f09a4ed70e9ea6f5599c8a20fe255cbe24373cc1
|
[
"MIT"
] | 50
|
2018-10-20T10:42:54.000Z
|
2021-02-15T21:53:51.000Z
|
share/qt/extract_strings_qt.py
|
Nugetzrul3/dogecash
|
f09a4ed70e9ea6f5599c8a20fe255cbe24373cc1
|
[
"MIT"
] | 53
|
2018-11-03T16:42:43.000Z
|
2021-12-11T03:55:21.000Z
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
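# Illustrative note (not part of the original script): every extracted msgid is
# emitted as one entry of the generated C++ string array, e.g. (hypothetical msgid):
#   QT_TRANSLATE_NOOP("dogecash-core", "Some translatable string"),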
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/dogecashstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
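# Illustrative sketch (not part of the original script): for xgettext output such as
#   msgid "Hello"
#   msgstr ""
# parse_po() returns [(['"Hello"'], ['""'])] -- each side is the list of quoted
# string fragments, which are later joined verbatim into the QT_TRANSLATE_NOOP()
# lines written below.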
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *dogecash_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("dogecash-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 25.797619
| 105
| 0.620212
|
23f5c64fb9f0d09ec79ab076bfa18d08debbd7a5
| 2,017
|
py
|
Python
|
modules/fdns.py
|
it-jhack/subtaker
|
a6c0e373c358b6ff849e6bfa835c80f9f7db93de
|
[
"MIT"
] | null | null | null |
modules/fdns.py
|
it-jhack/subtaker
|
a6c0e373c358b6ff849e6bfa835c80f9f7db93de
|
[
"MIT"
] | null | null | null |
modules/fdns.py
|
it-jhack/subtaker
|
a6c0e373c358b6ff849e6bfa835c80f9f7db93de
|
[
"MIT"
] | 1
|
2021-02-12T09:21:59.000Z
|
2021-02-12T09:21:59.000Z
|
import subprocess
import modules.utils
from more_itertools import unique_everseen
# Converts a (sub)domain into strings that can be consumed by the grep and jq commands below.
# The domain parts must stay in the same order in both the grep and jq strings.
def _grep_jq_convert(domain):
'''Transforms a (sub)domain into two strings: intended for 'grep' and 'jq' subprocess, respectively.
Example:
domain input: 'example.com'
output: '.example.com' (for grep)
'\\.example\\.com' (for jq)'''
domain = domain.strip()
sliced_domain = domain.split('.')
grep_str = '.'
jq_str = '\\\\.'
i = 1
for item in sliced_domain:
grep_str += item
jq_str += item
if i < len(sliced_domain):
grep_str += '.'
jq_str += '\\\\.'
i += 1
return grep_str, jq_str
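# Illustrative usage (not part of the original module):
#   grep_str, jq_str = _grep_jq_convert('example.com')
#   # grep_str == '.example.com' (a plain substring for 'grep -F')
#   # jq_str carries escaped dots so they survive the extra round of
#   # shell/jq escaping inside grep_subd_fdns() below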
def grep_subd_fdns(domain, fdns_file):
'''Parses for a domain in a 'Rapid7 Open Data FDNS' database file and returns a list of subdomains.
Database files available at https://opendata.rapid7.com'''
#! Dev Warning 0: 'shell=True' was needed to avoid the process being killed for running out of memory,
# as opposed to chaining multiple 'p = subprocess.run' calls with 'input=p.stdout'
#! Dev Warning 1: this step may heavily load the CPU. TODO: try multithreading in the future
fdns_outp_list = []
grep_str, jq_str = _grep_jq_convert(domain)
jq_filter = f'\'if (.name | test("{jq_str}")) then .name elif (.value | test("{jq_str}")) then .value else empty end\''
p = subprocess.run([f'zcat {fdns_file}\
| grep -F {grep_str}\
| jq -crM {jq_filter}\
| sort\
| uniq\
'], capture_output=True, shell=True, text=True)
fdns_outp_list.extend(p.stdout.split('\n'))
# Removing eventual duplicated results
fdns_outp_list = list(unique_everseen(fdns_outp_list))
# Removing eventual empty results
if '' in fdns_outp_list:
fdns_outp_list.remove('')
return fdns_outp_list
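# Illustrative usage (the file name is hypothetical, not part of the original module):
#   subdomains = grep_subd_fdns('example.com', '2023-01-01-1672531200-fdns_a.json.gz')
#   # -> e.g. ['api.example.com', 'www.example.com']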
| 30.104478
| 123
| 0.629648
|
3f7570859b3ca07396011341fa5e5b401d8a747f
| 21,534
|
py
|
Python
|
nuitka/tree/ReformulationContractionExpressions.py
|
CoyoteLeo/Nuitka
|
f9db11d82af13af46f3c09bced5f6ea26cf29572
|
[
"Apache-2.0"
] | null | null | null |
nuitka/tree/ReformulationContractionExpressions.py
|
CoyoteLeo/Nuitka
|
f9db11d82af13af46f3c09bced5f6ea26cf29572
|
[
"Apache-2.0"
] | 1
|
2020-07-11T17:53:56.000Z
|
2020-07-11T17:53:56.000Z
|
nuitka/tree/ReformulationContractionExpressions.py
|
CoyoteLeo/Nuitka
|
f9db11d82af13af46f3c09bced5f6ea26cf29572
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of contraction expressions.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.__past__ import intern # pylint: disable=I0021,redefined-builtin
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementReleaseVariable,
)
from nuitka.nodes.AsyncgenNodes import (
ExpressionAsyncgenObjectBody,
ExpressionMakeAsyncgenObject,
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionAsyncIter,
ExpressionAsyncNext,
ExpressionBuiltinIter1,
)
from nuitka.nodes.BuiltinNextNodes import ExpressionBuiltinNext1
from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec
from nuitka.nodes.ConditionalNodes import makeStatementConditional
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerOperationNodes import (
StatementListOperationAppend,
StatementSetOperationAdd,
)
from nuitka.nodes.DictionaryNodes import (
StatementDictOperationSet,
StatementDictOperationSetKeyValue,
)
from nuitka.nodes.FrameNodes import StatementsFrameFunction, StatementsFrameGenerator
from nuitka.nodes.FunctionNodes import ExpressionFunctionRef
from nuitka.nodes.GeneratorNodes import (
ExpressionGeneratorObjectBody,
ExpressionMakeGeneratorObject,
StatementGeneratorReturnNone,
)
from nuitka.nodes.LoopNodes import StatementLoop, StatementLoopBreak
from nuitka.nodes.NodeMakingHelpers import makeVariableRefNode
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody, ExpressionOutlineFunction
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.StatementNodes import StatementExpressionOnly, StatementsSequence
from nuitka.nodes.VariableRefNodes import ExpressionTempVariableRef
from nuitka.nodes.YieldNodes import ExpressionYield, ExpressionYieldFromWaitable
from nuitka.PythonVersions import python_version
from .ReformulationAssignmentStatements import buildAssignmentStatements
from .ReformulationBooleanExpressions import buildAndNode
from .ReformulationTryExceptStatements import makeTryExceptSingleHandlerNode
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
buildNode,
buildNodeList,
detectFunctionBodyKind,
getKind,
makeStatementsSequenceFromStatement,
makeStatementsSequenceFromStatements,
mergeStatements,
)
def _makeIteratorCreation(provider, qual, for_asyncgen, source_ref):
if getattr(qual, "is_async", 0):
result = ExpressionAsyncIter(
value=buildNode(provider=provider, node=qual.iter, source_ref=source_ref),
source_ref=source_ref,
)
if not for_asyncgen or python_version < 370:
result = ExpressionYieldFromWaitable(
expression=result, source_ref=source_ref
)
return result
else:
return ExpressionBuiltinIter1(
value=buildNode(provider=provider, node=qual.iter, source_ref=source_ref),
source_ref=source_ref,
)
def _makeIteratorNext(qual, iterator_ref, source_ref):
if getattr(qual, "is_async", 0):
return ExpressionYieldFromWaitable(
expression=ExpressionAsyncNext(value=iterator_ref, source_ref=source_ref),
source_ref=source_ref,
)
else:
return ExpressionBuiltinNext1(value=iterator_ref, source_ref=source_ref)
def _getStopIterationName(qual):
if getattr(qual, "is_async", 0):
return "StopAsyncIteration"
else:
return "StopIteration"
def _buildPython2ListContraction(provider, node, source_ref):
# The contraction nodes are reformulated to function bodies, with loops as
# described in the developer manual. They use a lot of temporary names,
# nested blocks, etc. and so a lot of variable names.
# Note: The assign_provider is only to cover Python2 list contractions,
# assigning one of the loop variables to the outside scope.
function_body = ExpressionOutlineBody(
provider=provider, name="list_contraction", source_ref=source_ref
)
iter_tmp = function_body.allocateTempVariable(temp_scope=None, name=".0")
container_tmp = function_body.allocateTempVariable(
temp_scope=None, name="contraction_result"
)
statements, release_statements = _buildContractionBodyNode(
provider=provider,
node=node,
emit_class=StatementListOperationAppend,
iter_tmp=iter_tmp,
temp_scope=None,
start_value=[],
container_tmp=container_tmp,
function_body=function_body,
assign_provider=True,
for_asyncgen=False,
source_ref=source_ref,
)
statements.append(
StatementReturn(
expression=ExpressionTempVariableRef(
variable=container_tmp, source_ref=source_ref
),
source_ref=source_ref,
)
)
statement = makeTryFinallyStatement(
provider=function_body,
tried=statements,
final=release_statements,
source_ref=source_ref.atInternal(),
)
function_body.setBody(makeStatementsSequenceFromStatement(statement=statement))
return function_body
def buildListContractionNode(provider, node, source_ref):
# List contractions are dealt with by general code.
if python_version < 300:
return _buildPython2ListContraction(
provider=provider, node=node, source_ref=source_ref
)
return _buildContractionNode(
provider=provider,
node=node,
name="<listcomp>",
emit_class=StatementListOperationAppend,
start_value=[],
source_ref=source_ref,
)
def buildSetContractionNode(provider, node, source_ref):
# Set contractions are dealt with by general code.
return _buildContractionNode(
provider=provider,
node=node,
name="<setcontraction>",
emit_class=StatementSetOperationAdd,
start_value=set(),
source_ref=source_ref,
)
def buildDictContractionNode(provider, node, source_ref):
# Dict contractions are dealt with by general code.
return _buildContractionNode(
provider=provider,
node=node,
name="<dictcontraction>",
emit_class=StatementDictOperationSet
if python_version < 380
else StatementDictOperationSetKeyValue,
start_value={},
source_ref=source_ref,
)
def buildGeneratorExpressionNode(provider, node, source_ref):
# Generator expressions are dealt with by general code.
assert getKind(node) == "GeneratorExp"
function_body = ExpressionOutlineBody(
provider=provider, name="genexpr", source_ref=source_ref
)
iter_tmp = function_body.allocateTempVariable(temp_scope=None, name=".0")
parent_module = provider.getParentModule()
code_object = CodeObjectSpec(
co_name="<genexpr>",
co_kind="Generator",
co_varnames=(".0",),
co_argcount=1,
co_posonlyargcount=0,
co_kwonlyargcount=0,
co_has_starlist=False,
co_has_stardict=False,
co_filename=parent_module.getRunTimeFilename(),
co_lineno=source_ref.getLineNumber(),
future_spec=parent_module.getFutureSpec(),
)
if python_version < 370:
is_async = any(getattr(qual, "is_async", 0) for qual in node.generators)
else:
is_async = detectFunctionBodyKind(nodes=[node])[0] in ("Asyncgen", "Coroutine")
if is_async:
code_body = ExpressionAsyncgenObjectBody(
provider=provider,
name="<genexpr>",
code_object=code_object,
flags=None,
auto_release=None,
source_ref=source_ref,
)
maker_class = ExpressionMakeAsyncgenObject
else:
code_body = ExpressionGeneratorObjectBody(
provider=provider,
name="<genexpr>",
code_object=code_object,
flags=None,
auto_release=None,
source_ref=source_ref.atColumnNumber(node.col_offset + 1),
)
maker_class = ExpressionMakeGeneratorObject
function_body.setBody(
makeStatementsSequenceFromStatements(
StatementAssignmentVariable(
variable=iter_tmp,
source=_makeIteratorCreation(
provider=provider,
qual=node.generators[0],
for_asyncgen=is_async,
source_ref=source_ref,
),
source_ref=source_ref,
),
makeTryFinallyStatement(
provider=function_body,
tried=StatementReturn(
expression=maker_class(
ExpressionFunctionRef(
function_body=code_body, source_ref=source_ref
),
source_ref=source_ref,
),
source_ref=source_ref,
),
final=StatementReleaseVariable(
variable=iter_tmp, source_ref=source_ref
),
source_ref=source_ref,
),
)
)
statements, release_statements = _buildContractionBodyNode(
provider=provider,
node=node,
emit_class=ExpressionYield,
iter_tmp=iter_tmp,
temp_scope=None,
start_value=None,
container_tmp=None,
function_body=code_body,
assign_provider=False,
for_asyncgen=is_async,
source_ref=source_ref,
)
if is_async:
statements.append(StatementGeneratorReturnNone(source_ref=source_ref))
statements = (
makeTryFinallyStatement(
provider=function_body,
tried=statements,
final=release_statements,
source_ref=source_ref.atInternal(),
),
)
code_body.setBody(
makeStatementsSequenceFromStatement(
statement=StatementsFrameGenerator(
statements=mergeStatements(statements, False),
code_object=code_object,
source_ref=source_ref,
)
)
)
return function_body
def _buildContractionBodyNode(
provider,
node,
emit_class,
start_value,
container_tmp,
iter_tmp,
temp_scope,
assign_provider,
function_body,
for_asyncgen,
source_ref,
):
# This uses lots of variables and branches. There is no good way
# around that, and we deal with many cases, due to having generator
# expressions sharing this code, pylint: disable=too-many-branches,too-many-locals
# Note: The assign_provider is only to cover Python2 list contractions,
# assigning one of the loop variables to the outside scope.
tmp_variables = []
if emit_class is not ExpressionYield:
tmp_variables.append(iter_tmp)
if container_tmp is not None:
tmp_variables.append(container_tmp)
statements = []
# First assign the iterator if we are an outline.
if assign_provider:
statements.append(
StatementAssignmentVariable(
variable=iter_tmp,
source=_makeIteratorCreation(
provider=provider,
qual=node.generators[0],
for_asyncgen=False,
source_ref=source_ref,
),
source_ref=source_ref.atInternal(),
)
)
if for_asyncgen and python_version >= 370 and node.generators[0].is_async:
statements.append(
StatementAssignmentVariable(
variable=iter_tmp,
source=ExpressionTempVariableRef(
variable=iter_tmp, source_ref=source_ref
),
source_ref=source_ref,
)
)
if start_value is not None:
statements.append(
StatementAssignmentVariable(
variable=container_tmp,
source=makeConstantRefNode(constant=start_value, source_ref=source_ref),
source_ref=source_ref.atInternal(),
)
)
if hasattr(node, "elt"):
if start_value is not None:
current_body = emit_class(
ExpressionTempVariableRef(
variable=container_tmp, source_ref=source_ref
),
buildNode(
provider=function_body if not assign_provider else provider,
node=node.elt,
source_ref=source_ref,
),
source_ref=source_ref,
)
else:
assert emit_class is ExpressionYield
current_body = emit_class(
buildNode(provider=function_body, node=node.elt, source_ref=source_ref),
source_ref=source_ref,
)
else:
current_body = emit_class(
dict_arg=ExpressionTempVariableRef(
variable=container_tmp, source_ref=source_ref
),
key=buildNode(
provider=function_body if not assign_provider else provider,
node=node.key,
source_ref=source_ref,
),
value=buildNode(
provider=function_body if not assign_provider else provider,
node=node.value,
source_ref=source_ref,
),
source_ref=source_ref,
)
if current_body.isExpression():
current_body = StatementExpressionOnly(
expression=current_body, source_ref=source_ref
)
for count, qual in enumerate(reversed(node.generators)):
tmp_value_variable = function_body.allocateTempVariable(
temp_scope=temp_scope, name="iter_value_%d" % count
)
tmp_variables.append(tmp_value_variable)
# The first iterated value is to be calculated outside of the function
# and will be given as a parameter "_iterated", the others are built
# inside the function.
if qual is node.generators[0]:
iterator_ref = makeVariableRefNode(variable=iter_tmp, source_ref=source_ref)
if for_asyncgen and python_version >= 370:
iterator_ref = ExpressionYieldFromWaitable(
expression=iterator_ref, source_ref=source_ref
)
tmp_iter_variable = None
nested_statements = []
else:
# First create the iterator and store it, next should be loop body
value_iterator = _makeIteratorCreation(
provider=provider if assign_provider else function_body,
qual=qual,
for_asyncgen=False,
source_ref=source_ref,
)
tmp_iter_variable = function_body.allocateTempVariable(
temp_scope=temp_scope, name="contraction_iter_%d" % count
)
tmp_variables.append(tmp_iter_variable)
nested_statements = [
StatementAssignmentVariable(
variable=tmp_iter_variable,
source=value_iterator,
source_ref=source_ref,
)
]
iterator_ref = ExpressionTempVariableRef(
variable=tmp_iter_variable, source_ref=source_ref
)
loop_statements = [
makeTryExceptSingleHandlerNode(
tried=StatementAssignmentVariable(
variable=tmp_value_variable,
source=_makeIteratorNext(
iterator_ref=iterator_ref, qual=qual, source_ref=source_ref
),
source_ref=source_ref,
),
exception_name=_getStopIterationName(qual),
handler_body=StatementLoopBreak(source_ref=source_ref),
source_ref=source_ref,
),
buildAssignmentStatements(
provider=provider if assign_provider else function_body,
temp_provider=function_body,
node=qual.target,
source=ExpressionTempVariableRef(
variable=tmp_value_variable, source_ref=source_ref
),
source_ref=source_ref,
),
]
conditions = buildNodeList(
provider=provider if assign_provider else function_body,
nodes=qual.ifs,
source_ref=source_ref,
)
if len(conditions) >= 1:
loop_statements.append(
makeStatementConditional(
condition=buildAndNode(values=conditions, source_ref=source_ref),
yes_branch=current_body,
no_branch=None,
source_ref=source_ref,
)
)
else:
loop_statements.append(current_body)
nested_statements.append(
StatementLoop(
body=StatementsSequence(
statements=mergeStatements(loop_statements), source_ref=source_ref
),
source_ref=source_ref,
)
)
if tmp_iter_variable is not None:
nested_statements.append(
StatementReleaseVariable(
variable=tmp_iter_variable, source_ref=source_ref
)
)
current_body = StatementsSequence(
statements=mergeStatements(nested_statements, False), source_ref=source_ref
)
statements.append(current_body)
statements = mergeStatements(statements)
release_statements = [
StatementReleaseVariable(variable=tmp_variable, source_ref=source_ref)
for tmp_variable in tmp_variables
]
return statements, release_statements
def _buildContractionNode(provider, node, name, emit_class, start_value, source_ref):
# The contraction nodes are reformulated to function bodies, with loops as
# described in the developer manual. They use a lot of temporary names,
# nested blocks, etc. and so a lot of variable names.
function_body = ExpressionOutlineFunction(
provider=provider, name=intern(name[1:-1]), source_ref=source_ref
)
iter_tmp = function_body.allocateTempVariable(temp_scope=None, name=".0")
container_tmp = function_body.allocateTempVariable(
temp_scope=None, name="contraction"
)
statements, release_statements = _buildContractionBodyNode(
provider=provider,
node=node,
emit_class=emit_class,
iter_tmp=iter_tmp,
temp_scope=None,
start_value=start_value,
container_tmp=container_tmp,
function_body=function_body,
assign_provider=False,
for_asyncgen=False,
source_ref=source_ref,
)
assign_iter_statement = StatementAssignmentVariable(
source=_makeIteratorCreation(
provider=provider,
qual=node.generators[0],
for_asyncgen=False,
source_ref=source_ref,
),
variable=iter_tmp,
source_ref=source_ref,
)
statements.append(
StatementReturn(
expression=ExpressionTempVariableRef(
variable=container_tmp, source_ref=source_ref
),
source_ref=source_ref,
)
)
statements = (
makeTryFinallyStatement(
provider=function_body,
tried=statements,
final=release_statements,
source_ref=source_ref.atInternal(),
),
)
if python_version < 300:
body = makeStatementsSequenceFromStatements(assign_iter_statement, statements)
else:
parent_module = provider.getParentModule()
code_object = CodeObjectSpec(
co_name=name,
co_kind="Function",
co_varnames=(),
co_argcount=1,
co_posonlyargcount=0,
co_kwonlyargcount=0,
co_has_starlist=False,
co_has_stardict=False,
co_filename=parent_module.getRunTimeFilename(),
co_lineno=source_ref.getLineNumber(),
future_spec=parent_module.getFutureSpec(),
)
body = makeStatementsSequenceFromStatements(
assign_iter_statement,
StatementsFrameFunction(
statements=mergeStatements(statements, False),
code_object=code_object,
source_ref=source_ref,
),
)
function_body.setBody(body)
return function_body
| 32.627273
| 88
| 0.63885
|
4792491c32adcc1c92a437caa63500fdb9154dc9
| 1,878
|
py
|
Python
|
example/runner.py
|
SJTU-IPADS/fgnn-artifacts
|
c96e7ec8204d767152958dc63a764466e90424fd
|
[
"Apache-2.0"
] | 23
|
2022-01-25T13:28:51.000Z
|
2022-03-23T07:05:47.000Z
|
example/runner.py
|
SJTU-IPADS/gnnlab
|
5c73564e4a9bd5deeff7eed0b923c115ccba34d7
|
[
"Apache-2.0"
] | null | null | null |
example/runner.py
|
SJTU-IPADS/gnnlab
|
5c73564e4a9bd5deeff7eed0b923c115ccba34d7
|
[
"Apache-2.0"
] | 1
|
2022-02-28T18:48:56.000Z
|
2022-02-28T18:48:56.000Z
|
from runner_helper import Arch, RunConfig, ConfigList, App, Dataset, CachePolicy, TMP_LOG_DIR, run_in_list, SampleType, percent_gen
import os
def tmp_call_back(cfg: RunConfig):
os.system(f"grep -A 4 'average' \"{cfg.get_log_fname()}.log\"")
if __name__ == '__main__':
from sys import argv
do_mock = False
durable_log = True
for arg in argv[1:]:
if arg == '-m' or arg == '--mock':
do_mock = True
elif arg == '-i' or arg == '--interactive':
durable_log = False
run_in_list(ConfigList()
.select('app', [
App.gcn,
# App.graphsage,
# App.pinsage,
]).select('dataset', [
# Dataset.reddit,
# Dataset.products,
Dataset.papers100M,
# Dataset.friendster,
]).select('cache_policy', [
CachePolicy.no_cache,
# CachePolicy.cache_by_degree,
# CachePolicy.cache_by_heuristic,
# CachePolicy.dynamic_cache,
]).select('pipeline', [
False,
# True,
])
# .override_arch(Arch.arch0)
.override('logdir', [TMP_LOG_DIR])
.override('dataset', [
Dataset.papers100M_300,
]).override('sample_type', [
# SampleType.kKHop1,
# SampleType.kWeightedKHop,
SampleType.kDefaultForApp,
]).override('cache_policy', [
# CachePolicy.cache_by_degree,
CachePolicy.cache_by_heuristic,
# CachePolicy.cache_by_presample,
# CachePolicy.cache_by_degree_hop,
# CachePolicy.cache_by_presample_static,
# CachePolicy.cache_by_fake_optimal,
# CachePolicy.cache_by_presample_1,
# CachePolicy.cache_by_presample_2,
]).override('batch_size',[
# 1000,
8000,
]).override('cache_percent', [
# 0.0,
0.01,0.02,0.03,0.04,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,
# 0.55, 0.60,
# 1,
])
.conf_list
,do_mock
,durable_log
# , tmp_call_back
)
| 27.617647
| 131
| 0.621406
|
188e6815d2f1fda3e09911ed47927b798bab3c6c
| 9,449
|
py
|
Python
|
reproject/mosaicking/tests/test_coadd.py
|
bryanmiller/reproject
|
e0472eb79c9f343a8371b655a1c4d23c8bb864b3
|
[
"BSD-3-Clause"
] | 2
|
2020-12-11T14:44:13.000Z
|
2020-12-11T14:44:15.000Z
|
reproject/mosaicking/tests/test_coadd.py
|
mwest007/reproject
|
e0472eb79c9f343a8371b655a1c4d23c8bb864b3
|
[
"BSD-3-Clause"
] | null | null | null |
reproject/mosaicking/tests/test_coadd.py
|
mwest007/reproject
|
e0472eb79c9f343a8371b655a1c4d23c8bb864b3
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import random
import numpy as np
import pytest
from astropy.wcs import WCS
from astropy.io.fits import Header
from numpy.testing import assert_allclose
from ... import reproject_exact, reproject_interp
from ..coadd import reproject_and_coadd
from ...tests.helpers import array_footprint_to_hdulist
ATOL = 1.e-9
DATA = os.path.join(os.path.dirname(__file__), '..', '..', 'tests', 'data')
@pytest.fixture(params=[reproject_interp, reproject_exact],
ids=["interp", "exact"])
def reproject_function(request):
return request.param
class TestReprojectAndCoAdd():
def setup_method(self, method):
self.wcs = WCS(naxis=2)
self.wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN'
self.wcs.wcs.crpix = 322, 151
self.wcs.wcs.crval = 43, 23
self.wcs.wcs.cdelt = -0.1, 0.1
self.wcs.wcs.equinox = 2000.
self.array = np.random.random((399, 334))
def _get_tiles(self, views):
# Given a list of views as (jmin, jmax, imin, imax), construct
# tiles that can be passed into the co-adding code
input_data = []
for (jmin, jmax, imin, imax) in views:
array = self.array[jmin:jmax, imin:imax].copy()
wcs = self.wcs.deepcopy()
wcs.wcs.crpix[0] -= imin
wcs.wcs.crpix[1] -= jmin
input_data.append((array, wcs))
return input_data
@property
def _nonoverlapping_views(self):
ie = (0, 122, 233, 245, 334)
je = (0, 44, 45, 333, 335, 399)
views = []
for i in range(4):
for j in range(5):
views.append((je[j], je[j+1], ie[i], ie[i+1]))
return views
@property
def _overlapping_views(self):
ie = (0, 122, 233, 245, 334)
je = (0, 44, 45, 333, 335, 399)
views = []
for i in range(4):
for j in range(5):
views.append((je[j], je[j+1] + 10, ie[i], ie[i+1] + 10))
return views
@pytest.mark.parametrize('combine_function', ['mean', 'sum'])
def test_coadd_no_overlap(self, combine_function, reproject_function):
# Make sure that if all tiles are exactly non-overlapping, and
# we use 'sum' or 'mean', we get the exact input array back.
input_data = self._get_tiles(self._nonoverlapping_views)
array, footprint = reproject_and_coadd(input_data, self.wcs,
shape_out=self.array.shape,
combine_function=combine_function,
reproject_function=reproject_function)
assert_allclose(array, self.array, atol=ATOL)
assert_allclose(footprint, 1, atol=ATOL)
def test_coadd_with_overlap(self, reproject_function):
# Here we make the input tiles overlapping. We can only check the
# mean, not the sum.
input_data = self._get_tiles(self._overlapping_views)
array, footprint = reproject_and_coadd(input_data, self.wcs,
shape_out=self.array.shape,
combine_function='mean',
reproject_function=reproject_function)
assert_allclose(array, self.array, atol=ATOL)
def test_coadd_background_matching(self, reproject_function):
# Test out the background matching
input_data = self._get_tiles(self._overlapping_views)
for array, wcs in input_data:
array += random.uniform(-3, 3)
# First check that without background matching the arrays don't match
array, footprint = reproject_and_coadd(input_data, self.wcs,
shape_out=self.array.shape,
combine_function='mean',
reproject_function=reproject_function)
assert not np.allclose(array, self.array, atol=ATOL)
# Now check that once the backgrounds are matched the values agree
array, footprint = reproject_and_coadd(input_data, self.wcs,
shape_out=self.array.shape,
combine_function='mean',
reproject_function=reproject_function,
match_background=True)
# The absolute values of the two arrays will be offset since any
# solution that reproduces the offsets between images is valid
assert_allclose(array - np.mean(array),
self.array - np.mean(self.array), atol=ATOL)
def test_coadd_background_matching_with_nan(self, reproject_function):
# Test out the background matching when NaN values are present. We do
# this by using three arrays with the same footprint but with different
# parts masked.
array1 = self.array.copy() + random.uniform(-3, 3)
array2 = self.array.copy() + random.uniform(-3, 3)
array3 = self.array.copy() + random.uniform(-3, 3)
array1[:, 122:] = np.nan
array2[:, :50] = np.nan
array2[:, 266:] = np.nan
array3[:, :199] = np.nan
input_data = [(array1, self.wcs), (array2, self.wcs), (array3, self.wcs)]
array, footprint = reproject_and_coadd(input_data, self.wcs,
shape_out=self.array.shape,
combine_function='mean',
reproject_function=reproject_function,
match_background=True)
# The absolute values of the two arrays will be offset since any
# solution that reproduces the offsets between images is valid
assert_allclose(array - np.mean(array),
self.array - np.mean(self.array), atol=ATOL)
def test_coadd_with_weights(self, reproject_function):
# Make sure that things work properly when specifying weights
array1 = self.array + 1
array2 = self.array - 1
weight1 = np.cumsum(np.ones_like(self.array), axis=1) - 1
weight2 = weight1[:, ::-1]
input_data = [(array1, self.wcs), (array2, self.wcs)]
input_weights = [weight1, weight2]
array, footprint = reproject_and_coadd(input_data, self.wcs,
shape_out=self.array.shape,
combine_function='mean',
input_weights=input_weights,
reproject_function=reproject_function,
match_background=False)
expected = self.array + (2 * (weight1 / weight1.max()) - 1)
assert_allclose(array, expected, atol=ATOL)
HEADER_SOLAR_OUT = """
WCSAXES = 2
CRPIX1 = 90.5
CRPIX2 = 45.5
CDELT1 = 2
CDELT2 = 2
CUNIT1 = 'deg'
CUNIT2 = 'deg'
CTYPE1 = 'HGLN-CAR'
CTYPE2 = 'HGLT-CAR'
CRVAL1 = 0.0
CRVAL2 = 0.0
LONPOLE = 0.0
LATPOLE = 90.0
DATE-OBS= '2011-02-15T00:14:03.654'
MJD-OBS = 55607.009764514
"""
@pytest.mark.array_compare()
def test_coadd_solar_map():
# This is a test that exercises a lot of different parts of the mosaicking
# code. The idea is to take three solar images from different viewpoints
# and combine them into a single one. This uses weight maps that are not
# uniform and also include NaN values.
pytest.importorskip('sunpy', minversion='1.0.4')
from sunpy.map import Map, all_coordinates_from_map
# Load in three images from different viewpoints around the Sun
filenames = ['secchi_l0_a.fits', 'aia_171_level1.fits', 'secchi_l0_b.fits']
maps = [Map(os.path.join(DATA, f)) for f in filenames]
# Produce weight maps that are centered on the solar disk and go to zero at the edges
coordinates = tuple(map(all_coordinates_from_map, maps))
input_weights = [coord.transform_to("heliocentric").z.value for coord in coordinates]
input_weights = [(w / np.nanmax(w)) ** 4 for w in input_weights]
shape_out = [90, 180]
wcs_out = WCS(Header.fromstring(HEADER_SOLAR_OUT, sep='\n'))
scales = [1/6, 1, 1/6]
input_data = tuple((a.data * scale, a.wcs) for (a, scale) in zip(maps, scales))
array, footprint = reproject_and_coadd(input_data, wcs_out, shape_out,
input_weights=input_weights,
reproject_function=reproject_interp,
match_background=True)
header_out = wcs_out.to_header()
# ASTROPY_LT_40: astropy v4.0 introduced new default header keywords,
# once we support only astropy 4.0 and later we can update the reference
# data files and remove this section.
for key in ('MJDREFF', 'MJDREFI'):
header_out.pop(key, None)
return array_footprint_to_hdulist(array, footprint, header_out)
| 36.342308
| 89
| 0.572971
|
131b02e0190bd9e67e784eea7c61f989b1bfa882
| 9,493
|
py
|
Python
|
MedicalDecisionDiabetes/MedicalDecisionDiabetesModel.py
|
nikunjpansari/stochastic-optimization
|
a01e95e9168dd8f87751c29f94bb382f83567e71
|
[
"MIT"
] | 92
|
2019-05-08T15:12:03.000Z
|
2022-03-31T08:47:57.000Z
|
MedicalDecisionDiabetes/MedicalDecisionDiabetesModel.py
|
nikunjpansari/stochastic-optimization
|
a01e95e9168dd8f87751c29f94bb382f83567e71
|
[
"MIT"
] | 1
|
2020-12-15T22:24:45.000Z
|
2020-12-15T22:24:45.000Z
|
MedicalDecisionDiabetes/MedicalDecisionDiabetesModel.py
|
nikunjpansari/stochastic-optimization
|
a01e95e9168dd8f87751c29f94bb382f83567e71
|
[
"MIT"
] | 50
|
2019-03-04T21:12:18.000Z
|
2022-03-23T05:06:09.000Z
|
"""
Model class - Medical Decisions Diabetes Treatments
"""
from collections import namedtuple
import numpy as np
import pandas as pd
import math
from random import randint
# This function returns the precision (beta), given the s.d. (sigma)
def Beta(sigma):
return 1/sigma**2
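# Illustrative check (hypothetical value): Beta(0.05) = 1 / 0.05**2 = 400,
# i.e. a smaller standard deviation corresponds to a higher precision.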
class MedicalDecisionDiabetesModel():
"""
Base class for Medical Decisions Diabetes Treatments model
"""
def __init__(self, state_names, x_names, S0, additional_params, exog_info_fn=None, transition_fn=None, objective_fn=None, seed=20180529):
"""
Initializes the model
        :param state_names: list(str) - state variable dimension names
        :param x_names: list(str) - decision variable dimension names
        :param S0: DataFrame - needs to contain at least the information to populate the initial state using state_names
        :param additional_params: DataFrame - additional model parameters (e.g. sigma_w, truth_type)
:param exog_info_fn: function - calculates relevant exogenous information
:param transition_fn: function - takes in decision variables and exogenous information to describe how the state
evolves
:param objective_fn: function - calculates contribution at time t
:param seed: int - seed for random number generator
"""
        self.init_args = {'seed': seed}
self.prng = np.random.RandomState(seed)
self.init_state = {x:[S0.loc[x, 'mu_0'],Beta(S0.loc[x, 'sigma_0']),0] for x in x_names }
self.state_names = state_names
self.x_names = x_names
self.State = namedtuple('State', state_names)
self.state = self.build_state(self.init_state)
self.Decision = namedtuple('Decision', x_names)
self.obj = 0.0
self.obj_sum = 0.0
self.sigma_w = additional_params.loc['sigma_w', 0]
self.truth_params_dict = {} #changed later
self.truth_type = additional_params.loc['truth_type', 0]
self.mu = {} #updated using "exog_info_sample_mu" at the beginning of each sample path
self.t = 0 # time counter (in months)
if self.truth_type == 'fixed_uniform':
self.truth_params_dict = {x:[S0.loc[x, 'mu_fixed'],S0.loc[x, 'fixed_uniform_a'],S0.loc[x, 'fixed_uniform_b']] for x in self.x_names }
elif self.truth_type == 'prior_uniform':
self.truth_params_dict = {x:[S0.loc[x, 'mu_0'],S0.loc[x, 'prior_mult_a'],S0.loc[x, 'prior_mult_b']] for x in self.x_names }
else:
self.truth_params_dict = {x:[S0.loc[x, 'mu_truth'],S0.loc[x, 'sigma_truth'],0] for x in self.x_names }
def printState(self):
print("Current state ")
for x in self.x_names:
print("Treatment {}: mu {:.2f}, sigma {:.2f} and N {}".format(x,getattr(self.state,x)[0],1/math.sqrt(getattr(self.state,x)[1]),getattr(self.state,x)[2]))
print("\n\n")
def printTruth(self):
print("Model truth_type {}. Meaurement noise sigma_W {} ".format(self.truth_type,self.sigma_w))
for x in self.x_names:
print("Treatment {}: par1 {:.2f}, par2 {:.2f} and par3 {}".format(x,self.truth_params_dict[x][0],self.truth_params_dict[x][1],self.truth_params_dict[x][2]))
print("\n\n")
# this function gives a state containing all the state information needed
    # State is a vector of dimension 5 where each entry is a list: [mu, beta, N]
    # we want to replace the attribute of the state corresponding to the decision
def build_state(self, info):
return self.State(*[info[k] for k in self.state_names])
# this function gives a decision
# Our decision will be the choice of medication to try for a month.
# N.B: This function is currently not in use.
def build_decision(self, info):
return self.Decision(*[info[k] for k in self.x_names])
def exog_info_sample_mu(self):
if self.truth_type == "known":
self.mu = {x:self.truth_params_dict[x][0] for x in self.x_names}
elif self.truth_type == "fixed_uniform":
self.mu = {x:self.truth_params_dict[x][0] + self.prng.uniform(self.truth_params_dict[x][1],self.truth_params_dict[x][2]) for x in self.x_names}
elif self.truth_type == "prior_uniform":
self.mu = {x:self.truth_params_dict[x][0] + self.prng.uniform(self.truth_params_dict[x][1]*self.truth_params_dict[x][0],self.truth_params_dict[x][2]*self.truth_params_dict[x][0]) for x in self.x_names}
else:
self.mu = {x:self.prng.normal(self.truth_params_dict[x][0], self.truth_params_dict[x][1]) for x in self.x_names}
# this function gives the exogenous information that is dependent on a random process
# In our case, exogeneous information: W^(n+1) = mu_x + eps^(n+1),
# Where eps^(n+1) is normally distributed with mean 0 and known variance (here s.d. 0.05)
# W^(n+1)_x : reduction in A1C level
# self.prng.normal takes two values, mu and sigma.
def exog_info_fn(self, decision):
W = self.prng.normal(self.mu[decision], self.sigma_w)
beta_W = Beta(self.sigma_w)
return {"reduction": W, "beta_W": beta_W, "mu": self.mu[decision]}
    # this function takes in the decision and exogenous information to return
    # the new mu_empirical and beta values corresponding to the decision.
def transition_fn(self, decision, exog_info):
# for x = x_n only. Other entries unchanged.
beta = (getattr(self.state, decision))[1] + exog_info["beta_W"]
mu_empirical = ((getattr(self.state, decision))[1]*(getattr(self.state, decision))[0] + \
exog_info["beta_W"]*exog_info["reduction"]) / beta
N_x = getattr(self.state, decision)[2] + 1 # count of no. times drug x was given.
return {decision: [mu_empirical, beta, N_x]}
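    # Worked example of the update above (illustrative, hypothetical numbers):
    # with a prior mu = 1.0 and beta = 1/0.2**2 = 25, an observation
    # W = 1.4 with beta_W = 1/0.05**2 = 400 gives
    #   beta'         = 25 + 400 = 425
    #   mu_empirical' = (25*1.0 + 400*1.4) / 425 ≈ 1.376
    # so the posterior mean is pulled towards the more precise measurement.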
    # this function returns the contribution at time t: the true mean reduction in A1C (mu) for the chosen drug
def objective_fn(self, decision, exog_info):
mu = exog_info["mu"]
W = exog_info["reduction"]
return mu
# this method steps the process forward by one time increment by updating the sum of the contributions, the
# exogenous information and the state variable
def step(self, decision):
# build dictionary copy of self.states (which is right now a named tuple)
#current_state = {}
#for k in self.state_names:
# current_state[k] = getattr(self.state, k)
# compute new mu_empirical and beta for the decision.
exog_info = self.exog_info_fn(decision)
exog_info.update(self.transition_fn(decision, exog_info))
# update objective (add new W to previous obj)
# This is the sum
self.obj += self.objective_fn(decision, exog_info)
current_state = {key:exog_info[decision] if key == decision else getattr(self.state, key) for key in self.state_names}
self.state = self.build_state(current_state)
self.t_update()
# re-build self.state
#for key in current_state:
# if key == decision:
# # replace the entry corresponding to the decision with the new exog.info
# current_state[decision] = exog_info[decision]
# # rebuild the self.state tuple using the updated dictionary current_state
# self.state = self.build_state(current_state)
# else:
# pass
return exog_info
# Update method for time counter
def t_update(self):
self.t += 1
return self.t
# =============================================================================
# # UNIT TEST
# if __name__ == "__main__":
# '''
# this is an example of creating a model, choosing the decision at random
# (out of 5 possible drugs), and running the model for a fixed time period (in months)
# '''
# file = 'MDDMparameters.xlsx'
# S0 = pd.read_excel(file, sheet_name = 'parameters1')
# sigmaW = pd.read_excel(file, sheet_name = 'parameters2')
#
# # this needs to be (vector of 5 entries, each of which is a 2-entry vec)
# state_names = ['M', 'Sens', 'Secr', 'AGI', 'PA']
# # for each drug: (first entry: mu_0, second entry: beta_0, third entry: number of times drug x was given)
# init_state = {'M': [ S0.loc['M', 'mu'] , Beta(S0.loc['M', 'sigma']), 0], \
# 'Sens': [S0.loc['Sens', 'mu'] , Beta(S0.loc['Sens', 'sigma']), 0], \
# 'Secr': [S0.loc['Secr', 'mu'] , Beta(S0.loc['Secr', 'sigma']), 0], \
# 'AGI': [S0.loc['AGI', 'mu'] , Beta(S0.loc['AGI', 'sigma']), 0], \
# 'PA': [S0.loc['PA', 'mu'] , Beta(S0.loc['PA', 'sigma']), 0]}
# # in order: Metformin, Sensitizer, Secretagoge, Alpha-glucosidase inhibitor, Peptide analog.
# x_names = ['M', 'Sens', 'Secr', 'AGI', 'PA']
#
# Model = MedicalDecisionDiabetesModel(state_names, x_names, init_state)
# # initialize sigma_w
# Model.sigma_w = sigmaW.loc['sigma_w', 0]
#
# # each time step is 1 month.
# t_stop = 50
#
#
# # Testing
# for k in range(t_stop):
# # policy: pick a random drug: Pure exploration.
# decision = x_names[randint(0,4)] # policy goes here
# # update model according to the decision
# print("t = {}, \nstate = {} \ndecision = {}, \nobjective = {} \n".format(Model.t, Model.state, decision, Model.obj)) # better format state
# Model.step(decision)
# Model.t_update()
#
# =============================================================================
| 46.534314
| 213
| 0.620773
|
3b81e9e2f5c3f95f7f1c3d985154f990d46a4a6b
| 607
|
py
|
Python
|
app/controller/Ping.py
|
junpengxu/MyFlask
|
14457b5c44e9a2a881cf2b5c1ed9ba7bf783b31a
|
[
"MIT"
] | null | null | null |
app/controller/Ping.py
|
junpengxu/MyFlask
|
14457b5c44e9a2a881cf2b5c1ed9ba7bf783b31a
|
[
"MIT"
] | null | null | null |
app/controller/Ping.py
|
junpengxu/MyFlask
|
14457b5c44e9a2a881cf2b5c1ed9ba7bf783b31a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2021/10/24 3:33 下午
# @Author : xujunpeng
import time
from typing import List, Dict
from app.base.base_controller import BaseController
from app.model.ping import Ping
class PingController(BaseController):
def get(self, pk):
Ping.query.get(pk).simple_info()
Ping.query.filter().all()
return Ping.query.get(pk).simple_info()
def create(self, *args, **kwargs):
desc = kwargs.get("desc", "default")
Ping(desc=desc).save()
def query(self, page=1, offset=10, sort: List[set] = None, query: Dict = None):
pass
| 26.391304
| 83
| 0.637562
|
146de10cf52cd08b96f1557effb5cf82f8708501
| 6,575
|
py
|
Python
|
run_predictions/run_Rosetta/fixbb.py
|
wells-wood-research/PDBench
|
8d8ce16a5554ac15ed8593b7846da93498fc9bfe
|
[
"MIT"
] | 1
|
2021-08-01T21:23:21.000Z
|
2021-08-01T21:23:21.000Z
|
run_predictions/run_Rosetta/fixbb.py
|
wells-wood-research/PDBench
|
8d8ce16a5554ac15ed8593b7846da93498fc9bfe
|
[
"MIT"
] | 10
|
2021-05-05T12:01:53.000Z
|
2021-09-01T16:45:18.000Z
|
run_predictions/run_Rosetta/fixbb.py
|
wells-wood-research/PDBench
|
8d8ce16a5554ac15ed8593b7846da93498fc9bfe
|
[
"MIT"
] | 1
|
2021-08-14T14:26:16.000Z
|
2021-08-14T14:26:16.000Z
|
import ampal
import gzip
import glob
import subprocess
import multiprocessing
import os
from pathlib import Path
from sklearn.preprocessing import OneHotEncoder
import warnings
import numpy as np
import pandas as pd
import string
import urllib
from sklearn import metrics
import numpy as np
acids = [
"A",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"V",
"W",
"Y",
]
standard_residues = [
"ALA",
"ARG",
"ASN",
"ASP",
"CYS",
"GLU",
"GLN",
"GLY",
"HIS",
"ILE",
"LEU",
"LYS",
"MET",
"PHE",
"PRO",
"SER",
"THR",
"TRP",
"TYR",
"VAL",
]
def atom_to_hetatm(pdb: Path):
"""Rosetta labels non-standard acids as ATOM instead of HETATM. This crashes AMPAL."""
with open(pdb, "r") as file:
text = file.readlines()
for i, line in enumerate(text):
if line[0:6].strip() == "ATOM" and line[17:20].strip() not in standard_residues:
text[i] = "HETATM" + text[i][6:]
with open(pdb, "w") as file:
file.writelines(text)
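# Illustrative example (hypothetical record): a line for a non-standard residue such as MSE,
#   "ATOM   1234 SE   MSE A  42 ..."
# is rewritten by atom_to_hetatm to
#   "HETATM 1234 SE   MSE A  42 ..."
# i.e. only the record name in the first six columns is replaced.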
def run_Rosetta(
pdb: str,
chain: str,
working_dir: Path,
path_to_Rosetta: Path,
path_to_assemblies: Path,
) -> None:
"""Runs Rosetta design with fixed backbone
    Parameters
----------
pdb: str
PDB code.
chain: str
Chain code.
    working_dir: Path
Dir where to store temporary files and results.
path_to_Rosetta: Path
Location of Rosetta executable.
path_to_assemblies:Path
Location of input PDB structures.
"""
print(f"Starting {pdb}{chain}.")
# make resfile to predict only the specified chain, skip non-canonical residues
assembly = ampal.load_pdb(Path(path_to_assemblies / pdb).with_suffix(".pdb"))
with open(working_dir / ("resfile_" + pdb), "w") as file:
file.write("NATRO\nstart\n")
for i, x in enumerate(assembly[chain]):
file.write(f"{x.id} {chain} ALLAA\n")
p = subprocess.run(
f'{path_to_Rosetta} -s {Path(path_to_assemblies/pdb).with_suffix(".pdb")} -linmem_ig 10 -ignore_unrecognized_res -overwrite -resfile {working_dir/("resfile_"+pdb)} -out:path:all {working_dir/"results"}',
shell=True,
)
print(f"{pdb}{chain} done.")
def seq_to_arr(working_dir: Path, user_list: Path, ignore_uncommon: bool = False):
"""Produces prediction format compatible with the benchmarking tool.
working_dir: Path
Dir where Rosetta results are stored.
user_list: Path
        Path to .txt file with protein chains to include in the benchmark.
    ignore_uncommon: bool
        If True, exclude positions with non-canonical amino acids from the output."""
with open(Path(user_list)) as file:
chains = [x.strip("\n") for x in file.readlines()]
predicted_sequences = []
path = working_dir / "results"
enc = OneHotEncoder(categories=[acids], sparse=False)
with open(path / "datasetmap.txt", "w") as file:
file.write(f"ignore_uncommon {ignore_uncommon}\ninclude_pdbs\n##########\n")
for protein in chains:
prediction_path = path / f"{protein[:4]}_0001.pdb"
# check for empty and missing files
if prediction_path.exists():
try:
assembly = ampal.load_pdb(prediction_path)
# fix malformed files
except ValueError:
atom_to_hetatm(prediction_path)
assembly = ampal.load_pdb(prediction_path)
                # predicted sequence for the relevant chain (needed below)
                seq = assembly[protein[-1]].sequence
                # exclude positions with non-canonical amino acids
                if ignore_uncommon == True:
                    # path to pdb has changed, change it manually if you decide to use this option.
                    temp_assembly = ampal.load_pdb(working_dir / f"{protein[:4]}.pdb")
                    true_seq = temp_assembly[protein[-1]].sequence
                    print(metrics.accuracy_score(list(seq), list(true_seq)))
                    assert len(seq) == len(
                        true_seq
                    ), f"{protein} sequence lengths don't match"
                    seq = "".join(
                        [
                            pred_ch
                            for pred_ch, true_ch in zip(list(seq), list(true_seq))
                            if true_ch != "X"
                        ]
                    )
                    if seq.find("X") != -1:
                        warnings.warn(
                            f"Rosetta: {protein} has remaining non-canonical acids."
                        )
predicted_sequences += list(seq)
file.write(f"{protein} {len(seq)}\n")
else:
warnings.warn(f"Rosetta: {protein} prediction does not exits.")
arr = enc.fit_transform(np.array(predicted_sequences).reshape(-1, 1))
pd.DataFrame(arr).to_csv(path / "rosetta.csv", header=None, index=None)
def multi_Rosetta(
structures: list,
working_dir: Path,
path_to_assemblies: Path,
path_to_rosetta: Path,
max_processes: int = 8,
) -> None:
"""Runs Rosetta on all PDB chains in the DataFrame.
Parameters
----------
structures:List
List with PDB and chain codes.
max_processes: int = 8
Number of cores to use, default is 8.
working_dir: Path
Dir where to store temporary files and results.
path_to_assemblies: Path
Dir with biological assemblies.
path_to_rosetta: Path
Location of rosetta executable.
"""
inputs = []
# check if working directory exists. Make one if doesn't exist.
if not working_dir.exists():
os.makedirs(working_dir)
if not (working_dir / "results").exists():
os.makedirs(working_dir / "results")
print(f"{len(structures)} structures will be predicted.")
for protein in structures:
inputs.append(
(
protein[:4],
protein[4],
working_dir,
path_to_rosetta,
path_to_assemblies,
)
)
with multiprocessing.Pool(max_processes) as P:
P.starmap(run_Rosetta, inputs)
if __name__ == "__main__":
# seq_to_arr(Path('/home/s1706179/Rosetta/data_polyglycine/'),Path('/home/s1706179/Rosetta/data/set.txt'),False)
seq_to_arr(
Path("/home/s1706179/Rosetta/data_nmr_polyglycine/"),
Path("/home/s1706179/Rosetta/data/nmr_set.txt"),
False,
)
| 30.16055
| 211
| 0.572167
|
d493243ba40b13e8763016daae5929b87739c50c
| 126
|
py
|
Python
|
languages/python/design_stackinspection.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-04-09T04:15:24.000Z
|
2021-04-09T04:15:24.000Z
|
languages/python/design_stackinspection.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | null | null | null |
languages/python/design_stackinspection.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-07-31T02:45:29.000Z
|
2021-07-31T02:45:29.000Z
|
import sys
def foo():
"""blah"""
    print(sys._getframe().f_back.f_locals[sys._getframe().f_code.co_name].__doc__)
foo()
| 18
| 81
| 0.674603
|
5d5b9faca2b83d46cd7225ec077da749797d7e7e
| 1,022
|
py
|
Python
|
inference/src/models.py
|
fojor/object-cut
|
2e9102ef7d21e056110a94931a91a75ae6a2114a
|
[
"Apache-2.0"
] | 87
|
2019-09-06T08:24:11.000Z
|
2022-03-27T10:54:48.000Z
|
inference/src/models.py
|
fojor/object-cut
|
2e9102ef7d21e056110a94931a91a75ae6a2114a
|
[
"Apache-2.0"
] | 3
|
2021-03-06T13:23:16.000Z
|
2021-04-26T03:03:17.000Z
|
inference/src/models.py
|
fojor/object-cut
|
2e9102ef7d21e056110a94931a91a75ae6a2114a
|
[
"Apache-2.0"
] | 12
|
2019-09-24T17:21:46.000Z
|
2022-02-24T16:18:11.000Z
|
from pydantic import BaseModel
from src import EXAMPLE_IMAGE_PATH, EXAMPLE_MESSAGE_SUCCESS
class EngineRequest(BaseModel):
"""
    This schema describes what a request should look like.
    It contains a string with the image path and a field that tells us whether we want a white or transparent background.
"""
img: str
to_remove: str
color_removal: str
secret_access: str
class Config:
schema_extra = dict(example=dict(
img=EXAMPLE_IMAGE_PATH, to_remove='background', color_removal='transparent', secret_access='SECRET'
))
class EngineResponse(BaseModel):
"""
    This schema describes what a response should look like.
    It contains a boolean telling you whether the request was successful or not, along with a message
    and an image path containing the result image.
"""
error: bool
img: str
message: str
class Config:
schema_extra = dict(example=dict(
error=False, img=EXAMPLE_IMAGE_PATH, message=EXAMPLE_MESSAGE_SUCCESS
))
| 26.894737
| 116
| 0.694716
|
d924f2d8f8d3e94b462fde52e64c6e8b5ee3428c
| 505
|
py
|
Python
|
rest/call/list-get-example-3/list-get-example-3.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 3
|
2020-05-05T10:01:02.000Z
|
2021-02-06T14:23:13.000Z
|
rest/call/list-get-example-3/list-get-example-3.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | null | null | null |
rest/call/list-get-example-3/list-get-example-3.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 1
|
2019-10-02T14:36:36.000Z
|
2019-10-02T14:36:36.000Z
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
from datetime import date
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
# A list of call objects with the properties described above
calls = client.calls.list(
status="completed", start_time_after=date(2009, 7, 6)
)
for call in calls:
print(call.to)
| 29.705882
| 72
| 0.784158
|
72913ddaec4eb1f655ddef1c6afde6256b8ecffc
| 9,276
|
py
|
Python
|
python/mxnet/contrib/onnx/onnx2mx/import_onnx.py
|
mchoi8739/incubator-mxnet
|
cff583250479b31c394f568ffb835b720cb84dc4
|
[
"Apache-2.0"
] | 211
|
2016-06-06T08:32:36.000Z
|
2021-07-03T16:50:16.000Z
|
python/mxnet/contrib/onnx/onnx2mx/import_onnx.py
|
mchoi8739/incubator-mxnet
|
cff583250479b31c394f568ffb835b720cb84dc4
|
[
"Apache-2.0"
] | 82
|
2016-03-29T02:40:02.000Z
|
2021-02-06T22:20:40.000Z
|
python/mxnet/contrib/onnx/onnx2mx/import_onnx.py
|
mchoi8739/incubator-mxnet
|
cff583250479b31c394f568ffb835b720cb84dc4
|
[
"Apache-2.0"
] | 58
|
2016-10-27T07:37:08.000Z
|
2021-07-03T16:50:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name,too-many-locals,no-self-use
""" Support import export formats."""
import numpy as np
from .... import symbol
from .... import ndarray as nd
from ....base import string_types
from ._import_helper import _convert_map as convert_map
class GraphProto(object): # pylint: disable=too-few-public-methods
"""A helper class for handling mxnet symbol copying from pb2.GraphProto.
Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
"""
def __init__(self):
self._nodes = {}
self._params = {}
self._num_input = 0
self._num_param = 0
self.aux_dict = {}
self.arg_dict = {}
self.model_metadata = {}
def _convert_operator(self, node_name, op_name, attrs, inputs):
"""Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
:param node_name : str
name of the node to be translated.
:param op_name : str
Operator name, such as Convolution, FullyConnected
:param attrs : dict
Dict of operator attributes
:param inputs: list
list of inputs to the operator
Returns
-------
:return mxnet_sym
Converted mxnet symbol
"""
if op_name in convert_map:
op_name, new_attrs, inputs = convert_map[op_name](attrs, inputs, self)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
if isinstance(op_name, string_types):
new_op = getattr(symbol, op_name, None)
if not new_op:
raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
if node_name is None:
mxnet_sym = new_op(*inputs, **new_attrs)
else:
mxnet_sym = new_op(name=node_name, *inputs, **new_attrs)
return mxnet_sym
return op_name
def from_onnx(self, graph):
"""Construct symbol from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
Returns
-------
sym :symbol.Symbol
The returned mxnet symbol
params : dict
A dict of name: nd.array pairs, used as pretrained weights
"""
# get input, output shapes
self.model_metadata = self.get_graph_metadata(graph)
# parse network inputs, aka parameters
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
self._params[init_tensor.name] = self._parse_array(init_tensor)
# converting GraphProto message
for i in graph.input:
if i.name in self._params:
# i is a param instead of input
self._nodes[i.name] = symbol.Variable(name=i.name,
shape=self._params[i.name].shape)
else:
self._nodes[i.name] = symbol.Variable(name=i.name)
# constructing nodes, nodes are stored as directed acyclic graph
# converting NodeProto message
for node in graph.node:
op_name = node.op_type
node_name = node.name.strip()
node_name = node_name if node_name else None
onnx_attr = self._parse_attr(node.attribute)
inputs = [self._nodes[i] for i in node.input]
mxnet_sym = self._convert_operator(node_name, op_name, onnx_attr, inputs)
for k, i in zip(list(node.output), range(len(mxnet_sym.list_outputs()))):
self._nodes[k] = mxnet_sym[i]
# splitting params into args and aux params
for args in mxnet_sym.list_arguments():
if args in self._params:
self.arg_dict.update({args: nd.array(self._params[args])})
for aux in mxnet_sym.list_auxiliary_states():
if aux in self._params:
self.aux_dict.update({aux: nd.array(self._params[aux])})
# now return the outputs
out = [self._nodes[i.name] for i in graph.output]
if len(out) > 1:
out = symbol.Group(out)
else:
out = out[0]
return out, self.arg_dict, self.aux_dict
def get_graph_metadata(self, graph):
"""
Get the model metadata from a given onnx graph.
"""
_params = set()
for tensor_vals in graph.initializer:
_params.add(tensor_vals.name)
input_data = []
for graph_input in graph.input:
if graph_input.name not in _params:
shape = [val.dim_value for val in graph_input.type.tensor_type.shape.dim]
input_data.append((graph_input.name, tuple(shape)))
output_data = []
for graph_out in graph.output:
shape = [val.dim_value for val in graph_out.type.tensor_type.shape.dim]
output_data.append((graph_out.name, tuple(shape)))
metadata = {'input_tensor_data' : input_data,
'output_tensor_data' : output_data
}
return metadata
def graph_to_gluon(self, graph, ctx):
"""Construct SymbolBlock from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
ctx : Context or list of Context
Loads the model into one or many context(s).
Returns
-------
sym_block :gluon.nn.SymbolBlock
The returned gluon SymbolBlock
"""
sym, arg_params, aux_params = self.from_onnx(graph)
metadata = self.get_graph_metadata(graph)
data_names = [input_tensor[0] for input_tensor in metadata['input_tensor_data']]
data_inputs = [symbol.var(data_name) for data_name in data_names]
from ....gluon import SymbolBlock
net = SymbolBlock(outputs=sym, inputs=data_inputs)
net_params = net.collect_params()
for param in arg_params:
if param in net_params:
net_params[param].shape = arg_params[param].shape
net_params[param]._load_init(arg_params[param], ctx=ctx)
for param in aux_params:
if param in net_params:
net_params[param].shape = aux_params[param].shape
net_params[param]._load_init(aux_params[param], ctx=ctx)
return net
def _parse_array(self, tensor_proto):
"""Grab data in TensorProto and convert to numpy array."""
try:
from onnx.numpy_helper import to_array
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
if len(tuple(tensor_proto.dims)) > 0:
np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims))
else:
# If onnx's params are scalar values without dims mentioned.
np_array = np.array([to_array(tensor_proto)])
return nd.array(np_array)
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ['f', 'i', 's']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
# Needed for supporting python version > 3.5
if isinstance(attrs[a.name], bytes):
attrs[a.name] = attrs[a.name].decode(encoding='utf-8')
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['t', 'g']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['tensors', 'graphs']:
if list(getattr(a, f)):
raise NotImplementedError("Filed {} is not supported in mxnet.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs
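# Minimal usage sketch (illustrative; assumes a serialized ONNX model `model.onnx` exists):
#
#   import onnx
#   model = onnx.load('model.onnx')
#   sym, arg_params, aux_params = GraphProto().from_onnx(model.graph)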
| 40.50655
| 94
| 0.594114
|
2371bc3517fc9219bafd6362b02281161d549e10
| 2,957
|
py
|
Python
|
clubs/views.py
|
aprefontaine/TMScheduler
|
298a332532b9df1d3f6a80b1334630bc106d3b78
|
[
"BSD-3-Clause"
] | 1
|
2015-08-30T16:48:17.000Z
|
2015-08-30T16:48:17.000Z
|
clubs/views.py
|
aprefontaine/TMScheduler
|
298a332532b9df1d3f6a80b1334630bc106d3b78
|
[
"BSD-3-Clause"
] | null | null | null |
clubs/views.py
|
aprefontaine/TMScheduler
|
298a332532b9df1d3f6a80b1334630bc106d3b78
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from clubs.models import Club
import datetime
class ClubContextBase:
Type = 0
ClubList = []
def clubsHome(request):
# Messages in Django 1.2...
# club_name = messages.get_messages(request)
    clubContext = ClubContextBase()
clubContext.ClubList = Club.objects.all().order_by('Number')
return render_to_response('clubs/index.html', {'clubContext': clubContext}, context_instance=RequestContext(request))
class ClubContextAdd(ClubContextBase):
Type = 2
Success = 0
def addClub(request):
clubContext = ClubContextAdd()
try:
clubName = request.POST['clubName']
clubNumber = request.POST['clubNumber']
clubArea = request.POST['clubArea']
clubDivision = request.POST['clubDivision']
clubDistrict = request.POST['clubDistrict']
    except KeyError:
        # a required POST field was missing
return render_to_response('clubs/index.html', {
'error_message': "Club create failed.",
}, context_instance=RequestContext(request))
else:
newClub = Club(Name=clubName,Number=clubNumber,Area=clubArea,Division=clubDivision,District=clubDistrict)
newClub.save()
# messages.success(request, 'Add')
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('clubs.views.clubsHome'))
def importClubs(request):
return render_to_response('clubs/ClubImport.html',
{'test': 'I am the second view.'},
context_instance=RequestContext(request))
class ClubContextImport(ClubContextBase):
Type = 1
Added = 0
Existing = 0
Errors = 0
def doImportClubs(request):
csvText = request.POST['csvClubData']
lines = csvText.split('\n')
clubContext = ClubContextImport()
for nextLine in lines:
fields = nextLine.split(',')
clubName = fields[0]
clubNum = fields[1]
clubArea = fields[2]
clubDiv = fields[3]
clubDistrict = fields[4]
existingClub = Club.objects.filter(Number=clubNum)
if len(existingClub) == 0:
newClub = Club(Name=clubName,Number=clubNum,Area=clubArea,Division=clubDiv,District=clubDistrict)
newClub.save()
clubContext.Added = clubContext.Added + 1
else:
clubContext.Existing = clubContext.Existing + 1
clubContext.ClubList = Club.objects.all().order_by('Number')
return render_to_response('clubs/index.html', {'clubContext': clubContext})
# return HttpResponseRedirect(reverse('clubs.views.clubsHome'), {'clubContext': clubContext})
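# Expected CSV line format for doImportClubs (illustrative, hypothetical values):
#   Name,Number,Area,Division,District
#   e.g. "Sunrise Speakers,1234567,A1,D,61"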
| 37.43038
| 121
| 0.670274
|
4a72082d0576df7e5e9b2ac28436950734e24d3d
| 253
|
py
|
Python
|
reporter/__init__.py
|
josevnz/rpm_query
|
c84b40232a8284a02b475baadf7ed2eb0d789899
|
[
"Apache-2.0"
] | null | null | null |
reporter/__init__.py
|
josevnz/rpm_query
|
c84b40232a8284a02b475baadf7ed2eb0d789899
|
[
"Apache-2.0"
] | null | null | null |
reporter/__init__.py
|
josevnz/rpm_query
|
c84b40232a8284a02b475baadf7ed2eb0d789899
|
[
"Apache-2.0"
] | 1
|
2021-12-03T05:30:36.000Z
|
2021-12-03T05:30:36.000Z
|
__version__ = "0.0.1"
def __is_valid_limit__(limit: str) -> int:
try:
int_limit = int(limit)
if int_limit <= 0:
raise ValueError(f"Invalid limit!: {limit}")
return int_limit
except ValueError:
raise
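# Minimal usage sketch (illustrative):
#
#   __is_valid_limit__("10")   # -> 10
#   __is_valid_limit__("0")    # raises ValueError: Invalid limit!: 0
#   __is_valid_limit__("abc")  # raises ValueError (from int())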
| 21.083333
| 56
| 0.581028
|
bd6f062feb56351b8bab3b1aa1a99eaef71af2a3
| 135
|
py
|
Python
|
fl_blog/main/views.py
|
anshgandhi/flask_blogging
|
518305d602e9834744b89f96e1e230b6b24f9ca1
|
[
"MIT"
] | null | null | null |
fl_blog/main/views.py
|
anshgandhi/flask_blogging
|
518305d602e9834744b89f96e1e230b6b24f9ca1
|
[
"MIT"
] | null | null | null |
fl_blog/main/views.py
|
anshgandhi/flask_blogging
|
518305d602e9834744b89f96e1e230b6b24f9ca1
|
[
"MIT"
] | null | null | null |
from flask import render_template
from fl_blog.main import main
@main.route('/')
def index():
return render_template('index.html')
| 22.5
| 40
| 0.755556
|
076f782ac349984b22e0c33d0750faea62364f06
| 1,226
|
py
|
Python
|
demo_shop/demo_shop/urls.py
|
Bincha3000/Django-shop
|
e2ea4df50ea3c542f1a0b9b6881be5c9cd8261d8
|
[
"MIT"
] | null | null | null |
demo_shop/demo_shop/urls.py
|
Bincha3000/Django-shop
|
e2ea4df50ea3c542f1a0b9b6881be5c9cd8261d8
|
[
"MIT"
] | 10
|
2020-02-12T00:49:15.000Z
|
2022-02-10T09:20:02.000Z
|
demo_shop/demo_shop/urls.py
|
gladunvv/django-shop
|
e2ea4df50ea3c542f1a0b9b6881be5c9cd8261d8
|
[
"MIT"
] | null | null | null |
"""demo_shop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('cart/', include('cart.urls', namespace='cart')),
path('user/', include('user.urls', namespace='user')),
path('', include('products_list.urls', namespace='shop')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
path('__debug__/', include(debug_toolbar.urls)),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 37.151515
| 80
| 0.704731
|
230a8a9d263188fc28471220e948c925746caf1f
| 1,807
|
py
|
Python
|
src/inmanta/execute/tracking.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 6
|
2021-03-09T10:24:02.000Z
|
2022-01-16T03:52:11.000Z
|
src/inmanta/execute/tracking.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 1,319
|
2020-12-18T08:52:29.000Z
|
2022-03-31T18:17:32.000Z
|
src/inmanta/execute/tracking.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 4
|
2021-03-03T15:36:50.000Z
|
2022-03-11T11:41:51.000Z
|
"""
Copyright 2017 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
"""
This module enables tracking of object construction.
Tracker objects are attached to all created Instances in the field trackers. Every Instance can have one or more Trackers.
If the Tracker object is a ModuleTracker, the object is created at module level
If the Tracker object is an ImplementsTracker, the object is created in an Implementation block
"""
from typing import TYPE_CHECKING, List
if TYPE_CHECKING:
from inmanta.ast.blocks import BasicBlock
from inmanta.ast.statements.generator import SubConstructor
from inmanta.execute.runtime import Instance
class Tracker(object):
def get_next(self) -> "List[Tracker]":
return []
class ModuleTracker(Tracker):
def __init__(self, block: "BasicBlock") -> None:
self.block = block
self.namespace = block.namespace
class ImplementsTracker(Tracker):
def __init__(self, subc: "SubConstructor", instance: "Instance") -> None:
self.instance = instance
self.subc = subc
self.implements = subc.implements
self.implementations = self.implements.implementations
def get_next(self) -> "List[Tracker]":
return self.instance.trackers
| 31.155172
| 121
| 0.727726
|
5ee1c929b84bacd5912c86bf87e6d330fdbd1728
| 3,907
|
py
|
Python
|
setup.py
|
mothsART/linkmanager
|
5213eb2757e76caaf919bc1f5158af7830fb4165
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
mothsART/linkmanager
|
5213eb2757e76caaf919bc1f5158af7830fb4165
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
mothsART/linkmanager
|
5213eb2757e76caaf919bc1f5158af7830fb4165
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
try:
# python 3
from urllib import request
except:
# python 2
import urllib as request
from setuptools import setup, find_packages
from setuptools.command import easy_install
from setuptools.command.test import test as TestCommand
from linkmanager import (
__appname__, __version__,
__website__,
__licence__, __author__
)
base = os.path.dirname(__file__)
readme = open(os.path.join(base, 'README.rst')).readlines()
readme = "".join(readme[:12] + readme[34:])
changelog = open(os.path.join(base, 'HISTORY.rst')).read()
# use this option (as the last argument) when building via debian rules: createdeb
print(sys.argv[-1])
if sys.argv[-1] == 'createdeb':
sys.argv.pop()
else:
clint_archive = request.urlopen(
"http://github.com/mothsART/clint/archive/master.zip"
)
output = open('clint.zip', 'wb')
output.write(clint_archive.read())
output.close()
easy_install.main(['-U', 'clint.zip'])
required = []
dlinks = []
r_file = 'python2_requirements.txt'
if sys.version_info[0] == 3:
r_file = 'python3_requirements.txt'
if sys.version_info[1] >= 3:
r_file = 'base.txt'
with open(
os.path.join(base, 'requirements', r_file)
) as f:
required = f.read().splitlines()
for line in required:
if line.startswith('-r '):
required.remove(line)
with open(os.path.join(base, 'requirements', line[3:])) as f:
required += f.read().splitlines()
elif line.startswith('-e '):
required.remove(line)
a = __author__
author = a[:a.find("<") - 1]
author_email = a[a.find("<") + 1:-1]
man_path = '/usr/share/man/man1/'
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = [
'-vv', 'linkmanager/tests/tests.py',
'--cov=linkmanager',
'--cov-report', 'term-missing'
]
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name=__appname__,
version=__version__,
description='Manage your link on terminal',
long_description=readme + '\n' + changelog
+ '\n\n.. _pip: http://www.pip-installer.org/', # + '\n' + todo
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
'Topic :: Terminals :: Terminal Emulators/X Terminals',
],
keywords='manager link links URL prompt shell',
platforms=["Linux"],
author=author,
author_email=author_email,
url=__website__,
license=__licence__,
packages=find_packages(exclude=['tests']),
include_package_data=True,
scripts=['linkm'],
data_files=[
('/etc/', ['linkmanager.conf']),
(man_path, ['docs/linkmanager.1.gz']),
# symlink : man linkm == man linkmanager
(man_path, ['docs/linkm.1.gz']),
('/usr/bin/', ['linkmanager.zsh'])
],
install_requires=required,
tests_require=['pytest'],
cmdclass={'test': PyTest},
zip_safe=True
)
if sys.argv != ['-c', 'egg_info', '--egg-base', 'pip-egg-info']:
exit(0)
bashrc = '/etc/bash.bashrc'
zshrc = '/etc/zsh/zshrc'
bash_cmd = 'eval "$(register-python-argcomplete linkm)"\n'
zsh_cmd = "source linkmanager.zsh\n"
if os.path.isfile(bashrc):
with open(bashrc, 'r+') as f:
readlines = f.readlines()
if bash_cmd not in readlines:
f.write(bash_cmd)
if os.path.isfile(zshrc):
with open(zshrc, 'r+') as f:
readlines = f.readlines()
if zsh_cmd not in readlines:
f.write(zsh_cmd)
# os.popen('$SHELL')
| 27.70922
| 69
| 0.620425
|
8258bb63c4178ba9abc1b609e83f9aeb2da19a17
| 3,877
|
py
|
Python
|
summary/sumy/sklearn/datasets/california_housing.py
|
WangWenjun559/MITS
|
8d7ace2b3b2a58fb33af225c2997106d9402aaf5
|
[
"Apache-2.0"
] | 6
|
2015-07-12T16:17:58.000Z
|
2017-06-23T04:00:43.000Z
|
summary/sumy/sklearn/datasets/california_housing.py
|
WangWenjun559/MITS
|
8d7ace2b3b2a58fb33af225c2997106d9402aaf5
|
[
"Apache-2.0"
] | null | null | null |
summary/sumy/sklearn/datasets/california_housing.py
|
WangWenjun559/MITS
|
8d7ace2b3b2a58fb33af225c2997106d9402aaf5
|
[
"Apache-2.0"
] | 4
|
2017-04-19T09:05:26.000Z
|
2020-05-20T21:54:25.000Z
|
"""California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
from zipfile import ZipFile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from ..externals import joblib
DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\
"file=index&req=getit&lid=83"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : ndarray, shape [20640, 8]
Each row corresponding to the 8 feature values in order.
dataset.target : numpy array of shape (20640,)
Each value corresponds to the average house value in units of 100,000.
dataset.feature_names : array of length 8
Array of ordered feature names used in the dataset.
dataset.DESCR : string
Description of the California housing dataset.
Notes
------
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
try:
cadata_fd = zip_file.open('cadata.txt', 'r')
cadata = BytesIO(cadata_fd.read())
# skip the first 27 lines (documentation)
cal_housing = np.loadtxt(cadata, skiprows=27)
joblib.dump(cal_housing, join(data_home, TARGET_FILENAME),
compress=6)
finally:
zip_file.close()
else:
cal_housing = joblib.load(join(data_home, TARGET_FILENAME))
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
    # avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
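# Minimal usage sketch (illustrative; downloads the data on first use):
#
#   from sklearn.datasets import fetch_california_housing
#   housing = fetch_california_housing()
#   housing.data.shape          # (20640, 8)
#   housing.feature_names[:2]   # ['MedInc', 'HouseAge']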
| 30.527559
| 79
| 0.668816
|
f006a4263012ea59a55e922d641a1225d855011d
| 199
|
py
|
Python
|
pages/admin.py
|
BirdOnTheBranch/web-pictures
|
1dbe655df4ab4546f1f8a6863de55b09bf9886a8
|
[
"MIT"
] | 1
|
2020-05-08T08:42:20.000Z
|
2020-05-08T08:42:20.000Z
|
pages/admin.py
|
BirdOnTheBranch/web-pictures
|
1dbe655df4ab4546f1f8a6863de55b09bf9886a8
|
[
"MIT"
] | null | null | null |
pages/admin.py
|
BirdOnTheBranch/web-pictures
|
1dbe655df4ab4546f1f8a6863de55b09bf9886a8
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Page
class PagesAdmin(admin.ModelAdmin):
readonly_fields = ('created', 'updated', 'slug', 'likes')
admin.site.register(Page, PagesAdmin)
| 18.090909
| 61
| 0.738693
|
124f837e9fba48ce2ba03d6fba322f721d74f2c5
| 7,088
|
py
|
Python
|
tensorflow/tools/compatibility/tf_upgrade_v2_main.py
|
scentini/tensorflow
|
204ed332c0886a0e0ab10b22ba8d67b97e1c83c4
|
[
"Apache-2.0"
] | 56
|
2018-06-21T13:47:23.000Z
|
2020-05-13T09:31:47.000Z
|
tensorflow/tools/compatibility/tf_upgrade_v2_main.py
|
scentini/tensorflow
|
204ed332c0886a0e0ab10b22ba8d67b97e1c83c4
|
[
"Apache-2.0"
] | 5
|
2020-06-01T18:50:38.000Z
|
2021-07-16T07:13:52.000Z
|
tensorflow/tools/compatibility/tf_upgrade_v2_main.py
|
scentini/tensorflow
|
204ed332c0886a0e0ab10b22ba8d67b97e1c83c4
|
[
"Apache-2.0"
] | 15
|
2018-09-06T14:18:32.000Z
|
2020-05-14T06:35:30.000Z
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.x TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import six
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import ipynb
from tensorflow.tools.compatibility import tf_upgrade_v2
from tensorflow.tools.compatibility import tf_upgrade_v2_safety
# Make straightforward changes to convert to 2.0. In harder cases,
# use compat.v1.
_DEFAULT_MODE = "DEFAULT"
# Convert to use compat.v1.
_SAFETY_MODE = "SAFETY"
def process_file(in_filename, out_filename, upgrader):
"""Process a file of type `.py` or `.ipynb`."""
if six.ensure_str(in_filename).endswith(".py"):
files_processed, report_text, errors = \
upgrader.process_file(in_filename, out_filename)
elif six.ensure_str(in_filename).endswith(".ipynb"):
files_processed, report_text, errors = \
ipynb.process_file(in_filename, out_filename, upgrader)
else:
raise NotImplementedError(
"Currently converter only supports python or ipynb")
return files_processed, report_text, errors
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file from 1.x to 2.0
Simple usage:
tf_upgrade_v2.py --infile foo.py --outfile bar.py
tf_upgrade_v2.py --infile foo.ipynb --outfile bar.ipynb
tf_upgrade_v2.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
type=bool,
default=True)
parser.add_argument(
"--inplace",
dest="in_place",
help=("If converting a set of files, whether to "
"allow the conversion to be performed on the "
"input files."),
action="store_true")
parser.add_argument(
"--import_rename",
dest="import_rename",
help=("Whether to rename import to compact.v2 explicitly."),
action="store_true")
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
parser.add_argument(
"--mode",
dest="mode",
choices=[_DEFAULT_MODE, _SAFETY_MODE],
help=("Upgrade script mode. Supported modes:\n"
"%s: Perform only straightforward conversions to upgrade to "
"2.0. In more difficult cases, switch to use compat.v1.\n"
"%s: Keep 1.* code intact and import compat.v1 "
"module." %
(_DEFAULT_MODE, _SAFETY_MODE)),
default=_DEFAULT_MODE)
parser.add_argument(
"--print_all",
dest="print_all",
help="Print full log to stdout instead of just printing errors",
action="store_true")
args = parser.parse_args()
if args.mode == _SAFETY_MODE:
change_spec = tf_upgrade_v2_safety.TFAPIChangeSpec()
else:
change_spec = tf_upgrade_v2.TFAPIChangeSpec(args.import_rename)
upgrade = ast_edits.ASTCodeUpgrader(change_spec)
report_text = None
report_filename = args.report_filename
files_processed = 0
if args.input_file:
if not args.in_place and not args.output_file:
raise ValueError(
"--outfile=<output file> argument is required when converting a "
"single file.")
if args.in_place and args.output_file:
raise ValueError(
"--outfile argument is invalid when when converting in place")
output_file = args.input_file if args.in_place else args.output_file
files_processed, report_text, errors = process_file(
args.input_file, output_file, upgrade)
errors = {args.input_file: errors}
files_processed = 1
elif args.input_tree:
if not args.in_place and not args.output_tree:
raise ValueError(
"--outtree=<output directory> argument is required when converting a "
"file tree.")
if args.in_place and args.output_tree:
raise ValueError(
"--outtree argument is invalid when when converting in place")
output_tree = args.input_tree if args.in_place else args.output_tree
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
num_errors = 0
report = []
for f in errors:
if errors[f]:
num_errors += len(errors[f])
report.append(six.ensure_str("-" * 80) + "\n")
report.append("File: %s\n" % f)
report.append(six.ensure_str("-" * 80) + "\n")
report.append("\n".join(errors[f]) + "\n")
report = ("TensorFlow 2.0 Upgrade Script\n"
"-----------------------------\n"
"Converted %d files\n" % files_processed +
"Detected %d issues that require attention" % num_errors + "\n" +
six.ensure_str("-" * 80) + "\n") + "".join(report)
detailed_report_header = six.ensure_str("=" * 80) + "\n"
detailed_report_header += "Detailed log follows:\n\n"
detailed_report_header += six.ensure_str("=" * 80) + "\n"
with open(report_filename, "w") as report_file:
report_file.write(report)
report_file.write(detailed_report_header)
report_file.write(six.ensure_str(report_text))
if args.print_all:
print(report)
print(detailed_report_header)
print(report_text)
else:
print(report)
print("\nMake sure to read the detailed log %r\n" % report_filename)
if __name__ == "__main__":
main()
| 35.79798
| 80
| 0.665068
|
1f694e3091d7a2bcfe7fb01f9d91b9c9d35d4067
| 949
|
py
|
Python
|
neutrino-winds/tests/test_Adiabatic_wind_solver.py
|
colbrydi/neutrino-winds
|
0088a0568841cda00ee8303b797d05be9feab844
|
[
"BSD-3-Clause"
] | null | null | null |
neutrino-winds/tests/test_Adiabatic_wind_solver.py
|
colbrydi/neutrino-winds
|
0088a0568841cda00ee8303b797d05be9feab844
|
[
"BSD-3-Clause"
] | null | null | null |
neutrino-winds/tests/test_Adiabatic_wind_solver.py
|
colbrydi/neutrino-winds
|
0088a0568841cda00ee8303b797d05be9feab844
|
[
"BSD-3-Clause"
] | null | null | null |
import os, sys
sys.path.append("")
import Adiabatic_wind_solver as aws
import pytest
import numpy as np
def test_pc():
'''Simple test of the percent change function'''
s=aws.solver()
assert s.percentChange(np.array([10,0]),np.array([11,0]))==10.0
def test_dw():
'''Checks that the isothermal (base) case with gamma=1 has no change in temperature'''
s=aws.solver()
assert s.dw(1,[2,3,4])==0
def test_generateFunc():
'''Tests that the RK integrator is working correctly for the isothermal case'''
s=aws.solver()
assert s.generateFunc(np.array([1,.001,0]))[2][-1]==pytest.approx(0.7145761164770619)
def test_findV0():
'''Checks that the correct v0 is found for the adiabatic and isothermal cases'''
s=aws.solver()
assert s.findV0(.004,.006,.0001)==pytest.approx(0.005075319385528564)
s1=aws.solver(5/3,.1)
assert s1.findV0(.001,.003,.0001)==pytest.approx(0.0024991476535797122)
| 31.633333
| 90
| 0.679663
|