Dataset schema:

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
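The dump does not say how rows are serialized; as a minimal sketch, assuming the rows below are available as JSON Lines with the column names above, a filter on the documentation score could look like this (the file name `python_subset.jsonl` and the helper are hypothetical):

```python
import json

ROWS_FILE = "python_subset.jsonl"  # hypothetical export of the rows shown below

def iter_documented_rows(path, min_score=0.4):
    """Yield (repo, file path, content) for Python rows whose
    score_documentation is at least min_score."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            if row.get("lang") != "Python":
                continue
            if row.get("score_documentation", 0.0) >= min_score:
                yield (row["max_stars_repo_name"],
                       row["max_stars_repo_path"],
                       row["content"])

if __name__ == "__main__":
    for repo, file_path, content in iter_documented_rows(ROWS_FILE):
        print(repo, file_path, len(content))
```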
hexsha: 6683d7523bb35e6eea7af58dcc94e299c8b5221f | size: 523 | ext: py | lang: Python
path: patterns/adapter/app.py | repo: mattskone/head-first-design-patterns | head: 3f0d3a5c39475b418f09e2c45505f88fa673dd41 | licenses: ["MIT"]
stars: null | issues: 1 (2015-01-13T17:19:19.000Z to 2015-03-11T16:02:28.000Z) | forks: null
#!/usr/bin/env python
from implementations import MallardDuck, WildTurkey, TurkeyAdapter
if __name__ == '__main__':
d = MallardDuck()
print '\nThe Duck says...'
d.quack()
d.fly()
t = WildTurkey()
print '\nThe Turkey says...'
t.gobble()
t.fly()
# Now we use the adapter to show how a Turkey can be made to
# behave like a Duck (expose the same methods, and fly the same
# distance):
td = TurkeyAdapter(t)
print '\nThe TurkeyAdapter says...'
td.quack()
td.fly()
avg_line_length: 23.772727 | max_line_length: 67 | alphanum_fraction: 0.625239
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 237/0.453155
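The `implementations` module imported above is not part of this record. A minimal sketch of what the three classes could look like, purely to illustrate the Adapter pattern the in-file comment describes (the class bodies and the repeated-fly loop are assumptions, not the repository's actual code):

```python
class MallardDuck:
    def quack(self):
        print("Quack")

    def fly(self):
        print("I'm flying a long distance")


class WildTurkey:
    def gobble(self):
        print("Gobble gobble")

    def fly(self):
        print("I'm flying a short distance")


class TurkeyAdapter:
    """Wrap a turkey so it exposes the duck interface."""

    def __init__(self, turkey):
        self.turkey = turkey

    def quack(self):
        # Translate the duck call into the turkey equivalent.
        self.turkey.gobble()

    def fly(self):
        # A turkey flies short hops, so repeat the hop to cover
        # roughly the same distance as a duck.
        for _ in range(5):
            self.turkey.fly()
```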
hexsha: 66849fe8ffb1c558532c4307c57805110b8abc4c | size: 134 | ext: py | lang: Python
path: app/config/task.py | repo: atulmishra-one/dairy_management_portal | head: a07320dc0f4419d4c78f7d2453c63b1c9544aba8 | licenses: ["MIT"]
stars: 2 (2020-08-02T10:06:19.000Z to 2022-03-29T06:10:57.000Z) | issues: null | forks: 2 (2019-02-03T15:44:02.000Z to 2021-03-09T07:30:28.000Z)
CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_IMPORTS=('app.users.tasks')
avg_line_length: 26.8 | max_line_length: 50 | alphanum_fraction: 0.768657
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 69/0.514925
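These are old-style uppercase Celery settings; a minimal sketch of how such a module could be wired into a Celery app (the app name and the place this wiring lives are assumptions, not taken from the repository):

```python
from celery import Celery

# Hypothetical application name; the record only contains the settings module.
celery_app = Celery("dairy_management_portal")

# Load CELERY_BROKER_URL, CELERY_RESULT_BACKEND and CELERY_IMPORTS
# from the module shown above.
celery_app.config_from_object("app.config.task")

# A worker started with this app now talks to redis://localhost:6379/0
# and imports the tasks declared in app.users.tasks.
```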
hexsha: 6684d6354c57bdba0d562fbf5c959a7bb01edb22 | size: 5,697 | ext: py | lang: Python
path: GCR.py | repo: goodot/character-recognition | head: 71cd3664670ec2d672d344e8b1842ce3c3ff47d5 | licenses: ["Apache-2.0"]
stars: 1 (2019-04-25T10:34:21.000Z to 2019-04-25T10:34:21.000Z) | issues: null | forks: null
# -*- coding: utf-8 -*-
from PIL import Image
from numpy import array
import sqlite3
import tkMessageBox
import matplotlib.pyplot as plt
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.structure.modules import TanhLayer
from pybrain.structure.modules import SigmoidLayer
# global db, x, dimage, image,alphabet
alphabet = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j',
10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r', 18: 's',
19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y', 25: 'z'}
class Sample:
def __init__(self, Input, Target, Id=None):
self.Id = Id
self.Input = Input
self.Target = Target
def __eq__(self, other):
return isinstance(other, self.__class__) and self.Input == other.Input and self.Target == other.Target
def __ne__(self, other):
return not self.__eq__(other)
def getInput(self):
inp = self.Input.split(',')
return [int(i) for i in inp]
def getTarget(self):
tar = self.Target.split(',')
print tar
return [int(i) for i in tar]
class Params:
def __init__(self, Weights, ID=None):
self.ID = ID
self.Weights = Weights
def __eq__(self, other):
return isinstance(other, self.__class__) and self.Weights == other.Weights
def __ne__(self, other):
return not self.__eq__(other)
def getWeights(self):
w = self.Weights.split(',')
return [float(i) for i in w]
def getcharkey(char):
for key, ch in alphabet.iteritems():
if ch.decode('utf-8') == char:
return key
def init():
global samples, db
# caching samples
samples = []
db = sqlite3.connect('data.db')
cursor = db.cursor()
rows = cursor.execute('SELECT *FROM samples')
rows = rows.fetchall()
for r in rows:
sample = Sample(r[1], r[2])
samples.append(sample)
global net, ds, trainer
ins = 256
hids = ins * 2 / 3
outs = 26
net = buildNetwork(ins, hids, outs, bias=True, outclass=SoftmaxLayer)
ds = SupervisedDataSet(ins, outs)
rows = cursor.execute('SELECT * FROM parameters')
rows = rows.fetchall()
params_list = []
for r in rows:
params = Params(r[1])
params_list.append(params)
if len(params_list) != 0:
params = params_list[len(params_list) - 1]
net._setParameters(params.getWeights())
trainer = BackpropTrainer(net, ds)
if len(samples) > 0:
for s in samples:
ds.addSample(s.getInput(), s.getTarget())
def which(dim):
dim = makelist(dim)
# print dim
out = net.activate(dim)
index = out.argmax()
print alphabet[index]
print str(out[index] * 100) + '%'
# print [i for i in out]
plt.clf()
plt.title("Graph")
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
x = range(26)
plt.xticks(x, labels)
plt.bar(x, out)
plt.show()
def train():
error = 10
it = 0
iterations = []
errors = []
while error > 0.00001:
error = trainer.train()
it += 1
print "Iteration: " + str(it), "Error: " + str(error)
iterations.append(it)
errors.append(error)
params = makestring(net.params)
cursor = db.cursor()
cursor.execute("INSERT INTO parameters (Weights) VALUES (?)", (params,))
db.commit()
plt.clf()
plt.xlabel("Iterations")
plt.ylabel("Errors")
plt.title("Error Graph")
plt.plot(iterations, errors)
plt.show()
print 'training finished'
def close():
db.close()
def average(numlist):
return sum(numlist) / len(numlist)
def blackwhite(dim):
dim = dim.tolist()
imrow = []
im = []
for i in dim:
for j in i:
imrow.append(average(j))
im.append(imrow)
imrow = []
dim = array(im)
return dim
def makestring(dim):
string = [str(i) for i in dim]
string = ','.join(string)
return string
def makelist(dim):
lst = []
for i in dim:
for j in i:
lst.append(j)
return lst
def addSample(sample):
samples.append(sample)
ds.addSample(sample.getInput(), sample.getTarget())
cursor = db.cursor()
cursor.execute("INSERT INTO samples (Input,Target) VALUES (?,?)", [sample.Input, sample.Target])
db.commit()
def getUpRow(dimage):
x = dimage.shape[0]
y = dimage.shape[1]
for i in range(x):
for j in range(y):
if average(dimage[i][j]) < 255:
return i
def getLeftCol(dimage):
x = dimage.shape[0]
y = dimage.shape[1]
for j in range(y):
for i in range(x):
if average(dimage[i][j]) < 255:
return j
def getDownRow(dimage):
x = dimage.shape[0]
y = dimage.shape[1]
for i in range(x - 1, -1, -1):
for j in range(y - 1, -1, -1):
if average(dimage[i][j]) < 255:
return i
def getRightCol(dimage):
x = dimage.shape[0]
y = dimage.shape[1]
for j in range(y - 1, -1, -1):
for i in range(x - 1, -1, -1):
if average(dimage[i][j]) < 255:
return j
def getBox(dimage):
rowUp = getUpRow(dimage)
colLeft = getLeftCol(dimage)
rowDown = getDownRow(dimage)
colRight = getRightCol(dimage)
return (colLeft, rowUp, colRight, rowDown)
init()
avg_line_length: 22.429134 | max_line_length: 110 | alphanum_fraction: 0.566438
count/score: classes 951/0.16693 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 523/0.091803
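`Sample` above stores network inputs and targets as comma-separated strings, and `getTarget()` splits them back into a list of ints; a small illustrative helper (not part of the file) showing how a 26-way one-hot target string for a letter could be built in that format:

```python
ALPHABET = "abcdefghijklmnopqrstuvwxyz"

def one_hot_target(char):
    """Return a comma-separated 26-element one-hot string for `char`,
    matching the format that Sample.getTarget() splits and casts to int."""
    return ",".join("1" if c == char else "0" for c in ALPHABET)

# The target string for 'c' has a single 1 in position 2.
print(one_hot_target("c"))
```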
hexsha: 6686b772848e4502d8bad3bd405870762f442216 | size: 2,966 | ext: py | lang: Python
path: grano/logic/projects.py | repo: ANCIR/grano | head: cee2ec1974df5df2bc6ed5e214f6bd5d201397a4 | licenses: ["MIT"]
stars: 30 (2018-08-23T15:42:17.000Z to 2021-11-16T13:11:36.000Z) | issues: null | forks: 5 (2019-05-30T11:36:53.000Z to 2021-08-11T16:17:14.000Z)
import colander
from datetime import datetime
from grano.core import app, db, celery
from grano.logic.validation import database_name
from grano.logic.references import AccountRef
from grano.plugins import notify_plugins
from grano.model import Project
def validate(data, project):
same_project = lambda s: Project.by_slug(s) == project
same_project = colander.Function(same_project, message="Project exists")
class ProjectValidator(colander.MappingSchema):
slug = colander.SchemaNode(colander.String(),
validator=colander.All(database_name,
same_project))
label = colander.SchemaNode(colander.String(),
validator=colander.Length(min=3))
private = colander.SchemaNode(colander.Boolean(),
missing=False)
author = colander.SchemaNode(AccountRef())
settings = colander.SchemaNode(colander.Mapping(),
missing={})
validator = ProjectValidator()
return validator.deserialize(data)
@celery.task
def _project_changed(project_slug, operation):
""" Notify plugins about changes to a relation. """
def _handle(obj):
obj.project_changed(project_slug, operation)
notify_plugins('grano.project.change', _handle)
def save(data, project=None):
""" Create or update a project with a given slug. """
data = validate(data, project)
operation = 'create' if project is None else 'update'
if project is None:
project = Project()
project.slug = data.get('slug')
project.author = data.get('author')
from grano.logic import permissions as permissions_logic
permissions_logic.save({
'account': data.get('author'),
'project': project,
'admin': True
})
project.settings = data.get('settings')
project.label = data.get('label')
project.private = data.get('private')
project.updated_at = datetime.utcnow()
db.session.add(project)
# TODO: make this nicer - separate files?
from grano.logic.schemata import import_schema
with app.open_resource('fixtures/base.yaml') as fh:
import_schema(project, fh)
db.session.flush()
_project_changed(project.slug, operation)
return project
def delete(project):
""" Delete the project and all related data. """
_project_changed(project.slug, 'delete')
db.session.delete(project)
def truncate(project):
""" Delete all entities and relations from this project,
but leave the project, schemata and attributes intact. """
from grano.logic import relations
from grano.logic import entities
project.updated_at = datetime.utcnow()
for relation in project.relations:
relations.delete(relation)
for entity in project.entities:
entities.delete(entity)
avg_line_length: 31.892473 | max_line_length: 76 | alphanum_fraction: 0.650371
count/score: classes 644/0.217127 | generators 0/0 | decorators 242/0.081591 | async_functions 0/0 | documentation 467/0.157451
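`validate()` above builds a colander schema on the fly and calls `deserialize()` on the incoming dict; a minimal standalone sketch of that pattern (the fields and values here are illustrative, not grano's real validator, which also checks the slug against the database and resolves the author account):

```python
import colander

class ProjectInput(colander.MappingSchema):
    slug = colander.SchemaNode(colander.String(),
                               validator=colander.Length(min=3))
    label = colander.SchemaNode(colander.String(),
                                validator=colander.Length(min=3))
    private = colander.SchemaNode(colander.Boolean(), missing=False)

try:
    data = ProjectInput().deserialize({"slug": "my-project", "label": "My Project"})
    print(data)  # {'slug': 'my-project', 'label': 'My Project', 'private': False}
except colander.Invalid as err:
    # deserialize() raises colander.Invalid with per-field messages
    print(err.asdict())
```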
hexsha: 6686c68bcf9dc01f99b52c42230df5b834e570c1 | size: 63 | ext: py | lang: Python
path: code/yahoo_procon2019_qual_a_02.py | repo: KoyanagiHitoshi/AtCoder | head: 731892543769b5df15254e1f32b756190378d292 | licenses: ["MIT"]
stars: 3 (2019-08-16T16:55:48.000Z to 2021-04-11T10:21:40.000Z) | issues: null | forks: null
N,K=map(int,input().split())
print("YES" if N>=2*K-1 else "NO")
avg_line_length: 31.5 | max_line_length: 34 | alphanum_fraction: 0.603175
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 9/0.142857
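The one-liner prints YES exactly when N >= 2K - 1; a couple of worked values as a quick illustration (the wrapper function is only for readability, not part of the submission):

```python
def judge(n, k):
    # Same condition as the submission above.
    return "YES" if n >= 2 * k - 1 else "NO"

# With K = 3 the threshold is 2*3 - 1 = 5:
print(judge(5, 3))  # YES
print(judge(4, 3))  # NO
```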
hexsha: 668a69950d894c5be476b21543db749add8b52d5 | size: 180 | ext: py | lang: Python
path: allauth/socialaccount/providers/pivotaltracker/urls.py | repo: rawjam/django-allauth | head: 2daa33178aa1ab749581c494f4c39e1c72ad5c7b | licenses: ["MIT"]
stars: null | issues: null | forks: null
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from provider import PivotalTrackerProvider
urlpatterns = default_urlpatterns(PivotalTrackerProvider)
avg_line_length: 30 | max_line_length: 75 | alphanum_fraction: 0.888889
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 0/0
hexsha: 668cea27bdbc4f6209d2380260dbf5312ca4bad1 | size: 2,944 | ext: py | lang: Python
path: Dorta/sales_modification/wizard/sale_order_popup.py | repo: aaparicio87/Odoo12 | head: 25cfc349b2e85fa1b5f5846ffe693029f77b3b7d | licenses: ["MIT"]
stars: null | issues: null | forks: null
from odoo import fields, models, api, _
from odoo.exceptions import UserError
class SaleOrderPopup(models.TransientModel):
_name = 'sale.order.popup'
@api.multi
def popup_button(self):
for rec in self.env['sale.order'].browse(self._context.get('active_id')):
if rec._get_forbidden_state_confirm() & set(rec.mapped('state')):
raise UserError(_(
'It is not allowed to confirm an order in the following states: %s'
) % (', '.join(rec._get_forbidden_state_confirm())))
for order in rec.filtered(lambda order: order.partner_id not in order.message_partner_ids):
order.message_subscribe([order.partner_id.id])
rec.write({
'state': 'sale',
'confirmation_date': fields.Datetime.now()
})
rec._action_confirm()
if self.env['ir.config_parameter'].sudo().get_param('sale.auto_done_setting'):
rec.action_done()
return True
class Quotation_Send_Popup(models.TransientModel):
_name = 'quotation.send.popup'
@api.multi
def action_quotation_send_popup(self):
for rec in self.env['sale.order'].browse(self._context.get('active_id')):
ir_model_data = self.env['ir.model.data']
try:
template_id = ir_model_data.get_object_reference('sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
lang = rec.env.context.get('lang')
template = template_id and self.env['mail.template'].browse(template_id)
if template and template.lang:
lang = template._render_template(template.lang, 'sale.order', rec.ids[0])
ctx = {
'default_model': 'sale.order',
'default_res_id': rec.ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True,
'model_description': rec.with_context(lang=lang).type_name,
'custom_layout': "mail.mail_notification_paynow",
'proforma': rec.env.context.get('proforma', False),
'force_email': True
}
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
return True
avg_line_length: 42.057143 | max_line_length: 116 | alphanum_fraction: 0.567935
count/score: classes 2,862/0.972147 | generators 0/0 | decorators 2,690/0.913723 | async_functions 0/0 | documentation 737/0.25034
hexsha: 668da6a3dfe98b38ca927b8c9945a7980761c6b8 | size: 830 | ext: py | lang: Python
path: tyson-py/udp-echo.py | repo: asheraryam/tyson | head: 44317a4e3367ef4958c3bb8d3ad538a3908a4566 | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""UDP hole punching server."""
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import sys
DEFAULT_PORT = 4000
class ServerProtocol(DatagramProtocol):
def datagramReceived(self, datagram, address):
"""Handle incoming datagram messages."""
print(datagram)
# data_string = datagram.decode("utf-8")
# msg_type = data_string[:2]
ip, port = address
for i in range(0, 3):
self.transport.write(bytes(str(port)), address, int(port) +i)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: ./server.py PORT")
port = DEFAULT_PORT
# sys.exit(1)
else:
port = int(sys.argv[1])
reactor.listenUDP(port, ServerProtocol())
print('Listening on *:%d' % (port))
reactor.run()
avg_line_length: 28.62069 | max_line_length: 73 | alphanum_fraction: 0.631325
count/score: classes 381/0.459036 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 206/0.248193
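The server above replies over UDP with the source port it observed, which is the piece of information a peer needs for hole punching; a minimal client sketch using only the standard library (the address is a placeholder, and this snippet is not part of the tyson repository):

```python
import socket

SERVER_ADDR = ("127.0.0.1", 4000)  # placeholder rendezvous server address

with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
    sock.settimeout(5.0)
    # Any datagram triggers the server's datagramReceived() handler.
    sock.sendto(b"hello", SERVER_ADDR)
    try:
        data, addr = sock.recvfrom(1024)
        print("server saw our source port as:", data.decode("utf-8", "replace"))
    except socket.timeout:
        print("no reply from server")
```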
hexsha: 668e417b3a6306ecd6bbd0fcf013eefd855c3921 | size: 12,972 | ext: py | lang: Python
path: src/fhir_types/FHIR_StructureMap_Source.py | repo: anthem-ai/fhir-types | head: 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | licenses: ["Apache-2.0"]
stars: 2 (2022-02-03T00:51:30.000Z to 2022-02-03T18:42:43.000Z) | issues: null | forks: null
from typing import Any, List, Literal, TypedDict
from .FHIR_Address import FHIR_Address
from .FHIR_Age import FHIR_Age
from .FHIR_Annotation import FHIR_Annotation
from .FHIR_Attachment import FHIR_Attachment
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_Coding import FHIR_Coding
from .FHIR_ContactDetail import FHIR_ContactDetail
from .FHIR_ContactPoint import FHIR_ContactPoint
from .FHIR_Contributor import FHIR_Contributor
from .FHIR_Count import FHIR_Count
from .FHIR_DataRequirement import FHIR_DataRequirement
from .FHIR_Distance import FHIR_Distance
from .FHIR_Dosage import FHIR_Dosage
from .FHIR_Duration import FHIR_Duration
from .FHIR_Element import FHIR_Element
from .FHIR_Expression import FHIR_Expression
from .FHIR_HumanName import FHIR_HumanName
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_integer import FHIR_integer
from .FHIR_Meta import FHIR_Meta
from .FHIR_Money import FHIR_Money
from .FHIR_ParameterDefinition import FHIR_ParameterDefinition
from .FHIR_Period import FHIR_Period
from .FHIR_Quantity import FHIR_Quantity
from .FHIR_Range import FHIR_Range
from .FHIR_Ratio import FHIR_Ratio
from .FHIR_Reference import FHIR_Reference
from .FHIR_RelatedArtifact import FHIR_RelatedArtifact
from .FHIR_SampledData import FHIR_SampledData
from .FHIR_Signature import FHIR_Signature
from .FHIR_string import FHIR_string
from .FHIR_Timing import FHIR_Timing
from .FHIR_TriggerDefinition import FHIR_TriggerDefinition
from .FHIR_UsageContext import FHIR_UsageContext
# A Map of relationships between 2 structures that can be used to transform data.
FHIR_StructureMap_Source = TypedDict(
"FHIR_StructureMap_Source",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Type or variable this rule applies to.
"context": FHIR_id,
# Extensions for context
"_context": FHIR_Element,
# Specified minimum cardinality for the element. This is optional; if present, it acts an implicit check on the input content.
"min": FHIR_integer,
# Extensions for min
"_min": FHIR_Element,
# Specified maximum cardinality for the element - a number or a "*". This is optional; if present, it acts an implicit check on the input content (* just serves as documentation; it's the default value).
"max": FHIR_string,
# Extensions for max
"_max": FHIR_Element,
# Specified type for the element. This works as a condition on the mapping - use for polymorphic elements.
"type": FHIR_string,
# Extensions for type
"_type": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueBase64Binary": str,
# Extensions for defaultValueBase64Binary
"_defaultValueBase64Binary": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueBoolean": bool,
# Extensions for defaultValueBoolean
"_defaultValueBoolean": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueCanonical": str,
# Extensions for defaultValueCanonical
"_defaultValueCanonical": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueCode": str,
# Extensions for defaultValueCode
"_defaultValueCode": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDate": str,
# Extensions for defaultValueDate
"_defaultValueDate": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDateTime": str,
# Extensions for defaultValueDateTime
"_defaultValueDateTime": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueDecimal": float,
# Extensions for defaultValueDecimal
"_defaultValueDecimal": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueId": str,
# Extensions for defaultValueId
"_defaultValueId": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueInstant": str,
# Extensions for defaultValueInstant
"_defaultValueInstant": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueInteger": float,
# Extensions for defaultValueInteger
"_defaultValueInteger": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueMarkdown": str,
# Extensions for defaultValueMarkdown
"_defaultValueMarkdown": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueOid": str,
# Extensions for defaultValueOid
"_defaultValueOid": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValuePositiveInt": float,
# Extensions for defaultValuePositiveInt
"_defaultValuePositiveInt": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueString": str,
# Extensions for defaultValueString
"_defaultValueString": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueTime": str,
# Extensions for defaultValueTime
"_defaultValueTime": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUnsignedInt": float,
# Extensions for defaultValueUnsignedInt
"_defaultValueUnsignedInt": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUri": str,
# Extensions for defaultValueUri
"_defaultValueUri": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUrl": str,
# Extensions for defaultValueUrl
"_defaultValueUrl": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueUuid": str,
# Extensions for defaultValueUuid
"_defaultValueUuid": FHIR_Element,
# A value to use if there is no existing value in the source object.
"defaultValueAddress": FHIR_Address,
# A value to use if there is no existing value in the source object.
"defaultValueAge": FHIR_Age,
# A value to use if there is no existing value in the source object.
"defaultValueAnnotation": FHIR_Annotation,
# A value to use if there is no existing value in the source object.
"defaultValueAttachment": FHIR_Attachment,
# A value to use if there is no existing value in the source object.
"defaultValueCodeableConcept": FHIR_CodeableConcept,
# A value to use if there is no existing value in the source object.
"defaultValueCoding": FHIR_Coding,
# A value to use if there is no existing value in the source object.
"defaultValueContactPoint": FHIR_ContactPoint,
# A value to use if there is no existing value in the source object.
"defaultValueCount": FHIR_Count,
# A value to use if there is no existing value in the source object.
"defaultValueDistance": FHIR_Distance,
# A value to use if there is no existing value in the source object.
"defaultValueDuration": FHIR_Duration,
# A value to use if there is no existing value in the source object.
"defaultValueHumanName": FHIR_HumanName,
# A value to use if there is no existing value in the source object.
"defaultValueIdentifier": FHIR_Identifier,
# A value to use if there is no existing value in the source object.
"defaultValueMoney": FHIR_Money,
# A value to use if there is no existing value in the source object.
"defaultValuePeriod": FHIR_Period,
# A value to use if there is no existing value in the source object.
"defaultValueQuantity": FHIR_Quantity,
# A value to use if there is no existing value in the source object.
"defaultValueRange": FHIR_Range,
# A value to use if there is no existing value in the source object.
"defaultValueRatio": FHIR_Ratio,
# A value to use if there is no existing value in the source object.
"defaultValueReference": FHIR_Reference,
# A value to use if there is no existing value in the source object.
"defaultValueSampledData": FHIR_SampledData,
# A value to use if there is no existing value in the source object.
"defaultValueSignature": FHIR_Signature,
# A value to use if there is no existing value in the source object.
"defaultValueTiming": FHIR_Timing,
# A value to use if there is no existing value in the source object.
"defaultValueContactDetail": FHIR_ContactDetail,
# A value to use if there is no existing value in the source object.
"defaultValueContributor": FHIR_Contributor,
# A value to use if there is no existing value in the source object.
"defaultValueDataRequirement": FHIR_DataRequirement,
# A value to use if there is no existing value in the source object.
"defaultValueExpression": FHIR_Expression,
# A value to use if there is no existing value in the source object.
"defaultValueParameterDefinition": FHIR_ParameterDefinition,
# A value to use if there is no existing value in the source object.
"defaultValueRelatedArtifact": FHIR_RelatedArtifact,
# A value to use if there is no existing value in the source object.
"defaultValueTriggerDefinition": FHIR_TriggerDefinition,
# A value to use if there is no existing value in the source object.
"defaultValueUsageContext": FHIR_UsageContext,
# A value to use if there is no existing value in the source object.
"defaultValueDosage": FHIR_Dosage,
# A value to use if there is no existing value in the source object.
"defaultValueMeta": FHIR_Meta,
# Optional field for this source.
"element": FHIR_string,
# Extensions for element
"_element": FHIR_Element,
# How to handle the list mode for this element.
"listMode": Literal["first", "not_first", "last", "not_last", "only_one"],
# Extensions for listMode
"_listMode": FHIR_Element,
# Named context for field, if a field is specified.
"variable": FHIR_id,
# Extensions for variable
"_variable": FHIR_Element,
# FHIRPath expression - must be true or the rule does not apply.
"condition": FHIR_string,
# Extensions for condition
"_condition": FHIR_Element,
# FHIRPath expression - must be true or the mapping engine throws an error instead of completing.
"check": FHIR_string,
# Extensions for check
"_check": FHIR_Element,
# A FHIRPath expression which specifies a message to put in the transform log when content matching the source rule is found.
"logMessage": FHIR_string,
# Extensions for logMessage
"_logMessage": FHIR_Element,
},
total=False,
)
avg_line_length: 56.4 | max_line_length: 836 | alphanum_fraction: 0.712458
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 8,419/0.649013
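Because `FHIR_StructureMap_Source` is declared with `total=False`, any subset of its keys type-checks; a minimal usage sketch (the import path mirrors the file path in this record, and the field values are made up):

```python
from fhir_types.FHIR_StructureMap_Source import FHIR_StructureMap_Source

# A partial StructureMap source element; omitted keys are allowed
# because the TypedDict is declared with total=False.
source: FHIR_StructureMap_Source = {
    "context": "src",
    "element": "name",
    "variable": "v",
    "min": 1,
    "max": "1",
}

print(source["context"])
```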
hexsha: 668f3e390bdd48e5a8dc955598a92ec70a35392d | size: 2,484 | ext: py | lang: Python
path: ip/ip/ecommerce/views.py | repo: SuryaVamsiKrishna/Inner-Pieces | head: deb9e83af891dac58966230446a5a32fe10e86f2 | licenses: ["MIT"]
stars: 1 (2021-02-17T06:06:50.000Z to 2021-02-17T06:06:50.000Z) | issues: null | forks: null
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .models import *
from .forms import address_form
from django.http import JsonResponse
from .utils import cartData,guestobj
import json,datetime
def store(request):
items = item.objects.all()
data = cartData(request)
cart_quantity = data['cart_quantity']
context={'items':items, 'cart_quantity':cart_quantity}
return render(request, 'ecom.html', context)
def cart_page(request):
data = cartData(request)
items = data['items']
order = data['order']
cart_quantity = data['cart_quantity']
context={'items':items,'order':order,'cart_quantity':cart_quantity}
return render(request,'cart.html', context)
def checkout(request):
data = cartData(request)
items = data['items']
order = data['order']
cart_quantity = data['cart_quantity']
context={'items':items,'order':order,'cart_quantity':cart_quantity}
return render(request,'checkout.html', context)
def updateitem(request):
data = json.loads(request.body)
itemName = data['itemName']
action = data['action']
user = request.user
Item = item.objects.get(name = itemName)
order,added = cart.objects.get_or_create(user = user,complete=False)
order_item,created = cart_item.objects.get_or_create(order = order, item = Item)
if action == 'add':
order_item.quantity = order_item.quantity + 1
elif action == 'remove':
order_item.quantity = order_item.quantity - 1
order_item.save()
if order_item.quantity <= 0:
order_item.delete()
return JsonResponse('Item was added' , safe = False)
def processOrder(request):
transactionId = datetime.datetime.now().timestamp()
data = json.loads(request.body)
if request.user.is_authenticated:
user = request.user
order,added = cart.objects.get_or_create(user = user,complete=False)
else:
user,order = guestobj(request,data)
total = float(data['form']['total'])
order.transaction_id = transactionId
if total == order.total_bill:
order.complete = True
order.save()
address.objects.create(
user = user,
order = order,
address = data['shipping']['address'],
city = data['shipping']['city'],
state = data['shipping']['state'],
pincode = data['shipping']['zipcode'],
)
return JsonResponse('Payment Complete', safe = False)
avg_line_length: 28.883721 | max_line_length: 84 | alphanum_fraction: 0.67029
count/score: classes 0/0 | generators 0/0 | decorators 0/0 | async_functions 0/0 | documentation 339/0.136473
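`updateitem()` above reads a JSON request body carrying an `itemName` and an `action`; a small sketch of the payload shape it expects (the record has no urls.py, so the endpoint path is unknown and this snippet only mirrors the parsing done in the view):

```python
import json

# Body a client would send to the updateitem view; item name is illustrative.
payload = {"itemName": "Candle", "action": "add"}
body = json.dumps(payload)

# The view does: data = json.loads(request.body)
data = json.loads(body)
assert data["action"] in ("add", "remove")
print(data["itemName"], data["action"])
```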
hexsha: 6690a37ed9d0e2c4e7eeabdedc6f1bdca84bc1a4 | size: 2,899 | ext: py | lang: Python
path: ecogdata/expconfig/config_decode.py | repo: miketrumpis/ecogdata | head: ff65820198e69608634c12686a86b97ac3a77558 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
import os
from ecogdata.util import Bunch
__all__ = ['Parameter', 'TypedParam', 'BoolOrNum', 'NSequence', 'NoneOrStr', 'Path', 'parse_param',
'uniform_bunch_case']
class Parameter:
"A pass-thru parameter whose value is the command (a string)"
def __init__(self, command, default=''):
self.command = command
self.default = default
def value(self):
if self.command:
return self.command
return self.default
@classmethod
def with_default(cls, value, *args):
def _gen_param(command):
return cls(command, *args, default=value)
return _gen_param
class TypedParam(Parameter):
"A simply typed parameter that can be evaluated by a 'type'"
def __init__(self, command, ptype, default=''):
super(TypedParam, self).__init__(command, default=default)
self.ptype = ptype
@staticmethod
def from_type(ptype, default=''):
def _gen_param(command):
return TypedParam(command, ptype, default=default)
return _gen_param
def value(self):
if self.command:
return self.ptype(self.command)
return self.ptype(self.default)
class BoolOrNum(Parameter):
"A value that is a boolean (True, False) or a number"
def value(self):
cmd = super(BoolOrNum, self).value().lower()
if cmd in ('true', 'false', ''):
return cmd == 'true'
return float(self.command)
class NSequence(Parameter):
"A sequence of numbers (integers if possible, else floats)"
def value(self):
cmd = super(NSequence, self).value()
cmd = cmd.strip('(').strip(')').strip('[').strip(']').strip(',')
cmd = cmd.replace(' ', '')
if len(cmd):
try:
return list(map(int, cmd.split(',')))
except ValueError:
return list(map(float, cmd.split(',')))
return ()
class NoneOrStr(Parameter):
"""
A single value that is None (null) or something not null.
Will return a string here.
"""
def value(self):
cmd = super(NoneOrStr, self).value()
return None if cmd.lower() == 'none' else cmd
class Path(NoneOrStr):
"""
Specific string that may include ~
"""
def value(self):
val = super(Path, self).value()
if val is not None:
# catch one pernicious corner case
if len(val) > 1 and val[0] == os.path.sep and val[1] == '~':
val = val[1:]
val = os.path.expanduser(val)
return val
def parse_param(name, command, table):
p = table.get(name.lower(), Parameter)(command)
return p.value()
def uniform_bunch_case(b):
b_lower = Bunch()
for k, v in b.items():
if isinstance(k, str):
b_lower[k.lower()] = v
else:
b_lower[k] = v
return b_lower
avg_line_length: 25.883929 | max_line_length: 99 | alphanum_fraction: 0.58089
count/score: classes 2,386/0.823042 | generators 0/0 | decorators 340/0.117282 | async_functions 0/0 | documentation 577/0.199034
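`parse_param()` above looks a lower-cased option name up in a table of Parameter factories, falling back to the pass-through `Parameter`, and returns the evaluated value; a small usage sketch (the option names are invented for illustration, not ecogdata's actual configuration keys):

```python
from ecogdata.expconfig.config_decode import (
    BoolOrNum, NSequence, Parameter, Path, TypedParam, parse_param)

param_table = {
    'n_channels': TypedParam.from_type(int, default='0'),
    'bandpass': NSequence,
    'use_car': BoolOrNum,
    'data_path': Path,
}

print(parse_param('n_channels', '64', param_table))          # 64
print(parse_param('bandpass', '(2, 100)', param_table))      # [2, 100]
print(parse_param('use_car', 'true', param_table))           # True
print(parse_param('unknown_key', 'raw value', param_table))  # 'raw value' (falls back to Parameter)
```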
hexsha: 66941e3ed65b1efe5312473285b552d665a56ecc | size: 29,897 | ext: py | lang: Python
path: lpjguesstools/lgt_createinput/main.py | repo: lukasbaumbach/lpjguesstools | head: f7cc14c2931b4ac9a3b8dddc89c469b8fedd42e3 | licenses: ["BSD-3-Clause"]
stars: 2 (2020-08-03T11:33:00.000Z to 2021-07-05T21:00:46.000Z) | issues: 8 (2020-08-03T12:45:31.000Z to 2021-02-23T19:51:32.000Z) | forks: 2 (2020-08-03T12:11:43.000Z to 2022-01-29T10:59:00.000Z)
"""FILE lgt_createinput.main.py
This script creates condensed LPJ netcdf files
for landforms and soil properties
landforms.nc:
- lfcnt (landid) number of landforms in cell
- frac (landid, lfid/ standid) area fraction this landform represents
- slope (landid, lfid/ standid)
- elevation (landid, lfid/ standid) avg. elevation in this landform
- soildepth (landid, lfid/ standid) [implemented later const in model for now]
sites.nc:
- soildepth
- clay
- silt
- sand
- totc
- elevation (reference elevation for grid, 0.5deg)
Christian Werner, SENCKENBERG Biodiversity and Climate Research Centre (BiK-F)
email: christian.werner@senkenberg.de
2017/02/07
"""
from collections import OrderedDict
import datetime
import glob
import logging
import math
import numpy as np
import os
import pandas as pd
import string
import time
import xarray as xr
from ._geoprocessing import analyze_filename_dem, \
classify_aspect, \
classify_landform, \
calculate_asp_slope, \
compute_spatial_dataset
from ._srtm1 import split_srtm1_dataset
__version__ = "0.0.2"
log = logging.getLogger(__name__)
# import constants
from . import NODATA
from . import ENCODING
# quick helpers
# TODO: move to a dedicated file later
def time_dec(func):
"""A decorator to measure execution time of function"""
def wrapper(*arg, **kwargs):
t = time.time()
res = func(*arg, **kwargs)
log.debug('DURATION: <%s> : ' % func.__name__ + str(time.time()-t))
return res
return wrapper
varSoil = {'TOTC': ('soc', 'Soil Organic Carbon', 'soc', 'percent', 0.1),
'SDTO': ('sand', 'Sand', 'sand', 'percent', 1.0),
'STPC': ('silt', 'Silt', 'silt', 'percent', 1.0),
'CLPC': ('clay', 'Clay', 'clay', 'percent', 1.0)}
varLF = {'lfcnt': ('lfcnt', 'Number of landforms', 'lfcnt', '-', 1.0),
'slope': ('slope', 'Slope', 'slope', 'deg', 1.0),
'aspect': ('aspect', 'Aspect', 'aspect', 'deg', 1.0),
'asp_slope': ('asp_slope', 'Aspect-corrected Slope', 'asp_slope', 'deg', 1.0),
'fraction': ('fraction', 'Landform Fraction', 'fraction', '1/1', 1.0),
'elevation': ('elevation', 'Elevation', 'elevation', 'm', 1.0),
'soildepth': ('soildepth', 'Soil Depth', 'soildepth', 'm', 1.0)
}
soil_vars = sorted(varSoil.keys())
lf_vars = sorted(varLF.keys())
def convert_float_coord_to_string(coord, p=2):
"""Convert a (lon,lat) coord to string."""
lon, lat = round(coord[0], p), round(coord[1], p)
LA, LO = 'n', 'e'
if lat < 0: LA = 's'
if lon < 0: LO = 'w'
lat_s = "%.2f" % round(abs(lat),2)
lon_s = "%.2f" % round(abs(lon),2)
coord_s = '%s%s%s%s' % (LA, lat_s.zfill(p+3), LO, lon_s.zfill(p+4))
return coord_s
def has_significant_land(ds, min_frac=0.01):
"""Test if land fraction in tile is significant."""
# min_frac in %, default: 0.01 %
if (ds['mask'].values.sum() / float(len(ds.lat.values) * len(ds.lon.values))) * 100 > min_frac:
return True
return False
def define_landform_classes(step, limit, TYPE='SIMPLE'):
"""Define the landform classes."""
# Parameters:
# - step: elevation interval for landform groups (def: 400m )
# - limit: elevation limit [inclusive, in m]
ele_breaks = [-1000] + list(range(step, limit, step)) + [10000]
ele_cnt = range(1, len(ele_breaks))
# code system [code position 2 & 3, 1= elevations_tep]
# code: [slopeid<1..6>][aspectid<0,1..4>]
#
# slope:
#
# Name SIMPLE WEISS
#
# hilltop 1 1
# upper slope 2*
# mid slope 3* 3*
# flats 4 4
# lower slope 5*
# valley 6 6
#
#
# aspect:
#
# Name SIMPLE WEISS
#
# north 1 1
# east 2 2
# south 3 3
# west 4 4
if TYPE == 'WEISS':
lf_set = [10,21,22,23,24,31,32,33,34,40,51,52,53,54,60]
lf_full_set = []
for e in ele_cnt:
lf_full_set += [x+(100*e) for x in lf_set]
elif TYPE == 'SIMPLE':
# TYPE: SIMPLE (1:hilltop, 3:midslope, 4:flat, 6:valley)
lf_set = [10,31,32,33,34,40,60]
lf_full_set = []
for e in ele_cnt:
lf_full_set += [x+(100*e) for x in lf_set]
else:
log.error('Currently only classification schemes WEISS, SIMPLE supported.')
return (lf_full_set, ele_breaks)
def tiles_already_processed(TILESTORE_PATH):
"""Check if the tile exists."""
existing_tiles = glob.glob(os.path.join(TILESTORE_PATH, '*.nc'))
#existing_tiles = [os.path.basename(x) for x in glob.glob(glob_string)]
processed_tiles = []
for existing_tile in existing_tiles:
with xr.open_dataset(existing_tile) as ds:
source = ds.tile.get('source')
if source is not None:
processed_tiles.append(source)
else:
log.warn('Source attr not set in file %s.' % existing_tile)
return processed_tiles
def match_watermask_shpfile(glob_string):
"""Check if the generated shp glob_string exists."""
found=False
if len(glob.glob(glob_string)) == 0:
shp = None
elif len(glob.glob(glob_string)) == 1:
shp = glob.glob(glob_string)[0]
found = True
else:
log.error("Too many shape files.")
exit()
# second try: look for zip file
if found is False:
shp = glob_string.replace(".shp", ".zip")
if len(glob.glob(shp)) == 0:
shp = None
elif len(glob.glob(shp)) == 1:
shp = glob.glob(shp)[0]
else:
log.error("Too many shape files.")
exit()
return shp
def get_tile_summary(ds, cutoff=0):
"""Compute the fractional cover of the landforms in this tile."""
unique, counts = np.unique(ds['landform_class'].to_masked_array(), return_counts=True)
counts = np.ma.masked_array(counts, mask=unique.mask)
unique = np.ma.compressed(unique)
counts = np.ma.compressed(counts)
total_valid = float(np.sum(counts))
df = pd.DataFrame({'lf_id': unique.astype('int'), 'cells': counts})
df['frac'] = (df['cells'] / df['cells'].sum())*100
df = df[df['frac'] >= cutoff]
df['frac_scaled'] = (df['cells'] / df['cells'].sum())*100
# also get lf-avg of elevation and slope
df['elevation'] = -1
df['slope'] = -1
df['asp_slope'] = -1
df['aspect'] = -1
if 'soildepth' in ds.data_vars:
df['soildepth'] = -1
a_lf = ds['landform_class'].to_masked_array()
# average aspect angles
def avg_aspect(a):
x = 0
y = 0
for v in a.ravel():
x += math.sin(math.radians(v))
y += math.cos(math.radians(v))
avg = math.degrees(math.atan2(x, y))
if avg < 0:
avg += 360
return avg
# calculate the avg. elevation and slope in landforms
for i, r in df.iterrows():
ix = a_lf == int(r['lf_id'])
lf_slope = ds['slope'].values[ix].mean()
lf_asp_slope = ds['asp_slope'].values[ix].mean()
lf_elevation = ds['elevation'].values[ix].mean()
lf_aspect = avg_aspect(ds['aspect'].values[ix])
if 'soildepth' in ds.data_vars:
lf_soildepth = ds['soildepth'].values[ix].mean()
df.loc[i, 'soildepth'] = lf_soildepth
df.loc[i, 'slope'] = lf_slope
df.loc[i, 'asp_slope'] = lf_asp_slope
df.loc[i, 'elevation'] = lf_elevation
df.loc[i, 'aspect'] = lf_aspect
if 'soildepth' in ds.data_vars:
df.loc[i, 'soildepth'] = lf_soildepth
return df
def tile_files_compatible(files):
"""Get global attribute from all tile netcdf files and check
they were created with an identical elevation step.
"""
fingerprints = []
for file in files:
with xr.open_dataset(file) as ds:
fingerprint = (ds.tile.get('elevation_step'), ds.tile.get('classification'))
fingerprints.append(fingerprint)
# check if elements are equal
if all(x==fingerprints[0] for x in fingerprints):
# check if there are Nones' in any fingerprint
if not all(fingerprints):
return False
return True
return False
def create_stats_table(df, var):
"""Create a landform info table for all coords and given var."""
df_ = df[var].unstack(level=-1, fill_value=NODATA)
# rename columns and split coord tuple col to lon and lat col
df_.columns = ['lf' + str(col) for col in df_.columns]
if 'lf0' in df_.columns:
del df_['lf0']
df_ = df_.reset_index()
df_[['lon', 'lat', 'lf_cnt']] = df_['coord'].apply(pd.Series)
df_['lf_cnt'] = df_['lf_cnt'].astype(int)
# cleanup (move lon, lat to front, drop coord col)
df_.drop('coord', axis=1, inplace=True)
latloncnt_cols = ['lon', 'lat', 'lf_cnt']
new_col_order = latloncnt_cols + \
[x for x in df_.columns.tolist() if x not in latloncnt_cols]
return df_[new_col_order]
@time_dec
def convert_dem_files(cfg, lf_ele_levels):
"""Compute landform units based on elevation, slope, aspect and tpi classes."""
if cfg.SRTMSTORE_PATH is not None:
# if glob_string is a directory, add wildcard for globbing
glob_string = cfg.SRTMSTORE_PATH
if os.path.isdir(cfg.SRTMSTORE_PATH):
glob_string = os.path.join(cfg.SRTMSTORE_PATH, '*')
dem_files = sorted(glob.glob(glob_string))
existing_tiles = tiles_already_processed(cfg.TILESTORE_PATH)
for dem_file in dem_files:
fname = os.path.basename(dem_file)
fdir = os.path.dirname(dem_file)
# SRTM1 default naming convention
str_lat = fname[:3]
str_lon = fname[3:7]
# if tiles don't exist process them
process_tiles = True
if cfg.OVERWRITE:
process_tiles = True
else:
_, source_name = analyze_filename_dem(fname)
if source_name in existing_tiles:
process_tiles = False
if process_tiles:
log.info('processing: %s (%s)' % (dem_file, datetime.datetime.now()))
shp_glob_string = os.path.join(cfg.WATERMASKSTORE_PATH, str_lon + str_lat + '*.shp')
matched_shp_file = match_watermask_shpfile(shp_glob_string.lower())
ds_srtm1 = compute_spatial_dataset(dem_file, fname_shp=matched_shp_file)
tiles = split_srtm1_dataset(ds_srtm1)
for i, tile in enumerate(tiles):
# reclass
if tile != None and has_significant_land(tile):
log.debug("Valid tile %d in file %s." % (i+1, dem_file))
classify_aspect(tile)
classify_landform(tile, elevation_levels=lf_ele_levels, TYPE=cfg.CLASSIFICATION)
calculate_asp_slope(tile)
# store file in tilestore
# get tile center coordinate and name
lon, lat = tile.geo.center()
lonlat_string = convert_float_coord_to_string((lon,lat))
tile_name = "srtm1_processed_%s.nc" % lonlat_string
tile.to_netcdf(os.path.join(cfg.TILESTORE_PATH, tile_name), \
format='NETCDF4_CLASSIC')
else:
log.debug("Empty tile %d in file %s ignored." % (i+1, dem_file))
@time_dec
def compute_statistics(cfg):
"""Extract landform statistics from tiles in tilestore."""
available_tiles = glob.glob(os.path.join(cfg.TILESTORE_PATH, '*.nc'))
log.debug('Number of tiles found: %d' % len(available_tiles))
if len(available_tiles) == 0:
log.error('No processed tiles available in directory "%s"' % cfg.TILESTORE_PATH)
exit()
tiles = sorted(available_tiles)
if not tile_files_compatible(tiles):
log.error('Tile files in %s are not compatible.' % cfg.TILESTORE_PATH)
exit()
tiles_stats = []
for tile in tiles:
log.debug('Computing statistics for tile %s' % tile)
with xr.open_dataset(tile) as ds:
lf_stats = get_tile_summary(ds, cutoff=cfg.CUTOFF)
lf_stats.reset_index(inplace=True)
number_of_ids = len(lf_stats)
lon, lat = ds.geo.center()
coord_tuple = (round(lon,2),round(lat,2), int(number_of_ids))
lf_stats['coord'] = pd.Series([coord_tuple for _ in range(len(lf_stats))])
lf_stats.set_index(['coord', 'lf_id'], inplace=True)
tiles_stats.append( lf_stats )
df = pd.concat(tiles_stats)
frac_lf = create_stats_table(df, 'frac_scaled')
elev_lf = create_stats_table(df, 'elevation')
slope_lf = create_stats_table(df, 'slope')
asp_slope_lf = create_stats_table(df, 'asp_slope')
aspect_lf = create_stats_table(df, 'aspect')
return (frac_lf, elev_lf, slope_lf, asp_slope_lf, aspect_lf)
def is_3d(ds, v):
"""Check if xr.DataArray has 3 dimensions."""
dims = ds[v].dims
if len(dims) == 3:
return True
return False
def assign_to_dataarray(data, df, lf_full_set, refdata=False):
"""Place value into correct location of data array."""
if refdata==True:
data[:] = NODATA
else:
data[:] = np.nan
for _, r in df.iterrows():
if refdata:
data.loc[r.lat, r.lon] = r.lf_cnt
else:
for lf in r.index[3:]:
if r[lf] > NODATA:
lf_id = int(lf[2:])
lf_pos = lf_full_set.index(lf_id)
data.loc[dict(lf_id=lf_id, lat=r.lat, lon=r.lon)] = r[lf]
return data
def spatialclip_df(df, extent):
"""Clip dataframe wit lat lon columns by extent."""
if any(e is None for e in extent):
log.warn("SpatialClip: extent passed is None.")
lon1, lat1, lon2, lat2 = extent
if ('lon' not in df.columns) or ('lat' not in df.columns):
log.warn("SpatialClip: lat/ lon cloumn missing in df.")
return df[((df.lon >= lon1) & (df.lon <= lon2)) &
((df.lat >= lat1) & (df.lat <= lat2))]
def build_site_netcdf(soilref, elevref, extent=None):
"""Build the site netcdf file."""
# extent: (x1, y1, x2, y2)
ds_soil_orig = xr.open_dataset(soilref)
ds_ele_orig = xr.open_dataset(elevref)
if extent is not None:
lat_min, lat_max = extent[1], extent[3]
lon_min, lon_max = extent[0], extent[2]
# slice simulation domain
ds_soil = ds_soil_orig.where((ds_soil_orig.lon >= lon_min) & (ds_soil_orig.lon <= lon_max) &
(ds_soil_orig.lat >= lat_min) & (ds_soil_orig.lat <= lat_max) &
(ds_soil_orig.lev==1.0), drop=True).squeeze(drop=True)
ds_ele = ds_ele_orig.where((ds_ele_orig.longitude >= lon_min) & (ds_ele_orig.longitude <= lon_max) &
(ds_ele_orig.latitude >= lat_min) & (ds_ele_orig.latitude <= lat_max), drop=True).squeeze(drop=True)
else:
ds_soil = ds_soil_orig.sel(lev=1.0).squeeze(drop=True)
ds_ele = ds_ele_orig.squeeze(drop=True)
del ds_soil['lev']
# identify locations that need filling and use left neighbor
smask = np.where(ds_soil['TOTC'].to_masked_array().mask, 1, 0)
emask = np.where(ds_ele['data'].to_masked_array().mask, 1, 0)
# no soil data but elevation: gap-fill with neighbors
missing = np.where((smask == 1) & (emask == 0), 1, 0)
ix, jx = np.where(missing == 1)
if len(ix) > 0:
log.debug('Cells with elevation but no soil data [BEFORE GF: %d].' % len(ix))
for i, j in zip(ix, jx):
for v in soil_vars:
if (j > 0) and np.isfinite(ds_soil[v][i, j-1]):
ds_soil[v][i, j] = ds_soil[v][i, j-1].copy(deep=True)
elif (j < ds_soil[v].shape[1]-1) and np.isfinite(ds_soil[v][i, j+1]):
ds_soil[v][i, j] = ds_soil[v][i, j+1].copy(deep=True)
else:
log.warn('neighbours have nodata !')
x = ds_soil[v][i, j].to_masked_array()
smask2 = np.where(ds_soil['TOTC'].to_masked_array().mask, 1, 0)
missing = np.where((smask2 == 1) & (emask == 0), 1, 0)
ix, jx = np.where(missing == 1)
log.debug('Cells with elevation but no soil data [AFTER GF: %d].' % len(ix))
dsout = xr.Dataset()
# soil vars
for v in soil_vars:
conv = varSoil[v][-1]
da = ds_soil[v].copy(deep=True) * conv
da.name = varSoil[v][0]
vattr = {'name': varSoil[v][0],
'long_name': varSoil[v][1],
'standard_name': varSoil[v][2],
'units': varSoil[v][3],
'coordinates': "lat lon"}
da.tile.update_attrs(vattr)
da.tile.update_encoding(ENCODING)
da[:] = np.ma.masked_where(emask, da.to_masked_array())
dsout[da.name] = da
# ele var
da = xr.full_like(da.copy(deep=True), np.nan)
da.name = 'elevation'
vattr = {'name': 'elevation', 'long_name': 'Elevation',
'units': 'meters', 'standard_name': 'elevation'}
da.tile.update_attrs(vattr)
da.tile.update_encoding(ENCODING)
da[:] = ds_ele['data'].to_masked_array()
dsout[da.name] = da
return dsout
@time_dec
def build_landform_netcdf(lf_full_set, df_dict, cfg, elevation_levels, refnc=None):
"""Build landform netcdf based on refnc dims and datatables."""
def has_soildepth():
if 'soildepth_lf' in df_dict:
return True
else:
return False
dsout = xr.Dataset()
COORDS = [('lf_id', lf_full_set), ('lat', refnc.lat), ('lon', refnc.lon)]
SHAPE = tuple([len(x) for _, x in COORDS])
# initiate data arrays
_blank = np.empty(SHAPE)
da_lfcnt = xr.DataArray(_blank.copy()[0,:,:].astype(int), name='lfcnt',
coords=COORDS[1:])
da_frac = xr.DataArray(_blank.copy(), name='fraction', coords=COORDS)
da_slope = xr.DataArray(_blank.copy(), name='slope', coords=COORDS)
da_asp_slope = xr.DataArray(_blank.copy(), name='asp_slope', coords=COORDS)
da_elev = xr.DataArray(_blank.copy(), name='elevation', coords=COORDS)
da_aspect = xr.DataArray(_blank.copy(), name='aspect', coords=COORDS)
if has_soildepth(): da_soildepth = xr.DataArray(_blank.copy(), name='soildepth', coords=COORDS)
frac_lf = df_dict['frac_lf']
slope_lf = df_dict['slope_lf']
asp_slope_lf = df_dict['asp_slope_lf']
elev_lf = df_dict['elev_lf']
aspect_lf = df_dict['aspect_lf']
if has_soildepth(): soildepth_lf = df_dict['soildepth_lf']
# check that landform coordinates are in refnc
df_extent = [frac_lf.lon.min(), frac_lf.lat.min(), frac_lf.lon.max(), frac_lf.lat.max()]
log.debug('df_extent: %s' % str(df_extent))
log.debug('contains: %s' % str(refnc.geo.contains(df_extent)))
if refnc.geo.contains(df_extent) == False:
frac_lf = spatialclip_df(frac_lf, refnc.geo.extent)
slope_lf = spatialclip_df(slope_lf, refnc.geo.extent)
asp_slope_lf = spatialclip_df(asp_slope_lf, refnc.geo.extent)
elev_lf = spatialclip_df(elev_lf, refnc.geo.extent)
aspect_lf = spatialclip_df(aspect_lf, refnc.geo.extent)
if has_soildepth(): spatialclip_df(soildepth_lf, refnc.geo.extent)
# dump files
frac_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_frac.csv'), index=False)
slope_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_slope.csv'), index=False)
asp_slope_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_asp_slope.csv'), index=False)
elev_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_elev.csv'), index=False)
aspect_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_aspect.csv'), index=False)
if has_soildepth(): soildepth_lf.to_csv(os.path.join(cfg.OUTDIR, 'df_soildepth.csv'), index=False)
# assign dataframe data to arrays
da_lfcnt = assign_to_dataarray(da_lfcnt, frac_lf, lf_full_set, refdata=True)
da_frac = assign_to_dataarray(da_frac, frac_lf, lf_full_set)
da_slope = assign_to_dataarray(da_slope, slope_lf, lf_full_set)
da_asp_slope = assign_to_dataarray(da_asp_slope, asp_slope_lf, lf_full_set)
da_elev = assign_to_dataarray(da_elev, elev_lf, lf_full_set)
da_aspect = assign_to_dataarray(da_aspect, aspect_lf, lf_full_set)
if has_soildepth(): da_soildepth = assign_to_dataarray(da_soildepth, soildepth_lf, lf_full_set)
# store arrays in dataset
dsout[da_lfcnt.name] = da_lfcnt
dsout[da_frac.name] = da_frac
dsout[da_slope.name] = da_slope
dsout[da_asp_slope.name] = da_asp_slope
dsout[da_elev.name] = da_elev
dsout[da_aspect.name] = da_aspect
if has_soildepth(): dsout[da_soildepth.name] = da_soildepth
for v in dsout.data_vars:
vattr = {}
if v in lf_vars:
vattr = {'name': varLF[v][0],
'long_name': varLF[v][1],
'standard_name': varLF[v][2],
'units': varLF[v][3],
'coordinates': "lat lon"}
dsout[v].tile.update_attrs(vattr)
dsout[v].tile.update_encoding(ENCODING)
dsout['lat'].tile.update_attrs(dict(standard_name='latitude',
long_name='latitude',
units='degrees_north'))
dsout['lon'].tile.update_attrs(dict(standard_name='longitude',
long_name='longitude',
units='degrees_east'))
dsout['lf_id'].tile.update_attrs(dict(standard_name='lf_id',
long_name='lf_id',
units='-'))
for dv in dsout.data_vars:
dsout[dv].tile.update_encoding(ENCODING)
# register the specific landform properties (elevation steps, classification)
dsout.tile.set('elevation_step', elevation_levels[1])
dsout.tile.set('classification', cfg.CLASSIFICATION.lower())
return dsout
def build_compressed(ds):
"""Build LPJ-Guess 4.0 compatible compressed netcdf file."""
# identify landforms netcdf
if 'lfcnt' in ds.data_vars:
v = 'lfcnt'
elif 'elevation' in ds.data_vars:
v = 'elevation'
else:
log.error("Not a valid xr.Dataset (landforms or site only).")
# create id position dataarray
da_ids = xr.ones_like(ds[v]) * NODATA
latL = []
lonL = []
d = ds[v].to_masked_array()
# REVIEW: why is 'to_masked_array()'' not working here?
d = np.ma.masked_where(d == NODATA, d)
land_id = 0
D_ids = OrderedDict()
for j in reversed(range(len(d))):
for i in range(len(d[0])):
if d[j, i] is not np.ma.masked:
lat = float(ds['lat'][j].values)
lon = float(ds['lon'][i].values)
latL.append(lat)
lonL.append(lon)
da_ids.loc[lat, lon] = land_id
D_ids[(lat, lon)] = land_id
land_id += 1
LFIDS = range(land_id)
# create coordinate variables
_blank = np.zeros(len(LFIDS))
lats = xr.DataArray(latL, name='lat', coords=[('land_id', LFIDS)])
lons = xr.DataArray(lonL, name='lon', coords=[('land_id', LFIDS)])
lats.tile.update_attrs(dict(standard_name='latitude',
long_name='latitude',
units='degrees_north'))
lons.tile.update_attrs(dict(standard_name='longitude',
long_name='longitude',
units='degrees_east'))
# create land_id reference array
# TODO: clip land_id array to Chile country extent?
da_ids.tile.update_encoding(ENCODING)
ds_ids = da_ids.to_dataset(name='land_id')
# create xr.Dataset
dsout = xr.Dataset()
dsout[lats.name] = lats
dsout[lons.name] = lons
# walk through variables, get lat/ lon cells' data
for v in ds.data_vars:
if is_3d(ds, v):
_shape = (len(LFIDS), len(ds[ds[v].dims[0]]))
COORDS = [('land_id', LFIDS), ('lf_id', ds['lf_id'])]
else:
_shape = (len(LFIDS),)
COORDS = [('land_id', LFIDS)]
_blank = np.ones( _shape )
_da = xr.DataArray(_blank[:], name=v, coords=COORDS)
for lat, lon in zip(latL, lonL):
land_id = D_ids[(lat, lon)]
vals = ds[v].sel(lat=lat, lon=lon).to_masked_array()
_da.loc[land_id] = vals
_da.tile.update_attrs(ds[v].attrs)
_da.tile.update_encoding(ENCODING)
dsout[_da.name] = _da
if is_3d(ds, v):
dsout['lf_id'].tile.update_attrs(dict(standard_name='lf_id',
long_name='lf_id',
units='-'))
# copy lgt attributes from src to dst
dsout.tile.copy_attrs(ds)
return (ds_ids, dsout)
def mask_dataset(ds, valid):
"""Mask all values that are not valid/ 1 (2d or 3d)."""
for v in ds.data_vars:
dims = ds[v].dims
if len(dims) > len(valid.shape):
z = len(ds[v].values)
valid = np.array(z*[valid])
ds[v].values = np.ma.masked_where(valid == 0, ds[v].values).filled(NODATA)
return ds
def create_gridlist(ds):
"""Create LPJ-Guess 4.0 gridlist file."""
outL = []
for j in reversed(range(len(ds['land_id']))):
for i in range(len(ds['land_id'][0])):
x = ds['land_id'][j, i].values #to_masked_array()
if x != NODATA: #p.ma.masked:
lat = float(ds['lat'][j].values)
lon = float(ds['lon'][i].values)
land_id = int(ds['land_id'].sel(lat=lat, lon=lon).values)
outS = "%3.2f %3.2f %d" % (lat, lon, land_id)
outL.append(outS)
return '\n'.join(outL) + '\n'
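# Example of the gridlist text produced above (hypothetical coordinates; one
# line per valid cell, "%3.2f %3.2f %d" -> lat, lon, land_id):
#
#     -33.25 -70.75 0
#     -33.25 -70.25 1
#     -33.75 -70.75 2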
def main(cfg):
"""Main Script."""
# default soil and elevation data (contained in package)
import pkg_resources
SOIL_NC = pkg_resources.resource_filename(__name__, '../data/GLOBAL_WISESOIL_DOM_05deg.nc')
ELEVATION_NC = pkg_resources.resource_filename(__name__, '../data/GLOBAL_ELEVATION_05deg.nc')
log.info("Converting DEM files and computing landform stats")
# define the final landform classes (now with elevation brackets)
lf_classes, lf_ele_levels = define_landform_classes(200, 6000, TYPE=cfg.CLASSIFICATION)
# process dem files to tiles (if not already processed)
convert_dem_files(cfg, lf_ele_levels)
#sitenc = build_site_netcdf(SOIL_NC, ELEVATION_NC, extent=cfg.REGION)
# compute stats from tiles
df_frac, df_elev, df_slope, df_asp_slope, df_aspect = compute_statistics(cfg)
#print 'reading files'
#df_frac = pd.read_csv('lfdata.cutoff_1.0p/df_frac.csv')
#df_asp_slope = pd.read_csv('lfdata.cutoff_1.0p/df_asp_slope.csv')
#df_slope = pd.read_csv('lfdata.cutoff_1.0p/df_slope.csv')
#df_aspect = pd.read_csv('lfdata.cutoff_1.0p/df_aspect.csv')
#df_elev = pd.read_csv('lfdata.cutoff_1.0p/df_elev.csv')
# build netcdfs
log.info("Building 2D netCDF files")
sitenc = build_site_netcdf(SOIL_NC, ELEVATION_NC, extent=cfg.REGION)
df_dict = dict(frac_lf=df_frac, elev_lf=df_elev, slope_lf=df_slope,
asp_slope_lf=df_asp_slope, aspect_lf=df_aspect)
landformnc = build_landform_netcdf(lf_classes, df_dict, cfg, lf_ele_levels, refnc=sitenc)
# clip to joined mask
#elev_mask = np.where(sitenc['elevation'].values == NODATA, 0, 1)
#landform_mask = np.where(landformnc['lfcnt'].values == NODATA, 0, 1)
#valid_mask = elev_mask * landform_mask
elev_mask = ~np.ma.getmaskarray(sitenc['elevation'].to_masked_array())
sand_mask = ~np.ma.getmaskarray(sitenc['sand'].to_masked_array())
land_mask = ~np.ma.getmaskarray(landformnc['lfcnt'].to_masked_array())
valid_mask = elev_mask * sand_mask * land_mask
sitenc = mask_dataset(sitenc, valid_mask)
landformnc = mask_dataset(landformnc, valid_mask)
landform_mask = np.where(landformnc['lfcnt'].values == -9999, np.nan, 1)
#landform_mask = np.where(landform_mask == True, np.nan, 1)
for v in sitenc.data_vars:
sitenc[v][:] = sitenc[v].values * landform_mask
# write 2d/ 3d netcdf files
sitenc.to_netcdf(os.path.join(cfg.OUTDIR, 'sites_2d.nc'),
format='NETCDF4_CLASSIC')
landformnc.to_netcdf(os.path.join(cfg.OUTDIR, 'landforms_2d.nc'),
format='NETCDF4_CLASSIC')
# convert to compressed netcdf format
log.info("Building compressed format netCDF files")
ids_2d, comp_sitenc = build_compressed(sitenc)
ids_2db, comp_landformnc = build_compressed(landformnc)
# write netcdf files
ids_2d.to_netcdf(os.path.join(cfg.OUTDIR, "land_ids_2d.nc"),
format='NETCDF4_CLASSIC')
ids_2db.to_netcdf(os.path.join(cfg.OUTDIR, "land_ids_2db.nc"),
format='NETCDF4_CLASSIC')
comp_landformnc.to_netcdf(os.path.join(cfg.OUTDIR, "landform_data.nc"),
format='NETCDF4_CLASSIC')
comp_sitenc.to_netcdf(os.path.join(cfg.OUTDIR, "site_data.nc"),
format='NETCDF4_CLASSIC')
# gridlist file
log.info("Creating gridlist file")
gridlist = create_gridlist(ids_2d)
    with open(os.path.join(cfg.OUTDIR, cfg.GRIDLIST_TXT), 'w') as f:
        f.write(gridlist)
log.info("Done")
| 36.282767
| 135
| 0.596247
| 0
| 0
| 0
| 0
| 8,770
| 0.29334
| 0
| 0
| 8,034
| 0.268723
|
66942000229050463aff5906c4c70265c74740a1
| 4,379
|
py
|
Python
|
html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
html_parsing/www_dns_shop_ru/check_update_price_date__QWebEnginePage_bs4.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""Скрипт проверяет дату обновления прайса на сайте http://www.dns-shop.ru/"""
# # Based on http://stackoverflow.com/a/37755811/5909792
# def get_html(url, check_content_func=None):
# # from PyQt5.QtCore import QUrl
# # from PyQt5.QtWidgets import QApplication
# # from PyQt5.QtWebEngineWidgets import QWebEnginePage
#
# from PyQt4.QtCore import QUrl
# from PyQt4.QtGui import QApplication
# from PyQt4.QtWebKit import QWebPage as QWebEnginePage
#
# class ExtractorHtml:
# def __init__(self, url):
# self.html = None
#
# _app = QApplication([])
# self._page = QWebEnginePage()
# self._page.mainFrame().load(QUrl(url))
# # self._page.load(QUrl(url))
# self._page.loadFinished.connect(self._load_finished_handler)
#
#         # Wait for the page to load and retrieve its content
#         # This loop turns the asynchronous code into synchronous code
# while self.html is None:
# _app.processEvents()
#
# _app.quit()
#
# self._page = None
#
# def _callable(self, data):
# if check_content_func:
# if check_content_func(data):
# self.html = data
#
# else:
# self.html = data
#
# def _load_finished_handler(self):
# # self._page.toHtml(self._callable)
# self.html = self._page.mainFrame().toHtml()
#
# return ExtractorHtml(url).html
#
#
# class UpdateDateTextNotFound(Exception):
# pass
#
#
# import os
#
#
# def download_price():
# url = 'http://www.dns-shop.ru/'
#
# html = get_html(url, lambda html: 'price-list-downloader' in html)
#
# from bs4 import BeautifulSoup
# root = BeautifulSoup(html, 'lxml')
#
# for a in root.select('#price-list-downloader a'):
# href = a['href']
#
# if href.endswith('.xls'):
# from urllib.parse import urljoin
# file_url = urljoin(url, href)
# # print(file_url)
#
# update_date_text = a.next_sibling.strip()
#
# import re
# match = re.search(r'\d{,2}.\d{,2}.\d{4}', update_date_text)
# if match is None:
# raise UpdateDateTextNotFound()
#
# date_string = match.group()
# # print(date_string)
#
# # from datetime import datetime
# # print(datetime.strptime(date_string, '%d.%m.%Y'))
#
# file_name = os.path.basename(href)
# file_name = date_string + '_' + file_name
#
# if os.path.exists(file_name):
# return file_name
#
# from urllib.request import urlretrieve
# urlretrieve(file_url, file_name)
#
# return file_name
#
# return
#
#
# while True:
# file_name = download_price()
# print(file_name)
#
# import time
# # time.sleep(10 * 60 * 60)
# time.sleep(60)
from PyQt5.QtCore import QUrl, QTimer
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage
def _callable(html):
if 'price-list-downloader' not in html:
return
from bs4 import BeautifulSoup
root = BeautifulSoup(html, 'lxml')
for a in root.select('#price-list-downloader a'):
href = a['href']
if href.endswith('.xls'):
from urllib.parse import urljoin
file_url = urljoin(url, href)
update_date_text = a.next_sibling.strip()
import re
match = re.search(r'\d{,2}.\d{,2}.\d{4}', update_date_text)
if match is None:
return
date_string = match.group()
import os
file_name = os.path.basename(href)
file_name = date_string + '_' + file_name
from datetime import datetime
print(datetime.today().date(), file_name, file_url)
url = 'http://www.dns-shop.ru/'
app = QApplication([])
page = QWebEnginePage()
page.load(QUrl(url))
page.loadFinished.connect(lambda x=None: page.toHtml(_callable))
# Schedule the page to be reloaded every 10 hours
timer = QTimer()
timer.setInterval(10 * 60 * 60 * 1000)
timer.timeout.connect(lambda x=None: page.load(QUrl(url)))
timer.start()
app.exec()
| 26.70122
| 78
| 0.58575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,278
| 0.718544
|
6696f698bff747564601f269987739a28d5abfe1
| 12,918
|
py
|
Python
|
tests/test_adapters.py
|
Shelestova-Anastasia/cutadapt
|
6e239b3b8e20d17fdec041dc1d967ec2a3cfe770
|
[
"MIT"
] | null | null | null |
tests/test_adapters.py
|
Shelestova-Anastasia/cutadapt
|
6e239b3b8e20d17fdec041dc1d967ec2a3cfe770
|
[
"MIT"
] | null | null | null |
tests/test_adapters.py
|
Shelestova-Anastasia/cutadapt
|
6e239b3b8e20d17fdec041dc1d967ec2a3cfe770
|
[
"MIT"
] | null | null | null |
import pytest
from dnaio import Sequence
from cutadapt.adapters import (
RemoveAfterMatch,
RemoveBeforeMatch,
FrontAdapter,
BackAdapter,
PrefixAdapter,
SuffixAdapter,
LinkedAdapter,
MultipleAdapters,
IndexedPrefixAdapters,
IndexedSuffixAdapters,
)
def test_back_adapter_absolute_number_of_errors():
adapter = BackAdapter(
sequence="GATCGGAAGA",
max_errors=1,
min_overlap=3,
)
assert adapter.max_error_rate == 1 / 10
def test_back_adapter_absolute_number_of_errors_with_wildcards():
adapter = BackAdapter(
sequence="NNNNNNNNNNGATCGGAAGA",
max_errors=1,
)
assert adapter.max_error_rate == 1 / 10
def test_front_adapter_partial_occurrence_in_back():
adapter = FrontAdapter("CTGAATT", max_errors=0, min_overlap=4)
assert adapter.match_to("GGGGGCTGAA") is None
def test_back_adapter_partial_occurrence_in_front():
adapter = BackAdapter("CTGAATT", max_errors=0, min_overlap=4)
assert adapter.match_to("AATTGGGGGGG") is None
def test_issue_52():
adapter = BackAdapter(
sequence="GAACTCCAGTCACNNNNN",
max_errors=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True,
)
sequence = "CCCCAGAACTACAGTCCCGGC"
am = RemoveAfterMatch(
astart=0,
astop=17,
rstart=5,
rstop=21,
score=15,
errors=2,
adapter=adapter,
sequence=sequence,
)
assert am.wildcards() == "GGC"
"""
The result above should actually be 'CGGC' since the correct
alignment is this one:
adapter GAACTCCAGTCACNNNNN
mismatches X X
read CCCCAGAACTACAGTC-CCGGC
Since we do not keep the alignment, guessing 'GGC' is the best we
can currently do.
"""
def test_issue_80():
# This issue was at the time not considered to be an actual issue with the alignment
# algorithm. The following alignment with three errors was found because it had more
# matches than the 'obvious' one:
#
# TCGTATGCCGTCTTC
# =========X==XX=
# TCGTATGCCCTC--C
#
# The alignment algorithm has since been changed so that not the number of matches
# is relevant, but a score that penalizes indels. Now, the resulting alignment
# should be this one (with only two errors):
#
# TCGTATGCCGTCTTC
# =========X==X
# TCGTATGCCCTCC
adapter = BackAdapter(
sequence="TCGTATGCCGTCTTC",
max_errors=0.2,
min_overlap=3,
read_wildcards=False,
adapter_wildcards=False,
)
result = adapter.match_to("TCGTATGCCCTCC")
assert result.errors == 2, result
assert result.astart == 0, result
assert result.astop == 13, result
def test_back_adapter_indel_and_exact_occurrence():
adapter = BackAdapter(
sequence="GATCGGAAGA",
max_errors=0.1,
min_overlap=3,
)
match = adapter.match_to("GATCGTGAAGAGATCGGAAGA")
# We want the leftmost match of these two possible ones:
# GATCGTGAAGAGATCGGAAGA
# GATCG-GAAGA
# GATCGGAAGA
assert match.astart == 0
assert match.astop == 10
assert match.rstart == 0
assert match.rstop == 11
assert match.errors == 1
assert match.score == 8
def test_back_adapter_indel_and_mismatch_occurrence():
adapter = BackAdapter(
sequence="GATCGGAAGA",
max_errors=0.1,
min_overlap=3,
)
match = adapter.match_to("CTGGATCGGAGAGCCGTAGATCGGGAGAGGC")
# CTGGATCGGA-GAGCCGTAGATCGGGAGAGGC
# ||||||| || ||||||X|||
# GATCGGAAGA GATCGGAAGA
assert match.astart == 0
assert match.astop == 10
assert match.rstart == 3
assert match.rstop == 12
assert match.score == 7
assert match.errors == 1
def test_str():
a = BackAdapter("ACGT", max_errors=0.1)
str(a)
str(a.match_to("TTACGT"))
def test_prefix_with_indels_one_mismatch():
a = PrefixAdapter(
sequence="GCACATCT",
max_errors=0.15,
min_overlap=1,
read_wildcards=False,
adapter_wildcards=False,
indels=True,
)
# GCACATCGGAA
# |||||||X
# GCACATCT
result = a.match_to("GCACATCGGAA")
assert result.astart == 0
assert result.astop == 8
assert result.rstart == 0
assert result.rstop == 8
assert result.score == 6 # 7 matches, 1 mismatch
assert result.errors == 1
def test_prefix_with_indels_two_mismatches():
a = PrefixAdapter(
sequence="GCACATTT",
max_errors=0.3,
min_overlap=1,
read_wildcards=False,
adapter_wildcards=False,
indels=True,
)
result = a.match_to("GCACATCGGAA")
# GCACATCGGAA
# ||||||XX
# GCACATTT
assert result.astart == 0
assert result.astop == 8
assert result.rstart == 0
assert result.rstop == 8
assert result.score == 4
assert result.errors == 2
def test_linked_adapter():
front_adapter = PrefixAdapter("AAAA", min_overlap=4)
back_adapter = BackAdapter("TTTT", min_overlap=3)
linked_adapter = LinkedAdapter(
front_adapter,
back_adapter,
front_required=True,
back_required=False,
name="name",
)
assert linked_adapter.front_adapter.min_overlap == 4
assert linked_adapter.back_adapter.min_overlap == 3
read = Sequence(name="seq", sequence="AAAACCCCCTTTT")
trimmed = linked_adapter.match_to(read.sequence).trimmed(read)
assert trimmed.name == "seq"
assert trimmed.sequence == "CCCCC"
def test_info_record():
adapter = BackAdapter(
sequence="GAACTCCAGTCACNNNNN",
max_errors=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True,
name="Foo",
)
read = Sequence(name="abc", sequence="CCCCAGAACTACAGTCCCGGC")
am = RemoveAfterMatch(
astart=0,
astop=17,
rstart=5,
rstop=21,
score=15,
errors=2,
adapter=adapter,
sequence=read.sequence,
)
assert am.get_info_records(read) == [
[
"",
2,
5,
21,
"CCCCA",
"GAACTACAGTCCCGGC",
"",
"Foo",
"",
"",
"",
]
]
def test_random_match_probabilities():
a = BackAdapter("A", max_errors=0.1).create_statistics()
assert a.end.random_match_probabilities(0.5) == [1, 0.25]
assert a.end.random_match_probabilities(0.2) == [1, 0.4]
for s in ("ACTG", "XMWH"):
a = BackAdapter(s, max_errors=0.1).create_statistics()
assert a.end.random_match_probabilities(0.5) == [
1,
0.25,
0.25**2,
0.25**3,
0.25**4,
]
assert a.end.random_match_probabilities(0.2) == [
1,
0.4,
0.4 * 0.1,
0.4 * 0.1 * 0.4,
0.4 * 0.1 * 0.4 * 0.1,
]
a = FrontAdapter("GTCA", max_errors=0.1).create_statistics()
assert a.end.random_match_probabilities(0.5) == [
1,
0.25,
0.25**2,
0.25**3,
0.25**4,
]
assert a.end.random_match_probabilities(0.2) == [
1,
0.4,
0.4 * 0.1,
0.4 * 0.1 * 0.4,
0.4 * 0.1 * 0.4 * 0.1,
]
def test_add_adapter_statistics():
stats = BackAdapter("A", name="name", max_errors=0.1).create_statistics()
end_stats = stats.end
end_stats.adjacent_bases["A"] = 7
end_stats.adjacent_bases["C"] = 19
end_stats.adjacent_bases["G"] = 23
end_stats.adjacent_bases["T"] = 42
end_stats.adjacent_bases[""] = 45
end_stats.errors[10][0] = 100
end_stats.errors[10][1] = 11
end_stats.errors[10][2] = 3
end_stats.errors[20][0] = 600
end_stats.errors[20][1] = 66
end_stats.errors[20][2] = 6
stats2 = BackAdapter("A", name="name", max_errors=0.1).create_statistics()
end_stats2 = stats2.end
end_stats2.adjacent_bases["A"] = 43
end_stats2.adjacent_bases["C"] = 31
end_stats2.adjacent_bases["G"] = 27
end_stats2.adjacent_bases["T"] = 8
end_stats2.adjacent_bases[""] = 5
end_stats2.errors[10][0] = 234
end_stats2.errors[10][1] = 14
end_stats2.errors[10][3] = 5
end_stats2.errors[15][0] = 90
end_stats2.errors[15][1] = 17
end_stats2.errors[15][2] = 2
stats += stats2
r = stats.end
assert r.adjacent_bases == {"A": 50, "C": 50, "G": 50, "T": 50, "": 50}
assert r.errors == {
10: {0: 334, 1: 25, 2: 3, 3: 5},
15: {0: 90, 1: 17, 2: 2},
20: {0: 600, 1: 66, 2: 6},
}
def test_linked_matches_property():
"""Accessing matches property of non-anchored linked adapters"""
# Issue #265
front_adapter = FrontAdapter("GGG")
back_adapter = BackAdapter("TTT")
la = LinkedAdapter(
front_adapter,
back_adapter,
front_required=False,
back_required=False,
name="name",
)
assert la.match_to("AAAATTTT").score == 3
@pytest.mark.parametrize("adapter_class", [PrefixAdapter, SuffixAdapter])
def test_no_indels_empty_read(adapter_class):
# Issue #376
adapter = adapter_class("ACGT", indels=False)
adapter.match_to("")
def test_prefix_match_with_n_wildcard_in_read():
adapter = PrefixAdapter("NNNACGT", indels=False)
match = adapter.match_to("TTTACGTAAAA")
assert match is not None and (0, 7) == (match.rstart, match.rstop)
match = adapter.match_to("NTTACGTAAAA")
assert match is not None and (0, 7) == (match.rstart, match.rstop)
def test_suffix_match_with_n_wildcard_in_read():
adapter = SuffixAdapter("ACGTNNN", indels=False)
match = adapter.match_to("TTTTACGTTTT")
assert match is not None and (4, 11) == (match.rstart, match.rstop)
match = adapter.match_to("TTTTACGTCNC")
assert match is not None and (4, 11) == (match.rstart, match.rstop)
def test_multiple_adapters():
a1 = BackAdapter("GTAGTCCCGC")
a2 = BackAdapter("GTAGTCCCCC")
ma = MultipleAdapters([a1, a2])
match = ma.match_to("ATACCCCTGTAGTCCCC")
assert match.adapter is a2
def test_indexed_prefix_adapters():
adapters = [
PrefixAdapter("GAAC", indels=False),
PrefixAdapter("TGCT", indels=False),
]
ma = IndexedPrefixAdapters(adapters)
match = ma.match_to("GAACTT")
assert match.adapter is adapters[0]
match = ma.match_to("TGCTAA")
assert match.adapter is adapters[1]
assert ma.match_to("GGGGGGG") is None
def test_indexed_prefix_adapters_incorrect_type():
with pytest.raises(ValueError):
IndexedPrefixAdapters(
[
PrefixAdapter("GAAC", indels=False),
SuffixAdapter("TGCT", indels=False),
]
)
def test_indexed_very_similar(caplog):
IndexedPrefixAdapters(
[
PrefixAdapter("GAAC", max_errors=1, indels=False),
PrefixAdapter("GAAG", max_errors=1, indels=False),
]
)
assert "cannot be assigned uniquely" in caplog.text
def test_indexed_too_high_k():
with pytest.raises(ValueError) as e:
IndexedPrefixAdapters(
[
PrefixAdapter("ACGTACGT", max_errors=3, indels=False),
PrefixAdapter("AAGGTTCC", max_errors=2, indels=False),
]
)
assert "Error rate too high" in e.value.args[0]
def test_indexed_suffix_adapters():
adapters = [
SuffixAdapter("GAAC", indels=False),
SuffixAdapter("TGCT", indels=False),
]
ma = IndexedSuffixAdapters(adapters)
match = ma.match_to("TTGAAC")
assert match.adapter is adapters[0]
match = ma.match_to("AATGCT")
assert match.adapter is adapters[1]
def test_indexed_suffix_adapters_incorrect_type():
with pytest.raises(ValueError):
IndexedSuffixAdapters(
[
SuffixAdapter("GAAC", indels=False),
PrefixAdapter("TGCT", indels=False),
]
)
def test_multi_prefix_adapter_with_indels():
adapters = [
PrefixAdapter("GTAC", max_errors=1, indels=True),
PrefixAdapter("TGCT", max_errors=1, indels=True),
]
ma = IndexedPrefixAdapters(adapters)
match = ma.match_to("GATACGGG")
assert match.adapter is adapters[0]
match = ma.match_to("TAGCTAA")
assert match.adapter is adapters[1]
def test_indexed_prefix_adapters_with_n_wildcard():
sequence = "GGTCCAGA"
ma = IndexedPrefixAdapters([PrefixAdapter(sequence, max_errors=1, indels=False)])
for i in range(len(sequence)):
# N in the read should be counted as mismatch
t = sequence[:i] + "N" + sequence[i + 1 :] + "TGCT"
result = ma.match_to(t)
assert isinstance(result, RemoveBeforeMatch)
assert (result.rstart, result.rstop) == (0, 8)
assert result.errors == 1
assert result.score == 6
| 27.780645
| 88
| 0.618517
| 0
| 0
| 0
| 0
| 211
| 0.016334
| 0
| 0
| 2,150
| 0.166434
|
66992cf30daf9b3de5a678f20db0b9dc5b3fafdf
| 7,561
|
py
|
Python
|
archABM/event_model.py
|
vishalbelsare/ArchABM
|
4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873
|
[
"MIT"
] | 8
|
2021-07-19T11:54:00.000Z
|
2022-03-29T01:45:07.000Z
|
archABM/event_model.py
|
vishalbelsare/ArchABM
|
4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873
|
[
"MIT"
] | null | null | null |
archABM/event_model.py
|
vishalbelsare/ArchABM
|
4a5ed9506ba96c38e1f3d7f53d6e469f28fe6873
|
[
"MIT"
] | 1
|
2021-08-19T23:56:56.000Z
|
2021-08-19T23:56:56.000Z
|
import copy
import random
from .parameters import Parameters
class EventModel:
"""Defines an event model, also called "activity"
An event model is defined by these parameters:
* Activity name: :obj:`str`
* Schedule: :obj:`list` of :obj:`tuple` (in minutes :obj:`int`)
* Repetitions range: minimum (:obj:`int`) and maximum (:obj:`int`)
* Duration range: minimum (:obj:`int`) and maximum (:obj:`int`) in minutes
* Other parameters:
* mask efficiency ratio: :obj:`float`
* collective event: :obj:`bool`
* shared event: :obj:`bool`
The schedule defines the allowed periods of time in which an activity can happen.
For example, ``schedule=[(120,180),(240,300)]`` allows people to carry out this activity from
the time ``120`` to ``180`` and also from time ``240`` until ``300``.
Notice that the schedule units are in minutes.
Each activity is limited to a certain duration, and its priority follows
a piecewise linear function, parametrized by:
* ``r``: repeat\ :sub:`min`
* ``R``: repeat\ :sub:`max`
* ``e``: event count
.. math::
Priority(e) =
\\left\{\\begin{matrix}
1-(1-\\alpha)\\cfrac{e}{r}\,,\quad 0 \leq e < r \\\\
\\alpha\\cfrac{R-e}{R-r}\,,\quad r \leq e < R \\
\end{matrix}\\right.
.. tikz:: Priority piecewise linear function
\pgfmathsetmacro{\\N}{10};
\pgfmathsetmacro{\\M}{6};
\pgfmathsetmacro{\\NN}{\\N-1};
\pgfmathsetmacro{\\MM}{\\M-1};
\pgfmathsetmacro{\\repmin}{2.25};
\pgfmathsetmacro{\\repmax}{8.5};
\pgfmathsetmacro{\\a}{2};
\coordinate (A) at (0,\\MM);
\coordinate (B) at (\\NN,0);
\coordinate (C) at (\\repmin, \\a);
\coordinate (D) at (\\repmax, 0);
\coordinate (E) at (\\repmin, 0);
\coordinate (F) at (0, \\a);
\draw[stepx=1,thin, black!20] (0,0) grid (\\N,\\M);
\draw[->, very thick] (0,0) to (\\N,0) node[right] {Event count};
\draw[->, very thick] (0,0) to (0,\\M) node[above] {Priority};
\draw (0.1,0) -- (-0.1, 0) node[anchor=east] {0};
\draw (0, 0.1) -- (0, -0.1);
\draw (\\repmin,0.1) -- (\\repmin,-0.1) node[anchor=north] {$repeat_{min}$};
\draw (\\repmax,0.1) -- (\\repmax,-0.1) node[anchor=north] {$repeat_{max}$};
\draw[ultra thick] (0.1, \\MM) -- (-0.1, \\MM) node[left] {1};
\draw[very thick, black!50, dashed] (C) -- (F) node[left] {$\\alpha$};
\draw[very thick, black!50, dashed] (C) -- (E);
\draw[ultra thick, red] (A) -- (C);
\draw[ultra thick, red] (C) -- (D);
:xscale: 80
:align: left
"""
id: int = -1
params: Parameters
count: int
noise: int
def __init__(self, params: Parameters) -> None:
self.next()
self.id = EventModel.id
self.params = params
self.count = 0
self.noise = None
@classmethod
def reset(cls) -> None:
"""Resets :class:`~archABM.event_model.EventModel` ID."""
EventModel.id = -1
@staticmethod
def next() -> None:
"""Increments one unit the :class:`~archABM.event_model.EventModel` ID."""
EventModel.id += 1
def get_noise(self) -> int:
"""Generates random noise
Returns:
int: noise amount in minutes
"""
if self.noise is None:
m = 15 # minutes # TODO: review hardcoded value
if m == 0:
self.noise = 0
else:
self.noise = random.randrange(m) # minutes
return self.noise
def new(self):
"""Generates a :class:`~archABM.event_model.EventModel` copy, with reset count and noise
Returns:
EventModel: cloned instance
"""
self.count = 0
self.noise = None
return copy.copy(self)
def duration(self, now) -> int:
"""Generates a random duration between :attr:`duration_min` and :attr:`duration_max`.
.. note::
If the generated duration, together with the current timestamp,
exceeds the allowed schedule, the duration is limited to finish
at the scheduled time interval.
The :attr:`noise` attribute is used to model the schedule's time tolerance.
Args:
now (int): current timestamp in minutes
Returns:
int: event duration in minutes
"""
duration = random.randint(self.params.duration_min, self.params.duration_max)
estimated = now + duration
noise = self.get_noise() # minutes
for interval in self.params.schedule:
a, b = interval
if a - noise <= now <= b + noise < estimated:
duration = b + noise - now + 1
break
return duration
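    # Worked example of the schedule clamp in duration() (hypothetical values):
    # with interval (a, b) = (120, 180), noise = 5, now = 170 and a drawn
    # duration of 30, estimated = 200 while b + noise = 185, so
    # a - noise <= now <= b + noise < estimated holds and the duration is
    # clamped to b + noise - now + 1 = 16 minutes.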
def priority(self) -> float:
"""Computes the priority of a certain event.
The priority function follows a piecewise linear function, parametrized by:
* ``r``: repeat\ :sub:`min`
* ``R``: repeat\ :sub:`max`
* ``e``: event count
.. math::
Priority(e) =
\\left\{\\begin{matrix}
1-(1-\\alpha)\\cfrac{e}{r}\,,\quad 0 \leq e < r \\\\
\\alpha\\cfrac{R-e}{R-r}\,,\quad r \leq e < R \\
\end{matrix}\\right.
Returns:
float: priority value [0-1]
"""
alpha = 0.5 # TODO: review hardcoded value
if self.params.repeat_max is None:
return random.uniform(0.0, 1.0)
if self.count == self.params.repeat_max:
return 0.0
if self.count < self.params.repeat_min:
return 1 - (1 - alpha) * self.count / self.params.repeat_min
if self.params.repeat_min == self.params.repeat_max:
return alpha
return alpha * (self.params.repeat_max - self.count) / (self.params.repeat_max - self.params.repeat_min)
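    # Worked example of the piecewise priority above (hardcoded alpha = 0.5,
    # hypothetical repeat_min = 2 and repeat_max = 8):
    #   count = 0 -> 1.0 and count = 1 -> 0.75 (first branch),
    #   count = 2 -> 0.5 and count = 5 -> 0.25 (second branch),
    #   count = 8 -> 0.0 (repeat_max reached).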
def probability(self, now: int) -> float:
"""Wrapper to call the priority function
If the event :attr:`count` is equal to the :attr:`repeat_max` parameters,
it yields a ``0`` probability. Otherwise, it computes the :meth:`priority` function
described above.
Args:
now (int): current timestamp in minutes
Returns:
float: event probability [0-1]
"""
p = 0.0
if self.count == self.params.repeat_max:
return p
noise = self.get_noise() # minutes
for interval in self.params.schedule:
a, b = interval
if a - noise <= now <= b + noise:
p = self.priority()
break
return p
def valid(self) -> bool:
"""Computes whether the event count has reached the :attr:`repeat_max` limit.
It yields ``True``
if :attr:`repeat_max` is ``undefined`` or
if the event :attr:`count` is less than :attr:`repeat_max`.
Otherwise, it yields ``False``.
Returns:
bool: valid event
"""
if self.params.repeat_max is None:
return True
return self.count < self.params.repeat_max
def consume(self) -> None:
"""Increments one unit the event count"""
self.count += 1
# logging.info("Event %s repeated %d out of %d" % (self.name, self.count, self.target))
def supply(self) -> None:
"""Decrements one unit the event count"""
self.count -= 1
| 33.455752
| 112
| 0.547943
| 7,497
| 0.991536
| 0
| 0
| 280
| 0.037032
| 0
| 0
| 5,067
| 0.670149
|
669c0767b2a56157d94adbe410e078a0a3045bd9
| 13,297
|
py
|
Python
|
tests/test_photokit.py
|
oPromessa/osxphotos
|
0d7e324f0262093727147b9f22ed275e962e8725
|
[
"MIT"
] | 656
|
2019-08-14T14:10:44.000Z
|
2022-03-28T15:25:42.000Z
|
tests/test_photokit.py
|
oPromessa/osxphotos
|
0d7e324f0262093727147b9f22ed275e962e8725
|
[
"MIT"
] | 557
|
2019-10-14T19:00:02.000Z
|
2022-03-28T00:48:30.000Z
|
tests/test_photokit.py
|
oPromessa/osxphotos
|
0d7e324f0262093727147b9f22ed275e962e8725
|
[
"MIT"
] | 58
|
2019-12-27T01:39:33.000Z
|
2022-02-26T22:18:49.000Z
|
""" test photokit.py methods """
import os
import pathlib
import tempfile
import pytest
from osxphotos.photokit import (
LivePhotoAsset,
PhotoAsset,
PhotoLibrary,
VideoAsset,
PHOTOS_VERSION_CURRENT,
PHOTOS_VERSION_ORIGINAL,
PHOTOS_VERSION_UNADJUSTED,
)
skip_test = "OSXPHOTOS_TEST_EXPORT" not in os.environ
pytestmark = pytest.mark.skipif(
skip_test, reason="Skip if not running with author's personal library."
)
UUID_DICT = {
"plain_photo": {
"uuid": "C6C712C5-9316-408D-A3C3-125661422DA9",
"filename": "IMG_8844.JPG",
},
"hdr": {"uuid": "DD641004-4E37-4233-AF31-CAA0896490B2", "filename": "IMG_6162.JPG"},
"selfie": {
"uuid": "C925CFDC-FF2B-4E71-AC9D-C669B6453A8B",
"filename": "IMG_1929.JPG",
},
"video": {
"uuid": "F4430659-7B17-487E-8029-8C1ABEBE23DF",
"filename": "IMG_9411.TRIM.MOV",
},
"hasadjustments": {
"uuid": "2F252D2C-C9DE-4BE1-8610-9F968C634D3D",
"filename": "IMG_2860.JPG",
"adjusted_size": 3012634,
"unadjusted_size": 2580058,
},
"slow_mo": {
"uuid": "160447F8-4EB0-4FAE-A26A-3D32EA698F75",
"filename": "IMG_4055.MOV",
},
"live_photo": {
"uuid": "8EC216A2-0032-4934-BD3F-04C6259B3304",
"filename": "IMG_3259.HEIC",
"filename_video": "IMG_3259.mov",
},
"burst": {
"uuid": "CDE4E5D9-1428-41E6-8569-EC0C45FD8E5A",
"filename": "IMG_8196.JPG",
"burst_selected": 4,
"burst_all": 5,
},
"raw+jpeg": {
"uuid": "E3DD04AF-CB65-4D9B-BB79-FF4C955533DB",
"filename": "IMG_1994.JPG",
"raw_filename": "IMG_1994.CR2",
"unadjusted_size": 16128420,
"uti_raw": "com.canon.cr2-raw-image",
"uti": "public.jpeg",
},
}
def test_fetch_uuid():
"""test fetch_uuid"""
uuid = UUID_DICT["plain_photo"]["uuid"]
filename = UUID_DICT["plain_photo"]["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert isinstance(photo, PhotoAsset)
def test_plain_photo():
"""test plain_photo"""
uuid = UUID_DICT["plain_photo"]["uuid"]
filename = UUID_DICT["plain_photo"]["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert photo.original_filename == filename
assert photo.raw_filename is None
assert photo.isphoto
assert not photo.ismovie
def test_raw_plus_jpeg():
"""test RAW+JPEG"""
uuid = UUID_DICT["raw+jpeg"]["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert photo.original_filename == UUID_DICT["raw+jpeg"]["filename"]
assert photo.raw_filename == UUID_DICT["raw+jpeg"]["raw_filename"]
assert photo.uti_raw() == UUID_DICT["raw+jpeg"]["uti_raw"]
assert photo.uti() == UUID_DICT["raw+jpeg"]["uti"]
def test_hdr():
"""test hdr"""
uuid = UUID_DICT["hdr"]["uuid"]
filename = UUID_DICT["hdr"]["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert photo.original_filename == filename
assert photo.hdr
def test_burst():
"""test burst and burstid"""
test_dict = UUID_DICT["burst"]
uuid = test_dict["uuid"]
filename = test_dict["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert photo.original_filename == filename
assert photo.burst
assert photo.burstid
# def test_selfie():
# """ test selfie """
# uuid = UUID_DICT["selfie"]["uuid"]
# filename = UUID_DICT["selfie"]["filename"]
# lib = PhotoLibrary()
# photo = lib.fetch_uuid(uuid)
# assert photo.original_filename == filename
# assert photo.selfie
def test_video():
"""test ismovie"""
uuid = UUID_DICT["video"]["uuid"]
filename = UUID_DICT["video"]["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert isinstance(photo, VideoAsset)
assert photo.original_filename == filename
assert photo.ismovie
assert not photo.isphoto
def test_slow_mo():
"""test slow_mo"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
filename = test_dict["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
assert isinstance(photo, VideoAsset)
assert photo.original_filename == filename
assert photo.ismovie
assert photo.slow_mo
assert not photo.isphoto
### PhotoAsset
def test_export_photo_original():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
def test_export_photo_unadjusted():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
def test_export_photo_current():
"""test PhotoAsset.export"""
test_dict = UUID_DICT["hasadjustments"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["adjusted_size"]
def test_export_photo_raw():
"""test PhotoAsset.export for raw component"""
test_dict = UUID_DICT["raw+jpeg"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, raw=True)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["raw_filename"]
assert export_path.stem == pathlib.Path(filename).stem
assert export_path.stat().st_size == test_dict["unadjusted_size"]
### VideoAsset
def test_export_video_original():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_video_unadjusted():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_video_current():
"""test VideoAsset.export"""
test_dict = UUID_DICT["video"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_CURRENT)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
### Slow-Mo VideoAsset
def test_export_slow_mo_original():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_slow_mo_unadjusted():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
def test_export_slow_mo_current():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_CURRENT)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
### LivePhotoAsset
def test_export_live_original():
"""test LivePhotoAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_ORIGINAL)
for f in export_path:
filepath = pathlib.Path(f)
assert filepath.is_file()
filename = test_dict["filename"]
assert filepath.stem == pathlib.Path(filename).stem
def test_export_live_unadjusted():
"""test LivePhotoAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
for file in export_path:
filepath = pathlib.Path(file)
assert filepath.is_file()
filename = test_dict["filename"]
assert filepath.stem == pathlib.Path(filename).stem
def test_export_live_current():
"""test LivePhotAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_CURRENT)
for file in export_path:
filepath = pathlib.Path(file)
assert filepath.is_file()
filename = test_dict["filename"]
assert filepath.stem == pathlib.Path(filename).stem
def test_export_live_current_just_photo():
"""test LivePhotAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, photo=True, video=False)
assert len(export_path) == 1
assert export_path[0].lower().endswith(".heic")
def test_export_live_current_just_video():
"""test LivePhotAsset.export"""
test_dict = UUID_DICT["live_photo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, photo=False, video=True)
assert len(export_path) == 1
assert export_path[0].lower().endswith(".mov")
def test_fetch_burst_uuid():
"""test fetch_burst_uuid"""
test_dict = UUID_DICT["burst"]
uuid = test_dict["uuid"]
filename = test_dict["filename"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
bursts_selected = lib.fetch_burst_uuid(photo.burstid)
assert len(bursts_selected) == test_dict["burst_selected"]
assert isinstance(bursts_selected[0], PhotoAsset)
bursts_all = lib.fetch_burst_uuid(photo.burstid, all=True)
assert len(bursts_all) == test_dict["burst_all"]
assert isinstance(bursts_all[0], PhotoAsset)
| 31.360849
| 88
| 0.670828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,035
| 0.228247
|
669c4ded1d39066ae7e38bea807e79c4ad3272ab
| 2,764
|
py
|
Python
|
parse_json_script/lib_parse_json.py
|
amane-uehara/fitbit-fetcher
|
2a949016933dbcac5f949c8b552c7998b2aadd8c
|
[
"MIT"
] | null | null | null |
parse_json_script/lib_parse_json.py
|
amane-uehara/fitbit-fetcher
|
2a949016933dbcac5f949c8b552c7998b2aadd8c
|
[
"MIT"
] | null | null | null |
parse_json_script/lib_parse_json.py
|
amane-uehara/fitbit-fetcher
|
2a949016933dbcac5f949c8b552c7998b2aadd8c
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
def detail_base_item_list():
return [
'distance',
'elevation',
'floors',
'heart',
'minutesFairlyActive',
'minutesLightlyActive',
'minutesSedentary',
'minutesVeryActive',
'steps'
]
def read_json_file(filename):
if not os.path.isfile(filename):
sys.exit('Error: file:`' + filename + '` not found')
raw_file = open(filename, 'r')
data = json.load(raw_file)
raw_file.close()
return data
def parse_item(raw_root, item, grad, yyyymmdd):
filename = os.path.join(raw_root, item + '_' + grad, yyyymmdd + '.json')
data = read_json_file(filename)
tmp = data['activities-' + item + '-intraday']['dataset']
result = {}
for d in tmp:
hhmm = d['time'].replace(':', '')[0:4]
result[hhmm] = d
del result[hhmm]['time']
return result
def parse_sleep(raw_root, grad, yyyymmdd):
filename = os.path.join(raw_root, 'sleep_' + grad, yyyymmdd + '.json')
data = read_json_file(filename)
tmp = []
sleep = data['sleep']
for part in sleep:
tmp = tmp + part['minuteData']
result = {}
for d in tmp:
hhmm = d['dateTime'].replace(':', '')[0:4]
result[hhmm] = d
del result[hhmm]['dateTime']
return result
def hhmm_list(grad):
result = []
if grad == '1m':
for h in range(24):
for m in range(60):
result.append('%02d%02d' % (h,m))
elif grad == '15m':
for h in range(24):
for m15 in range(4):
result.append('%02d%02d' % (h,m15*15))
return result
def item_join(item_dict, grad):
    result = {}
for hhmm in hhmm_list(grad):
tmp = {}
for key in item_dict.keys():
if hhmm in item_dict[key].keys():
tmp[key] = item_dict[key][hhmm]
else:
tmp[key] = {}
result[hhmm] = tmp
return result
def simplify(joined_dict, grad, yyyymmdd):
item_list = detail_base_item_list()
item_list.append('sleep')
item_list.remove('distance')
result = []
for hhmm in hhmm_list(grad):
d = joined_dict[hhmm]
tmp = {}
tmp['dt'] = yyyymmdd + hhmm + '00'
for item in item_list:
tmp[item] = int(d[item]['value']) if ('value' in d[item].keys()) else ''
tmp['distance'] = float(d['distance']['value']) if ('value' in d['distance'].keys()) else ''
tmp['calories_level'] = int( d['calories']['level']) if ('level' in d['calories'].keys()) else ''
tmp['calories_mets' ] = int( d['calories']['mets' ]) if ('mets' in d['calories'].keys()) else ''
tmp['calories_value'] = float(d['calories']['value']) if ('value' in d['calories'].keys()) else ''
# mile to meter
tmp['distance'] = round(tmp['distance'] * 1609.344, 4)
tmp['calories_value'] = round(tmp['calories_value'], 4)
result.append(tmp)
return result
| 23.827586
| 102
| 0.599132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 609
| 0.220333
|
669d3d5f4966f2fc9848beb0d7bd023a928904e0
| 4,251
|
py
|
Python
|
utils/tfds_preprocess.py
|
chansoopark98/tf_keras-Unknown-grasping
|
be0f68280ba0b293940a08732fd4a31e89a272cd
|
[
"MIT"
] | null | null | null |
utils/tfds_preprocess.py
|
chansoopark98/tf_keras-Unknown-grasping
|
be0f68280ba0b293940a08732fd4a31e89a272cd
|
[
"MIT"
] | null | null | null |
utils/tfds_preprocess.py
|
chansoopark98/tf_keras-Unknown-grasping
|
be0f68280ba0b293940a08732fd4a31e89a272cd
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import random
from utils.dataset_processing import grasp, image
import matplotlib.pyplot as plt
dataset_path = './tfds/'
train_data, meta = tfds.load('Jacquard', split='train', with_info=True, shuffle_files=False)
BATCH_SIZE = 1
number_train = meta.splits['train'].num_examples
output_size = 300
def preprocess(sample):
tfds_rgb = sample['rgb']
tfds_depth = sample['depth']
tfds_box = sample['box']
return (tfds_rgb, tfds_depth, tfds_box)
def augment(tfds_rgb, tfds_depth, tfds_box):
# get center
c = output_size // 2
# rotate box
rotations = [0, np.pi / 2, 2 * np.pi / 2, 3 * np.pi / 2]
rot = random.choice(rotations)
zoom_factor = np.random.uniform(0.5, 1.0)
# zoom box
tfds_box = grasp.GraspRectangles.load_from_tensor(tfds_box)
tfds_box.to_array()
tfds_box.rotate(rot, (c, c))
tfds_box.zoom(zoom_factor, (c, c))
pos_img, ang_img, width_img = tfds_box.draw((output_size, output_size))
width_img = np.clip(width_img, 0.0, output_size /2 ) / (output_size / 2)
cos = np.cos(2 * ang_img)
sin = np.sin(2 * ang_img)
pos_img = tf.expand_dims(pos_img, axis=-1)
cos = tf.expand_dims(cos, axis=-1)
sin = tf.expand_dims(sin, axis=-1)
width_img = tf.expand_dims(width_img, axis=-1)
output = tf.concat([pos_img, cos, sin, width_img], axis=-1)
# input data
rgb_img = image.Image.from_tensor(tfds_rgb)
rgb_img.rotate(rot)
rgb_img.zoom(zoom_factor)
rgb_img.resize((output_size, output_size))
rgb_img.normalise()
# Depth
depth_img = image.DepthImage.from_tensor(tfds_depth)
depth_img.rotate(rot)
depth_img.normalise()
depth_img.zoom(zoom_factor)
depth_img.resize((output_size, output_size))
input = tf.concat([rgb_img, depth_img], axis=-1)
input = tf.cast(input, tf.float64)
return (input, output)
train_data = train_data.map(preprocess)
# train_data = train_data.map(augment)
# augment returns two tensors (input, output), so py_function needs two output dtypes
train_data = train_data.map(lambda tfds_rgb, tfds_depth, tfds_box: tf.py_function(augment, [tfds_rgb, tfds_depth, tfds_box], [tf.float64, tf.float64]))
# six panels are drawn per figure below
rows = 1
cols = 6
train_data = train_data.take(100)
for input, output in train_data:
# pos_img = label[0]
# cos = label[1]
# sin = label[2]
# width_img = label[3]
fig = plt.figure()
ax0 = fig.add_subplot(rows, cols, 1)
ax0.imshow(output[0][:, :, 0])
ax0.set_title('pos_img')
ax0.axis("off")
ax1 = fig.add_subplot(rows, cols, 2)
ax1.imshow(output[0][:, :, 1])
ax1.set_title('cos')
ax1.axis("off")
ax1 = fig.add_subplot(rows, cols, 3)
ax1.imshow(output[0][:, :, 2])
ax1.set_title('sin')
ax1.axis("off")
ax1 = fig.add_subplot(rows, cols, 4)
ax1.imshow(output[0][:, :, 3])
ax1.set_title('width')
ax1.axis("off")
    ax2 = fig.add_subplot(rows, cols, 5)
    ax2.imshow(input[0][:, :, :3])
    ax2.set_title('rgb')
    ax2.axis("off")
    ax3 = fig.add_subplot(rows, cols, 6)
    ax3.imshow(input[0][:, :, 3:])
    ax3.set_title('depth')
    ax3.axis("off")
# q_img, ang_img, width_img = post_processing(q_img=pos_img,
# cos_img=cos,
# sin_img=sin,
# width_img=width_img)
# ax3 = fig.add_subplot(rows, cols, 9)
# ax3.imshow(q_img)
# ax3.set_title('q_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 10)
# ax3.imshow(ang_img)
# ax3.set_title('ang_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 11)
# ax3.imshow(width_img)
# ax3.set_title('width_img')
# ax3.axis("off")
# ax3 = fig.add_subplot(rows, cols, 12)
# ax3.imshow(inpaint_depth)
# ax3.set_title('from_pcd_inpaint')
# ax3.axis("off")
# s = evaluation.calculate_iou_match(grasp_q = q_img,
# grasp_angle = ang_img,
# ground_truth_bbs = gtbbs,
# no_grasps = 3,
# grasp_width = width_img,
# threshold=0.25)
# print('iou results', s)
plt.show()
| 26.735849
| 139
| 0.604799
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,252
| 0.294519
|
669f60ed987d448932641383a9784e17ffb52883
| 836
|
py
|
Python
|
tests/scheduler_test.py
|
peng4217/scylla
|
aa5133d7c6d565c95651fc75b26ad605da0982cd
|
[
"Apache-2.0"
] | 3,556
|
2018-04-28T22:59:40.000Z
|
2022-03-28T22:20:07.000Z
|
tests/scheduler_test.py
|
peng4217/scylla
|
aa5133d7c6d565c95651fc75b26ad605da0982cd
|
[
"Apache-2.0"
] | 120
|
2018-05-20T11:49:00.000Z
|
2022-03-07T00:08:55.000Z
|
tests/scheduler_test.py
|
peng4217/scylla
|
aa5133d7c6d565c95651fc75b26ad605da0982cd
|
[
"Apache-2.0"
] | 518
|
2018-05-27T01:42:25.000Z
|
2022-03-25T12:38:32.000Z
|
import pytest
from scylla.scheduler import Scheduler, cron_schedule
@pytest.fixture
def scheduler():
return Scheduler()
def test_start(mocker, scheduler):
process_start = mocker.patch('multiprocessing.Process.start')
thread_start = mocker.patch('threading.Thread.start')
scheduler.start()
process_start.assert_called_once()
thread_start.assert_called()
def test_cron_schedule(mocker, scheduler):
feed_providers = mocker.patch('scylla.scheduler.Scheduler.feed_providers')
cron_schedule(scheduler, only_once=True)
feed_providers.assert_called_once()
def test_feed_providers(mocker, scheduler):
pass
# TODO: mock Queue.put or find other solutions
# queue_put = mocker.patch('multiprocessing.Queue.put')
#
# scheduler.feed_providers()
#
# queue_put.assert_called()
| 23.885714
| 78
| 0.744019
| 0
| 0
| 0
| 0
| 55
| 0.065789
| 0
| 0
| 256
| 0.30622
|
669ffe2b5e6215275de00b66a4a28e352cc9a091
| 2,063
|
py
|
Python
|
ch16_ex.py
|
DexHunter/Think-Python-book-exercise-solutions
|
d0abae261eda1dca99043e17e8a1e614caad2140
|
[
"CC-BY-4.0"
] | 24
|
2019-05-07T15:11:28.000Z
|
2022-03-02T04:50:28.000Z
|
ch16_ex.py
|
Dekzu/Think-Python-book-exercise-solutions
|
d0abae261eda1dca99043e17e8a1e614caad2140
|
[
"CC-BY-4.0"
] | null | null | null |
ch16_ex.py
|
Dekzu/Think-Python-book-exercise-solutions
|
d0abae261eda1dca99043e17e8a1e614caad2140
|
[
"CC-BY-4.0"
] | 19
|
2019-08-05T20:59:04.000Z
|
2022-03-07T05:13:32.000Z
|
class Time:
'''Represents the time of day.
attributes: hour, minute, second
'''
def print_time(t):
print ('(%.2d:%.2d:%.2d)' % (t.hour, t.minute, t.second))
def is_after(t1, t2):
return (t1.hour, t1.minute, t1.second) > (t2.hour, t2.minute, t2.second)
def mul_time(t, n):
    '''Multiply time t by n
n: int
Returns a time tr
'''
return int_to_time(time_to_int(t) * n)
def add_time(t1, t2):
sum = Time()
sum.hour = t1.hour + t2.hour
sum.minute = t1.minute + t2.minute
sum.second = t1.second + t2.second
while sum.second >= 60:
sum.second -= 60
sum.minute += 1
while sum.minute >= 60:
sum.minute -= 60
sum.hour += 1
return sum
def increment(t, sec):
    '''Write an increment function that does not contain any loops.

    # For the second exercise (a pure version of increment), you can simply
    # create a new object with copy.deepcopy(t) and modify the copy instead;
    # that part is straightforward, so it is skipped here.
    idea: using divmod
    sec: seconds (int)
    '''
    t.second += sec
    inc_min, t.second = divmod(t.second, 60)
    t.minute += inc_min
    inc_hour, t.minute = divmod(t.minute, 60)
    t.hour += inc_hour
    return t
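# Example use of increment (no loops, thanks to divmod):
#     t = Time(); t.hour, t.minute, t.second = 9, 45, 30
#     increment(t, 100)    # 100 s = 1 min 40 s -> t becomes 09:47:10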
def int_to_time(seconds):
"""Makes a new Time object.
seconds: int seconds since midnight.
"""
time = Time()
minutes, time.second = divmod(seconds, 60)
time.hour, time.minute = divmod(minutes, 60)
return time
def time_to_int(time):
"""Computes the number of seconds since midnight.
time: Time object.
"""
minutes = time.hour * 60 + time.minute
seconds = minutes * 60 + time.second
return seconds
if __name__ == '__main__':
t = Time()
t.hour = 17
t.minute = 43
t.second = 6
print_time(mul_time(t, 3))
t2 = Time()
t2.hour = 17
t2.minute = 44
t2.second = 5
print_time(t)
start = Time()
start.hour = 9
start.minute =45
start.second = 0
duration = Time()
duration.hour = 1
duration.minute = 35
duration.second = 0
done = add_time(start, duration)
print_time(done)
print( is_after(t, t2) )
| 20.838384
| 248
| 0.652448
| 84
| 0.040717
| 0
| 0
| 0
| 0
| 0
| 0
| 663
| 0.321377
|
66a0075c55665ddddee62ce3c5592465d9e8004b
| 200
|
py
|
Python
|
knowit/providers/__init__.py
|
labrys/knowit
|
eea9ac18e38c930230cf81b5dca4a9af9fb10d4e
|
[
"MIT"
] | null | null | null |
knowit/providers/__init__.py
|
labrys/knowit
|
eea9ac18e38c930230cf81b5dca4a9af9fb10d4e
|
[
"MIT"
] | null | null | null |
knowit/providers/__init__.py
|
labrys/knowit
|
eea9ac18e38c930230cf81b5dca4a9af9fb10d4e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Provider package."""
from __future__ import unicode_literals
from .enzyme import EnzymeProvider
from .ffmpeg import FFmpegProvider
from .mediainfo import MediaInfoProvider
| 25
| 40
| 0.785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.23
|
66a03a53035c1596664c882408ebdf47aa3afc54
| 304
|
py
|
Python
|
python-mundo3/ex077.py
|
abm-astro/estudos-python
|
c0dcd71489e528d445efa25d4986bf2fd08f8fe6
|
[
"MIT"
] | 1
|
2021-08-15T18:18:43.000Z
|
2021-08-15T18:18:43.000Z
|
python-mundo3/ex077.py
|
abm-astro/estudos-python
|
c0dcd71489e528d445efa25d4986bf2fd08f8fe6
|
[
"MIT"
] | null | null | null |
python-mundo3/ex077.py
|
abm-astro/estudos-python
|
c0dcd71489e528d445efa25d4986bf2fd08f8fe6
|
[
"MIT"
] | null | null | null |
list = ('APRENDER', 'PROGRAMAR', 'LINGUAGEM', 'PYTHON', 'CURSO', 'GRATIS', 'ESTUDAR',
'PRATICAR', 'TRABALHAR', 'MERCADO', 'PROGRAMADOR', 'FUTURO')
for p in list:
    print(f'\nIn the word {p} we have: ', end='')
for l in p:
if l.lower() in 'aeiou':
print(l.lower(), end=' ')
| 38
| 85
| 0.546053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 154
| 0.506579
|
66a1405cb275e20463fb6f972194333959f1c8d7
| 1,449
|
py
|
Python
|
src/DataParser/odmdata/variable.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | 2
|
2015-02-25T01:12:51.000Z
|
2017-02-08T22:54:41.000Z
|
src/DataParser/odmdata/variable.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | 48
|
2015-01-12T18:01:56.000Z
|
2021-06-10T20:05:26.000Z
|
src/DataParser/odmdata/variable.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | null | null | null |
from sqlalchemy import *
from sqlalchemy.orm import relationship
from base import Base
from unit import Unit
class Variable(Base):
__tablename__ = 'Variables'
id = Column('VariableID', Integer, primary_key=True)
code = Column('VariableCode', String(255), nullable=False)
name = Column('VariableName', String(255), nullable=False)
speciation = Column('Speciation', String(255), nullable=False)
variable_unit_id = Column('VariableUnitsID', Integer, ForeignKey('Units.UnitsID'), nullable=False)
sample_medium = Column('SampleMedium', String(255), nullable=False)
value_type = Column('ValueType', String(255), nullable=False)
is_regular = Column('IsRegular', Boolean, nullable=False)
time_support = Column('TimeSupport', Float, nullable=False)
time_unit_id = Column('TimeUnitsID', Integer, ForeignKey('Units.UnitsID'), nullable=False)
data_type = Column('DataType', String(255), nullable=False)
general_category = Column('GeneralCategory', String(255), nullable=False)
no_data_value = Column('NoDataValue', Float, nullable=False)
# relationships
variable_unit = relationship(Unit, primaryjoin=(
"Unit.id==Variable.variable_unit_id")) # <-- Uses class attribute names, not table column names
time_unit = relationship(Unit, primaryjoin=("Unit.id==Variable.time_unit_id"))
def __repr__(self):
return "<Variable('%s', '%s', '%s')>" % (self.id, self.code, self.name)
| 45.28125
| 102
| 0.718427
| 1,336
| 0.922015
| 0
| 0
| 0
| 0
| 0
| 0
| 381
| 0.26294
|
66a4535ff16536c58c62bd0252d04c6087d6613d
| 7,751
|
py
|
Python
|
pandas/pandastypes.py
|
pyxll/pyxll-examples
|
e8a1cba1ffdb346191f0c80bea6877cbe0291957
|
[
"Unlicense"
] | 93
|
2015-04-27T14:44:02.000Z
|
2022-03-03T13:14:49.000Z
|
pandas/pandastypes.py
|
samuelpedrini/pyxll-examples
|
ce7f839b4ff4f4032b78dffff2357f3feaadc3a1
|
[
"Unlicense"
] | 4
|
2019-12-13T11:32:17.000Z
|
2022-03-03T14:07:02.000Z
|
pandas/pandastypes.py
|
samuelpedrini/pyxll-examples
|
ce7f839b4ff4f4032b78dffff2357f3feaadc3a1
|
[
"Unlicense"
] | 53
|
2015-04-27T14:44:14.000Z
|
2022-01-23T05:26:52.000Z
|
"""
Custom excel types for pandas objects (eg dataframes).
For information about custom types in PyXLL see:
https://www.pyxll.com/docs/udfs.html#custom-types
For information about pandas see:
http://pandas.pydata.org/
Including this module in your pyxll config adds the following custom types that can
be used as return and argument types to your pyxll functions:
- dataframe
- series
- series_t
Dataframes with multi-index indexes or columns will be returned with the columns and
index values in the resulting array. For normal indexes, the index will only be
returned as part of the resulting array if the index is named.
eg::
from pyxll import xl_func
import pandas as pa
@xl_func("int rows, int cols, float value: dataframe")
def make_empty_dataframe(rows, cols, value):
# create an empty dataframe
df = pa.DataFrame({chr(c + ord('A')) : value for c in range(cols)}, index=range(rows))
# return it. The custom type will convert this to a 2d array that
# excel will understand when this function is called as an array
# function.
return df
@xl_func("dataframe df, string col: float")
def sum_column(df, col):
return df[col].sum()
In excel (use Ctrl+Shift+Enter to enter an array formula)::
=make_empty_dataframe(3, 3, 100)
>> A B C
>> 100 100 100
>> 100 100 100
>> 100 100 100
=sum_column(A1:C4, "A")
>> 300
"""
from pyxll import xl_return_type, xl_arg_type
import datetime as dt
import pandas as pa
import numpy as np
import pytz
try:
import pywintypes
except ImportError:
pywintypes = None
@xl_return_type("dataframe", "var")
def _dataframe_to_var(df):
"""return a list of lists that excel can understand"""
if not isinstance(df, pa.DataFrame):
return df
df = df.applymap(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
index_header = [str(df.index.name)] if df.index.name is not None else []
if isinstance(df.index, pa.MultiIndex):
index_header = [str(x) or "" for x in df.index.names]
if isinstance(df.columns, pa.MultiIndex):
result = [([""] * len(index_header)) + list(z) for z in zip(*list(df.columns))]
for header in result:
for i in range(1, len(header) - 1):
if header[-i] == header[-i-1]:
header[-i] = ""
if index_header:
column_names = [x or "" for x in df.columns.names]
for i, col_name in enumerate(column_names):
result[i][len(index_header)-1] = col_name
if column_names[-1]:
index_header[-1] += (" \ " if index_header[-1] else "") + str(column_names[-1])
num_levels = len(df.columns.levels)
result[num_levels-1][:len(index_header)] = index_header
else:
if index_header and df.columns.name:
index_header[-1] += (" \ " if index_header[-1] else "") + str(df.columns.name)
result = [index_header + list(df.columns)]
if isinstance(df.index, pa.MultiIndex):
prev_ix = None
for ix, row in df.iterrows():
header = list(ix)
if prev_ix:
header = [x if x != px else "" for (x, px) in zip(ix, prev_ix)]
result.append(header + list(row))
prev_ix = ix
elif index_header:
for ix, row in df.iterrows():
result.append([ix] + list(row))
else:
for ix, row in df.iterrows():
result.append(list(row))
return _normalize_dates(result)
@xl_return_type("series", "var")
def _series_to_var(s):
"""return a list of lists that excel can understand"""
if not isinstance(s, pa.Series):
return s
# convert any errors to exceptions so they appear correctly in Excel
s = s.apply(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
result = list(map(list, zip(s.index, s)))
return _normalize_dates(result)
@xl_return_type("series_t", "var")
def _series_to_var_transform(s):
"""return a list of lists that excel can understand"""
if not isinstance(s, pa.Series):
return s
# convert any errors to exceptions so they appear correctly in Excel
s = s.apply(lambda x: RuntimeError() if isinstance(x, float) and np.isnan(x) else x)
result = list(map(list, zip(*zip(s.index, s))))
return _normalize_dates(result)
@xl_arg_type("dataframe", "var")
def _var_to_dataframe(x):
"""return a pandas DataFrame from a list of lists"""
if not isinstance(x, (list, tuple)):
raise TypeError("Expected a list of lists")
x = _fix_pywintypes(x)
columns = x[0]
rows = x[1:]
return pa.DataFrame(list(rows), columns=columns)
@xl_arg_type("series", "var")
def _var_to_series(s):
"""return a pandas Series from a list of lists (arranged vertically)"""
if not isinstance(s, (list, tuple)):
raise TypeError("Expected a list of lists")
s = _fix_pywintypes(s)
keys, values = [], []
for row in s:
if not isinstance(row, (list, tuple)):
raise TypeError("Expected a list of lists")
if len(row) < 2:
raise RuntimeError("Expected rows of length 2 to convert to a pandas Series")
key, value = row[:2]
# skip any empty rows
if key is None and value is None:
continue
keys.append(key)
values.append(value)
return pa.Series(values, index=keys)
@xl_arg_type("series_t", "var")
def _var_to_series_t(s):
"""return a pandas Series from a list of lists (arranged horizontally)"""
if not isinstance(s, (list, tuple)):
raise TypeError("Expected a list of lists")
s = _fix_pywintypes(s)
keys, values = [], []
for row in zip(*s):
if not isinstance(row, (list, tuple)):
raise TypeError("Expected a list of lists")
if len(row) < 2:
raise RuntimeError("Expected rows of length 2 to convert to a pandas Series")
key, value = row[:2]
# skip any empty rows
if key is None and value is None:
continue
keys.append(key)
values.append(value)
return pa.Series(values, index=keys)
def _normalize_dates(data):
"""
    Ensure all date types returned are standard datetimes with a timezone.
pythoncom will fail to convert datetimes to Windows dates without tzinfo.
This is useful if using these functions to convert a dataframe to native
python types for setting to a Range using COM. If only passing objects
to/from python using PyXLL functions then this isn't necessary (but
isn't harmful either).
"""
def normalize_date(x):
if isinstance(x, pa.tslib.NaTType):
return ValueError()
elif isinstance(x, pa.tslib.Timestamp) or isinstance(x, dt.datetime):
return dt.datetime(*x.timetuple()[:6], tzinfo=x.tzinfo or pytz.utc)
elif isinstance(x, dt.date):
return dt.datetime(*x.timetuple()[:3], tzinfo=pytz.utc)
return x
return [[normalize_date(c) for c in r] for r in data]
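# Illustrative example of _normalize_dates (not part of the original module):
# a naive date or datetime gains UTC tzinfo and NaT becomes a ValueError
# instance so that Excel shows an error cell, e.g.
#   _normalize_dates([[dt.date(2020, 1, 1), pa.NaT]])
#   -> [[dt.datetime(2020, 1, 1, 0, 0, tzinfo=pytz.utc), ValueError()]]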
def _fix_pywintypes(data):
"""
Converts any pywintypes.TimeType instances passed in to the
conversion functions into datetime types.
    This is useful if using these functions to convert an Excel Range of
    values to a pandas type, as pandas will crash if called with
    pywintypes.TimeType values.
"""
if pywintypes is None:
return data
def fix_pywintypes(c):
if isinstance(c, pywintypes.TimeType):
return dt.datetime(*c.timetuple()[:6])
return c
return [[fix_pywintypes(c) for c in r] for r in data]
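# --- Hedged usage sketch (not part of the original module) ---
# Assuming PyXLL is configured to load this module, the "series" type
# registered above can be used like the dataframe examples in the module
# docstring; the function below is illustrative only.
#
#   from pyxll import xl_func
#   import pandas as pa
#
#   @xl_func("int n: series")
#   def make_series(n):
#       # _series_to_var converts the result into a two-column
#       # (index, value) array for Excel
#       return pa.Series([float(i) for i in range(n)],
#                        index=["row_%d" % i for i in range(n)])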
| 31.897119
| 95
| 0.632047
| 0
| 0
| 0
| 0
| 4,592
| 0.59244
| 0
| 0
| 3,083
| 0.397755
|
66a463bd296e2375b0d9a6abd3ff5e747d929dcd
| 10,912
|
py
|
Python
|
liveDataApp/views.py
|
subahanii/COVID19-tracker
|
b7d30ff996974755e78393f0777d6cf623c4d654
|
[
"MIT"
] | 7
|
2020-04-28T12:34:42.000Z
|
2021-05-17T06:20:51.000Z
|
liveDataApp/views.py
|
subahanii/COVID19-tracker
|
b7d30ff996974755e78393f0777d6cf623c4d654
|
[
"MIT"
] | 1
|
2020-07-09T18:17:32.000Z
|
2020-07-10T13:56:01.000Z
|
liveDataApp/views.py
|
subahanii/COVID19-tracker
|
b7d30ff996974755e78393f0777d6cf623c4d654
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
import re
from collections import defaultdict as dfd
from .models import *
from datetime import date
from datetime import timedelta
from django.db.models import Sum
from django.db.models import Count
from django.db.models.functions import ExtractDay,ExtractMonth,ExtractYear
today = date.today()
yesterday = today - timedelta(days = 1)
colorList = {
1:"#FF0000",
2:"#FF4040",
3:"#FF4040",
4:"#FF4040",
5:"#FF7474",
6:"#FF7474",
7:"#FF7474",
8:"#FF7474",
9:"#FF7474",
10:"#FF7474",
11:"#FF7474",
12:"#FF7474",
13:"#FF8787",
14:"#FF8787",
15:"#FF8787",
16:"#FF8787",
17:"#FF8787",
18:"#FF8787",
19:"#FF8787",
20:"#FFB3B3",
21:"#FFB3B3",
22:"#FFB3B3",
23:"#FFB3B3",
24:"#FFB3B3",
25:"#FFB3B3",
26:"#FFECEC",
27:"#FFECEC",
28:"#FFECEC",
29:"#FFECEC",
30:"#FFE0E0",
31:"#FFE0E0",
32:"#FFE0E0",
33:"#FFE0E0",
34:"#FFE0E0",
35:"#FFE0E0",
}
stateCode = {
'Andaman and Nicobar Islands': "AN" ,
'Andhra Pradesh': "AP",
'Arunachal Pradesh': "AR",
'Assam': "AS" ,
'Bihar':"BR" ,
'Chandigarh':"CT" ,
'Chhattisgarh': "CH",
'Delhi':"DL" ,
'Dadara & Nagar Havelli': "DN",
'Goa':"GA" ,
'Gujarat': "GJ",
'Haryana': "HR",
'Himachal Pradesh': "HP",
'Jammu and Kashmir': "JK" ,
'Jharkhand': "JH",
'Karnataka': "KA",
'Kerala': "KL",
'Ladakh': "LK",
'Lakshadweep': "LD",
'Madhya Pradesh': "MP",
'Maharashtra':"MH" ,
'Manipur':"MN" ,
'Meghalaya': "ML",
'Mizoram': "MZ",
'Nagaland': "NL",
'Odisha': "OD",
'Puducherry': "PY",
'Punjab': "PB",
'Rajasthan': "RJ",
'Sikkim': "SK",
'Tamil Nadu':"TN" ,
'Telengana': "TS",
'Tripura':"TR" ,
'Uttarakhand': "UK",
'Uttar Pradesh':"UP" ,
'West Bengal':"WB"
}
# Create your views here.
def filter_integer(x):
array = re.findall(r'[0-9]+', x)
return ''.join(array)
def getData():
    # get data directly by scraping the site
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# URL = 'https://www.mohfw.gov.in/'
# page = requests.get(URL)
# soup = BeautifulSoup(page.content, 'html.parser')
# tableData = soup.findAll('div', attrs={'class':'data-table table-responsive'})
# tableData = tableData[0].find('tbody')
# dataList=[]
# for i in tableData.findAll('tr'):
# data=[]
# for j,vlu in enumerate(i.findAll('td')):
# if j==1:
# data.append(vlu.text)
# elif j>1:
# data.append(filter_integer(vlu.text))
# if len(data)>2:
# dataList.append(data)
# total = ['Total number of confirmed cases in India']
# for vlu in dataList[-1]:
# total.append(filter_integer(vlu))
# print(total)
# del dataList[-1]
# #dataList[-1]=total
# for i in range(len(dataList)):
# dataList[i].insert(0, i+1)
# print(dataList)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#get data from database
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
dataList = []
tconfirmCases,tcuredCases,tdeathCases=0,0,0
updateDate=0
for i,vlu in enumerate(dailyData.objects.filter(when__date=date.today()) ):
dataList.append([i+1, vlu.stateName, vlu.confirmedCases, vlu.curedCases, vlu.deathCases])
updateDate = vlu.when
tconfirmCases+=int(vlu.confirmedCases)
tcuredCases+= int(vlu.curedCases)
tdeathCases+= int(vlu.deathCases)
total = ['Total number of confirmed cases in India',tconfirmCases,tcuredCases,tdeathCases]
#print('databse')
#print(total, dataList)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
confirmCases = dfd(list)
for i in dataList:
try:
confirmCases[ stateCode[i[1]] ].append(int(i[2]))
confirmCases[ stateCode[i[1]] ].append(i[1])
confirmCases[ stateCode[i[1]] ].append(stateCode[i[1]])
except:
print("Except from getData()")
sortedConfirmedCases = sorted(confirmCases.items(), key=lambda x: x[1] , reverse=True)
#print(sortedConfirmedCases)
sortedConfirmedCasesList = []
colorData = dict()
colorFill=dict()
c=0
c2=255
radius=32
colorCode=1
for i in sortedConfirmedCases:
sortedConfirmedCasesList.append({
'centered': i[1][2],
'fillKey': i[1][2],
'radius': radius+((i[1][0])//2400)*2,
'state': i[1][1]+","+str(i[1][0])
})
#colorFill[ i[1][2] ] = "rgb("+str(c2)+","+ str(0)+","+str(c) +")"
colorFill[ i[1][2] ] = colorList[colorCode]
colorCode+=1
#print(colorCode)
colorData[ i[1][2] ]={ 'fillKey': i[1][2] }
c+=(i[1][0])//200
radius-=1
colorFill['defaultFill'] = '#dddddd'
return dataList, total,sortedConfirmedCasesList,colorData,colorFill, updateDate
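# getData() returns, in order: the per-state rows
# ([serial, state name, confirmed, cured, deaths]), the country totals,
# the bubble-map entries sorted by confirmed cases, the map colour
# mappings (colorData, colorFill), and the timestamp of the latest update.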
def tripleGraph(data):
dataPoint1,dataPoint2,dataPoint3= [],[],[]
#print(data)
for i in data:
dataPoint1.append({ 'y': int(i[2]), 'label': i[1] ,'indexLabel': i[2] ,'indexLabelFontSize': 10})
dataPoint2.append({ 'y': int(i[3]), 'label': i[1] ,'indexLabel': i[3] ,'indexLabelFontSize': 10})
dataPoint3.append({ 'y': int(i[4]), 'label': i[1] ,'indexLabel': i[4] ,'indexLabelFontSize': 10})
#print(dataPoint1)
#print(dataPoint2)
#print(dataPoint3)
return dataPoint1,dataPoint2,dataPoint3
def getPieData(data):
confirmedPie,curedPie,deathPie = [], [], []
for i in data:
if i[0]==1:
confirmedPie.append({ 'y': i[2], 'name': i[1], 'exploded': 'true' })
curedPie.append({ 'y': i[3], 'name': i[1], 'exploded': 'true' })
deathPie.append({ 'y': i[4], 'name': i[1], 'exploded': 'true' })
else:
confirmedPie.append({ 'y': i[2], 'name': i[1]})
            curedPie.append({ 'y': i[3], 'name': i[1]})
            deathPie.append({ 'y': i[4], 'name': i[1]})
return confirmedPie,curedPie,deathPie
def findNewCases():
todayDataDB = dailyData.objects.filter(when__date=date.today())
yesterdayDataDB = dailyData.objects.filter(when__date=( date.today() - timedelta(days = 1) ))
todayConfirmedData =0
todayCuredData = 0
todayDeathData = 0
yesterdayConfirmedData =0
yesterdayCuredData = 0
yesterdayDeathData = 0
for vlu in todayDataDB:
todayConfirmedData+= int(vlu.confirmedCases)
todayCuredData+= int(vlu.curedCases)
todayDeathData+= int(vlu.deathCases)
for vlu in yesterdayDataDB:
yesterdayConfirmedData+= int(vlu.confirmedCases)
yesterdayCuredData+= int(vlu.curedCases)
yesterdayDeathData+= int(vlu.deathCases)
return (todayConfirmedData - yesterdayConfirmedData),(todayCuredData - yesterdayCuredData),(todayDeathData - yesterdayDeathData)
def getIncrementedData():
dataFromDM = dailyData.objects.values( day=ExtractDay('when'),
month=ExtractMonth('when'),
year = ExtractYear('when') ).annotate(Sum('confirmedCases'),
Sum('curedCases'),
Sum('deathCases'))
dataFromDM= dataFromDM.order_by('month')
#print(dataFromDM)
#print(len(dataFromDM))
incrementedConfirmedCases,incrementedCuredCases, incrementedDeathCases = dfd(int), dfd(int), dfd(int)
temp1, temp2, temp3 = 25435,5000,800
for i in dataFromDM:
d='{}/{}/{}'.format(i['day'],i['month'],i['year'])
incrementedConfirmedCases[d]=(i['confirmedCases__sum'] - temp1)
incrementedCuredCases[d]=(i['curedCases__sum'] - temp2)
incrementedDeathCases[d]=(i['deathCases__sum'] - temp3)
temp1 = i['confirmedCases__sum']
temp2 = i['curedCases__sum']
temp3 = i['deathCases__sum']
#print(i['confirmedCases__sum'],d)
#print(incrementedConfirmedCases)
#print(incrementedCuredCases)
#print(incrementedDeathCases)
dateOfCnfInc ,dataOfCnfInc = list(incrementedConfirmedCases.keys()), list(incrementedConfirmedCases.values())
dateOfCurInc ,dataOfCurInc = list(incrementedCuredCases.keys()), list(incrementedCuredCases.values())
dateOfDthInc ,dataOfDthInc = list(incrementedDeathCases.keys()), list(incrementedDeathCases.values())
return dateOfCnfInc ,dataOfCnfInc,dateOfCurInc ,dataOfCurInc,dateOfDthInc ,dataOfDthInc
def getIncrementedTestData():
todayTests = 1000000
incTestCount = 100000
yesterdayTests = 900000
testIncreamentData = []
try:
todayTests = TestCounter.objects.get( when__date=date.today() )
yesterdayTests = TestCounter.objects.get(when__date=( today - timedelta(days = 1) ))
todayTests = todayTests.tests
#print('---> ',yesterdayTests.tests)
yesterdayTests = yesterdayTests.tests
#print("dhdh")
incTestCount = todayTests - yesterdayTests
except:
print("Except from getIncrementedTestData() ")
temp =1199081
for i in TestCounter.objects.all():
#print(i.tests,str(i.when)[:10] )
testIncreamentData.append({ 'y': i.tests-temp, 'label': str(i.when)[:10] })
temp = i.tests
#print(testIncreamentData)
return testIncreamentData, todayTests, incTestCount
def home(request):
data,total,sortedConfirmedCasesList,colorData,colorFill ,updateDate = getData()
sortedData = sorted(data,key= lambda x: int(x[2]))
#print("sorted data",sortedData)
dataPoint1,dataPoint2,dataPoint3 = tripleGraph(sortedData[12:])
confirmedPie,curedPie,deathPie = getPieData(sortedData[12:])
#print(total)
newConfirmedCases,newCuredCases, newDeathCases = findNewCases()
dateOfCnfInc ,dataOfCnfInc,dateOfCurInc ,dataOfCurInc,dateOfDthInc ,dataOfDthInc = getIncrementedData()
testIncreamentData, todayTests, incTestCount = getIncrementedTestData()
#getIncrementedTestData
visiting = Counter(count1=1)
visiting.save()
visited = Counter.objects.all().count()
# totalTests = TestCounter.objects.get( when__date=date.today() )
# totalTests = totalTests.tests
context= {
'data':data,
'total':total,
'sortedConfirmedCasesList':sortedConfirmedCasesList,
'colorData':colorData,
"totalConf":total[1],
"totalCure":total[2],
"totalDeath":total[3],
'colorFill':colorFill,
'dataPoint1':dataPoint1,
'dataPoint2':dataPoint2,
'dataPoint3':dataPoint3,
'totalAffected':len(data),
'updateDate':updateDate,
'visited':visited,
'confirmedPie':confirmedPie,
'curedPie':curedPie,
'deathPie':deathPie,
'newConfirmedCases':newConfirmedCases,
'newCuredCases':newCuredCases,
'newDeathCases':newDeathCases,
'confirmDataOfLineGraph':[{ 'label': i[0], 'y': i[1] } for i in zip(dateOfCnfInc,dataOfCnfInc)] ,
'curedDataOfLineGraph':[{ 'label': i[0], 'y': i[1] } for i in zip(dateOfCurInc,dataOfCurInc)] ,
'deathDataOfLineGraph':[{ 'label': i[0], 'y': i[1] } for i in zip(dateOfDthInc,dataOfDthInc)] ,
'todayTests':todayTests,
'testIncreamentData':testIncreamentData,
'incTestCount':incTestCount
}
#print(dailyData.objects.filter(when__date=yesterday) )
#print([{ 'label': i[0], 'y': i[1] } for i in zip(dateOfCnfInc,dataOfCnfInc)])
#print('today',today)
return render(request,'home.html',context)
| 26.421308
| 129
| 0.640121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,009
| 0.367394
|
66a65924a1e2768d7469c1f8356205da9b3cbe9a
| 89
|
py
|
Python
|
project/healthcheck.py
|
permallotment/allotment3
|
0eb390086cc8f48ba6817541c6c70c06dfc83058
|
[
"CC0-1.0"
] | null | null | null |
project/healthcheck.py
|
permallotment/allotment3
|
0eb390086cc8f48ba6817541c6c70c06dfc83058
|
[
"CC0-1.0"
] | null | null | null |
project/healthcheck.py
|
permallotment/allotment3
|
0eb390086cc8f48ba6817541c6c70c06dfc83058
|
[
"CC0-1.0"
] | null | null | null |
from django.http import HttpResponse
def health(request):
return HttpResponse("OK")
| 17.8
| 36
| 0.764045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0.044944
|
66a6d482011b0d35775a7523319647c543ff9fb5
| 11,829
|
py
|
Python
|
src/algo/baselines/randomP/randomP.py
|
Lukeeeeee/CE7490-Group-Project-Python
|
840a655bcb8cebbe3d39e5d3f3d68a01936a6283
|
[
"MIT"
] | null | null | null |
src/algo/baselines/randomP/randomP.py
|
Lukeeeeee/CE7490-Group-Project-Python
|
840a655bcb8cebbe3d39e5d3f3d68a01936a6283
|
[
"MIT"
] | null | null | null |
src/algo/baselines/randomP/randomP.py
|
Lukeeeeee/CE7490-Group-Project-Python
|
840a655bcb8cebbe3d39e5d3f3d68a01936a6283
|
[
"MIT"
] | 1
|
2020-10-20T07:06:18.000Z
|
2020-10-20T07:06:18.000Z
|
from src.core import Basic
import networkx as nx
class RandomP(Basic):
def __init__(self, server_list, network_dataset, node_list):
super().__init__()
self.server_list = server_list
self.network_dataset = network_dataset
self.node_list = node_list
    def add_new_primary_node(self, server_list, vp_number):
        if not 0 <= vp_number <= 7:
            print('vp_number illegal')
            return
        q = len(server_list) // (vp_number + 1)
        # index 0 holds the primary copy; the remaining vp_number indices,
        # offset by multiples of q, hold the virtual primary copies
        indices = [0] + [q * k for k in range(1, vp_number + 1)]
        for n in self.node_list:
            # wrap any index that has run past the end of the server list
            indices = [idx if idx <= len(self.server_list) - 1 else 0
                       for idx in indices]
            primary_server = self.server_list[indices[0]]
            self.add_primary_copy_to_server(node_id=n, server=primary_server)
            for idx in indices[1:]:
                self.add_virtual_primary_copy_to_server(
                    node_id=n, server=self.server_list[idx])
            indices = [idx + 1 for idx in indices]
def add_primary_copy_to_server(self, node_id, server):
server.add_node(node_id=node_id, node_type=1, write_freq=1)
def add_virtual_primary_copy_to_server(self,node_id, server):
server.add_node(node_id=node_id, node_type=3, write_freq=1)
def add_non_primary_copy_to_server(self,node_id, server):
server.add_node(node_id=node_id, node_type=2, write_freq=1)
def check_locality(self):
for server in self.server_list:
rp=[]
for node in server.graph:
if server.graph.nodes[node]['node_type'] == 1:
for n in self.network_dataset.graph.neighbors(node):
if not server.graph.has_node(n):
rp.append(n)
for i in rp:
if not server.graph.has_node(i):
self.add_non_primary_copy_to_server(node_id=i, server=server)
def compute_inter_sever_cost(self):
cost = 0
for server in self.server_list:
for node in server.graph:
if server.graph.nodes[node]['node_type'] == 2:
cost = cost + server.graph.nodes[node]['write_freq']
print('Inter-Server Cost: ',cost,)
def check_server_load(self):
server_load_list=[]
for server in self.server_list:
server_load_list.append(len(server.graph))
if max(server_load_list)-min(server_load_list)<=1:
            print('Server loads are balanced.')
def find_primary_server(self,node_id):
for server in self.server_list:
if server.graph.has_node(node_id):
if server.graph.nodes[node_id]['node_type']==1:
print('The primary copy of Node',node_id,'is assigned on server',server.id)
def save_all(self, path):
nx.write_gpickle(self.network_dataset.graph, path + '/dataset_graph.gpickle')
for i in range(len(self.server_list)):
nx.write_gpickle(self.server_list[i].graph, path + '/server_%d.gpickle' % i)
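# --- Hedged usage sketch (illustrative, not part of the original file) ---
# Assuming `servers` is a list of server objects compatible with
# add_node(node_id, node_type, write_freq) and `dataset` wraps a networkx
# graph of the social network, a typical run could look like:
#
#   algo = RandomP(server_list=servers, network_dataset=dataset,
#                  node_list=list(dataset.graph.nodes))
#   algo.add_new_primary_node(servers, vp_number=2)
#   algo.check_locality()
#   algo.compute_inter_sever_cost()
#   algo.check_server_load()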
| 37.792332
| 95
| 0.481951
| 11,777
| 0.995604
| 0
| 0
| 0
| 0
| 0
| 0
| 209
| 0.017668
|
66a846d0d120e378d227803f5adec0334b4d67ff
| 1,336
|
py
|
Python
|
stations/heathen/migrations/0003_auto_20161128_0519.py
|
boyombo/django-stations
|
93a70be7eb8268f9d48f6e3cf9a532bcb27ff895
|
[
"MIT"
] | null | null | null |
stations/heathen/migrations/0003_auto_20161128_0519.py
|
boyombo/django-stations
|
93a70be7eb8268f9d48f6e3cf9a532bcb27ff895
|
[
"MIT"
] | null | null | null |
stations/heathen/migrations/0003_auto_20161128_0519.py
|
boyombo/django-stations
|
93a70be7eb8268f9d48f6e3cf9a532bcb27ff895
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-28 05:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('heathen', '0002_member_gender'),
]
operations = [
migrations.CreateModel(
name='Industry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='member',
name='nok_email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AddField(
model_name='member',
name='nok_name',
field=models.CharField(blank=True, max_length=50),
),
migrations.AddField(
model_name='member',
name='nok_phone',
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name='member',
name='industry',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='heathen.Industry'),
),
]
| 30.363636
| 115
| 0.577844
| 1,146
| 0.857784
| 0
| 0
| 0
| 0
| 0
| 0
| 215
| 0.160928
|
66aa16869b2a00e5d9cde4a253891d698c5527b2
| 2,437
|
py
|
Python
|
src/observers/simple_observer.py
|
ChenyangTang/bark-ml
|
1d2ab1957bf49929e27d718dd4bd3912162197b8
|
[
"MIT"
] | null | null | null |
src/observers/simple_observer.py
|
ChenyangTang/bark-ml
|
1d2ab1957bf49929e27d718dd4bd3912162197b8
|
[
"MIT"
] | null | null | null |
src/observers/simple_observer.py
|
ChenyangTang/bark-ml
|
1d2ab1957bf49929e27d718dd4bd3912162197b8
|
[
"MIT"
] | null | null | null |
from gym import spaces
import numpy as np
from bark.models.dynamic import StateDefinition
from modules.runtime.commons.parameters import ParameterServer
import math
import operator
from src.commons.spaces import BoundedContinuous, Discrete
from src.observers.observer import StateObserver
class SimpleObserver(StateObserver):
def __init__(self,
params=ParameterServer()):
StateObserver.__init__(self, params)
self._state_definition = [int(StateDefinition.X_POSITION),
int(StateDefinition.Y_POSITION),
int(StateDefinition.THETA_POSITION),
int(StateDefinition.VEL_POSITION)]
self._observation_len = \
self._max_num_vehicles*self._len_state
def observe(self, world, agents_to_observe):
"""see base class
"""
concatenated_state = np.zeros(self._observation_len, dtype=np.float32)
    for i, (_, agent) in enumerate(world.agents.items()):
      # stop once the slots for the maximum number of vehicles are filled
      if i >= self._max_num_vehicles:
        break
      normalized_state = self._normalize(agent.state)
      reduced_state = self._select_state_by_index(normalized_state)
      starts_id = i*self._len_state
      concatenated_state[starts_id:starts_id+self._len_state] = reduced_state
return concatenated_state
def _norm(self, agent_state, position, range):
agent_state[int(position)] = \
(agent_state[int(position)] - range[0])/(range[1]-range[0])
return agent_state
def _normalize(self, agent_state):
agent_state = \
self._norm(agent_state,
StateDefinition.X_POSITION,
self._world_x_range)
agent_state = \
self._norm(agent_state,
StateDefinition.Y_POSITION,
self._world_y_range)
agent_state = \
self._norm(agent_state,
StateDefinition.THETA_POSITION,
self._theta_range)
agent_state = \
self._norm(agent_state,
StateDefinition.VEL_POSITION,
self._velocity_range)
return agent_state
def reset(self, world, agents_to_observe):
super(SimpleObserver, self).reset(world, agents_to_observe)
return world
@property
def observation_space(self):
return spaces.Box(
low=np.zeros(self._observation_len),
high=np.ones(self._observation_len))
@property
def _len_state(self):
return len(self._state_definition)
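# Note on the layout produced by observe(): the observation is a flat
# float32 vector of length _max_num_vehicles * 4 holding, per vehicle,
# [x, y, theta, velocity] normalized to [0, 1] using the world, theta and
# velocity ranges (presumably provided by the StateObserver base class).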
| 30.848101
| 77
| 0.672959
| 2,141
| 0.878539
| 0
| 0
| 221
| 0.090685
| 0
| 0
| 25
| 0.010259
|
66aa1e9b55b1f6a0fc3a8c730d67ac565985ed59
| 9,610
|
py
|
Python
|
cosilico/base/scatter.py
|
cosilico/cosilico
|
983373139aeaf459271c559a47a6439939ec93a5
|
[
"MIT"
] | null | null | null |
cosilico/base/scatter.py
|
cosilico/cosilico
|
983373139aeaf459271c559a47a6439939ec93a5
|
[
"MIT"
] | null | null | null |
cosilico/base/scatter.py
|
cosilico/cosilico
|
983373139aeaf459271c559a47a6439939ec93a5
|
[
"MIT"
] | null | null | null |
import altair as alt
import pandas as pd
def scatterplot(x, y, data, hue=None, color=None, opacity=1.,
x_autoscale=True, y_autoscale=True):
"""Display a basic scatterplot.
Parameters
----------
x : str
Column in data to be used for x-axis
y : str
Column in data to be used for y-axis
data : pandas.DataFrame
Dataframe holding x and y
hue : str, None
Column in data used to color the points
color : str, None
What color to display the points as
If hue is not None, then color will be overriden by hue
opacity : float
Opacity of the points in the plot
x_autoscale : bool
Scale the x-axis to fit the data,
otherwise axis starts at zero
y_autoscale : bool
Scale the y-axis to fit the data,
otherwise axis starts at zero
Example
-------
>>> import cosilico.base as base
>>> import seaborn as sns
>>>
>>> iris = sns.load_dataset('iris')
>>>
>>> base.scatterplot('sepal_length', 'sepal_width', iris, hue='species')
Returns
-------
altair.Chart
.. output::
https://static.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
height: 600px
"""
mark_kwargs = {
'opacity': opacity
}
if color is not None and hue is None:
mark_kwargs['color'] = color
encode_kwargs = {}
if hue is not None: encode_kwargs['color'] = f'{hue}:N'
chart = alt.Chart(data).mark_point(**mark_kwargs).encode(
x=alt.X(f'{x}:Q',
scale=alt.Scale(zero=not x_autoscale)
),
y=alt.Y(f'{y}:Q',
scale=alt.Scale(zero=not y_autoscale)
),
**encode_kwargs
)
return chart
def jointplot(x, y, data, hue=None, color=None, show_x=True,
show_y=True, opacity=.6, padding_scalar=.05, maxbins=30,
hist_height=50):
"""Display a scatterplot with axes histograms.
Parameters
----------
x : str
Column in data to be used for x-axis
y : str
Column in data to be used for y-axis
data : pandas.DataFrame
Dataframe holding x and y
hue : str, None
Column in data used to color the points
color : str, None
What color to display the points as
If hue is not None, then color will be overriden by hue
    show_x : bool
Show the distribution for the x-axis values
show_y : bool
Show the distribution for the y-axis values
opacity : float
Opacity of the histograms in the plot
maxbins : int
Max bins for the histograms
hist_height : int
Height of histograms
Example
-------
>>> import cosilico.base as base
>>>
>>> import seaborn as sns
>>> iris = sns.load_dataset('iris')
>>>
>>> base.jointplot('sepal_length', 'sepal_width', iris, hue='species')
Returns
-------
altair.Chart
.. output::
https://static.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
height: 600px
"""
chart = alt.Chart(data)
x_diff = max(data[x]) - min(data[x])
y_diff = max(data[y]) - min(data[y])
xscale = alt.Scale(domain=(min(data[x]) - (x_diff * padding_scalar),
max(data[x]) + (x_diff * padding_scalar)))
yscale = alt.Scale(domain=(min(data[y]) - (y_diff * padding_scalar),
max(data[y]) + (y_diff * padding_scalar)))
area_kwargs = {'opacity': opacity, 'interpolate': 'step'}
mark_kwargs = {}
if hue is not None:
mark_kwargs['color'] = f'{hue}:N'
points = chart.mark_circle().encode(
alt.X(x, scale=xscale),
alt.Y(y, scale=yscale),
**mark_kwargs
)
encode_kwargs = {}
if hue is not None:
encode_kwargs['color'] = f'{hue}:N'
top_hist = chart.mark_area(**area_kwargs).encode(
alt.X(f'{x}:Q',
# when using bins, the axis scale is set through
# the bin extent, so we do not specify the scale here
# (which would be ignored anyway)
bin=alt.Bin(maxbins=maxbins, extent=xscale.domain),
stack=None,
title='',
axis=alt.Axis(labels=False, tickOpacity=0.)
),
alt.Y('count()', stack=None, title=''),
**encode_kwargs
).properties(height=hist_height)
right_hist = chart.mark_area(**area_kwargs).encode(
alt.Y(f'{y}:Q',
bin=alt.Bin(maxbins=maxbins, extent=yscale.domain),
stack=None,
title='',
axis=alt.Axis(labels=False, tickOpacity=0.)
),
alt.X('count()', stack=None, title=''),
**encode_kwargs
).properties(width=hist_height)
if show_x and show_y:
return top_hist & (points | right_hist)
if show_x and not show_y:
return top_hist & points
if not show_x and show_y:
return points | right_hist
return points
def clean_jointplot(x, y, data, hue=None, show_x=True,
show_y=True, opacity=.6, padding_scalar=.2, bandwidth_scalar=10,
line_height=50, top_spacing=-40, right_spacing=0,
apply_configure_view=True):
"""Display a clean scatterplot with axes distribution lines.
Parameters
----------
x : str
Column in data to be used for x-axis
y : str
Column in data to be used for y-axis
data : pandas.DataFrame
Dataframe holding x and y
hue : str, None
        Column in data used to color the points
    show_x : bool
Show the line distribution for the x-axis values
show_y : bool
Show the line distribution for the y-axis values
opacity : float
Opacity of the histograms in the plot
bandwidth_scalar : float, int
Sets bandwidth for the density estimation.
Bandwidth = value_range / bandwidth_scalar
line_height : int
Height of the distribution lines
top_spacing : int
Amount of spacing between top distribution line and scatter
right_spacing : int
Amount of spacing between right distribution line and scatter
apply_configure_view : bool
Whether to apply strokeWidth=0 to the configure view function.
        Note that if this is applied you can't later combine this chart
with another chart. To combine this chart with another chart
you will need to set apply_configure_view to False and then reapply
.configure_view in the combined chart to make the weird axis
borders go away
Example
-------
>>> import cosilico.base as base
>>>
>>> import seaborn as sns
>>> iris = sns.load_dataset('iris')
>>>
>>> base.clean_jointplot('sepal_length', 'sepal_width', iris, hue='species')
Returns
-------
altair.Chart
.. output::
https://static.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
height: 600px
"""
chart = alt.Chart(data)
x_diff = max(data[x]) - min(data[x])
y_diff = max(data[y]) - min(data[y])
xscale = alt.Scale(domain=(min(data[x]) - (x_diff * padding_scalar),
max(data[x]) + (x_diff * padding_scalar)))
yscale = alt.Scale(domain=(min(data[y]) - (y_diff * padding_scalar),
max(data[y]) + (y_diff * padding_scalar)))
area_kwargs = {'opacity': opacity, 'interpolate': 'step'}
mark_kwargs = {}
if hue is not None:
mark_kwargs['color'] = f'{hue}:N'
points = chart.mark_circle().encode(
alt.X(x, scale=xscale),
alt.Y(y, scale=yscale),
**mark_kwargs
)
encode_kwargs = {}
if hue is not None:
encode_kwargs['color'] = f'{hue}:N'
transform_kwargs = {}
if hue is not None:
transform_kwargs['groupby'] = [hue]
line_axis_kwargs = {'labels': False, 'tickOpacity': 0., 'domain': False,
'grid': False}
top_line = chart.transform_density(
density=x,
bandwidth=x_diff / bandwidth_scalar,
counts=True,
extent=xscale.domain,
steps=200,
**transform_kwargs
).mark_line(
opacity=opacity
).encode(
x=alt.X(f'value:Q',
scale=xscale,
title='',
axis=alt.Axis(**line_axis_kwargs)
),
y=alt.Y('density:Q',
title='',
axis=alt.Axis(**line_axis_kwargs)
),
**encode_kwargs
).properties(height=line_height)
right_line = chart.transform_density(
density=y,
bandwidth=y_diff / bandwidth_scalar,
counts=True,
extent=yscale.domain,
steps=200,
**transform_kwargs
).mark_line(
opacity=opacity
).encode(
        y=alt.Y(f'value:Q',
scale=yscale,
title='',
axis=alt.Axis(**line_axis_kwargs)
),
        x=alt.X('density:Q',
title='',
axis=alt.Axis(**line_axis_kwargs)
),
order='value:Q',
**encode_kwargs
).properties(width=line_height)
if show_x and show_y:
combined = alt.vconcat(top_line,
alt.hconcat(points, right_line, spacing=right_spacing),
spacing=top_spacing)
if show_x and not show_y:
combined = alt.vconcat(top_line, points, spacing=top_spacing)
if not show_x and show_y:
combined = alt.hconcat(points, right_line, spacing=right_spacing)
if not show_x and not show_y:
combined = points
if apply_configure_view:
combined = combined.configure_view(strokeWidth=0)
return combined
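# --- Hedged usage sketch (illustrative, not part of the original module) ---
# As the clean_jointplot docstring notes, to combine the chart with another
# chart, build it with apply_configure_view=False and reapply configure_view
# on the combined chart:
#
#   import seaborn as sns
#   iris = sns.load_dataset('iris')
#   left = clean_jointplot('sepal_length', 'sepal_width', iris,
#                          hue='species', apply_configure_view=False)
#   right = scatterplot('petal_length', 'petal_width', iris, hue='species')
#   combined = (left | right).configure_view(strokeWidth=0)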
| 29.478528
| 87
| 0.591467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,501
| 0.468366
|
66abb66cbd60706f6fbdf7789edf198d10295b85
| 12,103
|
py
|
Python
|
flappy_env.py
|
timlaroche/FlapPyBird
|
cffc7bb76daad67957a8b5778c1f2c7d82da1514
|
[
"MIT"
] | null | null | null |
flappy_env.py
|
timlaroche/FlapPyBird
|
cffc7bb76daad67957a8b5778c1f2c7d82da1514
|
[
"MIT"
] | null | null | null |
flappy_env.py
|
timlaroche/FlapPyBird
|
cffc7bb76daad67957a8b5778c1f2c7d82da1514
|
[
"MIT"
] | null | null | null |
import gym
from gym import spaces
from itertools import cycle
import random
import sys
import os
import pygame
from pygame.locals import *
import flappy
import numpy as np
import cv2
# GLOBALS
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
PLAYERS_FILES = ('assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png')
BACKGROUND_FILE= 'assets/sprites/background-day.png'
PIPES_LIST = 'assets/sprites/pipe-green.png'
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
try:
xrange
except NameError:
xrange = range
class FlappyEnv(gym.Env):
"""Custom Environment that follows gym interface"""
metadata = {'render.modes': ['human']}
def __init__(self, server):
super(FlappyEnv, self).__init__()
if server == True:
os.environ["SDL_VIDEODRIVER"] = "dummy"
self.action_space = spaces.Discrete(10) # Weight the flap such that 1/10 action is to flap.
self.observation_space = spaces.Box(low = 0, high = 255, shape = (80, 80, 1), dtype=np.uint8)
pygame.init()
self.FPSCLOCK = pygame.time.Clock()
self.SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# numbers sprites for score display
# image, sound and hitmask dicts
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
IMAGES['player'] = (
pygame.image.load(PLAYERS_FILES[0]).convert_alpha(),
pygame.image.load(PLAYERS_FILES[1]).convert_alpha(),
pygame.image.load(PLAYERS_FILES[2]).convert_alpha(),
)
IMAGES['pipe'] = (
pygame.transform.flip(
pygame.image.load(PIPES_LIST).convert_alpha(), False, True),
pygame.image.load(PIPES_LIST).convert_alpha(),
)
# game over sprite
IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
IMAGES['background'] = pygame.image.load(BACKGROUND_FILE).convert()
# Sounds
if 'win' in sys.platform:
soundExt = '.wav'
else:
soundExt = '.ogg'
SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)
# Hitmasks for pipes
HITMASKS['pipe'] = (
self.getHitmask(IMAGES['pipe'][0]),
self.getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
self.getHitmask(IMAGES['player'][0]),
self.getHitmask(IMAGES['player'][1]),
self.getHitmask(IMAGES['player'][2]),
)
self.SCREEN.blit(IMAGES['background'], (0,0))
pygame.display.update()
# Game Settings
self.playerIndexGen = cycle([0, 1, 2, 1])
self.basex = 0
self.playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) + 0
self.playerx = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) + 0
self.playerIndex = 0
self.score = 0
self.loopIter = 0
self.pipeVelX = -4
self.baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
self.playerHeight = IMAGES['player'][self.playerIndex].get_height()
        # player velocity, max velocity, downward acceleration, acceleration on flap
self.playerVelY = -9 # player's velocity along Y, default same as playerFlapped
self.playerMaxVelY = 10 # max vel along Y, max descend speed
self.playerMinVelY = -8 # min vel along Y, max ascend speed
        self.playerAccY = 1 # player's downward acceleration
self.playerRot = 45 # player's rotation
self.playerVelRot = 3 # angular speed
self.playerRotThr = 20 # rotation threshold
self.playerFlapAcc = -9 # players speed on flapping
self.playerFlapped = False # True when player flaps
self.running = True
self.upperPipes = []
self.lowerPipes = []
def step(self, action):
basex = self.basex
reward = 0.0
obs = list()
if action == 1:
if self.playery > -2 * IMAGES['player'][0].get_height():
self.playerVelY = self.playerFlapAcc
self.playerFlapped = True
SOUNDS['wing'].play()
# check for crash here
crashTest = self.checkCrash({'x': self.playerx, 'y': self.playery, 'index': self.playerIndex},
self.upperPipes, self.lowerPipes)
if crashTest[0]:
self.running = False
reward -= 100
else:
reward += 0.1 # Little bit of reward for surviving
# check for score
playerMidPos = self.playerx + IMAGES['player'][0].get_width() / 2
for pipe in self.upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
self.score += 1
reward += 1
SOUNDS['point'].play()
# playerIndex basex change
if (self.loopIter + 1) % 3 == 0:
self.playerIndex = next(self.playerIndexGen)
self.loopIter = (self.loopIter + 1) % 30
basex = -((-basex + 100) % self.baseShift)
# rotate the player
if self.playerRot > -90:
self.playerRot -= self.playerVelRot
# player's movement
if self.playerVelY < self.playerMaxVelY and not self.playerFlapped:
self.playerVelY += self.playerAccY
if self.playerFlapped:
self.playerFlapped = False
# more rotation to cover the threshold (calculated in visible rotation)
self.playerRot = 45
self.playerHeight = IMAGES['player'][self.playerIndex].get_height()
self.playery += min(self.playerVelY, BASEY - self.playery - self.playerHeight)
# move pipes to left
for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
uPipe['x'] += self.pipeVelX
lPipe['x'] += self.pipeVelX
# add new pipe when first pipe is about to touch left of screen
if len(self.upperPipes) > 0 and 0 < self.upperPipes[0]['x'] < 5:
newPipe = self.getRandomPipe()
self.upperPipes.append(newPipe[0])
self.lowerPipes.append(newPipe[1])
# remove first pipe if its out of the screen
if len(self.upperPipes) > 0 and self.upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
self.upperPipes.pop(0)
self.lowerPipes.pop(0)
# draw sprites
self.SCREEN.blit(IMAGES['background'], (0,0))
for i, (uPipe, lPipe) in enumerate(zip(self.upperPipes, self.lowerPipes)):
if i == 0:
obs.insert(1, uPipe['x'])
obs.insert(2, uPipe['y'])
obs.insert(3, lPipe['x'])
obs.insert(4, lPipe['y'])
self.SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
self.SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
self.SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
self.showScore(self.score)
# Player rotation has a threshold
visibleRot = self.playerRotThr
if self.playerRot <= self.playerRotThr:
visibleRot = self.playerRot
playerSurface = pygame.transform.rotate(IMAGES['player'][self.playerIndex], visibleRot)
self.SCREEN.blit(playerSurface, (self.playerx, self.playery))
return self.get_observation(), reward, not self.running, {} # obs, reward, done, info
def get_observation(self):
surf = pygame.surfarray.array3d(pygame.display.get_surface())
x = cv2.resize(surf, (80, 80)) # resize to 80x80
x = np.array(x, dtype=np.uint8)
x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
x = np.reshape(x, (80, 80, 1))
return x
def reset(self):
self.playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) + 0
self.playerx = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) - 200
self.basex = 0
self.playerIndex = 0
self.playerIndexGen = cycle([0, 1, 2, 1])
self.score = 0
self.running = True
obs = [0, 0, 0, 0, 0]
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = self.getRandomPipe()
newPipe2 = self.getRandomPipe()
# list of upper pipes
self.upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
self.lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
return self.get_observation()
def render(self, mode='human'):
pygame.display.update()
self.FPSCLOCK.tick(FPS)
# Helper functions
def getRandomPipe(self):
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def showScore(self, score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
self.SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))
Xoffset += IMAGES['numbers'][digit].get_width()
def checkCrash(self, player, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = self.pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = self.pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(self, rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def getHitmask(self, image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in xrange(image.get_width()):
mask.append([])
for y in xrange(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
def get_actions(self):
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
return 1
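# --- Hedged usage sketch (illustrative, not part of the original file) ---
# A minimal random-agent loop, assuming the assets/ directory is available
# and a display is present (or server=True for the dummy video driver):
#
#   env = FlappyEnv(server=False)
#   obs = env.reset()
#   done = False
#   while not done:
#       action = env.action_space.sample()   # the bird flaps only when action == 1
#       obs, reward, done, info = env.step(action)
#       env.render()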
| 32.799458
| 130
| 0.673635
| 11,445
| 0.945633
| 0
| 0
| 0
| 0
| 0
| 0
| 2,954
| 0.244072
|
66aded0365be403ed572fa925d74446e3fe43e79
| 4,587
|
py
|
Python
|
vkmini/group/group_longpoll.py
|
Elchinchel/vkmini
|
378ee3893c5826563a19198fd532df47aaa03350
|
[
"MIT"
] | 2
|
2021-08-12T20:22:40.000Z
|
2022-02-06T18:13:38.000Z
|
vkmini/group/group_longpoll.py
|
Elchinchel/vkmini
|
378ee3893c5826563a19198fd532df47aaa03350
|
[
"MIT"
] | null | null | null |
vkmini/group/group_longpoll.py
|
Elchinchel/vkmini
|
378ee3893c5826563a19198fd532df47aaa03350
|
[
"MIT"
] | 3
|
2020-07-31T17:19:20.000Z
|
2021-12-11T11:38:23.000Z
|
from typing import AsyncGenerator, List, Union, Any
from aiohttp.client import ClientSession
from vkmini.utils import AbstractLogger
from vkmini.request import longpoll_get, default_session
from vkmini.exceptions import TokenInvalid
from vkmini import VkApi
class Update:
    # TODO: well, no comment needed here
class _Message:
date: int
from_id: int
id: int
out: int
peer_id: int
text: str
conversation_message_id: int
fwd_messages: list
important: bool
attachments: list
is_hidden: bool
client_info: dict
reply_message: dict = None
def __init__(self, object):
self.__dict__.update(object)
type: str
object: dict
message: _Message
vk: VkApi
def __init__(self, update, vk):
self.vk = vk
self.type = update['type']
self.object = update['object']
if self.type == 'message_new':
self.message = self._Message(self.object['message'])
def __getitem__(self, key):
return self.object[key]
async def reply_to_peer(self, message, **kwargs):
return await self.vk.msg_op(1, self.message.peer_id, message, **kwargs)
class GroupLP:
    # TODO: this is getting pretty outdated
wrap_events: bool
group_id: int
server: str
wait: int
key: str
ts: int
time: float
vk: VkApi
_session: Union[ClientSession, None]
__session_owner: bool = False
def __init__(self, vk: VkApi, group_id: int, wait: int = 25,
logger: AbstractLogger = None,
session: ClientSession = default_session) -> None:
"""
Параметр `wait` описан в документации
(https://vk.com/dev/bots_longpoll)
`logger` -- любой объект, имеющий атрибуты info, debug и warning,
по умолчанию None, то есть логирование не ведется
`session` -- экземпляр aiohttp.ClientSession, который будет
использоваться при выполнении запросов к LongPoll серверу
(при использовании класса в контексте, будет создана автоматически,
иначе будет использоваться стандартная общая сессия,
см. vkmini.set_default)
Возвращает "сырой" класс, для подготовки к работе, нужно использовать
его в контексте или вызвать метод `start`
Пример с контекстом:
```
async with GroupLP(vk, group_id) as lp:
print(await lp.check())
```
Пример без контекста:
```
lp = GroupLP(vk, group_id)
await lp.start()
print(await lp.check())
```
"""
self._vk = vk
self.wait = wait
self.group_id = group_id
self.logger = logger or vk.logger
self._session = session
@property
async def check(self) -> List[Union[Update, List[Any]]]:
        'Returns the list of events (updates)'
data = await longpoll_get(
f"{self.server}?act=a_check&key={self.key}" +
f"&ts={self.ts}&wait={self.wait}",
self._session
)
if 'failed' in data:
if data['failed'] == 1:
self.ts = data['ts']
elif data['failed'] == 2:
await self.get_longpoll_data(False)
else:
await self.get_longpoll_data(True)
return []
else:
self.ts = data['ts']
# if self.wrap_events:
# return [Update(update, self.vk) for update in data['updates']]
# else:
return data['updates']
async def get_longpoll_data(self, new_ts: bool) -> None:
data = await self._vk._method(
'groups.getLongPollServer', group_id=self.group_id
)
if not self._vk.excepts:
if data.get('error', {}).get('error_code') == 5:
raise TokenInvalid(data['error'])
self.server = data['server']
self.key = data['key']
if new_ts:
self.ts = data['ts']
async def start(self) -> None:
await self.get_longpoll_data(True)
async def __aenter__(self) -> "GroupLP":
if self._session is None:
self._session = ClientSession()
self.__session_owner = True
await self.get_longpoll_data(True)
return self
async def __aexit__(self, *_) -> None:
if self.__session_owner:
await self._session.close()
async def listen(self) -> AsyncGenerator[Update, None]:
while True:
for update in await self.check:
yield update
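# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Assuming a valid group access token and group id (placeholders below; the
# exact VkApi constructor arguments are assumed), the listen() generator can
# be consumed like this:
#
#   import asyncio
#   from vkmini import VkApi
#
#   async def main():
#       vk = VkApi("GROUP_ACCESS_TOKEN")
#       async with GroupLP(vk, group_id=123) as lp:
#           async for update in lp.listen():
#               print(update['type'])
#
#   asyncio.run(main())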
| 28.849057
| 79
| 0.58535
| 4,761
| 0.947086
| 148
| 0.029441
| 813
| 0.161727
| 1,913
| 0.380545
| 1,812
| 0.360454
|
66af18eea69ccb8397ca09f7ca83656cd98f0584
| 1,162
|
py
|
Python
|
aswan/tests/unit/test_migrations.py
|
papsebestyen/aswan
|
ed1b2a3dae6a8b7de355edd75de8d4ad577c97cd
|
[
"MIT"
] | 1
|
2021-04-28T23:08:07.000Z
|
2021-04-28T23:08:07.000Z
|
aswan/tests/unit/test_migrations.py
|
papsebestyen/aswan
|
ed1b2a3dae6a8b7de355edd75de8d4ad577c97cd
|
[
"MIT"
] | 1
|
2022-01-22T22:02:55.000Z
|
2022-01-22T22:02:55.000Z
|
aswan/tests/unit/test_migrations.py
|
papsebestyen/aswan
|
ed1b2a3dae6a8b7de355edd75de8d4ad577c97cd
|
[
"MIT"
] | 2
|
2022-01-05T10:01:22.000Z
|
2022-02-16T10:58:46.000Z
|
import tarfile
import pandas as pd
import sqlalchemy as db
from aswan import AswanConfig, ProdConfig, Project
from aswan.migrate import pull, push
from aswan.models import Base
from aswan.object_store import get_object_store
def test_push_pull(tmp_path):
conf = ProdConfig.from_dir(tmp_path / "cfg")
Base.metadata.create_all(db.create_engine(conf.db))
ostore = get_object_store(conf.object_store)
remote = tmp_path / "remote"
df1 = pd.DataFrame([{"A": 10}])
df2 = pd.DataFrame([{"B": 10}])
tabfp = conf.t2_path / "tab"
df1.to_parquet(tabfp)
ostore.dump_str("YAAAY", "fing")
push(conf, str(remote))
df2.to_parquet(tabfp)
tfile = next(remote.glob("**/*.tgz"))
with tarfile.open(tfile, "r:gz") as tar:
names = tar.getnames()
assert "fing" in names
assert not pd.read_parquet(tabfp).equals(df1)
pull(conf, str(remote))
assert pd.read_parquet(tabfp).equals(df1)
def test_project_push_pull(tmp_path):
aconf = AswanConfig.default_from_dir(
tmp_path / "cfg", remote_root=str(tmp_path / "remote")
)
project = Project(aconf)
project.push()
project.pull()
| 24.723404
| 62
| 0.683305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.061962
|
66b02efea9465e74c9e2945b8ff0942e0ed6931f
| 82
|
py
|
Python
|
backend/src/apps/test/apps.py
|
LucienLuc/project-sts
|
02ad13b515bcefe1c1ef30f0c06104359bff613e
|
[
"MIT"
] | null | null | null |
backend/src/apps/test/apps.py
|
LucienLuc/project-sts
|
02ad13b515bcefe1c1ef30f0c06104359bff613e
|
[
"MIT"
] | null | null | null |
backend/src/apps/test/apps.py
|
LucienLuc/project-sts
|
02ad13b515bcefe1c1ef30f0c06104359bff613e
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TestConfig(AppConfig):
name = 'test'
| 16.4
| 33
| 0.743902
| 46
| 0.560976
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.073171
|
66b23735ac5dd60f24c047d430921a774e2c8f6b
| 1,055
|
py
|
Python
|
booking.py
|
kurkurzz/AdminDashboard-BookingWithTimeslot
|
aa34fef7bc0e1f8cabb602adc6d69af925436e5d
|
[
"MIT"
] | null | null | null |
booking.py
|
kurkurzz/AdminDashboard-BookingWithTimeslot
|
aa34fef7bc0e1f8cabb602adc6d69af925436e5d
|
[
"MIT"
] | null | null | null |
booking.py
|
kurkurzz/AdminDashboard-BookingWithTimeslot
|
aa34fef7bc0e1f8cabb602adc6d69af925436e5d
|
[
"MIT"
] | null | null | null |
import datetime as dt
class Booking:
def __init__(self):
self.user_id = ''
self.name =''
self.pet_name = ''
self.time_slot = 0
self.id = ''
self.phone_number = ''
def to_dict(self):
return {
'name' : self.name,
'petname' : self.pet_name,
'timeslot' : self.time_slot,
'userid' : self.user_id,
'phonenumber' : self.phone_number
}
    # convert data from a db dict into a Booking instance
def from_dict(self,dict,id):
self.name = dict['name']
self.pet_name = dict['petname']
self.time_slot = int(dict['timeslot'])
self.user_id = dict['userid']
self.phone_number = dict['phonenumber']
self.id = id
self.datetime = dt.datetime.fromtimestamp(self.time_slot)
return self
def __str__(self):
time_string = self.datetime.strftime("%I:%M %p")
return f'Name: {self.name}\nPhone Number: {self.phone_number}\nPet Name: {self.pet_name}\nTime: {time_string}'
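# --- Hedged usage sketch (illustrative, not part of the original file) ---
#   record = {'name': 'Ali', 'petname': 'Tom', 'timeslot': 1609459200,
#             'userid': 'u1', 'phonenumber': '0123456789'}
#   booking = Booking().from_dict(record, id='doc1')
#   print(booking)        # name, phone number, pet name and formatted time slot
#   assert booking.to_dict()['timeslot'] == 1609459200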
| 31.029412
| 118
| 0.559242
| 1,032
| 0.978199
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.232227
|
66b3e370acc80eb4f8fc537add6850404fc19250
| 148
|
py
|
Python
|
problems/incorrect_division_method.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
problems/incorrect_division_method.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
problems/incorrect_division_method.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
## Incorrect division method
## 8 kyu
## https://www.codewars.com/kata/54d1c59aba326343c80000e7
def divide_numbers(x,y):
return x / y
| 21.142857
| 59
| 0.682432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 99
| 0.668919
|
66b517ab0ecf7dee82c7b5fd1f3ac99536fb011e
| 1,927
|
py
|
Python
|
launch_notebooks.py
|
srivnamrata/openvino
|
aea76984a731fa3e81be9633dc8ffc702fb4e207
|
[
"Apache-2.0"
] | null | null | null |
launch_notebooks.py
|
srivnamrata/openvino
|
aea76984a731fa3e81be9633dc8ffc702fb4e207
|
[
"Apache-2.0"
] | null | null | null |
launch_notebooks.py
|
srivnamrata/openvino
|
aea76984a731fa3e81be9633dc8ffc702fb4e207
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import sys
from pathlib import Path
import os
pythonpath = sys.executable
curdir = Path(__file__).parent.resolve()
parentdir = curdir.parent
# If openvino_env is already activated, launch jupyter lab
# This will also start if openvino_env_2 is activated instead of openvino_env
# The assumption is that that is usually intended
if "openvino_env" in pythonpath:
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
else:
if sys.platform == "win32":
scripts_dir = "Scripts"
else:
scripts_dir = "bin"
# If openvino_env is not activated, search for the openvino_env folder in the
# current and parent directory and launch the notebooks
try:
pythonpath = os.path.normpath(
os.path.join(curdir, f"openvino_env/{scripts_dir}/python")
)
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
except:
try:
pythonpath = os.path.normpath(
os.path.join(parentdir, f"openvino_env/{scripts_dir}/python")
)
subprocess.run([pythonpath, "-m", "jupyterlab", "notebooks"])
except:
print(pythonpath)
print(
"openvino_env could not be found in the current or parent "
"directory, or the installation is not complete. Please follow "
"the instructions on "
"https://github.com/openvinotoolkit/openvino_notebooks to "
"install the notebook requirements in a virtual environment.\n\n"
"After installation, you can also launch the notebooks by "
"activating the virtual environment manually (see the README "
"on GitHub, linked above) and typing `jupyter lab notebooks`.\n\n"
f"Current directory: {curdir}"
f"Python executable: {sys.executable}"
)
| 39.326531
| 82
| 0.632071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,050
| 0.544888
|
66b64a14727f525c1e5bbd7f0c1785592ad8eed7
| 1,143
|
py
|
Python
|
update_last_date.py
|
ankschoubey/testblog
|
f74e93f0f85edaee9c5adbe402e8e4a5252cc64d
|
[
"Apache-2.0"
] | 1
|
2021-07-26T00:58:53.000Z
|
2021-07-26T00:58:53.000Z
|
update_last_date.py
|
ankschoubey/testblog
|
f74e93f0f85edaee9c5adbe402e8e4a5252cc64d
|
[
"Apache-2.0"
] | 15
|
2020-03-28T05:27:53.000Z
|
2022-01-07T17:44:08.000Z
|
update_last_date.py
|
ankschoubey/testblog
|
f74e93f0f85edaee9c5adbe402e8e4a5252cc64d
|
[
"Apache-2.0"
] | 3
|
2021-05-08T19:59:02.000Z
|
2021-05-11T17:14:45.000Z
|
import os.path, os, time
from datetime import datetime
def getLastUpdatedTime(file: str):
return datetime.fromtimestamp(os.path.getmtime(file)).isoformat()
def updatePost(postUrl: str) -> None:
lastUpdatedTime = getLastUpdatedTime(postUrl)
prefix = "last_modified_at"
string = f"{prefix}: {lastUpdatedTime}"
with open(postUrl, "r", encoding="utf8") as file:
lines = file.readlines()
    # Walk the front matter (skip the opening "---" on line 0). enumerate(..., start=1)
    # keeps `index` aligned with the position of `line` inside `lines`.
    for index, line in enumerate(lines[1:], start=1):
        #print(index, line)
        if line.startswith("---"):
            # closing delimiter reached without finding the field: insert it here
            lines.insert(index, string + '\n')
            #print("found break")
            break
        if line.startswith(prefix):
            if line.startswith(string[:28]):
                return
            # field already present with a different date: overwrite it in place
            lines[index] = string + '\n'
            break
with open(postUrl, "w", encoding="utf8") as file:
file.writelines(lines)
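# Illustrative effect (added note, assumes Jekyll-style posts; not part of the original
# script). A post whose front matter starts as
#   ---
#   title: Some post
#   ---
# would, after updatePost(), carry an extra line inside the front matter:
#   last_modified_at: 2021-05-08T19:59:02.123456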
from os import listdir
from os.path import isfile, join
path = "_posts"
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
#print(onlyfiles)
for i in onlyfiles:
completePath = f"{path}/{i}"
updatePost(completePath)
| 30.078947
| 69
| 0.601925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 157
| 0.137358
|
66b70f0759d9cb9c2433981c7b3e962dee37c367
| 4,032
|
py
|
Python
|
basic/19-brownie/brownie_test/tests/exchange/test_eth_to_token.py
|
xiangzhengfeng/Dapp-Learning
|
813fe6e52898206046842d10ecf9eb68b7f336a1
|
[
"MIT"
] | 987
|
2021-12-19T09:57:18.000Z
|
2022-03-31T15:39:45.000Z
|
basic/19-brownie/brownie_test/tests/exchange/test_eth_to_token.py
|
xiangzhengfeng/Dapp-Learning
|
813fe6e52898206046842d10ecf9eb68b7f336a1
|
[
"MIT"
] | 30
|
2021-12-20T03:13:29.000Z
|
2022-03-31T15:00:23.000Z
|
basic/19-brownie/brownie_test/tests/exchange/test_eth_to_token.py
|
xiangzhengfeng/Dapp-Learning
|
813fe6e52898206046842d10ecf9eb68b7f336a1
|
[
"MIT"
] | 207
|
2021-12-19T08:40:38.000Z
|
2022-03-31T13:10:02.000Z
|
from brownie import (accounts, web3)
def test_eth_to_token_swap(HAY_token, hay_token_exchange):
HAY_token.approve(hay_token_exchange, 10 * 10**18, {"from": accounts[0]})
# step 1: initialize exchange
hay_token_exchange.initializeExchange(10 * 10**18, {"from": accounts[0], "amount": 5 * 10**18})
# the swap function needs a timeout parameter
timeout = web3.eth.getBlock(web3.eth.blockNumber).timestamp + 300
assert HAY_token.balanceOf(accounts[2]) == 0
hay_token_exchange.ethToTokenSwap(1, timeout, {"from": accounts[2], "amount": 1 * 10**18})
# step 2: calculate the entries in transforming ETH to Token
# a) 注入ETH,直接先收取0.2%的手续费,最后注入到pool中,以input token的形式收取,这里是ETH,上例中收取0.002 ether
# fee = 0.2% * 1 * 10**18 = 2000000000000000
# b) 计算池子中剩余的token数量: Token pool = (last invariant) / ( ETH pool - fee )
# 注意在计算时,分子分母都要取整数,int(a) // int(b)
# e.g. Token pool = 10 * 10**18 * 5 * 10**18 / (5.998 * 10**18) = 8336112037345781927
# c) 计算返回的token的数量: Token received = original Token amount - Token pool
# = 10 * 10**18 - 8336112037345781927
# = 1663887962654218073
# d) 更新ETH-TOKEN池子的所有状态量:
# invariant = Token pool * ETH pool = 8336112037345781927 * 6 * 10**18 = 50016672224074691562000000000000000000
# Token Pool = 8336112037345781927
# ETH pool = 6 * 10**18
assert hay_token_exchange.ethPool() == 6 * 10**18
assert web3.eth.getBalance(hay_token_exchange.address) == 6 * 10**18
assert hay_token_exchange.tokenPool() == 8336112037345781927
assert HAY_token.balanceOf(hay_token_exchange) == 8336112037345781927
assert hay_token_exchange.invariant() == 50016672224074691562000000000000000000
assert HAY_token.balanceOf(accounts[2]) == 1663887962654218073
def test_fallback_eth_to_token_swap(HAY_token, hay_token_exchange):
# 测试uniswap exchange合约的默认fallback函数,即直接往这个地址转入eth,则默认是用ETH换取TOKEN的操作
HAY_token.approve(hay_token_exchange, 10 * 10**18, {"from": accounts[0]})
# step 1: initialize exchange
hay_token_exchange.initializeExchange(10 * 10**18, {"from": accounts[0], "amount": 5 * 10**18})
timeout = web3.eth.getBlock(web3.eth.blockNumber).timestamp + 300
# step 2: use accounts[2] to do the test
assert HAY_token.balanceOf(accounts[2]) == 0
accounts[2].transfer(hay_token_exchange, 1 * 10**18)
assert hay_token_exchange.ethPool() == 6 * 10 ** 18
assert web3.eth.getBalance(hay_token_exchange.address) == 6 * 10 ** 18
assert hay_token_exchange.tokenPool() == 8336112037345781927
assert HAY_token.balanceOf(hay_token_exchange) == 8336112037345781927
assert hay_token_exchange.invariant() == 50016672224074691562000000000000000000
assert HAY_token.balanceOf(accounts[2]) == 1663887962654218073
def test_eth_to_token_payment(HAY_token, hay_token_exchange):
# 测试eth2token payment函数,与swap函数不同的点是receipt是另一个地址
# 用accounts[2]的ETH取exchange中交易,交易所得TOken发往accounts[3]
HAY_token.approve(hay_token_exchange, 10 * 10 ** 18, {"from": accounts[0]})
# step 1: initialize exchange
hay_token_exchange.initializeExchange(10 * 10 ** 18, {"from": accounts[0], "amount": 5 * 10 ** 18})
timeout = web3.eth.getBlock(web3.eth.blockNumber).timestamp + 300
# 开始的两个地址的TOken数量都为0
assert HAY_token.balanceOf(accounts[2]) == 0
assert HAY_token.balanceOf(accounts[3]) == 0
hay_token_exchange.ethToTokenPayment(1, timeout, accounts[3], {"from": accounts[2], "amount": 1 * 10**18})
assert hay_token_exchange.ethPool() == 6 * 10 ** 18
assert web3.eth.getBalance(hay_token_exchange.address) == 6 * 10 ** 18
assert hay_token_exchange.tokenPool() == 8336112037345781927
assert HAY_token.balanceOf(hay_token_exchange) == 8336112037345781927
assert hay_token_exchange.invariant() == 50016672224074691562000000000000000000
assert HAY_token.balanceOf(accounts[3]) == 1663887962654218073
assert HAY_token.balanceOf(accounts[2]) == 0
| 51.692308
| 117
| 0.705109
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,565
| 0.360101
|
66b7938b4ce230cf1fa2893cf38e7f737bacfde6
| 49
|
py
|
Python
|
hello.py
|
Lifereborn/cs3240-labdemo
|
20db420273e78b4a905ec7e3a21fc717d71dc301
|
[
"MIT"
] | null | null | null |
hello.py
|
Lifereborn/cs3240-labdemo
|
20db420273e78b4a905ec7e3a21fc717d71dc301
|
[
"MIT"
] | null | null | null |
hello.py
|
Lifereborn/cs3240-labdemo
|
20db420273e78b4a905ec7e3a21fc717d71dc301
|
[
"MIT"
] | null | null | null |
from helper import greetings
greetings("hi!")
| 8.166667
| 28
| 0.734694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.102041
|
66b88bc537b297b0b6ea48d2a39575fd0626f252
| 232
|
py
|
Python
|
setup.py
|
h-rub/manzip
|
875e4ed75e08bd06b0d50698ecf1744ab3723e4c
|
[
"MIT"
] | null | null | null |
setup.py
|
h-rub/manzip
|
875e4ed75e08bd06b0d50698ecf1744ab3723e4c
|
[
"MIT"
] | null | null | null |
setup.py
|
h-rub/manzip
|
875e4ed75e08bd06b0d50698ecf1744ab3723e4c
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name="manzip",
version='1.0.0',
py_modules=['manzip'],
install_requires=[
'Click',
],
entry_points='''
[console_scripts]
manzip=app:main
''',
)
| 16.571429
| 28
| 0.547414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.392241
|
66b95f7f1063980cc02f05f543cab0abf0bce28b
| 199
|
py
|
Python
|
tests/test_mmhelloworld.py
|
manasm11/mmhelloworld
|
2e6907ac0962de90764a036d14046861b5f47521
|
[
"MIT"
] | null | null | null |
tests/test_mmhelloworld.py
|
manasm11/mmhelloworld
|
2e6907ac0962de90764a036d14046861b5f47521
|
[
"MIT"
] | null | null | null |
tests/test_mmhelloworld.py
|
manasm11/mmhelloworld
|
2e6907ac0962de90764a036d14046861b5f47521
|
[
"MIT"
] | null | null | null |
from mmhelloworld import say_hello
def test_say_hello_no_params():
assert say_hello() == "Hello! World"
def test_say_hello_with_param():
assert say_hello("Everyone") == "Hello! Everyone"
| 19.9
| 53
| 0.738693
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.20603
|
66b9ec6f54ec8e5b78556e4fbb86bde48b9e1d35
| 1,167
|
py
|
Python
|
bann/b_container/functions/print_init_net_state.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_container/functions/print_init_net_state.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_container/functions/print_init_net_state.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""".. moduleauthor:: Artur Lissin"""
from typing import TypeVar
from bann.b_container.states.general.interface.init_state import InitState
from bann.b_container.states.general.interface.net_state import NetState
from bann.b_container.functions.dict_str_repr import dict_string_repr
from rewowr.public.functions.syncout_dep_functions import logger_print_to_console
from rewowr.public.interfaces.logger_interface import SyncStdoutInterface
_TypeNet = TypeVar('_TypeNet', bound=NetState)
_TypeInit = TypeVar('_TypeInit', bound=InitState)
_TypeState = TypeVar('_TypeState', NetState, InitState)
def _print_to_logger(start_str: str, sync_out: SyncStdoutInterface, states: _TypeState, /) -> None:
output_string = f"The arguments given to {start_str}:\n"
output_string += f"\n\t{dict_string_repr(states.get_kwargs().__dict__)}\n"
logger_print_to_console(sync_out, output_string)
def print_init_net_states(net_state: _TypeNet, initializer: _TypeInit,
sync_out: SyncStdoutInterface, /) -> None:
_print_to_logger("net-state", sync_out, net_state)
_print_to_logger("init-state", sync_out, initializer)
| 44.884615
| 99
| 0.77892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 212
| 0.181662
|
66baa831bc3a0b5f4c002eec9ab7e86c9dd317b9
| 4,578
|
py
|
Python
|
PythonCodes/ScientificPlotting/FigGen_Py_wolfel/Fig3.py
|
Nicolucas/C-Scripts
|
2608df5c2e635ad16f422877ff440af69f98f960
|
[
"MIT"
] | null | null | null |
PythonCodes/ScientificPlotting/FigGen_Py_wolfel/Fig3.py
|
Nicolucas/C-Scripts
|
2608df5c2e635ad16f422877ff440af69f98f960
|
[
"MIT"
] | null | null | null |
PythonCodes/ScientificPlotting/FigGen_Py_wolfel/Fig3.py
|
Nicolucas/C-Scripts
|
2608df5c2e635ad16f422877ff440af69f98f960
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('science')
import os, sys, time
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/petsc-3.12.5/lib/petsc/bin/")
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/TEAR/se2wave/utils/python")
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/TEAR/processing/TEAR/PythonCodes/")
from se2waveload import *
from Lib_GeneralFunctions import *
from GeneratePaperFigs import *
from ModelIllustration import *
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 20
FontSizeControlFreak(SMALL_SIZE,MEDIUM_SIZE,BIGGER_SIZE)
from palettable.colorbrewer.diverging import PuOr_11_r as FieldColor
cmap = FieldColor.mpl_colormap
from matplotlib.colors import ListedColormap
import matplotlib.lines as mlines
from palettable.cartocolors.qualitative import Safe_5 as LineColor
cmapProf = ListedColormap(LineColor.mpl_colors[:])
###################################################################
###################### Reference solution
###################################################################
pathRef = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/PaperData/References/"
# Reference saved into a list of objects
RefList = [SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-0.txt", "0km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-1.txt", "2km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-2.txt", "4km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-3.txt", "6km"),
SSCreference(pathRef + "Kostrov/Kos_sem2dpack-{}-receiver-4.txt", "8km"),
]
# Reference saved into a list of objects
RefListTPV = [TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-0.0e+00.txt", "0km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-2.0e+03.txt", "2km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-4.0e+03.txt", "4km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-6.0e+03.txt", "6km"),
TPV3reference(pathRef + "TPV3/TPV_sem2dpack-{}-receiver-8.0e+03.txt", "8km"),
]
###################################################################
###################### Reference solution
###################################################################
# Figure 3
start_time = time.time()
fname = "step-{timestep:04}_wavefield.pbin"
path = "/import/freenas-m-03-geodynamics/jhayek/TEAR/Results/T2/Runs/TEAR46_Kos_T20_P3_025x025_A12phi65_Delta2.5_4s/"
i=4630
FieldFilename = os.path.join(path,fname.format(timestep=i))
MeshFilename = os.path.join(path, "default_mesh_coor.pbin")
se2_coor = se2wave_load_coordinates(MeshFilename)
FileList = glob(os.path.join(path,"step-{timestep}_wavefield.pbin".format(timestep="*")))
l = [i.replace(os.path.join(path,'step-'),'').replace('_wavefield.pbin','') for i in FileList]
TimeStepVal, LCoorX, LCoorY, LFieldX, LFieldY, LFieldvelX, LFieldvelY = ExtractFields(FieldFilename, se2_coor)
FolderProfilesPath = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/PaperData/CorrectedSimulations/20220325/"
DataProfile = LoadPickleFile(Filename = "TEAR46_Kos_T20_P3_025x025_A12phi65_Delta2.5_4s-Tilt20.0-P3-TPList_t4630_d62.5.pickle",FolderPath = FolderProfilesPath)
x0,y0 = 7350,2675
InsetAxis = [x0-200,x0+200,y0-200,y0+200]
F1, ax = Plot4KomaSetup(LCoorX, LCoorY, LFieldX, LFieldvelX,
["X-Component Displacement ", "X-Component Displacement [m]"],
TimeStepVal,InsetAxis,
cmap=cmap, rasterized=True)
del x0,y0,InsetAxis
# Tilted case plotting
iidx = 0
for iidx,Test1 in enumerate(DataProfile):
ax[0].plot(Test1.Time, Test1.DispX, color= cmapProf.colors[iidx], linewidth=1.5, zorder=iidx)
ax[1].plot(Test1.Time, Test1.VelX, color= cmapProf.colors[iidx], linewidth=1.5, zorder=iidx)
ax[0].set_xlabel("time [s]")
#F1.suptitle("Tilting (20deg) Kostrov simulation")
[item.PlotReference(ax[0], "Slip", filtering=False) for item in RefList]
[item.PlotReference(ax[1], "SlipRate", filtering=False) for item in RefList]
Format_LabelsOnFig_formatAxis(F1, ax[:2],inverted=True, ncols = 3, HeightBbox=1.2)
LabelizeAxisList(ax,Pos=[0.9, 0.9],fontsize=BIGGER_SIZE)
print("Saving Figure...")
OutFile = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/Works/se2dr_Paper/Illustrations/FinalFigures/F{}.pdf"
F1.savefig(OutFile.format("3"))
OutFile = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/Works/se2dr_Paper/Illustrations/FinalFigures/F{}.png"
F1.savefig(OutFile.format("3"))
| 41.243243
| 159
| 0.68851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,017
| 0.440585
|
66bba8495cc9b2de4fa5d89e4f271bf43563f4b0
| 3,560
|
py
|
Python
|
setup.py
|
fkie/rosrepo
|
13cdf89e32f0c370d106a61540b0cd102675daf9
|
[
"Apache-2.0"
] | 5
|
2016-09-06T08:02:10.000Z
|
2018-06-10T20:45:21.000Z
|
setup.py
|
fkie/rosrepo
|
13cdf89e32f0c370d106a61540b0cd102675daf9
|
[
"Apache-2.0"
] | 2
|
2019-03-11T21:44:50.000Z
|
2020-03-17T09:20:47.000Z
|
setup.py
|
fkie/rosrepo
|
13cdf89e32f0c370d106a61540b0cd102675daf9
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import fastentrypoints
from setuptools import setup, __version__ as setuptools_version
import os
import sys
srcdir = os.path.normpath(os.path.join(os.path.dirname(__file__), "src"))
if os.path.isfile(os.path.join(srcdir, "rosrepo", "__init__.py")) and os.path.isfile(os.path.join(srcdir, "rosrepo", "main.py")):
sys.path.insert(0, srcdir)
else:
sys.stderr.write("This script is supposed to run from the rosrepo source tree")
sys.exit(1)
from rosrepo import __version__ as rosrepo_version
install_requires = ["catkin_pkg", "catkin_tools", "python-dateutil", "pygit2", "requests", "rosdep", "pyyaml"]
extras_require = {}
# The following code is a somewhat barbaric attempt to get conditional
# dependencies that works on setuptools versions before 18.0 as well:
if int(setuptools_version.split(".", 1)[0]) < 18:
if sys.version_info[0] < 3:
install_requires.append("futures")
if sys.version_info[:2] < (3, 5):
install_requires.append("scandir")
# Unfortunately, the fake conditional dependencies do not work with
# the caching mechanism of bdist_wheel, so if you want to create wheels,
# use at least setuptools version 18
assert "bdist_wheel" not in sys.argv
else:
# We have a reasonably modern setuptools version
from distutils.version import StrictVersion as Version
if Version(setuptools_version) >= Version("36.2"):
# Starting with setuptools 36.2, we can do proper conditional
# dependencies "PEP 508 style", the way God intended
install_requires.append("futures ; python_version<'3'")
install_requires.append("scandir ; python_version<'3.5'")
else:
# No proper conditional dependencies, but we can resort to some
# trickery and get the job done nevertheless
extras_require[":python_version<'3'"] = ["futures"]
extras_require[":python_version<'3.5'"] = ["scandir"]
setup(
name = "rosrepo",
description = "Manage ROS workspaces with multiple Gitlab repositories",
author = "Timo Röhling",
author_email = "timo.roehling@fkie.fraunhofer.de",
license = "Apache Software License",
keywords = ["catkin", "ROS", "Git"],
packages = ["rosrepo"],
package_dir = {"": "src"},
data_files = [("share/bash-completion/completions", ["bash/rosrepo"])],
version = rosrepo_version,
install_requires = install_requires,
extras_require = extras_require,
test_suite = "nose.collector",
entry_points = {
"console_scripts": ["rosrepo = rosrepo.main:main"]
},
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Version Control",
"Programming Language :: Python",
]
)
| 40
| 129
| 0.692416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,094
| 0.587872
|
66bcb0ae9b3366b6b0c297fee8c32430261239e3
| 2,948
|
py
|
Python
|
structural/decorator_and_proxy/example/proxy.py
|
BruceWW/python_designer_pattern
|
c5f8b5ee32c8984401b4a217fa35364170331063
|
[
"Apache-2.0"
] | 1
|
2020-08-29T09:17:12.000Z
|
2020-08-29T09:17:12.000Z
|
structural/decorator_and_proxy/example/proxy.py
|
BruceWW/python_design_pattern
|
c5f8b5ee32c8984401b4a217fa35364170331063
|
[
"Apache-2.0"
] | null | null | null |
structural/decorator_and_proxy/example/proxy.py
|
BruceWW/python_design_pattern
|
c5f8b5ee32c8984401b4a217fa35364170331063
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Date : 2020/8/30
# @Author : Bruce Liu /Lin Luo
# @Mail : 15869300264@163.com
class Card(object):
"""
卡片类
"""
def __init__(self, name: str, limited: bool = False, limited_num: int = 100000, surplus: int = 0):
"""
初始化一张卡
:param limited: 是否限额
:param limited_num: 限额数量
:param surplus: 余额
"""
self.name = name
# 是否限额
self.limited = limited
# 限额总数
self.limited_num = limited_num
# 余额
self.surplus = surplus
# 本次操作的金额
self.operator_num = 0
def __add__(self, other) -> bool:
"""
将other中本次操作的金额转移到self对象中
即从other中划一部分钱到本卡
:param other:
:return:
"""
# 判断是否可以转账
if (
self.limited and self.surplus + other.operator_num > self.limited_num) or other.surplus - other.operator_num < 0:
return False
else:
            # transfer is allowed
self.surplus += other.operator_num
other.surplus -= other.operator_num
other.operator_num = 0
return True
def __sub__(self, other) -> bool:
"""
将本卡中的一部分钱转到other中
:param other:
:return:
"""
# 判断是否可以转账
if self.surplus - self.operator_num >= 0 and (
not other.limited or other.surplus + self.operator_num <= other.limited_num):
self.surplus -= self.operator_num
other.surplus += self.operator_num
self.operator_num = 0
return True
else:
return False
def trans(source_card: Card, target_card: Card, trans_num: int):
"""
执行转账
:param source_card: 转出卡
:param target_card: 转入卡
:param trans_num: 转账金额
:return:
"""
print(f'trans 100 from {source_card.name} to {target_card.name}')
print(f'surplus of source_card: {source_card.name} before trans: {source_card.surplus}')
print(f'surplus of target_card: {target_card.name} before trans: {target_card.surplus}')
source_card.operator_num = trans_num
res = target_card + source_card
print(f'transfer result: {res}')
print(f'surplus of source_card: {source_card.name} after trans: {source_card.surplus}')
print(f'surplus of target_card: {target_card.name} after trans: {target_card.surplus}')
if __name__ == '__main__':
    # create three cards
    # the first card has no limit and a balance of 10000
    card_1 = Card('card_1', False, 100000, 10000)
    # the second card is limited to 1000 and has a balance of 0
    card_2 = Card('card_2', True, 1000, 0)
    # the third card is limited to 10000 and has a balance of 100
    card_3 = Card('card_3', True, 10000, 100)
    # transfer 100 from the second card to the first card
    trans(card_2, card_1, 100)
    print()
    # transfer 2000 from the first card to the third card
    trans(card_1, card_3, 2000)
    print()
    # transfer 999 from the first card to the second card
    trans(card_1, card_2, 999)
    print()
    # transfer 2 from the first card to the second card
    trans(card_1, card_2, 2)
    print()
    # transfer 10000 from the third card to the first card
    trans(card_3, card_1, 10000)
| 27.045872
| 129
| 0.587517
| 1,700
| 0.508373
| 0
| 0
| 0
| 0
| 0
| 0
| 1,597
| 0.477572
|
66bccd1b00412b945cbbdb0f6a0be3ab3a3ef37f
| 158
|
py
|
Python
|
tests/cli.py
|
joesitton/Ciphey
|
862555f13e3915428a2f4ada5538fdf0be77ffcd
|
[
"MIT"
] | 9,908
|
2020-06-06T01:06:50.000Z
|
2022-03-31T21:22:57.000Z
|
tests/cli.py
|
joesitton/Ciphey
|
862555f13e3915428a2f4ada5538fdf0be77ffcd
|
[
"MIT"
] | 423
|
2020-05-30T11:44:37.000Z
|
2022-03-18T03:15:30.000Z
|
tests/cli.py
|
joesitton/Ciphey
|
862555f13e3915428a2f4ada5538fdf0be77ffcd
|
[
"MIT"
] | 714
|
2020-06-09T20:24:41.000Z
|
2022-03-29T15:28:53.000Z
|
import subprocess
from sys import exit
result = subprocess.check_output(["ciphey", "-q", "-t 'hello'"]).decode()
if "hello" in result:
exit(0)
else:
exit(1)
| 15.8
| 64
| 0.651899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.196203
|
66bd2091216a58b01f3847f7b8145c69c89e49b7
| 13,057
|
py
|
Python
|
macro_benchmark/SegLink/seglink/unit_tests.py
|
songhappy/ai-matrix
|
901078e480c094235c721c49f8141aec7a84e70e
|
[
"Apache-2.0"
] | 180
|
2018-09-20T07:27:40.000Z
|
2022-03-19T07:55:42.000Z
|
macro_benchmark/SegLink/seglink/unit_tests.py
|
songhappy/ai-matrix
|
901078e480c094235c721c49f8141aec7a84e70e
|
[
"Apache-2.0"
] | 80
|
2018-09-26T18:55:56.000Z
|
2022-02-10T02:03:26.000Z
|
macro_benchmark/SegLink/seglink/unit_tests.py
|
songhappy/ai-matrix
|
901078e480c094235c721c49f8141aec7a84e70e
|
[
"Apache-2.0"
] | 72
|
2018-08-30T00:49:15.000Z
|
2022-02-15T23:22:40.000Z
|
import math
import os
import tensorflow as tf
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import ops
import utils
import model_fctd
import data
import config
import visualizations as vis
FLAGS = tf.app.flags.FLAGS
def test_encode_decode_synth_data():
batch_size = 50
n_gt_max = 4
image_h = 150
image_w = 300
image_size = [image_h, image_w]
map_sizes = [[19, 38], [10, 19], [5, 10], [3, 5], [2, 3], [1, 1]]
n_stages = len(map_sizes)
# region_sizes = 300. * np.minimum(FLAGS.region_size_alpha / np.asarray([38, 19, 10, 5, 3, 1]), 0.95)
region_sizes = [11.84210526, 23.68421053, 45., 90., 150., 285.]
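  # Added note: the hard-coded values above appear to come from the commented-out
  # formula with FLAGS.region_size_alpha = 1.5 (an inference, not stated in this file):
  #   300 * min(1.5 / s, 0.95) for s in [38, 19, 10, 5, 3, 1]
  #   = [11.842..., 23.684..., 45, 90, 150, 285]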
print(region_sizes)
pos_thresh = 1.5
neg_thresh = 2.0
def _generate_random_gt(batch_size, n_gt_max):
gt_cx = image_w * np.random.uniform(low=0.2, high=0.8, size=[batch_size, n_gt_max, 1])
gt_cy = image_h * np.random.uniform(low=0.2, high=0.8, size=[batch_size, n_gt_max, 1])
gt_w = image_w * np.random.uniform(low=0.2, high=1, size=[batch_size, n_gt_max, 1])
gt_h = image_h * np.random.uniform(low=0.05, high=0.5, size=[batch_size, n_gt_max, 1])
gt_theta = np.random.uniform(low=-0.5, high=0.5, size=[batch_size, n_gt_max, 1])
gt_rboxes = np.concatenate([gt_cx, gt_cy, gt_w, gt_h, gt_theta], axis=2)
return gt_rboxes
def _visualize(ax, match_status, local_gt, gt_rboxes, gt_counts,
decoded_pred, decoded_counts, link_status=None):
"""
Visualize encoded groundtruth
ARGS
ax: pyplot axis
match_status: int [map_h, map_w] match status
local_gt: [map_h, map_w, rbox_dim] encoded groundtruths
gt_rboxes: [5]
gt_counts: []
decoded_pred: [n_decoded_pred_max, 5]
decoded_counts: int []
link_status: int [map_h, map_w, 8] link status
"""
map_h, map_w, _ = local_gt.shape
step_x = float(image_w) / map_w
step_y = float(image_h) / map_h
# visualize regions
region_bboxes = []
for p in range(map_h * map_w):
px = p % map_w
py = int(math.floor(p / map_w))
grid_cx = (0.5 + px) * step_x
grid_cy = (0.5 + py) * step_y
region_bboxes.append([grid_cx, grid_cy, region_size, region_size, 0])
region_bboxes = np.asarray(region_bboxes)
# utils.visualize_rboxes(ax, region_bboxes, edgecolor='pink', facecolor='pink', alpha=0.5)
# visualize groundtruth
vis.visualize_rboxes(ax, gt_rboxes[:gt_counts, :],
verbose=False, edgecolor='green', facecolor='none', linewidth=2)
# visualize grid
for p in range(map_h * map_w):
px = p % map_w
py = p // map_w
grid_cx = (0.5 + px) * step_x
grid_cy = (0.5 + py) * step_y
match_status_p = match_status[py, px]
# draw grid center point as a circle
if match_status_p == 1: # positive
circle_color = 'red'
elif match_status_p == 0: # ignore
circle_color = 'yellow'
else: # negative
circle_color = 'blue'
circle = plt.Circle((grid_cx, grid_cy), 2, color=circle_color)
ax.add_artist(circle)
# # visualize decoded predictions
# utils.visualize_rboxes(ax, decoded_pred[:decoded_counts, :],
# edgecolor='green', facecolor='green', alpha=0.5)
if link_status is not None:
      # visualize link status
for p in range(map_h * map_w):
px = p % map_w
py = int(math.floor(p / map_w))
grid_cx = (0.5 + px) * step_x
grid_cy = (0.5 + py) * step_y
link_status_p = link_status[py, px, :]
idx = 0
for ny in [py - 1, py, py + 1]:
for nx in [px - 1, px, px + 1]:
if ny == py and nx == px:
# skip self link
continue
if link_status_p[idx] != -1:
nb_cx = (0.5 + nx) * step_x
nb_cy = (0.5 + ny) * step_y
if link_status_p[idx] == 1:
link_color = 'red'
elif link_status_p[idx] == 0:
link_color = 'yellow'
else:
                raise ValueError('Internal error')
ax.plot((grid_cx, nb_cx), (grid_cy, nb_cy),
color=link_color, alpha=0.5, linewidth=2)
idx += 1
# generate random number of random groundtruths
gt_rboxes = _generate_random_gt(batch_size, n_gt_max)
gt_counts = np.random.randint(low=1, high=n_gt_max, size=[batch_size])
node_status_below = [[[]]]
match_indices_below = [[[]]]
# fetch encoding & decoding results on all stages
fetches = {}
for i in range(n_stages):
map_size = map_sizes[i]
region_size = region_sizes[i]
match_status, link_status, local_gt, match_indices = ops.encode_groundtruth(
gt_rboxes, gt_counts, map_size, image_size,
node_status_below, match_indices_below,
region_size=region_size,
pos_scale_diff_thresh=pos_thresh,
neg_scale_diff_thresh=neg_thresh,
cross_links=False)
decoded_pred, decoded_counts = ops.decode_prediction(
match_status, local_gt, image_size, region_size=region_size)
fetches['match_status_%d' % i] = match_status
fetches['link_status_%d' % i] = link_status
fetches['local_gt_%d' % i] = local_gt
fetches['decoded_pred_%d' % i] = decoded_pred
fetches['decoded_counts_%d' % i] = decoded_counts
with tf.Session() as sess:
sess_outputs = sess.run(fetches)
fig = plt.figure()
for i in range(batch_size):
fig.clear()
for j in range(n_stages):
ax = fig.add_subplot(2, 3, j+1)
ax.invert_yaxis()
_visualize(ax,
sess_outputs['match_status_%d' % j][i],
sess_outputs['local_gt_%d' % j][i],
gt_rboxes[i],
gt_counts[i],
sess_outputs['decoded_pred_%d' % j][i],
sess_outputs['decoded_counts_%d' % j][i],
# link_status=None)
link_status=sess_outputs['link_status_%d' % j][i])
ax.set_xlim(0, image_w)
ax.set_ylim(0, image_h)
ax.set_aspect('equal')
save_path = os.path.join('../vis', 'local_gt_%d.png' % i)
plt.savefig(save_path, dpi=200)
print('Visualization saved to %s' % save_path)
def test_encode_decode_real_data():
save_dir = '../vis/gt_link_node/'
utils.mkdir_if_not_exist(save_dir)
batch_size = 233
streams = data.input_stream(FLAGS.train_record_path)
pstreams = data.train_preprocess(streams)
batch = tf.train.batch(pstreams, batch_size, num_threads=1, capacity=100)
image_h = tf.shape(batch['image'])[1]
image_w = tf.shape(batch['image'])[2]
image_size = tf.pack([image_h, image_w])
detector = model_fctd.FctdDetector()
all_maps = detector.build_model(batch['image'])
det_layers = ['det_conv4_3', 'det_fc7', 'det_conv6',
'det_conv7', 'det_conv8', 'det_pool6']
fetches = {}
fetches['images'] = batch['image']
fetches['image_size'] = image_size
for i, det_layer in enumerate(det_layers):
cls_maps, lnk_maps, reg_maps = all_maps[i]
map_h, map_w = tf.shape(cls_maps)[1], tf.shape(cls_maps)[2]
map_size = tf.pack([map_h, map_w])
node_status_below = tf.constant([[[0]]], dtype=tf.int32)
match_indices_below = tf.constant([[[0]]], dtype=tf.int32)
cross_links = False # FIXME
node_status, link_status, local_gt, match_indices = ops.encode_groundtruth(
batch['rboxes'],
batch['count'],
map_size,
image_size,
node_status_below,
match_indices_below,
region_size=detector.region_sizes[i],
pos_scale_diff_thresh=FLAGS.pos_scale_diff_threshold,
neg_scale_diff_thresh=FLAGS.neg_scale_diff_threshold,
cross_links=cross_links)
fetches['node_status_%d' % i] = node_status
fetches['link_status_%d' % i] = link_status
fetches['local_gt_%d' % i] = local_gt
def _visualize_nodes_links(ax, image, node_status, link_status, image_size):
"""
Visualize nodes and links of one example.
ARGS
`node_status`: int [map_h, map_w]
`link_status`: int [map_h, map_w, n_links]
`image_size`: int [2]
"""
ax.clear()
image_display = vis.convert_image_for_visualization(
image, mean_subtracted=True)
ax.imshow(image_display)
vis.visualize_nodes(ax, node_status, image_size)
vis.visualize_links(ax, link_status, image_size)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
tf.train.start_queue_runners(sess=sess)
sess_outputs = sess.run(fetches)
fig = plt.figure()
for i in xrange(batch_size):
fig.clear()
for j, det_layer in enumerate(det_layers):
ax = fig.add_subplot(2, 3, j+1)
_visualize_nodes_links(ax,
sess_outputs['images'][i],
sess_outputs['node_status_%d' % j][i],
sess_outputs['link_status_%d' % j][i],
sess_outputs['image_size'])
save_path = os.path.join(save_dir, 'gt_node_link_%04d.jpg' % i)
plt.savefig(save_path, dpi=200)
print('Visualization saved to %s' % save_path)
def test_clip_rboxes():
def _generate_random_rboxes(n_rboxes):
rboxes = np.zeros((n_rboxes, 5))
rboxes[:,0] = np.random.uniform(low=0.0, high=1.0, size=[n_rboxes]) # cx
rboxes[:,1] = np.random.uniform(low=0.0, high=1.0, size=[n_rboxes]) # cy
rboxes[:,2] = np.random.uniform(low=0.2, high=0.8, size=[n_rboxes]) # width
rboxes[:,3] = np.random.uniform(low=0.0, high=0.3, size=[n_rboxes]) # height
rboxes[:,4] = np.random.uniform(low=-1.0, high=1.0, size=[n_rboxes]) # theta
return rboxes
n_rboxes = 5
rboxes = tf.constant(_generate_random_rboxes(n_rboxes), tf.float32)
crop_bbox = tf.constant([0, 0, 1, 1], tf.float32)
clipped_rboxes = ops.clip_rboxes(rboxes, crop_bbox)
with tf.Session() as sess:
fetches = {'rboxes': rboxes, 'clipped_rboxes': clipped_rboxes}
sess_outputs = sess.run(fetches)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.invert_yaxis() # left-top is the origin
ax.set_aspect('equal')
ax.clear()
# plot rboxes before & after clipping
vis.visualize_rboxes(ax, sess_outputs['rboxes'],
edgecolor='blue', facecolor='none', verbose=True)
vis.visualize_rboxes(ax, sess_outputs['clipped_rboxes'],
edgecolor='green', facecolor='none', verbose=True)
save_path = os.path.join('../vis', 'clipped_rboxes.png')
plt.savefig(save_path)
print('Visualization saved to %s' % save_path)
def test_data_loading_and_preprocess():
fig = plt.figure()
ax = fig.add_subplot(111)
def _visualize_example(save_path, image, gt_rboxes, mean_subtracted=True):
ax.clear()
# convert image
image_display = vis.convert_image_for_visualization(
image, mean_subtracted=mean_subtracted)
# draw image
ax.imshow(image_display)
# draw groundtruths
image_h = image_display.shape[0]
image_w = image_display.shape[1]
vis.visualize_rboxes(ax, gt_rboxes,
edgecolor='yellow', facecolor='none', verbose=False)
# save plot
plt.savefig(save_path)
n_batches = 10
batch_size = 32
save_dir = '../vis/example'
utils.mkdir_if_not_exist(save_dir)
streams = data.input_stream('../data/synthtext_train.tf')
pstreams = data.train_preprocess(streams)
batches = tf.train.shuffle_batch(pstreams, batch_size, capacity=2000, min_after_dequeue=20,
num_threads=1)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
tf.train.start_queue_runners(sess=sess)
for i in xrange(n_batches):
fetches = {'images': batches['image'],
'gt_rboxes': batches['rboxes'],
'gt_counts': batches['count']}
sess_outputs = sess.run(fetches)
for j in xrange(batch_size):
save_path = os.path.join(save_dir, '%04d_%d.jpg' % (i, j))
gt_count = sess_outputs['gt_counts'][j]
_visualize_example(save_path,
sess_outputs['images'][j],
sess_outputs['gt_rboxes'][j, :gt_count],
mean_subtracted=True)
print('Visualization saved to %s' % save_path)
def test_max_pool_on_odd_sized_maps():
size = 5
x = np.random.rand(size, size).reshape(1,size,size,1).astype(np.float32)
print(x[0,:,:,0])
with tf.Session() as sess:
y = tf.nn.max_pool(x, [1,2,2,1], [1,2,2,1], 'SAME')
print(y.eval()[0,:,:,0])
def test_decode_combine_rboxes():
x = [np.random.rand(4,4).astype(np.float32),
np.random.rand(5,5).astype(np.float32),
np.random.rand(6,6).astype(np.float32)]
y, _ = ops.decode_combine_rboxes(x, x, x, [100, 100],
region_size=10, cell_size=10)
import ipdb; ipdb.set_trace()
with tf.Session() as sess:
y.eval()
pass
if __name__ == '__main__':
# test_encode_decode_synth_data()
test_encode_decode_real_data()
# test_clip_rboxes()
# test_data_loading_and_preprocess()
# test_max_pool_on_odd_sized_maps()
# test_decode_combine_rboxes()
| 34.726064
| 103
| 0.635138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,383
| 0.182507
|
66bdffeb1d31a5333d1015ec0693dc331a8aaed7
| 1,432
|
py
|
Python
|
setup.py
|
thefossgeek/packer.py
|
deda7a708e1968f6a206a939e97149c7aefc1c02
|
[
"Apache-2.0"
] | 24
|
2018-03-24T00:06:04.000Z
|
2022-01-29T19:25:32.000Z
|
setup.py
|
thefossgeek/packer.py
|
deda7a708e1968f6a206a939e97149c7aefc1c02
|
[
"Apache-2.0"
] | 7
|
2018-03-24T00:12:06.000Z
|
2021-07-01T23:29:28.000Z
|
setup.py
|
thefossgeek/packer.py
|
deda7a708e1968f6a206a939e97149c7aefc1c02
|
[
"Apache-2.0"
] | 7
|
2018-10-10T00:36:25.000Z
|
2022-01-27T15:02:17.000Z
|
"""
Copyright 2018 Matthew Aynalem
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.core import setup
from setuptools import find_packages
setup(
name='packer.py',
version='0.3.0',
author='Matthew Aynalem',
author_email='maynalem@gmail.com',
packages=['packerpy'],
url='https://github.com/mayn/packer.py',
license='Apache License 2.0',
description='packer.py - python library to run hashicorp packer CLI commands',
keywords="hashicorp packer",
long_description=open('README.rst').read(),
install_requires=[
],
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 34.095238
| 82
| 0.692039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,059
| 0.739525
|
66be0ddd5abfb03dffd9214bd347839460bf60b7
| 39,732
|
py
|
Python
|
pyreach/impl/logs_directory_client_test.py
|
google-research/pyreach
|
f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159
|
[
"Apache-2.0"
] | 13
|
2021-09-01T01:10:22.000Z
|
2022-03-05T10:01:52.000Z
|
pyreach/impl/logs_directory_client_test.py
|
google-research/pyreach
|
f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159
|
[
"Apache-2.0"
] | null | null | null |
pyreach/impl/logs_directory_client_test.py
|
google-research/pyreach
|
f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159
|
[
"Apache-2.0"
] | 6
|
2021-09-20T21:17:53.000Z
|
2022-03-14T18:42:48.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for logs_directory_client."""
import json
import os
import queue
import tempfile
from typing import Callable, Optional, List, Union
import unittest
from pyreach import core
from pyreach.common.python import types_gen
from pyreach.impl import logs_directory_client
from pyreach.impl import playback_client
from pyreach.impl import playback_client_test
from pyreach.impl import snapshot_impl
from pyreach.impl import utils
class LogsDirectoryClientTest(unittest.TestCase):
_device_data: List[types_gen.DeviceData] = [
types_gen.DeviceData(
device_type="robot", data_type="robot-state", ts=2, seq=1),
types_gen.DeviceData(
device_type="robot",
data_type="robot-state",
ts=3,
seq=2,
inhibit_frame_send=True),
types_gen.DeviceData(
device_type="session-manager",
data_type="connected-clients",
ts=4,
seq=3,
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(
control_session_active=True, uid="test-1"),
types_gen.ConnectedClient(uid="test-2"),
])),
types_gen.DeviceData(
device_type="robot",
data_type="robot-state",
ts=5,
seq=4,
send_to_clients=[types_gen.SendToClient("test-tag-1", "test-1")]),
types_gen.DeviceData(
device_type="robot",
data_type="robot-state",
ts=6,
seq=5,
send_to_clients=[types_gen.SendToClient("test-tag-2", "test-2")]),
types_gen.DeviceData(
device_type="robot",
data_type="robot-state",
ts=7,
seq=6,
send_to_clients=[
types_gen.SendToClient("test-tag-3", "test-1"),
types_gen.SendToClient("test-tag-4", "test-2")
]),
types_gen.DeviceData(
device_type="depth-camera",
data_type="color-depth",
ts=8,
seq=7,
color="color-8.jpg",
depth="depth-8.pgm",
send_to_clients=[types_gen.SendToClient("test-req-1", "test-1")]),
types_gen.DeviceData(
device_type="depth-camera",
data_type="color-depth",
ts=9,
seq=8,
color="depth-camera/color-9.jpg",
depth="depth-camera/depth-9.pgm",
send_to_clients=[types_gen.SendToClient("test-req-2", "test-1")]),
types_gen.DeviceData(
device_type="depth-camera",
data_type="color-depth",
ts=10,
seq=9,
color="/tmp/log/depth-camera/color-10.jpg",
depth="/tmp/log/depth-camera/depth-10.pgm",
send_to_clients=[types_gen.SendToClient("test-req-3", "test-1")]),
types_gen.DeviceData(
tag="test-in-tag-1",
device_type="robot",
data_type="cmd-status",
ts=11,
seq=10),
types_gen.DeviceData(
tag="test-in-tag-2",
device_type="robot",
data_type="cmd-status",
ts=12,
seq=11),
types_gen.DeviceData(
device_type="session-manager",
data_type="connected-clients",
ts=13,
seq=12,
connected_clients=types_gen.ConnectedClients([])),
types_gen.DeviceData(
device_type="robot", data_type="robot-state", ts=14, seq=13),
]
_device_data_client_1: List[types_gen.DeviceData] = [
types_gen.DeviceData(
device_type="session-manager",
data_type="connected-clients",
ts=4,
seq=3,
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(
control_session_active=True, is_current=True, uid="test-1"),
types_gen.ConnectedClient(uid="test-2"),
])),
types_gen.DeviceData(
tag="test-tag-1",
device_type="robot",
data_type="robot-state",
ts=5,
seq=4),
types_gen.DeviceData(
tag="test-tag-3",
device_type="robot",
data_type="robot-state",
ts=7,
seq=6),
types_gen.DeviceData(
tag="test-req-1",
device_type="depth-camera",
data_type="color-depth",
ts=8,
seq=7,
color="color-8.jpg",
depth="depth-8.pgm"),
types_gen.DeviceData(
tag="test-req-2",
device_type="depth-camera",
data_type="color-depth",
ts=9,
seq=8,
color="depth-camera/color-9.jpg",
depth="depth-camera/depth-9.pgm"),
types_gen.DeviceData(
tag="test-req-3",
device_type="depth-camera",
data_type="color-depth",
ts=10,
seq=9,
color="/tmp/log/depth-camera/color-10.jpg",
depth="/tmp/log/depth-camera/depth-10.pgm"),
types_gen.DeviceData(
tag="test-in-tag-1",
device_type="robot",
data_type="cmd-status",
ts=11,
seq=10),
]
_device_data_client_2: List[types_gen.DeviceData] = [
types_gen.DeviceData(
device_type="session-manager",
data_type="connected-clients",
ts=4,
seq=3,
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(
control_session_active=True, uid="test-1"),
types_gen.ConnectedClient(is_current=True, uid="test-2"),
])),
types_gen.DeviceData(
tag="test-tag-2",
device_type="robot",
data_type="robot-state",
ts=6,
seq=5),
types_gen.DeviceData(
tag="test-tag-4",
device_type="robot",
data_type="robot-state",
ts=7,
seq=6),
types_gen.DeviceData(
tag="test-in-tag-2",
device_type="robot",
data_type="cmd-status",
ts=12,
seq=11),
]
_cmd_data: List[types_gen.CommandData] = [
types_gen.CommandData(
ts=2,
seq=1,
origin_client="test-1",
tag="test-in-tag-1",
device_type="robot",
data_type="reach-script",
snapshot=types_gen.Snapshot(
gym_run_id="test-gym-1",
gym_episode=2,
gym_step=1,
),
),
types_gen.CommandData(
ts=3,
seq=2,
origin_client="test-2",
tag="test-in-tag-2",
device_type="robot",
data_type="reach-script",
snapshot=types_gen.Snapshot(
gym_run_id="test-gym-2",
gym_episode=2,
gym_step=1,
),
),
types_gen.CommandData(
ts=4,
seq=3,
origin_client="test-1",
tag="test-req-1",
device_type="robot",
data_type="reach-script",
snapshot=types_gen.Snapshot(
gym_run_id="test-gym-1",
gym_episode=2,
gym_step=2,
),
),
types_gen.CommandData(
ts=5,
seq=4,
origin_client="test-1",
tag="test-req-2",
device_type="robot",
data_type="reach-script",
snapshot=types_gen.Snapshot(
gym_run_id="test-gym-1",
gym_episode=3,
gym_step=1,
),
),
types_gen.CommandData(
ts=5,
seq=4,
origin_client="test-1",
device_type="robot",
data_type="reach-script",
snapshot=types_gen.Snapshot(
gym_run_id="test-gym-3",
gym_episode=2,
gym_step=1,
),
),
]
def test_empty(self) -> None:
with tempfile.TemporaryDirectory() as tempdir:
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, None, False)
try:
self.assertIsNone(c.client_id)
self.assertIsNone(c.gym_run_id)
self._test_empty(c)
finally:
c.close()
for client_id in ["test-invalid", None]:
for select_client in [True, False]:
for gym_run_id in ["test-invalid-gym", None]:
for select_gym in [True, False]:
if (not client_id and not select_client and not gym_run_id and
not select_gym):
continue
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient,
"test-robot", tempdir, client_id, select_client,
gym_run_id, select_gym)
def _test_empty(self, c: logs_directory_client.LogsDirectoryClient) -> None:
self.assertFalse(c.device_data_available())
self.assertIsNone(c.next_device_data())
self.assertRaises(
NotImplementedError, c.send_cmd,
types_gen.CommandData(
data_type="frame-request", device_type="robot", ts=1))
self.assertFalse(c.device_data_available())
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
self.assertRaises(core.PyReachError, c.seek_device_data, None, None)
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
self.assertIsNone(c.seek_device_data(1.0, None))
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
self.assertIsNone(c.seek_device_data(1.0, 1))
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
self.assertIsNone(c.seek_device_data(None, 1))
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
self.assertIsNone(c.next_snapshot())
self.assertIsNone(c.seek_snapshot(None, None, None))
self.assertIsNone(c.seek_snapshot("test", None, None))
self.assertIsNone(c.seek_snapshot(None, 1, None))
self.assertIsNone(c.seek_snapshot(None, 0, 1))
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
c.close()
self.assertIsNone(c.get_queue().get(True, 1.0))
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
def test_client_1(self) -> None:
with tempfile.TemporaryDirectory() as tempdir:
cmd_data = [
utils.copy_command_data(cmd)
for cmd in self._cmd_data
if cmd.origin_client == "test-1"
]
tags = set([cmd.tag for cmd in cmd_data])
for cmd in cmd_data:
cmd.origin_client = ""
self._write_data(tempdir, self._device_data_client_1, cmd_data)
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, None, False)
try:
self.assertIsNone(c.client_id)
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir,
self._remove_current(self._device_data_client_1),
cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
True, None, False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", False, None,
False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", True, None, False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, None, True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-1"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, "test-gym-1", True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-1"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, "test-gym-1", False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-1"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, "test-gym-3", False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-3")
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-3"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, "test-gym-3", True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-3")
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-3"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", False,
"test-gym-3", False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-3")
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-3"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", True,
"test-gym-3", False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-3")
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-3"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
True, "test-gym-3", False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-3")
self._test_client(c, tempdir, [
data for data in self._device_data_client_1
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-3"
])
finally:
c.close()
def test_client_2(self) -> None:
with tempfile.TemporaryDirectory() as tempdir:
cmd_data = [
utils.copy_command_data(cmd)
for cmd in self._cmd_data
if cmd.origin_client == "test-2"
]
tags = set([cmd.tag for cmd in cmd_data])
for cmd in cmd_data:
cmd.origin_client = ""
self._write_data(tempdir, self._device_data_client_2, cmd_data)
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, None, False)
try:
self.assertIsNone(c.client_id)
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir,
self._remove_current(self._device_data_client_2),
cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
True, None, False)
try:
self.assertEqual(c.client_id, "test-2")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, [
data for data in self._device_data_client_2
if data.tag in tags or not data.tag
], cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-2", False, None,
False)
try:
self.assertEqual(c.client_id, "test-2")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, [
data for data in self._device_data_client_2
if data.tag in tags or not data.tag
], cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-2", True, None, False)
try:
self.assertEqual(c.client_id, "test-2")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, [
data for data in self._device_data_client_2
if data.tag in tags or not data.tag
], cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, None, True)
try:
self.assertEqual(c.client_id, "test-2")
self.assertEqual(c.gym_run_id, "test-gym-2")
self._test_client(c, tempdir, [
data for data in self._device_data_client_2
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-2"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, "test-gym-2", True)
try:
self.assertEqual(c.client_id, "test-2")
self.assertEqual(c.gym_run_id, "test-gym-2")
self._test_client(c, tempdir, [
data for data in self._device_data_client_2
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-2"
])
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, "test-gym-2", False)
try:
self.assertEqual(c.client_id, "test-2")
self.assertEqual(c.gym_run_id, "test-gym-2")
self._test_client(c, tempdir, [
data for data in self._device_data_client_2
if data.tag in tags or not data.tag
], [
cmd for cmd in cmd_data
if cmd.snapshot is None or cmd.snapshot.gym_run_id == "test-gym-2"
])
finally:
c.close()
def test_serverside(self) -> None:
with tempfile.TemporaryDirectory() as tempdir:
self._write_data(tempdir, self._device_data, self._cmd_data)
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, None, False)
try:
self.assertIsNone(c.client_id)
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, self._device_data, self._cmd_data)
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
True, None, False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", None))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", True, None, False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", None))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", False, None,
False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", None))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", False,
"test-gym-1", False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", False,
"test-gym-1", True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", False, None, True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, "test-gym-1", False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, "test-gym-1", True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
False, None, True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
True, "test-gym-1", False)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
True, "test-gym-1", True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir, None,
True, None, True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-1")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-1"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-2", True, None, False)
try:
self.assertEqual(c.client_id, "test-2")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, self._device_data_client_2,
self._filter_cmd(self._cmd_data, "test-2", None))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-2", False, None,
False)
try:
self.assertEqual(c.client_id, "test-2")
self.assertIsNone(c.gym_run_id)
self._test_client(c, tempdir, self._device_data_client_2,
self._filter_cmd(self._cmd_data, "test-2", None))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-2", False,
"test-gym-2", True)
try:
self.assertEqual(c.client_id, "test-2")
self.assertEqual(c.gym_run_id, "test-gym-2")
self._test_client(
c, tempdir, self._device_data_client_2,
self._filter_cmd(self._cmd_data, "test-2", "test-gym-2"))
finally:
c.close()
c = logs_directory_client.LogsDirectoryClient("test-robot", tempdir,
"test-1", False,
"test-gym-3", True)
try:
self.assertEqual(c.client_id, "test-1")
self.assertEqual(c.gym_run_id, "test-gym-3")
self._test_client(
c, tempdir, self._device_data_client_1,
self._filter_cmd(self._cmd_data, "test-1", "test-gym-3"))
finally:
c.close()
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-3", False, None, False)
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-3", False, None, True)
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-3", False, None, True)
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-2", False, "test-gym-1", False)
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-1", False, "test-gym-2", False)
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-1", True, "test-gym-2", False)
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-2", True, "test-gym-1", True)
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-2", True, "test-gym-1", False)
self.assertRaises(core.PyReachError,
logs_directory_client.LogsDirectoryClient, "test-robot",
tempdir, "test-1", False, "test-gym-2", True)
def _remove_current(
self,
device_data: List[types_gen.DeviceData]) -> List[types_gen.DeviceData]:
d = [utils.copy_device_data(data) for data in device_data]
for data in d:
if data.connected_clients and data.connected_clients.clients:
for c in data.connected_clients.clients:
c.is_current = False
return d
def _filter_cmd(self, command_data: List[types_gen.CommandData],
client_id: Optional[str],
gym_run_id: Optional[str]) -> List[types_gen.CommandData]:
output = []
for cmd in command_data:
if ((client_id is None or not cmd.origin_client or
cmd.origin_client == client_id) and
(gym_run_id is None or cmd.snapshot is None or
cmd.snapshot.gym_run_id == gym_run_id)):
output.append(cmd)
return output
def _test_client(self, c: logs_directory_client.LogsDirectoryClient,
tempdir: str, device_data: List[types_gen.DeviceData],
command_data: List[types_gen.CommandData]) -> None:
iteration = 0
while True:
have_data = c.device_data_available()
data = c.next_device_data()
if not data:
self.assertFalse(have_data)
self.assertEqual(iteration, len(device_data))
break
self.assertTrue(have_data)
if iteration >= len(device_data):
self.assertIsNone(data.to_json())
self._assert_data(c, tempdir, data, device_data[iteration])
iteration += 1
for iteration in range(len(device_data) - 1, -1, -1):
data = c.seek_device_data(
utils.time_at_timestamp(device_data[iteration].ts), None)
assert data
self._assert_data(c, tempdir, data, device_data[iteration])
for iteration in range(len(device_data) - 1, -1, -1):
data = c.seek_device_data(
utils.time_at_timestamp(device_data[iteration].ts),
device_data[iteration].seq)
assert data
self._assert_data(c, tempdir, data, device_data[iteration])
for iteration in range(len(device_data) - 1, -1, -1):
data = c.seek_device_data(
utils.time_at_timestamp(device_data[iteration].ts),
device_data[iteration].seq + 1000)
self.assertIsNone(data)
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
for cmd in command_data:
if cmd.snapshot:
snapshot = c.next_snapshot()
self.assertEqual(snapshot, snapshot_impl.reverse_snapshot(cmd.snapshot))
for iteration in range(len(command_data) - 1, -1, -1):
cmd = command_data[iteration]
if cmd.snapshot:
snapshot = c.seek_snapshot(cmd.snapshot.gym_run_id,
cmd.snapshot.gym_episode,
cmd.snapshot.gym_step)
self.assertEqual(snapshot, snapshot_impl.reverse_snapshot(cmd.snapshot))
snapshot = c.seek_snapshot(None, None, None)
self.assertEqual(snapshot,
snapshot_impl.reverse_snapshot(command_data[0].snapshot))
self.assertIsNone(c.seek_device_data(1.0, None))
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
c.close()
self.assertIsNone(c.get_queue().get(True, 1.0))
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
def _write_data(self, tempdir: str, device_data: List[types_gen.DeviceData],
cmd_data: List[types_gen.CommandData]) -> None:
os.mkdir(os.path.join(tempdir, "device-data"))
with open(os.path.join(tempdir, "device-data", "00000.json"), "w") as f:
for data in device_data:
f.write(json.dumps(data.to_json()) + "\n")
os.mkdir(os.path.join(tempdir, "command-data"))
with open(os.path.join(tempdir, "command-data", "00000.json"), "w") as f:
for cmd in cmd_data:
f.write(json.dumps(cmd.to_json()) + "\n")
def _assert_data(self, c: logs_directory_client.LogsDirectoryClient,
tempdir: str, data: types_gen.DeviceData,
expect_data: types_gen.DeviceData) -> None:
expect_data = utils.copy_device_data(expect_data)
if expect_data.depth:
expect_data.depth = os.path.join(tempdir, "depth-camera",
"depth-" + str(expect_data.ts) + ".pgm")
if expect_data.color:
expect_data.color = os.path.join(tempdir, "depth-camera",
"color-" + str(expect_data.ts) + ".jpg")
playback_client_test.log_data_equal(self, data, expect_data)
self.assertEqual(c.get_queue().get(True, 1.0), data)
self.assertRaises(queue.Empty, c.get_queue().get, False, 0.0)
def test_data_reader(self) -> None:
device_data = [
types_gen.DeviceData(
ts=1000,
seq=2,
data_type="robot-state",
device_type="robot",
device_name="test"),
types_gen.DeviceData(
ts=2001,
seq=1,
data_type="robot-state",
device_type="robot",
device_name="test"),
types_gen.DeviceData(
ts=2000,
seq=3,
data_type="robot-state",
device_type="robot",
device_name="test"),
]
def factory(
working_directory: str) -> logs_directory_client._DeviceDataReader:
return logs_directory_client._DeviceDataReader(working_directory,
working_directory)
self._test_directory_iterator(device_data, factory)
def test_command_reader(self) -> None:
command_data = [
types_gen.CommandData(
ts=1000,
seq=2,
data_type="frame-request",
device_type="robot",
device_name="test"),
types_gen.CommandData(
ts=2001,
seq=1,
data_type="frame-request",
device_type="robot",
device_name="test"),
types_gen.CommandData(
ts=2000,
seq=3,
data_type="frame-request",
device_type="robot",
device_name="test"),
]
self._test_directory_iterator(command_data,
logs_directory_client._CommandDataReader)
def test_empty_data_reader(self) -> None:
def factory(
working_directory: str) -> logs_directory_client._DeviceDataReader:
return logs_directory_client._DeviceDataReader(working_directory,
working_directory)
empty_data: List[types_gen.DeviceData] = []
self._test_directory_iterator(empty_data, factory)
def test_empty_command_reader(self) -> None:
empty_cmd: List[types_gen.CommandData] = []
self._test_directory_iterator(empty_cmd,
logs_directory_client._CommandDataReader)
def _test_directory_iterator(
self, data: Union[List[types_gen.DeviceData],
List[types_gen.CommandData]],
factory: Callable[[str],
Union[playback_client.Iterator[types_gen.DeviceData],
playback_client.Iterator[types_gen.CommandData]]]
) -> None:
test_invalid_seqs = [
(0.0, 0),
(1.0, 0),
(1.0, 1),
(2.001, 3),
(2.0, 2),
(1.001, None),
(1.2, None),
(None, 4),
(None, 0),
(0.0, None),
]
with tempfile.TemporaryDirectory() as tempdir:
if data:
self.assertGreater(len(data), 2)
with open(os.path.join(tempdir, "00000.json"), "w") as f:
for element in data[0:2]:
f.write(json.dumps(element.to_json()) + "\n")
with open(os.path.join(tempdir, "00001.json"), "w") as f:
for element in data[2:]:
f.write(json.dumps(element.to_json()) + "\n")
def factory_wrapper(
) -> Union[playback_client.Iterator[types_gen.DeviceData],
playback_client.Iterator[types_gen.CommandData]]:
return factory(tempdir)
if data:
playback_client_test.log_iterator_test(self, data, test_invalid_seqs,
factory_wrapper, False)
else:
playback_client_test.empty_log_iterator_test(self, factory_wrapper)
if __name__ == "__main__":
unittest.main()
| 39.652695
| 80
| 0.55892
| 38,674
| 0.973372
| 0
| 0
| 0
| 0
| 0
| 0
| 4,463
| 0.112328
|
66bff38e64bc42b7572591b13e17cd3a431e4073
| 1,007
|
py
|
Python
|
SoftLayer/CLI/file/duplicate_convert_status.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
SoftLayer/CLI/file/duplicate_convert_status.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
SoftLayer/CLI/file/duplicate_convert_status.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
"""Get status for split or move completed percentage of a given file duplicate volume."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand,
epilog="""Get status for split or move completed percentage of a given file duplicate volume.""")
@click.argument('volume-id')
@environment.pass_env
def cli(env, volume_id):
"""Get status for split or move completed percentage of a given file duplicate volume."""
table = formatting.Table(['Username', 'Active Conversion Start Timestamp', 'Completed Percentage'])
file_manager = SoftLayer.FileStorageManager(env.client)
value = file_manager.convert_dupe_status(volume_id)
table.add_row(
[
value['volumeUsername'],
value['activeConversionStartTime'],
value['deDuplicateConversionPercentage']
]
)
env.fout(table)
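# Example invocation (a sketch; the volume id 12345678 is a placeholder and the
# command path assumes this module is wired up under `slcli file` in the CLI routing):
#   slcli file duplicate-convert-status 12345678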
| 32.483871
| 112
| 0.713009
| 0
| 0
| 0
| 0
| 761
| 0.75571
| 0
| 0
| 467
| 0.463754
|
66c4abe639069bea0f557f4dba81d69a1839cf18
| 392
|
py
|
Python
|
apps/saas/forms.py
|
lucaslucyk/sigec
|
cdf65868e2f8ead35b005603611fcd20446633c7
|
[
"MIT"
] | null | null | null |
apps/saas/forms.py
|
lucaslucyk/sigec
|
cdf65868e2f8ead35b005603611fcd20446633c7
|
[
"MIT"
] | 7
|
2020-02-12T03:10:01.000Z
|
2021-06-10T19:30:50.000Z
|
apps/saas/forms.py
|
lucaslucyk/sigec
|
cdf65868e2f8ead35b005603611fcd20446633c7
|
[
"MIT"
] | null | null | null |
from django import forms
#from pagedown.widgets import PagedownWidget
from apps.saas.models import Offer
class OfferForm(forms.ModelForm):
#content= forms.CharField(widget=PagedownWidget(show_preview=False))
#publish= forms.DateField(widget=forms.SelectDateWidget)
class Meta:
model = Offer
fields= [
"tipo_venta",
"financing",
"hardware",
"empleados",
"modulos",
]
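# Minimal usage sketch (hypothetical view code; the field values below are
# placeholders, not real primary keys from any database):
#
# form = OfferForm(data={
#     "tipo_venta": 1,
#     "financing": 1,
#     "hardware": [1],
#     "empleados": 10,
#     "modulos": [1, 2],
# })
# if form.is_valid():
#     offer = form.save()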
| 21.777778
| 69
| 0.742347
| 286
| 0.729592
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.563776
|
66c4c0ab19cb9fa1cb71b15b0da8a32e24b51bb6
| 5,491
|
py
|
Python
|
Linuxu.py
|
Jefferson-Hsu/Linuxu-shell
|
2bbc42248e05ac01f8d3466479bb8106833c7ab1
|
[
"MIT"
] | 1
|
2022-03-04T05:53:33.000Z
|
2022-03-04T05:53:33.000Z
|
Linuxu.py
|
Jefferson-Hsu/Linuxu-shell
|
2bbc42248e05ac01f8d3466479bb8106833c7ab1
|
[
"MIT"
] | null | null | null |
Linuxu.py
|
Jefferson-Hsu/Linuxu-shell
|
2bbc42248e05ac01f8d3466479bb8106833c7ab1
|
[
"MIT"
] | null | null | null |
#library
import os
# ASCII art banner
print(" _ _ _ _ __ __ _ _ .____ .__ ")
print("| | | | ___| | | __\\ \\ / /__ _ __| | __| | | | |__| ____ __ _____ _____ __ ")
print("| |_| |/ _ \\ | |/ _ \\ \\ /\\ / / _ \\| '__| |/ _` | | | | |/ \| | \ \/ / | \ ")
print("| _ | __/ | | (_) \\ V V / (_) | | | | (_| | | |___| | | \ | /> <| | / ")
print("|_| |_|\\___|_|_|\\___/ \\_/\\_/ \\___/|_| |_|\\__,_| |_______ \__|___| /____//__/\_ \____/ ")
print(" ")
print(" ")
print(" ")
#password & user name
join_key=3
again_key=4
name="XuFaxin"
password="Xinxin080502"
print("--------------------------------------------------------------------------------------------------------------------------------------------")
input_name=input("Please type the user name: ")
print("--------------------------------------------------------------------------------------------------------------------------------------------")
input_password=input("Please type the password: ")
print("--------------------------------------------------------------------------------------------------------------------------------------------")
print("welcome to Linuxu system!!!")
print(" ")
while(join_key==3):
if input_name=="XuFaxin" and input_password=="Xinxin080502":
print(" ")
print(" ")
else:
print("Bye,you are not user!")
break
#command shell
command=input("XuFaxin@computer% ")
#root command
if(command=="root"):
print(" ")
print("you are rooter!")
print(" ")
print("But don't be happy too soon")
print(" ")
print("-----------------------------------------------------------------------------------------------------------------------------------")
print(" In the world of Linuxu XuFaxin is god!")
print("-----------------------------------------------------------------------------------------------------------------------------------")
print(" ")
#Calculator command
if(command=="math"):
print("Develop by XuFaxin")
counts=3
while counts>0:
str1=input("First number: ")
str2=input("Second number:")
X=int(str1)
Y=int(str2)
print(X+Y)
print(X-Y)
print(X*Y)
print(X/Y)
print(X**Y)
print(X//Y)
break
#game command
if(command=="game"):
print(" ")
print("Welcome to XuFaxin's guess number game!")
print(" ")
print("You have three chances")
print(" ")
print("Guess an integer between 1 and 10")
print(" ")
print("develop by XuFaxin")
print(" ")
print(" ")
import random
answer=random.randint(1,10)
counts=3
while counts>0:
temp=input("Guess a number: ")
guess=int(temp)
if guess==answer:
print(" ")
print("Win")
print(" ")
print("Win!!! But no pay! HAHA!")
else:
                if guess>answer:
print(" ")
print("Big!")
print(" ")
else:
print(" ")
print("small!")
counts=counts-1
#clear command
if(command=="clear"):
os.system( 'cls' )
os.system("clear")
#list command
if(command=="ls"):
print("-------------------------------------------------------------------------------------------------------------------------------")
print(" ||game|| ||math|| ")
print("-------------------------------------------------------------------------------------------------------------------------------")
#exit command
if(command=="exit"):
print(" ")
print("See you again!")
break
| 44.282258
| 152
| 0.24094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,628
| 0.660718
|
66c7e494275971e9a3a3aa777ced7402edea752a
| 1,237
|
py
|
Python
|
src/test.py
|
williamyang1991/TET-GAN
|
bdfca141fc14c5917fd9be8d2bc23870f9ad3288
|
[
"MIT"
] | 86
|
2019-01-02T06:20:09.000Z
|
2022-03-23T01:16:32.000Z
|
src/test.py
|
williamyang1991/TET-GAN
|
bdfca141fc14c5917fd9be8d2bc23870f9ad3288
|
[
"MIT"
] | 5
|
2019-01-22T06:18:26.000Z
|
2021-12-16T02:01:34.000Z
|
src/test.py
|
williamyang1991/TET-GAN
|
bdfca141fc14c5917fd9be8d2bc23870f9ad3288
|
[
"MIT"
] | 24
|
2019-01-03T09:36:54.000Z
|
2021-12-14T10:04:11.000Z
|
from options import TestOptions
import torch
from models import TETGAN
from utils import load_image, to_data, to_var, visualize, save_image
import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
# parse options
parser = TestOptions()
opts = parser.parse()
# data loader
print('--- load data ---')
style = load_image(opts.style_name)
if opts.gpu != 0:
style = to_var(style)
if opts.c2s == 1:
content = load_image(opts.content_name, opts.content_type)
if opts.gpu != 0:
content = to_var(content)
# model
print('--- load model ---')
tetGAN = TETGAN()
tetGAN.load_state_dict(torch.load(opts.model))
if opts.gpu != 0:
tetGAN.cuda()
tetGAN.eval()
print('--- testing ---')
if opts.c2s == 1:
result = tetGAN(content, style)
else:
result = tetGAN.desty_forward(style)
if opts.gpu != 0:
result = to_data(result)
print('--- save ---')
# directory
result_filename = os.path.join(opts.result_dir, opts.name)
if not os.path.exists(opts.result_dir):
os.mkdir(opts.result_dir)
save_image(result[0], result_filename)
if __name__ == '__main__':
main()
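# Example invocation (a sketch; the exact flag names are defined in options.TestOptions,
# which is not shown here, so treat them as assumptions):
#   python test.py --style_name ../data/style/26.png --content_name ../data/content/4.png --c2s 1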
| 25.770833
| 68
| 0.609539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 167
| 0.135004
|
66cc342e6fa18c2dd06d530c8ed54f8e34f04274
| 1,853
|
py
|
Python
|
scripts/bulkLoadUrls.py
|
conveyal/gtfs-data-manager
|
e7269fc1660f1816da269b1c116b43bdf758900b
|
[
"MIT"
] | 25
|
2015-02-11T19:20:07.000Z
|
2021-03-10T07:53:29.000Z
|
scripts/bulkLoadUrls.py
|
conveyal/gtfs-data-manager
|
e7269fc1660f1816da269b1c116b43bdf758900b
|
[
"MIT"
] | 53
|
2015-01-07T20:30:56.000Z
|
2016-10-10T12:47:22.000Z
|
scripts/bulkLoadUrls.py
|
conveyal/gtfs-data-manager
|
e7269fc1660f1816da269b1c116b43bdf758900b
|
[
"MIT"
] | 3
|
2015-01-03T10:17:34.000Z
|
2015-11-10T10:44:27.000Z
|
#!/usr/bin/python
# load many feeds to the GTFS data manager, from a csv with fields name and url
# usage: bulkLoadFeeds.py file.csv http://server.example.com/
import csv
from getpass import getpass
from sys import argv
import json
from cookielib import CookieJar
import urllib2
from urllib import urlencode
if len(argv) != 3:
    print 'usage: %s file.csv http://gtfs-data-manager.example.com' % argv[0]
    exit(1)  # stop here; otherwise argv[2] below raises IndexError
server = argv[2]
with open(argv[1]) as f:
reader = csv.DictReader(f)
# log in to the server
print 'Please authenticate'
uname = raw_input('username: ')
pw = getpass('password: ')
# strip trailing slash to normalize url
server = server if not server.endswith('/') else server[:-1]
# cookie handling
# http://www.techchorus.net/using-cookie-jar-urllib2
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# authenticate
opener.open(server + '/authenticate', urlencode(dict(username=uname, password=pw)))
# choose feed collection
colls = json.load(opener.open(server + '/api/feedcollections'))
print 'choose a feed collection: '
for i in xrange(len(colls)):
print '%s. %s' % (i + 1, colls[i]['name'])
while True:
try:
coll = colls[int(raw_input('> ')) - 1]
except ValueError:
continue
else:
break
# load each feed
for feed in reader:
data = dict(
name = feed['name'],
url = feed['url'],
isPublic = True,
autofetch = True,
# every day
feedCollection = coll
)
# http://stackoverflow.com/questions/3290522
req = urllib2.Request(server + '/api/feedsources/', json.dumps(data), {'Content-Type': 'application/json'})
opener.open(req)
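# Example of the expected input CSV (a sketch; only the 'name' and 'url' columns
# read above are required):
#   name,url
#   "Example Transit","http://gtfs.example.com/feed.zip"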
| 25.736111
| 115
| 0.611981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 646
| 0.348624
|
66ce81273371c8d4fdeb7dac39c7d81c55ecac89
| 5,962
|
py
|
Python
|
EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py
|
mmicromegas/ransX
|
2faaa786e00cfd14dce0e18f0793cd0252428d2a
|
[
"BSD-2-Clause"
] | 4
|
2019-04-22T11:43:47.000Z
|
2020-09-16T00:28:15.000Z
|
EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py
|
mmicromegas/ransX
|
2faaa786e00cfd14dce0e18f0793cd0252428d2a
|
[
"BSD-2-Clause"
] | 34
|
2019-07-01T09:11:00.000Z
|
2022-03-30T13:35:43.000Z
|
EQUATIONS/FOR_RESOLUTION_STUDY/BuoyancyResolutionStudy.py
|
mmicromegas/ransX
|
2faaa786e00cfd14dce0e18f0793cd0252428d2a
|
[
"BSD-2-Clause"
] | 1
|
2020-09-16T00:28:17.000Z
|
2020-09-16T00:28:17.000Z
|
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class BuoyancyResolutionStudy(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, ieos, intc, data_prefix):
super(BuoyancyResolutionStudy, self).__init__(ig)
# load data to list of structured arrays
eht = []
for ffile in filename:
eht.append(self.customLoad(ffile))
# declare data lists
xzn0, nx, ny, nz, xznr, xznl = [], [], [], [], [], []
dd, pp, gg, gamma1, gamma2 = [], [], [], [], []
dlnrhodr, dlnpdr, dlnrhodrs, nsq, br, dx = [], [], [], [], [], []
for i in range(len(filename)):
# load grid
xzn0.append(np.asarray(eht[i].item().get('xzn0')))
xznl.append(np.asarray(eht[i].item().get('xznl')))
xznr.append(np.asarray(eht[i].item().get('xznr')))
nx.append(np.asarray(eht[i].item().get('nx')))
ny.append(np.asarray(eht[i].item().get('ny')))
nz.append(np.asarray(eht[i].item().get('nz')))
# pick specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd.append(np.asarray(eht[i].item().get('dd')[intc]))
pp.append(np.asarray(eht[i].item().get('pp')[intc]))
gg.append(np.asarray(eht[i].item().get('gg')[intc]))
# override gamma for ideal gas eos (need to be fixed in PROMPI later)
if ieos == 1:
cp = self.getRAdata(eht[i], 'cp')[intc]
cv = self.getRAdata(eht[i], 'cv')[intc]
gamma1.append(cp / cv) # gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110
gamma2.append(cp / cv) # gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110)
else:
gamma1.append(np.asarray(eht[i].item().get('gamma1')[intc]))
gamma2.append(np.asarray(eht[i].item().get('gamma2')[intc]))
dlnrhodr.append(self.deriv(np.log(dd[i]), xzn0[i]))
dlnpdr.append(self.deriv(np.log(pp[i]), xzn0[i]))
dlnrhodrs.append((1. / gamma1[i]) * dlnpdr[i])
nsq.append(gg[i] * (dlnrhodr[i] - dlnrhodrs[i]))
dx.append(xznr[i] - xznl[i])
b = []
# print(nsq[0],nx[0],int(nx[0]))
for i in range(len(filename)):
br = np.zeros(int(nx[i]))
for ii in range(0, int(nx[i])):
nsqf = nsq[i]
dxf = dx[i]
br[ii] = br[ii - 1] + nsqf[ii] * dxf[ii]
# print(i,ii)
b.append(br)
# share data globally
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.nx = nx
self.ny = ny
self.nz = nz
self.b = b
self.ig = ig
def plot_buoyancy(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot buoyancy in the model"""
if (LAXIS != 2):
print("ERROR(BuoyancyResolutionStudy.py): Only LAXIS=2 is supported.")
sys.exit()
# load x GRID
grd = self.xzn0
# load DATA to plot
plt1 = self.b
nx = self.nx
ny = self.ny
nz = self.nz
# find maximum resolution data
grd_maxres = self.maxresdata(grd)
plt1_maxres = self.maxresdata(plt1)
plt_interp = []
for i in range(len(grd)):
plt_interp.append(np.interp(grd_maxres, grd[i], plt1[i]))
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
plt10_tmp = plt1[0]
plt11_tmp = plt1[0]
plt1_foraxislimit = []
plt1max = np.max(plt1[0])
for plt1i in plt1:
if (np.max(plt1i) > plt1max):
plt1_foraxislimit = plt1i
# set plot boundaries
to_plot = [plt1_foraxislimit]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('Buoyancy')
for i in range(len(grd)):
plt.plot(grd[i], plt1[i], label=str(self.nx[i]) + ' x ' + str(self.ny[i]) + ' x ' + str(self.nz[i]))
print("[WARNING] (BuoyancyResolutionStudy.py): convective boundary markers taken from 256c run, tavg = 1500 secs")
# taken from 256cubed, tavg 1500 sec
bconv = 4.1e8
tconv = 9.7e8
# convective boundary markers
plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"$buoyancy$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"$buoyancy$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
plt.savefig('RESULTS/' + self.data_prefix + 'mean_buoyancy.png')
# find data with maximum resolution
def maxresdata(self, data):
tmp = 0
for idata in data:
if idata.shape[0] > tmp:
data_maxres = idata
else:
tmp = idata.shape[0]
return data_maxres
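# Instantiation sketch (file names, the intc index and the plot limits are placeholders;
# LAXIS must be 2, as enforced in plot_buoyancy above):
#
#   ransBuoy = BuoyancyResolutionStudy(['eht_128.npy', 'eht_256.npy'],
#                                      ig=1, ieos=1, intc=5, data_prefix='prefix_')
#   ransBuoy.plot_buoyancy(LAXIS=2, xbl=3.9e8, xbr=9.8e8, ybu=4.e5, ybd=-4.e5, ilg=1)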
| 32.939227
| 122
| 0.548306
| 5,490
| 0.920832
| 0
| 0
| 0
| 0
| 0
| 0
| 1,442
| 0.241865
|
66d0333de9cb88854cae7ea5468d3e9e83ace47c
| 953
|
py
|
Python
|
quokka/ext/weasyprint.py
|
yencchen/quokka_epus
|
d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5
|
[
"MIT"
] | 1
|
2020-10-31T03:57:07.000Z
|
2020-10-31T03:57:07.000Z
|
quokka/ext/weasyprint.py
|
yencchen/quokka_epus
|
d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5
|
[
"MIT"
] | null | null | null |
quokka/ext/weasyprint.py
|
yencchen/quokka_epus
|
d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
from flask import url_for
logger = logging.getLogger()
try:
from flask_weasyprint import render_pdf
import_error = False
except (ImportError, OSError) as e:
# print("""
# Error importing flask-weasyprint!
# PDF support is temporarily disabled.
# Manual dependencies may need to be installed.
# See,
# `http://weasyprint.org/docs/install/#by-platform`_
# `https://github.com/Kozea/WeasyPrint/issues/79`_
# """ + str(e))
import_error = True
def configure(app):
# only configure .pdf extension if it's enabled
# and configured correctly in the environment.
if app.config.get('ENABLE_TO_PDF', False) and not import_error:
def render_to_pdf(long_slug):
return render_pdf(url_for('detail', long_slug=long_slug))
app.add_url_rule('/<path:long_slug>.pdf', view_func=render_to_pdf)
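# Usage sketch (assumes ENABLE_TO_PDF=True in the Quokka config and a 'detail'
# endpoint that resolves long_slug, as used above):
#   GET /blog/my-post.pdf  ->  render_pdf(url_for('detail', long_slug='blog/my-post'))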
| 25.756757
| 74
| 0.684155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 424
| 0.444911
|
66d0fa4f73c90e59d6dc87d8a6c39b035c3b58f1
| 392
|
py
|
Python
|
jupyter_server_terminals/__init__.py
|
blink1073/jupyter_server_terminals
|
cc0363421ab50fded26c8519ea4694bf1a391fce
|
[
"BSD-3-Clause-Clear"
] | 3
|
2021-12-30T23:55:47.000Z
|
2022-02-18T01:14:54.000Z
|
jupyter_server_terminals/__init__.py
|
blink1073/jupyter_server_terminals
|
cc0363421ab50fded26c8519ea4694bf1a391fce
|
[
"BSD-3-Clause-Clear"
] | 5
|
2021-12-26T21:27:11.000Z
|
2022-03-03T11:37:04.000Z
|
jupyter_server_terminals/__init__.py
|
blink1073/jupyter_server_terminals
|
cc0363421ab50fded26c8519ea4694bf1a391fce
|
[
"BSD-3-Clause-Clear"
] | 4
|
2021-12-26T21:25:45.000Z
|
2022-01-27T02:47:10.000Z
|
from ._version import __version__ # noqa:F401
try:
from .app import TerminalsExtensionApp
except ModuleNotFoundError:
import warnings
warnings.warn("Could not import submodules")
def _jupyter_server_extension_points(): # pragma: no cover
return [
{
"module": "jupyter_server_terminals.app",
"app": TerminalsExtensionApp,
},
]
| 21.777778
| 59
| 0.663265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.257653
|
66d19b6566c778e3d204fad20cbbd324cf9a6a61
| 5,256
|
py
|
Python
|
CommandsToFunction.py
|
destruc7i0n/CommandsToFunction
|
f1c29c6280524c54cc5876b966c1ff36ab1c2d27
|
[
"MIT"
] | 1
|
2018-03-10T21:09:04.000Z
|
2018-03-10T21:09:04.000Z
|
CommandsToFunction.py
|
destruc7i0n/CommandsToFunction
|
f1c29c6280524c54cc5876b966c1ff36ab1c2d27
|
[
"MIT"
] | null | null | null |
CommandsToFunction.py
|
destruc7i0n/CommandsToFunction
|
f1c29c6280524c54cc5876b966c1ff36ab1c2d27
|
[
"MIT"
] | null | null | null |
# By TheDestruc7i0n https://thedestruc7i0n.ca
# MrGarretto for the code for traversing the command block chain https://mrgarretto.com
import mcplatform
import codecs
__version__ = "V1.4.1"
displayName = "Commands to Function"
inputs = (
("Converts a command block chain into a function.", "label"),
("The filter also includes a polyfill for conditional commands.", "label"),
("Select 1 repeating command block.", "label"),
("Ask for file save", True),
("If above is not checked, it will print the commands to the console.", "label"),
("Area effect cloud tag", ("string", "value=cond")),
("The above sets the tag that the area effect cloud will have, change if you have multiple functions.", "label"),
("Please ensure that there is a SuccessCount dummy objective in the world if you're using conditional command blocks.", "label"),
("Based off a filter by MrGarretto.", "label"),
("By TheDestruc7i0n: https://thedestruc7i0n.ca/", "label"),
)
def addPre():
aec_summon = "summon area_effect_cloud ~ ~ ~ {Tags:[%s],Particle:\"take\"}" % tag
scoreboard_add = "scoreboard players add @e[type=area_effect_cloud,tag=%s] SuccessCount 0" % tag
stats_cmd = "stats entity @e[type=area_effect_cloud,tag=%s] set SuccessCount @s SuccessCount" % tag
return [aec_summon, scoreboard_add, stats_cmd]
def perform(level, box, options):
global tag
tag = options["Area effect cloud tag"] or "cond"
# the aec spawns
pre = []
# the amount of conditionals
conditional_count = 0
prefix = ""
# the main commands
cmds = []
if box.volume != 1:
raise Exception("The box must only be 1x1x1!")
    # code below is based on code from MrGarretto
# since the box is 1x1x1, this is all we need
x, y, z = box.origin
if level.blockAt(x, y, z) == 210 or level.blockAt(x, y, z) == 137:
doneChain = 0
chX = x
chY = y
chZ = z
whileIndex = 0
while (doneChain == 0):
if (level.blockAt(chX, chY, chZ) == 210 and whileIndex == 0) or (level.blockAt(chX, chY, chZ) == 137 and whileIndex == 0) or level.blockAt(chX, chY, chZ) == 211:
bX = chX
bY = chY
bZ = chZ
if level.blockDataAt(chX, chY, chZ) == 0 or level.blockDataAt(chX, chY, chZ) == 8:
chY -= 1
elif level.blockDataAt(chX, chY, chZ) == 1 or level.blockDataAt(chX, chY, chZ) == 9:
chY += 1
elif level.blockDataAt(chX, chY, chZ) == 2 or level.blockDataAt(chX, chY, chZ) == 10:
chZ -= 1
elif level.blockDataAt(chX, chY, chZ) == 3 or level.blockDataAt(chX, chY, chZ) == 11:
chZ += 1
elif level.blockDataAt(chX, chY, chZ) == 4 or level.blockDataAt(chX, chY, chZ) == 12:
chX -= 1
elif level.blockDataAt(chX, chY, chZ) == 5 or level.blockDataAt(chX, chY, chZ) == 13:
chX += 1
# ignore impulse command blocks from conditional checks
if level.blockDataAt(bX, bY, bZ) > 7 and level.blockAt(chX, chY, chZ) != 137:
                    # check whether the aec setup commands have already been added; if not, add them
if len(pre) < 3:
pre += addPre()
conditional_count += 1
prefix = ""
if conditional_count == 1:
# add init command to the last command
init_command = "execute @e[type=area_effect_cloud,tag=%s] ~ ~ ~ " % tag
cmds[-1] = init_command + cmds[-1]
prefix = "execute @e[type=area_effect_cloud,tag=%s,score_SuccessCount_min=1] ~ ~ ~ " % tag
else:
# reset the prefix and count of conditionals if more than one
conditional_count = 0
prefix = ""
command = level.tileEntityAt(bX, bY, bZ)["Command"].value
# remove preceding slash if the command is non-blank
if command:
if command[0] == "/":
command = command[1:]
cmds.append(prefix + command)
whileIndex += 1
else:
doneChain = 1
# end code from MrGarretto
# join the two lists together
cmds = pre + cmds
# convert to line by line
commands = "\n".join(cmds)
if options["Ask for file save"]:
# Now save the file
file_path = mcplatform.askSaveFile(".", "Save as...", tag, "*.mcfunction", "mcfunction")
if file_path:
with codecs.open(file_path, "w", "utf-8") as file:
file.write(commands)
# raise Exception to not save the world
raise Exception("Saved file.\nPlease ensure that there is a SuccessCount dummy objective in the world if you're using conditional command blocks.\n\nThis is not an error.")
else:
print "#" * 74
print commands
print "#" * 74
# raise Exception to not save the world
raise Exception("Commands have been outputted to the console.\n\nThis is not an error.")
| 39.818182
| 180
| 0.562976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,105
| 0.400495
|
66d1a62152bf00302159e841b40dff724b8f0c6c
| 420
|
py
|
Python
|
shopping_cart/exc.py
|
godfrey-leung/shopping
|
e57c8246961fc3bd5dbffd05e7ffd2ed43a8867a
|
[
"MIT"
] | null | null | null |
shopping_cart/exc.py
|
godfrey-leung/shopping
|
e57c8246961fc3bd5dbffd05e7ffd2ed43a8867a
|
[
"MIT"
] | null | null | null |
shopping_cart/exc.py
|
godfrey-leung/shopping
|
e57c8246961fc3bd5dbffd05e7ffd2ed43a8867a
|
[
"MIT"
] | null | null | null |
class InstanceNotFound(Exception):
"""
Raise if an instance is not found in
the database
"""
class InvalidValue(Exception):
"""
Raise if an invalid value is given
"""
class OverDemand(Exception):
"""
Raise if excess demand is requested
"""
class InvalidAmount(Exception):
"""
Raise if an invalid total/tax/discount amount
is assigned to a shopping cart
"""
| 16.153846
| 49
| 0.638095
| 409
| 0.97381
| 0
| 0
| 0
| 0
| 0
| 0
| 266
| 0.633333
|
66d42f1fdcd91d122cd938babcc3fe924510d04e
| 2,147
|
py
|
Python
|
src/admin/godmode/actions/base.py
|
aimanow/sft
|
dce87ffe395ae4bd08b47f28e07594e1889da819
|
[
"Apache-2.0"
] | 280
|
2016-07-19T09:59:02.000Z
|
2022-03-05T19:02:48.000Z
|
godmode/actions/base.py
|
YAR-SEN/GodMode2
|
d8a79b45c6d8b94f3d2af3113428a87d148d20d0
|
[
"WTFPL"
] | 3
|
2016-07-20T05:36:49.000Z
|
2018-12-10T16:16:19.000Z
|
godmode/actions/base.py
|
YAR-SEN/GodMode2
|
d8a79b45c6d8b94f3d2af3113428a87d148d20d0
|
[
"WTFPL"
] | 20
|
2016-07-20T10:51:34.000Z
|
2022-01-12T23:15:22.000Z
|
import json
from flask import g, request, render_template
from flask.views import View
from godmode import logging
from godmode.acl import ACL
from godmode.audit_log import audit_log
from godmode.exceptions import AccessDenied
log = logging.getLogger(__name__)
class BaseAction(View):
name = None
title = None
acl = ACL.ADMIN
enable_log = True
style = ""
policy = None
stay_on_page = False
item_limit = None
def __init__(self, app, model=None, view=None):
log.info("Init action: {}".format(self.__class__.__name__))
self.app = app
self.model = model
self.view = view
self.policy = "{}.{}".format(self.view.policy, self.name)
log.info(self.policy)
def url(self):
return
def dispatch_request(self, *args, **kwargs):
has_access = ACL.has_access(g.user, self)
if not has_access:
raise AccessDenied(message="You don't have an access to this page.")
audit_log(
user=g.user,
model=self.model,
ids=kwargs.get("id") or request.args.get("ids"),
action=self.name
)
return self.run(*args, **kwargs)
def run(self, *args, **kwargs):
item_id = kwargs.get("id", None)
if item_id:
return self.do_item_action(*args, **kwargs)
ids = request.args.get("ids")
if not ids:
return json.dumps({
"remove_rows": False
})
id_list = ids.split(",")
if self.item_limit:
id_list = id_list[:self.item_limit]
for item_id in id_list:
try:
item_id = int(item_id)
except (ValueError, TypeError):
continue
kwargs["id"] = item_id
self.do_item_action(*args, **kwargs)
return json.dumps({
"remove_rows": True
})
def render_form(self, *args, **kwargs):
return render_template("actions/button_action.html", url=self.name, button_label="Submit")
def do_item_action(self, *args, **kwargs):
raise NotImplementedError()
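# Hypothetical subclass sketch showing the intended extension point; the model
# attribute access used below (get/save) is an assumption, not part of this file:
#
# class BanUserAction(BaseAction):
#     name = "ban"
#     title = "Ban user"
#     acl = ACL.ADMIN
#
#     def do_item_action(self, *args, **kwargs):
#         user = self.model.get(id=kwargs["id"])
#         user.is_banned = True
#         user.save()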
| 26.182927
| 98
| 0.583605
| 1,880
| 0.87564
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.071262
|
66d880a9b64fd73b407a720c9fa6817d2609e5bf
| 16,001
|
py
|
Python
|
forever/Warframe.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
forever/Warframe.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
forever/Warframe.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
import discord
import asyncio
import time
import aiohttp
import re
import pathlib
import os
import json
from bs4 import BeautifulSoup
from datetime import datetime
from models.UpdatedMessage import UpdatedMessage
from models.EmbedTemplate import EmbedTemplate
from models.BotMention import BotMention
from forever import Utilities
class SolSystem():
class SolPlanet:
def __init__(self, id, name):
self.id = id
self.name = name
class SolNode:
def __init__(self, id, name, planet):
self.id = id
self.name = name
self.planet = planet
class DropTables():
def __init__(self) -> None:
self.data = {}
self.time_updated = 0
self.interval = 86400
self.session = None
async def getData(self,):
xx = time.time()
        if xx - self.time_updated > self.interval:  # 86400 s = 24 h
self.time_updated = time.time()
if self.session:
async with self.session.get("https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html") as r:
if r.status==200:
parsing = await r.text()
reg = re.findall("<h3 id=\"(\w+)\">(.*?)<\/h3>\s*<table>([\s\S]*?)<\/table>", parsing, re.MULTILINE|re.DOTALL)
for i in reg:
parser = BeautifulSoup(i[2], 'html.parser')
table_rows = parser.find_all('tr')
self.data[i[0]] = {}
self.data[i[0]]["title"] = i[1].replace(":", "")
self.data[i[0]]["data"] = []
tmp = {}
if i[0] == "missionRewards" or i[0] == "keyRewards" or i[0] == "transientRewards":
tmp_mission = None
tmp_rotation = None
for x in table_rows:
text = x.get_text()
if x.select('th') and "Rotation" not in text:
tmp_mission = text
tmp_rotation = None
tmp[tmp_mission] = {}
elif "Rotation" in text:
tmp_rotation = text
tmp[tmp_mission][tmp_rotation] = []
else:
if tmp_rotation:
tmp[tmp_mission][tmp_rotation].append(text)
elif "data" in tmp[tmp_mission]:
tmp[tmp_mission]["data"].append(text)
else:
tmp[tmp_mission]["data"] = []
tmp[tmp_mission]["data"].append(text)
self.data[i[0]]["data"] = tmp
elif i[0] == "relicRewards":
relicname = None
rarity = None
for x in table_rows:
text = x.get_text()
if "Relic" in text:
relic_match = re.match("((?:Axi|Neo|Meso|Lith|Requiem)\s\w{0,3}\d{0,2}\s?Relic)\s\((Radiant|Exceptional|Flawless|Intact)\)", text)
if relic_match.group(1) in tmp:
if relic_match.group(2) not in tmp[relic_match.group(1)]:
tmp[relic_match.group(1)][relic_match.group(2)] = []
rarity = relic_match.group(2)
else:
tmp[relic_match.group(1)] = {}
tmp[relic_match.group(1)][relic_match.group(2)] = []
rarity = relic_match.group(2)
relicname = relic_match.group(1)
else:
tmp[relicname][rarity].append(text)
elif i[0] == "sortieRewards":
tmp = []
for x in table_rows:
text = x.get_text()
if not x.select('th'):
tmp.append(text)
elif i[0] == "cetusRewards" or i[0] == "solarisRewards" or i[0] == "deimosRewards":
tmp = {}
bounty = None
stage = None
rotation = None
for x in table_rows:
text = x.get_text()
if x.select('th'):
if "Bounty" in text:
bounty = text
tmp[bounty] = {}
elif "Rotation" in text:
rotation = text
tmp[bounty][rotation] = {}
elif "Stage" in text:
stage = text
tmp[bounty][rotation][stage] = []
else:
tmp[bounty][rotation][stage].append(text)
                            # use a set literal: set("a", "b", ...) would raise a TypeError
                            elif i[0] in {"modByAvatar", "blueprintByAvatar", "resourceByAvatar", "sigilByAvatar", "additionalItemByAvatar"}:
drop = None
for x in table_rows:
text = x.get_text()
itemtitles = re.match(r"^([\s\S]+?)(?:Additional Item|Mod|Resource|Blueprint\/Item|Sigil) Drop Chance: (\d{0,3}\.\d{0,3})\%$", text)
if itemtitles:
drop = itemtitles.group(1)
tmp[drop] = {}
tmp[drop]["chance"] = itemtitles.group(2)
tmp[drop]["data"] = []
else:
tmp[drop]["data"].append(text)
                            elif i[0] in {"modByDrop", "blueprintByDrop", "resourceByDrop"}:
drop = None
for x in table_rows:
text = x.get_text()
if x.select('th'):
if "Source" not in text:
drop = text
tmp[drop] = []
else:
tmp[drop].append(text)
self.data[i[0]]["data"] = tmp
def searchKey(self, key, searched_value):
vals = []
for i in self.data[key]["data"]:
if i.lower().startswith(searched_value.lower()):
vals.append(i)
return vals
def relicSearch(self, searched_value):
vals = self.searchKey("relicRewards", searched_value)
if len(vals) == 1:
em = EmbedTemplate(title=self.data["relicRewards"]["title"], description=vals[0])
for i, j in self.data["relicRewards"]["data"][vals[0]].items():
em.add_field(name=i, value="\n".join(j))
return em
else:
return EmbedTemplate(title=self.data["relicRewards"]["title"], description="\n".join(vals))
class CetusStatus:
def __init__(self, expiry):
self.expiry = expiry
self.start = self.expiry-150*60
def isNight(self,):
if self.minutes_left() <= 50:
return True
else:
return False
def seconds_left(self):
return self.expiry-time.time()
def minutes_left(self,):
return self.seconds_left()//60
def __str__(self,):
return "Night" if self.isNight() else "Day"
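# Day/night arithmetic sketch (illustrative values only): a Cetus cycle lasts
# 150 minutes and the code above treats the final 50 minutes as night, so with
# expiry 40 minutes away isNight() is True, and with 120 minutes away it is False.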
class CetusMessage(UpdatedMessage):
def __init__(self, message, mention, client):
self.mention = mention
self.notify_message = None
self.lock = False
self.client = client
super().__init__(message, "poe")
async def refresh(self, cetus):
em = EmbedTemplate(title="Plains of Eidolon", timestamp=datetime.utcnow())
em.add_field(name="Status", value=str(cetus))
em.add_field(name="Time until new rotation", value=f"{cetus.minutes_left() if cetus else 0.00:.0f} min")
await self.message.edit(embed=em)
if not self.lock:
if cetus.isNight() and self.mention:
self.lock = True
self.notify_message = await self.message.channel.send(f"{self.mention.name} - {self.mention.role.mention}")
self.client.loop.call_later(cetus.seconds_left()+60, self.callback)
def callback(self,):
self.client.loop.create_task(self.remove_message())
self.lock = False
async def remove_message(self,):
await self.notify_message.delete()
self.notify_message = None
class FissureItem:
def __init__(self, oid, start_time, expiry_time, mission_type, node, era):
self.start_time = start_time
self.expiry_time = expiry_time
self.mission_type = mission_type
self.node = node
self.era = era
def expiresIn(self,):
return self.expiry_time-time.time()
def __str__(self,):
if type(self.node) == str:
tmp = f"{self.node.title()}, {self.node.title()}"
return f"{tmp}\n{(f'Expires on {Utilities.ts2string(self.expiry_time)}')}\nExpires in {self.expiresIn()//60:.0f} min"
tmp = self.node.planet.name.title()+", "+self.node.name.title()
return f"{tmp}\n{(f'Expires on {Utilities.ts2string(self.expiry_time)}')}\nExpires in {self.expiresIn()//60:.0f} min"
class FissureMessage(UpdatedMessage):
def __init__(self, message, mentions):
super().__init__(message, "fissures")
self.mentions = mentions
async def refresh(self, fissures):
em = EmbedTemplate(title="Fissures", timestamp=datetime.utcnow())
for i in fissures:
em.add_field(name=f"{i.era} {i.mission_type}", value=str(i))
await self.message.edit(embed=em)
class InvasionItem:
def __init__(self, attacker, defender, node, starttime, status):
self.attacker = attacker
self.defender = defender
self.start_time = starttime
self.node = node
self.status = status
class InvasionOpp:
#0 DEFENDING
#1 ATTACKING
def __init__(self, faction, rewards):
self.faction = faction
self.rewards = rewards
class InvasionMessage(UpdatedMessage):
def __init__(self, message, mentions):
super().__init__(message, "invasions")
self.mentions = mentions
async def refresh(self, invasions):
em = EmbedTemplate(title="Invasions", timestamp=datetime.utcnow())
for i in invasions:
vals = []
if type(i.node) == str:
vals.append(f"{i.node.title()}, {i.node.title()}")
else:
vals.append(f"{i.node.planet.name.title()}, {i.node.name.title()}")
vals.append(i.start_time)
vals.append(f"{i.defender.faction} vs {i.attacker.faction}"),
vals.append(i.status)
em.add_field(
name=f"{i.defender.rewards} vs {i.attacker.rewards}",
value=f"{vals[0]}\n{vals[1]}\n{vals[2]}\n{vals[3]}\n\u200b")
await self.message.edit(embed=em)
class NightwaveItem:
def __init__(self, start_time, expiry_time, name, daily=False):
self.start_time = start_time
self.expiry_time = expiry_time
self.name = name
self.daily = daily
class NightwaveMessage(UpdatedMessage):
def __init__(self, message):
super().__init__(message, "nightwave")
async def refresh(self, nightwave_data):
em = EmbedTemplate(title="Nightwave", timestamp=datetime.utcnow())
for i in nightwave_data:
em.add_field(name=i.name, value=(Utilities.ts2string(i.start_time+(60*120))+"\n\n"))
await self.message.edit(embed=em)
class Sorties:
class SortieItem:
def __init__(self, start_time, expiry_time, missions):
self.start_time = start_time
self.expiry_time = expiry_time
self.missions = missions
class SortieMission:
def __init__(self, missionType, node, modifier):
self.mission_type = missionType
self.node = node
self.modifier = modifier
def __str__(self,):
if type(self.node) == str:
return f"{self.mission_type}\n{self.node}\n{self.modifier}"
return f"{self.mission_type}\n{(f'{self.node.name.title()}, {self.node.planet.name.title()}')}\n{self.modifier}"
class SortieMessage(UpdatedMessage):
def __init__(self, message):
super().__init__(message, "sorties")
async def refresh(self, sortie):
em = EmbedTemplate(title="Sorties", timestamp=datetime.utcnow())
count = 1
for i in sortie.missions:
em.add_field(name=f"Mission {count}", value=str(i))
count+=1
await self.message.edit(embed=em)
class Worldstate():
def __init__(self,):
self.runtime = {}
self.fissure_eras = {
"VoidT1" : ["Lith", 1],
"VoidT2" : ["Meso", 2],
"VoidT3" : ["Neo", 3],
"VoidT4" : ["Axi", 4],
"VoidT5" : ["Requiem", 5]
}
self.session = None
self.initRuntime()
def initRuntime(self,):
self.runtime.clear()
self.runtime["invasions"] = []
self.runtime["nightwave"] = []
self.runtime["fissures"] = []
self.runtime["sorties"] = None
self.runtime["poe"] = None
def getInvasions(self, parsing, data_runtime):
for invasion in parsing["Invasions"]:
if not invasion["Completed"]:
start_time = int(invasion["Activation"]["$date"]["$numberLong"])//1000
node = next((x for x in data_runtime["warframe"]["translate"]["solsystem"]["nodes"] if x.id == invasion["Node"]), invasion["Node"])
attack_reward = "N/A"
defender_reward = "N/A"
reward_item = invasion["DefenderReward"]["countedItems"][0]["ItemType"]
translate = data_runtime["warframe"]["translate"]["items"]
defender_reward = f"{invasion['DefenderReward']['countedItems'][0]['ItemCount']}x {translate[reward_item] if reward_item in translate else reward_item}"
if invasion["AttackerReward"]:
reward_item = invasion["AttackerReward"]["countedItems"][0]["ItemType"]
attack_reward = f"{invasion['AttackerReward']['countedItems'][0]['ItemCount']}x { translate[reward_item] if reward_item in translate else reward_item}"
attack_faction = invasion["AttackerMissionInfo"]["faction"].strip("FC_")
defender_faction = invasion["DefenderMissionInfo"]["faction"].strip("FC_")
goal = invasion["Goal"]*2
current = invasion["Count"]+invasion["Goal"]
fraction_attacker = round(current/goal*100,1)
fraction_defender = round((goal-current)/goal*100,1)
attacker = InvasionOpp(attack_faction, attack_reward)
defender = InvasionOpp(defender_faction, defender_reward)
self.runtime["invasions"].append(InvasionItem(attacker, defender, node, Utilities.ts2string(start_time), f"{fraction_defender}% vs {fraction_attacker}%"))
def getNightwave(self, parsing, data_runtime):
translate = data_runtime["warframe"]["translate"]
for nightwave in parsing["SeasonInfo"]["ActiveChallenges"]:
challenge = translate["nightwave"][nightwave["Challenge"]] if nightwave["Challenge"] in translate["nightwave"] else nightwave["Challenge"]
daily = nightwave["Daily"] if "Daily" in nightwave else False
start_time = int(nightwave["Activation"]["$date"]["$numberLong"])//1000
expiry_time = int(nightwave["Expiry"]["$date"]["$numberLong"])//1000
self.runtime["nightwave"].append(NightwaveItem(start_time, expiry_time, challenge, daily))
def getFissure(self, parsing, data_runtime):
translate = data_runtime["warframe"]["translate"]
for fissure in sorted(parsing["ActiveMissions"], key=lambda item: self.fissure_eras[item["Modifier"]][1]):
oid = fissure["_id"]["$oid"]
start_time = int(fissure["Activation"]["$date"]["$numberLong"])//1000
expiry_time = int(fissure["Expiry"]["$date"]["$numberLong"])//1000
mission_type = translate["missions"][fissure["MissionType"]].title() if fissure["MissionType"] in translate["missions"] else fissure["MissionType"]
node = next((x for x in translate["solsystem"]["nodes"] if x.id == fissure["Node"]), fissure["Node"])
era = self.fissure_eras[fissure["Modifier"]][0]
self.runtime["fissures"].append(FissureItem(oid, start_time, expiry_time, mission_type, node, era))
def getSorties(self, parsing, data_runtime):
if parsing["Sorties"]:
start_time = int(parsing["Sorties"][0]["Activation"]["$date"]["$numberLong"])//1000
expiry_time = int(parsing["Sorties"][0]["Expiry"]["$date"]["$numberLong"])//1000
missionsParse = parsing["Sorties"][0]["Variants"]
missions = []
translate = data_runtime["warframe"]["translate"]
for i in missionsParse:
mission_type = translate["missions"][i["missionType"]].title() if i["missionType"] in translate["missions"] else i["missionType"].title()
node = next((x for x in translate["solsystem"]["nodes"] if x.id == i["node"]), i["node"])
modifier = translate["sorties"][i["modifierType"]].title() if i["modifierType"] in translate["sorties"] else i["modifierType"]
missions.append(Sorties.SortieMission(mission_type, node, modifier))
self.runtime["sorties"] = Sorties.SortieItem(start_time, expiry_time, missions)
def getCetus(self, parsing, data_runtime):
expiry_time = next(((int(x["Expiry"]["$date"]["$numberLong"])//1000) for x in parsing["SyndicateMissions"] if x["Tag"] == "CetusSyndicate"), None)
if expiry_time:
self.runtime["poe"] = CetusStatus(expiry_time)
async def get_data(self, data_runtime):
self.initRuntime()
if self.session:
async with self.session.get("http://content.warframe.com/dynamic/worldState.php") as r:
if r.status==200:
parsing = await r.text()
parsing = json.loads(parsing)
if "Invasions" in parsing:
self.getInvasions(parsing, data_runtime)
if "SeasonInfo" in parsing and parsing["SeasonInfo"]:
self.getNightwave(parsing, data_runtime)
if "ActiveMissions" in parsing:
self.getFissure(parsing, data_runtime)
if "Sorties" in parsing:
self.getSorties(parsing, data_runtime)
if "SyndicateMissions" in parsing:
self.getCetus(parsing, data_runtime)
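# Usage sketch (the asyncio wiring and the shape of data_runtime["warframe"]["translate"]
# are assumptions based on how they are accessed above):
#
#   async def poll(data_runtime):
#       ws = Worldstate()
#       ws.session = aiohttp.ClientSession()
#       await ws.get_data(data_runtime)
#       print(len(ws.runtime["fissures"]), "open fissures")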
| 41.778068
| 159
| 0.645272
| 15,628
| 0.976689
| 0
| 0
| 0
| 0
| 6,812
| 0.425723
| 3,523
| 0.220174
|
66d8ba6f365049a80533d4986a5c2cf0bb77bfb0
| 2,561
|
py
|
Python
|
config/jupyter/jupyterhub_config.py
|
mhwasil/jupyterhub-on-gcloud
|
9cfe935772d7599fa36c5b998cebb87c17e24277
|
[
"MIT"
] | 3
|
2018-10-06T20:35:08.000Z
|
2019-03-02T08:04:52.000Z
|
config/jupyter/jupyterhub_config.py
|
mhwasil/jupyterhub-on-gcloud
|
9cfe935772d7599fa36c5b998cebb87c17e24277
|
[
"MIT"
] | 4
|
2019-05-15T11:36:43.000Z
|
2019-07-23T09:34:45.000Z
|
config/jupyter/jupyterhub_config.py
|
mhwasil/jupyterhub-on-gcloud
|
9cfe935772d7599fa36c5b998cebb87c17e24277
|
[
"MIT"
] | 2
|
2020-01-09T21:03:44.000Z
|
2020-11-22T16:47:00.000Z
|
c = get_config()
c.JupyterHub.ip = u'127.0.0.1'
c.JupyterHub.port = 8000
c.JupyterHub.cookie_secret_file = u'/srv/jupyterhub/jupyterhub_cookie_secret'
c.JupyterHub.db_url = u'/srv/jupyterhub/jupyterhub.sqlite'
#c.JupyterHub.proxy_auth_token = u'/srv/jupyterhub/proxy_auth_token'
c.ConfigurableHTTPProxy.auth_token = u'/srv/jupyterhub/proxy_auth_token'
c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
c.SystemdSpawner.user_workingdir = '/home/{USERNAME}'
#c.JupyterHub.config_file = '/home/admin/jupyterhub_config.py'
# Limit memory and cpu usage for each user
c.SystemdSpawner.mem_limit = '0.5G'
c.SystemdSpawner.cpu_limit = 0.5
# create private /tmp to isolate each user info
c.SystemdSpawner.isolate_tmp = True
# Disable or enable user sudo
c.SystemdSpawner.disable_user_sudo = False
# Readonly
c.SystemdSpawner.readonly_paths = None
# Readwrite path
#c.SystemdSpawner.readwrite_paths = None
# use jupyterlab
c.Spawner.cmd = ['jupyter-labhub']
c.Spawner.default_url = '/tree'
# set default_shell
c.SystemdSpawner.default_shell = '/bin/bash'
c.Authenticator.admin_users = {'admin', 'mrc-grader'}
c.Authenticator.whitelist = {'admin', 'mhm_wasil', 'instructor1',
'instructor2', 'student1', 'student2', 'student3',
'mrc-grader', 'wtus-grader'}
c.LocalAuthenticator.group_whitelist = {'mrc-group'}
#c.LocalAuthenticator.group_whitelist = {'mrc-group', 'wtus-group'}
# members of mrc-group have access to a shared notebook server:
c.JupyterHub.load_groups = {
'mrc-group': [
'instructor1',
'instructor2'
]
#,
#'wtus-student-group': [
# 'instructor2'
#]
}
service_names = ['shared-mrc-notebook', 'shared-wtus-notebook']
service_ports = [9998, 9999]
group_names = ['mrc-group']
#group_names = ['mrc-student-group', 'wtus-student-group']
# start the notebook server as a service
c.JupyterHub.services = [
{
'name': service_names[0],
'url': 'http://127.0.0.1:{}'.format(service_ports[0]),
'command': [
'jupyterhub-singleuser',
'--group={}'.format(group_names[0]),
'--debug',
],
'user': 'mrc-grader',
'cwd': '/home/mrc-grader'
}
#,
#{
# 'name': service_names[1],
# 'url': 'http://127.0.0.1:{}'.format(service_ports[1]),
# 'command': [
# 'jupyterhub-singleuser',
# '--group={}'.format(group_names[1]),
# '--debug',
# ],
# 'user': 'wtus-grader',
# 'cwd': '/home/wtus-grader'
#}
]
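# Launch sketch (the config path is an assumption; point -f at wherever this file
# is actually installed):
#   jupyterhub -f /etc/jupyterhub/jupyterhub_config.py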
| 31.617284
| 78
| 0.643108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,449
| 0.565795
|
66d95353965e38496015e85b754a89803b392d87
| 11,908
|
py
|
Python
|
legacy/Environment.py
|
LaoKpa/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 7
|
2020-09-28T23:36:40.000Z
|
2022-02-22T02:00:32.000Z
|
legacy/Environment.py
|
LaoKpa/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 4
|
2020-11-13T18:48:52.000Z
|
2022-02-10T01:29:47.000Z
|
legacy/Environment.py
|
lzcaisg/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 3
|
2020-11-23T17:31:59.000Z
|
2021-04-08T10:55:03.000Z
|
import datetime
import warnings
import pandas as pd
import numpy as np
from MongoDBUtils import *
from scipy.optimize import fsolve
import pymongo
TRADING_FEE = 0.008
EARLIEST_DATE = datetime.datetime(2014, 10, 17)
LATEST_DATE = datetime.datetime(2019, 10, 17)
# In any cases, we shouldn't know today's and future value;
# ONLY PROVIDE CALCULATED RESULT
# Handled by Both Environment and Actors
class Environment():
def __init__(self):
self.client = pymongo.MongoClient("mongodb+srv://lzcai:raspberry@freecluster-q4nkd.gcp.mongodb.net/test?retryWrites=true&w=majority")
self.db = self.client["testing"]
def getOneRecord(self, todayDate, date, col_name="S&P 500"):
'''
:param todayDate:
:param date:
:param col_name:
:return: e.g.
{
'_id': ObjectId('5de7325e05597fc4f7b09fad'),
'Date': datetime.datetime(2019, 9, 10, 0, 0),
'Price': 2979.39, 'Open': 2971.01,
'High': 2979.39,
'Low': 2957.01,
'Vol': 0,
'Change': 0.0003
}
'''
if date >= todayDate:
return
collection = self.db[col_name]
query = {"Date": date}
result = collection.find_one(query)
return result
def getAllRecord(self, todayDate, col_name="S&P 500"):
pass
def getRecordFromDateList(self, todayDate, dateList, col_name="S&P 500"):
collection = self.db[col_name]
resultList = []
for date in dateList:
if date >= todayDate:
continue
query = {"Date": date}
result = collection.find_one(query)
if result:
resultList.append(result)
return resultList
def getRecordFromStartLength(self, todayDate, startDate, length, col_name="S&P 500"): # Return Sorted List of Dict
collection = self.db[col_name]
resultList = []
for i in range(length):
newDate = startDate + datetime.timedelta(days=i)
if newDate >= todayDate:
break
query = {"Date": newDate}
result = collection.find_one(query)
if result:
resultList.append(result)
return resultList
def getRecordFromStartLengthByETFList(self, todayDate, startDate, length, etfList):
'''
:param startDate:
:param length:
:param etfList: ["S&P 500", "DAX"]
:return: A Dict
{
"S&P 500": [{one record}, {another record}],
"DAX":[{...}, {...}],
...}
'''
if not isinstance(etfList, list):
warnings.warn("Environment/getRecordFromStartLengthByETFList() Warning: etfList is not List")
return None
resultDict = {}
for etf in etfList:
if etf == "CASH":
continue
else:
etfRecordList = []
collection = self.db[etf]
for i in range(length):
newDate = startDate + datetime.timedelta(days=i)
if newDate >= todayDate:
break
query = {"Date": newDate}
result = collection.find_one(query)
if result:
etfRecordList.append(result)
resultDict[etf] = etfRecordList
return resultDict
def getRecordFromEndLengthByETFList(self, todayDate, endDate, length, etfList):
'''
        :param endDate:
:param length:
:param etfList: ["S&P 500", "DAX"]
:return: A Dict
{
"S&P 500": [{one record}, {another record}],
"DAX":[{...}, {...}],
...}
'''
if not isinstance(etfList, list):
warnings.warn("Environment/getRecordFromStartLengthByETFList() Warning: etfList is not List")
return None
resultDict = {}
for etf in etfList:
if etf == "CASH":
continue
else:
etfRecordList = []
collection = self.db[etf]
for i in range(length):
newDate = endDate - datetime.timedelta(days=i)
if newDate >= todayDate:
continue
query = {"Date": newDate}
result = collection.find_one(query)
if result:
etfRecordList.append(result)
resultDict[etf] = etfRecordList
return resultDict
def getPriceByETFList(self, todayDate, date, etfList): # Get PRICE only! Not the full record
'''
:param date:
:param etfList:
:return: A df like this:
Value
Name
Hang Seng 30
S&P 500 40
STI NaN
Shanghai 50
'''
if not isinstance(etfList, list):
warnings.warn("Environment/getRecordFromETFList() Warning: etfList is not List")
return None
resultDF = pd.DataFrame(etfList, columns=["Name"]).set_index('Name', drop=True)
resultDF['Value'] = np.nan
for etf in etfList:
if etf == "CASH":
                resultDF.loc[etf, 'Value'] = 1
            else:
                collection = self.db[etf]
                if date >= todayDate:
                    continue
                query = {"Date": date}
                result = collection.find_one(query)
                if result:
                    resultDF.loc[etf, 'Value'] = result['Price']
return resultDF
def reallocateAndGetAbsoluteReward(self, oldPortfolio, newPortfolio):
'''
oldPortfolio: {
"portfolioDict": {"S&P 500": 0.3, "Hang Seng":0.5} -> 0.2 Cash
"date":
"value":
}
newPortfolio: {
"portfolioDict":
"date":
}
:returns: {
oldCurrentValue: xxx,
newCurrentValue: xxx,
deltaValue: xxx,
portfolio_df: portfolio_df
}
'''
# 1. Check whether the input is legit
if (
("portfolioDict" not in oldPortfolio) or
("date" not in oldPortfolio) or
("value" not in oldPortfolio)
):
warnings.warn("Environment/calculateAbsoluteReward() Warning: Input of oldPortfolio is NOT LEGIT")
return 0
if (
("portfolioDict" not in newPortfolio) or
("date" not in newPortfolio)
):
warnings.warn("Environment/calculateAbsoluteReward() Warning: Input of newPortfolio NOT LEGIT")
return 0
# 2. Check whether the portfolioDict is a dictionary
if not isinstance(oldPortfolio['portfolioDict'], dict):
warnings.warn(
"Environment/calculateAbsoluteReward() Warning: oldPortfolio['portfolioDict'] is not a dictionary")
return 0
if not isinstance(newPortfolio['portfolioDict'], dict):
warnings.warn(
"Environment/calculateAbsoluteReward() Warning: newPortfolio['portfolioDict'] is not a dictionary")
return 0
'''
portfolio_df:[
oldRatio, newRatio, oldPastValue, oldStockHeld, oldCurrentValue, oldCurrentRatio,
deltaRatio, deltaStockHeld, newCurrentValue
]
'''
# 3. Clean the ratio: >1: Normalize; <1: Cash Out
oldRatio_df = pd.DataFrame.from_dict(oldPortfolio['portfolioDict'], orient='index', columns=['ratio'])
newRatio_df = pd.DataFrame.from_dict(newPortfolio['portfolioDict'], orient='index', columns=['ratio'])
oldRatio_df = oldRatio_df.append(pd.DataFrame(index=['CASH'], data={'ratio': np.nan}))
newRatio_df = newRatio_df.append(pd.DataFrame(index=['CASH'], data={'ratio': np.nan}))
if oldRatio_df['ratio'].sum() > 1:
warnings.warn(
"Environment/calculateAbsoluteReward() Warning: oldRatio_df['ratio'].sum() > 1, Auto-Normalized")
oldRatio_df = oldRatio_df / oldRatio_df['ratio'].sum()
elif oldRatio_df['ratio'].sum() < 1:
oldRatio_df['ratio']['CASH'] = 1 - oldRatio_df['ratio'].sum()
if newRatio_df['ratio'].sum() > 1:
warnings.warn(
"Environment/calculateAbsoluteReward() Warning: newRatio_df['ratio'].values().sum() > 1, Auto-Normalized")
newRatio_df = newRatio_df / newRatio_df['ratio'].sum()
elif newRatio_df['ratio'].sum() < 1:
newRatio_df['ratio']['CASH'] = 1 - newRatio_df['ratio'].sum()
portfolio_df = pd.merge(oldRatio_df, newRatio_df, left_index=True, right_index=True, how='outer')
portfolio_df.columns = ['oldRatio', 'newRatio']
portfolio_df = portfolio_df.fillna(0)
# 4. Calculate the current value of the stocks: [oldPastValue, oldStockHeld, oldCurrentValue, oldCurrentRatio]
portfolio_df['oldPastValue'] = portfolio_df.apply(lambda row: row.oldRatio * oldPortfolio['value'], axis=1)
etfList = list(portfolio_df.index)
        # getPriceByETFList() needs a todayDate argument; pass the real-world date to
        # disable the look-ahead guard here (same trick as in getFuturePercentile) and
        # keep only the 'Value' column so a Series is assigned.
        portfolio_df['oldPrice'] = self.getPriceByETFList(datetime.datetime.now(), oldPortfolio['date'], etfList)['Value']
        portfolio_df['newPrice'] = self.getPriceByETFList(datetime.datetime.now(), newPortfolio['date'], etfList)['Value']
portfolio_df['oldStockHeld'] = portfolio_df['oldPastValue'].div(portfolio_df['oldPrice'].values)
portfolio_df['oldCurrentValue'] = portfolio_df['oldStockHeld'].mul(portfolio_df['newPrice'].values)
portfolio_df['oldCurrentRatio'] = portfolio_df['oldCurrentValue'] / portfolio_df['oldCurrentValue'].sum()
# 5. Calculate the deltas [deltaRatio, deltaStockHeld, newStockHeld]
portfolio_df['deltaRatio'] = portfolio_df['newRatio'].sub(portfolio_df['oldCurrentRatio'], fill_value=0)
def equation(n):
left = np.multiply(portfolio_df['oldStockHeld'] + n, portfolio_df['newPrice'])
right = portfolio_df['newRatio'] * (
np.dot(portfolio_df['newPrice'], portfolio_df['oldStockHeld']) - TRADING_FEE * np.dot(
portfolio_df['newPrice'], np.absolute(n)))
return left - right
a0 = np.zeros(portfolio_df['oldStockHeld'].shape)
n = fsolve(equation, a0)
portfolio_df['deltaStockHeld'] = n
portfolio_df['newStockHeld'] = portfolio_df['oldStockHeld'] + portfolio_df['deltaStockHeld']
portfolio_df['newCurrentValue'] = portfolio_df['newStockHeld'].mul(portfolio_df['newPrice'])
# 6. Return stuffs
oldPastValueSum = portfolio_df['oldPastValue'].sum()
newCurrentValueSum = portfolio_df['newCurrentValue'].sum()
return {
"oldPastValue": oldPastValueSum,
"newCurrentValue": newCurrentValueSum,
"deltaValue": newCurrentValueSum - oldPastValueSum,
"portfolio_df": portfolio_df
}
def getFuturePercentile(self, todayDate, delta, col_name="S&P 500"): # Delta includes todayDate!
        # 1. Get all future records and calculate the percentile using getRecordFromStartLength
# Disable the today_check by passing real-world date
resultList = self.getRecordFromStartLength(datetime.datetime.now(), todayDate, delta, col_name=col_name)
# 2. Transform the resultList into dataframe
df = pd.DataFrame(resultList)
todayRank = df['Price'].rank(method = 'average')[0] # The smaller the value, the smaller the rank
todayPercentile = (todayRank-1) / (df.shape[0]-1) # -1 to make it [0, 1], otherwise rank start with 1
# The greater the percentile, the worse the performance in the future
return todayPercentile
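# --- Usage sketch (added for illustration; not part of the original file) ---
# Shows how the Environment above might be driven, assuming the MongoDB cluster
# is reachable and the "S&P 500" / "Hang Seng" collections hold daily records
# for the (made-up) dates used here.
if __name__ == "__main__":
    env = Environment()
    today = datetime.datetime(2019, 9, 12)
    print(env.getOneRecord(today, datetime.datetime(2019, 9, 10)))
    oldPortfolio = {
        "portfolioDict": {"S&P 500": 0.6, "Hang Seng": 0.3},  # remaining 0.1 -> CASH
        "date": datetime.datetime(2019, 9, 2),
        "value": 10000.0,
    }
    newPortfolio = {
        "portfolioDict": {"S&P 500": 0.4, "Hang Seng": 0.5},
        "date": datetime.datetime(2019, 9, 9),
    }
    result = env.reallocateAndGetAbsoluteReward(oldPortfolio, newPortfolio)
    print(result["deltaValue"])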
| 36.527607
| 141
| 0.570037
| 11,508
| 0.966409
| 0
| 0
| 0
| 0
| 0
| 0
| 4,562
| 0.383104
|
66d9e2205d4a01f644f0a6147e2760e0d6b2de38
| 579
|
py
|
Python
|
examples/Titanic/titanic.py
|
mlflow/mlflow-torchserve
|
91663b630ef12313da3ad821767faf3fc409345b
|
[
"Apache-2.0"
] | 40
|
2020-11-13T02:08:10.000Z
|
2022-03-27T07:41:57.000Z
|
examples/Titanic/titanic.py
|
Ideas2IT/mlflow-torchserve
|
d6300fb73f16d74ee2c7718c249faf485c4f3b62
|
[
"Apache-2.0"
] | 23
|
2020-11-16T11:28:01.000Z
|
2021-09-23T11:28:24.000Z
|
examples/Titanic/titanic.py
|
Ideas2IT/mlflow-torchserve
|
d6300fb73f16d74ee2c7718c249faf485c4f3b62
|
[
"Apache-2.0"
] | 15
|
2020-11-13T10:25:25.000Z
|
2022-02-01T10:13:20.000Z
|
import torch.nn as nn
class TitanicSimpleNNModel(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(12, 12)
self.sigmoid1 = nn.Sigmoid()
self.linear2 = nn.Linear(12, 8)
self.sigmoid2 = nn.Sigmoid()
self.linear3 = nn.Linear(8, 2)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
lin1_out = self.linear1(x)
sigmoid_out1 = self.sigmoid1(lin1_out)
sigmoid_out2 = self.sigmoid2(self.linear2(sigmoid_out1))
return self.softmax(self.linear3(sigmoid_out2))
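# --- Usage sketch (added for illustration; not part of the original file) ---
# Minimal smoke test for the model above: push a random batch of 4 passengers
# (12 engineered features each) through the network and check the output shape.
if __name__ == "__main__":
    import torch
    model = TitanicSimpleNNModel()
    dummy_batch = torch.randn(4, 12)
    with torch.no_grad():
        probs = model(dummy_batch)
    print(probs.shape)       # torch.Size([4, 2])
    print(probs.sum(dim=1))  # each row sums to ~1 because of the softmax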
| 30.473684
| 64
| 0.62867
| 554
| 0.956822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
66db0c7061bb9a75d8373490465f8ef60bcc3200
| 426
|
py
|
Python
|
api/tacticalrmm/agents/migrations/0049_agent_agents_agen_monitor_df8816_idx.py
|
v2cloud/tacticalrmm
|
12f599f9749985f66ff9b559c5e5abd36064b182
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/agents/migrations/0049_agent_agents_agen_monitor_df8816_idx.py
|
v2cloud/tacticalrmm
|
12f599f9749985f66ff9b559c5e5abd36064b182
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/agents/migrations/0049_agent_agents_agen_monitor_df8816_idx.py
|
v2cloud/tacticalrmm
|
12f599f9749985f66ff9b559c5e5abd36064b182
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-04-18 14:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0048_remove_agent_has_patches_pending_and_more'),
]
operations = [
migrations.AddIndex(
model_name='agent',
index=models.Index(fields=['monitoring_type'], name='agents_agen_monitor_df8816_idx'),
),
]
| 23.666667
| 98
| 0.65493
| 333
| 0.78169
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.373239
|
66dcca39ba0172f5d72111b99f2df6a26ed3cb02
| 6,431
|
py
|
Python
|
src/Datasets.py
|
fauxneticien/bnf_cnn_qbe-std
|
ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30
|
[
"MIT"
] | 4
|
2021-03-26T17:18:59.000Z
|
2022-03-21T18:28:56.000Z
|
src/Datasets.py
|
fauxneticien/bnf_cnn_qbe-std
|
ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30
|
[
"MIT"
] | 1
|
2021-11-02T17:29:46.000Z
|
2021-11-02T17:29:46.000Z
|
src/Datasets.py
|
fauxneticien/bnf_cnn_qbe-std
|
ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30
|
[
"MIT"
] | 1
|
2020-11-11T05:04:55.000Z
|
2020-11-11T05:04:55.000Z
|
import os
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from scipy.spatial.distance import cdist
import logging
class STD_Dataset(Dataset):
"""Spoken Term Detection dataset."""
def __init__(self, root_dir, labels_csv, query_dir, audio_dir, apply_vad = False, max_height = 100, max_width = 800):
"""
Args:
root_dir (string): Absolute path to dataset directory with content below
labels_csv (string): Relative path to the csv file with query and test pairs, and labels
(1 = query in test; 0 = query not in test).
query_dir (string): Relative path to directory with all the audio queries.
audio_dir (string): Relative path to directory with all the test audio.
"""
if isinstance(labels_csv, dict):
# Supplying separate csv files for positive and negative labels
pos_frame = pd.read_csv(os.path.join(root_dir, labels_csv['positive_labels']))
neg_frame = pd.read_csv(os.path.join(root_dir, labels_csv['negative_labels']))
# Randomly down-sample neg examples to same number of positive examples
pos_frame = pos_frame.sample(frac = labels_csv['pos_sample_size'], replace = True)
neg_frame = neg_frame.sample(n = pos_frame.shape[0])
self.qtl_frame = pd.concat([pos_frame, neg_frame], axis = 0).sample(frac = 1)
else:
# If a single CSV file, then just read that in
self.qtl_frame = pd.read_csv(os.path.join(root_dir, labels_csv))
self.query_dir = os.path.join(root_dir, query_dir)
self.audio_dir = os.path.join(root_dir, audio_dir)
self.apply_vad = apply_vad
self.max_height = max_height
self.max_width = max_width
if apply_vad is True:
# If using voice activity detection we expect same directory structure
# and file names as feature files for .npy files containing voice activity
# detection (VAD) labels (0 = no speech activity, 1 = speech activity)
# in a 'vad_labels' directory
self.vad_query_dir = os.path.join(root_dir, 'vad_labels', query_dir)
self.vad_audio_dir = os.path.join(root_dir, 'vad_labels', audio_dir)
# Get filenames in audio and query directories
q_files = os.listdir(self.vad_query_dir)
a_files = os.listdir(self.vad_audio_dir)
# Get length of non-zero values in files
q_vlens = np.array([ len(np.flatnonzero(np.load(os.path.join(self.vad_query_dir, f)))) for f in q_files ])
a_vlens = np.array([ len(np.flatnonzero(np.load(os.path.join(self.vad_audio_dir, f)))) for f in a_files ])
# Get files (without .npy extensions) for which there are no non-zero values
zero_qs = [ os.path.splitext(x)[0] for x in np.take(q_files, np.where(q_vlens == 0)).flatten() ]
zero_as = [ os.path.splitext(x)[0] for x in np.take(a_files, np.where(a_vlens == 0)).flatten() ]
if(len(zero_qs) > 0):
logging.info(" Following queries removed from dataset (insufficient frames after VAD): %s" % (", ".join(zero_qs)))
if(len(zero_as) > 0):
logging.info(" Following references removed from dataset (insufficient frames after VAD): %s" % (", ".join(zero_as)))
# Discard from labels irrelevant files
self.qtl_frame = self.qtl_frame[~self.qtl_frame['query'].isin(zero_qs)]
self.qtl_frame = self.qtl_frame[~self.qtl_frame['reference'].isin(zero_as)]
def __len__(self):
return len(self.qtl_frame)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
query_name = self.qtl_frame.iloc[idx, 0]
test_name = self.qtl_frame.iloc[idx, 1]
qt_label = self.qtl_frame.iloc[idx, 2]
# Get features where query = M x f, test = N x f, where M, N number of frames and f number of features
query_feats = np.load(os.path.join(self.query_dir, query_name + ".npy"), allow_pickle=True)
test_feats = np.load(os.path.join(self.audio_dir, test_name + ".npy"), allow_pickle=True)
if self.apply_vad is True:
query_vads = np.load(os.path.join(self.vad_query_dir, query_name + ".npy"), allow_pickle=True)
test_vads = np.load(os.path.join(self.vad_audio_dir, test_name + ".npy"), allow_pickle=True)
# Keep only frames (rows, axis = 0) where voice activity detection by rVAD has returned non-zero (i.e. 1)
query_feats = np.take(query_feats, np.flatnonzero(query_vads), axis = 0)
test_feats = np.take(test_feats, np.flatnonzero(test_vads), axis = 0)
# Create standardised Euclidean distance matrix of dimensions M x N
qt_dists = cdist(query_feats, test_feats, 'seuclidean', V = None)
# Range normalise matrix to [-1, 1]
qt_dists = -1 + 2 * ((qt_dists - qt_dists.min())/(qt_dists.max() - qt_dists.min()))
# Get indices to downsample or pad M x N matrix to max_height x max_width (default 100 x 800)
def get_keep_indices(dim_size, dim_max):
if dim_size <= dim_max:
# no need to downsample if M or N smaller than max_height/max_width
return np.arange(0, dim_size)
else:
# if bigger, return evenly spaced indices for correct height/width
return np.round(np.linspace(0, dim_size - 1, dim_max)).astype(int)
ind_rows = get_keep_indices(qt_dists.shape[0], self.max_height)
ind_cols = get_keep_indices(qt_dists.shape[1], self.max_width)
qt_dists = np.take(qt_dists, ind_rows, axis = 0)
qt_dists = np.take(qt_dists, ind_cols, axis = 1)
# Create empty 100 x 800 matrix, then fill relevant cells with dist values
temp_dists = np.full((self.max_height, self.max_width), qt_dists.min(), dtype='float32')
temp_dists[:qt_dists.shape[0], :qt_dists.shape[1]] = qt_dists
# Reshape to (1xHxW) since to feed into ConvNet with 1 input channel
dists = torch.Tensor(temp_dists).view(1, self.max_height, self.max_width)
label = torch.Tensor([qt_label])
sample = {'query': query_name, 'reference': test_name, 'dists': dists, 'labels': label}
return sample
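# --- Usage sketch (added for illustration; not part of the original file) ---
# Shows how the dataset above is typically consumed with a DataLoader. The
# directory layout and CSV name are assumptions for this sketch; the real paths
# come from the experiment configuration in the repository.
if __name__ == "__main__":
    dataset = STD_Dataset(
        root_dir="data/my_experiment",   # assumed layout
        labels_csv="labels.csv",         # columns: query, reference, label
        query_dir="queries",
        audio_dir="references",
    )
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    batch = next(iter(loader))
    print(batch["dists"].shape)   # torch.Size([4, 1, 100, 800])
    print(batch["labels"].shape)  # torch.Size([4, 1])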
| 51.448
| 133
| 0.640647
| 6,262
| 0.973721
| 0
| 0
| 0
| 0
| 0
| 0
| 2,118
| 0.329342
|
66de338a8afcfc34368f70df12c0187b512a7430
| 3,209
|
py
|
Python
|
dmz/store.py
|
yuvipanda/edit-stats
|
fb096715f18df999b4af4fb116e6c4130f24c2ec
|
[
"MIT"
] | null | null | null |
dmz/store.py
|
yuvipanda/edit-stats
|
fb096715f18df999b4af4fb116e6c4130f24c2ec
|
[
"MIT"
] | null | null | null |
dmz/store.py
|
yuvipanda/edit-stats
|
fb096715f18df999b4af4fb116e6c4130f24c2ec
|
[
"MIT"
] | null | null | null |
"""Implements a db backed storage area for intermediate results"""
import sqlite3
class Store(object):
"""
Represents an sqlite3 backed storage area that's vaguely key value
modeled for intermediate storage about metadata / data for metrics
about multiple wikis that have some underlying country related basis
"""
_initial_sql_ = [
'CREATE TABLE IF NOT EXISTS meta (key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS meta_key ON meta(key);',
'CREATE TABLE IF NOT EXISTS wiki_meta (wiki, key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS wiki_meta_key ON wiki_meta(wiki, key);',
'CREATE TABLE IF NOT EXISTS country_info (wiki, country, key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS country_info_key ON country_info(wiki, country, key);'
]
def __init__(self, path):
"""Initialize a store at the given path.
Creates the tables required if they do not exist"""
self.db = sqlite3.connect(path)
for sql in Store._initial_sql_:
self.db.execute(sql)
def set_meta(self, key, value):
"""Set generic metadata key value, global to the store"""
self.db.execute("INSERT OR REPLACE INTO meta VALUES (?, ?)", (key, value))
self.db.commit()
    def get_meta(self, key):
        """Get generic metadata key value, global to the store"""
        cur = self.db.cursor()
        try:
            cur.execute("SELECT value FROM meta WHERE key = ?", (key, ))
            row = cur.fetchone()
            return row[0] if row else None
        finally:
            cur.close()
def set_wiki_meta(self, wiki, key, value):
"""Set wiki specific meta key value"""
self.db.execute("INSERT OR REPLACE INTO wiki_meta VALUES (?, ?, ?)", (wiki, key, value))
self.db.commit()
    def get_wiki_meta(self, wiki, key):
        """Get wiki specific meta key value"""
        cur = self.db.cursor()
        try:
            cur.execute("SELECT value FROM wiki_meta WHERE wiki = ? AND key = ?", (wiki, key, ))
            row = cur.fetchone()
            return row[0] if row else None
        finally:
            cur.close()
def set_country_info(self, wiki, country, key, value):
"""Set a country and wiki specific key and value"""
self.db.execute("INSERT OR REPLACE INTO country_info VALUES (?, ?, ?, ?)", (wiki, country, key, value))
self.db.commit()
def set_country_info_bulk(self, wiki, key, country_dict):
"""Bulk insert a dictionary of country specific key and value.
The dictionary should be of form {'country': 'value'}
"""
        insert_data = [(wiki, k, key, v) for (k, v) in country_dict.items()]
self.db.executemany("INSERT OR REPLACE INTO country_info VALUES (?, ?, ?, ?)", insert_data)
self.db.commit()
    def get_country_info(self, wiki, country, key):
        """Get a country and wiki specific value for a given key"""
        cur = self.db.cursor()
        try:
            cur.execute("SELECT value FROM country_info WHERE wiki = ? AND country = ? AND key = ?",
                        (wiki, country, key, ))
            row = cur.fetchone()
            return row[0] if row else None
        finally:
            cur.close()
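# --- Usage sketch (added for illustration; not part of the original file) ---
# Round-trips through the Store above using an in-memory sqlite database so
# nothing touches disk; the keys and values are invented.
if __name__ == "__main__":
    store = Store(":memory:")
    store.set_meta("last_run", "2014-01-01")
    print(store.get_meta("last_run"))
    store.set_wiki_meta("enwiki", "edit_count", 12345)
    print(store.get_wiki_meta("enwiki", "edit_count"))
    store.set_country_info_bulk("enwiki", "edits", {"US": 100, "DE": 42})
    print(store.get_country_info("enwiki", "DE", "edits"))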
| 38.662651
| 111
| 0.600499
| 3,124
| 0.973512
| 0
| 0
| 0
| 0
| 0
| 0
| 1,602
| 0.499221
|
66e230d59002f2287f49fdc67f96ab043ceba837
| 316
|
py
|
Python
|
static/code/loc/attn.py
|
navivokaj/deepcourse
|
c5385e7a7b8417fe1017f1057e8e5d32b4a24bb5
|
[
"Apache-2.0"
] | 91
|
2021-08-28T05:03:36.000Z
|
2022-01-27T09:46:36.000Z
|
static/code/loc/attn.py
|
faisalbi/deepcourse
|
fe26314e43896013d195955d16fc4f215e39a53a
|
[
"Apache-2.0"
] | 8
|
2021-09-14T13:21:56.000Z
|
2021-12-28T19:45:51.000Z
|
static/code/loc/attn.py
|
faisalbi/deepcourse
|
fe26314e43896013d195955d16fc4f215e39a53a
|
[
"Apache-2.0"
] | 10
|
2021-08-28T22:32:19.000Z
|
2022-03-16T11:13:02.000Z
|
import torch
from PIL import Image
# `transform`, `net` and `indexes` are expected to be defined elsewhere in the
# surrounding notebook/script: an image preprocessing pipeline, the trained
# classifier, and the class indexes whose probabilities form the attention map.
def generate_attention(path, size=(224, 224)):
    image = Image.open(path)
    image.thumbnail(size, Image.ANTIALIAS)
    tensor = transform(image)[None]  # add a batch dimension
    with torch.no_grad():
        logits = net(tensor)[0]
    probs = torch.softmax(logits, dim=0)
    attention_map = probs[indexes].sum(dim=0)
    return image, attention_map
| 26.333333
| 46
| 0.708861
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
66e356546289b5293424a7a6ad3ffb4afce031ec
| 7,074
|
py
|
Python
|
main.py
|
usdot-its-jpo-data-portal/metadata-query-function
|
589e5df691fab82e264ce74196dd797b9eb17f5e
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
usdot-its-jpo-data-portal/metadata-query-function
|
589e5df691fab82e264ce74196dd797b9eb17f5e
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
usdot-its-jpo-data-portal/metadata-query-function
|
589e5df691fab82e264ce74196dd797b9eb17f5e
|
[
"Apache-2.0"
] | 1
|
2021-12-14T18:00:20.000Z
|
2021-12-14T18:00:20.000Z
|
import boto3
import dateutil
import glob
import json
import logging
import os
import queue
import time
from queries import MetadataQueries
USE_LOCAL_DATA = True # whether to load data from S3 (false) or locally (true)
LOCAL_DATA_REPOSITORY = "s3data/usdot-its-cvpilot-public-data" # path to local directory containing s3 data
### Query to run
METADATA_QUERY = 'query13_listOfLogFilesBefore'
### Data source configuration settings
PREFIX_STRINGS = ["wydot/BSM/2018/12", "wydot/BSM/2019/01", "wydot/BSM/2019/02", "wydot/BSM/2019/03", "wydot/BSM/2019/04", "wydot/TIM/2018/12", "wydot/TIM/2019/01", "wydot/TIM/2019/02", "wydot/TIM/2019/03", "wydot/TIM/2019/04"]
S3_BUCKET = "usdot-its-cvpilot-public-data"
def lambda_handler(event, context):
if USE_LOCAL_DATA:
print("NOTE: Using local data in directory '%s'" % LOCAL_DATA_REPOSITORY)
# Create a list of analyzable S3 files
s3_client = boto3.client('s3')
s3_file_list = []
for prefix in PREFIX_STRINGS:
matched_file_list = list_s3_files_matching_prefix(s3_client, prefix)
print("Queried for S3 files matching prefix string '%s'. Found %d matching files." % (prefix, len(matched_file_list)))
print("Matching files: [%s]" % ", ".join(matched_file_list))
s3_file_list.extend(matched_file_list)
metadataQueries = MetadataQueries()
perform_query(s3_client, s3_file_list, metadataQueries, METADATA_QUERY)
return
def perform_query(s3_client, s3_file_list, query_object, query_function):
total_records = 0
total_records_in_timeframe = 0
total_records_not_in_timeframe = 0
file_num = 1
query_start_time = time.time()
invalid_s3_files = []
for filename in s3_file_list:
file_process_start_time = time.time()
print("============================================================================")
print("Analyzing file (%d/%d) '%s'" % (file_num, len(s3_file_list), filename))
print("Query being performed: %s" % str(METADATA_QUERY))
file_num += 1
record_list = extract_records_from_file(s3_client, filename)
records_in_timeframe = 0
records_not_in_timeframe = 0
for record in record_list:
total_records += 1
if getattr(query_object, query_function)(record):
records_in_timeframe += 1
if METADATA_QUERY == 'query11_invalidS3FileCount' and filename not in invalid_s3_files:
invalid_s3_files.append(filename)
else:
records_not_in_timeframe += 1
print("Records satisfying query constraints found in this file: \t%d" % records_in_timeframe)
print("Total records found satisfying query constraints so far: \t\t%d" % total_records_in_timeframe)
print("Records NOT found satisfying query constraints: \t\t\t\t%d" % records_not_in_timeframe)
print("Total records NOT found satisfying query constraints so far: \t\t\t%d" % total_records_not_in_timeframe)
time_now = time.time()
print("Time taken to process this file: \t\t\t%.3f" % (time_now - file_process_start_time))
time_elapsed = (time_now - query_start_time)
avg_time_per_file = time_elapsed/file_num
avg_time_per_record = time_elapsed/total_records
est_time_remaining = avg_time_per_file * (len(s3_file_list) - file_num)
print("Time elapsed so far: \t\t\t\t\t%.3f" % time_elapsed)
print("Average time per file: \t\t\t\t\t%.3f" % avg_time_per_file)
print("Average time per record: \t\t\t\t%.6f" % avg_time_per_record)
print("Estimated time remaining: \t\t\t\t%.3f" % est_time_remaining)
total_records_in_timeframe += records_in_timeframe
total_records_not_in_timeframe += records_not_in_timeframe
print("============================================================================")
print("Querying complete.")
### Query-specific output
if hasattr(query_object, 'earliest_generated_at'):
print("Earliest record_generated_at: %s" % query_object.earliest_generated_at)
if hasattr(query_object, 'latest_generated_at'):
print("Latest record_generated_at: %s" % query_object.latest_generated_at)
if METADATA_QUERY == 'query11_invalidS3FileCount':
print("Invalid s3 file count: %d" % len(invalid_s3_files))
invalid_s3_file_out = open('invalid_s3_file_list.txt', 'w')
invalid_s3_file_out.write("%s" % "\n".join(invalid_s3_files))
print("Invalid S3 files written to 'invalid_s3_file_list.txt'")
if METADATA_QUERY == 'query13_listOfLogFilesBefore':
print("Invalid log file count: %d" % len(query_object.log_file_list))
invalid_log_file_list_out = open('invalid_log_file_list.txt', 'w')
invalid_log_file_list_out.write("%s" % "\n".join(query_object.log_file_list.keys()))
print("Invalid S3 files written to 'invalid_log_file_list.txt'")
print("Total number of records found satisfying query constraints: %d (Total number of records not found satisfying query constraints: %d" % (total_records_in_timeframe, total_records_not_in_timeframe))
### Returns a list of records from a given file
def extract_records_from_file(s3_client, filename):
if USE_LOCAL_DATA:
with open(filename, 'r') as f:
return f.readlines()
else:
s3_file = s3_client.get_object(
Bucket=S3_BUCKET,
Key=filename,
)
return list(s3_file['Body'].iter_lines()) ### iter_lines() is significantly faster than read().splitlines()
### Returns filenames from an S3 list files (list_objects) query
def list_s3_files_matching_prefix(s3_client, prefix_string):
if USE_LOCAL_DATA:
try:
files_and_directories = glob.glob(LOCAL_DATA_REPOSITORY+"/"+prefix_string+"/**/*", recursive=True)
files_only = []
for filepath in files_and_directories:
if os.path.isfile(filepath):
files_only.append(filepath)
return files_only
except FileNotFoundError as e:
return []
else:
response = list_s3_objects(s3_client, prefix_string)
filenames = []
if response.get('Contents'):
[filenames.append(item['Key']) for item in response.get('Contents')]
while response.get('NextContinuationToken'):
response = list_s3_objects(s3_client, prefix_string, response.get('NextContinuationToken'))
if response.get('Contents'):
[filenames.append(item['Key']) for item in response.get('Contents')]
return filenames
def list_s3_objects(s3_client, prefix_string, continuation_token=None):
if continuation_token:
return s3_client.list_objects_v2(
Bucket=S3_BUCKET,
Prefix=prefix_string,
ContinuationToken=continuation_token,
)
else:
return s3_client.list_objects_v2(
Bucket=S3_BUCKET,
Prefix=prefix_string,
)
if __name__ == "__main__":
lambda_handler(None, None)
| 46.235294
| 227
| 0.669918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,217
| 0.313401
|
66e36f3c188b5158455460f11322fdc4021ffe06
| 1,070
|
py
|
Python
|
example_config/SecretConfig.py
|
axiegamingph-dev/discordaxieqrbot
|
fac9b3f325b98d21ece12445ec798c125d06f788
|
[
"MIT"
] | null | null | null |
example_config/SecretConfig.py
|
axiegamingph-dev/discordaxieqrbot
|
fac9b3f325b98d21ece12445ec798c125d06f788
|
[
"MIT"
] | null | null | null |
example_config/SecretConfig.py
|
axiegamingph-dev/discordaxieqrbot
|
fac9b3f325b98d21ece12445ec798c125d06f788
|
[
"MIT"
] | 2
|
2022-01-13T18:45:26.000Z
|
2022-03-03T11:50:43.000Z
|
Managers = ['Shim', 'Mike', 'Ryan', 'Kevin', 'Wessa', 'ser0wl']
# google spreedsheet id
ISKO_SPREADSHEET_ID = ''
# the list of names with discord ID
ISKO_DiscordAccount = 'DiscordAccount!A2:B100'
# the list of Names, ronin address, ronin private keys
# eg:
# Name | Address | Privatekey
# Isko1 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# Isko2 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# Isko3 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# note: Name should map to the ISKO_DiscordAccount values
ISKO_Accounts = 'Isko!A2:C100'
# list of names that can request qr code on behalf of that person.
# eg:
# Representative | IskoName
# Isko1 | Isko1
# Isko1 | Isko2
# this means Isko1 can request code for Isko1 and Isko2 using the !qrof Isko1 and !qrof Isko2.
ISKO_Representative = 'Representative!A2:B100'
# Put Your Discord Bot Token Here
DiscordBotToken_Prod = ''
DiscordBotToken_Test = ''
DiscordBotToken = DiscordBotToken_Prod
| 33.4375
| 94
| 0.699065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 850
| 0.794393
|
66e44acc59d85966cbb8120b35805a421dccdbf1
| 566
|
py
|
Python
|
world/dominion/migrations/0011_organization_theories.py
|
stesla/arxcode
|
a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a
|
[
"MIT"
] | 5
|
2019-03-16T08:26:53.000Z
|
2019-11-27T15:42:16.000Z
|
world/dominion/migrations/0011_organization_theories.py
|
stesla/arxcode
|
a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a
|
[
"MIT"
] | 7
|
2018-09-29T05:08:15.000Z
|
2021-06-10T21:35:32.000Z
|
world/dominion/migrations/0011_organization_theories.py
|
stesla/arxcode
|
a0ebf7c4d310de8c1980a8ba2a48948a68bb5a0a
|
[
"MIT"
] | 7
|
2018-09-19T21:11:29.000Z
|
2019-11-19T12:46:14.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-19 03:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('character', '0015_auto_20170605_2252'),
('dominion', '0010_auto_20170511_0645'),
]
operations = [
migrations.AddField(
model_name='organization',
name='theories',
field=models.ManyToManyField(blank=True, null=True, related_name='orgs', to='character.Theory'),
),
]
| 25.727273
| 108
| 0.637809
| 408
| 0.720848
| 0
| 0
| 0
| 0
| 0
| 0
| 190
| 0.335689
|
66e492eef799f5d354e84f2867ee89f9c4cd7b7a
| 200
|
py
|
Python
|
tests/button_test.py
|
almasgai/Drone
|
1223375976baf79d0f4362d42287d1a4039ba1e9
|
[
"MIT"
] | null | null | null |
tests/button_test.py
|
almasgai/Drone
|
1223375976baf79d0f4362d42287d1a4039ba1e9
|
[
"MIT"
] | null | null | null |
tests/button_test.py
|
almasgai/Drone
|
1223375976baf79d0f4362d42287d1a4039ba1e9
|
[
"MIT"
] | null | null | null |
from gpiozero import Button
import os
from time import sleep
button = Button(2)
i = 0
while True:
if button.is_pressed:
print(i, ". I've been pressed")
i += 1
sleep(0.1)
| 15.384615
| 39
| 0.61
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.105
|
66e5419754e56410c068112926f27e01cdae86bb
| 820
|
py
|
Python
|
reprojection.py
|
ekrell/nir2watermap
|
5253f2cde142a62103eb06fb2931c9aed6431211
|
[
"MIT"
] | null | null | null |
reprojection.py
|
ekrell/nir2watermap
|
5253f2cde142a62103eb06fb2931c9aed6431211
|
[
"MIT"
] | null | null | null |
reprojection.py
|
ekrell/nir2watermap
|
5253f2cde142a62103eb06fb2931c9aed6431211
|
[
"MIT"
] | null | null | null |
import rasterio
from rasterio.plot import show, reshape_as_raster, reshape_as_image, adjust_band
from rasterio import warp
import numpy as np
# NOTE: `aRaster` is assumed to be a rasterio dataset opened elsewhere at module
# level; its CRS is used as the source CRS of the reprojection.
def reprojectio(img, bounds, transform, projection="epsg:4326", resolution=0.00001):
    # Compute the destination transform/shape; keep it in a separate name so the
    # source `transform` argument is not clobbered before warp.reproject() runs.
    dst_transform, width, height = warp.calculate_default_transform(
        aRaster.crs, {"init": projection},
        img.shape[0], img.shape[1],
        left=bounds[0], bottom=bounds[1],
        right=bounds[2], top=bounds[3],
        resolution=resolution)
    out_array = np.ndarray((img.shape[0], height, width), dtype=img.dtype)
    warp.reproject(img, out_array, src_crs=aRaster.crs, dst_crs={"init": projection},
        src_transform=transform,
        dst_transform=dst_transform, resampling=warp.Resampling.bilinear)
    return out_array
| 37.272727
| 91
| 0.680488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.054878
|
66e80248874252f8ee1fc31cfa1763523a5f99eb
| 4,034
|
py
|
Python
|
opentsdb/push_thread.py
|
razvandimescu/opentsdb-py
|
61c15302468769121f94323493e88cb51efcea15
|
[
"MIT"
] | 48
|
2016-12-27T10:11:41.000Z
|
2021-11-15T16:05:24.000Z
|
opentsdb/push_thread.py
|
razvandimescu/opentsdb-py
|
61c15302468769121f94323493e88cb51efcea15
|
[
"MIT"
] | 8
|
2017-10-08T16:20:30.000Z
|
2022-02-23T08:36:52.000Z
|
opentsdb/push_thread.py
|
razvandimescu/opentsdb-py
|
61c15302468769121f94323493e88cb51efcea15
|
[
"MIT"
] | 17
|
2017-10-01T01:14:55.000Z
|
2021-11-15T16:05:24.000Z
|
from logging import getLogger
from queue import Empty
import threading
import random
import time
logger = getLogger('opentsdb-py')
class PushThread(threading.Thread):
WAIT_NEXT_METRIC_TIMEOUT = 3
def __init__(self, tsdb_connect, metrics_queue, close_client,
send_metrics_limit, send_metrics_batch_limit, statuses):
super().__init__()
self.tsdb_connect = tsdb_connect
self.metrics_queue = metrics_queue
self.close_client_flag = close_client
self.send_metrics_limit = send_metrics_limit
self.send_metrics_batch_limit = send_metrics_batch_limit
self.statuses = statuses
self._retry_send_metrics = None
def run(self):
while not self._is_done():
start_time = time.time()
try:
if self._retry_send_metrics:
data = self._retry_send_metrics
self._retry_send_metrics = None
else:
data = self._next(self.WAIT_NEXT_METRIC_TIMEOUT)
self.send(data)
except StopIteration:
break
except Empty:
continue
except Exception as error:
logger.exception(error)
if self.send_metrics_limit > 0:
self.__metrics_limit_timeout(start_time)
self.tsdb_connect.disconnect()
def _is_done(self):
return self.tsdb_connect.stopped.is_set() or (self.close_client_flag.is_set() and self.metrics_queue.empty())
def _next(self, wait_timeout):
raise NotImplementedError()
def send(self, data):
raise NotImplementedError()
def __metrics_limit_timeout(self, start_time):
pass
def _update_statuses(self, success, failed):
self.statuses['success'] += success
self.statuses['failed'] += failed
class HTTPPushThread(PushThread):
def _next(self, wait_timeout):
total_metrics = self.metrics_queue.qsize()
iter_count = total_metrics if total_metrics <= self.send_metrics_batch_limit else self.send_metrics_batch_limit
metrics = []
if total_metrics:
for _ in range(iter_count):
metrics.append(self.metrics_queue.get_nowait())
else:
metrics.append(self.metrics_queue.get(block=True, timeout=wait_timeout))
if StopIteration in metrics and len(metrics) == 1:
raise StopIteration
elif StopIteration in metrics:
metrics.remove(StopIteration)
self.metrics_queue.put(StopIteration)
return metrics
def send(self, data):
try:
result = self.tsdb_connect.sendall(*data)
except Exception as error:
logger.exception("Push metric failed: %s", error)
self._retry_send_metrics = data
time.sleep(1)
else:
failed = result.get('failed', 0)
self._update_statuses(result.get('success', 0), failed)
if failed:
logger.warning("Push metrics are failed %d/%d" % (failed, len(data)),
extra={'errors': result.get('errors')})
class TelnetPushThread(PushThread):
def _next(self, wait_timeout):
metric = self.metrics_queue.get(block=True, timeout=wait_timeout)
if metric is StopIteration:
raise metric
return metric
def __metrics_limit_timeout(self, start_time):
duration = time.time() - start_time
wait_time = (2.0 * random.random()) / self.send_metrics_limit
if wait_time > duration:
logger.debug("Wait for %s", wait_time - duration)
time.sleep(wait_time - duration)
def send(self, data):
try:
self.tsdb_connect.sendall(data)
except Exception as error:
logger.exception("Push metric failed: %s", error)
self._retry_send_metrics = data
time.sleep(1)
else:
self._update_statuses(1, 0)
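# --- Usage sketch (added for illustration; not part of the original file) ---
# Wires HTTPPushThread to a stub connection so the batching and shutdown path
# can be exercised without a real OpenTSDB. The stub only implements what the
# thread touches (`stopped`, `sendall`, `disconnect`); it is not library API.
if __name__ == "__main__":
    import queue
    class StubConnect:
        def __init__(self):
            self.stopped = threading.Event()
        def sendall(self, *metrics):
            print("pushing %d metric(s)" % len(metrics))
            return {"success": len(metrics), "failed": 0}
        def disconnect(self):
            print("disconnected")
    statuses = {"success": 0, "failed": 0}
    metrics_queue = queue.Queue()
    close_client = threading.Event()
    pusher = HTTPPushThread(StubConnect(), metrics_queue, close_client,
                            send_metrics_limit=0, send_metrics_batch_limit=10,
                            statuses=statuses)
    pusher.start()
    for i in range(5):
        metrics_queue.put({"metric": "demo.metric", "value": i})
    metrics_queue.put(StopIteration)  # sentinel the thread uses to stop
    pusher.join()
    print(statuses)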
| 32.015873
| 119
| 0.617005
| 3,893
| 0.965047
| 0
| 0
| 0
| 0
| 0
| 0
| 155
| 0.038423
|
66e8dfd4ed77fb442ea81a851f7a9c4e599b1de3
| 465
|
py
|
Python
|
projects/generate_pdf/main.py
|
parth-patel-samarthview/batch_201901
|
f407c1bf9575a01e8ddc507adb6f0574f8d2bc09
|
[
"MIT"
] | 2
|
2019-03-17T07:20:24.000Z
|
2019-03-31T05:47:09.000Z
|
projects/generate_pdf/main.py
|
parth-patel-samarthview/batch_201901
|
f407c1bf9575a01e8ddc507adb6f0574f8d2bc09
|
[
"MIT"
] | null | null | null |
projects/generate_pdf/main.py
|
parth-patel-samarthview/batch_201901
|
f407c1bf9575a01e8ddc507adb6f0574f8d2bc09
|
[
"MIT"
] | 2
|
2019-01-28T13:09:48.000Z
|
2019-03-17T07:20:37.000Z
|
from xlrd import open_workbook
wb = open_workbook(r"C:\Users\Lenovo\Documents\excel converter.xlsx")
for s in wb.sheets():
#print 'Sheet:',s.name
values = []
for row in range(s.nrows):
col_value = []
for col in range(s.ncols):
value = (s.cell(row,col).value)
try : value = str(int(value))
except : pass
col_value.append(value)
values.append(col_value)
print(values)
| 31
| 70
| 0.572043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.154839
|
66ebd223e34af9e0e97db29c5f0febdca09f52fb
| 3,068
|
py
|
Python
|
apitaxdrivers/Openstack.py
|
Apitax/Drivers
|
35b2c2f4c8ce8b98615f42fc30f04111d7b9bffe
|
[
"Apache-2.0"
] | null | null | null |
apitaxdrivers/Openstack.py
|
Apitax/Drivers
|
35b2c2f4c8ce8b98615f42fc30f04111d7b9bffe
|
[
"Apache-2.0"
] | 4
|
2018-08-03T20:01:57.000Z
|
2018-10-22T15:32:27.000Z
|
apitaxdrivers/Openstack.py
|
Apitax/Drivers
|
35b2c2f4c8ce8b98615f42fc30f04111d7b9bffe
|
[
"Apache-2.0"
] | null | null | null |
from apitax.drivers.Driver import Driver
from apitax.utilities.Files import getAllFiles
from apitax.ah.Options import Options
from pathlib import Path
from apitax.ah.Credentials import Credentials
from apitax.utilities.Json import read
from apitax.ah.State import State
from apitax.utilities.Files import getPath
class OpenstackDriver(Driver):
def __init__(self):
super().__init__()
self.users = read(getPath(State.paths['root'] + "/app/users.json"))
def getToken(self, response):
return response.getResponseHeaders().get('X-Subject-Token')
def getTokenAuthHeader(self, credentials):
return {'X-Auth-Token': credentials.token}
def getPasswordAuthData(self, credentials):
authObj = {'auth': {'identity': {'methods': ['password'], 'password': {
'user': {'domain': {'id': 'default'}, 'password': credentials.password, 'name': credentials.username}}}}}
if ("project_id" in credentials.extra):
authObj['auth'].update({"scope": {"project": {"id": credentials.extra['project_id']}}})
return authObj
def isCredentialsPosted(self):
return True
def isConfigurable(self):
return True
def getScriptsCatalog(self):
files = getAllFiles(self.config.path + "/grammar/scripts/**/*.ah")
returner = {"scripts": []}
for file in files:
returner['scripts'].append({"label": file.split('/')[-1].split('.')[0].title(), "relative-path": file,
"path": str(Path(file).resolve())})
# print(returner)
return returner
def getCatalog(self, auth):
from apitax.ah.Connector import Connector
import json
connector = Connector(credentials=Credentials(token=auth.token),
command="custom --get --driver OpenstackDriver --url " + self.getCatalogEndpoint(),
options=Options(debug=False, sensitive=True, driver='OpenstackDriver'), parameters=None)
commandHandler = connector.execute()
services = json.loads(commandHandler.getRequest().getResponseBody())
catalog = {}
catalog['endpoints'] = {}
for service in services['catalog']:
endpoints = service['endpoints']
if (len(endpoints) > 0):
for endpoint in endpoints:
if (endpoint['interface'] == 'public'):
name = service['name']
catalog['endpoints'].update({name: {"label": name, "value": endpoint['url']}})
catalog['selected'] = "http://172.25.190.14:5000"
return catalog
def isApiAuthenticated(self):
return True
def piggyBackOffApiAuth(self):
return True
def apitaxAuth(self, authObj):
authObj = authObj['credentials']
authRequest = ['apiAuthRequest']
#print(authRequest)
try:
return self.users[authObj.username]['role']
except:
return None
return None
| 35.264368
| 118
| 0.601695
| 2,752
| 0.897001
| 0
| 0
| 0
| 0
| 0
| 0
| 527
| 0.171773
|
66edc872f39ffc8e4f5e60f1aba0149f68247a58
| 220
|
py
|
Python
|
my_lambdata/my_script.py
|
TheLadyJack/lambdata-pt5
|
0ff4a86f3cd35e502c0a8a1c704e7944298840f2
|
[
"MIT"
] | null | null | null |
my_lambdata/my_script.py
|
TheLadyJack/lambdata-pt5
|
0ff4a86f3cd35e502c0a8a1c704e7944298840f2
|
[
"MIT"
] | null | null | null |
my_lambdata/my_script.py
|
TheLadyJack/lambdata-pt5
|
0ff4a86f3cd35e502c0a8a1c704e7944298840f2
|
[
"MIT"
] | 1
|
2020-05-11T02:49:28.000Z
|
2020-05-11T02:49:28.000Z
|
# my_script.py
import pandas
from my_lambdata.my_mod import enlarge
print('HAPPY TUESDAY NIGHT')
df = pandas.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
print(df.head())
x = 5
print("ENLARGE", x, "TO", enlarge(x))
| 14.666667
| 55
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 0.245455
|
66ee56f212ce0df2c239268cabb21b8541c895a2
| 1,063
|
py
|
Python
|
Week02/Assignment/jstoppelman_01.py
|
nkruyer/SkillsWorkshop2018
|
2201255ff63eca111635789267d0600a95854c38
|
[
"BSD-3-Clause"
] | 1
|
2020-04-18T03:30:46.000Z
|
2020-04-18T03:30:46.000Z
|
Week02/Assignment/jstoppelman_01.py
|
nkruyer/SkillsWorkshop2018
|
2201255ff63eca111635789267d0600a95854c38
|
[
"BSD-3-Clause"
] | 21
|
2018-07-12T19:12:23.000Z
|
2018-08-10T13:52:45.000Z
|
Week02/Assignment/jstoppelman_01.py
|
nkruyer/SkillsWorkshop2018
|
2201255ff63eca111635789267d0600a95854c38
|
[
"BSD-3-Clause"
] | 60
|
2018-05-08T16:59:20.000Z
|
2018-08-01T14:28:28.000Z
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simps
from scipy.optimize import curve_fit
def curve3(x,a,b,c,d):
return a*x**3+b*x**2+c*x+d
def BIC(y, yhat, k, weight = 1):
err = y - yhat
sigma = np.std(np.real(err))
n = len(y)
B = n*np.log(sigma**2) + weight*k*np.log(n)
return B
x = [ 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., 6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10. ]
y = [3.43, 4.94, 6.45, 9.22, 6.32, 6.11, 4.63, 8.95, 7.8, 8.35, 11.45, 14.71, 11.97, 12.46, 17.42, 17.0, 15.45, 19.15, 20.86]
x=np.asarray(x)
y=np.asarray(y)
coeff=np.polyfit(x,y,1)
t=np.poly1d(coeff)
params, covar = curve_fit(curve3,x,y)
y3=np.asarray(curve3(x,*params))
bt3=BIC(y, y3,3)
print(bt3)
bt=BIC(y,t(x),1)
print(bt)
#print("area=", simps(t3(x),x))
plt.scatter(x,y)
plt.plot(x,t(x),'-')
plt.plot(x,curve3(x,*params),'-')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Week 2 Plot')
plt.text(6, 5, "area={}".format(simps(curve3(x, *params), x)))
plt.savefig("jstoppelman_01.png")
| 26.575
| 125
| 0.590781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.099718
|
66eed650aab89b6bac9cd47794d827289659357d
| 1,225
|
py
|
Python
|
apps/wordcount.py
|
marsupialtail/quokka
|
9dfffaf8499a27f121eb5007db5371934ab9fda5
|
[
"Apache-2.0"
] | 4
|
2022-03-09T19:51:02.000Z
|
2022-03-24T22:00:18.000Z
|
apps/wordcount.py
|
marsupialtail/quokka
|
9dfffaf8499a27f121eb5007db5371934ab9fda5
|
[
"Apache-2.0"
] | null | null | null |
apps/wordcount.py
|
marsupialtail/quokka
|
9dfffaf8499a27f121eb5007db5371934ab9fda5
|
[
"Apache-2.0"
] | 1
|
2022-02-10T04:38:24.000Z
|
2022-02-10T04:38:24.000Z
|
import time
import sys
sys.path.append("/home/ubuntu/quokka/pyquokka")
import pyarrow.compute as compute
import pyarrow as pa
import pandas as pd
from pyquokka.quokka_runtime import TaskGraph
from pyquokka.dataset import InputS3CSVDataset
from pyquokka.executors import UDFExecutor, AggExecutor
import ray
from pyquokka.utils import LocalCluster, QuokkaClusterManager
manager = QuokkaClusterManager()
cluster = manager.get_cluster_from_json("config.json")
def udf2(x):
da = compute.list_flatten(compute.ascii_split_whitespace(x["text"]))
c = da.value_counts().flatten()
return pa.Table.from_arrays([c[0], c[1]], names=["word","count"]).to_pandas().set_index("word")
task_graph = TaskGraph(cluster)
reader = InputS3CSVDataset("wordcount-input",["text"], sep="|", stride = 128 * 1024 * 1024)
words = task_graph.new_input_reader_node(reader, batch_func = udf2)
agg = AggExecutor(fill_value=0)
intermediate = task_graph.new_non_blocking_node({0:words},agg)
result = task_graph.new_blocking_node({0:intermediate}, agg, ip_to_num_channel={cluster.leader_private_ip: 1}, partition_key_supplied={0:None})
task_graph.create()
start = time.time()
task_graph.run()
print(time.time() - start)
print(result.to_pandas())
| 36.029412
| 143
| 0.777959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 94
| 0.076735
|
66f14722457fd9966ac9b7749eb637bceaf702bb
| 5,464
|
py
|
Python
|
websauna/system/devop/cmdline.py
|
stevepiercy/websauna
|
2886b86f7920d75900c634958779d61aa73f011b
|
[
"CNRI-Python"
] | null | null | null |
websauna/system/devop/cmdline.py
|
stevepiercy/websauna
|
2886b86f7920d75900c634958779d61aa73f011b
|
[
"CNRI-Python"
] | null | null | null |
websauna/system/devop/cmdline.py
|
stevepiercy/websauna
|
2886b86f7920d75900c634958779d61aa73f011b
|
[
"CNRI-Python"
] | null | null | null |
"""Helper functions to initializer Websauna framework for command line applications."""
# Standard Library
import logging
import os
import sys
import typing as t
# Pyramid
import plaster
from pyramid import router
from pyramid import scripting
from rainbow_logging_handler import RainbowLoggingHandler
# Websauna
from websauna.system import Initializer
from websauna.system.http import Request
from websauna.system.http.utils import make_routable_request
from websauna.system.model.meta import create_dbsession
def prepare_config_uri(config_uri: str) -> str:
"""Make sure a configuration uri has the prefix ws://.
:param config_uri: Configuration uri, i.e.: websauna/conf/development.ini
:return: Configuration uri with the prefix ws://.
"""
if not config_uri.startswith('ws://'):
config_uri = 'ws://{uri}'.format(uri=config_uri)
return config_uri
def get_wsgi_app(config_uri: str, defaults: dict) -> router.Router:
"""Return a Websauna WSGI application given a configuration uri.
:param config_uri: Configuration uri, i.e.: websauna/conf/development.ini.
:param defaults: Extra options to be passed to the app.
:return: A Websauna WSGI Application
"""
config_uri = prepare_config_uri(config_uri)
loader = plaster.get_loader(config_uri)
return loader.get_wsgi_app(defaults=defaults)
def initializer_from_app(app: router.Router) -> Initializer:
"""Return the initializer for the given app.
:param app: Websauna WSGI application
:return: Websauna Initializer
"""
initializer = getattr(app, 'initializer', None)
assert initializer is not None, "Configuration did not yield to Websauna application with Initializer set up"
return initializer
def setup_logging(config_uri, disable_existing_loggers=False):
"""Include-aware Python logging setup from INI config file.
"""
config_uri = prepare_config_uri(config_uri)
loader = plaster.get_loader(config_uri, protocols=['wsgi'])
loader.setup_logging(disable_existing_loggers=disable_existing_loggers)
def setup_console_logging(log_level: t.Optional[str]=None):
"""Setup console logging.
Aimed to give easy sane defaults for logging in command line applications.
Don't use logging settings from INI, but use hardcoded defaults.
"""
formatter = logging.Formatter("[%(asctime)s] [%(name)s %(funcName)s] %(message)s") # same as default
# setup `RainbowLoggingHandler`
# and quiet some logs for the test output
handler = RainbowLoggingHandler(sys.stdout)
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.handlers = [handler]
env_level = os.environ.get("LOG_LEVEL", "info")
log_level = log_level or getattr(logging, env_level.upper())
logger.setLevel(log_level)
logger = logging.getLogger("requests.packages.urllib3.connectionpool")
logger.setLevel(logging.ERROR)
# SQL Alchemy transactions
logger = logging.getLogger("txn")
logger.setLevel(logging.ERROR)
def init_websauna(config_uri: str, sanity_check: bool=False, console_app: bool=False, extra_options: dict=None) -> Request:
"""Initialize Websauna WSGI application for a command line oriented script.
Example:
.. code-block:: python
import sys
from websauna.system.devop.cmdline import init_websauna
config_uri = sys.argv[1]
request = init_websauna(config_uri)
:param config_uri: Path to config INI file
:param sanity_check: Perform database sanity check on start
:param console_app: Set true to setup console-mode logging. See :func:`setup_console_logging`
:param extra_options: Passed through bootstrap() and is available as :attr:`websauna.system.Initializer.global_options`.
:return: Faux Request object pointing to a site root, having registry and every configured.
"""
# Paster thinks we are a string
if sanity_check:
sanity_check = "true"
else:
sanity_check = "false"
options = {
"sanity_check": sanity_check
}
if extra_options:
options.update(extra_options)
app = get_wsgi_app(config_uri, defaults=options)
initializer = initializer_from_app(app)
registry = initializer.config.registry
dbsession = create_dbsession(registry)
# Set up the request with websauna.site_url setting as the base URL
request = make_routable_request(dbsession, registry)
# This exposes the app object for the integration tests e.g test_static_asset
# TODO: Find a cleaner way to do this
request.app = app
return request
def init_websauna_script_env(config_uri: str) -> dict:
"""Initialize Websauna WSGI application for a IPython notebook.
:param config_uri: Path to config INI file
:return: Dictionary of shell variables
"""
options = {"sanity_check": False}
app = get_wsgi_app(config_uri, defaults=options)
initializer = initializer_from_app(app)
registry = initializer.config.registry
dbsession = create_dbsession(registry)
pyramid_env = scripting.prepare(registry=app.initializer.config.registry)
pyramid_env["app"] = app
pyramid_env["initializer"] = initializer
# Websauna specific
# Set up the request with websauna.site_url setting as the base URL
request = make_routable_request(dbsession, registry)
pyramid_env["request"] = request
pyramid_env["dbsession"] = dbsession
return pyramid_env
| 31.583815
| 124
| 0.731149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,558
| 0.468155
|
dd0515ae81e31b3081572aafa51d5253637ae85f
| 2,010
|
py
|
Python
|
src/apd/aggregation/actions/base.py
|
MatthewWilkes/apd.aggregation
|
427fa908f45332d623295f92e1ccfdaf545d6997
|
[
"BSD-3-Clause"
] | null | null | null |
src/apd/aggregation/actions/base.py
|
MatthewWilkes/apd.aggregation
|
427fa908f45332d623295f92e1ccfdaf545d6997
|
[
"BSD-3-Clause"
] | 11
|
2020-11-23T21:36:48.000Z
|
2022-03-12T00:48:58.000Z
|
src/apd/aggregation/actions/base.py
|
MatthewWilkes/apd.aggregation
|
427fa908f45332d623295f92e1ccfdaf545d6997
|
[
"BSD-3-Clause"
] | 1
|
2020-08-09T01:47:59.000Z
|
2020-08-09T01:47:59.000Z
|
import typing as t
from ..typing import T_value
from ..database import DataPoint
from ..exceptions import NoDataForTrigger
class Trigger(t.Generic[T_value]):
name: str
async def start(self) -> None:
"""Coroutine to do any initial setup"""
return
async def match(self, datapoint: DataPoint) -> bool:
"""Return True if the datapoint is of interest to this
trigger.
This is an optional method, called by the default implementation
of handle(...)."""
raise NotImplementedError
async def extract(self, datapoint: DataPoint) -> T_value:
"""Return the value that this datapoint implies for this trigger,
or raise NoDataForTrigger if no value is appropriate.
Can also raise IncompatibleTriggerError if the value is not readable.
This is an optional method, called by the default implementation
of handle(...).
"""
raise NotImplementedError
async def handle(self, datapoint: DataPoint) -> t.Optional[DataPoint]:
"""Given a data point, optionally return a datapoint that
represents the value of this trigger. Will delegate to the
match(...) and extract(...) functions."""
if not await self.match(datapoint):
# This data point isn't relevant
return None
try:
value = await self.extract(datapoint)
except NoDataForTrigger:
# There was no value for this point
return None
return DataPoint(
sensor_name=self.name,
data=value,
deployment_id=datapoint.deployment_id,
collected_at=datapoint.collected_at,
)
class Action:
async def start(self) -> None:
"""Coroutine to do any initial setup"""
return
async def handle(self, datapoint: DataPoint) -> bool:
"""Apply this datapoint to the action, returning
a boolean to indicate success."""
raise NotImplementedError
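# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal Trigger subclass showing how the match()/extract() hooks are meant
# to be filled in: it watches "temperature" datapoints and re-emits the value
# converted to Fahrenheit. The sensor name and the assumption that `data` is a
# plain number are invented for this sketch.
class FahrenheitTrigger(Trigger[float]):
    name = "temperature_fahrenheit"
    async def match(self, datapoint: DataPoint) -> bool:
        return datapoint.sensor_name == "temperature"
    async def extract(self, datapoint: DataPoint) -> float:
        if datapoint.data is None:
            raise NoDataForTrigger(datapoint)
        return (datapoint.data * 9 / 5) + 32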
| 31.904762
| 77
| 0.636816
| 1,880
| 0.935323
| 0
| 0
| 0
| 0
| 1,784
| 0.887562
| 895
| 0.445274
|
dd05c5af3b4de9bb3a156483a19f52a9e8f9c454
| 1,056
|
py
|
Python
|
scripts/32_Model_Parse_SPRING/24_Collect_Test_Gold_Graphs.py
|
MeghaTiya/amrlib
|
61febbd1ed15d64e3f01126eaeea46211d42e738
|
[
"MIT"
] | null | null | null |
scripts/32_Model_Parse_SPRING/24_Collect_Test_Gold_Graphs.py
|
MeghaTiya/amrlib
|
61febbd1ed15d64e3f01126eaeea46211d42e738
|
[
"MIT"
] | null | null | null |
scripts/32_Model_Parse_SPRING/24_Collect_Test_Gold_Graphs.py
|
MeghaTiya/amrlib
|
61febbd1ed15d64e3f01126eaeea46211d42e738
|
[
"MIT"
] | 1
|
2022-02-09T16:20:42.000Z
|
2022-02-09T16:20:42.000Z
|
#!/usr/bin/python3
import setup_run_dir # Set the working directory and python sys.path to 2 levels above
import os
from glob import glob
from amrlib.graph_processing.amr_loading_raw import load_raw_amr
# Collect all the amr graphs from multiple files and create a gold test file.
# This simply concatenates files and cleans a few bad characters out. The glob pattern
# needs to be exactly the same as what's in generate so the output graph ordering is the same.
if __name__ == '__main__':
glob_pattern = 'amrlib/data/amr_annotation_3.0/data/amrs/split/test/*.txt'
out_fpath = 'amrlib/data/model_parse_spring/test-gold.txt.wiki'
# Load the data
graphs = []
print('Loading data from', glob_pattern)
for fpath in sorted(glob(glob_pattern)):
graphs.extend(load_raw_amr(fpath))
print('Loaded {:,} graphs'.format(len(graphs)))
# Save the collated data
print('Saving data to', out_fpath)
with open(out_fpath, 'w') as f:
for graph in graphs:
f.write('%s\n\n' % graph)
print()
| 37.714286
| 94
| 0.705492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 566
| 0.535985
|
dd0710c4323697be4edfde027a5e9419170be224
| 1,818
|
py
|
Python
|
migrations/versions/281362c70f34_.py
|
TobiasPrt/Smartphoniker-shop
|
6b74a3cc1c81db7a56d70609dbca29ddeec3053f
|
[
"MIT"
] | 2
|
2020-05-11T08:46:45.000Z
|
2020-05-11T09:09:57.000Z
|
migrations/versions/281362c70f34_.py
|
TobiasPrt/Smartphoniker-shop
|
6b74a3cc1c81db7a56d70609dbca29ddeec3053f
|
[
"MIT"
] | 4
|
2021-02-19T13:31:53.000Z
|
2022-02-20T13:34:10.000Z
|
migrations/versions/281362c70f34_.py
|
TobiasPrt/Smartphoniker-shop
|
6b74a3cc1c81db7a56d70609dbca29ddeec3053f
|
[
"MIT"
] | 5
|
2020-04-27T16:25:39.000Z
|
2020-06-07T16:03:15.000Z
|
"""empty message
Revision ID: 281362c70f34
Revises: acff3391146d
Create Date: 2020-05-27 16:58:11.029790
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '281362c70f34'
down_revision = 'acff3391146d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('color', 'color_code',
existing_type=sa.VARCHAR(length=20),
nullable=False)
op.alter_column('color', 'internal_name',
existing_type=sa.VARCHAR(length=128),
nullable=False)
op.alter_column('color', 'name',
existing_type=sa.VARCHAR(length=128),
nullable=False)
op.alter_column('device_series', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=False)
op.alter_column('manufacturer', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('manufacturer', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=True)
op.alter_column('device_series', 'name',
existing_type=sa.VARCHAR(length=64),
nullable=True)
op.alter_column('color', 'name',
existing_type=sa.VARCHAR(length=128),
nullable=True)
op.alter_column('color', 'internal_name',
existing_type=sa.VARCHAR(length=128),
nullable=True)
op.alter_column('color', 'color_code',
existing_type=sa.VARCHAR(length=20),
nullable=True)
# ### end Alembic commands ###
| 31.894737
| 65
| 0.60011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 559
| 0.307481
|
dd07924629dd0724abf147fb5de6f38d430e1d51
| 136
|
py
|
Python
|
ui/widgets/histogram/__init__.py
|
berendkleinhaneveld/Registrationshop
|
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
|
[
"MIT"
] | 25
|
2015-11-08T16:36:54.000Z
|
2022-01-20T16:03:28.000Z
|
ui/widgets/histogram/__init__.py
|
berendkleinhaneveld/Registrationshop
|
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
|
[
"MIT"
] | 2
|
2016-12-01T23:13:08.000Z
|
2017-07-25T02:40:49.000Z
|
ui/widgets/histogram/__init__.py
|
berendkleinhaneveld/Registrationshop
|
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
|
[
"MIT"
] | 10
|
2016-07-05T14:39:16.000Z
|
2022-01-01T02:05:55.000Z
|
from Histogram import Histogram
from HistogramWidget import HistogramWidget
from TrackingHistogramWidget import TrackingHistogramWidget
| 34
| 59
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dd083d9565ab68711f5686f12e467a2276370bf5
| 188
|
py
|
Python
|
python_module/numpy_module/numpy_test.py
|
panc-test/python-study
|
fb172ed4a4f7fb521de9a005cd55115ad63a5b6d
|
[
"MIT"
] | 1
|
2021-09-17T09:32:56.000Z
|
2021-09-17T09:32:56.000Z
|
python_module/numpy_module/numpy_test.py
|
panc-test/python-study
|
fb172ed4a4f7fb521de9a005cd55115ad63a5b6d
|
[
"MIT"
] | 2
|
2021-05-11T05:47:13.000Z
|
2021-05-11T05:48:10.000Z
|
python_module/numpy_module/numpy_test.py
|
panc-test/python-study
|
fb172ed4a4f7fb521de9a005cd55115ad63a5b6d
|
[
"MIT"
] | null | null | null |
"""
numpy模块 —— 数据分析
"""
import numpy
#创建矩阵
array = numpy.array([[1,2,3],[4,5,6]])
print(array,'\n',type(array))
#矩阵维度
print(array.ndim)
#行数和列数
print(array.shape)
#元素个数
print(array.size)
| 11.75
| 38
| 0.654255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 99
| 0.415966
|
dd093842c9dc2bff582de0e411627f80f0d5bed5
| 305
|
py
|
Python
|
flask_config.py
|
khanshifaul/Flask_Template_Skeleton
|
951ccb9fc73ff5a2d501f2bc6a742553b8177fc5
|
[
"Apache-2.0"
] | null | null | null |
flask_config.py
|
khanshifaul/Flask_Template_Skeleton
|
951ccb9fc73ff5a2d501f2bc6a742553b8177fc5
|
[
"Apache-2.0"
] | null | null | null |
flask_config.py
|
khanshifaul/Flask_Template_Skeleton
|
951ccb9fc73ff5a2d501f2bc6a742553b8177fc5
|
[
"Apache-2.0"
] | null | null | null |
class Base(object):
DEBUG = False
TESTING = False
class Production(Base):
DEBUG = False
TESTING = False
class Staging(Base):
DEBUG = True
TESTING = False
class Development(Base):
DEBUG = True
TESTING = True
class Testing(Base):
DEBUG = False
TESTING = True
| 12.708333
| 24
| 0.629508
| 292
| 0.957377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dd0944f2e40b50921bc2a055a4736363525bffe8
| 395
|
py
|
Python
|
mellophone/mellophone/settings_production.py
|
nchlswhttkr/mellophone
|
c454c26d06a0cdebabc7b9ec5fef22c9c48d6ea6
|
[
"MIT"
] | 9
|
2019-02-11T03:35:18.000Z
|
2019-07-17T07:51:44.000Z
|
mellophone/mellophone/settings_production.py
|
nchlswhttkr/mellophone
|
c454c26d06a0cdebabc7b9ec5fef22c9c48d6ea6
|
[
"MIT"
] | 26
|
2019-03-13T05:11:08.000Z
|
2020-04-21T03:53:10.000Z
|
mellophone/mellophone/settings_production.py
|
nchlswhttkr/mellophone
|
c454c26d06a0cdebabc7b9ec5fef22c9c48d6ea6
|
[
"MIT"
] | 2
|
2019-07-19T05:24:58.000Z
|
2019-09-20T06:36:44.000Z
|
# pylint: disable=unused-wildcard-import,wildcard-import
"""
Please try and avoid modifying this file where possible, doing so may cause
different behaviours between local (development) and production environments.
Instead consider modifying the base (default) config. This way, the production
config is effectively the default for this project.
"""
from mellophone.settings_default import *
| 32.916667
| 78
| 0.807595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 349
| 0.883544
|
dd0a966e3c92349746b727ce42ffb0efe29eb341
| 232
|
py
|
Python
|
pythonProject/051.py
|
MontanhaRio/python
|
43ad6173d692ebeb153c23c893e4b8f0bf07afcc
|
[
"MIT"
] | null | null | null |
pythonProject/051.py
|
MontanhaRio/python
|
43ad6173d692ebeb153c23c893e4b8f0bf07afcc
|
[
"MIT"
] | null | null | null |
pythonProject/051.py
|
MontanhaRio/python
|
43ad6173d692ebeb153c23c893e4b8f0bf07afcc
|
[
"MIT"
] | null | null | null |
#051 - Arithmetic progression
primeiro = int(input('primeiro termo: '))
razao = int(input('razao: '))
decimo = primeiro + (10 - 1) * razao
for c in range(primeiro, decimo + razao, razao):
print(f'{c}', end=' - ')
print('acabou')
| 29
| 48
| 0.637931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.324786
|
dd0b8f696341df5e31ece62f9a50dbeb45afc875
| 5,175
|
py
|
Python
|
ProxyCrawl/ProxyCrawl/rules.py
|
Time1ess/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | 18
|
2017-04-25T09:39:08.000Z
|
2022-03-09T08:07:28.000Z
|
ProxyCrawl/ProxyCrawl/rules.py
|
ghosttyq/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | null | null | null |
ProxyCrawl/ProxyCrawl/rules.py
|
ghosttyq/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | 10
|
2017-05-29T00:53:41.000Z
|
2021-05-08T09:07:52.000Z
|
#!/usr/local/bin/python3
# coding: UTF-8
# Author: David
# Email: youchen.du@gmail.com
# Created: 2017-04-26 11:14
# Last modified: 2017-04-30 15:55
# Filename: rules.py
# Description:
import os
import redis
from scrapy.utils.conf import init_env
from ProxyCrawl.settings import PROJECT_ROOT
conn = redis.Redis(decode_responses=True)
labels = ('name', 'url_fmt', 'row_xpath', 'host_xpath', 'port_xpath',
'addr_xpath', 'mode_xpath', 'proto_xpath', 'vt_xpath', 'max_page')
class Rule:
"""
A rule tells how to crawl proxies from a site.
keys in rule_dict:
name:
url_fmt:
row_xpath: Extract one data row from response
host_xpath: Extract host from data row
port_xpath: Extract port from data row
addr_xpath:
mode_xpath:
proto_xpath:
vt_xpath: validation_time
max_page: 200
status:
Author: David
"""
def __getattr__(self, name):
return self.rule_dict.get(name)
def __str__(self):
return 'Rule:{} - {}'.format(self.name, self.rule_dict)
def __repr__(self):
return 'Rule:{} - <{}>'.format(self.name, self.url_fmt)
def __check_vals(self):
if not all([
self.name, self.url_fmt, self.row_xpath, self.host_xpath,
self.port_xpath, self.addr_xpath, self.mode_xpath,
self.proto_xpath, self.vt_xpath]):
raise ValueError('Rule arguments not set properly')
def __init__(self, rule_dict):
self.rule_dict = rule_dict
self.__check_vals()
@staticmethod
def _load_redis_rule(name=None):
"""
Load rule from redis, raise ValueError if no rule fetched.
Author: David
"""
if name is None:
keys = ['Rule:'+key for key in conn.smembers('Rules')]
rule_dicts = []
for key in keys:
res = conn.hgetall(key)
if not res:
raise ValueError('No rule fetched.')
rule_dicts.append(res)
return rule_dicts
else:
key = 'Rule:' + name
res = conn.hgetall(key)
if not res:
raise ValueError('No rule fetched.')
return res
@staticmethod
def _load_csv_rule(name=None):
data = []
with open(os.path.join(PROJECT_ROOT, 'rules.csv'), 'rb') as f:
for line in f:
data.append(tuple(line.decode('utf-8').strip('\n').split(' ')))
rule_dicts = []
for d in data:
rule_dicts.append({k: v for k, v in zip(labels, d)})
if name:
matches = [r for r in rule_dicts if r['name'] == name]
if not matches:
raise ValueError('No rule fetched.')
elif len(matches) > 1:
raise ValueError('Multiple rules fetched.')
else:
return matches[0]
return rule_dicts
@staticmethod
def _decode_rule(rule, int_keys=('max_page',)):
"""
        Decode rule fields, transform str to int.
Author: David
"""
for key in int_keys:
rule[key] = int(rule[key])
return rule
@staticmethod
def _default_status(rule):
"""
Add default status for rule.
Author: David
"""
if not rule.get('status', False):
rule['status'] = 'stopped'
return rule
@classmethod
def _clean_rule(cls, rule, *args, **kwargs):
"""
Clean rule.
Author: David
"""
rule = cls._decode_rule(rule, *args, **kwargs)
rule = cls._default_status(rule)
return rule
@classmethod
def load(cls, name, src='redis'):
"""
Load rule from source and instantiate a new rule item.
Author: David
"""
load_method = getattr(cls, '_load_{}_rule'.format(src))
rule_dict = load_method(name)
rule_dict = cls._clean_rule(rule_dict)
return cls(rule_dict)
@classmethod
def loads(cls, src='redis'):
"""
Load rules from source and instantiate all rule items.
Author: David
"""
load_method = getattr(cls, '_load_{}_rule'.format(src))
rule_dicts = load_method()
rule_dicts = [cls._clean_rule(rule) for rule in rule_dicts]
insts = [cls(rule_dict) for rule_dict in rule_dicts]
return insts
@staticmethod
def _save_redis_rule(rule_dict):
key = 'Rule:' + rule_dict['name']
conn.hmset(key, rule_dict)
conn.sadd('Rules', rule_dict['name'])
@staticmethod
def _save_csv_rule(rule_dict):
raise NotImplementedError
def save(self, dst='redis'):
"""
Save rule to destination.
Author: David
"""
self.__check_vals()
save_method = getattr(self, '_save_{}_rule'.format(dst))
save_method(self.rule_dict)
if __name__ == '__main__':
# rule = Rule.load('xici')
init_env('default')
rules = Rule.loads('csv')
for r in rules:
r.save()
print(rules[0])
# rule.save_rule()
| 26.813472
| 79
| 0.565217
| 4,493
| 0.868213
| 0
| 0
| 3,104
| 0.599807
| 0
| 0
| 1,693
| 0.32715
|
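A small usage sketch for the Rule class in the record above, instantiating it from an in-memory dict instead of redis or csv; the import path follows the project layout shown in the record (and assumes its dependencies, scrapy and redis, are installed), and every field value below is made up.

from ProxyCrawl.rules import Rule  # path taken from the record above

rule_dict = {
    "name": "example-site",                      # hypothetical site name
    "url_fmt": "http://example.com/proxies/{}",  # hypothetical page URL template
    "row_xpath": "//table//tr",
    "host_xpath": "td[1]/text()",
    "port_xpath": "td[2]/text()",
    "addr_xpath": "td[3]/text()",
    "mode_xpath": "td[4]/text()",
    "proto_xpath": "td[5]/text()",
    "vt_xpath": "td[6]/text()",
    "max_page": 5,
}
rule = Rule(rule_dict)           # __init__ checks that the required keys are set
print(rule.name, rule.max_page)  # attribute access is delegated to rule_dict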
dd0c981b401b14bcc898b39cf9bb3a7382b0f82e
| 1,299
|
py
|
Python
|
bcs-ui/backend/helm/authtoken/views.py
|
kayinli/bk-bcs
|
93a0856175f7b066ef835921572c1cac590dbd8e
|
[
"Apache-2.0"
] | 1
|
2021-11-16T08:15:13.000Z
|
2021-11-16T08:15:13.000Z
|
bcs-ui/backend/helm/authtoken/views.py
|
kayinli/bk-bcs
|
93a0856175f7b066ef835921572c1cac590dbd8e
|
[
"Apache-2.0"
] | null | null | null |
bcs-ui/backend/helm/authtoken/views.py
|
kayinli/bk-bcs
|
93a0856175f7b066ef835921572c1cac590dbd8e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from rest_framework import viewsets
from backend.utils.views import ActionSerializerMixin, with_code_wrapper
from .models import Token
from .serializers import TokenSLZ, TokenUpdateSLZ
logger = logging.getLogger(__name__)
@with_code_wrapper
class TokenView(ActionSerializerMixin, viewsets.ModelViewSet):
serializer_class = TokenSLZ
lookup_url_kwarg = "token_id"
action_serializers = {
'update': TokenUpdateSLZ,
}
def get_queryset(self):
return Token.objects.filter(username=self.request.user.username)
| 34.184211
| 115
| 0.779831
| 298
| 0.226272
| 0
| 0
| 317
| 0.240699
| 0
| 0
| 772
| 0.586181
|
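An illustrative sketch of how a ModelViewSet like TokenView above is usually exposed from a Django urls.py through a DRF router; the "tokens" URL prefix is an assumption, and basename is required here because the view defines get_queryset() rather than a queryset attribute.

from rest_framework.routers import DefaultRouter

from backend.helm.authtoken.views import TokenView  # path taken from the record

router = DefaultRouter()
# basename must be given explicitly since TokenView has no `queryset` attribute
router.register(r"tokens", TokenView, basename="token")
urlpatterns = router.urls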
dd114daf874a5113511e683f27e7e520b01176b1
| 3,469
|
py
|
Python
|
backend/workspaces/Users/actions/login.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | null | null | null |
backend/workspaces/Users/actions/login.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | 50
|
2021-03-28T03:06:19.000Z
|
2021-10-18T12:36:16.000Z
|
backend/workspaces/Users/actions/login.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | 1
|
2021-07-30T07:12:46.000Z
|
2021-07-30T07:12:46.000Z
|
"""
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
from core.actions.action import Action
from core.logs import logManager
from core.actions import webclientActions
import arrow
class Login(Action):
def __init__(self, app):
# logManager.info("Login of type Action created")
super().__init__(app, uri='login')
def handle(self, action, user, workspace, actionManager):
logManager.info("Execute login action")
replyActions = []
user = (actionManager.userManager.getUser(action['username']))
if user is not None:
if user.account_verified is False:
replyActions.append(
webclientActions.NotificationAction.generate("Your account need to be verified before login.",
"warning"))
return 'success', replyActions
if user.checkPassword(action['password']):
userManager = actionManager.userManager
menuBuilder = actionManager.menuBuilder
# update serverside jwt token
access_token = userManager.updateAccessToken(action['username'])
# update menu
menu = menuBuilder.buildMenu(user)
# build up
replyActions.append(webclientActions.UpdateSessionTokenAction.generate(access_token))
replyActions.append(webclientActions.UpdateMenuAction.generate(menu))
replyActions.append(webclientActions.NotificationAction.generate("Login successful.", "success"))
if 'options' in action and 'redirect' in action['options']:
if action['options']['redirect'] != "":
replyActions.append(webclientActions.RouteAction.generate(action['options']['redirect'], 2))
else:
replyActions.append(webclientActions.RouteAction.generate("dashboard", 2))
replyActions.append(
webclientActions.UpdateUserInfoAction.generate(user.firstname, user.lastname, user.email))
user.sessionValid = True
user.last_login_date = arrow.utcnow()
# actionManager.db.session.commit()
else:
replyActions.append(
webclientActions.NotificationAction.generate("Login failed, username or password is wrong.",
"error"))
else:
replyActions.append(
webclientActions.NotificationAction.generate("Login failed, username or password is wrong.", "error"))
return 'success', replyActions
| 45.051948
| 118
| 0.63563
| 2,539
| 0.731911
| 0
| 0
| 0
| 0
| 0
| 0
| 1,216
| 0.350533
|
dd118309b83096677693134bb6b0d70a964e1ab7
| 1,157
|
py
|
Python
|
fastquotes/fund/__init__.py
|
YangzhenZhao/fastquotes
|
1faba9f7fc7801a11359001e08cefa9cfbc41d64
|
[
"MIT"
] | 4
|
2020-11-18T11:25:00.000Z
|
2021-04-08T01:02:49.000Z
|
fastquotes/fund/__init__.py
|
YangzhenZhao/fastquotes
|
1faba9f7fc7801a11359001e08cefa9cfbc41d64
|
[
"MIT"
] | null | null | null |
fastquotes/fund/__init__.py
|
YangzhenZhao/fastquotes
|
1faba9f7fc7801a11359001e08cefa9cfbc41d64
|
[
"MIT"
] | 1
|
2020-11-18T11:25:01.000Z
|
2020-11-18T11:25:01.000Z
|
import json
import requests
def fund_intro_dict() -> dict:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}
url = "http://fund.eastmoney.com/js/fundcode_search.js"
res = requests.get(url, headers=headers)
text_data = res.text
res_list = json.loads(text_data.strip("var r = ")[:-1])
res_dict = {}
for item in res_list:
res_dict[item[0]] = {"基金代码": item[0], "基金简称": item[2], "基金类型": item[3]}
return res_dict
def etf_list() -> list:
url = (
"http://vip.stock.finance.sina.com.cn/quotes_service/api"
"/jsonp.php/IO.XSRV2.CallbackList['da_yPT46_Ll7K6WD']:"
"/Market_Center.getHQNodeDataSimple"
)
params = {
"page": "1",
"num": "1000",
"sort": "symbol",
"asc": "0",
"node": "etf_hq_fund",
"[object HTMLDivElement]": "qvvne",
}
r = requests.get(url, params=params)
data_text = r.text
data_list = json.loads(data_text[data_text.find("([") + 1 : -2])
return [item["symbol"] for item in data_list]
| 29.666667
| 83
| 0.586863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 485
| 0.410669
|
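A hedged usage sketch for the two helpers in the record above; both call public Eastmoney/Sina endpoints, so the output depends on network availability, and the fund code used below is only an example.

from fastquotes.fund import fund_intro_dict, etf_list  # path taken from the record

funds = fund_intro_dict()
print(len(funds), "funds in the index")
print(funds.get("000001"))  # look up one example fund code (may be absent)

symbols = etf_list()
print(symbols[:5])          # first few ETF symbols returned by the Sina API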
dd13e1b360546b453646ec337688f0743f83b569
| 3,374
|
py
|
Python
|
pyrentals/test_pyrentals.py
|
asm128/pyrentals
|
862a0f78d93b18499555dd3c8c1effb7cae9f99b
|
[
"MIT"
] | null | null | null |
pyrentals/test_pyrentals.py
|
asm128/pyrentals
|
862a0f78d93b18499555dd3c8c1effb7cae9f99b
|
[
"MIT"
] | null | null | null |
pyrentals/test_pyrentals.py
|
asm128/pyrentals
|
862a0f78d93b18499555dd3c8c1effb7cae9f99b
|
[
"MIT"
] | null | null | null |
from pyrentals import Cart
import unittest
class Test_test_pyrentals(unittest.TestCase):
def test_method_empty(self):
test_cart_instance = Cart()
test_cart_instance.Rentals = {}
return self.assertTrue(test_cart_instance.empty(), "Cart should be empty when it's just created.")
def test_method_not_empty(self):
test_cart_instance = Cart()
test_rental = {"Type": "Hour", "Time": 2}
test_cart_instance.Rentals = [test_rental, ]
return self.assertFalse(test_cart_instance.empty(), "Cart shouldn't be empty if we just added test_rental.")
def test_calculate_price_empty(self):
test_cart_instance = Cart()
return self.assertEqual(0, test_cart_instance.calculate_price())
def test_price_list(self):
test_cart_instance = Cart()
price_list = {"Hour": 5, "Day": 15, "Month": 60}
test_rental = {"Type": "Day", "Time": 2}
test_cart_instance.Rentals = [test_rental] * 2
output_prices = []
final_price = test_cart_instance.calculate_price(price_list, output_prices)
for price in output_prices:
self.assertEqual(30, price)
self.assertEqual(60, final_price)
return
def test_family_discount(self):
test_cart_instance = Cart()
price_list = {"Hour": 5, "Day": 15, "Month": 60}
test_rental = {"Type": "Day", "Time": 2}
test_cart_instance.Rentals = [test_rental] * 3
output_prices = []
final_price = test_cart_instance.calculate_price(price_list, output_prices)
raw_price = sum([x["Time"] * price_list[x["Type"]] for x in test_cart_instance.Rentals])
self.assertLess(final_price, raw_price)
return self.assertTrue(final_price == raw_price - raw_price * .3)
def test_family_discount_limit(self):
test_cart_instance = Cart()
price_list = {"Hour": 5, "Day": 15, "Month": 60}
test_rental = {"Type": "Day", "Time": 2}
test_cart_instance.Rentals = [test_rental] * 6
output_prices = []
final_price = test_cart_instance.calculate_price(price_list, output_prices)
raw_price = sum([x["Time"] * price_list[x["Type"]] for x in test_cart_instance.Rentals])
return self.assertEqual(final_price, raw_price)
def test_output_prices(self):
test_cart_instance = Cart()
price_list = {"Hour": 5, "Day": 15, "Month": 60}
test_rental = {"Type": "Day", "Time": 2}
test_cart_instance.Rentals = [test_rental] * 3
output_prices = []
final_price = test_cart_instance.calculate_price(price_list, output_prices)
raw_price = sum([x["Time"] * price_list[x["Type"]] for x in test_cart_instance.Rentals])
return self.assertEqual(raw_price, sum(output_prices))
def test_add_rental(self):
test_cart_instance = Cart()
price_list = {"Hour": 5, "Day": 15, "Month": 60}
test_rental = {"Type": "Day", "Time": 2}
for x in range(2):
test_cart_instance.add_rental(test_rental["Type"], test_rental["Time"])
output_prices = []
final_price = test_cart_instance.calculate_price(price_list, output_prices)
for price in output_prices:
self.assertEqual(30, price)
self.assertEqual(60, final_price)
return
if __name__ == '__main__':
unittest.main()
| 43.25641
| 116
| 0.648488
| 3,281
| 0.972436
| 0
| 0
| 0
| 0
| 0
| 0
| 352
| 0.104327
|
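The tests above pin down the expected Cart behaviour without showing the class itself; the following is a hypothetical reconstruction that satisfies those tests (raw per-rental prices reported through output_prices, a 30% family discount for 3 to 5 rentals), not the actual pyrentals implementation.

class Cart:
    def __init__(self):
        self.Rentals = []

    def empty(self):
        return len(self.Rentals) == 0

    def add_rental(self, rental_type, time):
        self.Rentals.append({"Type": rental_type, "Time": time})

    def calculate_price(self, price_list=None, output_prices=None):
        if self.empty() or price_list is None:
            return 0
        total = 0
        for rental in self.Rentals:
            price = rental["Time"] * price_list[rental["Type"]]
            if output_prices is not None:
                output_prices.append(price)  # undiscounted per-rental price
            total += price
        if 3 <= len(self.Rentals) <= 5:
            total -= total * 0.3             # family discount window
        return total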
dd173340b0cceb447e4578760ece3ab7a5d8df8b
| 63,514
|
py
|
Python
|
src/simple_tools/p4a_processor.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 51
|
2015-01-31T01:51:39.000Z
|
2022-02-18T02:01:50.000Z
|
src/simple_tools/p4a_processor.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 7
|
2017-05-29T09:29:00.000Z
|
2019-03-11T16:01:39.000Z
|
src/simple_tools/p4a_processor.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 12
|
2015-03-26T08:05:38.000Z
|
2022-02-18T02:01:51.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors:
# - Grégoire Péan <gregoire.pean@hpc-project.com>
# - Ronan Keryell <ronan.keryell@hpc-project.com>
# + Many others...
# Beware: class p4a_scmp_compiler declared in ../scmp/p4a_scmp_compiler.py
# inherits from class p4a_processor.
# Maybe a common parent class with the minimal set of shared features
# should be defined, from which all compilers (say p4a_cuda_compiler,
# p4a_openmp_compiler) would inherit. BC.
import p4a_util
import p4a_astrad
import optparse
import subprocess
import sys
import os
import re
import shutil
import pypsex
'''
Par4All processing
'''
# Basic properties to be used in Par4All:
default_properties = dict(
# Useless to go on if something goes wrong... :-(
#ABORT_ON_USER_ERROR = True,
ABORT_ON_USER_ERROR = False,
    # Compute the intraprocedural preconditions at the same
# time as transformers and use them to improve the
# accuracy of expression and statement transformers:
SEMANTICS_COMPUTE_TRANSFORMERS_IN_CONTEXT = True,
# Use the more precise fix point operator to cope with
# while loops:
SEMANTICS_FIX_POINT_OPERATOR = "derivative",
# Try to restructure the code for more precision:
UNSPAGHETTIFY_TEST_RESTRUCTURING = True,
UNSPAGHETTIFY_RECURSIVE_DECOMPOSITION = True,
# Simplify for loops into Fortran do-loops internally for
# better precision of analysis:
FOR_TO_DO_LOOP_IN_CONTROLIZER = True,
# Warning: assume that there is no aliasing between IO
# streams ('FILE *' variables):
ALIASING_ACROSS_IO_STREAMS = False,
# Warning: this is a work in progress. Assume no weird
# aliasing
CONSTANT_PATH_EFFECTS = False,
# Prevents automatic pretty-printing of OpenMP directives when
# unsplitting. We will add them using ompify if requested.
PRETTYPRINT_SEQUENTIAL_STYLE = "do",
# Required property since floating point arithmetic operations are not
# associative because of rounding. PIPS does not take that into
# account now and is based on theoretical math... cf PIPS TRAC #551
# Well, François Irigoin seems to have improved this, so avoid
# the spam of parenthesis...
# PRETTYPRINT_ALL_PARENTHESES = True
SCALARIZATION_USE_REGISTERS = False
)
# The default values of some PIPS properties are OK for C but has to be
# redefined for FORTRAN
default_fortran_cuda_properties = dict(
GPU_KERNEL_PREFIX = "P4A_KERNEL",
GPU_WRAPPER_PREFIX = "P4A_WRAPPER",
GPU_LAUNCHER_PREFIX = "P4A_LAUNCHER",
GPU_FORTRAN_WRAPPER_PREFIX = "P4A_F08_WRAPPER",
CROUGH_SCALAR_BY_VALUE_IN_FCT_DECL = True,
CROUGH_SCALAR_BY_VALUE_IN_FCT_CALL = True,
PRETTYPRINT_STATEMENT_NUMBER = False,
CROUGH_FORTRAN_USES_INTERFACE = True,
KERNEL_LOAD_STORE_LOAD_FUNCTION = "P4A_COPY_TO_ACCEL",
KERNEL_LOAD_STORE_LOAD_FUNCTION_1D = "P4A_COPY_TO_ACCEL_1D",
KERNEL_LOAD_STORE_LOAD_FUNCTION_2D = "P4A_COPY_TO_ACCEL_2D",
KERNEL_LOAD_STORE_LOAD_FUNCTION_3D = "P4A_COPY_TO_ACCEL_3D",
KERNEL_LOAD_STORE_LOAD_FUNCTION_4D = "P4A_COPY_TO_ACCEL_4D",
KERNEL_LOAD_STORE_LOAD_FUNCTION_5D = "P4A_COPY_TO_ACCEL_5D",
KERNEL_LOAD_STORE_LOAD_FUNCTION_6D = "P4A_COPY_TO_ACCEL_6D",
KERNEL_LOAD_STORE_ALLOCATE_FUNCTION = "P4A_ACCEL_MALLOC",
KERNEL_LOAD_STORE_STORE_FUNCTION = "P4A_COPY_FROM_ACCEL",
KERNEL_LOAD_STORE_STORE_FUNCTION_1D = "P4A_COPY_FROM_ACCEL_1D",
KERNEL_LOAD_STORE_STORE_FUNCTION_2D = "P4A_COPY_FROM_ACCEL_2D",
KERNEL_LOAD_STORE_STORE_FUNCTION_3D = "P4A_COPY_FROM_ACCEL_3D",
KERNEL_LOAD_STORE_STORE_FUNCTION_4D = "P4A_COPY_FROM_ACCEL_4D",
KERNEL_LOAD_STORE_STORE_FUNCTION_5D = "P4A_COPY_FROM_ACCEL_5D",
KERNEL_LOAD_STORE_STORE_FUNCTION_6D = "P4A_COPY_FROM_ACCEL_6D",
KERNEL_LOAD_STORE_DEALLOCATE_FUNCTION = "P4A_ACCEL_FREE",
KERNEL_LOAD_STORE_VAR_SUFFIX = "_num"
)
# Import of pyps will be done manually.
# Module instance will be held in following variable.
pyps = None
def apply_user_requested_phases(modules=None, phases_to_apply=[]):
"""Apply user requested phases to modules
"""
for ph in phases_to_apply:
# Apply requested phases to modules:
getattr(modules, ph)()
class p4a_processor_output(object):
files = []
database_dir = ""
exception = None
class p4a_processor_input(object):
"""Store options given to the process engine, mainly digested by PyPS.
Some of the options are used during the output file generation.
"""
project_name = ""
noalias = False
pointer_analysis = False
accel = False
cuda = False
opencl = False
com_optimization = False
cuda_cc = 2
fftw3 = False
openmp = False
scmp = False
fine_grain = False
c99 = False
select_modules = ""
exclude_modules = ""
cpp_flags = ""
files = []
recover_includes = True
native_recover_includes = False
properties = {}
output_dir=None
output_suffix=""
output_prefix=""
brokers=""
# To store some arbitrary Python code to be executed inside p4a_process:
execute_some_python_code_in_process = None
apply_phases={}
class p4a_processor(object):
"""Process program files with PIPS and other tools
"""
# If the main language is Fortran, set to True:
fortran = None
workspace = None
main_filter = None
# The project name:
project_name = None
# Set to True to try to do some #include tracking and recovering
recover_includes = None
native_recover_includes = None
# Initialize some lists of files:
# - the list of the input files
files = []
# - the list of the p4a stub files
accel_files = []
# The list of the "on the fly" resources generated by PIPS:
# - generated modules (by PIPS):
generated_modules = []
# - the list of the crough modules:
crough_modules = []
# - the list of module with interfaces:
interface_modules = []
# - the generated header files:
header_files = []
# - the set of CUDA modules:
cuda_modules = set ()
# - the set of C modules:
c_modules = set ()
# - the list of Fortran modules:
fortran_modules = set ()
# - the list of kernel names:
kernels = []
# - the list of launcher names:
launchers = []
# Some constants to be used for the PIPS generated files:
new_files_folder = "p4a_new_files"
new_files_include = new_files_folder + "_include.h"
# The typedef to be used in CUDA to flag kernels
kernel_return_type = "P4A_accel_kernel"
wrapper_return_type = "P4A_accel_kernel_wrapper"
astrad_postproc = None
astrad_module_name = None
astrad_kernel_name = None
def __init__(self, workspace = None, project_name = "", cpp_flags = "",
verbose = False, files = [], filter_select = None,
filter_exclude = None, noalias = False,
pointer_analysis = False,
accel = False, cuda = False, opencl = False, openmp = False,
spear=False, astrad=False,
com_optimization = False, cuda_cc=2, fftw3 = False,
recover_includes = True, native_recover_includes = False,
c99 = False, use_pocc = False, pocc_options = "",
atomic = False, kernel_unroll=0, brokers="",
properties = {}, apply_phases={}, activates = []
, **unused_kwords):
self.noalias = noalias
self.pointer_analysis = pointer_analysis
self.recover_includes = recover_includes
self.native_recover_includes = native_recover_includes
self.accel = accel
self.cuda = cuda
self.opencl = opencl
self.openmp = openmp
self.spear = spear
self.astrad = astrad
self.com_optimization = com_optimization
self.cuda_cc = cuda_cc
self.fftw3 = fftw3
self.c99 = c99
self.pocc = use_pocc
self.pocc_options = pocc_options
self.atomic = atomic
self.kernel_unroll = kernel_unroll
self.apply_phases = apply_phases
if workspace:
# There is one provided: use it!
self.workspace = workspace
else:
# This is because pyps.workspace.__init__ will test for empty
# strings...
if cpp_flags is None:
cpp_flags = ""
if not project_name:
raise p4a_util.p4a_error("Missing project_name")
self.project_name = project_name
# The generated kernels source files will go into a directory named
# with project_name.generated
self.new_files_folder = self.project_name + '.generated'
if self.recover_includes and not self.native_recover_includes:
# Use a special preprocessor to track #include by a
# man-in-the-middle attack :-).
# Use -ffreestanding to prevent gcc 4.8 from including
# <stdc-predef.h> that breaks assumptions in
# p4a_recover_includes. See "Pre-processor pre-includes"
# http://gcc.gnu.org/gcc-4.8/porting_to.html
# Use -D_GNU_SOURCE to have less hostile
# __bswap_32(), __bswap_64()... for PIPS
os.environ['PIPS_CPP'] = 'p4a_recover_includes --simple -E -ffreestanding -D_GNU_SOURCE'
for file in files:
if self.fortran is None:
# Track the language for an eventual later compilation
# by a back-end target compiler. The first file type
# select the type for all the workspace:
if p4a_util.fortran_file_p(file):
self.fortran = True
else:
self.fortran = False
if not os.path.exists(file):
raise p4a_util.p4a_error("File does not exist: " + file)
self.files = files
if accel:
accel_stubs_name = None
# Analyze this stub file so that PIPS interprocedurality
# is happy about the run-time we use:
if self.fortran:
accel_stubs_name = "p4a_stubs.f"
else:
accel_stubs_name = "p4a_stubs.c"
# The stubs are here in our distribution:
accel_stubs = os.path.join(os.environ["P4A_ACCEL_DIR"],
accel_stubs_name)
# Add the stubs file to the list to use in PIPS:
self.files += [ accel_stubs ]
# Mark this file as a stub to avoid copying it out later:
self.accel_files += [ accel_stubs ]
if (self.astrad and (not self.fortran) and (not self.spear)):
astrad_stubs = os.path.join(os.environ["P4A_ASTRAD_DIR"],
"p4a_astrad_stubs.c")
# Add the stubs file to the list to use in PIPS:
self.files += [ astrad_stubs ]
# Mark this file as a stub to avoid copying it out later:
self.accel_files += [ astrad_stubs ]
# Late import of pyps to avoid importing it until
# we really need it.
global pyps
global broker
global pocc
try:
pyps = __import__("pyps")
broker = __import__("broker")
if self.pocc:
pocc = __import__("pocc")
except:
raise
# If we have #include recovery and want to use the native one:
recover_Include = self.recover_includes and self.native_recover_includes
# Create the PyPS workspace:
if brokers != "":
brokers+=","
brokers+="p4a_stubs_broker"
self.workspace = broker.workspace(*self.files,
name = self.project_name,
verbose = verbose,
cppflags = cpp_flags,
recoverInclude = recover_Include,
brokersList=brokers)
# Array regions are a must! :-) Ask for most precise array
# regions:
self.workspace.activate("MUST_REGIONS")
if self.noalias:
# currently, as default PIPS phases do not select pointer analysis, setting
# properties is sufficient.
# activating phases may become necessary if the default behavior
# changes in Pips
properties["CONSTANT_PATH_EFFECTS"] = False
properties["TRUST_CONSTANT_PATH_EFFECTS_IN_CONFLICTS"] = True
if pointer_analysis:
properties["ABSTRACT_HEAP_LOCATIONS"]="context-sensitive"
self.workspace.activate("proper_effects_with_points_to")
self.workspace.activate("cumulated_effects_with_points_to")
self.workspace.activate("must_regions_with_points_to")
# set the workspace properties
self.set_properties(properties)
# Skip the compilation units and the modules of P4A runtime, they
# are just here so that PIPS has a global view of what is going
# on, not to be parallelized :-)
skip_p4a_runtime_and_compilation_unit_re = re.compile("P4A_.*|.*!$")
# Also filter out modules based on --include-modules and
# --exclude-modules.
filter_select_re = None
if filter_select:
filter_select_re = re.compile(filter_select)
filter_exclude_re = None
if filter_exclude:
filter_exclude_re = re.compile(filter_exclude)
# Combine the 3 filters in one:
self.main_filter = (lambda module: not skip_p4a_runtime_and_compilation_unit_re.match(module.name)
and (filter_exclude_re == None or not filter_exclude_re.match(module.name))
and (filter_select_re == None or filter_select_re.match(module.name)))
def set_properties (self, user_properties):
""" Initialize the properties according to the default defined properties
and to the user defined ones.
"""
global default_properties
global default_fortran_cuda_properties
all_properties = default_properties
# if accel (might be cuda) and fortran add some properties
if ((self.accel == True) and (self.fortran == True)):
for k in default_fortran_cuda_properties:
all_properties[k] = default_fortran_cuda_properties[k]
# overwrite default properties with the user defined ones
for k in user_properties:
all_properties[k] = user_properties[k]
# apply the properties to the workspace
for k in all_properties:
p4a_util.debug("Property " + k + " = " + str(all_properties[k]))
setattr(self.workspace.props,k, all_properties[k])
return
def get_database_directory(self):
"Return the directory of the current PIPS database"
return os.path.abspath(self.workspace.dirname)
def filter_modules(self, filter_select = None, filter_exclude = None, other_filter = lambda x: True):
"""Filter modules according to their names and select them if they
match all the 3 following conditions.
If filter_exclude regex if defined, then matching modules are
filtered out.
If filter_select regex if defined, the matching modules are kept.
If other_filter regex if defined, select also according to this
matching.
"""
filter_select_re = None
if filter_select:
filter_select_re = re.compile(filter_select)
filter_exclude_re = None
if filter_exclude:
filter_exclude_re = re.compile(filter_exclude)
# don't try to do anything with astrad projects libraries modules
# this is however incomplete
filter_exclude_desc_re = None
filter_exclude_astrad_re = None
filter_exclude_bswap_re = None
if (self.astrad):
filter_exclude_desc_re = re.compile('DESC')
filter_exclude_astrad_re = re.compile('astrad')
filter_exclude_bswap_re = re.compile('bswap')
filter = (lambda module: self.main_filter(module)
and (filter_exclude_re == None or not filter_exclude_re.match(module.name))
and (filter_exclude_astrad_re == None or not filter_exclude_astrad_re.search(module.name))
and (filter_exclude_desc_re == None or not filter_exclude_desc_re.match(module.name))
and (filter_exclude_bswap_re == None or not filter_exclude_bswap_re.match(module.name))
and (filter_select_re == None or filter_select_re.match(module.name))
and other_filter(module.name))
# Select the interesting modules:
return self.workspace.filter(filter)
# RK: I think the following should be in another file because it
# clutters the global compilation sketch
def post_process_fortran_wrapper (self, file_name, subroutine_name):
""" All the dirty thing about C and Fortran interoperability is hidden
in one unique file, the Fortran wrapper. This method does the last
modification to make the file compilable by gfortran.
Those steps are done:
1 - insert the needed "use" statement
2 - insert the pointer declaration for P4A inserted variable
3 - insert the 64 bit integer var
3 - types are written with a () in the size_of function
4 - remove the f77 multiline. Also remove the first blank chars
5 - replace f77 comments with f95 comments
6 - remove the (void **) & that is not useful in fortran
7 - remove the * in front of the inserted variables
8 - declare the origin_var_name as a target
9 - make DMA transfer variable to be 64 bit variables
"""
p4a_util.debug ("Processing fortran_wrapper " + file_name)
indent = " "
# Get the code to be post process:
code = p4a_util.read_file (file_name, True)
# Step 1
# Insert the needed use statement right after the subroutine
# declaration: common interface to be used:
use_string = indent + "use iso_c_binding\n"
use_string += indent + "use p4a_runtime_interface\n"
# Add the dedicated interface i.e use the KERNEL_LAUNCHER prefix
# instead of the FORTRAN_WRAPPER prefix:
use_string += indent + "use "
use_string += subroutine_name.replace(self.get_fortran_wrapper_prefix(),
self.get_launcher_prefix())
use_string += "_interface\n"
# Step 2
# First identify the inserted variable:
var_prefix = self.get_kernel_load_store_var_prefix ()
var_suffix = self.get_kernel_load_store_var_suffix ()
var_re = re.compile(var_prefix + "\\w+" + var_suffix + "\\d+")
inserted_var_l = var_re.findall (code)
origin_var_s = set ()
inserted_var_s = set ()
p4a_util.add_list_to_set (inserted_var_l, inserted_var_s)
inserted_var_decl = indent + "type (c_ptr) ::"
first = True
for var in inserted_var_s:
if (first == True):
first = False
else:
inserted_var_decl += ","
inserted_var_decl += " " + var
# Extract the original variable name:
origin_var_s.add (var.replace (var_prefix, "").replace (var_suffix, ""))
inserted_var_decl += "\n" + indent + "integer (c_size_t), target :: p4a_zero"
# Step 3
c_sizeof_replace = dict()
c_sizeof_replace ["CHARACTER()"] = "c_char"
c_sizeof_replace ["LOGICAL()"] = "c_bool"
c_sizeof_replace ["INTEGER()"] = "c_int"
c_sizeof_replace ["INTEGER*4()"] = "c_int"
c_sizeof_replace ["INTEGER*8()"] = "c_long_long"
c_sizeof_replace ["REAL()"] = "c_float"
c_sizeof_replace ["REAL*4()"] = "c_float"
c_sizeof_replace ["REAL*8()"] = "c_double"
for k, v in c_sizeof_replace.iteritems ():
code = code.replace (k, v)
# Step 4
F77_INDENTATION = "\n "
F95_INDENTATION = "\n" + indent
F77_CONTINUATION = "\n &"
F95_CONTINUATION = "&\n "
code = code.replace (F77_CONTINUATION, " ")
code = code.replace (F77_INDENTATION, F95_INDENTATION)
# Step 5
F77_CONTINUATION = "\nC"
F95_CONTINUATION = "\n!"
code = code.replace (F77_CONTINUATION, F95_CONTINUATION)
# Step 6
code = code.replace ("(void **) &", "")
# Step 7
for var in inserted_var_s:
code = code.replace ("*" + var, var)
# Step 8
# Insert the target attribute for all declared variables:
types_l = ["CHARACTER", "LOGICAL", "INTEGER*4", "INTEGER*8", "INTEGER",
"REAL*4", "REAL*8","REAL"]
for t in types_l:
code = code.replace (t, t.lower () + ", target ::")
# Step 9
function_l = ["P4A_COPY_FROM_ACCEL_2D", "P4A_COPY_TO_ACCEL_2D"]
for func in function_l:
# This RE matches the full line where the CALL to the DMA
# transfer happens:
func_line_re = re.compile("^ *CALL " + func + "\(.*\)",
re.MULTILINE)
            # This RE matches the same line and extracts the parameters:
func_sig_re = re.compile("^ *CALL " + func + "\((.*)\)",
re.MULTILINE)
            # This RE matches the function name:
func_name_re = re.compile("^ *(CALL " + func + ")\(.*\)",
re.MULTILINE)
new_line_l = []
line_l = func_line_re.findall (code)
name_l = func_name_re.findall (code)
sig_l = func_sig_re.findall (code)
insert_init = True
for index in range (len (line_l)):
# For each match we need to ensure that parameter 2..7 are
# 64 bit long:
no_space = sig_l[index].replace (" ", "")
arg_l = no_space.split (",")
for arg_num in range (1,7):
arg_l[arg_num] += "+p4a_zero"
new_line = indent + name_l [index] +"("
first = True
for arg in arg_l:
if first:
first = False
else:
new_line += ","
new_line += arg
new_line += ")"
if insert_init:
insert_init = False
code = code.replace (line_l[index], indent + "p4a_zero = 0\n" + line_l[index])
code = code.replace (line_l[index], new_line)
# Step 10
# Identify where to insert the USE string and the inserted variable
# declaration:
subroutine_line_re = re.compile("SUBROUTINE " + subroutine_name + ".*$",
re.MULTILINE)
subroutine_l = subroutine_line_re.findall (code)
assert (len (subroutine_l) == 1)
code = code.replace (subroutine_l[0], subroutine_l[0] + "\n" +
use_string + inserted_var_decl)
# Write the post processed code:
p4a_util.write_file(file_name, code, True)
return
def generated_modules_is_empty (self):
return (len (self.generated_modules) == 0)
def crough_modules_is_empty (self):
return (len (self.crough_modules) == 0)
def interface_modules_is_empty (self):
return (len (self.interface_modules) == 0)
def get_launcher_prefix (self):
return self.workspace.props.GPU_LAUNCHER_PREFIX
def get_kernel_prefix (self):
return self.workspace.props.GPU_KERNEL_PREFIX
def get_wrapper_prefix (self):
return self.workspace.props.GPU_WRAPPER_PREFIX
def get_fortran_wrapper_prefix (self):
return self.workspace.props.GPU_FORTRAN_WRAPPER_PREFIX
def get_kernel_load_store_var_prefix (self):
return self.workspace.props.KERNEL_LOAD_STORE_VAR_PREFIX
def get_kernel_load_store_var_suffix (self):
return self.workspace.props.KERNEL_LOAD_STORE_VAR_SUFFIX
def fortran_wrapper_p (self, file_name):
prefix = self.get_fortran_wrapper_prefix()
fortran_wrapper_file_name_re = re.compile(prefix + "_\\w+.f[0-9]*")
m = fortran_wrapper_file_name_re.match (os.path.basename (file_name))
return (m != None)
def parallelize(self, fine_grain = False, filter_select = None,
filter_exclude = None, apply_phases_before = [], apply_phases_after = [], omp=False):
"""Apply transformations to parallelize the code in the workspace
"""
all_modules = self.filter_modules(filter_select, filter_exclude)
if (self.astrad and not self.spear):
# find top function name
top_function_name = ""
for m in all_modules:
if (not m.callers) and not ('bswap' in m.name):
top_function_name = m.name
#print("ASTRAD: top function name " + m.name)
break
if ('_kernel' in top_function_name):
self.astrad_module_name = top_function_name.split('_kernel')[0]
#print ("ASTRAD: astrad module name " + self.astrad_module_name)
self.astrad_kernel_name = top_function_name
else:
self.astrad_module_name = top_function_name
#print ("ASTRAD: astrad module name " + self.astrad_module_name)
self.astrad_kernel_name = top_function_name + '_kernel'
if fine_grain:
# Set to False (mandatory) for A&K algorithm on C source file
self.workspace.props.memory_effects_only = self.fortran
        # Apply requested phases before parallelization
apply_user_requested_phases(all_modules, apply_phases_before)
# Try to privatize all the scalar variables in loops:
all_modules.privatize_module()
# Use a different //izing scheme for openmp and the other accelerators
# Wait for p4a 2.0 for better engineering
if omp:
# first step is to find big parallel loops
all_modules.coarse_grain_parallelization(concurrent=True)
# and the one with reductions
all_modules.flag_parallel_reduced_loops_with_openmp_directives(concurrent=True)
# on the **others**, try to distribute them
if fine_grain:
self.workspace.props.parallelize_again_parallel_code=False
self.workspace.props.memory_effects_only = False # mandatory for internalize_parallel_code
all_modules.internalize_parallel_code(concurrent=True)
# and flag the remaining reductions if possible
# !! Show first a test case where it is useful !!
# all_modules.flag_parallel_reduced_loops_with_openmp_directives(concurrent=True)
else:
if fine_grain:
# Use a fine-grain parallelization à la Allen & Kennedy:
all_modules.internalize_parallel_code(concurrent=True)
# Always use a coarse-grain parallelization with regions:
all_modules.coarse_grain_parallelization(concurrent=True)
#all_modules.flatten_code(unroll=False,concurrent=True)
#all_modules.simplify_control(concurrent=True)
all_modules.loop_fusion(concurrent=True)
#all_modules.localize_declaration(concurrent=True)
# Scalarization doesn't preserve perfect loop nest at that time
#all_modules.scalarization(concurrent=True)
# Privatization information has been lost because of flatten_code
#all_modules.privatize_module()
#if fine_grain:
# Use a fine-grain parallelization à la Allen & Kennedy:
#all_modules.internalize_parallel_code(concurrent=True)
# Apply requested phases after parallelization:
apply_user_requested_phases(all_modules, apply_phases_after)
def gpuify(self, filter_select = None,
filter_exclude = None,
fine_grain = False,
apply_phases_kernel = [],
apply_phases_kernel_launcher = [],
apply_phases_wrapper = [],
apply_phases_after = []):
"""Apply transformations to the parallel loop nested found in the
workspace to generate GPU-oriented code
"""
all_modules = self.filter_modules(filter_select, filter_exclude)
# Some "risky" optimizations
#all_modules.flatten_code(unroll=False,concurrent=True)
#all_modules.simplify_control(concurrent=True)
#all_modules.loop_fusion(concurrent=True)
# Have to debug (see polybench/2mm.c)
#all_modules.localize_declaration(concurrent=True)
all_modules.scalarization(concurrent=True,keep_perfect_parallel_loop_nests=True)
#all_modules.gpu_promote_sequential()
# We handle atomic operations here
if self.atomic:
# Idem for this phase:
all_modules.replace_reduction_with_atomic(concurrent=True)
# In CUDA there is a limitation on 2D grids of thread blocks, in
# OpenCL there is a 3D limitation, so limit parallelism at 2D
# top-level loops inside parallel loop nests:
# Fermi and more recent device allows a 3D grid :)
if self.cuda_cc >= 2 :
all_modules.limit_nested_parallelism(NESTED_PARALLELISM_THRESHOLD = 3, concurrent=True)
else:
all_modules.limit_nested_parallelism(NESTED_PARALLELISM_THRESHOLD = 2, concurrent=True)
# First, only generate the launchers to work on them later. They
# are generated by outlining all the parallel loops. In the
# Fortran case, we want the launcher to be wrapped in an
# independent Fortran function so that it will be prettyprinted
# later in... C (for OpenCL or CUDA kernel definition). :-)
# go through the call graph in a top - down fashion
def gpuify_all(module):
module.gpu_ify(GPU_USE_WRAPPER = False,
GPU_USE_KERNEL = False,
GPU_USE_FORTRAN_WRAPPER = self.fortran,
GPU_USE_LAUNCHER = True,
GPU_USE_LAUNCHER_INDEPENDENT_COMPILATION_UNIT = self.c99,
GPU_USE_KERNEL_INDEPENDENT_COMPILATION_UNIT = self.c99,
GPU_USE_WRAPPER_INDEPENDENT_COMPILATION_UNIT = self.c99,
OUTLINE_WRITTEN_SCALAR_BY_REFERENCE = False, # unsure
OUTLINE_CALLEES_PREFIX="p4a_device_",
annotate_loop_nests = True) # annotate for recover parallel loops later
# recursive walk through
[gpuify_all(c) for c in module.callees if c.name.find(self.get_launcher_prefix ()) !=0 and c not in all_modules]
# call gpuify_all recursively starting from the heads of the callgraph
# Keep in mind that all_modules can be filtered !!!
# this is broken if m=>n=>p and n is filtered :-(
[ gpuify_all(m) for m in all_modules if not [val for val in all_modules if val.name in [ n.name for n in m.callers]]]
# Select kernel launchers by using the fact that all the generated
# functions have their names beginning with the launcher prefix:
launcher_prefix = self.get_launcher_prefix ()
kernel_launcher_filter_re = re.compile(launcher_prefix + "_.*[^!]$")
kernel_launchers = self.workspace.filter(lambda m: kernel_launcher_filter_re.match(m.name) and not m.static_p())
# We flag loops in kernel launchers as parallel, based on the annotation
# previously made
kernel_launchers.gpu_parallelize_annotated_loop_nest();
kernel_launchers.gpu_clear_annotations_on_loop_nest();
# Normalize all loops in kernels to suit hardware iteration spaces:
kernel_launchers.loop_normalize(
# Loop normalize to be GPU friendly, even if the step is already 1:
LOOP_NORMALIZE_ONE_INCREMENT = True,
# Arrays start at 0 in C, 1 in Fortran so the iteration loops:
LOOP_NORMALIZE_LOWER_BOUND = 1 if self.fortran else 0,
# It is legal in the following by construction (...hmmm to verify)
LOOP_NORMALIZE_SKIP_INDEX_SIDE_EFFECT = True,
concurrent=True)
# Apply requested phases to kernel_launchers:
apply_user_requested_phases(kernel_launchers, apply_phases_kernel_launcher)
# End to generate the wrappers and kernel contents, but not the
# launchers that have already been generated:
kernel_launchers.gpu_ify(GPU_USE_LAUNCHER = False,
# opencl option will produce independent kernel and wrapper files
GPU_USE_KERNEL_INDEPENDENT_COMPILATION_UNIT = self.opencl,
GPU_USE_WRAPPER_INDEPENDENT_COMPILATION_UNIT = False,
OUTLINE_INDEPENDENT_COMPILATION_UNIT = self.c99,
OUTLINE_WRITTEN_SCALAR_BY_REFERENCE = False, # unsure
concurrent=True)
# Select kernels by using the fact that all the generated kernels
# have their names of this form:
kernel_prefix = self.get_kernel_prefix ()
kernel_filter_re = re.compile(kernel_prefix + "_\\w+$")
kernels = self.workspace.filter(lambda m: kernel_filter_re.match(m.name))
# scalarization is a nice optimization :)
# currently it's very limited when applied in kernel, but cannot be applied outside neither ! :-(
#kernels.scalarization(concurrent=True)
if int(self.kernel_unroll) > 0:
for k in kernels:
for l in k.loops():
l.unroll(rate=int(self.kernel_unroll),
loop_unroll_with_prologue=False)
# Apply requested phases to kernel:
apply_user_requested_phases(kernels, apply_phases_kernel)
# Select wrappers by using the fact that all the generated wrappers
# have their names of this form:
wrapper_prefix = self.get_wrapper_prefix()
wrapper_filter_re = re.compile(wrapper_prefix + "_\\w+$")
wrappers = self.workspace.filter(lambda m: wrapper_filter_re.match(m.name))
# clean all, this avoid lot of warnings at compile time
all_modules.clean_declarations()
kernels.clean_declarations()
wrappers.clean_declarations()
kernel_launchers.clean_declarations()
if hasattr(self, 'spear') and self.spear:
# No communication for Spear mode
pass
if not self.com_optimization :
# Add communication around all the call site of the kernels. Since
# the code has been outlined, any non local effect is no longer an
# issue:
#kernel_launchers.display("print_code_regions")
#kernels.display("print_code_regions")
#kernels.display("print_code_preconditions")
kernel_launchers.kernel_load_store(concurrent=True,
ISOLATE_STATEMENT_EVEN_NON_LOCAL = True
)
else :
# The following should be done somewhere else with a generic
# stub concept... When it is available.
# Identify kernels first
kernel_launchers.flag_kernel()
# Kernels for fftw3 runtime:
fftw3_kernel_filter_re = re.compile("^fftw.?_execute")
fftw3_kernels = self.workspace.filter(lambda m: fftw3_kernel_filter_re.match(m.name))
fftw3_kernels.flag_kernel()
self.workspace.fun.main.kernel_data_mapping(KERNEL_LOAD_STORE_LOAD_FUNCTION="P4A_runtime_copy_to_accel",KERNEL_LOAD_STORE_STORE_FUNCTION="P4A_runtime_copy_from_accel")
# Apply requested phases to wrappers:
apply_user_requested_phases(wrappers, apply_phases_wrapper)
# Wrap kernel launch for communication optimization runtime:
if self.com_optimization :
wrappers.wrap_kernel_argument(WRAP_KERNEL_ARGUMENT_FUNCTION_NAME="P4A_runtime_host_ptr_to_accel_ptr")
wrappers.cast_at_call_sites()
# Select Fortran wrappers by using the fact that all the generated
# Fortran wrappers have their names of this form:
f_wrapper_prefix = self.get_fortran_wrapper_prefix ()
f_wrapper_filter_re = re.compile(f_wrapper_prefix + "_\\w+$")
f_wrappers = self.workspace.filter(lambda m: f_wrapper_filter_re.match(m.name))
# Unfortunately CUDA (at least up to 4.0) does not accept C99
# array declarations with sizes also passed as parameters in
# kernels. So, we degrade the quality of the generated code by
# generating array declarations as pointers and by accessing them
# as array[linearized expression]:
if self.c99 or self.fortran or self.opencl:
skip_static_length_arrays = self.c99 and not self.opencl
use_pointer = self.c99 or self.opencl
kernel_launchers.linearize_array(use_pointers=use_pointer,cast_at_call_site=True,skip_static_length_arrays=skip_static_length_arrays)
wrappers.linearize_array(use_pointers=use_pointer,cast_at_call_site=True,skip_static_length_arrays=skip_static_length_arrays)
def linearize_all(k):
                k.linearize_array(use_pointers=use_pointer,cast_at_call_site=True,skip_static_length_arrays=skip_static_length_arrays, skip_local_arrays=True) # always skip locally declared arrays for kernels. Assume there is no VLA in the kernel, which would lead to an alloca anyway
[ linearize_all(c) for c in k.callees ]
[ linearize_all(c) for c in kernels ]
        # SG: not useful anymore. Uncomment this if you want to try it again, this is the right place to do it
## Unfold kernel, usually won't hurt code size, but less painful with
## static functions declared in accelerator compilation units
#kernels.unfold()
# add sentinel around loop nests in launcher, used to replace the loop
# nest with a call kernel in post-processing
kernel_launchers.gpu_loop_nest_annotate(parallel=True);
# Update the list of CUDA modules:
p4a_util.add_list_to_set (map(lambda x:x.name, kernels),
self.cuda_modules)
p4a_util.add_list_to_set (map(lambda x:x.name, wrappers),
self.cuda_modules)
# Set return type for wrappers && kernel:
if (self.fortran == False):
wrappers.set_return_type_as_typedef(SET_RETURN_TYPE_AS_TYPEDEF_NEW_TYPE=self.wrapper_return_type)
kernels.set_return_type_as_typedef(SET_RETURN_TYPE_AS_TYPEDEF_NEW_TYPE=self.kernel_return_type)
if (self.c99 == True):
self.generated_modules.extend (map(lambda x:x.name, kernel_launchers))
#self.generated_modules.extend (map(lambda x:x.name, wrappers))
#self.generated_modules.extend (map(lambda x:x.name, kernels))
else:
# RK: in the following, I don't understand why we display things...
# Generate the C version of kernels, wrappers and launchers.
# Kernels and wrappers need to be prettyprinted with arrays as
# pointers because they will be .cu files
kernels.display ("c_printed_file",
CROUGH_INCLUDE_FILE_LIST="p4a_accel.h",
DO_RETURN_TYPE_AS_TYPEDEF=True,
CROUGH_ARRAY_PARAMETER_AS_POINTER=True,
SET_RETURN_TYPE_AS_TYPEDEF_NEW_TYPE=self.kernel_return_type)
wrappers.display ("c_printed_file",
CROUGH_INCLUDE_FILE_LIST="p4a_accel.h",
DO_RETURN_TYPE_AS_TYPEDEF=True,
CROUGH_ARRAY_PARAMETER_AS_POINTER=True,
SET_RETURN_TYPE_AS_TYPEDEF_NEW_TYPE=self.wrapper_return_type)
# RK: following comment to be fixed...
# Apply the set_return_type_as_typedef phase using regular
# expressions because the phase is not available in Fortran
# kernel_launchers will be .c file so C99 is allowed
kernel_launchers.display ("c_printed_file",
DO_RETURN_TYPE_AS_TYPEDEF=False,
CROUGH_ARRAY_PARAMETER_AS_POINTER=False)
# RK: should be done in 1 line...
# Those newly generated modules have to be appended to the
# dedicated list for later processing:
self.crough_modules.extend (map(lambda x:x.name, kernels))
self.crough_modules.extend (map(lambda x:x.name, wrappers))
self.crough_modules.extend (map(lambda x:x.name, kernel_launchers))
# Generate the interface of the wrappers. This will be used to call
# the C functions of the wrappers from the fortran_wrapper
# subroutines:
kernel_launchers.print_interface ()
self.interface_modules.extend (map(lambda x:x.name, kernel_launchers))
self.generated_modules.extend (map(lambda x:x.name, f_wrappers))
# Apply requested phases to kernels, wrappers and kernel_launchers
# after gpuify():
apply_user_requested_phases(kernels, apply_phases_after)
apply_user_requested_phases(wrappers, apply_phases_after)
apply_user_requested_phases(kernel_launchers, apply_phases_after)
#self.workspace.all_functions.display()
# Save the list of kernels for later work:
self.kernels.extend (map(lambda x:x.name, kernels))
if self.cuda:
self.launchers.extend (map(lambda x:x.name, kernel_launchers))
if self.opencl:
# Comment the place where the opencl wrapper declaration must be placed
# from the post-process
for launcher in kernel_launchers:
self.workspace[launcher.name].prepend_comment(PREPEND_COMMENT = "Opencl wrapper declaration\n")
self.generated_modules.extend(map(lambda x:x.name, wrappers))
# To be able to inject Par4All accelerator run time initialization
# later:
if "main" in self.workspace:
self.workspace["main"].prepend_comment(PREPEND_COMMENT = "// Prepend here P4A_init_accel\n")
else:
p4a_util.warn('''
There is no "main()" function in the given sources.
That means the P4A Accel runtime initialization can not be
inserted and that the compiled application may not work.
If you build a P4A executable from partial p4a output, you
should add a
#include <p4a_accel.h>
at the beginning of the .c file containing the main()
and add at the beginning of main() a line with:
P4A_init_accel;
''')
def ompify(self,
filter_select = None,
filter_exclude = None,
apply_phases_before = [],
apply_phases_after = []):
"""Add OpenMP #pragma from loop-parallel flag internal
representation to generate... OpenMP code!"""
modules = self.filter_modules(filter_select, filter_exclude);
# Apply requested phases before ompify to modules:
apply_user_requested_phases(modules, apply_phases_before)
modules.ompify_code(concurrent=True)
modules.omp_merge_pragma(concurrent=True)
if self.pocc:
for m in modules:
try:
m.poccify(options=self.pocc_options)
except RuntimeError:
e = sys.exc_info()
p4a_util.warn("PoCC returned an error : " + str(e[1]))
# Apply requested phases after ompify to modules:
apply_user_requested_phases(modules, apply_phases_after)
def accel_post(self, file, dest_dir = None):
'''Method for post processing "accelerated" files'''
p4a_util.info("Post-processing " + file)
post_process_script = os.path.join(p4a_util.get_program_dir(), "p4a_post_processor.py")
args = [ post_process_script ]
if dest_dir:
args += [ '--dest-dir', dest_dir ]
args.append(file)
p4a_util.run(args,force_locale = None)
#~ subprocess.call(args)
def get_p4a_accel_defines (self):
defines = []
defines.append ("-D")
defines.append ("__thread=""")
defines.append ("-DP4A_ACCEL_OPENMP")
defines.append ("-D" + self.wrapper_return_type + "=void")
defines.append ("-D" + self.kernel_return_type + "=void")
return defines
def kernel_to_wrapper_name (self, name):
""" Return the wrapper name according to the kernel name using the
good pips property.
"""
return name.replace (self.get_kernel_prefix (), self.get_wrapper_prefix ())
def kernel_to_launcher_name (self, name):
""" Return the launcher name according to the kernel name using the
good pips property.
"""
return name.replace (self.get_kernel_prefix (), self.get_launcher_prefix ())
def launchers_insert_extern_C (self):
"""Insert the extern C block construct to the whole file. The all
the file functions will be callable from a C code.
"""
for launcher in self.launchers:
            # Where the file does dwell in the .database workspace:
launcher_file = os.path.join(self.workspace.dirname, "Src",
launcher + ".c")
# First open for read and get content:
src = open (launcher_file, 'r')
lines = src.readlines ()
src.close ()
# Then add the extern C block:
dst = open (launcher_file, 'w')
dst.write ('#ifdef __cplusplus\nextern "C" {\n#endif\n')
for line in lines:
dst.write (line)
dst.write ("\n#ifdef __cplusplus\n}\n#endif\n")
dst.close ()
def merge_lwk (self):
""" merge launcher wrapper and kernel in one file. The order is
important the launcher call the wrapper that call the kernel. So
they have to be in the inverse order into the file.
"""
for kernel in self.kernels:
# find the associated wrapper with the kernel
wrapper = self.kernel_to_wrapper_name (kernel)
launcher = self.kernel_to_launcher_name (kernel)
# merge the files in the kernel file
# Where the files do dwell in the .database workspace:
wrapper_file = os.path.join(self.workspace.dirname, "Src",
wrapper + ".c")
kernel_file = os.path.join(self.workspace.dirname, "Src",
kernel + ".c")
launcher_file = os.path.join(self.workspace.dirname, "Src",
launcher + ".c")
if self.cuda:
p4a_util.merge_files (kernel_file, [wrapper_file, launcher_file])
# remove the wrapper from the modules to be processed since already
#in the kernel
self.generated_modules.remove (wrapper)
self.generated_modules.remove (launcher)
def save_header (self, output_dir, name):
content = "/*All the generated includes are summarized here*/\n\n"
for header in self.header_files:
content += '#include "' + header + '"\n'
p4a_util.write_file (os.path.join (output_dir, name), content)
def save_crough (self, output_dir):
""" Save the crough files that might have been generated by
        PIPS during the p4a process. Those files need special handling since
they are not produced in the standard Src folder by the unsplit phase.
"""
result = []
for name in self.crough_modules:
            # Where the file does dwell in the .database workspace:
pips_file = os.path.join(self.workspace.dirname,
name, name + ".c")
# set the destination file
output_name = name + ".c"
if name in self.cuda_modules:
if self.cuda:
output_name = p4a_util.change_file_ext(output_name, ".cu")
# generate the header file
header_file = os.path.join(output_dir, name + ".h")
self.header_files.append (name + ".h")
p4a_util.generate_c_header (pips_file, header_file,
self.get_p4a_accel_defines ())
# The final destination
output_file = os.path.join(output_dir, output_name)
# Copy the PIPS production to its destination:
shutil.copyfile(pips_file, output_file)
result.append(output_file)
return result
def save_generated (self, output_dir, subs_dir):
""" Save the generated files that might have been generated by
PIPS during the p4a process.
"""
result = []
if (self.fortran == True):
extension_in = ".f"
extension_out = ".f08"
elif (self.opencl == True):
extension_in = ".cl"
extension_out = ".cl"
else:
extension_in = ".c"
if (self.cuda == True):
extension_out = ".cu"
# elif (self.opencl == True):
#extension_in = ".cl"
# extension_out = ".cl"
else:
extension_out = ".c"
#p4a_util.warn("generated modules length "+str(len(self.generated_modules)))
for name in self.generated_modules:
p4a_util.debug("Save generated : '" + name + "'")
# Where the file actually is in the .database workspace:
pips_file = os.path.join(self.workspace.dirname, "Src",
name + extension_in)
output_name = name + extension_out
#p4a_util.warn("pips_file " +pips_file)
if self.accel and (p4a_util.c_file_p(pips_file) or p4a_util.opencl_file_p(pips_file)):
# We generate code for P4A Accel, so first post process
                # the output and produce the result in the P4A subdirectory
# of the .database
self.accel_post(pips_file,
os.path.join(self.workspace.dirname, "P4A"))
# update the pips file to the postprocess one
pips_file = os.path.join(self.workspace.dirname, "P4A", name + extension_in)
if (self.opencl == True):
shutil.copyfile(pips_file, pips_file + ".tmp")
os.remove(pips_file)
h_file = os.path.join(os.environ["P4A_ROOT"],"share","p4a_accel","p4a_accel_wrapper-OpenCL.h")
p4a_util.merge_files (pips_file, [h_file, pips_file+".tmp"])
os.remove(pips_file+".tmp")
end_file = os.path.join(subs_dir, output_name)
shutil.copyfile(pips_file, end_file)
#p4a_util.warn("end_file after join "+end_file)
# The final destination
output_file = os.path.join(output_dir, output_name)
if (self.fortran_wrapper_p (pips_file) == True):
self.post_process_fortran_wrapper (pips_file, name)
# Copy the PIPS production to its destination:
shutil.copyfile(pips_file, output_file)
result.append(output_file)
if (self.fortran == False):
# for C generate the header file
header_file = os.path.join(output_dir, name + ".h")
self.header_files.append (name + ".h")
p4a_util.generate_c_header (pips_file, header_file,
self.get_p4a_accel_defines ())
if (self.astrad and not self.spear):
self.astrad_postproc.set_generated_kernel_files(result)
return result
def save_interface (self, output_dir):
""" Save the interface files that might have been generated during by
PIPS during the p4a process. Those files need a special handling since
they are not produced in the standard Src folder by the unsplit phase.
"""
result = []
flag = True
for name in self.interface_modules:
            # Where the file does dwell in the .database workspace:
pips_file = os.path.join(self.workspace.dirname, name,
name + "_interface.f08")
output_name = name + "_interface.f08"
# The final destination
output_file = os.path.join(output_dir, output_name)
# Copy the PIPS production to its destination:
shutil.copyfile(pips_file, output_file)
result.append(output_file)
if flag:
result.append (os.path.join(os.environ["P4A_ACCEL_DIR"],
"p4a_runtime_interface.f95"))
flag = False
return result
def save_user_file (self, dest_dir, prefix, suffix):
""" Save the user file appended to the Workspace at the begining
"""
result = []
# For all the user defined files from the workspace:
for file in self.files:
if file in self.accel_files:
# We do not want to remove the stubs file from the
# distribution... :-/
#os.remove(file)
continue
if (self.astrad and not self.spear):
self.astrad_postproc.add_source_name(file)
(dir, name) = os.path.split(file)
            # Where the file does dwell in the .database workspace:
pips_file = os.path.join(self.workspace.dirname, "Src", name)
#p4a_util.warn("pips_file save_user_file "+pips_file)
# Recover the includes in the given file only if the flags have
# been previously set and this is a C program:
if self.recover_includes and not self.native_recover_includes and p4a_util.c_file_p(file):
subprocess.call([ 'p4a_recover_includes',
'--simple', pips_file ])
# Update the destination directory if one was given:
if dest_dir:
dir = dest_dir
if not (os.path.isdir(dir)):
os.makedirs (dir)
output_name = prefix + name
if suffix:
output_name = p4a_util.file_add_suffix(output_name, suffix)
# The final destination
output_file = os.path.join(dir, output_name)
if (self.astrad and not self.spear):
self.astrad_postproc.add_output_file_name(output_file)
if self.accel and p4a_util.c_file_p(file):
# We generate code for P4A Accel, so first post process
# the output:
self.accel_post(pips_file,
os.path.join(self.workspace.dirname, "P4A"))
# Where the P4A output file does dwell in the .database
# workspace:
p4a_file = os.path.join(self.workspace.dirname, "P4A", name)
# Update the normal location then:
pips_file = p4a_file
#p4a_util.warn("pips_file save_user_file 2 "+pips_file)
if (self.cuda == True) and (self.c99 == False):
                # Some C99 syntax is forbidden with CUDA. That is why there is
                # a --c99 option that generates, in the original C99 files, a
                # unique call site to the wrappers (and then to the kernel). In
                # that case the original files remain standard C99 files and
                # the CUDA files are only the wrappers and the kernel
                # (cf save_generated).
output_file = p4a_util.change_file_ext(output_file, ".cu")
#if (self.opencl == True):
#self.merge_function_launcher(pips_file)
#self.accel_post(pips_file)
#output_file = p4a_util.change_file_ext(output_file, ".c")
# Copy the PIPS production to its destination:
shutil.copyfile(pips_file, output_file)
result.append (output_file)
return result
def save(self, dest_dir = None, prefix = "", suffix = "p4a"):
"""Final post-processing and save the files of the workspace. This
includes the original files defined by the user and also all new
files that might have been generated by PIPS, including headers.
"""
if self.astrad and not self.spear:
# find output dialect and initialize astrad postprocessor
if self.cuda:
dialect = "cuda"
elif self.opencl:
dialect = "opencl"
elif self.openmp:
dialect = "openmp"
else:
p4a_util.die("ASTRAD post processor ERROR: unexpected output dialect")
self.astrad_postproc = p4a_astrad.p4a_astrad_postprocessor(dialect, dest_dir)
self.astrad_postproc.set_module_name(self.astrad_module_name)
output_files = []
# Do not allow None suffix or prefix:
if prefix is None:
prefix = ""
if suffix is None:
suffix = ""
# Set the suffix if needed to avoid file destruction
        if (dest_dir is None) and (prefix == "") and (suffix == ""):
suffix = "p4a"
# Append or prepend the . to prefix or suffix
if not (suffix == ""):
suffix = "." + suffix
if not (prefix == ""):
prefix = prefix + "."
# Regenerate the sources file in the workspace. Do not generate
# OpenMP-style output since we have already added OpenMP
# decorations:
self.workspace.props.PRETTYPRINT_SEQUENTIAL_STYLE = "do"
# The default place is fine for us since we work later on the files:
self.workspace.save()
# Create the folder for p4a new files if needed
new_file_flag = self.new_file_generated ()
if (self.astrad and (not self.spear) and dest_dir):
self.astrad_postproc.set_output_directory(dest_dir)
output_dir = dest_dir
else:
output_dir = os.path.join(os.getcwd(), self.new_files_folder)
#p4a_util.warn("p4a new file " + output_dir)
if dest_dir:
output_dir = os.path.join(dest_dir,self.new_files_folder)
if ((not (os.path.isdir(output_dir))) and (new_file_flag == True)):
os.makedirs (output_dir)
        # For the OpenCL kernels that have been pushed into generated_files
        # but must be saved at the working place (substitutive directory)
subs_dir = os.getcwd()
if dest_dir:
subs_dir = dest_dir
        # Nvcc compiles .cu files as C++, thus we add an extern "C" declaration
        # to prevent name mangling
if ((self.c99 == True) and (self.cuda == True)):
self.launchers_insert_extern_C ()
#no longer needed
#self.merge_lwk ()
# save the user files
output_files.extend (self.save_user_file (dest_dir, prefix, suffix))
# astrad: generate kernel.dsl file from Pips xml output
if (self.astrad and self.openmp and not self.spear):
            # find the C99 inner function, knowing that its name is the name of
            # the top-level module plus the _kernel suffix; beware of
            # compilation units
if self.astrad_kernel_name in self.workspace:
kernel = self.workspace[self.astrad_kernel_name]
else:
kernel_filter_re = re.compile(".*!" + self.astrad_kernel_name)
possible_kernels = self.workspace.filter(lambda m: kernel_filter_re.match(m.name))
if len(possible_kernels) ==0:
p4a_util.die("ASTRAD post processor ERROR: no C99 kernel found")
elif len(possible_kernels) > 1:
p4a_util.die("ASTRAD post processor ERROR: several possible C99 kernels found")
else: # there is a single module
[kernel] = tuple(possible_kernels)
kernel.print_xml_application()
xml_file = os.path.join(self.workspace.dirname, kernel.show("XML_PRINTED_FILE"))
self.astrad_postproc.save_kernel_dsl_file(xml_file)
if self.opencl:
# HACK inside : we expect the wrapper and the kernel to be in the
# same file which MUST be called wrapper_name.c
for kernel in self.kernels:
# find the associated wrapper with the kernel
src_dir = os.path.join(self.workspace.dirname, "Src")
wrapper = os.path.join(src_dir,self.kernel_to_wrapper_name(kernel)+".cl")
kernel = os.path.join(src_dir,kernel+".c")
shutil.copyfile(kernel, wrapper)
# save pips generated files in the dedicated folder
output_files.extend (self.save_crough (output_dir))
output_files.extend (self.save_interface (output_dir))
output_files.extend (self.save_generated (output_dir, subs_dir))
#output_files.extend (self.save_generated (output_dir))
#p4a_util.warn("output_dir "+ output_dir)
        # generate one header to wrap all the generated header files
if (new_file_flag == True):
self.save_header (output_dir, self.new_files_include)
if self.astrad and not self.spear:
self.astrad_postproc.save_dsl_file()
self.astrad_postproc.rename_module()
return output_files
def new_file_generated (self):
return not (self.generated_modules_is_empty () and
self.crough_modules_is_empty () and
self.interface_modules_is_empty ())
def __del__(self):
# Waiting for pyps.workspace.close!
if self.workspace:
del self.workspace
if __name__ == "__main__":
print(__doc__)
print("This module is not directly executable")
# Some Emacs stuff:
### Local Variables:
### mode: python
### mode: flyspell
### ispell-local-dictionary: "american"
### tab-width: 4
### End:
| 43.592313
| 283
| 0.616211
| 58,867
| 0.926762
| 0
| 0
| 0
| 0
| 0
| 0
| 23,015
| 0.362333
|
dd173502c78ac900cdc7ccca958ee936158c16b0
| 4,790
|
py
|
Python
|
hivwholeseq/cross_sectional/get_subtype_entropy_synonymous.py
|
iosonofabio/hivwholeseq
|
d504c63b446c3a0308aad6d6e484ea1666bbe6df
|
[
"MIT"
] | 3
|
2016-12-01T03:12:06.000Z
|
2021-07-03T01:29:26.000Z
|
hivwholeseq/cross_sectional/get_subtype_entropy_synonymous.py
|
iosonofabio/hivwholeseq
|
d504c63b446c3a0308aad6d6e484ea1666bbe6df
|
[
"MIT"
] | null | null | null |
hivwholeseq/cross_sectional/get_subtype_entropy_synonymous.py
|
iosonofabio/hivwholeseq
|
d504c63b446c3a0308aad6d6e484ea1666bbe6df
|
[
"MIT"
] | 3
|
2016-01-17T03:43:46.000Z
|
2020-03-25T07:00:11.000Z
|
# vim: fdm=marker
'''
author: Fabio Zanini
date: 12/01/15
content: Get subtype entropy from alignments, since it's used so often.
'''
# Modules
import os
import argparse
import cPickle as pickle
import numpy as np
from hivwholeseq.utils.miseq import alpha
from hivwholeseq.utils.one_site_statistics import get_entropy
from hivwholeseq.cross_sectional.filenames import (
get_subtype_reference_alignment_filename,
get_subtype_reference_alignment_entropy_syn_filename)
from hivwholeseq.cross_sectional.get_subtype_reference_alignment import get_subtype_reference_alignment
# Functions
def get_ali_entropy_syn(alim, positions=None, alpha=alpha[:5], VERBOSE=0):
'''Get entropy of alignment at some positions'''
from collections import defaultdict
from hivwholeseq.utils.sequence import translate_with_gaps as translate
    if len(alim[0]) % 3:
raise ValueError('The alignment length is not a multiple of 3')
if positions is None:
        positions = np.arange(len(alim[0]) // 3)
# The data structure is a nested dict by position and amino acid
S = {}
# Iterate over codons
for pos in positions:
if VERBOSE >= 3:
print pos
asub = alim[:, pos * 3: (pos + 1) * 3]
aacount = defaultdict(lambda: defaultdict(int))
for cod in asub:
cod = ''.join(cod)
aacount[translate(cod)][cod] += 1
Spos = {}
for aa, codd in aacount.iteritems():
af = np.array(codd.values(), float)
af /= af.sum()
Spos[aa] = get_entropy(af)
S[pos] = Spos
return S
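# Minimal usage sketch (toy alignment, hypothetical values): `alim` is expected
# to be a (n_sequences, 3 * n_codons) character matrix, and the result maps
# each codon position to a dict {amino acid: entropy of its codon frequencies}:
#   alim = np.array([list('ATGAAA'), list('ATGAAG')], 'S1')
#   S = get_ali_entropy_syn(alim)
#   # S[0] -> {'M': 0.0}; S[1] -> {'K': entropy of the [0.5, 0.5] codon split}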
def get_subtype_reference_alignment_entropy_syn(region, subtype='B', VERBOSE=0,
refname='HXB2',
type='nuc'):
'''Get the entropy of a large subtype reference alignment'''
import cPickle as pickle
fn = get_subtype_reference_alignment_entropy_syn_filename(region,
subtype=subtype,
refname=refname,
type=type,
VERBOSE=VERBOSE)
with open(fn, 'r') as f:
data = pickle.load(f)
return data
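# Read-back sketch (region and subtype are just example values):
#   S = get_subtype_reference_alignment_entropy_syn('p17', subtype='B')
#   S[0]  # -> {amino acid: synonymous entropy} at the first codon of the region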
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(
description='Calculate entropy of subtype alignment',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--regions', nargs='+', required=True,
help='Regions to analyze (e.g. F1 p17)')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-4]')
parser.add_argument('--subtype', default='B',
help='Subtype of the alignment')
parser.add_argument('--reference', default='HXB2',
help='Reference of the alignment')
parser.add_argument('--save', action='store_true',
help='Save to file')
args = parser.parse_args()
regions = args.regions
VERBOSE = args.verbose
subtype = args.subtype
refname = args.reference
use_save = args.save
Ss = {}
for region in regions:
if VERBOSE >= 1:
print region
if use_save:
if VERBOSE >= 2:
print 'Get alignment'
ali = get_subtype_reference_alignment(region, subtype=subtype,
refname=refname,
VERBOSE=VERBOSE)
alim = np.array(ali, 'S1')
if VERBOSE >= 2:
print 'Compute entropy'
S = get_ali_entropy_syn(alim, VERBOSE=VERBOSE)
if VERBOSE >= 2:
print 'Store to file'
fn_out = get_subtype_reference_alignment_entropy_syn_filename(region,
subtype=subtype,
refname=refname,
VERBOSE=VERBOSE)
with open(fn_out, 'wb') as f:
pickle.dump(S, f)
else:
if VERBOSE >= 2:
print 'Get entropy from file'
S = get_subtype_reference_alignment_entropy_syn(region,
subtype=subtype,
refname=refname,
VERBOSE=VERBOSE)
Ss[region] = S
| 33.971631
| 103
| 0.524426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 779
| 0.16263
|
dd17680bbd248da6c5086919dd5e04da84e0dd2e
| 15,119
|
py
|
Python
|
udebs/interpret.py
|
recrm/Udebs
|
d7e8e248e7afaf6559f2a96ce5dd6e2698d65af7
|
[
"MIT"
] | 6
|
2017-08-20T02:48:12.000Z
|
2020-09-04T21:46:35.000Z
|
udebs/interpret.py
|
recrm/Udebs
|
d7e8e248e7afaf6559f2a96ce5dd6e2698d65af7
|
[
"MIT"
] | null | null | null |
udebs/interpret.py
|
recrm/Udebs
|
d7e8e248e7afaf6559f2a96ce5dd6e2698d65af7
|
[
"MIT"
] | 1
|
2019-08-28T00:48:43.000Z
|
2019-08-28T00:48:43.000Z
|
import copy
import json
import itertools
import os
import operator
from .errors import *
# ---------------------------------------------------
# Imports and Variables -
# ---------------------------------------------------
class Standard:
"""
Basic functionality wrappers.
Do not import any of these, included only as reference for udebs config file syntax.
"""
@staticmethod
def print(*args):
"""
prints extra output to console.
.. code-block:: xml
<i>print arg1 arg2 ...</i>
"""
print(*args)
return True
@staticmethod
def logicif(cond, value, other):
"""
returns value if condition else other.
TODO: Other is executed even if value is true.
.. code-block:: xml
<i>if cond value other</i>
"""
return value if cond else other
@staticmethod
def inside(before, after, amount=1):
"""
Returns true if before in after amount times else false.
.. code-block:: xml
<i>value in obj</i>
"""
if isinstance(after, str):
return before in after
if amount == 0:
return True
count = 0
for item in after:
if item == before:
count += 1
if count >= amount:
return True
return False
@staticmethod
def notin(*args, **kwargs):
"""
Returns false if value in obj else true.
.. code-block:: xml
<i>value in obj</i>
"""
return not Standard.inside(*args, **kwargs)
@staticmethod
def equal(*args):
"""Checks for equality of args.
.. code-block:: xml
<i>== arg1 arg2 ...</i>
<i>arg1 == arg2</i>
"""
x = args[0]
for y in args:
if y != x:
return False
return True
@staticmethod
def notequal(*args):
"""Checks for inequality of args.
.. code-block:: xml
<i>!= arg1 arg2 ...</i>
<i>arg1 != arg2</i>
"""
x = args[0]
for y in args[1:]:
if x == y:
return False
return True
@staticmethod
def gt(before, after):
"""Checks if before is greater than after
.. code-block:: xml
<i>before > after</i>
"""
return before > after
@staticmethod
def lt(before, after):
"""Checks if before is less than after
.. code-block:: xml
<i>before < after</i>
"""
return before < after
@staticmethod
def gtequal(before, after):
"""Checks if before is greater than or equal to after
.. code-block:: xml
<i>before >= after</i>
"""
return before >= after
@staticmethod
def ltequal(before, after):
"""Checks if before is less than or equal to after
.. code-block:: xml
<i>before <= after</i>
"""
return before <= after
@staticmethod
def plus(*args):
"""Sums arguments
.. code-block:: xml
<i>arg1 + arg2</i>
<i>+ arg1 arg2 ...</i>
"""
return sum(args)
@staticmethod
def multiply(*args):
"""Multiplies arguments
.. code-block:: xml
<i>arg1 * arg2</i>
<i>* arg1 arg2 ...</i>
"""
i = 1
for number in args:
i *= number
return i
@staticmethod
def logicor(*args, storage=None, field=None):
"""
returns true if even one of args is true.
Note: All arguments are processed unless extra arguments are quoted.
.. code-block:: xml
<i>arg1 or arg2</i>
<i>or arg1 arg2 ...</i>
"""
env = _getEnv(storage, {"self": field})
for i in args:
if isinstance(i, UdebsStr):
i = field.getEntity(i).testRequire(env)
if i:
return True
return False
@staticmethod
def mod(before, after):
"""Returns before mod after.
.. code-block:: xml
<i>before % after</i>
"""
return before % after
@staticmethod
def setvar(storage, variable, value):
"""Stores value inside of variable.
Note: always returns true so can be used in require block.
.. code-block:: xml
<i>variable = value</i>
<i>variable -> value</i>
"""
storage[variable] = value
return True
@staticmethod
def getvar(storage, variable):
"""Retrieves a variable
.. code-block:: xml
<i>$ variable</i>
<i>$variable</i>
"""
return storage[variable]
@staticmethod
def div(before, after):
"""Returns before divided by after.
.. code-block:: xml
<i>before / after</i>
"""
return before / after
@staticmethod
def logicnot(element):
"""Switches a boolean from true to false and vice versa
.. code-block:: xml
<i>! element</i>
<i>!element</i>
"""
return not element
@staticmethod
def minus(before, element):
"""Returns before - element. (before defaults to 0 if not given)
.. code-block:: xml
<i>before - element</i>
<i>-element</i>
"""
return before - element
@staticmethod
def sub(array, i):
"""Gets the ith element of array.
.. code-block:: xml
<i>array sub i</i>
"""
return next(itertools.islice(array, int(i), None), 'empty')
@staticmethod
def length(list_):
"""Returns the length of an iterable.
.. code-block:: xml
<i>length list_</i>
"""
return len(list(list_))
@staticmethod
def quote(string):
"""Treats input as string literal and does not process commands.
        .. code-block:: xml
            <i>`(caster CAST target move)</i>
"""
return UdebsStr(string)
class Variables:
versions = [0, 1]
modules = {
-1: {},
}
env = {
"__builtins__": {"abs": abs, "min": min, "max": max, "len": len},
"standard": Standard,
"operator": operator,
"storage": {},
}
default = {
"f": "",
"args": [],
"kwargs": {},
"all": False,
"default": {},
"string": [],
}
@staticmethod
def keywords(version=1):
return dict(Variables.modules[version], **Variables.modules[-1])
def importFunction(f, args):
"""
Allows a user to import a single function into udebs.
**deprecated - please use udebs.utilities.register
"""
module = {
f.__name__: {
"f": f.__name__
}
}
module[f.__name__].update(args)
importModule(module, {f.__name__: f})
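# Minimal usage sketch (hypothetical function and keyword configuration); the
# dict mirrors the Variables.default fields, here taking one positional
# argument from the call string:
#   def double(x):
#       return 2 * x
#   importFunction(double, {"args": ["$1"]})
# After this, an effect string such as "double 5" should compile to "double(5)".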
def importModule(dicts=None, globs=None, version=-1):
"""
Allows user to extend base variables available to the interpreter.
Should be run before the instance object is created.
**deprecated for users - please use udebs.utilities.register
"""
if globs is None:
globs = {}
if dicts is None:
dicts = {}
if version not in Variables.modules:
Variables.modules[version] = {}
Variables.modules[version].update(dicts)
Variables.env.update(globs)
def importSystemModule(name, globs=None):
"""Convenience script for import system keywords."""
if globs is None:
globs = {}
path = os.path.dirname(__file__)
for version in Variables.versions:
filename = "{}/keywords/{}-{}.json".format(path, name, str(version))
with open(filename) as fp:
importModule(json.load(fp), globs, version)
def _getEnv(local, glob=None):
"""Retrieves a copy of the base variables."""
value = copy.copy(Variables.env)
if glob:
value.update(glob)
value["storage"] = local
return value
# ---------------------------------------------------
# Interpreter Functions -
# ---------------------------------------------------
def formatS(string, version):
"""Converts a string into its python representation."""
string = str(string)
if string == "self":
return string
elif string == "false":
return "False"
elif string == "true":
return "True"
elif string == "None":
return string
elif string.isdigit():
return string
# String quoted by user.
elif string[0] == string[-1] and string[0] in {"'", '"'}:
return string
# String has already been handled by call
elif string[-1] == ")":
return string
elif string in Variables.env:
return string
# In case prefix notation used in keyword defaults.
elif string[0] in Variables.keywords(version):
return interpret(string, version)
else:
return "'" + string + "'"
def call(args, version):
"""Converts callList into functionString."""
# Find keyword
keywords = [i for i in args if i in Variables.keywords(version)]
# Too many keywords is a syntax error.
if len(keywords) > 1:
raise UdebsSyntaxError("CallList contains to many keywords '{}'".format(args))
# No keywords creates a tuple object.
elif len(keywords) == 0:
return "(" + ",".join(formatS(i, version) for i in args) + ")"
keyword = keywords[0]
# Get and fix data for this keyword.
data = copy.copy(Variables.default)
data.update(Variables.keywords(version)[keyword])
# Create dict of values
current = args.index(keyword)
nodes = copy.copy(data["default"])
for index in range(len(args)):
value = "$" if index >= current else "-$"
value += str(abs(index - current))
if args[index] != keyword:
nodes[value] = args[index]
# Force strings into quoted arguments.
for string in data["string"]:
nodes[string] = "'" + str(nodes[string]).replace("'", "\\'") + "'"
# Claim keyword arguments.
kwargs = {}
for key, value in data["kwargs"].items():
if value in nodes:
new_value = nodes[value]
del nodes[value]
else:
new_value = value
kwargs[key] = formatS(new_value, version)
arguments = []
# Insert positional arguments
for key in data["args"]:
if key in nodes:
arguments.append(formatS(nodes[key], version))
del nodes[key]
else:
arguments.append(formatS(key, version))
# Insert ... arguments.
if data["all"]:
for key in sorted(nodes.keys(), key=lambda x: int(x.replace("$", ""))):
arguments.append(formatS(nodes[key], version))
del nodes[key]
if len(nodes) > 0:
raise UdebsSyntaxError("Keyword contains unused arguments. '{}'".format(" ".join(args)))
# Insert keyword arguments.
for key in sorted(kwargs.keys()):
arguments.append(str(key) + "=" + str(kwargs[key]))
return data["f"] + "(" + ",".join(arguments) + ")"
def split_callstring(raw, version):
"""Converts callString into call_list."""
open_bracket = {'(', '{', '['}
close_bracket = {')', '}', ']'}
call_list = []
buf = ''
in_brackets = 0
in_quotes = False
dot_legal = True
for char in raw.strip():
if char in {'"', "'"}:
in_quotes = not in_quotes
elif not in_quotes:
if char in open_bracket:
in_brackets += 1
elif char in close_bracket:
in_brackets -= 1
elif not in_brackets:
if dot_legal:
if char == ".":
call_list.append(buf)
buf = ''
continue
elif char.isspace():
dot_legal = False
if call_list:
call_list = [".".join(call_list) + "." + buf]
buf = ''
if char.isspace():
if buf:
call_list.append(buf)
buf = ''
continue
buf += char
call_list.append(buf)
if in_brackets:
raise UdebsSyntaxError("Brackets are mismatched. '{}'".format(raw))
if '' in call_list:
raise UdebsSyntaxError("Empty element in call_list. '{}'".format(raw))
# Length one special cases.
if len(call_list) == 1:
value = call_list[0]
# Prefix calling.
if value not in Variables.keywords(version):
if value[0] in Variables.keywords(version):
return [value[0], value[1:]]
return call_list
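# Rough behaviour sketch (the exact result also depends on the loaded keyword
# tables for length-one strings):
#   split_callstring("caster CAST target move", 1) -> ['caster', 'CAST', 'target', 'move']
#   split_callstring("a.b c", 1)                   -> ['a.b', 'c']
#   split_callstring("(x y) z", 1)                 -> ['(x y)', 'z']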
def interpret(string, version=1, debug=False):
"""Recursive function that parses callString"""
try:
_list = split_callstring(string, version)
if debug:
print("Interpret:", string)
print("Split:", _list)
found = []
for entry in _list:
if entry[0] == "(" and entry[-1] == ")":
found.append(interpret(entry[1:-1], version, debug))
elif "." in entry:
found.append(interpret(entry, version, debug))
elif entry[0] in Variables.keywords(version) and entry not in Variables.keywords(version):
found.append(interpret(entry, version, debug))
else:
found.append(entry)
comp = call(found, version)
if debug:
print("call:", _list)
print("computed:", comp)
return UdebsStr(comp)
except Exception:
print(string)
raise
# ---------------------------------------------------
# Script Main Class -
# ---------------------------------------------------
# An easy way to distinguish between interpreted strings.
class UdebsStr(str):
pass
class Script:
def __init__(self, effect, version=1, debug=False):
# Raw text given to script.
self.raw = effect
self.interpret = effect
if not isinstance(effect, UdebsStr):
self.interpret = interpret(effect, version, debug)
self.code = compile(self.interpret, '<string>', "eval")
def __repr__(self):
return "<Script " + self.raw + ">"
def __str__(self):
return self.raw
def __call__(self, env):
return eval(self.code, env)
def __eq__(self, other):
if not isinstance(other, Script):
return False
return self.raw == other.raw
# ---------------------------------------------------
# Runtime -
# ---------------------------------------------------
importSystemModule("base")
importSystemModule("udebs")
| 25.031457
| 102
| 0.511674
| 7,232
| 0.478339
| 0
| 0
| 5,858
| 0.387459
| 0
| 0
| 5,614
| 0.371321
|
dd19eb06c6b535902edc1e166510cf7dc3e3ac06
| 425
|
py
|
Python
|
src/aequitas/plot/__init__.py
|
antoinelb/aequitas
|
5a912a3c1751b04c8688ad9e0c09ed87a6c48870
|
[
"MIT"
] | 469
|
2018-04-24T23:11:45.000Z
|
2022-03-29T07:54:07.000Z
|
src/aequitas/plot/__init__.py
|
antoinelb/aequitas
|
5a912a3c1751b04c8688ad9e0c09ed87a6c48870
|
[
"MIT"
] | 62
|
2018-04-16T00:14:56.000Z
|
2021-11-12T10:35:01.000Z
|
src/aequitas/plot/__init__.py
|
antoinelb/aequitas
|
5a912a3c1751b04c8688ad9e0c09ed87a6c48870
|
[
"MIT"
] | 94
|
2018-05-21T16:13:57.000Z
|
2022-03-25T20:07:25.000Z
|
from aequitas.plot.summary_chart import plot_summary_chart as summary
from aequitas.plot.bubble_disparity_chart import plot_disparity_bubble_chart as disparity
from aequitas.plot.bubble_metric_chart import plot_metric_bubble_chart as absolute
from aequitas.plot.bubble_concatenation_chart import plot_concatenated_bubble_charts as disparity_absolute
from aequitas.plot.xy_metrics_chart import plot_xy_metrics_chart as scatter
| 85
| 106
| 0.908235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dd1a3f013274c36a04a9e56b4c6a20cb322ded06
| 287
|
py
|
Python
|
python-safety/predict.py
|
alexeyinkin/geonames-to-json
|
6055d4f3484f859a784aa8b9bfd96682a74fcd1b
|
[
"MIT"
] | null | null | null |
python-safety/predict.py
|
alexeyinkin/geonames-to-json
|
6055d4f3484f859a784aa8b9bfd96682a74fcd1b
|
[
"MIT"
] | null | null | null |
python-safety/predict.py
|
alexeyinkin/geonames-to-json
|
6055d4f3484f859a784aa8b9bfd96682a74fcd1b
|
[
"MIT"
] | null | null | null |
import numpy as np
from model import get_model
def get_trained_model():
model = get_model()
model.load_weights('weights.ckpt')
return model
#model = get_fit_model('random.tsv')
#inputs_raw = [[0,0,0,0,0]]
#inputs_np = np.array(inputs_raw)
#print(model.predict(inputs_np))
| 20.5
| 38
| 0.71777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 142
| 0.494774
|
dd1a79c02a429daf639fa22cee8d29423011e935
| 12,150
|
py
|
Python
|
src/predict.py
|
yzhhome/JDProductSummaryGeneration
|
4939f061ca90ad7ddd69b5a1794735f962e45bc0
|
[
"MIT"
] | 1
|
2021-09-18T07:42:36.000Z
|
2021-09-18T07:42:36.000Z
|
src/predict.py
|
yzhhome/JDProductSummaryGeneration
|
4939f061ca90ad7ddd69b5a1794735f962e45bc0
|
[
"MIT"
] | null | null | null |
src/predict.py
|
yzhhome/JDProductSummaryGeneration
|
4939f061ca90ad7ddd69b5a1794735f962e45bc0
|
[
"MIT"
] | null | null | null |
'''
@Author: dzy
@Date: 2021-09-13 11:07:48
@LastEditTime: 2021-09-26 20:25:17
@LastEditors: dzy
@Description: Helper functions or classes used for the model.
@FilePath: /JDProductSummaryGeneration/src/predict.py
'''
import random
import os
import sys
import pathlib
import torch
import jieba
import config
from model import PGN
from dataset import PairDataset
from utils import source2ids, outputids2words, Beam, timer, add2heap, replace_oovs
abs_path = pathlib.Path(__file__).parent.absolute()
class Predict():
    @timer(module='initialize predictor')
def __init__(self):
        # Run on the GPU when available, otherwise fall back to the CPU
        self.DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset = PairDataset(config.data_path,
max_src_len=config.max_src_len,
max_tgt_len=config.max_tgt_len,
truncate_src=config.truncate_src,
truncate_tgt=config.truncate_tgt)
self.vocab = dataset.build_vocab(embed_file=config.embed_file)
self.model = PGN(self.vocab)
        # List of stop word indexes in the vocabulary
self.stop_word = list(set([self.vocab[x.strip()]
for x in open(config.stop_word_file).readlines()]))
self.model.load_model()
self.model.to(self.DEVICE)
def greedy_search(self, x, max_sum_len, len_oovs, x_padding_masks):
"""Function which returns a summary by always picking
the highest probability option conditioned on the previous word.
Args:
x (Tensor): Input sequence as the source.
max_sum_len (int): The maximum length a summary can have.
len_oovs (Tensor): Numbers of out-of-vocabulary tokens.
x_padding_masks (Tensor):
The padding masks for the input sequences
with shape (batch_size, seq_len).
Returns:
summary (list): The token list of the result summary.
"""
        # Get the encoder output and hidden states
encoder_output, encoder_states = self.model.encoder(
replace_oovs(x, self.vocab))
        # Initialize the decoder hidden states with the encoder hidden states.
        # The encoder is a bidirectional LSTM while the decoder is a
        # unidirectional LSTM, so the states need to be reduced in dimension.
decoder_states = self.model.reduce_state(encoder_states)
        # The decoder input at time step 0 is the SOS start token
x_t = torch.ones(1) * self.vocab.SOS
x_t = x_t.to(self.DEVICE, dtype=torch.int64)
        # The first token of the summary is SOS
summary = [self.vocab.SOS]
        # Initialize the coverage vector
coverage_vector = torch.zeros((1, x.shape[1])).to(self.DEVICE)
        # Keep generating while the EOS token has not been produced and the
        # summary is shorter than the maximum summary length
while int(x_t.item()) != (self.vocab.EOS) and \
len(summary) < max_sum_len:
context_vector, attention_weights, coverage_vector = \
self.model.attention(decoder_states,
encoder_output,
x_padding_masks,
coverage_vector)
p_vocab, decoder_states, p_gen = \
self.model.decoder(x_t.unsqueeze(1),
decoder_states,
context_vector)
final_dist = self.model.get_final_distribution(
x,
p_gen,
p_vocab,
attention_weights,
torch.max(len_oovs))
            # Get the most probable token from the final distribution
x_t = torch.argmax(final_dist, dim=1).to(self.DEVICE)
decoder_word_idx = x_t.item()
            # Append it to the generated summary
summary.append(decoder_word_idx)
            # Replace OOVs in the input and continue with the next step
x_t = replace_oovs(x_t, self.vocab)
return summary
# @timer('best k')
def best_k(self, beam, k, encoder_output, x_padding_masks, x, len_oovs):
"""Get best k tokens to extend the current sequence at the current time step.
Args:
            beam (utils.Beam): The candidate beam to be extended.
k (int): Beam size.
encoder_output (Tensor): The lstm output from the encoder.
x_padding_masks (Tensor):
The padding masks for the input sequences.
x (Tensor): Source token ids.
len_oovs (Tensor): Number of oov tokens in a batch.
Returns:
best_k (list(Beam)): The list of best k candidates.
"""
# use decoder to generate vocab distribution for the next token
decoder_input_t = torch.tensor(beam.tokens[-1]).reshape(1, 1)
decoder_input_t = decoder_input_t.to(self.DEVICE)
# Get context vector from attention network.
context_vector, attention_weights, coverage_vector = \
self.model.attention(beam.decoder_states,
encoder_output,
x_padding_masks,
beam.coverage_vector)
# Replace the indexes of OOV words with the index of OOV token
# to prevent index-out-of-bound error in the decoder.
p_vocab, decoder_states, p_gen = \
self.model.decoder(replace_oovs(decoder_input_t, self.vocab),
beam.decoder_states,
context_vector)
final_dist = self.model.get_final_distribution(x,
p_gen,
p_vocab,
attention_weights,
torch.max(len_oovs))
# Calculate log probabilities.
log_probs = torch.log(final_dist.squeeze())
# Filter forbidden tokens.
if len(beam.tokens) == 1:
forbidden_ids = [
self.vocab[u"这"],
self.vocab[u"此"],
self.vocab[u"采用"],
self.vocab[u","],
self.vocab[u"。"],
]
log_probs[forbidden_ids] = -float('inf')
# EOS token penalty. Follow the definition in
# https://opennmt.net/OpenNMT/translation/beam_search/.
log_probs[self.vocab.EOS] *= \
config.gamma * x.size()[1] / len(beam.tokens)
log_probs[self.vocab.UNK] = -float('inf')
# Get top k tokens and the corresponding logprob.
topk_probs, topk_idx = torch.topk(log_probs, k)
# Extend the current hypo with top k tokens, resulting k new hypos.
best_k = [beam.extend(x,
log_probs[x],
decoder_states,
coverage_vector) for x in topk_idx.tolist()]
return best_k
def beam_search(self, x,
max_sum_len,
beam_width,
len_oovs,
x_padding_masks):
"""Using beam search to generate summary.
Args:
x (Tensor): Input sequence as the source.
max_sum_len (int): The maximum length a summary can have.
beam_width (int): Beam size.
            len_oovs (Tensor): Number of out-of-vocabulary tokens.
x_padding_masks (Tensor):
The padding masks for the input sequences.
Returns:
result (list(Beam)): The list of best k candidates.
"""
# run body_sequence input through encoder
encoder_output, encoder_states = self.model.encoder(
replace_oovs(x, self.vocab))
coverage_vector = torch.zeros((1, x.shape[1])).to(self.DEVICE)
# initialize decoder states with encoder forward states
decoder_states = self.model.reduce_state(encoder_states)
# initialize the hypothesis with a class Beam instance.
init_beam = Beam([self.vocab.SOS],
[0],
decoder_states,
coverage_vector)
        # get the beam size and create a list for storing current candidates
# and a list for completed hypothesis
k = beam_width
curr, completed = [init_beam], []
# use beam search for max_sum_len (maximum length) steps
for _ in range(max_sum_len):
# get k best hypothesis when adding a new token
topk = []
for beam in curr:
# When an EOS token is generated, add the hypo to the completed
# list and decrease beam size.
if beam.tokens[-1] == self.vocab.EOS:
completed.append(beam)
k -= 1
continue
for can in self.best_k(beam,
k,
encoder_output,
x_padding_masks,
x,
torch.max(len_oovs)
):
# Using topk as a heap to keep track of top k candidates.
                    # Using the sequence scores of the hypos to compare
# and object ids to break ties.
add2heap(topk, (can.seq_score(), id(can), can), k)
curr = [items[2] for items in topk]
# stop when there are enough completed hypothesis
if len(completed) == beam_width:
break
        # When there are not enough completed hypotheses,
        # take whatever we have in the current best k as the final candidates.
completed += curr
        # sort the hypotheses by normalized probability and choose the best one
result = sorted(completed,
key=lambda x: x.seq_score(),
reverse=True)[0].tokens
return result
@timer(module='doing prediction')
def predict(self, text, tokenize=True, beam_search=True):
"""Generate summary.
Args:
text (str or list): Source.
tokenize (bool, optional):
Whether to do tokenize or not. Defaults to True.
beam_search (bool, optional):
Whether to use beam search or not.
Defaults to True (means using greedy search).
Returns:
str: The final summary.
"""
if isinstance(text, str) and tokenize:
text = list(jieba.cut(text))
x, oov = source2ids(text, self.vocab)
x = torch.tensor(x).to(self.DEVICE)
len_oovs = torch.tensor([len(oov)]).to(self.DEVICE)
x_padding_masks = torch.ne(x, 0).byte().float()
if beam_search:
summary = self.beam_search(x.unsqueeze(0),
max_sum_len=config.max_dec_steps,
beam_width=config.beam_size,
len_oovs=len_oovs,
x_padding_masks=x_padding_masks)
else:
summary = self.greedy_search(x.unsqueeze(0),
max_sum_len=config.max_dec_steps,
len_oovs=len_oovs,
x_padding_masks=x_padding_masks)
        # Convert the token indexes of the summary back into words
summary = outputids2words(summary, oov, self.vocab)
        # Do not show <SOS> and <EOS> in the output
return summary.replace('<SOS>', '').replace('<EOS>', '').strip()
if __name__ == "__main__":
pred = Predict()
print('vocab_size: ', len(pred.vocab))
    # Randomly pick one sample from the test set for prediction
with open(config.test_data_path, 'r') as test:
picked = random.choice(list(test))
source, ref = picked.strip().split('<sep>')
print('source: ', source, '\n')
greedy_prediction = pred.predict(source.split(), beam_search=False)
print('greedy: ', greedy_prediction, '\n')
beam_prediction = pred.predict(source.split(), beam_search=True)
print('beam: ', beam_prediction, '\n')
print('reference: ', ref, '\n')
| 38.087774
| 85
| 0.535885
| 11,307
| 0.910533
| 0
| 0
| 2,390
| 0.192463
| 0
| 0
| 4,370
| 0.351909
|