hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2eb2ceac648c31116fdafa89e303bfa757ec167 | 6,050 | py | Python | calliope_app/client/views/engage.py | NREL/engage | c25bacc1e98f84671ba0c0e53fc98bace090a4d8 | [
"BSD-3-Clause"
] | 3 | 2021-01-25T18:13:00.000Z | 2021-04-30T12:17:42.000Z | calliope_app/client/views/engage.py | NREL/engage | c25bacc1e98f84671ba0c0e53fc98bace090a4d8 | [
"BSD-3-Clause"
] | 8 | 2020-12-11T22:28:17.000Z | 2022-03-05T02:08:27.000Z | calliope_app/client/views/engage.py | NREL/engage | c25bacc1e98f84671ba0c0e53fc98bace090a4d8 | [
"BSD-3-Clause"
] | 1 | 2021-09-15T22:15:12.000Z | 2021-09-15T22:15:12.000Z | import os
from pytz import common_timezones
from django.conf import settings
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import authenticate, login, update_session_auth_hash
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.urls import reverse
from api.models.engage import Help_Guide
from api.models.configuration import Model, Model_User
@login_required
def home_view(request):
    """
    View the "Home" page

    Gathers the requesting user's private models, snapshots taken of those
    models and all public models, and picks the model to open first.

    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/
    """
    # Private, non-snapshot models owned by this user, most recent first.
    user_models = Model_User.objects.filter(
        user=request.user,
        model__snapshot_version=None,
        model__public=False).order_by('-last_access')
    # Snapshots derived from any of the user's models.
    snapshots = Model.objects.filter(
        snapshot_base_id__in=user_models.values_list("model_id", flat=True))
    # Models visible to every user.
    public_models = Model.objects.filter(snapshot_version=None, public=True)
    user_models = list(user_models)
    # Default model: the user's most recently accessed one, falling back to
    # the first public model, else nothing.
    last_model = None
    if user_models:
        last_model = user_models[0].model
    elif len(public_models) > 0:
        last_model = public_models[0]
    context = {
        "timezones": common_timezones,
        "last_model": last_model,
        "user_models": user_models,
        "public_models": public_models,
        "snapshots": snapshots,
        "mapbox_token": settings.MAPBOX_TOKEN,
        "help_content": Help_Guide.get_safe_html('home'),
    }
    return render(request, "home.html", context)
@login_required
def share_view(request):
    """
    View the "Model Sharing" page

    Parameters:
    model_uuid (uuid): required

    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/share/
    """
    selected_model_uuid = request.GET.get('model_uuid', None)
    # Only fully uploaded models can be shared.
    user_models = Model_User.objects.filter(
        user=request.user, model__is_uploading=False)
    # Every other user, ordered for the share-with picker.
    users = User.objects.all().exclude(
        id=request.user.id).order_by('last_name', 'first_name')
    context = {
        "timezones": common_timezones,
        "user": request.user,
        "users": users,
        "user_models": user_models,
        # str() so the template receives "None" when no model was selected.
        "selected_model_uuid": str(selected_model_uuid),
        "help_content": Help_Guide.get_safe_html('share'),
    }
    return render(request, "share.html", context)
@login_required
def password_view(request):
    """
    View the "Password Manager" page

    Presents (GET) and processes (POST) a password-change form, keeping the
    current session authenticated after a successful change.

    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/settings/password/
    """
    user = request.user
    # Accounts without a usable password get the admin form, which does not
    # prompt for the old password.
    form_class = (PasswordChangeForm if user.has_usable_password()
                  else AdminPasswordChangeForm)
    if request.method != 'POST':
        form = form_class(user)
    else:
        form = form_class(user, request.POST)
        if form.is_valid():
            form.save()
            # Re-hash the session so the user stays logged in.
            update_session_auth_hash(request, form.user)
            messages.success(request,
                             "Your password was successfully updated!")
            return redirect('password')
        messages.error(request, 'Please correct the error below')
    return render(request, "password.html", {'user': user, 'form': form})
@login_required
def admin_login_view(request):
    """
    Redirect to login view
    Parameters:
    model_uuid (uuid): required
    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/share/
    """
    # NOTE(review): despite the docstring, this renders "share.html" with an
    # empty context rather than redirecting to the login view, and the
    # documented model_uuid parameter is never read -- confirm intent.
    context = {}
    return render(request, "share.html", context)
def user_login(request):
    """
    View the "Login" page

    On GET, renders the login form with an optional status banner selected
    by the ?status= query parameter. On POST, authenticates by email and
    password and redirects to ?next= (or home) on success.

    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/login/
    """
    redirect_to = request.GET.get('next', '')
    status = request.GET.get('status', 'login')
    status_messages = {'active': ("Account has been activated!\n"
                                  "Please proceed to login..."),
                       'inactive': ("Account has not yet been activated!\n"
                                    "Email must be verified first..."),
                       'invalid-email': ("Email is invalid!\n"
                                         "Please try again..."),
                       'invalid-password': ("Password is invalid!\n"
                                            "Please try again...")}
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('home'))
    if request.method == 'POST':
        # BUG FIX: .get('email') returns None when the field is absent,
        # which previously crashed on .lower(); default to ''.
        email = request.POST.get('email', '').lower()
        password = request.POST.get('password', '')
        user = User.objects.filter(username=email).first()
        if user:
            if not user.is_active:
                # Registered but not yet email-verified.
                url = "%s?status=inactive" % reverse('login')
                return HttpResponseRedirect(url)
            else:
                user = authenticate(username=email, password=password)
                if user:
                    login(request, user)
                    if redirect_to:
                        return HttpResponseRedirect(redirect_to)
                    else:
                        return HttpResponseRedirect(reverse('home'))
                else:
                    url = "%s?status=invalid-password" % reverse('login')
                    return HttpResponseRedirect(url)
        else:
            url = "%s?status=invalid-email" % reverse('login')
            return HttpResponseRedirect(url)
    else:
        public_models = Model.objects.filter(
            snapshot_version=None,
            public=True)
        context = {'redirect_to': redirect_to,
                   'status': status_messages.get(status, ''),
                   'public_models': public_models}
        return render(request, 'registration/login.html', context)
| 29.368932 | 77 | 0.603471 | import os
from pytz import common_timezones
from django.conf import settings
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import authenticate, login, update_session_auth_hash
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.urls import reverse
from api.models.engage import Help_Guide
from api.models.configuration import Model, Model_User
@login_required
def home_view(request):
    """
    View the "Home" page
    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/
    """
    # The user's private, non-snapshot models, most recently accessed first.
    user_models = Model_User.objects.filter(
        user=request.user,
        model__snapshot_version=None,
        model__public=False).order_by('-last_access')
    model_ids = user_models.values_list(
        "model_id", flat=True)
    # Snapshots derived from any of the user's models.
    snapshots = Model.objects.filter(
        snapshot_base_id__in=model_ids)
    public_models = Model.objects.filter(
        snapshot_version=None,
        public=True)
    user_models = list(user_models)
    # Default model to open: most recent private model, else first public
    # model, else None.
    if len(user_models) > 0:
        last_model = user_models[0].model
    elif len(public_models) > 0:
        last_model = public_models[0]
    else:
        last_model = None
    context = {
        "timezones": common_timezones,
        "last_model": last_model,
        "user_models": user_models,
        "public_models": public_models,
        "snapshots": snapshots,
        "mapbox_token": settings.MAPBOX_TOKEN,
        "help_content": Help_Guide.get_safe_html('home'),
    }
    return render(request, "home.html", context)
@login_required
def share_view(request):
    """
    View the "Model Sharing" page
    Parameters:
    model_uuid (uuid): required
    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/share/
    """
    selected_model_uuid = request.GET.get('model_uuid', None)
    # Only models that have finished uploading may be shared.
    user_models = Model_User.objects.filter(user=request.user,
                                            model__is_uploading=False)
    # All other users, for the share-with selector.
    users = User.objects.all().exclude(
        id=request.user.id).order_by('last_name', 'first_name')
    context = {
        "timezones": common_timezones,
        "user": request.user,
        "users": users,
        "user_models": user_models,
        # NOTE: str() turns a missing uuid into the literal string "None".
        "selected_model_uuid": str(selected_model_uuid),
        "help_content": Help_Guide.get_safe_html('share'),
    }
    return render(request, "share.html", context)
@login_required
def password_view(request):
    """
    View the "Password Manager" page
    Parameters:
    model_uuid (uuid): required
    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/settings/password/
    """
    user = request.user
    # Accounts without a usable password (e.g. created externally) get the
    # admin form, which does not ask for the old password.
    if user.has_usable_password():
        PasswordForm = PasswordChangeForm
    else:
        PasswordForm = AdminPasswordChangeForm
    if request.method == 'POST':
        form = PasswordForm(user, request.POST)
        if form.is_valid():
            form.save()
            # Keep the current session authenticated under the new password.
            update_session_auth_hash(request, form.user)
            messages.success(request,
                             "Your password was successfully updated!")
            return redirect('password')
        else:
            messages.error(request, 'Please correct the error below')
    else:
        form = PasswordForm(user)
    context = {
        'user': user,
        'form': form,
    }
    return render(request, "password.html", context)
@login_required
def admin_login_view(request):
    """
    Redirect to login view
    Parameters:
    model_uuid (uuid): required
    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/share/
    """
    # NOTE(review): this renders "share.html" with an empty context instead
    # of redirecting to the login view as the docstring claims -- confirm.
    context = {}
    return render(request, "share.html", context)
def user_login(request):
    """
    View the "Login" page
    Returns: HttpResponse
    Example:
    http://0.0.0.0:8000/login/
    """
    redirect_to = request.GET.get('next', '')
    status = request.GET.get('status', 'login')
    # Banner text keyed by the ?status= query parameter.
    status_messages = {'active': ("Account has been activated!\n"
                                  "Please proceed to login..."),
                       'inactive': ("Account has not yet been activated!\n"
                                    "Email must be verified first..."),
                       'invalid-email': ("Email is invalid!\n"
                                         "Please try again..."),
                       'invalid-password': ("Password is invalid!\n"
                                            "Please try again...")}
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('home'))
    if request.method == 'POST':
        # NOTE(review): .get('email') returns None when the field is missing,
        # so .lower() would raise AttributeError -- consider a '' default.
        email = request.POST.get('email').lower()
        password = request.POST.get('password')
        user = User.objects.filter(username=email).first()
        if user:
            if not user.is_active:
                # Registered but not yet email-verified.
                url = "%s?status=inactive" % reverse('login')
                return HttpResponseRedirect(url)
            else:
                user = authenticate(username=email, password=password)
                if user:
                    login(request, user)
                    if redirect_to:
                        return HttpResponseRedirect(redirect_to)
                    else:
                        return HttpResponseRedirect(reverse('home'))
                else:
                    url = "%s?status=invalid-password" % reverse('login')
                    return HttpResponseRedirect(url)
        else:
            url = "%s?status=invalid-email" % reverse('login')
            return HttpResponseRedirect(url)
    else:
        public_models = Model.objects.filter(
            snapshot_version=None,
            public=True)
        context = {'redirect_to': redirect_to,
                   'status': status_messages.get(status, ''),
                   'public_models': public_models}
        return render(request, 'registration/login.html', context)
| 0 | 0 | 0 |
867d163493e614d03a717a39fb0f6d0418a75d16 | 12,647 | py | Python | tests/datadriven/test_genotype_phenotype.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | 1 | 2016-12-14T21:05:47.000Z | 2016-12-14T21:05:47.000Z | tests/datadriven/test_genotype_phenotype.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | null | null | null | tests/datadriven/test_genotype_phenotype.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | null | null | null | """
Data-driven tests for g2p.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import rdflib
import ga4gh.datamodel.genotype_phenotype as genotype_phenotype
import ga4gh.datamodel.datasets as datasets
import ga4gh.protocol as protocol
import tests.datadriven as datadriven
import tests.paths as paths
| 46.496324 | 79 | 0.591761 | """
Data-driven tests for g2p.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import rdflib
import ga4gh.datamodel.genotype_phenotype as genotype_phenotype
import ga4gh.datamodel.datasets as datasets
import ga4gh.protocol as protocol
import tests.datadriven as datadriven
import tests.paths as paths
def testG2P():
    """Yield one data-driven test per phenotype-association-set fixture."""
    data_dir = os.path.join(
        paths.testDataDir, 'datasets/dataset1/phenotypes')
    for generated_test in datadriven.makeTests(
            data_dir, PhenotypeAssociationSetTest):
        yield generated_test
class PhenotypeAssociationSetTest(datadriven.DataDrivenTest):
    """Data-driven tests for the RdfPhenotypeAssociationSet data model.

    Each instance is bound to one on-disk phenotype fixture directory and
    exercises the private RDF-query helpers as well as the GA4GH protocol
    conversion.
    """
    def __init__(self, localId, baseDir):
        self._dataset = datasets.Dataset("ds")
        super(PhenotypeAssociationSetTest, self).__init__(localId, baseDir)
        # (sic) "phenotypeAssocationSet" misspells "Association" -- kept
        # as-is because the tests below reference this exact attribute name.
        self.phenotypeAssocationSet = self.getDataModelInstance(
            localId, baseDir)
    # Factory hook used by the data-driven base class.
    def getDataModelInstance(self, localId, dataPath):
        return genotype_phenotype.RdfPhenotypeAssociationSet(
            self._dataset, localId, dataPath)
    # Protocol counterpart of the data model under test.
    def getProtocolClass(self):
        return protocol.PhenotypeAssociationSet
    # _detailTuples should return one {subject, predicate, object} dict per
    # RDF triple whose subject is one of the given URIRefs.
    def testDetailTuples(self):
        test_uriRefs = [
            rdflib.term.URIRef(u'http://ohsu.edu/cgd/27d2169c'),
            rdflib.term.URIRef(u'http://ohsu.edu/cgd/87752f6c')
        ]
        details = self.phenotypeAssocationSet._detailTuples(test_uriRefs)
        self.assertEqual(len(details), 6)
        sample1 = {
            u'predicate': u'http://www.w3.org/2000/01/rdf-schema#label',
            u'object': u'GIST with decreased sensitivity to therapy',
            u'subject': u'http://ohsu.edu/cgd/87752f6c',
        }
        sample2 = {
            u'predicate': u'http://purl.obolibrary.org/obo/RO_0002200',
            u'object': u'http://ohsu.edu/cgd/87752f6c',
            u'subject': u'http://ohsu.edu/cgd/27d2169c',
        }
        self.assertIn(sample1, details)
        self.assertIn(sample2, details)
    # _bindingsToDict should flatten a SPARQL result row (Variable -> term)
    # into a plain dict of unicode strings.
    def testBindingsToDict(self):
        bindings = {
            rdflib.term.Variable(u'association'):
                rdflib.term.URIRef(u'http://ohsu.edu/cgd/fe484b5c'),
            rdflib.term.Variable(u'feature'):
                rdflib.term.URIRef(
                    u'http://cancer.sanger.ac.uk/cosmic/mutation/overview?id=965'
                ),
            rdflib.term.Variable(u'phenotype_label'):
                rdflib.term.Literal(
                    u'Papillary thyroid carcinoma with sensitivity to therapy'
                ),
            rdflib.term.Variable(u'environment_label'):
                rdflib.term.Literal(u'sunitinib'),
            rdflib.term.Variable(u'environment'):
                rdflib.term.URIRef(u'http://www.drugbank.ca/drugs/DB01268'),
            rdflib.term.Variable(u'evidence_type'):
                rdflib.term.URIRef(u'http://purl.obolibrary.org/obo/ECO_0000033'),
            rdflib.term.Variable(u'sources'):
                rdflib.term.Literal(
                    u'http://www.ncbi.nlm.nih.gov/pubmed/21470995|'
                    'http://www.ncbi.nlm.nih.gov/pubmed/21470995'),
            rdflib.term.Variable(u'phenotype'):
                rdflib.term.URIRef(u'http://ohsu.edu/cgd/30ebfd1a'),
            rdflib.term.Variable(u'feature_label'):
                rdflib.term.Literal(u'COSM965'),
        }
        myDict = self.phenotypeAssocationSet._bindingsToDict(bindings)
        sampleDict = {
            u'environment_label': u'sunitinib',
            u'feature_label': u'COSM965',
            u'evidence_type': u'http://purl.obolibrary.org/obo/ECO_0000033',
            u'feature':
                u'http://cancer.sanger.ac.uk/cosmic/mutation/overview?id=965',
            u'environment': u'http://www.drugbank.ca/drugs/DB01268',
            u'sources':
                u'http://www.ncbi.nlm.nih.gov/pubmed/21470995|'
                'http://www.ncbi.nlm.nih.gov/pubmed/21470995',
            u'phenotype': u'http://ohsu.edu/cgd/30ebfd1a',
            u'phenotype_label':
                u'Papillary thyroid carcinoma with sensitivity to therapy',
            u'association': u'http://ohsu.edu/cgd/fe484b5c',
        }
        self.assertEqual(myDict, sampleDict)
    # _getDetails should collapse the triples about one subject into a single
    # dict keyed by predicate, plus an 'id' entry for the subject itself.
    def testGetDetails(self):
        uriRef = 'http://www.drugbank.ca/drugs/DB01268'
        associations_details = [
            {u'predicate': u'http://purl.obolibrary.org/obo/RO_0002606',
             u'object': u'http://ohsu.edu/cgd/54039374',
             u'subject': u'http://www.drugbank.ca/drugs/DB01268'},
            {u'predicate': u'http://purl.obolibrary.org/obo/RO_0002606',
             u'object': u'http://ohsu.edu/cgd/983a1528',
             u'subject': u'http://www.drugbank.ca/drugs/DB01268'},
            {u'predicate': u'http://purl.obolibrary.org/obo/RO_0002606',
             u'object': u'http://ohsu.edu/cgd/71fe9f0f',
             u'subject': u'http://www.drugbank.ca/drugs/DB01268'},
            {u'predicate': u'http://www.w3.org/2000/01/rdf-schema#subClassOf',
             u'object': u'http://purl.obolibrary.org/obo/CHEBI_23888',
             u'subject': u'http://www.drugbank.ca/drugs/DB01268'}
        ]
        # NOTE(review): repeated predicates collapse to the last value seen
        # (RO_0002606 -> 71fe9f0f) -- presumably intended; confirm.
        sample_details = {
            u'http://purl.obolibrary.org/obo/RO_0002606':
                u'http://ohsu.edu/cgd/71fe9f0f',
            u'http://www.w3.org/2000/01/rdf-schema#subClassOf':
                u'http://purl.obolibrary.org/obo/CHEBI_23888',
            u'id': u'http://www.drugbank.ca/drugs/DB01268'}
        details = self.phenotypeAssocationSet._getDetails(
            uriRef, associations_details)
        self.assertEqual(details, sample_details)
    # CURIE -> full URL expansion.
    def testToNamespaceURL(self):
        sample_term = 'DrugBank:DB01268'
        result = self.phenotypeAssocationSet._toNamespaceURL(sample_term)
        self.assertEqual('http://www.drugbank.ca/drugs/DB01268', result)
    # Full URL -> local identifier.
    def testGetIdentifier(self):
        sample_url = 'http://www.drugbank.ca/drugs/DB01268'
        result = self.phenotypeAssocationSet._getIdentifier(sample_url)
        self.assertEqual('DB01268', result)
    # Namespace URL -> registered prefix name.
    def testGetPrefix(self):
        sample_url = 'http://www.drugbank.ca/drugs/'
        result = self.phenotypeAssocationSet._getPrefix(sample_url)
        self.assertEqual('DrugBank', result)
    # Full URL -> its namespace portion.
    def testGetPrefixURL(self):
        sample_url = 'http://www.drugbank.ca/drugs/DDD'
        result = self.phenotypeAssocationSet._getPrefixURL(sample_url)
        self.assertEqual('http://www.drugbank.ca/drugs/', str(result))
    # Running a raw SPARQL query against the fixture graph and extracting the
    # set of URIRefs referenced by the matching associations.
    def testExtractAssociationsDetails(self):
        sample_query = """
        PREFIX OBAN: <http://purl.org/oban/>
        PREFIX OBO: <http://purl.obolibrary.org/obo/>
        PREFIX dc: <http://purl.org/dc/elements/1.1/>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        SELECT
            ?association
            ?environment
            ?environment_label
            ?feature
            ?feature_label
            ?phenotype
            ?phenotype_label
            (GROUP_CONCAT(?source; separator="|") AS ?sources)
            ?evidence_type
        WHERE {
            ?association a OBAN:association .
            ?association OBO:RO_0002558 ?evidence_type .
            ?association OBO:RO_has_environment ?environment .
            OPTIONAL { ?association dc:source ?source } .
            ?association OBAN:association_has_subject ?feature .
            ?association OBAN:association_has_object ?phenotype .
            ?environment rdfs:label ?environment_label .
            ?phenotype rdfs:label ?phenotype_label .
            ?feature rdfs:label ?feature_label .
            FILTER ((?feature = <http://ohsu.edu/cgd/27d2169c> ))
            }
        GROUP BY ?association
        ORDER BY ?association
        """
        sample_associations = \
            self.phenotypeAssocationSet._rdfGraph.query(sample_query)
        result = self.phenotypeAssocationSet._extractAssociationsDetails(
            sample_associations)
        self.assertEqual(len(result), 3)
        self.assertEqual(result[0].toPython(), 'http://ohsu.edu/cgd/27d2169c')
        self.assertEqual(
            result[1].toPython(),
            'http://www.drugbank.ca/drugs/DB00619')
        self.assertEqual(result[2].toPython(), 'http://ohsu.edu/cgd/87752f6c')
    # End-to-end conversion of one fully-detailed association dict into a
    # GA4GH FeaturePhenotypeAssociation protocol object.
    def testToGA4GH(self):
        sample_associations = {
            u'environment_label': u'sunitinib',
            u'feature_label': u'RET M918T missense mutation',
            u'evidence_type': u'http://purl.obolibrary.org/obo/ECO_0000033',
            u'feature': {
                u'http://purl.obolibrary.org/obo/GENO_0000408':
                    u'http://www.ncbi.nlm.nih.gov/gene/5979',
                u'http://purl.obolibrary.org/obo/GENO_reference_amino_acid':
                    u'M',
                u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type':
                    u'http://purl.obolibrary.org/obo/SO_0001059',
                u'http://biohackathon.org/resource/faldo#location':
                    u'http://www.monarchinitiative.org/_918918UniProtKB:'
                    'P07949#P07949-1Region',
                u'http://purl.obolibrary.org/obo/GENO_reference_nucleotide':
                    u'T',
                u'http://purl.obolibrary.org/obo/'
                'GENO_results_in_amino_acid_change':
                    u'T',
                u'http://purl.obolibrary.org/obo/RO_0002200':
                    u'http://ohsu.edu/cgd/3774b1d2',
                u'http://purl.obolibrary.org/obo/RO_0002205':
                    u'http://www.ncbi.nlm.nih.gov/CCDS/CcdsBrowse.cgi?'
                    'REQUEST=CCDS&DATA=7200.1',
                u'http://purl.obolibrary.org/obo/GENO_altered_nucleotide':
                    u'C',
                u'http://www.w3.org/2000/01/rdf-schema#label':
                    u'RET M918T missense mutation',
                u'id': u'http://cancer.sanger.ac.uk/cosmic/mutation/'
                    'overview?id=965',
                u'http://www.w3.org/2002/07/owl#sameAs':
                    u'http://www.ncbi.nlm.nih.gov/SNP/74799832',
            },
            u'evidence': u'http://ohsu.edu/cgd/sensitivity',
            u'environment': {
                u'http://purl.obolibrary.org/obo/RO_0002606':
                    u'http://ohsu.edu/cgd/71fe9f0f',
                u'http://www.w3.org/2000/01/rdf-schema#subClassOf':
                    u'http://purl.obolibrary.org/obo/CHEBI_23888',
                u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type':
                    u'http://www.w3.org/2002/07/owl#Class',
                u'http://www.w3.org/2000/01/rdf-schema#label': u'sunitinib',
                u'id': u'http://www.drugbank.ca/drugs/DB01268',
            },
            u'sources':
                u'http://www.ncbi.nlm.nih.gov/pubmed/21470995|'
                'http://www.ncbi.nlm.nih.gov/pubmed/21470995',
            u'phenotype': {
                u'http://purl.obolibrary.org/obo/BFO_0000159':
                    u'http://ohsu.edu/cgd/sensitivity',
                u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type':
                    u'http://purl.obolibrary.org/obo/DOID_3969',
                u'http://www.w3.org/2000/01/rdf-schema#label':
                    u'Papillary thyroid carcinoma with sensitivity to therapy',
                u'id': u'http://ohsu.edu/cgd/30ebfd1a',
            },
            u'phenotype_label':
                u'Papillary thyroid carcinoma with sensitivity to therapy',
            u'id': u'http://ohsu.edu/cgd/fe484b5c',
            u'association': u'http://ohsu.edu/cgd/fe484b5c',
        }
        result = self.phenotypeAssocationSet._toGA4GH(sample_associations)
        self.assertEqual(
            result.__class__.__name__, 'FeaturePhenotypeAssociation')
        fpa_dict = protocol.toJsonDict(result)
        description = 'Association: genotype:[RET M918T missense mutation]' \
                      ' phenotype:[Papillary thyroid carcinoma with ' \
                      'sensitivity to therapy] environment:[sunitinib]' \
                      ' evidence:[sensitivity] publications:' \
                      '[http://www.ncbi.nlm.nih.gov/pubmed/21470995|' \
                      'http://www.ncbi.nlm.nih.gov/pubmed/21470995]'
        self.assertEqual(fpa_dict['description'], description)
        self.assertIn('featureIds', fpa_dict.keys())
        self.assertIn('evidence', fpa_dict.keys())
        self.assertIn('environmentalContexts', fpa_dict.keys())
        self.assertEqual(len(fpa_dict['featureIds']), 1)
        self.assertEqual(len(fpa_dict['evidence']), 1)
        self.assertEqual(len(fpa_dict['environmentalContexts']), 1)
1d74781886368ff345a19968c4cc9756cfcc6038 | 341 | pyde | Python | processing/Mod.12/sketch_12_1_l88/sketch_12_1_l88.pyde | nanam0rgana/2019-fall-polytech-cs | 1a31acb3cf22edc930318dec17324b05dd7788d5 | [
"MIT"
] | null | null | null | processing/Mod.12/sketch_12_1_l88/sketch_12_1_l88.pyde | nanam0rgana/2019-fall-polytech-cs | 1a31acb3cf22edc930318dec17324b05dd7788d5 | [
"MIT"
] | null | null | null | processing/Mod.12/sketch_12_1_l88/sketch_12_1_l88.pyde | nanam0rgana/2019-fall-polytech-cs | 1a31acb3cf22edc930318dec17324b05dd7788d5 | [
"MIT"
] | null | null | null | add_library('video')
| 15.5 | 40 | 0.56305 | add_library('video')
def setup():
size(640, 480)
smooth()
background(0)
noStroke()
video = Capture(this, width, height)
video.start()
global video
def draw():
global video
if video.available():
video.read()
pushMatrix()
scale(-1, 1)
image(video, -width, 0)
popMatrix()
| 269 | 0 | 50 |
b0642b784353a4b4d014f9ce73d02931abf6f082 | 7,018 | py | Python | model.py | ashikagah/CarND_behavioral_cloning | dc2bd8396fe808613eec1e7be6f43af1739d7451 | [
"MIT"
] | null | null | null | model.py | ashikagah/CarND_behavioral_cloning | dc2bd8396fe808613eec1e7be6f43af1739d7451 | [
"MIT"
] | null | null | null | model.py | ashikagah/CarND_behavioral_cloning | dc2bd8396fe808613eec1e7be6f43af1739d7451 | [
"MIT"
] | null | null | null | # Added recovery lap data - counter-steering angle 1.0
#import os
import csv
#import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, SpatialDropout2D, ELU
from keras.layers import Conv2D, MaxPooling2D, Cropping2D
from keras.layers.core import Lambda
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
from keras.optimizers import Adam
## Load udacity sample data
# Each CSV row: center/left/right image paths, steering, throttle, brake, speed.
with open('./data_udacity_recovery/driving_log_udacity.csv') as csvfile:
    samples = [row for row in csv.reader(csvfile)]
samples = samples[1:]  # Remove header
num_samples = len(samples)
## Add left camera data
# BUG FIX: the original loops aliased `samples_addleft = samples`, mutated
# each row IN PLACE and appended the very same list object back onto the
# dataset -- destroying the center-camera rows and appending duplicate
# references. Build independent copies so center, left and right rows all
# survive, as the comments intend.
left_rows = []
for row in samples[:num_samples]:
    left_row = list(row)
    left_row[0] = './data_udacity_recovery/IMG/' + row[1].split('/')[-1]
    left_row[3] = float(row[3]) + 0.25  # counter-steering angle
    left_rows.append(left_row)
## Add right camera data
right_rows = []
for row in samples[:num_samples]:
    right_row = list(row)
    right_row[0] = './data_udacity_recovery/IMG/' + row[2].split('/')[-1]
    right_row[3] = float(row[3]) - 0.25  # counter-steering angle
    right_rows.append(right_row)
samples = samples + left_rows + right_rows
## Load recovery lap data - left lane
# BUG FIX: the original computed the angle from `r`, a stale loop variable
# left over from the camera-augmentation loops, so every recovery row got
# the same angle. Use each CSV row's own recorded steering value.
with open('./data_udacity_recovery/driving_log_recovery_left.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        line[3] = float(line[3]) + 1.  # counter-steering angle
        samples.append(line)
## Load recovery lap data - right lane
with open('./data_udacity_recovery/driving_log_recovery_right.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        line[3] = float(line[3]) - 1.  # counter-steering angle
        samples.append(line)
N_orig = len(samples)
# Steering angles of the full dataset, for the histogram below.
angle_orig = [float(row[3]) for row in samples]
print("Sample size (Original): ",N_orig)
## Cull sample data with low steering angles
# Keep every sample with |angle| > 0.05; keep only ~10% of the rest
# (randint(10) > 8 succeeds only when the draw is 9).
samples_cull = []
for row in samples:
    if abs(float(row[3])) > .05:
        samples_cull.append(row)
    elif np.random.randint(10) > 8:
        samples_cull.append(row)
samples = samples_cull
N_cull = len(samples)
angle_cull = [float(row[3]) for row in samples]
print("Sample size (Culled): ",N_cull)
# Frequency distribution of steering angles
def show_angle_histogram(angles):
    """Display a 25-bin histogram of steering angles over [-1, 1]."""
    counts, edges = np.histogram(angles, bins=25, range=(-1, 1))
    bar_width = 0.7 * (edges[1] - edges[0])
    centers = (edges[:-1] + edges[1:]) / 2
    plt.bar(centers, counts, align='center', width=bar_width)
    plt.show()
show_angle_histogram(angle_orig)  # Original data
show_angle_histogram(angle_cull)  # Culled data
## Split samples into training (80%) and test sets (20%)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
## Define generator
# BUG FIX: `generator` and `resize_image` were referenced below but never
# defined in this copy of the script (NameError at runtime); their
# definitions are restored here.
def generator(samples, batch_size):
    """Yield shuffled (images, angles) batches of `batch_size` forever."""
    num_samples = len(samples)
    while 1:  # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # Load center camera data
                name = './data_udacity_recovery/IMG/'+batch_sample[0].split('/')[-1]
                image = mpimg.imread(name)  # mpimg and opencv read images differently
                angle = float(batch_sample[3])
                images.append(image)
                angles.append(angle)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield shuffle(X_train, y_train)
## Define image resizing function to fit Comma.ai model
def resize_image(image):
    """Resize frames to 40x160 inside the graph (needed when drive.py reloads)."""
    import tensorflow as tf  # required here so the saved model can be loaded in drive.py
    return tf.image.resize_images(image, (40, 160))
## Compile model using generator
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
## Comma.ai model
# The architecture is declared once as an ordered list and added layer by
# layer: crop -> resize -> normalize -> 3 conv layers -> 3 dense layers.
_layers = [
    # Crop 70 pixels from the top and 25 from the bottom of each frame.
    Cropping2D(cropping=((70, 25), (0, 0)),
               input_shape=(160, 320, 3), data_format="channels_last"),
    Lambda(resize_image),                 # Resize image
    Lambda(lambda x: (x / 127.5) - 1.),   # Normalize intensity to [-1, 1]
    Conv2D(16, (8, 8), strides=(4, 4), padding="same", activation="elu"),
    Conv2D(32, (5, 5), strides=(2, 2), padding="same", activation="elu"),
    Conv2D(64, (5, 5), strides=(2, 2), padding="same"),
    Flatten(),
    Dropout(.5),
    ELU(),
    Dense(512),
    Dropout(.5),
    ELU(),
    Dense(50),
    ELU(),
    Dense(1),
]
model = Sequential()
for _layer in _layers:
    model.add(_layer)
model.compile(loss='mse', optimizer=Adam(lr=0.0001), metrics=['accuracy'])
print("Model summary:\n", model.summary())
history_object = model.fit_generator(train_generator,
                                     steps_per_epoch=len(train_samples),
                                     validation_data=validation_generator,
                                     validation_steps=len(validation_samples),
                                     epochs=20, verbose=1)
## print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
## Save model
# Architecture is written as JSON and the full model (weights + config)
# as HDF5 so drive.py can reload it.
name = '20170401_model_test'
with open(name + '.json', 'w') as output:
    output.write(model.to_json())
model.save(name + '.h5')
print("Saved model to disk")
# python drive.py 20170401_model_test.h5 run01
# python video.py run01
#import os
import csv
#import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, SpatialDropout2D, ELU
from keras.layers import Conv2D, MaxPooling2D, Cropping2D
from keras.layers.core import Lambda
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
from keras.optimizers import Adam
## Load udacity sample data
samples = []
with open('./data_udacity_recovery/driving_log_udacity.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
samples = samples[1:] # Remove header
num_samples = len(samples)
## Add left camera data
samples_addleft = samples
for i in range(0, num_samples):
r=samples[i]
left_name = './data_udacity_recovery/IMG/'+r[1].split('/')[-1]
left_angle = float(r[3])+0.25 # counter-steering angle
r[0]=left_name
r[3]=left_angle
samples_addleft.append(r)
samples = samples_addleft
## Add right camera data
samples_addright = samples
for i in range(0, num_samples):
r=samples[i]
right_name = './data_udacity_recovery/IMG/'+r[2].split('/')[-1]
right_angle = float(r[3])-0.25 # counter-steering angle
r[0]=right_name
r[3]=right_angle
samples_addright.append(r)
samples = samples_addright
## Load recovery lap data - left lane
# BUG FIX: the original read the angle from `r`, a stale variable left over
# from the earlier camera loops, so every recovery row received the same
# angle. Use each CSV row's own recorded steering value instead.
with open('./data_udacity_recovery/driving_log_recovery_left.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        line[3] = float(line[3]) + 1.  # counter-steering angle
        samples.append(line)
## Load recovery lap data - right lane
with open('./data_udacity_recovery/driving_log_recovery_right.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        line[3] = float(line[3]) - 1.  # counter-steering angle
        samples.append(line)
N_orig = len(samples)
angle_orig = []
for i in range(0, N_orig):
r = samples[i]
angle_orig.append(float(r[3]))
print("Sample size (Original): ",N_orig)
## Cull sample data with low steering angles
samples_cull = []
for i in range(0, N_orig):
r = samples[i]
if abs(float(r[3]))>.05:
samples_cull.append(r)
elif np.random.randint(10) > 8: # Remove 80% of sample data with low steering angles
samples_cull.append(r)
samples = samples_cull
N_cull = len(samples)
angle_cull = []
for i in range(0, N_cull):
r = samples[i]
angle_cull.append(float(r[3]))
print("Sample size (Culled): ",N_cull)
# Frequency distribution of steering angles
hist, bins = np.histogram(angle_orig, bins=25, range=(-1,1))
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show() # Original data
hist, bins = np.histogram(angle_cull, bins=25, range=(-1,1))
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show() # Culled data
## Split samples into training (80%) and test sets (20%)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
## Define generator
def generator(samples, batch_size):
    """Yield an endless stream of shuffled (images, angles) training batches.

    Re-shuffles the sample list on every full pass so successive epochs see
    the data in a different order; images are loaded lazily per batch so the
    whole data set never has to fit in memory.
    """
    total = len(samples)
    while True:  # Keras generators are expected to loop forever
        shuffle(samples)
        for start in range(0, total, batch_size):
            batch = samples[start:start + batch_size]
            batch_images = []
            batch_angles = []
            for row in batch:
                # Center-camera frame; mpimg reads RGB (OpenCV would be BGR)
                img_path = './data_udacity_recovery/IMG/' + row[0].split('/')[-1]
                batch_images.append(mpimg.imread(img_path))
                batch_angles.append(float(row[3]))
            # Shuffle within the batch as well before handing it to Keras.
            yield shuffle(np.array(batch_images), np.array(batch_angles))
## Define image resizing function to fit Comma.ai model
def resize_image(image):
    # Resize frames to 40x160 (HxW), the input size the Comma.ai layers expect.
    # A named function (not a bare lambda) so the saved model can be reloaded.
    import tensorflow as tf # This import is required here otherwise the model cannot be loaded in drive.py
    return tf.image.resize_images(image, (40,160))
## Compile model using generator
train_generator = generator(train_samples,batch_size=32)
validation_generator = generator(validation_samples,batch_size=32)
## Comma.ai model
# Camera frames are 160x320x3; cropping removes sky and car hood, then the
# frames are resized to 40x160 and normalized to [-1, 1].
model = Sequential()
model.add(Cropping2D(cropping=((70,25),(0,0)), # Crop 70 pixels from the top and 25 from the bottom
                     input_shape=(160,320,3),data_format="channels_last"))
model.add(Lambda(resize_image))# Resize image
model.add(Lambda(lambda x: (x/127.5) - 1.)) # Normalize signal intensity
model.add(Conv2D(16,(8,8),strides=(4,4),padding="same",activation="elu"))# Conv layer 1
model.add(Conv2D(32,(5,5),strides=(2,2),padding="same",activation="elu"))# Conv layer 2
model.add(Conv2D(64,(5,5),strides=(2,2),padding="same"))# Conv layer 3
model.add(Flatten())
model.add(Dropout(.5))
model.add(ELU())
model.add(Dense(512))# Fully connected layer 1
model.add(Dropout(.5))
model.add(ELU())
model.add(Dense(50))# Fully connected layer 2
model.add(ELU())
model.add(Dense(1))
# Single regression output (steering angle); note 'accuracy' is reported but
# is not a meaningful metric for an MSE regression.
model.compile(loss='mse', optimizer=Adam(lr=0.0001),metrics=['accuracy'])
print("Model summary:\n", model.summary())
## Train the model.
# BUG FIX: fit_generator counts steps in *batches*, not samples. The original
# passed len(train_samples)/len(validation_samples), which made every epoch
# iterate the data ~32x (once per sample, each step consuming a whole batch).
batch_size = 32
history_object = model.fit_generator(
    train_generator,
    steps_per_epoch=(len(train_samples) + batch_size - 1) // batch_size,
    validation_data=validation_generator,
    validation_steps=(len(validation_samples) + batch_size - 1) // batch_size,
    epochs=20, verbose=1)
## print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
## Save model
# Architecture goes to <name>.json; the full model (architecture + weights)
# goes to <name>.h5 for use by drive.py.
name = '20170401_model_test'
with open(name + '.json', 'w') as output:
    output.write(model.to_json())
model.save(name + '.h5')
print("Saved model to disk")
# python drive.py 20170401_model_test.h5 run01
# python video.py run01
164c805f2bb90c14d224c3d80f1df5b784d5f218 | 238 | py | Python | properties_and_methods/base_wrapper.py | Sunchasing/python-common | bc9f11fe4585ef9abca7006c0bf64b11062742fd | [
"Apache-2.0"
] | 5 | 2021-08-15T23:04:25.000Z | 2021-09-06T18:32:53.000Z | properties_and_methods/base_wrapper.py | Sunchasing/python-common | bc9f11fe4585ef9abca7006c0bf64b11062742fd | [
"Apache-2.0"
] | null | null | null | properties_and_methods/base_wrapper.py | Sunchasing/python-common | bc9f11fe4585ef9abca7006c0bf64b11062742fd | [
"Apache-2.0"
] | 1 | 2022-01-28T13:12:23.000Z | 2022-01-28T13:12:23.000Z | from typing import Callable
| 23.8 | 42 | 0.680672 | from typing import Callable
class BaseDecorator:
    """Base class for class-based decorators.

    Stores the wrapped callable, copies its ``__name__``/``__doc__`` onto the
    decorator instance, and records the wrapper on the callable itself via
    ``__wrapped__`` (mirroring what :func:`functools.wraps` does).
    """

    def __init__(self, wrapped: Callable):
        self._func = wrapped
        for meta in ("__name__", "__doc__"):
            setattr(self, meta, getattr(wrapped, meta))
        wrapped.__wrapped__ = self
| 161 | -1 | 49 |
b797b74c40d1212ca834de3021f0067716ed947e | 5,361 | py | Python | MOOSE_web/view/sentiment.py | XYChang-cxy/MOOSE | 6cca5a1ed34451e6c525ca480b59ce504bba3f49 | [
"Apache-2.0"
] | 1 | 2021-09-02T09:32:41.000Z | 2021-09-02T09:32:41.000Z | MOOSE_web/view/sentiment.py | XYChang-cxy/MOOSE | 6cca5a1ed34451e6c525ca480b59ce504bba3f49 | [
"Apache-2.0"
] | null | null | null | MOOSE_web/view/sentiment.py | XYChang-cxy/MOOSE | 6cca5a1ed34451e6c525ca480b59ce504bba3f49 | [
"Apache-2.0"
] | 3 | 2021-08-30T08:34:51.000Z | 2021-09-30T13:14:31.000Z | from django.shortcuts import render,redirect
from django.http import HttpResponse
from model.community import *
from model.oss import *
from view.common import *
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from django.db.models import Sum, Count
from operator import itemgetter
from django.http import JsonResponse
from influxdb_metrics.utils import query
from influxdb import InfluxDBClient
import time
import datetime
import statsmodels.api as sm
import pandas as pd
import math
import numpy as np
client = InfluxDBClient('106.52.93.154', 8086, 'moose', 'moose', 'moose')
| 48.297297 | 159 | 0.709756 | from django.shortcuts import render,redirect
from django.http import HttpResponse
from model.community import *
from model.oss import *
from view.common import *
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from django.db.models import Sum, Count
from operator import itemgetter
from django.http import JsonResponse
from influxdb_metrics.utils import query
from influxdb import InfluxDBClient
import time
import datetime
import statsmodels.api as sm
import pandas as pd
import math
import numpy as np
client = InfluxDBClient('106.52.93.154', 8086, 'moose', 'moose', 'moose')
def sentiment(request):
    """Render the sentiment dashboard for one community.

    Builds two monthly time series for the community selected by the ``cid``
    GET parameter -- per-type sentiment counts (debate, bug, confuse, ...)
    and overall positive/negative/neutral counts -- and passes them to the
    ``sentiment.html`` template as comma-separated strings (the format the
    front-end charting code consumes).
    """
    extra_info = dict()
    uid = request.session['user_id']
    cid = request.GET.get('cid')
    # Navigation side-bar data shared by all community pages.
    community = get_nav_list(uid, cid)
    extra_info.update(community)
    sentiment_type_yearmonth = MOOSEStatisticSentimentType.objects.filter(community_id=int(cid))
    sentiment_type_date = ''
    sentiment_type_debate = ''
    sentiment_type_bug = ''
    sentiment_type_confuse = ''
    sentiment_type_apologize = ''
    sentiment_type_third_party = ''
    sentiment_type_doc_standard = ''
    sentiment_type_work = ''
    sentiment_type_comment_id = ''
    # Sentiment type breakdown, one value per month (trailing comma included).
    if sentiment_type_yearmonth.count() > 0:
        for per_sentiment_type_yearmonth in sentiment_type_yearmonth:
            sentiment_type_date += per_sentiment_type_yearmonth.yearmonth + ','
            #sentiment_type_debate += "{value:"+str(per_sentiment_type_yearmonth.debate)+",commit_id:'"+str(per_sentiment_type_yearmonth.comment_id)+"'}" + ','
            sentiment_type_debate += str(per_sentiment_type_yearmonth.debate) + ','
            sentiment_type_comment_id += str(per_sentiment_type_yearmonth.comment_id) + ','
            sentiment_type_bug += str(per_sentiment_type_yearmonth.bug) + ','
            sentiment_type_confuse += str(per_sentiment_type_yearmonth.confuse) + ','
            sentiment_type_apologize += str(per_sentiment_type_yearmonth.apologize) + ','
            sentiment_type_third_party += str(per_sentiment_type_yearmonth.third_party) + ','
            sentiment_type_doc_standard += str(per_sentiment_type_yearmonth.doc_standard) + ','
            sentiment_type_work += str(per_sentiment_type_yearmonth.work) + ','
    # Fetch overall sentiment analysis results (pos/neg/neutral per month).
    sentiment_yearmonth = MOOSEStatisticSentiment.objects.filter(community_id=int(cid))
    sentiment_date = ''
    sentiment_pos = ''
    sentiment_neg = ''
    sentiment_neu = ''
    sentiment_agv = ''
    if sentiment_yearmonth.count() > 0:
        for per_sentiment_yearmonth in sentiment_yearmonth:
            sentiment_date += per_sentiment_yearmonth.yearmonth + ','
            sentiment_pos += str(per_sentiment_yearmonth.pos) + ','
            sentiment_neg += str(per_sentiment_yearmonth.neg) + ','
            sentiment_neu += str(per_sentiment_yearmonth.neu) + ','
            # Average sentiment in [-1, 1]: (pos - neg) / total, 0 when the
            # month has no classified comments at all.
            if (per_sentiment_yearmonth.pos + per_sentiment_yearmonth.neg + per_sentiment_yearmonth.neu) != 0:
                sentiment_agv += str(round((per_sentiment_yearmonth.pos - per_sentiment_yearmonth.neg) / (
                        per_sentiment_yearmonth.pos + per_sentiment_yearmonth.neg + per_sentiment_yearmonth.neu),
                                           2)) + ','
            else:
                sentiment_agv += str(0) + ','
    extra_info.update({'sentiment_date': sentiment_date})
    extra_info.update({'sentiment_pos': sentiment_pos})
    extra_info.update({'sentiment_neg': sentiment_neg})
    extra_info.update({'sentiment_neu': sentiment_neu})
    extra_info.update({'sentiment_agv': sentiment_agv})
    extra_info.update({'sentiment_type_date': sentiment_type_date})
    extra_info.update({'sentiment_type_comment_id': sentiment_type_comment_id})
    extra_info.update({'sentiment_type_debate': sentiment_type_debate})
    extra_info.update({'sentiment_type_bug': sentiment_type_bug})
    extra_info.update({'sentiment_type_confuse': sentiment_type_confuse})
    extra_info.update({'sentiment_type_apologize': sentiment_type_apologize})
    extra_info.update({'sentiment_type_third_party': sentiment_type_third_party})
    extra_info.update({'sentiment_type_doc_standard': sentiment_type_doc_standard})
    extra_info.update({'sentiment_type_work': sentiment_type_work})
    # 'path' selects which nav entry is highlighted in the template.
    extra_info.update({'path': 7})
    return render(request, 'sentiment.html', extra_info)
def sentiment_comment(request):
    """AJAX endpoint: look up comment details for a '|'-separated id list.

    Reads ``comment_id`` from POST, fetches each matching
    MOOSEStatisticSentimentComment row and returns a JSON array of dicts.
    """
    comment_id = request.POST.get('comment_id')
    comment_ids = comment_id.split('|')
    comment_info = []
    for per_comment_id in comment_ids:
        comment_dict = dict()
        comment = MOOSEStatisticSentimentComment.objects.filter(comment_id=int(per_comment_id))
        if comment.count() > 0:
            for per_comment in comment:
                comment_dict["body"] = per_comment.comment_body
                comment_dict["user_name"] = per_comment.user_name
                comment_dict["user_id"] = per_comment.user_id
                comment_dict["oss_name"] = per_comment.oss_name
                comment_dict["oss_id"] = per_comment.oss_id
                comment_dict["issue_number"] = per_comment.issue_number
                comment_dict["create_time"] = per_comment.create_time
        # NOTE(review): an empty dict is appended for ids with no match --
        # confirm the front-end relies on positional alignment with the
        # requested ids before changing this.
        comment_info.append(comment_dict)
    comment_json = json.dumps(comment_info, ensure_ascii=False)
    return HttpResponse(comment_json, content_type='application/json')
1f9624bd11aeebbc9e2f7ff78f60288187a5fd2b | 4,501 | py | Python | vu_meter.py | lemariva/wipy2.0-MIC | ca43d19398e46ed45b8bdad7be529e774dae41cb | [
"Apache-2.0"
] | 1 | 2017-12-31T16:21:17.000Z | 2017-12-31T16:21:17.000Z | vu_meter.py | lemariva/wipy2.0-MIC | ca43d19398e46ed45b8bdad7be529e774dae41cb | [
"Apache-2.0"
] | null | null | null | vu_meter.py | lemariva/wipy2.0-MIC | ca43d19398e46ed45b8bdad7be529e774dae41cb | [
"Apache-2.0"
] | null | null | null | #Copyright [2017] [Mauro Riva <lemariva@mail.com> <lemariva.com>]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
import math as m
import utime
from machine import ADC
from ws2812 import WS2812
| 35.164063 | 163 | 0.574761 | #Copyright [2017] [Mauro Riva <lemariva@mail.com> <lemariva.com>]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
import math as m
import utime
from machine import ADC
from ws2812 import WS2812
class vu_meter:
    """LED VU meter: samples the audio level on an ADC pin and displays it on
    a WS2812 LED strip (MicroPython, WiPy/LoPy boards).

    LED colours run from green (start of strip) to red (end of strip);
    brightness is scaled by ``ledPower`` (percent).
    """
    # Class-level default; shadowed by the instance list built in init_leds().
    ledsColors = []
    def __init__(self, ledNumber=144, ledPower = 100, adcWindow = 1500, adcMax = 100, adcPin = 'P13', pinLEDs = 'P22'):
        # ledNumber: LEDs on the strip; ledPower: brightness in percent;
        # adcWindow: samples per RMS measurement; adcMax: initial full-scale
        # value for normalization; adcPin/pinLEDs: board pin names.
        self.ledPower = ledPower
        self.ledNumber = ledNumber
        self.pinLeds = pinLEDs
        self.adcPin = adcPin
        self.adcWindow = adcWindow
        self.ledsColors = []
        self.adcIn = 0.0
        self.adcMax = adcMax
        self.adcMaxDynamic = False
        # initialize ADC and the LED chain
        self.init_adc()
        self.init_leds()
    def init_adc(self):
        # Open ADC block 0 and bind the configured channel pin; the DC offset
        # estimate (adcMean) stays 0 until zero_calibration() is run.
        self.adc = ADC(0)
        self.adcUnit = self.adc.channel(pin=self.adcPin)
        self.adcMean = 0
    def init_leds(self):
        # Precompute the green->red gradient, then push it once to the strip.
        self.ledsColors = []
        for x in range(0, self.ledNumber):
            color = self.color_vu_meter (x)
            self.ledsColors.append(color)
        self.ledChain = WS2812( ledNumber=self.ledNumber, brightness=self.ledPower, dataPin=self.pinLeds ) # dataPin is for LoPy board only
        self.ledChain.show( self.ledsColors )
    def test_leds(self):
        # Visual self-test: rotate the gradient once around the strip, then
        # blank all LEDs.
        testData = self.ledsColors
        for x in range(0, self.ledNumber):
            testData = testData[1:] + testData[0:1]
            self.ledChain.show( testData )
        self.ledChain.show([])
    def lighter(self, color, percent):
        # Scale an [r, g, b] colour to `percent` (0..100) of its intensity;
        # each channel effectively becomes channel * percent/100.
        percent = percent / 100
        if(percent == 1):
            return color
        if(percent == 0):
            return ([0, 0, 0])
        #if(percent < 0.65): # driver not working ok with percent under 0.65
        #    percent = 0.65
        rcolor = color[0] - color[0] * (1-percent)
        gcolor = color[1] - color[1] * (1-percent)
        bcolor = color[2] - color[2] * (1-percent)
        newcolor = ([(rcolor), (gcolor), (bcolor)])
        return newcolor
    def color_vu_meter(self, position):
        # Linear green-to-red gradient along the strip, dimmed to ledPower.
        rcolor = (255 * position) / self.ledNumber
        gcolor = (255 * (self.ledNumber - position)) / self.ledNumber
        bcolor= 0
        newcolor = self.lighter([(rcolor), (gcolor), (bcolor)], self.ledPower)
        return newcolor
    def adc_max_dynamic(self, state = True, adcMax = 100):
        # Enable/disable tracking of the running ADC maximum (auto-ranging)
        # and reset the current full-scale value.
        self.adcMaxDynamic = state
        self.adcMax = adcMax
        return self.adcMaxDynamic
    def adc_max(self):
        # Current full-scale value used to normalize the RMS power.
        return self.adcMax
    def zero_calibration(self):
        # Average adcWindow samples (taken in silence) to estimate DC offset.
        self.adcMean = 0
        for y in range(0, self.adcWindow):
            self.adcMean = self.adcMean + self.adcUnit.value()
        self.adcMean = self.adcMean / self.adcWindow
        return self.adcMean
    def update_rms(self):
        # Compute the RMS of adcWindow offset-corrected ADC samples, update
        # the (optionally dynamic) maximum and the 0..1 normalizedPower.
        # Returns [elapsed_ms, rms_power].
        t1 = utime.ticks_ms()
        power = 0
        self.audioPower = 0
        for x in range(0, self.adcWindow):
            adc_value = self.adcUnit.value() - self.adcMean
            power = power + m.pow(adc_value, 2)
        power = (m.sqrt(power / self.adcWindow))
        self.audioPower = power
        t2 = utime.ticks_ms()
        time_elapsed = t2 - t1
        if(self.adcMaxDynamic):
            if(self.adcMax < power):
                self.adcMax = power
        self.normalizedPower = power / self.adcMax
        #20 * log10(sqrt(sum / count))
        if(self.normalizedPower > 1):
            self.normalizedPower = 1
        return [time_elapsed, power]
    def update_leds(self):
        # Light LEDs proportional to the last normalized power level.
        # NOTE(review): the slice starts at index 1, so LED 0 is never lit --
        # confirm whether [0:leds_count] was intended.
        leds_count = m.floor(self.normalizedPower * self.ledNumber)
        self.ledChain.show( self.ledsColors[1:leds_count] )
| 3,278 | 390 | 23 |
15c7bb60cb2e8ad87a7bda89b5d645f9ccbc4651 | 1,301 | py | Python | data_helper.py | ace18zz/COM3610-Sentiment-Detection-and-Tracking-in-Social-Media-Streams | 466536b7c6bb592ef1461e13e5b3a402819b6398 | [
"MIT"
] | null | null | null | data_helper.py | ace18zz/COM3610-Sentiment-Detection-and-Tracking-in-Social-Media-Streams | 466536b7c6bb592ef1461e13e5b3a402819b6398 | [
"MIT"
] | null | null | null | data_helper.py | ace18zz/COM3610-Sentiment-Detection-and-Tracking-in-Social-Media-Streams | 466536b7c6bb592ef1461e13e5b3a402819b6398 | [
"MIT"
] | null | null | null | '''
Author: your name
Date: 2021-05-17 03:11:15
LastEditTime: 2021-05-18 14:46:09
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /twitterSententAnalyse 2/data_helper.py
'''
#coding:utf-8
from ext_en import *
import sys
stops = loadStops()
X, y = getTxt()
X_feather = []
fw = open('./filter_txt_train_data.txt', 'w')
for x,y in zip(X, y):
text = x.strip('"')
token_words = tokenize(text)
token_words = stem(token_words)
token_words = delete_stopwords(token_words)
token_words = delete_characters(token_words)
token_words = to_lower(token_words)
fw.write(' '.join(token_words)+'\t'+y+'\n')
fw.flush()
| 25.509804 | 71 | 0.554958 | '''
Author: your name
Date: 2021-05-17 03:11:15
LastEditTime: 2021-05-18 14:46:09
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /twitterSententAnalyse 2/data_helper.py
'''
#coding:utf-8
from ext_en import *
import sys
def loadStops():
    """Read the stop-word list (one word per line) from ./stopword.txt."""
    with open('./stopword.txt', encoding='utf-8') as handle:
        return [word.strip() for word in handle]
def getTxt():
    """Load labelled tweets from ./data/<sys.argv[1]>.

    Each line is expected to be tab-separated with the sentiment label in the
    second-to-last field and the tweet text in the last field.

    Returns:
        (X, y): parallel lists of tweet texts and their sentiment labels.
    """
    y = []
    X = []
    with open('./data/{}'.format(sys.argv[1])) as fr:
        for line in fr:
            fields = line.strip().split('\t')
            # Skip malformed rows (fewer than two fields) explicitly instead
            # of the original bare `except: pass`, which hid every error.
            if len(fields) < 2:
                continue
            if fields[-2] in ['positive', 'negative', 'neutral']:
                y.append(fields[-2])
                X.append(fields[-1])
    return X, y
stops = loadStops()
X, y = getTxt()
X_feather = []  # NOTE(review): never used below -- kept for compatibility
# Preprocess every tweet and write "<tokens>\t<label>" lines to disk.
# The file handle is now closed via `with` (the original left it open), and
# the loop variables are renamed: the original `for x, y in zip(X, y)`
# rebound the label list `y` on every iteration.
with open('./filter_txt_train_data.txt', 'w') as fw:
    for raw_text, label in zip(X, y):
        text = raw_text.strip('"')
        token_words = tokenize(text)
        token_words = stem(token_words)
        token_words = delete_stopwords(token_words)
        token_words = delete_characters(token_words)
        token_words = to_lower(token_words)
        fw.write(' '.join(token_words) + '\t' + label + '\n')
        fw.flush()
| 554 | 0 | 50 |
f451d800559ebdde5b088813a307f19c504d1dae | 6,186 | py | Python | satella/configuration/schema/basic.py | piotrmaslanka/satella | bf4ba7a21ad2ac93a366442a2b4574dc5568b87e | [
"MIT"
] | 12 | 2019-12-13T10:17:38.000Z | 2022-01-05T09:01:36.000Z | satella/configuration/schema/basic.py | piotrmaslanka/satella | bf4ba7a21ad2ac93a366442a2b4574dc5568b87e | [
"MIT"
] | 26 | 2016-04-01T11:55:26.000Z | 2021-12-30T17:03:59.000Z | satella/configuration/schema/basic.py | piotrmaslanka/satella | bf4ba7a21ad2ac93a366442a2b4574dc5568b87e | [
"MIT"
] | 1 | 2021-05-31T08:45:22.000Z | 2021-05-31T08:45:22.000Z | import codecs
import os
import re
import typing as tp
from satella.exceptions import ConfigurationValidationError
from .base import Descriptor, ConfigDictValue
from .registry import register_custom_descriptor
@staticmethod
@register_custom_descriptor('bool')
class Boolean(Descriptor):
"""
This value must be a boolean, or be converted to one
"""
BASIC_MAKER = _make_boolean
@register_custom_descriptor('int')
class Integer(Descriptor):
"""
This value must be an integer, or be converted to one
"""
BASIC_MAKER = int
@register_custom_descriptor('float')
class Float(Descriptor):
"""
This value must be a float, or be converted to one
"""
BASIC_MAKER = float
@register_custom_descriptor('str')
class String(Descriptor):
"""
This value must be a string, or be converted to one
"""
BASIC_MAKER = str
class FileObject:
"""
What you get for values in schema of :class:`~satella.configuration.schema.File`.
This object is comparable and hashable, and is equal to the string of it's path
"""
__slots__ = 'path',
def get_value(self, encoding: tp.Optional[str] = None) -> tp.Union[str, bytes]:
"""
Read in the entire file into memory
:param encoding: optional encoding to apply. If None given, bytes will be returned
:return: file contents
"""
with open(self.path, 'rb') as f_in:
data = f_in.read()
if encoding:
return data.decode(encoding)
else:
return data
def open(self, mode: str):
"""
Open the file in specified mode
:param mode: mode to open the file in
:return: file handle
"""
return open(self.path, mode)
class DirectoryObject:
"""
What you get for values in schema of :class:`~satella.configuration.schema.Directory`.
This object is comparable and hashable, and is equal to the string of it's path
"""
__slots__ = 'path',
def get_files(self) -> tp.Iterable[str]:
"""
Return a list of files inside this directory
:return:
"""
return os.listdir(self.path)
@staticmethod
@register_custom_descriptor('file')
class File(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
an instance of :class:`~satella.configuration.schema.basic.FileObject`
"""
BASIC_MAKER = _make_file
@register_custom_descriptor('file_contents')
class FileContents(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
the contents of this file, applied with encoding (if given). By default, bytes will be read in
"""
@staticmethod
@register_custom_descriptor('dir')
class Directory(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
an instance of :class:`~satella.configuration.schema.basic.FileObject`
"""
BASIC_MAKER = _make_directory
class Regexp(String):
"""
Base class for declaring regexp-based descriptors. Overload it's attribute REGEXP. Use as
following:
>>> class IPv6(Regexp):
>>> REGEXP = '(([0-9a-f]{1,4}:)' ...
"""
__slots__ = ('regexp',)
REGEXP = r'.*'
@register_custom_descriptor('ipv4')
class IPv4(Regexp):
"""
This must be a valid IPv4 address (no hostnames allowed)
"""
REGEXP = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
| 26.323404 | 98 | 0.606046 | import codecs
import os
import re
import typing as tp
from satella.exceptions import ConfigurationValidationError
from .base import Descriptor, ConfigDictValue
from .registry import register_custom_descriptor
@staticmethod
def _make_boolean(v: tp.Any) -> bool:
    """Coerce *v* to a bool.

    Strings are accepted only as case-insensitive TRUE/FALSE; any other
    string raises. Non-strings are passed through :class:`bool`.

    Kept as a module-level ``staticmethod`` on purpose: the function is later
    stored as a class attribute (``Boolean.BASIC_MAKER``) and must not become
    a bound method there.

    :raises ConfigurationValidationError: on an unrecognized string
    """
    if not isinstance(v, str):
        return bool(v)
    normalized = v.upper()
    if normalized == 'TRUE':
        return True
    if normalized == 'FALSE':
        return False
    raise ConfigurationValidationError('Unknown value of "%s" posing to be a bool'
                                       % (v,))
@register_custom_descriptor('bool')
class Boolean(Descriptor):
    """
    This value must be a boolean, or be converted to one
    """
    # Coercion callable the Descriptor machinery applies to the raw value.
    BASIC_MAKER = _make_boolean
@register_custom_descriptor('int')
class Integer(Descriptor):
    """
    This value must be an integer, or be converted to one
    """
    # Coercion callable the Descriptor machinery applies to the raw value.
    BASIC_MAKER = int
@register_custom_descriptor('float')
class Float(Descriptor):
    """
    This value must be a float, or be converted to one
    """
    # Coercion callable the Descriptor machinery applies to the raw value.
    BASIC_MAKER = float
@register_custom_descriptor('str')
class String(Descriptor):
    """
    This value must be a string, or be converted to one
    """
    # Coercion callable the Descriptor machinery applies to the raw value.
    BASIC_MAKER = str
class FileObject:
    """
    What you get for values in schema of :class:`~satella.configuration.schema.File`.

    Instances hash like their path string, but compare equal only to other
    ``FileObject`` instances wrapping the same path.
    """
    __slots__ = 'path',

    def __init__(self, path: str):
        self.path = path

    def __repr__(self):
        return '<File object %s>' % (self.path,)

    def __str__(self):
        return self.path

    def __eq__(self, other) -> bool:
        return isinstance(other, FileObject) and self.path == str(other)

    def __hash__(self) -> int:
        return hash(self.path)

    def get_value(self, encoding: tp.Optional[str] = None) -> tp.Union[str, bytes]:
        """
        Read in the entire file into memory

        :param encoding: optional encoding to apply. If None given, bytes will be returned
        :return: file contents
        """
        with open(self.path, 'rb') as f_in:
            raw = f_in.read()
        return raw.decode(encoding) if encoding else raw

    def open(self, mode: str):
        """
        Open the file in specified mode

        :param mode: mode to open the file in
        :return: file handle
        """
        return open(self.path, mode)
class DirectoryObject:
    """
    What you get for values in schema of :class:`~satella.configuration.schema.Directory`.

    Instances hash like their path string, but compare equal only to other
    ``DirectoryObject`` instances wrapping the same path.
    """
    __slots__ = 'path',

    def __init__(self, path: str):
        self.path = path

    def __repr__(self):
        return '<Directory object %s>' % (self.path,)

    def __str__(self):
        return self.path

    def __eq__(self, other) -> bool:
        return isinstance(other, DirectoryObject) and self.path == str(other)

    def __hash__(self) -> int:
        return hash(self.path)

    def get_files(self) -> tp.Iterable[str]:
        """
        Return a list of files inside this directory

        :return: directory entries, as returned by os.listdir
        """
        return os.listdir(self.path)
@staticmethod
def _make_file(v: str) -> 'FileObject':
    """Check that *v* points to an existing file and wrap it in a FileObject.

    BUG FIX: the return annotation previously said ``bool``, but a
    FileObject is what is actually returned.

    :raises ConfigurationValidationError: if *v* is not an existing file
    """
    if not os.path.isfile(v):
        raise ConfigurationValidationError('Expected to find a file under %s'
                                           % (v,))
    return FileObject(v)
@register_custom_descriptor('file')
class File(Descriptor):
    """
    This value must be a valid path to a file. The value in your schema will be
    an instance of :class:`~satella.configuration.schema.basic.FileObject`
    """
    # Coercion callable: validates existence and wraps the path.
    BASIC_MAKER = _make_file
@register_custom_descriptor('file_contents')
class FileContents(Descriptor):
    """
    This value must be a valid path to a file. The value in your schema will be
    the contents of this file, decoded with *encoding* when one is given
    (otherwise raw bytes are returned). With ``strip_afterwards`` the result
    is stripped of surrounding whitespace.
    """

    def __init__(self, encoding: tp.Optional[str] = None, strip_afterwards: bool = False):
        super().__init__()
        self.encoding = encoding
        self.strip_afterwards = strip_afterwards

    def BASIC_MAKER(self, c: str):
        """Read file *c* and return its (optionally decoded/stripped) contents."""
        if self.encoding:
            with codecs.open(c, 'r', encoding=self.encoding) as f_in:
                content = f_in.read()
        else:
            with open(c, 'rb') as f_in:
                content = f_in.read()
        return content.strip() if self.strip_afterwards else content
@staticmethod
def _make_directory(v: str) -> 'DirectoryObject':
    """Check that *v* is an existing directory and wrap it in a DirectoryObject.

    BUG FIX: the return annotation previously said ``bool``, but a
    DirectoryObject is what is actually returned.

    :raises ConfigurationValidationError: if *v* is not an existing directory
    """
    if not os.path.isdir(v):
        raise ConfigurationValidationError('Expected to find a directory under %s'
                                           % (v,))
    return DirectoryObject(v)
@register_custom_descriptor('dir')
class Directory(Descriptor):
    """
    This value must be a valid path to a directory. The value in your schema will be
    an instance of :class:`~satella.configuration.schema.basic.DirectoryObject`
    """
    # Coercion callable: validates existence and wraps the path.
    BASIC_MAKER = _make_directory
class Regexp(String):
    """
    Base class for declaring regexp-based descriptors. Overload its attribute
    REGEXP (either a pattern string or a precompiled pattern). Use as
    following:

    >>> class IPv6(Regexp):
    >>>     REGEXP = '(([0-9a-f]{1,4}:)' ...
    """
    __slots__ = ('regexp',)
    REGEXP = r'.*'

    def __init__(self):
        super().__init__()
        # REGEXP may be a plain string or an already-compiled pattern;
        # normalize to a compiled pattern in self.regexp.
        if isinstance(self.REGEXP, str):
            self.regexp = re.compile(self.REGEXP)
        else:
            self.regexp = self.REGEXP

    def __call__(self, value: ConfigDictValue) -> str:
        """Validate *value* against the pattern, returning the matched prefix.

        :raises ConfigurationValidationError: if the value does not match
        """
        value = super(Regexp, self).__call__(value)

        match = self.regexp.match(value)
        if not match:
            # BUG FIX: use self.regexp.pattern -- self.REGEXP may be a plain
            # string (e.g. the class default), which has no .pattern.
            raise ConfigurationValidationError('value does not match %s' % (self.regexp.pattern,),
                                               value)

        return match.group(0)

    def __str__(self):
        # BUG FIX: same as above -- self.regexp is always compiled.
        return 'Regexp(%s)' % (self.regexp.pattern,)
@register_custom_descriptor('ipv4')
class IPv4(Regexp):
    """
    This must be a valid IPv4 address (no hostnames allowed)
    """
    # NOTE: validates the dotted-quad shape only -- octet values are not
    # range-checked (e.g. '999.999.999.999' passes).
    REGEXP = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
| 2,236 | 0 | 471 |
36be5a7e634805e69a55d92b7163a3190282a05b | 4,044 | py | Python | cli/sawtooth_cli/network_command/peers.py | RomarQ/sawtooth-core | b900f4f3a2ffda9f6fb32a57fd4eacc41be86e5a | [
"Apache-2.0"
] | 1,530 | 2016-05-07T16:13:52.000Z | 2022-03-30T04:32:48.000Z | cli/sawtooth_cli/network_command/peers.py | RomarQ/sawtooth-core | b900f4f3a2ffda9f6fb32a57fd4eacc41be86e5a | [
"Apache-2.0"
] | 906 | 2016-05-11T16:45:17.000Z | 2022-01-27T19:24:21.000Z | cli/sawtooth_cli/network_command/peers.py | RomarQ/sawtooth-core | b900f4f3a2ffda9f6fb32a57fd4eacc41be86e5a | [
"Apache-2.0"
] | 891 | 2016-05-11T16:45:32.000Z | 2022-03-20T07:25:46.000Z | # Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import json
from sawtooth_cli.network_command.parent_parsers import base_multinode_parser
from sawtooth_cli.network_command.parent_parsers import split_comma_append_args
from sawtooth_cli.network_command.parent_parsers import make_rest_apis
from sawtooth_cli.exceptions import CliException
DOT_FILE = 'peers.dot'
| 28.27972 | 80 | 0.642928 | # Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import json
from sawtooth_cli.network_command.parent_parsers import base_multinode_parser
from sawtooth_cli.network_command.parent_parsers import split_comma_append_args
from sawtooth_cli.network_command.parent_parsers import make_rest_apis
from sawtooth_cli.exceptions import CliException
DOT_FILE = 'peers.dot'
def add_peers_parser(subparsers, parent_parser):
    """Attach the `peers` command (with `list`/`graph` subcommands) to *subparsers*."""
    help_text = 'Shows the peering arrangment of a network'
    parser = subparsers.add_parser(
        'peers',
        help=help_text,
        description='{}.'.format(help_text))

    peers_parsers = parser.add_subparsers(
        title='subcommands',
        dest='peers_command')
    # A subcommand must always be supplied; argparse enforces this.
    peers_parsers.required = True

    _add_list_parser(peers_parsers, parent_parser)
    _add_graph_parser(peers_parsers, parent_parser)
def _add_list_parser(parser, parent_parser):
    """Register the `peers list` subcommand on *parser*."""
    help_text = 'Lists peers for validators with given URLs'
    list_parser = parser.add_parser(
        'list',
        help=help_text,
        description='{}.'.format(help_text),
        parents=[parent_parser, base_multinode_parser()])

    list_parser.add_argument(
        '--pretty', '-p',
        action='store_true',
        help='Pretty-print the results')
def do_peers(args):
    """Dispatch the `peers` subcommand selected on the command line."""
    if args.peers_command == 'list':
        _do_peers_list(args)
    elif args.peers_command == 'graph':
        _do_peers_graph(args)
    else:
        # Unreachable in practice: the sub-parser is marked required.
        # NOTE(review): the message formats args.subcommand, not
        # args.peers_command -- confirm that attribute exists on args.
        raise CliException('Invalid command: {}'.format(args.subcommand))
def _do_peers_list(args):
    """Print each queried validator's peer list to stdout as JSON."""
    urls = split_comma_append_args(args.urls)
    users = split_comma_append_args(args.users)
    clients = make_rest_apis(urls, users)
    # indent=0 still inserts newlines; --pretty switches to 4-space indent.
    print(
        json.dumps(
            _get_peer_endpoints(clients),
            sort_keys=True,
            indent=(4 if args.pretty else 0)
        )
    )
def _get_peer_endpoints(clients):
    """Map each validator's own endpoint to the endpoints of its peers."""
    endpoint_map = {}
    for client in clients:
        status = client.get_status()
        endpoint_map[status['endpoint']] = [
            peer['endpoint'] for peer in status['peers']]
    return endpoint_map
def _add_graph_parser(parser, parent_parser):
    """Register the `peers graph` subcommand on *parser*."""
    help_text = "Generates a file to graph a network's peering arrangement"
    graph_parser = parser.add_parser(
        'graph',
        help=help_text,
        description='{}.'.format(help_text),
        parents=[parent_parser, base_multinode_parser()])

    graph_parser.add_argument(
        '-o', '--output',
        help='The path of the dot file to be produced (defaults to peers.dot)')

    # BUG FIX: the --force help text was left as a 'TODO' placeholder.
    graph_parser.add_argument(
        '--force',
        action='store_true',
        help='Overwrite the output file if it already exists')
def _do_peers_graph(args):
    """Write the network's peering arrangement to a Graphviz .dot file.

    Refuses to overwrite an existing file unless --force was given.
    """
    urls = split_comma_append_args(args.urls)
    users = split_comma_append_args(args.users)
    clients = make_rest_apis(urls, users)

    status_dict = _get_peer_endpoints(clients)

    path = args.output if args.output else DOT_FILE
    if not args.force and os.path.isfile(path):
        raise CliException(
            '{} already exists; '
            'rerun with `--force` to overwrite'.format(path))

    # 'strict' de-duplicates edges; the graph is undirected ('--').
    with open(path, 'w') as dot:
        print(
            'strict graph peers {',
            file=dot)

        for node, peers in status_dict.items():
            for peer in peers:
                print(
                    '    "{}" -- "{}"'.format(node, peer),
                    file=dot)

        print(
            '}',
            file=dot)
496e311b9e9f78f4ce9833d11ae1ce300d6e3c15 | 4,011 | py | Python | pjproject_android/pkgconfig.py | WachterJud/qaul.net_legacy | 9c2be0a38ad6e90fadc0d1150340e37d220997ae | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-07-30T14:44:29.000Z | 2019-07-30T14:44:29.000Z | pjproject_android/pkgconfig.py | WachterJud/qaul.net_legacy | 9c2be0a38ad6e90fadc0d1150340e37d220997ae | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pjproject_android/pkgconfig.py | WachterJud/qaul.net_legacy | 9c2be0a38ad6e90fadc0d1150340e37d220997ae | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | import sys
import os
REMOVE_THESE = ["-I/usr/include", "-I/usr/include/", "-L/usr/lib", "-L/usr/lib/"]
if __name__ == "__main__":
    # Minimal pkg-config replacement (Python 2): collects package names
    # and option flags from argv, then prints the requested
    # --libs/--cflags values for the packages, dependency-sorted.
    pkg_names = []
    pkg_dict = {}
    commands = []
    exist_check = False
    for i in range(1,len(sys.argv)):
        if sys.argv[i][0] == '-':
            cmd = sys.argv[i]
            commands.append(cmd)
            if cmd=='--exists':
                exist_check = True
            elif cmd=="--help":
                print "This is not very helpful, is it"
                sys.exit(0)
            elif cmd=="--version":
                print "0.1"
                sys.exit(0)
        else:
            pkg_names.append(sys.argv[i])
    # Fix search path
    PKG_CONFIG_PATH = os.getenv("PKG_CONFIG_PATH", "").strip()
    if not PKG_CONFIG_PATH:
        PKG_CONFIG_PATH="/usr/local/lib/pkgconfig:/usr/lib/pkgconfig"
    PKG_CONFIG_PATH = PKG_CONFIG_PATH.replace(";", ":")
    # Parse files
    for pkg_name in pkg_names:
        pkg = Pkg(pkg_name)
        if not pkg.parse(PKG_CONFIG_PATH):
            # Exit status 1 signals "package not found", like pkg-config.
            sys.exit(1)
        pkg_dict[pkg_name] = pkg
    if exist_check:
        # --exists: reaching this point means every package was found.
        sys.exit(0)
    # Calculate priority based on dependency
    for pkg_name in pkg_dict.keys():
        pkg = pkg_dict[pkg_name]
        pkg.priority = calculate_pkg_priority(pkg, pkg_dict, 1)
    # Sort package based on dependency
    pkg_names = sorted(pkg_names, key=lambda pkg_name: pkg_dict[pkg_name].priority, reverse=True)
    # Get the options
    opts = []
    for cmd in commands:
        if cmd=='--libs':
            # First pass: each package's flags up to its first -l entry.
            for pkg_name in pkg_names:
                libs = pkg_dict[pkg_name].libs()
                for lib in libs:
                    opts.append(lib)
                    if lib[:2]=="-l":
                        break
            # Second pass: the full Libs value of every package.
            for pkg_name in pkg_names:
                opts += pkg_dict[pkg_name].libs()
        elif cmd=='--cflags':
            for pkg_name in pkg_names:
                opts += pkg_dict[pkg_name].cflags()
        elif cmd[0]=='-':
            # NOTE(review): this reports sys.argv[i] — the stale index from
            # the earlier argv scan — rather than `cmd`; presumably a bug.
            sys.stderr.write("pkgconfig.py: I don't know how to handle " + sys.argv[i] + "\n")
    # Deduplicate and drop default system include/lib paths before printing.
    filtered_opts = []
    for opt in opts:
        opt = opt.strip()
        if not opt:
            continue
        if REMOVE_THESE.count(opt) != 0:
            continue
        if filtered_opts.count(opt) != 0:
            continue
        filtered_opts.append(opt)
    print ' '.join(filtered_opts)
| 23.319767 | 99 | 0.630765 | import sys
import os
REMOVE_THESE = ["-I/usr/include", "-I/usr/include/", "-L/usr/lib", "-L/usr/lib/"]
class Pkg:
    """A parsed pkg-config (.pc) metadata file.

    After a successful parse(), self.vars maps lower-cased variable and
    keyword names (e.g. 'prefix', 'libs', 'cflags', 'requires') to their
    fully ${var}-substituted values.
    """
    def __init__(self, pkg_name):
        self.name = pkg_name      # package name == the .pc file basename
        self.priority = 0         # dependency depth, filled in by the caller
        self.vars = {}            # parsed name -> substituted value
    def parse(self, pkg_config_path):
        """Locate and parse `<name>.pc` along the ':'-separated path.

        Returns True on success, False when the file cannot be opened in
        any of the search directories.
        """
        f = None
        for pkg_path in pkg_config_path.split(':'):
            if pkg_path[-1] != '/':
                pkg_path += '/'
            fname = pkg_path + self.name + '.pc'
            try:
                f = open(fname, "r")
                break
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only I/O failures mean
            # "not in this directory, try the next one".
            except IOError:
                continue
        if not f:
            #sys.stderr.write("pkgconfig.py: unable to find %s.pc in %s\n" % (self.name, pkg_config_path))
            return False
        for line in f.readlines():
            line = line.strip()
            if not line:
                continue
            if line[0]=='#':
                continue
            # A line is either 'var=value' or 'Keyword: value'; split on
            # whichever separator appears first.
            pos1 = line.find('=')
            pos2 = line.find(':')
            if pos1 > 0 and (pos1 < pos2 or pos2 < 0):
                pos = pos1
            elif pos2 > 0 and (pos2 < pos1 or pos1 < 0):
                pos = pos2
            else:
                continue
            name = line[:pos].lower()
            value = line[pos+1:]
            self.vars[name] = value
        f.close()
        # Expand ${var} references.  Values are stored in file order, so
        # variables defined earlier are already expanded when later
        # entries reference them.
        for name in self.vars.keys():
            value = self.vars[name]
            while True:
                pos1 = value.find("${")
                if pos1 < 0:
                    break
                pos2 = value.find("}")
                if pos2 < 0:
                    break
                value = value.replace(value[pos1:pos2+1], self.vars[value[pos1+2:pos2]])
            self.vars[name] = value
        return True
    def requires(self):
        """Return the package names listed in the 'Requires:' field."""
        if not 'requires' in self.vars:
            return []
        deps = []
        req_list = self.vars['requires']
        for req_item in req_list.split(','):
            req_item = req_item.strip()
            for i in range(len(req_item)):
                if "=<>".find(req_item[i]) >= 0:
                    deps.append(req_item[:i].strip())
                    break
            else:
                # BUGFIX: a dependency with no version constraint (e.g.
                # 'Requires: foo') used to be dropped entirely; keep it.
                if req_item:
                    deps.append(req_item)
        return deps
    def libs(self):
        """Return the 'Libs:' value split on single spaces."""
        if not 'libs' in self.vars:
            return []
        return self.vars['libs'].split(' ')
    def cflags(self):
        """Return the 'Cflags:' value split on single spaces."""
        if not 'cflags' in self.vars:
            return []
        return self.vars['cflags'].split(' ')
def calculate_pkg_priority(pkg, pkg_dict, loop_cnt):
    """Return 1 + the summed priorities of pkg's known dependencies.

    Recurses through pkg.requires(), ignoring packages that are not in
    pkg_dict.  loop_cnt limits the recursion depth: beyond 10 levels a
    circular dependency is assumed and 0 is returned.
    """
    if loop_cnt > 10:
        sys.stderr.write("Circular dependency with pkg %s\n" % (pkg))
        return 0
    return 1 + sum(
        calculate_pkg_priority(pkg_dict[dep], pkg_dict, loop_cnt + 1)
        for dep in pkg.requires()
        if dep in pkg_dict
    )
if __name__ == "__main__":
    # Minimal pkg-config replacement (Python 2): collects package names
    # and option flags from argv, then prints the requested
    # --libs/--cflags values for the packages, dependency-sorted.
    pkg_names = []
    pkg_dict = {}
    commands = []
    exist_check = False
    for i in range(1,len(sys.argv)):
        if sys.argv[i][0] == '-':
            cmd = sys.argv[i]
            commands.append(cmd)
            if cmd=='--exists':
                exist_check = True
            elif cmd=="--help":
                print "This is not very helpful, is it"
                sys.exit(0)
            elif cmd=="--version":
                print "0.1"
                sys.exit(0)
        else:
            pkg_names.append(sys.argv[i])
    # Fix search path
    PKG_CONFIG_PATH = os.getenv("PKG_CONFIG_PATH", "").strip()
    if not PKG_CONFIG_PATH:
        PKG_CONFIG_PATH="/usr/local/lib/pkgconfig:/usr/lib/pkgconfig"
    PKG_CONFIG_PATH = PKG_CONFIG_PATH.replace(";", ":")
    # Parse files
    for pkg_name in pkg_names:
        pkg = Pkg(pkg_name)
        if not pkg.parse(PKG_CONFIG_PATH):
            # Exit status 1 signals "package not found", like pkg-config.
            sys.exit(1)
        pkg_dict[pkg_name] = pkg
    if exist_check:
        # --exists: reaching this point means every package was found.
        sys.exit(0)
    # Calculate priority based on dependency
    for pkg_name in pkg_dict.keys():
        pkg = pkg_dict[pkg_name]
        pkg.priority = calculate_pkg_priority(pkg, pkg_dict, 1)
    # Sort package based on dependency
    pkg_names = sorted(pkg_names, key=lambda pkg_name: pkg_dict[pkg_name].priority, reverse=True)
    # Get the options
    opts = []
    for cmd in commands:
        if cmd=='--libs':
            # First pass: each package's flags up to its first -l entry.
            for pkg_name in pkg_names:
                libs = pkg_dict[pkg_name].libs()
                for lib in libs:
                    opts.append(lib)
                    if lib[:2]=="-l":
                        break
            # Second pass: the full Libs value of every package.
            for pkg_name in pkg_names:
                opts += pkg_dict[pkg_name].libs()
        elif cmd=='--cflags':
            for pkg_name in pkg_names:
                opts += pkg_dict[pkg_name].cflags()
        elif cmd[0]=='-':
            # NOTE(review): this reports sys.argv[i] — the stale index from
            # the earlier argv scan — rather than `cmd`; presumably a bug.
            sys.stderr.write("pkgconfig.py: I don't know how to handle " + sys.argv[i] + "\n")
    # Deduplicate and drop default system include/lib paths before printing.
    filtered_opts = []
    for opt in opts:
        opt = opt.strip()
        if not opt:
            continue
        if REMOVE_THESE.count(opt) != 0:
            continue
        if filtered_opts.count(opt) != 0:
            continue
        filtered_opts.append(opt)
    print ' '.join(filtered_opts)
| 1,896 | -11 | 165 |
752524c18d71f5aeef66f7d24378493d86dd4a29 | 1,875 | py | Python | channels_jobmanager/migrations/0001_initial.py | datadvance/DjangoChannelsJobmanager | 7c7bd4e5c8bdb9bc8c4d9d1018a286321ddbef72 | [
"MIT"
] | 6 | 2018-06-27T12:33:51.000Z | 2018-06-28T10:47:34.000Z | channels_jobmanager/migrations/0001_initial.py | datadvance/DjangoChannelsJobmanager | 7c7bd4e5c8bdb9bc8c4d9d1018a286321ddbef72 | [
"MIT"
] | null | null | null | channels_jobmanager/migrations/0001_initial.py | datadvance/DjangoChannelsJobmanager | 7c7bd4e5c8bdb9bc8c4d9d1018a286321ddbef72 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2018-06-20 10:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
import uuid
| 44.642857 | 228 | 0.605333 | # Generated by Django 2.0.6 on 2018-06-20 10:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the ``Job`` model."""
    # First migration of this app; there is no earlier migration to depend on.
    initial = True
    dependencies = [
        # Job.user points at the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('is_cancelable', models.BooleanField(default=True)),
                ('state', models.CharField(choices=[('PENDING', 'Pending'), ('WORKING', 'Working'), ('CANCELING', 'Canceling'), ('DONE', 'Done'), ('ERROR', 'Error'), ('CANCELED', 'Canceled')], default='PENDING', max_length=16)),
                ('submitted', models.DateTimeField(auto_now_add=True)),
                ('started', models.DateTimeField(default=None, null=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('channel_name', models.CharField(max_length=128)),
                ('function_name', models.CharField(max_length=128, verbose_name='job function name')),
                ('args', jsonfield.fields.JSONField(default=[], verbose_name='job function args')),
                ('kwds', jsonfield.fields.JSONField(default={}, verbose_name='job function kwds')),
                ('message', models.TextField(default='')),
                ('payload', jsonfield.fields.JSONField(default={})),
                ('readiness', models.FloatField(default=None, null=True)),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 0 | 1,659 | 23 |
5c7beda1d4d9d9ea4d2b4753ca805b5cb43200a3 | 556 | py | Python | bii-ide/bii_ide/common/biicode/biicode_dependencies.py | biicode/bii-ide | 5565859bcfc28f22f095f8a353325270192fbffc | [
"MIT"
] | 2 | 2015-01-15T09:50:38.000Z | 2015-01-27T14:00:59.000Z | bii-ide/bii_ide/common/biicode/biicode_dependencies.py | biicode/bii-ide | 5565859bcfc28f22f095f8a353325270192fbffc | [
"MIT"
] | null | null | null | bii-ide/bii_ide/common/biicode/biicode_dependencies.py | biicode/bii-ide | 5565859bcfc28f22f095f8a353325270192fbffc | [
"MIT"
] | 2 | 2015-04-20T21:39:54.000Z | 2016-05-29T07:21:54.000Z | import subprocess
import sys
import platform
import os
finder = {"Linux": "which",
"Darwin": "which",
"Windows": "where"}
| 24.173913 | 60 | 0.566547 | import subprocess
import sys
import platform
import os
finder = {"Linux": "which",
"Darwin": "which",
"Windows": "where"}
def dependencies_finder():
    """Check that the `bii` command can be located and register biicode on sys.path.

    Uses the platform's lookup command (`which`/`where`, see `finder`).
    Returns False when the lookup reports an error, True otherwise.
    """
    locate_cmd = finder[platform.system()]
    proc = subprocess.Popen([locate_cmd, 'bii'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if err:
        return False
    if platform.system() == 'Windows':
        # NOTE(review): on Python 3 `out` is bytes and ends with a newline;
        # this presumably targeted Python 2 str output — verify.
        sys.path.insert(0, os.path.dirname(out))
    else:
        sys.path.insert(0, "/usr/lib/biicode")
    return True
| 389 | 0 | 23 |
6b9ff6c3a986ee07237c5eecc3dc4397f2203ada | 972 | py | Python | LeetCode/String/1371. Find the Longest Substring Containing Vowels in Even Counts.py | thehanemperor/LeetCode | 8d120162657a1e29c3e821b51ac4121300fc7a12 | [
"MIT"
] | null | null | null | LeetCode/String/1371. Find the Longest Substring Containing Vowels in Even Counts.py | thehanemperor/LeetCode | 8d120162657a1e29c3e821b51ac4121300fc7a12 | [
"MIT"
] | null | null | null | LeetCode/String/1371. Find the Longest Substring Containing Vowels in Even Counts.py | thehanemperor/LeetCode | 8d120162657a1e29c3e821b51ac4121300fc7a12 | [
"MIT"
] | null | null | null |
# MEDIUM
# cur records the count of "aeiou"
# cur & 1 = the records of a % 2
# cur & 2 = the records of e % 2
# cur & 4 = the records of i % 2
# cur & 8 = the records of o % 2
# cur & 16 = the records of u % 2
# seen note the index of first occurrence of cur
# a e i o u other
# "aeiou".indexOf(s.charAt(i)) + 1 1 2 3 4 5 0
# 1 << tmp 2 4 8 16 32 1
# (1 << tmp) >> 1 1 2 4 8 16 0 | 33.517241 | 88 | 0.403292 |
# MEDIUM
# cur records the count of "aeiou"
# cur & 1 = the records of a % 2
# cur & 2 = the records of e % 2
# cur & 4 = the records of i % 2
# cur & 8 = the records of o % 2
# cur & 16 = the records of u % 2
# seen note the index of first occurrence of cur
# a e i o u other
# "aeiou".indexOf(s.charAt(i)) + 1 1 2 3 4 5 0
# 1 << tmp 2 4 8 16 32 1
# (1 << tmp) >> 1 1 2 4 8 16 0
class Solution:
    def findTheLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of *s* in which every
        vowel (a, e, i, o, u) appears an even number of times.

        Prefix bitmask technique: bit k of `mask` is the parity of the
        k-th vowel's count over s[:i+1]; two prefixes with equal masks
        bound a substring whose vowel counts are all even.
        O(n) time, O(1) extra space (at most 32 distinct masks).
        """
        best = 0
        mask = 0              # current vowel-parity bitmask
        first_seen = {0: -1}  # mask -> earliest prefix index it occurred at
        for i, ch in enumerate(s):
            # "aeiou".find(ch) is -1 for consonants, so the toggle is 0;
            # vowels map to bits 1, 2, 4, 8, 16 respectively.
            mask ^= 1 << ("aeiou".find(ch) + 1) >> 1
            if mask not in first_seen:
                first_seen[mask] = i
            best = max(best, i - first_seen[mask])
        return best
109b484042bca981645598724b609d879545a1e6 | 1,261 | py | Python | sztuczna_inteligencja/2-lab/komandosASTAR.py | Magikis/Uniwersity | 06964ef31d721af85740df1dce3f966006ab9f78 | [
"MIT"
] | 12 | 2017-11-30T08:45:48.000Z | 2018-04-26T14:15:45.000Z | sztuczna_inteligencja/2-lab/komandosASTAR.py | Magikis/Uniwersity | 06964ef31d721af85740df1dce3f966006ab9f78 | [
"MIT"
] | null | null | null | sztuczna_inteligencja/2-lab/komandosASTAR.py | Magikis/Uniwersity | 06964ef31d721af85740df1dce3f966006ab9f78 | [
"MIT"
] | 9 | 2017-10-16T09:42:59.000Z | 2018-01-27T19:48:45.000Z | from komandos import *
import heapq as H
if __name__ == '__main__':
    # Entry point: A*-search a move sequence from the initial board state.
    # getInitialState, h and saveOtput come from the komandos star-import.
    mem = set()
    initState = getInitialState()
    randPath = []
    path = []
    # for i in range(140):
    #     move = random.choice(getMoves())
    #     initState = apllyMove(initState, move)
    #     randPath.append(move)
    path = aStar(mem, initState, h)
    saveOtput(randPath + path)
| 28.659091 | 65 | 0.529738 | from komandos import *
import heapq as H
def aStar(mem, initalState, heuristic):
    """A* search from initalState to a winning state.

    `mem` serves as the closed/visited set (it is cleared first).
    Returns the list of moves leading to the first generated state for
    which isWin() is true; raises Exception when the space is exhausted.
    genNewStates/isWin come from the komandos star-import.

    NOTE(review): heap entries are (f, g, state, path); when f and g tie,
    heapq falls back to comparing states, so states must be orderable.
    ('initalState' / 'costOfComming' are the original spellings.)
    """
    nodes = 0  # expanded-node counter, reported on success
    mem.clear()
    mem.add(initalState)
    queue = []  # min-heap of (f = g + h, g, state, path)
    queue.append((0, 0, initalState, []))
    while queue:
        nodes += 1
        # if nodes % 100000 == 0:
        #     print('NODES_{}'.format(nodes), end='')
        (priority, costOfComming, state, path) = H.heappop(queue)
        # showBoard(state)
        for (nextState, move) in genNewStates(state):
            if nextState in mem:
                continue
            mem.add(nextState)
            # Goal test on generation rather than on expansion; every step
            # costs 1 here, but note this can shortcut the usual A* proof.
            if isWin(nextState):
                print(format('Nodes: {}_'.format(nodes)), end='')
                return path + [move]
            H.heappush(queue, (
                heuristic(nextState) + costOfComming + 1,
                costOfComming + 1,
                nextState,
                path + [move]
            ))
    raise Exception('No solution for game')
if __name__ == '__main__':
    # Entry point: A*-search a move sequence from the initial board state.
    # getInitialState, h and saveOtput come from the komandos star-import.
    mem = set()
    initState = getInitialState()
    randPath = []
    path = []
    # for i in range(140):
    #     move = random.choice(getMoves())
    #     initState = apllyMove(initState, move)
    #     randPath.append(move)
    path = aStar(mem, initState, h)
    saveOtput(randPath + path)
| 867 | 0 | 23 |
d2c0d59792337935b6f55cddc7d30ccc0dd9e648 | 2,719 | py | Python | 2021-01/demo20-1.py | zhouyuanmin/MyDemo | 664977a6243992c77931e58b98f5262745759d1a | [
"MIT"
] | null | null | null | 2021-01/demo20-1.py | zhouyuanmin/MyDemo | 664977a6243992c77931e58b98f5262745759d1a | [
"MIT"
] | null | null | null | 2021-01/demo20-1.py | zhouyuanmin/MyDemo | 664977a6243992c77931e58b98f5262745759d1a | [
"MIT"
] | null | null | null | # import time
# Mind-reading trick demo.  Every multiple of 9 in the printed table shows
# the same secret letter, and (two-digit number) - tens digit - ones digit
# is always a multiple of 9, so the "guessed" letter is simply list1[temp].
# (Commented-out draft of the final program is kept below as-is.)
# import random
# seed = 1
# list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
# temp = random.randint(0, 25)
# count = 0
# for i in range(0,100):
#     if i % 9 == 0 and i <= 81:
#         print(str(i) + ':' + list1[temp], end="\t")
#         count += 1
#     else:
#         print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
#         count += 1
#     if count % 10 == 0:
#         print()
# print()
# print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
# s = input('记住字母后按回车开始读心!')
# for i in range(5, 0, -1):
#     print(i)
#     time.sleep(1)
# print("你心中的字母是:" + list1[temp])
# Task 1: pick the secret letter index at random and show the alphabet.
import random
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
print(list1)
print(temp)
# Task 2: print the 0-99 table; every multiple of 9 shows the secret letter.
import random
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
    if i % 9 == 0 and i <= 81:
        print(str(i) + ':' + list1[temp], end="\t")
        count += 1
    else:
        print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
        count += 1
    if count % 10 == 0:
        print()
# Task 3: add the instruction prompt and reveal the secret letter.
import time
import random
seed = 1
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
    if i % 9 == 0 and i <= 81:
        print(str(i) + ':' + list1[temp], end="\t")
        count += 1
    else:
        print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
        count += 1
    if count % 10 == 0:
        print()
print()
print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
s = input('记住字母后按回车开始读心!')
print("你心中的字母是:" + list1[temp])
# Final version: task 3 plus a 5-second countdown before the reveal.
import time
import random
seed = 1
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
    if i % 9 == 0 and i <= 81:
        print(str(i) + ':' + list1[temp], end="\t")
        count += 1
    else:
        print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
        count += 1
    if count % 10 == 0:
        print()
print()
print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
s = input('记住字母后按回车开始读心!')
for i in range(5, 0, -1):
    print(i)
    time.sleep(1)
print("你心中的字母是:" + list1[temp]) | 24.944954 | 140 | 0.447223 | # import time
# Mind-reading trick demo.  Every multiple of 9 in the printed table shows
# the same secret letter, and (two-digit number) - tens digit - ones digit
# is always a multiple of 9, so the "guessed" letter is simply list1[temp].
# (Commented-out draft of the final program is kept below as-is.)
# import random
# seed = 1
# list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
# temp = random.randint(0, 25)
# count = 0
# for i in range(0,100):
#     if i % 9 == 0 and i <= 81:
#         print(str(i) + ':' + list1[temp], end="\t")
#         count += 1
#     else:
#         print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
#         count += 1
#     if count % 10 == 0:
#         print()
# print()
# print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
# s = input('记住字母后按回车开始读心!')
# for i in range(5, 0, -1):
#     print(i)
#     time.sleep(1)
# print("你心中的字母是:" + list1[temp])
# Task 1: pick the secret letter index at random and show the alphabet.
import random
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
print(list1)
print(temp)
# Task 2: print the 0-99 table; every multiple of 9 shows the secret letter.
import random
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
    if i % 9 == 0 and i <= 81:
        print(str(i) + ':' + list1[temp], end="\t")
        count += 1
    else:
        print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
        count += 1
    if count % 10 == 0:
        print()
# Task 3: add the instruction prompt and reveal the secret letter.
import time
import random
seed = 1
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
    if i % 9 == 0 and i <= 81:
        print(str(i) + ':' + list1[temp], end="\t")
        count += 1
    else:
        print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
        count += 1
    if count % 10 == 0:
        print()
print()
print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
s = input('记住字母后按回车开始读心!')
print("你心中的字母是:" + list1[temp])
# Final version: task 3 plus a 5-second countdown before the reveal.
import time
import random
seed = 1
list1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z']
temp = random.randint(0, 25)
count = 0
for i in range(0, 100):
    if i % 9 == 0 and i <= 81:
        print(str(i) + ':' + list1[temp], end="\t")
        count += 1
    else:
        print(str(i) + ':' + list1[random.randint(0, 25)], end="\t")
        count += 1
    if count % 10 == 0:
        print()
print()
print('请在脑海中想一个数字,将它减去十位上的数字,再减去个位上的数字,得到最终数字,并记住最终数字对应的字母')
s = input('记住字母后按回车开始读心!')
for i in range(5, 0, -1):
    print(i)
    time.sleep(1)
print("你心中的字母是:" + list1[temp]) | 0 | 0 | 0 |
f60a572537dba80613b967a061f1489eb08b1f8d | 2,168 | py | Python | examples/poll_example.py | Hofei90/telegram_api | 8e910e15d7147db4b3828fa6fd1cfe2f5d33c077 | [
"MIT"
] | null | null | null | examples/poll_example.py | Hofei90/telegram_api | 8e910e15d7147db4b3828fa6fd1cfe2f5d33c077 | [
"MIT"
] | 4 | 2019-04-24T12:56:34.000Z | 2020-06-25T20:16:56.000Z | examples/poll_example.py | Hofei90/telegram_api | 8e910e15d7147db4b3828fa6fd1cfe2f5d33c077 | [
"MIT"
] | null | null | null | import os
import time
import toml
import telegram_bot_api as api
# Module-level bot configuration loaded at import time.
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
CONFIG = config_laden()
USER = CONFIG["telegram"]["user"]
BOT = api.Bot(CONFIG["telegram"]["token"])
# The poll's answer options, in option-id order.
ANTWORTEN = ["Komme gleich", "1 Min", "5 Min"]
if __name__ == "__main__":
    # Start the poll bot when executed as a script; main() and
    # config_laden() are expected to be defined elsewhere in this module.
    main()
| 27.794872 | 86 | 0.635609 | import os
import time
import toml
import telegram_bot_api as api
def config_laden():
    """Read and parse the example TOML configuration next to this script."""
    pfad = os.path.join(SKRIPTPFAD, "vorlage_example_cfg.toml")
    with open(pfad) as datei:
        inhalt = datei.read()
    return toml.loads(inhalt)
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
CONFIG = config_laden()
USER = CONFIG["telegram"]["user"]
BOT = api.Bot(CONFIG["telegram"]["token"])
ANTWORTEN = ["Komme gleich", "1 Min", "5 Min"]
class LiveNachricht:
    """A live-updating chat message mirroring the current poll results.

    Keeps, per option id, a list of the form [count, name, name, ...]
    and re-renders the whole message via BOT.edit_message_text whenever
    an answer arrives.
    """
    def __init__(self, options):
        # antworten: option_id -> [total_count, first_name, first_name, ...]
        self.antworten = {}
        # option_id_mapping: option_id -> option text
        self.option_id_mapping = {}
        for option_id, option in enumerate(options):
            self.option_id_mapping[option_id] = option
            self.antworten[option_id] = [0]
        self.id = None
        # Post the (initially empty) results message and remember its id
        # so it can be edited in place later.
        r = BOT.send_message(USER, self.text_generieren())
        print(r)
        self.message_id = r["result"]["message_id"]
    def text_generieren(self):
        # Render one line per option: " <option>: Gesamt: <count>, <names>".
        text = ""
        for key, value in self.antworten.items():
            kategorie = option_id_mapping_value(self.option_id_mapping, key)
            anzahl = value[0]
            namen = ",".join(value[1:])
            text = f"{text} {kategorie}: Gesamt: {anzahl}, {namen}\n"
        return text
    def add_antwort(self, antwort):
        # Record one poll_answer (first selected option only) and refresh
        # the live message.
        print(antwort["option_ids"][0])
        self.antworten[int(antwort["option_ids"][0])][0] += 1
        self.antworten[antwort["option_ids"][0]].append(antwort["user"]["first_name"])
        text = self.text_generieren()
        r = BOT.edit_message_text(USER, text, self.message_id)
        print(r)
def option_id_mapping_value(option_in_mapping, id_):
    """Return the option text registered under *id_* (KeyError if absent)."""
    option = option_in_mapping[id_]
    return option
def einsatz_erstellen(poll_frage, optionen):
    """Post a non-anonymous poll to the configured user and return the API response."""
    return BOT.send_poll(USER, poll_frage, optionen, is_anonymous=False)
def main():
    # Create the poll and the live results message, then poll Telegram
    # for updates forever, feeding every poll answer into the message.
    einsatz = "Einsatz XY"
    r = einsatz_erstellen(einsatz, ANTWORTEN)
    live_nachricht = LiveNachricht(ANTWORTEN)
    while True:
        r = BOT.get_updates()
        print(r)
        for antwort in r:
            if "poll_answer" in antwort:
                live_nachricht.add_antwort(antwort["poll_answer"])
        time.sleep(0.5)
if __name__ == "__main__":
main()
| 1,654 | -1 | 195 |
ed0aaf22952503c583362871b4936f5c75ae6d9f | 4,792 | py | Python | main.py | sunsetmountain/gcpBudgetMonitoring | 0818cf5d6ce0dfd373c3fcff3470906eb5302760 | [
"MIT"
] | null | null | null | main.py | sunsetmountain/gcpBudgetMonitoring | 0818cf5d6ce0dfd373c3fcff3470906eb5302760 | [
"MIT"
] | null | null | null | main.py | sunsetmountain/gcpBudgetMonitoring | 0818cf5d6ce0dfd373c3fcff3470906eb5302760 | [
"MIT"
] | null | null | null | import base64
import logging
import json
from calendar import monthrange
import datetime
from httplib2 import Http
from json import dumps
def handle_notification(event, context):
    """Triggered from a message on a Cloud Pub/Sub budget-alert topic.

    Decodes the budget notification, compares spend-to-date against how
    far through the billing month we are, builds a trend message and
    (subject to chatLimiter's rate limiting) posts it to Google Chat.

    Args:
        event (dict): Event payload; event['data'] is the base64-encoded
            JSON budget notification.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    pubsub_message = base64.b64decode(event['data']).decode('utf-8')
    logging.info('Budget information: {}'.format(pubsub_message))
    jsonPayload = json.loads(pubsub_message)
    costAmount = jsonPayload['costAmount']
    budgetAmount = jsonPayload['budgetAmount']
    # Spend so far, as a percentage of the monthly budget.
    percentOfBudget = round((costAmount/budgetAmount) * 100,2)
    budgetDisplayName = jsonPayload['budgetDisplayName']
    costIntervalStart = jsonPayload['costIntervalStart']
    # How far (in percent) we are through the current billing month.
    percentOfMonth = calcMonthPercent(costIntervalStart)
    # Positive -> spending ahead of the month's pace; negative -> behind.
    # NOTE(review): the chat wording says "than last month", but this value
    # compares against the elapsed fraction of THIS month — confirm intent.
    trendingPercent = round(percentOfBudget - percentOfMonth,2)
    #logging.info('costAmount: {}'.format(costAmount))
    #logging.info('budgetAmount: {}'.format(budgetAmount))
    #logging.info('percentOfBudget: {}'.format(percentOfBudget))
    #logging.info('budgetDisplayName: {}'.format(budgetDisplayName))
    if trendingPercent >= 1:
        message_text = "{}".format(budgetDisplayName) + ": {}".format(trendingPercent) + "% higher than last month (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
    elif trendingPercent < 1 and trendingPercent > -1:
        message_text = "{}".format(budgetDisplayName) + ": On target (+/- 1%) (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
    else:
        message_text = "{}".format(budgetDisplayName) + ": {}".format(trendingPercent) + "% lower than last month (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
    logging.info('message_text: {}'.format(message_text))
    # chatLimiter decides whether now is an allowed time to post.
    timeToSend = chatLimiter(percentOfBudget, percentOfMonth)
    if timeToSend == True:
        sendChatMessage(message_text)
| 43.963303 | 183 | 0.691152 | import base64
import logging
import json
from calendar import monthrange
import datetime
from httplib2 import Http
from json import dumps
def handle_notification(event, context):
    """Triggered from a message on a Cloud Pub/Sub budget-alert topic.

    Decodes the budget notification, compares spend-to-date against how
    far through the billing month we are, builds a trend message and
    (subject to chatLimiter's rate limiting) posts it to Google Chat.

    Args:
        event (dict): Event payload; event['data'] is the base64-encoded
            JSON budget notification.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    pubsub_message = base64.b64decode(event['data']).decode('utf-8')
    logging.info('Budget information: {}'.format(pubsub_message))
    jsonPayload = json.loads(pubsub_message)
    costAmount = jsonPayload['costAmount']
    budgetAmount = jsonPayload['budgetAmount']
    # Spend so far, as a percentage of the monthly budget.
    percentOfBudget = round((costAmount/budgetAmount) * 100,2)
    budgetDisplayName = jsonPayload['budgetDisplayName']
    costIntervalStart = jsonPayload['costIntervalStart']
    # How far (in percent) we are through the current billing month.
    percentOfMonth = calcMonthPercent(costIntervalStart)
    # Positive -> spending ahead of the month's pace; negative -> behind.
    # NOTE(review): the chat wording says "than last month", but this value
    # compares against the elapsed fraction of THIS month — confirm intent.
    trendingPercent = round(percentOfBudget - percentOfMonth,2)
    #logging.info('costAmount: {}'.format(costAmount))
    #logging.info('budgetAmount: {}'.format(budgetAmount))
    #logging.info('percentOfBudget: {}'.format(percentOfBudget))
    #logging.info('budgetDisplayName: {}'.format(budgetDisplayName))
    if trendingPercent >= 1:
        message_text = "{}".format(budgetDisplayName) + ": {}".format(trendingPercent) + "% higher than last month (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
    elif trendingPercent < 1 and trendingPercent > -1:
        message_text = "{}".format(budgetDisplayName) + ": On target (+/- 1%) (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
    else:
        message_text = "{}".format(budgetDisplayName) + ": {}".format(trendingPercent) + "% lower than last month (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
    logging.info('message_text: {}'.format(message_text))
    # chatLimiter decides whether now is an allowed time to post.
    timeToSend = chatLimiter(percentOfBudget, percentOfMonth)
    if timeToSend == True:
        sendChatMessage(message_text)
def calcMonthPercent(costIntervalStart):
    """Return how far (in percent, 2 decimals) the current moment is
    through the billing month starting at *costIntervalStart*.

    costIntervalStart is an ISO-ish UTC timestamp 'YYYY-MM-DDTHH:MM:SSZ';
    the month is considered to end at 23:59:59 on its last day.
    """
    period_start = datetime.datetime.strptime(costIntervalStart, "%Y-%m-%dT%H:%M:%SZ")
    now = datetime.datetime.now()
    # Whole seconds elapsed since the period began (microseconds ignored).
    elapsed = now - period_start
    elapsed_seconds = elapsed.days * 86400 + elapsed.seconds
    # Build the end-of-month instant for the starting month.
    start_year = period_start.year
    start_month = period_start.month
    last_day = monthrange(start_year, start_month)[1]
    period_end = datetime.datetime.strptime(
        "{}-{}-{} 23:59:59".format(start_year, start_month, last_day),
        "%Y-%m-%d %H:%M:%S")
    total = period_end - period_start
    total_seconds = total.days * 86400 + total.seconds
    # Position within the billing period, expressed as a percentage.
    return round(elapsed_seconds / total_seconds * 100, 2)
def chatLimiter(budgetPercent, intervalPercent):
    """Decide, from the wall clock, whether a chat message may be sent now.

    When spend is more than 1% ahead of the month's pace, allow a message
    during the first half of every hour (notifications arrive every 20-30
    minutes, so this sends roughly one per hour).  Otherwise allow only
    the first half of hour 14 — per the inline comment, the 7AM Mountain
    hour offset from GMT (assumes the host clock is GMT — verify).

    Returns True when a message may be sent, False otherwise.
    """
    #Get a DateTime object for the date and time right now
    timeNow = datetime.datetime.now()
    logging.info('timeNow: {}'.format(timeNow))
    dayNow = timeNow.day  # NOTE(review): unused
    hourNow = timeNow.hour
    minuteNow = timeNow.minute
    overUnder = budgetPercent - intervalPercent
    if overUnder > 1: #if over budget by more than 1%
        if minuteNow >= 0 and minuteNow < 30: #PubSub notifications should arrive every 20-30 minutes
            return True
        else:
            return False
    else: #if not over budget by more than 1%
        if minuteNow >= 0 and minuteNow < 30 and hourNow == 14: #send notifications for the 7AM Mountain time hour only (offset for GMT)
            return True
        else:
            return False
def sendChatMessage(message_text):
    """POST *message_text* as a simple text message to the Google Chat webhook.

    The URL below is a placeholder and must be replaced with a real
    space's incoming-webhook URL before deployment.
    """
    url = 'https://chat.googleapis.com/v1/spaces/...' #insert your Google Chat Webhook URL here
    bot_message = {'text' : '{}'.format(message_text)}
    message_headers = { 'Content-Type': 'application/json; charset=UTF-8'}
    http_obj = Http()
    response = http_obj.request(
        uri=url,
        method='POST',
        headers=message_headers,
        body=dumps(bot_message),
    )
    logging.info('Message sent')
    logging.info('Response: {}'.format(response))
| 2,726 | 0 | 77 |
7f52994b7156dbe1243c5663af9d59ad23d89c7e | 6,970 | py | Python | attendanceGui.py | vuwestin/AttendanceTaker | 0209ec64ad79180fd97c9e375ccf8ae1b55ca660 | [
"MIT"
] | null | null | null | attendanceGui.py | vuwestin/AttendanceTaker | 0209ec64ad79180fd97c9e375ccf8ae1b55ca660 | [
"MIT"
] | null | null | null | attendanceGui.py | vuwestin/AttendanceTaker | 0209ec64ad79180fd97c9e375ccf8ae1b55ca660 | [
"MIT"
] | null | null | null | from attendanceTaker import Student, classDatabase
import tkinter as tk
if __name__ == "__main__":
    # Script entry point; main() is expected to be defined elsewhere in
    # this module (not visible here) — verify.
    main()
| 32.268519 | 189 | 0.699713 | from attendanceTaker import Student, classDatabase
import tkinter as tk
class attendanceGui(tk.Tk):
    """Main Tk application window.

    Owns the class database and swaps between the stacked
    StartPage / addStudentPage / attPage frames.
    """
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # Attendance data store (placeholder metadata; real data is read
        # from attendance.txt on demand by updateClassDb).
        self.classDb = classDatabase("courseName", "leaderName", "dayAndTimeOfSession", "numSessions")
        #self.classDb.readDatabase("attendance.txt")
        self.currentWeek = 0
        self.currentDay = 0
        self.container = tk.Frame(self)
        self.title("Attendance Taker V.1")
        self.container.pack(side = "top", fill = "both", expand = True)
        self.container.grid_rowconfigure(0, weight = 1)
        self.container.grid_columnconfigure(0, weight = 1)
        # frames maps each page class to its instantiated frame.
        self.frames = {}
        self.setupFrames()
        #assign widgets to the grid
        self.show_frame(StartPage)
    def setupFrames(self):
        # Instantiate every page once; all share the same grid cell.
        for F in (StartPage, addStudentPage,attPage):
            frame = F(self.container,self)
            self.frames[F] = frame
            frame.grid(row = 0, column= 0, sticky = "nsew")
    def show_frame(self, controller):
        # Raise the requested page; refresh data pages before showing them.
        #print("hi")
        if controller == attPage:
            self.updateClassDb("attendance.txt")
        elif controller == addStudentPage:
            self.frames[addStudentPage].clearEntries()
        frame = self.frames[controller]
        frame.tkraise()
        # self.window = Tk()
        # self.window.title("title yeah baby")
        # self.window.geometry("350x200")
        # self.panel = Frame(self.window)
        # self.panel.pack()
        # self.window.mainloop()
        #call mainloop in main function. duh
    def setDayAndWeek(self, week, day):
        # Validate the week (1-15) and day (1-2) strings coming from the
        # StartPage entries, then open the attendance page.
        #print("hi")
        if week.isdigit() == False or day.isdigit() == False: #checks for bad input
            invalidLabel = tk.Label(self.frames[StartPage], text = "Invalid Input")
            invalidLabel.grid(row = 3, column = 1)
        else:
            week = int(week)
            day = int(day)
            if (week > 0 and week < 16) and (day > 0 and day < 3):
                self.currentWeek = str(week)
                self.currentDay = str(day)
                self.show_frame(attPage)
        #make attPage
    def addStud(self, name, email, prof, sectionNum):
        # Persist a new student record and return to the start page.
        self.classDb.addStudent(Student(name, email, prof, sectionNum))
        self.classDb.saveDatabase()
        self.show_frame(StartPage)
        #added students not showing in attpage
    def saveClassDb(self, checkBoxList):
        # Record one attendance session from the attPage checkboxes (1 =
        # present, 0 = absent), save, then go back to the start page.
        presentList = []
        for x in checkBoxList:
            #presentList is a list of tk.IntVar()
            if x.get():
                presentList.append(1)
            else:
                presentList.append(0)
        for x in range(0, len(presentList)):
            self.classDb.dataBase[x].addDay(int(self.currentWeek),int(self.currentDay), presentList[x])
        self.classDb.saveDatabase()
        self.show_frame(StartPage)
    def updateClassDb(self, filename):
        # Re-read the database and rebuild attPage so it shows fresh data.
        self.classDb.readDatabase(filename)
        #print("update")
        # container = tk.Frame(self)
        # container.pack(side = "top", fill = "both", expand = True)
        # container.grid_rowconfigure(0, weight = 1)
        # container.grid_columnconfigure(0, weight = 1)
        self.frames[attPage] = attPage(self.container,self)
        self.frames[attPage].grid(row = 0, column= 0, sticky = "nsew")
class StartPage(tk.Frame):
    """Landing page: choose a week/day to take attendance, or add a student."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text = "Attendance Taker V 1.0")
        label.grid(row = 0, column = 0)
        weekLabel = tk.Label(self, text = "Week")
        weekLabel.grid(row = 1, column = 0)
        entry1 = tk.Entry(self,width = 5)
        entry1.grid(row = 1, column = 1)
        dayLabel = tk.Label(self, text = "Day (1 or 2)")
        dayLabel.grid(row = 2, column = 0)
        dayEntry = tk.Entry(self, width = 5)
        dayEntry.grid(row = 2, column = 1)
        # Continue validates the entries via the controller and opens attPage.
        button1 = tk.Button(self, text = "Continue", command = lambda: controller.setDayAndWeek(entry1.get(), dayEntry.get()))
        button1.grid(row = 3, column = 2)
        #add an addstudent button
        addButton = tk.Button(self,text = "Add Student", command = lambda: controller.show_frame(addStudentPage))
        addButton.grid(row = 3, column = 0)
        #button1.grid_forget()
        #grid_forget removes stuff
class addStudentPage(tk.Frame):
    """Form page for entering a new student's name, email, professor and section."""
    def __init__(self,parent,controller):
        tk.Frame.__init__(self, parent)
        # entryList keeps every Entry widget so clearEntries() can wipe the form.
        self.entryList = []
        addLabel = tk.Label(self, text = "Enter the name of the student to add")
        firstNameLabel = tk.Label(self, text = "First Name")
        lastNameLabel = tk.Label(self, text = "Last Name")
        emailLabel = tk.Label(self, text = "Email (without @csu.fullerton.edu)")
        profLabel = tk.Label(self, text = "Professor's Last Name")
        sectionNumLabel = tk.Label(self, text = "Class Section Number")
        firstNameEntry = tk.Entry(self, width = 10)
        self.entryList.append(firstNameEntry)
        lastNameEntry = tk.Entry(self, width = 10)
        self.entryList.append(lastNameEntry)
        emailEntry = tk.Entry(self, width = 15)
        self.entryList.append(emailEntry)
        profEntry = tk.Entry(self, width = 10)
        self.entryList.append(profEntry)
        sectionNumEntry = tk.Entry(self, width = 5)
        self.entryList.append(sectionNumEntry)
        backButton = tk.Button(self, text = "Back", command = lambda: controller.show_frame(StartPage))
        # Add submits the form as "last,first" plus the other fields.
        addButton = tk.Button(self, text = "Add", command = lambda: controller.addStud(lastNameEntry.get() + "," + firstNameEntry.get(), emailEntry.get(), profEntry.get(), sectionNumEntry.get()))
        addLabel.grid(row = 0, column = 0)
        firstNameLabel.grid(row = 1, column = 0)
        lastNameLabel.grid(row = 2, column = 0)
        emailLabel.grid(row = 3, column = 0)
        profLabel.grid(row = 4, column = 0)
        sectionNumLabel.grid(row = 5, column = 0)
        firstNameEntry.grid(row = 1, column = 1)
        lastNameEntry.grid(row = 2, column = 1)
        emailEntry.grid(row = 3, column = 1)
        profEntry.grid(row = 4, column = 1)
        sectionNumEntry.grid(row = 5, column = 1)
        backButton.grid(row = 6, column = 0)
        addButton.grid(row = 6, column = 1)
    def clearEntries(self):
        # Blank every form field (called each time this page is shown).
        for entry in self.entryList:
            entry.delete(0,"end")
class attPage(tk.Frame):
    """Attendance page: one label + 'Here' checkbox per student."""
    def __init__(self,parent,controller):
        """Load the database and build one row per student.

        parent is a Frame, controller is an attendanceGui.
        self.checkBoxArray holds one tk.IntVar per student, in database
        order, which saveClassDb later reads.
        """
        tk.Frame.__init__(self, parent)
        controller.classDb.readDatabase("attendance.txt")
        self.studentCounter = 0
        self.checkBoxArray = []
        for student in controller.classDb.dataBase:
            studLabel = tk.Label(self, text = (student.name))
            studLabel.grid(row = self.studentCounter, column = 0)
            self.checkBoxArray.append(tk.IntVar())
            hereCheckButton = tk.Checkbutton(self,text = "Here", variable = self.checkBoxArray[self.studentCounter])
            hereCheckButton.grid(row = self.studentCounter, column = 1)
            self.studentCounter = self.studentCounter + 1
        # Save/Back controls sit on the row after the last student.
        saveButton = tk.Button(self, text = "Save", command = lambda: controller.saveClassDb(self.checkBoxArray))
        backButton = tk.Button(self, text = "Back", command = lambda: controller.show_frame(StartPage))
        backButton.grid(row = self.studentCounter, column = 0)
        saveButton.grid(row = self.studentCounter, column = 3)
        # TODO: consider a single checkbox per name; the IntVar list already
        # identifies which box was ticked by index.
def stringCount(string, c):
    """Return how many times character ``c`` occurs in ``string``."""
    return sum(1 for ch in string if ch == c)
def main():
    """Launch the attendance GUI and enter the Tk event loop."""
    p = attendanceGui()
    p.mainloop()
if __name__ == "__main__":
    main()
| 6,435 | 24 | 402 |
223944df2603e7c3ba409e514cd2839bc021b579 | 134 | py | Python | topojoin/__init__.py | SimmonsRitchie/topojoin | 5c5848ae05a249c38babdc449dbd89df1b644b61 | [
"MIT"
] | 1 | 2020-08-04T04:36:05.000Z | 2020-08-04T04:36:05.000Z | topojoin/__init__.py | SimmonsRitchie/topojoin | 5c5848ae05a249c38babdc449dbd89df1b644b61 | [
"MIT"
] | null | null | null | topojoin/__init__.py | SimmonsRitchie/topojoin | 5c5848ae05a249c38babdc449dbd89df1b644b61 | [
"MIT"
] | null | null | null | """Top-level package for TopoJoin."""
__author__ = "DSR"
__email__ = "info@simmonsritchie.com"
# Fixed: the whole assignment text had been pasted into the value
# ("__version__ = '0.3.1'"); the version must be the bare string.
__version__ = "0.3.1"
| 22.333333 | 37 | 0.701493 | """Top-level package for TopoJoin."""
__author__ = "DSR"
__email__ = "info@simmonsritchie.com"
# Fixed: the whole assignment text had been pasted into the value
# ("__version__ = '0.3.1'"); the version must be the bare string.
__version__ = "0.3.1"
| 0 | 0 | 0 |
74d5ead0146bebed5e35db9f31f7e4195759c558 | 17,127 | py | Python | protein_pka/mcce/mcce.py | SCP-028/UGA | 6c150ee2728a0c83ab88db9b2e137deb9c3f94f3 | [
"Apache-2.0"
] | null | null | null | protein_pka/mcce/mcce.py | SCP-028/UGA | 6c150ee2728a0c83ab88db9b2e137deb9c3f94f3 | [
"Apache-2.0"
] | 1 | 2019-10-07T17:34:49.000Z | 2019-10-07T17:34:49.000Z | protein_pka/mcce/mcce.py | SCP-028/UGA | 6c150ee2728a0c83ab88db9b2e137deb9c3f94f3 | [
"Apache-2.0"
] | 1 | 2019-01-10T07:33:07.000Z | 2019-01-10T07:33:07.000Z | #!python3
"""
Predict protein pKa based on MCCE method.
http://pka.engr.ccny.cuny.edu/
Require MCCE 3.0 to work: https://anaconda.org/SalahSalah/mcce/files
"""
import asyncio
import glob
import gzip
import locale
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
from multiprocessing import Pool
from urllib.request import urlopen
import aioftp
import pandas as pd
import uvloop
# Sapelo Locale is broken, quick fix
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
# Set working directory
ROOTPATH = os.path.dirname(os.path.realpath(sys.argv[0]))
os.chdir(ROOTPATH)
# Log settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(f"./pKa_calculation_{__file__}.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s\t%(levelname)s\t"
"[%(filename)s:%(lineno)s -%(funcName)12s()]\t%(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
if __name__ == "__main__":
    # Pipeline driver: discover work, download/unzip/preprocess PDB files,
    # then run MCCE pKa calculations in parallel over this script's slice
    # of the ready IDs (slice 0 also writes the lists for the other slices).
    x = pdb()
    x.load_id()
    urls = x.get_link(x.download_ids)
    x.make_dirs(x.all_ids)
    x.download_queue(urls)
    x.check_mcce()
    for id in x.unzip_ids:
        x.unzip(id)
    for id in x.preprocess_ids:
        try:
            x.preprocess(id)
            x.set_params(id)
        except Exception as e:
            # Keep going on a bad structure; record it for later inspection.
            x.error_ids.append(id)
            logger.warning(f"Preprocess of {id}: {e}")
    # subprocess.run(["find", ".", "-type", "d", "-empty", "-delete"])
    x.split_ready_ids(0)  # 0 - 9, run 0 first to generate other lists
    with Pool(os.cpu_count()) as p:
        p.map(x.calc_pka, x.working_ids)
    with open("./results/finished_ids.list", "a") as f:
        f.write("\n".join(x.working_ids))
    with open("./results/error_ids.list", "a") as f:
        f.write("\n".join(x.error_ids))
| 40.016355 | 167 | 0.527179 | #!python3
"""
Predict protein pKa based on MCCE method.
http://pka.engr.ccny.cuny.edu/
Require MCCE 3.0 to work: https://anaconda.org/SalahSalah/mcce/files
"""
import asyncio
import glob
import gzip
import locale
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
from multiprocessing import Pool
from urllib.request import urlopen
import aioftp
import pandas as pd
import uvloop
# Sapelo Locale is broken, quick fix
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
# Set working directory
ROOTPATH = os.path.dirname(os.path.realpath(sys.argv[0]))
os.chdir(ROOTPATH)
# Log settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(f"./pKa_calculation_{__file__}.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s\t%(levelname)s\t"
"[%(filename)s:%(lineno)s -%(funcName)12s()]\t%(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
class pdb:
    """Pipeline object: discover, download, preprocess PDB structures and
    run MCCE pKa calculations on them.

    The ID lists act as stage queues: an ID moves from download_ids ->
    unzip_ids -> preprocess_ids -> ready_ids -> finished_ids (or into
    error_ids on any failure).
    """
    def __init__(self):
        """Initialize the bookkeeping ID lists for each pipeline stage."""
        self.all_ids = []
        self.download_ids = []  # Download -> Unzip -> Preprocess -> Calculate
        self.unzip_ids = []  # Unzip -> Preprocess -> Calculate
        self.preprocess_ids = []  # Preprocess -> Calculate
        self.ready_ids = []  # Calculate
        self.finished_ids = []  # Successfully calculated IDs
        self.error_ids = []  # Error in download, unzip, or calculation
        # IDs this script will work on (messy queue implementation)
        self.working_ids = []
    def load_id(self):
        """
        First try to get existing pKa values,
        then get the list of PDB files to download.
        """
        for folder in ["./pdb", "./annotation", "./results"]:
            try:
                os.makedirs(folder)
            except OSError:
                pass
        # Each stage is inferred from which files already exist on disk,
        # so the pipeline is resumable after interruption.
        self.finished_ids = [id[-8:-4] for id in glob.glob("./results/*.pka")]
        logger.debug(f"{len(self.finished_ids)} finished files.")
        # Create file even at first run so that the results folder doesn't get deleted
        with open("./results/finished_ids.list", "a") as f:
            f.write("\n".join(self.finished_ids))
        self.ready_ids = list(set(
            [id[-12:-8].upper() for id in glob.glob("./pdb/*/*.pdb.bak")]) - set(self.finished_ids))
        logger.debug(f"{len(self.ready_ids)} files ready to be calculated.")
        self.preprocess_ids = list(set([id[-8:-4].upper() for id in glob.glob(
            "./pdb/*/*.pdb") if "out" not in id]) - set(self.finished_ids) - set(self.ready_ids))
        logger.debug(
            f"{len(self.preprocess_ids)} files ready to be preprocessed.")
        self.unzip_ids = [id[-11:-7].upper() for id in glob.glob("./*.ent.gz")]
        logger.debug(f"{len(self.unzip_ids)} files ready to be unzipped.")
        if not os.path.exists("./annotation/uniprot_id_mapping.dat"):
            with urlopen("ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz") as remotefile:
                logger.debug(
                    "Saving UniProt ID mapping data since it doesn't exist...")
                with open("./annotation/uniprot_id_mapping.dat.gz", "wb") as f:
                    f.write(remotefile.read())
            with gzip.open(
                    "./annotation/uniprot_id_mapping.dat.gz", "rb") as inFile, open(
                    "./annotation/uniprot_id_mapping.dat", "wb") as outFile:
                shutil.copyfileobj(inFile, outFile)
            os.remove("./annotation/uniprot_id_mapping.dat.gz")
        else:
            logger.debug("UniProt ID mapping data exists.")
        logger.debug("Reading all possible PDB IDs...")
        annot = pd.read_csv("./annotation/uniprot_id_mapping.dat",
                            sep="\t", header=None,
                            names=["uniprot", "id", "value"])
        self.all_ids = annot.loc[annot.id == "PDB", "value"].tolist()
        self.download_ids = list(set(self.all_ids) - set(self.unzip_ids) - set(
            self.preprocess_ids) - set(self.ready_ids) - set(self.finished_ids))
        logger.info(
            f"{len(self.download_ids)} PDB files need to be downloaded.")
    def get_link(self, ids):
        """ Get PDB file links from:
        ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/ ,
        and create folders to store the files.
        Parameters
        ----------
        ids: list
            The PDB IDs to download.
        Returns
        -------
        Links to download.
        """
        if isinstance(ids, list):
            ids = [id[:4].lower() for id in ids]  # pdb file IDs
            pdb_names = [f"{id}.ent.gz" for id in ids]  # pdb filenames
            # subdirectory of the pdb files (middle two chars of the ID)
            pdbDirs = [id[1:3].lower() for id in ids]
            remoteaddr = [
                f"ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{pdbDir}/pdb{pdb_name}" for pdbDir, pdb_name in zip(pdbDirs, pdb_names)]
        else:
            raise TypeError(f"{id} is not a string or list.")
        return remoteaddr
    def make_dirs(self, ids):
        """Make sure the download directory exists."""
        for id in ids:
            try:
                os.makedirs(os.path.join(ROOTPATH, "pdb", id.upper()))
            except OSError:
                pass
    async def download_worker(self, session, url):
        """Download the given url to working directory."""
        # Strip the scheme+host; aioftp wants the server-relative path.
        url = url[len("ftp://ftp.wwpdb.org"):]
        logger.debug(f"Downloading {url}")
        try:
            await session.download(url)
            self.unzip_ids.append(url[-11:-7].upper())
        except Exception as e:
            self.error_ids.append(url[-11:-7].upper())
            logger.warning(f"Error when downloading {url}: {e}")
    async def download_session(self, sem, work_queue):
        """ Get urls from the queue and pass to worker.
        Parameters
        ----------
        sem: asyncio.Semaphore object
        work_queue: asyncio.Queue object
        """
        while not work_queue.empty():
            url = await work_queue.get()
            logger.debug(f"Got url from queue: {url}")
            # The semaphore caps concurrent FTP sessions (10, see caller).
            async with sem:
                async with aioftp.ClientSession("ftp.wwpdb.org") as session:
                    await self.download_worker(session, url)
    def download_queue(self, urls):
        """ Create a queue to download all the given urls.
        Parameters
        ----------
        urls: list
            A list of urls to download.
        Returns
        -------
        Downloaded "*.ent.gz" files in working directory.
        """
        logger.debug(f"{len(urls)} urls to download.")
        loop = uvloop.new_event_loop()
        asyncio.set_event_loop(loop)
        q = asyncio.Queue()
        sem = asyncio.Semaphore(10)
        [q.put_nowait(url) for url in urls]
        tasks = [asyncio.ensure_future(self.download_session(sem, q))
                 for _ in range(len(urls))]
        loop.run_until_complete(asyncio.gather(*tasks))
        # Zero-sleep to allow underlying connections to close
        loop.run_until_complete(asyncio.sleep(0))
        loop.close()
    def check_mcce(self):
        """Check if MCCE 3.0 exists."""
        if not os.path.exists(os.path.join(ROOTPATH, "mcce3.0")):
            if not os.path.exists(os.path.join(ROOTPATH, "mcce3.0.tar.bz2")):
                logger.debug("MCCE isn't downloaded yet. Retrieving...")
                with urlopen("https://anaconda.org/SalahSalah/mcce/3.0/download/linux-64/mcce-3.0-0.tar.bz2") as remotefile:
                    with open("./mcce-3.0-0.tar.bz2", 'wb') as f:
                        f.write(remotefile.read())
            subprocess.run(["tar", "-xjf", "mcce-3.0-0.tar.bz2"])
            # The conda package unpacks into info/recipe/; keep only the tool.
            shutil.move("./info/recipe/mcce3.0", "./mcce3.0")
            shutil.rmtree(os.path.join(ROOTPATH, "info"), ignore_errors=True)
            shutil.rmtree(os.path.join(ROOTPATH, "bin"), ignore_errors=True)
        else:
            logger.info("MCCE 3.0 exists, proceeding to calculation...")
    def unzip(self, id):
        """Unzip downloaded *.ent.gz file."""
        try:
            saved_pdb = os.path.join(ROOTPATH, "pdb", id, f"{id}.pdb")
            with gzip.open(f"pdb{id.lower()}.ent.gz", "rb") as inFile, open(saved_pdb, "wb") as outFile:
                shutil.copyfileobj(inFile, outFile)
            os.remove(f"pdb{id.lower()}.ent.gz")
            self.preprocess_ids.append(id)
        except Exception as e:
            self.error_ids.append(id)
            logger.warning(f"Unzip of {id} unsuccessful: {e}")
    def preprocess(self, id, backup=True):
        """
        This program will:
        1) strip lines other than ATOM and HETATM records
        2) keep the first model of an NMR structure
        3) delete H and D atoms
        4) MSE to MET residue
        5) keep only one atom alternate position
        6) keep defined chains, if chain ID(s) are given in command
        7) remove some cofactors and salt ions
        Parameters
        ----------
        id: str
            The PDB ID to find the file.
        backup: bool, optional
            Whether to backup the original file or not. Default is True,
            and save to "original.bak".
        Returns
        -------
        Nothing, modify the file in place.
        """
        removable_res = [
            " ZN", "PCA", "XYP", " NA", " CL", " CA", " MG", " MN", "HOH"
        ]
        model_start = False
        newlines = []
        ID = id.upper()
        filepath = os.path.join(ROOTPATH, "pdb", ID, f"{ID}.pdb")
        if backup:
            shutil.copy2(filepath, f"{filepath}.bak")
        with open(filepath) as f:
            for line in f:
                if line[:5] == "MODEL":
                    model_start = True
                if model_start and line[:6] == "ENDMDL":
                    break  # keep only the first NMR model
                if line[:6] != "ATOM " and line[:6] != "HETATM":
                    continue # discard non ATOM records
                if line[13] == "H" or line[12] == "H":
                    continue
                if line[16] == "A":
                    # Keep alternate position A, blanking the altLoc column.
                    line = f"{line[:16]} {line[17:]}"
                elif line[16] != " ":
                    continue # delete this line, alternative posion is not A or empty
                if line[:6] == "HETATM" and line[17:20] == "MSE":
                    # Selenomethionine -> methionine (SE atom becomes SD).
                    if line[12:15] == "SE ":
                        line = f"ATOM {line[6:12]} SD{line[15:17]}MET{line[20:]}"
                    else:
                        line = f"ATOM {line[6:17]}MET{line[20:]}"
                res = line[17:20]
                if res in removable_res:
                    continue
                newlines.append(line.rstrip())
        with open(filepath, "w") as f:
            f.write("\n".join(newlines))
        logger.debug(f"{ID} preprocessing complete.")
    def set_params(self, id, quickrun=True):
        """
        Set the parameters for MCCE.
        Parameters
        ----------
        id: str
            The PDB ID of the file.
        quickrun: bool, optional
            Use "run.prm.quick" or "run.prm.default".
        Returns
        -------
        run.prm: a file describing the parameters that points to the PDB file.
        """
        pkgpath = os.path.join(ROOTPATH, "mcce3.0")
        ID = id.upper()
        filepath = os.path.join(ROOTPATH, "pdb", ID)
        newlines = []
        if quickrun:
            shutil.copy2(
                os.path.join(pkgpath, "run.prm.quick"),
                os.path.join(filepath, "run.prm")
            )
        else:
            # NOTE(review): copy2 is given a single list here instead of
            # (src, dst); the quickrun=False path would raise. TODO confirm
            # and fix to match the branch above.
            shutil.copy2([
                os.path.join(pkgpath, "run.prm.default"),
                os.path.join(filepath, "run.prm")
            ])
        with open(os.path.join(filepath, "run.prm")) as f:
            for line in f:
                line = line.rstrip()
                if line.endswith("(INPDB)"):
                    line = re.sub(r"^[^\s]+", fr"{id}.pdb", line)
                if line.endswith(("(DO_PREMCCE)", "(DO_ROTAMERS)",
                                  "(DO_ENERGY)", "(DO_MONTE)")):
                    line = re.sub(r"^f", r"t", line)
                if line.endswith("(EPSILON_PROT)"):
                    line = re.sub(r"^[\d\.]+", r"8.0", line)
                if line.startswith("/home/mcce/mcce3.0"):
                    line = re.sub(r"^/.*3\.0", pkgpath,
                                  line)
                newlines.append(line)
        with open(os.path.join(filepath, "run.prm"), "w") as f:
            f.write("\n".join(newlines))
        self.ready_ids.append(ID)
        logger.debug(f"Parameters set for {ID}.")
    def split_ready_ids(self, num):
        """ A naive queue implementation for multiple scripts.
        Parameters
        ----------
        num: int
            Which part of the IDs to work on.
        Returns
        -------
        A list of the actual IDs to work on, and save the lists of IDs for
        other scripts to work with if this is the first instance.
        """
        if os.path.isfile(os.path.join(ROOTPATH, "results", "working_ids.list")):
            # Another instance already split the work; just read our slice.
            with open(os.path.join(ROOTPATH, "results", f"working_ids.list{num}"), "r") as f:
                self.working_ids = [line.strip() for line in f]
        else:
            # First instance: split ready_ids into 10 slices and write them out.
            n = math.ceil(len(self.ready_ids) / 10)
            self.working_ids = [self.ready_ids[i:i + n]
                                for i in range(0, len(self.ready_ids), n)]
            metafile = []
            for i, ids in enumerate(self.working_ids):
                metafile.append(os.path.join(
                    ROOTPATH, "results", f"working_ids.list{i}"))
                with open(os.path.join(ROOTPATH, "results", f"working_ids.list{i}"), "w") as f:
                    f.write("\n".join(ids))
                logger.debug(
                    f"Saved {len(ids)} IDs to file working_ids.list{i} .")
            with open(os.path.join(ROOTPATH, "results", "working_ids.list"), "w") as f:
                f.write("\n".join(metafile))
            self.working_ids = self.working_ids[num]
    def calc_pka(self, id, clean=True):
        """ Calculate protein pKa values using MCCE.
        https://sites.google.com/site/mccewiki/home
        Parameters
        ----------
        id: str
            The PDB ID of the protein calculated.
        clean: bool, optional
            Only keep the PDB file, run log and pKa output.
        Returns
        -------
        A set of files in a subdirectory named after the ID.
        See user manual for detail.
        """
        id = id.upper()
        # mcce reads run.prm from the current directory, hence the chdir.
        os.chdir(os.path.realpath(os.path.join(ROOTPATH, "pdb", id)))
        logger.info(f"{id} calculation started.")
        start = time.time()
        with open(f"{id}.run.log", "w") as f:
            subprocess.run(f"{ROOTPATH}/mcce3.0/mcce", stdout=f)
        # Success/failure is judged from the last line of the run log.
        with open(f"{id}.run.log", "rb") as f:
            last = f.readlines()[-1].decode().lstrip()
        if last.startswith(("Fatal", "FATAL", "WARNING", "STOP")):
            self.error_ids.append(id)
            logger.warning(
                f"{id} calculation aborted after {time.time() - start}s, due to {last}")
        else:
            self.finished_ids.append(id)
            logger.info(
                f"{id} calculation finished, used {time.time() - start}s.")
            shutil.move("pK.out", os.path.join(
                ROOTPATH, "results", f"{id}.pka"))
        if clean:
            del_list = [i for i in os.listdir() if i not in (
                "pK.out", f"{id}.run.log", f"{id}.pdb.bak")]
            [os.remove(item) for item in del_list]
if __name__ == "__main__":
    # Pipeline driver: discover work, download/unzip/preprocess PDB files,
    # then run MCCE pKa calculations in parallel over this script's slice
    # of the ready IDs (slice 0 also writes the lists for the other slices).
    x = pdb()
    x.load_id()
    urls = x.get_link(x.download_ids)
    x.make_dirs(x.all_ids)
    x.download_queue(urls)
    x.check_mcce()
    for id in x.unzip_ids:
        x.unzip(id)
    for id in x.preprocess_ids:
        try:
            x.preprocess(id)
            x.set_params(id)
        except Exception as e:
            # Keep going on a bad structure; record it for later inspection.
            x.error_ids.append(id)
            logger.warning(f"Preprocess of {id}: {e}")
    # subprocess.run(["find", ".", "-type", "d", "-empty", "-delete"])
    x.split_ready_ids(0)  # 0 - 9, run 0 first to generate other lists
    with Pool(os.cpu_count()) as p:
        p.map(x.calc_pka, x.working_ids)
    with open("./results/finished_ids.list", "a") as f:
        f.write("\n".join(x.working_ids))
    with open("./results/error_ids.list", "a") as f:
        f.write("\n".join(x.error_ids))
| 509 | 14,673 | 25 |
eec845711b94d039c2036a3c956f5be998a02f7f | 2,433 | py | Python | meraki/models/update_network_traffic_analysis_settings_model.py | bossypants22/python-sdk-test | 37701d62dc18c2abb910eb790ab978913adcaf7b | [
"MIT"
] | 37 | 2019-04-24T14:01:33.000Z | 2022-01-28T01:37:21.000Z | meraki/models/update_network_traffic_analysis_settings_model.py | ankita66666666/meraki-python-sdk | 9894089eb013318243ae48869cc5130eb37f80c0 | [
"MIT"
] | 10 | 2019-07-09T16:35:11.000Z | 2021-12-07T03:47:53.000Z | meraki/models/update_network_traffic_analysis_settings_model.py | ankita66666666/meraki-python-sdk | 9894089eb013318243ae48869cc5130eb37f80c0 | [
"MIT"
] | 17 | 2019-04-30T23:53:21.000Z | 2022-02-07T22:57:44.000Z | # -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
import meraki.models.custom_pie_chart_item_model
class UpdateNetworkTrafficAnalysisSettingsModel(object):
    """Implementation of the 'updateNetworkTrafficAnalysisSettings' model.

    Attributes:
        mode (Mode1Enum): The traffic analysis mode for the network. Can be
            one of 'disabled' (do not collect traffic types), 'basic'
            (collect generic traffic categories), or 'detailed' (collect
            destination hostnames).
        custom_pie_chart_items (list of CustomPieChartItemModel): The list of
            items that make up the custom pie chart for traffic reporting.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "mode":'mode',
        "custom_pie_chart_items":'customPieChartItems'
    }

    def __init__(self,
                 mode=None,
                 custom_pie_chart_items=None):
        """Constructor for the UpdateNetworkTrafficAnalysisSettingsModel class"""

        # Initialize members of the class
        self.mode = mode
        self.custom_pie_chart_items = custom_pie_chart_items

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        mode = dictionary.get('mode')
        custom_pie_chart_items = None
        # Identity check (`is not None`) per PEP 8 instead of `!= None`;
        # an empty list in the payload still maps to an empty model list.
        if dictionary.get('customPieChartItems') is not None:
            custom_pie_chart_items = [
                meraki.models.custom_pie_chart_item_model.CustomPieChartItemModel.from_dictionary(structure)
                for structure in dictionary.get('customPieChartItems')
            ]

        # Return an object of this model
        return cls(mode,
                   custom_pie_chart_items)
| 33.328767 | 140 | 0.634196 | # -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
import meraki.models.custom_pie_chart_item_model
class UpdateNetworkTrafficAnalysisSettingsModel(object):
    """Implementation of the 'updateNetworkTrafficAnalysisSettings' model.

    Attributes:
        mode (Mode1Enum): The traffic analysis mode for the network. Can be
            one of 'disabled' (do not collect traffic types), 'basic'
            (collect generic traffic categories), or 'detailed' (collect
            destination hostnames).
        custom_pie_chart_items (list of CustomPieChartItemModel): The list of
            items that make up the custom pie chart for traffic reporting.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "mode":'mode',
        "custom_pie_chart_items":'customPieChartItems'
    }

    def __init__(self,
                 mode=None,
                 custom_pie_chart_items=None):
        """Constructor for the UpdateNetworkTrafficAnalysisSettingsModel class"""

        # Initialize members of the class
        self.mode = mode
        self.custom_pie_chart_items = custom_pie_chart_items

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        mode = dictionary.get('mode')
        custom_pie_chart_items = None
        # Identity check (`is not None`) per PEP 8 instead of `!= None`;
        # an empty list in the payload still maps to an empty model list.
        if dictionary.get('customPieChartItems') is not None:
            custom_pie_chart_items = [
                meraki.models.custom_pie_chart_item_model.CustomPieChartItemModel.from_dictionary(structure)
                for structure in dictionary.get('customPieChartItems')
            ]

        # Return an object of this model
        return cls(mode,
                   custom_pie_chart_items)
| 0 | 0 | 0 |
c1503a325fc1b79d7e0234ed09fea6452d121e9c | 5,704 | py | Python | Projects/3_Adversarial Search/my_custom_player.py | xuebai1990/AI-Nanodegree | bd6699a7f72059e8cd985b7550c7c49ce013e784 | [
"MIT"
] | null | null | null | Projects/3_Adversarial Search/my_custom_player.py | xuebai1990/AI-Nanodegree | bd6699a7f72059e8cd985b7550c7c49ce013e784 | [
"MIT"
] | null | null | null | Projects/3_Adversarial Search/my_custom_player.py | xuebai1990/AI-Nanodegree | bd6699a7f72059e8cd985b7550c7c49ce013e784 | [
"MIT"
] | null | null | null |
from sample_players import DataPlayer
import random
class CustomPlayer(DataPlayer):
    """ Implement your own agent to play knight's Isolation

    The get_action() method is the only *required* method. You can modify
    the interface for get_action by adding named parameters with default
    values, but the function MUST remain compatible with the default
    interface.

    **********************************************************************
    NOTES:
    - You should **ONLY** call methods defined on your agent class during
      search; do **NOT** add or call functions outside the player class.
      The isolation library wraps each method of this class to interrupt
      search when the time limit expires, but the wrapper only affects
      methods defined on this class.

    - The test cases will NOT be run on a machine with GPU access, nor be
      suitable for using any other machine learning techniques.

    **********************************************************************
    """
    def get_action(self, state):
        """ Employ an adversarial search technique to choose an action
        available in the current state calls self.queue.put(ACTION) at least

        This method must call self.queue.put(ACTION) at least once, and may
        call it as many times as you want; the caller is responsible for
        cutting off the function after the search time limit has expired.

        See RandomPlayer and GreedyPlayer in sample_players for more examples.

        **********************************************************************
        NOTE:
        - The caller is responsible for cutting off search, so calling
          get_action() from your own code will create an infinite loop!
          Refer to (and use!) the Isolation.play() function to run games.
        **********************************************************************
        """
        # Queue a random fallback move immediately so a timeout never
        # leaves the agent without an action, then iteratively deepen:
        # each completed depth overwrites the queued move with a better one.
        self.queue.put(random.choice(state.actions()))
        self.depth = 1
        while True:
            self.queue.put(self.decision(state, self.depth))
            self.depth += 1
    # To call the heuristic function
    # Number of open cells available withon a 5*5 squre
    # Number of available next moves
    # Number of open cells available within a 3*3 squre
| 43.212121 | 130 | 0.573457 |
from sample_players import DataPlayer
import random
class CustomPlayer(DataPlayer):
    """ Implement your own agent to play knight's Isolation

    The get_action() method is the only *required* method. You can modify
    the interface for get_action by adding named parameters with default
    values, but the function MUST remain compatible with the default
    interface.

    **********************************************************************
    NOTES:
    - You should **ONLY** call methods defined on your agent class during
      search; do **NOT** add or call functions outside the player class.
      The isolation library wraps each method of this class to interrupt
      search when the time limit expires, but the wrapper only affects
      methods defined on this class.

    - The test cases will NOT be run on a machine with GPU access, nor be
      suitable for using any other machine learning techniques.

    **********************************************************************
    """
    def get_action(self, state):
        """ Employ an adversarial search technique to choose an action
        available in the current state calls self.queue.put(ACTION) at least

        This method must call self.queue.put(ACTION) at least once, and may
        call it as many times as you want; the caller is responsible for
        cutting off the function after the search time limit has expired.

        See RandomPlayer and GreedyPlayer in sample_players for more examples.

        **********************************************************************
        NOTE:
        - The caller is responsible for cutting off search, so calling
          get_action() from your own code will create an infinite loop!
          Refer to (and use!) the Isolation.play() function to run games.
        **********************************************************************
        """
        # Queue a random fallback move immediately so a timeout never
        # leaves the agent without an action, then iteratively deepen:
        # each completed depth overwrites the queued move with a better one.
        self.queue.put(random.choice(state.actions()))
        self.depth = 1
        while True:
            self.queue.put(self.decision(state, self.depth))
            self.depth += 1
    def decision(self, state, depth):
        """Root of depth-limited alpha-beta search; return the best action."""
        alpha = float("-inf")
        beta = float("inf")
        best_score = float("-inf")
        best_move = None
        for action in state.actions():
            v = self.min_value(state.result(action), alpha, beta, depth - 1)
            if v > best_score:
                best_score = v
                best_move = action
            alpha = max(alpha, v)
        return best_move if best_move else state.actions()[0]
    def min_value(self, state, alpha, beta, depth):
        """Minimizing layer of alpha-beta; evaluate with heuristic at depth 0."""
        if state.terminal_test(): return state.utility(self.player_id)
        if depth <= 0: return self.heuristic(state)
        v = float("inf")
        actions = state.actions()
        # actions.sort(key=lambda x: self.nummove(state.result(x), 1 - self.player_id), reverse=True)
        for action in actions:
            v = min(v, self.max_value(state.result(action), alpha, beta, depth - 1))
            if v <= alpha: return v
            beta = min(beta, v)
        return v
    def max_value(self, state, alpha, beta, depth):
        """Maximizing layer of alpha-beta; evaluate with heuristic at depth 0."""
        if state.terminal_test(): return state.utility(self.player_id)
        if depth <= 0: return self.heuristic(state)
        v = float("-inf")
        actions = state.actions()
        # actions.sort(key=lambda x: self.nummove(state.result(x), self.player_id), reverse=True)
        for action in state.actions():
            v = max(v, self.min_value(state.result(action), alpha, beta, depth - 1))
            if v >= beta: return v
            alpha = max(alpha, v)
        return v
    # To call the heuristic function
    def heuristic(self, state):
        """Evaluate a non-terminal state as own liberty count minus opponent's."""
        # my_move = self.nummove(state, self.player_id)
        # oppo_move = self.nummove(state, 1 - self.player_id)
        my_count = self.liberty(state, self.player_id)
        oppo_count = self.liberty(state, 1 - self.player_id)
        # return my_move - oppo_move
        return my_count - oppo_count
    # Number of open cells available within a 5*5 square
    def liberty(self, state, player):
        """Count open board cells in the 5x5 neighborhood of the player's
        location (24 offsets); an unplaced player scores the full 24."""
        if not state.locs[player]: return 24
        width = 11
        S, N, W, E = -width - 2, width + 2, 1, -1
        actions = (S, N, W, E, N + W, N + E, S + W, S + E, \
                   2*N+2*W, 2*N+W, 2*N, 2*N+E, 2*N+2*E, N+2*W, N+2*E, 2*W, 2*E, S+2*W, S+2*E, 2*S+2*W, 2*S+W, 2*S, 2*S+E, 2*S+2*E)
        count = 0
        for action in actions:
            # A set bit in state.board marks an open cell at that index.
            if (action + state.locs[player]) > 0 and (state.board & (1 << (action + state.locs[player]))):
                count += 1
        return count
    # Number of available next moves
    def nummove(self, state, player):
        """Count the legal knight moves available from the player's location."""
        return len(state.liberties(state.locs[player]))
    # Number of open cells available within a 3*3 square
    def liberty2(self, state, player):
        """Count open board cells in the 3x3 neighborhood of the player's
        location (8 offsets); an unplaced player scores the full 8."""
        if not state.locs[player]: return 8
        width = 11
        S, N, W, E = -width - 2, width + 2, 1, -1
        actions = (S, N, W, E, N + W, N + E, S + W, S + E)
        count = 0
        for action in actions:
            if (action + state.locs[player]) > 0 and (state.board & (1 << (action + state.locs[player]))):
                count += 1
        return count
| 2,755 | 0 | 185 |
cb14186c43c095e3d1d220b66316bbc7c2711a8d | 257 | py | Python | mlmo/eval/metrics/__init__.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | 1 | 2020-10-03T05:23:31.000Z | 2020-10-03T05:23:31.000Z | mlmo/eval/metrics/__init__.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | null | null | null | mlmo/eval/metrics/__init__.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | null | null | null | from .base_metric import BaseMetric
from .accuracy import Accuracy
from .exact_matches import ExactMatches
from .pr_rec_f1 import PrRecF1
from .rouge_multi import RougeMulti
from .multi_ref_rouge import MultiRefRouge
from .soft_pr_rec_f1 import SoftPrRecF1
| 32.125 | 42 | 0.863813 | from .base_metric import BaseMetric
from .accuracy import Accuracy
from .exact_matches import ExactMatches
from .pr_rec_f1 import PrRecF1
from .rouge_multi import RougeMulti
from .multi_ref_rouge import MultiRefRouge
from .soft_pr_rec_f1 import SoftPrRecF1
| 0 | 0 | 0 |
852b4d6653795d573b39ff6adfb58dab19bbc906 | 2,989 | py | Python | questions/q351_largest_BST_subtree_size/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | null | null | null | questions/q351_largest_BST_subtree_size/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | 1 | 2021-05-15T07:56:51.000Z | 2021-05-15T07:56:51.000Z | questions/q351_largest_BST_subtree_size/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | null | null | null |
# Return the size of the largest sub-tree which is also a BST
import sys
sys.setrecursionlimit(1000000)
from collections import deque
# Tree Node
# Function to Build Tree
# Driver: read T test cases; each line is a level-order tree serialization
# ("N" marks a missing child). Prints the largest-BST-subtree size per case.
# NOTE(review): buildTree/largestBst are not defined in this chunk —
# presumably supplied elsewhere in the original file; confirm before running.
if __name__ == "__main__":
    t = int(input())
    for _ in range(0, t):
        s = input()
        root = buildTree(s)
        print(largestBst(root))
def recurse(root):
    """Post-order helper for the largest-BST-subtree problem.

    Returns a triple ``(lo, hi, size)`` where:
      * ``lo``/``hi`` are the min/max values of the subtree rooted at
        ``root`` when that subtree is a valid BST, otherwise ``None``;
      * ``size`` is the number of nodes of the largest BST found so far
        inside this subtree.

    Improvement over the original: the three duplicated branches
    (left-only / right-only / both children) are collapsed into a single
    unified check, with identical results for every input.
    """
    if root is None:
        return (None, None, 0)
    l_lo, l_hi, l_size = recurse(root.left)
    r_lo, r_hi, r_size = recurse(root.right)
    # A missing child is trivially fine; an existing child subtree must be
    # a BST (hi/lo not None) whose values lie strictly on the proper side.
    left_ok = root.left is None or (l_hi is not None and l_hi < root.data)
    right_ok = root.right is None or (r_lo is not None and r_lo > root.data)
    if left_ok and right_ok:
        lo = l_lo if root.left is not None else root.data
        hi = r_hi if root.right is not None else root.data
        return (lo, hi, l_size + r_size + 1)
    # Not a BST here: propagate the best size found in either child.
    return (None, None, max(l_size, r_size))
# Return the size of the largest sub-tree which is also a BST
def largestBst(root):
    """Return the node count of the largest subtree of *root* that is a BST."""
    return recurse(root)[2]
import sys
sys.setrecursionlimit(1000000)
from collections import deque
# Tree Node
class Node:
    """Binary tree node holding an integer value and two child links."""

    def __init__(self, val):
        self.data = val
        self.left = None
        self.right = None
# Function to Build Tree
def buildTree(s):
    """Build a binary tree from a space-separated level-order string.

    The token "N" marks an absent child. Returns the root Node, or None
    for an empty string or an "N" root.
    """
    # Empty input or explicit null root -> no tree.
    if len(s) == 0 or s[0] == "N":
        return None

    tokens = s.split()
    root = Node(int(tokens[0]))

    # BFS over nodes, consuming two tokens (left, right) per dequeued node.
    queue = deque([root])
    i = 1
    while queue and i < len(tokens):
        node = queue.popleft()

        # Left child.
        val = tokens[i]
        if val != "N":
            node.left = Node(int(val))
            queue.append(node.left)

        # Right child (may be missing at the very end of the stream).
        i += 1
        if i >= len(tokens):
            break
        val = tokens[i]
        if val != "N":
            node.right = Node(int(val))
            queue.append(node.right)
        i += 1

    return root
# Driver: read T test cases; each input line is a level-order serialization
# of a tree ("N" = missing child). Prints the largest-BST-subtree size.
if __name__ == "__main__":
    t = int(input())
    for _ in range(0, t):
        s = input()
        root = buildTree(s)
        print(largestBst(root))
| 2,545 | -10 | 114 |
30ef724c05ff5a5653adc901b4c963db375ecbfa | 145,145 | py | Python | rlkit/core/rl_algorithm.py | NagisaZj/oyster | 069a510fe63bb29ecd9871e0e189e58b03c8cad9 | [
"MIT"
] | null | null | null | rlkit/core/rl_algorithm.py | NagisaZj/oyster | 069a510fe63bb29ecd9871e0e189e58b03c8cad9 | [
"MIT"
] | null | null | null | rlkit/core/rl_algorithm.py | NagisaZj/oyster | 069a510fe63bb29ecd9871e0e189e58b03c8cad9 | [
"MIT"
] | null | null | null | import abc
from collections import OrderedDict
import time
import gtimer as gt
import numpy as np
from rlkit.core import logger, eval_util
from rlkit.data_management.env_replay_buffer import MultiTaskReplayBuffer,EnvReplayBuffer
from rlkit.data_management.path_builder import PathBuilder
from rlkit.samplers.in_place import SMMInPlacePathSampler, InPlacePathSampler,SeedInPlacePathSampler, ExpInPlacePathSampler,ExpInPlacePathSamplerSimple
from rlkit.torch import pytorch_util as ptu
from rlkit.smm.smm_policy import hard_smm_point
from rlkit.smm.smm_sampler import SMMSampler
from rlkit.policies.base import ExplorationPolicy
import pickle
import torch
| 40.554624 | 178 | 0.595935 | import abc
from collections import OrderedDict
import time
import gtimer as gt
import numpy as np
from rlkit.core import logger, eval_util
from rlkit.data_management.env_replay_buffer import MultiTaskReplayBuffer,EnvReplayBuffer
from rlkit.data_management.path_builder import PathBuilder
from rlkit.samplers.in_place import SMMInPlacePathSampler, InPlacePathSampler,SeedInPlacePathSampler, ExpInPlacePathSampler,ExpInPlacePathSamplerSimple
from rlkit.torch import pytorch_util as ptu
from rlkit.smm.smm_policy import hard_smm_point
from rlkit.smm.smm_sampler import SMMSampler
from rlkit.policies.base import ExplorationPolicy
import pickle
import torch
class MetaRLAlgorithm(metaclass=abc.ABCMeta):
def __init__(
self,
env,
agent,
train_tasks,
eval_tasks,
meta_batch=64,
num_iterations=100,
num_train_steps_per_itr=1000,
num_initial_steps=100,
num_tasks_sample=100,
num_steps_prior=100,
num_steps_posterior=100,
num_extra_rl_steps_posterior=100,
num_evals=10,
num_steps_per_eval=1000,
batch_size=1024,
embedding_batch_size=1024,
embedding_mini_batch_size=1024,
max_path_length=1000,
discount=0.99,
replay_buffer_size=1000000,
reward_scale=1,
num_exp_traj_eval=1,
update_post_train=1,
eval_deterministic=False,
render=False,
save_replay_buffer=False,
save_algorithm=False,
save_environment=False,
render_eval_paths=False,
dump_eval_paths=False,
plotter=None,
use_SMM=False,
load_SMM =False,
use_history=False,
SMM_path=None,
num_skills = 1,
seed_sample=False,
attention=False,
snail=False,
sample_interval=5
):
"""
:param env: training env
:param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
:param train_tasks: list of tasks used for training
:param eval_tasks: list of tasks used for eval
see default experiment config file for descriptions of the rest of the arguments
"""
self.env = env
self.agent = agent
self.exploration_agent = agent # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
self.train_tasks = train_tasks
self.eval_tasks = eval_tasks
self.meta_batch = meta_batch
self.num_iterations = num_iterations
self.num_train_steps_per_itr = num_train_steps_per_itr
self.num_initial_steps = num_initial_steps
self.num_tasks_sample = num_tasks_sample
self.num_steps_prior = num_steps_prior
self.num_steps_posterior = num_steps_posterior
self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
self.num_evals = num_evals
self.num_steps_per_eval = num_steps_per_eval
self.batch_size = batch_size
self.embedding_batch_size = embedding_batch_size
self.embedding_mini_batch_size = embedding_mini_batch_size
self.max_path_length = max_path_length
self.discount = discount
self.replay_buffer_size = replay_buffer_size
self.reward_scale = reward_scale
self.update_post_train = update_post_train
self.num_exp_traj_eval = num_exp_traj_eval
self.eval_deterministic = eval_deterministic
self.render = render
self.save_replay_buffer = save_replay_buffer
self.save_algorithm = save_algorithm
self.save_environment = save_environment
self.eval_statistics = None
self.render_eval_paths = render_eval_paths
self.dump_eval_paths = dump_eval_paths
self.plotter = plotter
self.use_SMM = use_SMM
self.load_SMM = load_SMM
self.use_history = use_history,
self.SMM_path = SMM_path
self.num_skills = num_skills
self.seed_sample = seed_sample
self.sampler = InPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
)
if self.seed_sample:
self.seedsampler = SeedInPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
sample_interval=sample_interval
)
if self.use_SMM:
self.smm_sampler = SMMSampler(
env=env,
max_path_length=max_path_length,
agent = agent,
load_SMM=self.load_SMM,
use_history=self.use_history,
SMM_path=self.SMM_path,
num_skills = self.num_skills
)
# separate replay buffers for
# - training RL update
# - training encoder update
self.replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self.enc_replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self._n_env_steps_total = 0
self._n_train_steps_total = 0
self._n_rollouts_total = 0
self._do_train_time = 0
self._epoch_start_time = None
self._algo_start_time = None
self._old_table_keys = None
self._current_path_builder = PathBuilder()
self._exploration_paths = []
    def make_exploration_policy(self, policy):
        """Hook for wrapping the exploration policy; identity by default."""
        return policy
    def make_eval_policy(self, policy):
        """Hook for wrapping the evaluation policy; identity by default."""
        return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
    def train(self):
        '''
        meta-training loop: alternate between (1) collecting data per task
        with the appropriate sampler(s) and (2) meta-gradient updates,
        evaluating at the end of each iteration
        '''
        self.pretrain()
        params = self.get_epoch_snapshot(-1)
        logger.save_itr_params(-1, params)
        gt.reset()
        gt.set_def_unique(False)
        self._current_path_builder = PathBuilder()

        # at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
        for it_ in gt.timed_for(
                range(self.num_iterations),
                save_itrs=True,
        ):
            self._start_epoch(it_)
            self.training_mode(True)
            if it_ == 0:
                print('collecting initial pool of data for train and eval')
                # temp for evaluating
                for idx in self.train_tasks:
                    self.task_idx = idx
                    self.env.reset_task(idx)
                    # Seed both buffers before any training happens; the
                    # sampler combination mirrors the per-iteration logic below.
                    if not self.use_SMM:
                        if not self.seed_sample:
                            self.collect_data(self.num_initial_steps, 1, np.inf)
                        else:
                            self.collect_data(self.num_initial_steps, 1, np.inf)
                            self.collect_data_seed(self.num_initial_steps, 1, np.inf,accumulate_context=False)
                    else:
                        self.collect_data_smm(self.num_initial_steps)
                        self.collect_data_policy(self.num_initial_steps, 1, np.inf)
            # Sample data from train tasks.
            for i in range(self.num_tasks_sample):
                idx = np.random.randint(len(self.train_tasks))
                self.task_idx = idx
                self.env.reset_task(idx)
                # Encoder buffer is rebuilt from fresh rollouts each round.
                self.enc_replay_buffer.task_buffers[idx].clear()
                if not self.use_SMM:
                    if not self.seed_sample:
                        # collect some trajectories with z ~ prior
                        if self.num_steps_prior > 0:
                            self.collect_data(self.num_steps_prior, 1, np.inf)
                        # collect some trajectories with z ~ posterior
                        if self.num_steps_posterior > 0:
                            self.collect_data(self.num_steps_posterior, 1, self.update_post_train)
                        # even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
                        if self.num_extra_rl_steps_posterior > 0:
                            self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
                                              add_to_enc_buffer=False)
                    else:
                        if self.num_steps_prior > 0:
                            self.collect_data(self.num_steps_prior, 1, np.inf)
                            self.collect_data_seed(self.num_steps_prior, 1, np.inf,accumulate_context=False)
                        if self.num_steps_posterior > 0:
                            self.collect_data(self.num_steps_posterior, 1, self.update_post_train)
                            self.collect_data_seed(self.num_steps_posterior, 1, self.update_post_train)
                        if self.num_extra_rl_steps_posterior > 0:
                            self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
                                              add_to_enc_buffer=False)
                            self.collect_data_seed(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
                                                   add_to_enc_buffer=False)
                else:
                    if self.num_steps_prior > 0:
                        self.collect_data_smm(self.num_steps_prior)
                        self.collect_data_policy(self.num_steps_prior, 1, np.inf)
                    # collect some trajectories with z ~ posterior
                    if self.num_steps_posterior > 0:
                        self.collect_data_smm(self.num_steps_posterior)
                        self.collect_data_policy(self.num_steps_posterior, 1, self.update_post_train)
                    # even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
                    if self.num_extra_rl_steps_posterior > 0:
                        self.collect_data_policy(self.num_extra_rl_steps_posterior, 1, self.update_post_train)

            # Sample train tasks and compute gradient updates on parameters.
            for train_step in range(self.num_train_steps_per_itr):
                indices = np.random.choice(self.train_tasks, self.meta_batch)
                self._do_training(indices)
                self._n_train_steps_total += 1
            gt.stamp('train')
            self.training_mode(False)
            # eval
            self._try_to_eval(it_)
            gt.stamp('eval')
            self._end_epoch()
    def pretrain(self):
        """
        Do anything before the main training phase. No-op by default;
        subclasses may override.
        """
        pass
    def collect_data_smm(self,num_samples):
        '''
        Collect transitions with the SMM exploration sampler for the current
        task. Notice that SMM data should only be available for the encoder
        (it is added to enc_replay_buffer only, never the RL buffer).
        :param num_samples: number of transitions to sample
        :return:
        '''
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.smm_sampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                                max_trajs=np.inf)
            num_transitions += n_samples
            self.enc_replay_buffer.add_paths(self.task_idx, paths)
        self._n_env_steps_total += num_transitions
        gt.stamp('smm sample')
    def collect_data_policy(self, num_samples, resample_z_rate, update_posterior_rate):
        '''
        get trajectories from current env in batch mode with given policy
        collect complete trajectories until the number of collected transitions >= num_samples
        Data goes to the RL replay buffer only (encoder data comes from the
        SMM sampler in this configuration).
        :param agent: policy to rollout
        :param num_samples: total number of transitions to sample
        :param resample_z_rate: how often to resample latent context z (in units of trajectories)
        :param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
        :param add_to_enc_buffer: whether to add collected data to encoder replay buffer
        '''
        # start from the prior
        self.agent.clear_z()

        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                                max_trajs=update_posterior_rate,
                                                                accum_context=False,
                                                                resample=resample_z_rate)
            num_transitions += n_samples
            self.replay_buffer.add_paths(self.task_idx, paths)
            # Periodically refit the posterior from stored context so later
            # rollouts condition on inferred z rather than the prior.
            if update_posterior_rate != np.inf:
                context = self.sample_context(self.task_idx)
                self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('policy sample')
    def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True):
        '''
        get trajectories from current env in batch mode with given policy
        collect complete trajectories until the number of collected transitions >= num_samples
        :param agent: policy to rollout
        :param num_samples: total number of transitions to sample
        :param resample_z_rate: how often to resample latent context z (in units of trajectories)
        :param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
        :param add_to_enc_buffer: whether to add collected data to encoder replay buffer
        :param add_to_policy_buffer: whether to add collected data to the RL replay buffer
        '''
        # start from the prior
        self.agent.clear_z()

        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                                max_trajs=update_posterior_rate,
                                                                accum_context=False,
                                                                resample=resample_z_rate)
            num_transitions += n_samples
            #for p in paths:
            #    print(p['actions'],p['rewards'])
            if add_to_policy_buffer:
                self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            # Refit q(z|c) between trajectory batches when requested.
            if update_posterior_rate != np.inf:
                context = self.sample_context(self.task_idx)
                self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')
    def collect_data_seed(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True,accumulate_context=True):
        '''
        Collect transitions with the seed sampler (one trajectory per call to
        obtain_samples), optionally accumulating context online.
        Unlike collect_data, the posterior is NOT refit here (the refit code
        is commented out below).
        :param num_samples: total number of transitions to sample
        :param accumulate_context: whether the sampler feeds transitions into
            the agent's context as it rolls out
        '''
        self.agent.clear_z()
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.seedsampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                                max_trajs=1,
                                                                accum_context=accumulate_context
                                                                )
            num_transitions += n_samples
            if add_to_policy_buffer:
                self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            #if update_posterior_rate != np.inf:
            #    context = self.prepare_context(self.task_idx)
            #    self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')
    def _try_to_eval(self, epoch):
        """Run evaluation for *epoch* (when possible) and log timing stats."""
        logger.save_extra_data(self.get_extra_data_to_save(epoch))
        if self._can_evaluate():
            self.evaluate(epoch)

            params = self.get_epoch_snapshot(epoch)
            logger.save_itr_params(epoch, params)
            # The tabular logger requires an identical key set every epoch.
            table_keys = logger.get_table_key_set()
            if self._old_table_keys is not None:
                assert table_keys == self._old_table_keys, (
                    "Table keys cannot change from iteration to iteration."
                )
            self._old_table_keys = table_keys

            logger.record_tabular(
                "Number of train steps total",
                self._n_train_steps_total,
            )
            logger.record_tabular(
                "Number of env steps total",
                self._n_env_steps_total,
            )
            logger.record_tabular(
                "Number of rollouts total",
                self._n_rollouts_total,
            )

            times_itrs = gt.get_times().stamps.itrs
            train_time = times_itrs['train'][-1]
            # The sampling timer name depends on which sampler was used.
            if not self.use_SMM:
                sample_time = times_itrs['sample'][-1]
            else:
                sample_time = times_itrs['policy sample'][-1]
            eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
            epoch_time = train_time + sample_time + eval_time
            total_time = gt.get_times().total

            logger.record_tabular('Train Time (s)', train_time)
            logger.record_tabular('(Previous) Eval Time (s)', eval_time)
            logger.record_tabular('Sample Time (s)', sample_time)
            logger.record_tabular('Epoch Time (s)', epoch_time)
            logger.record_tabular('Total Train Time (s)', total_time)

            logger.record_tabular("Epoch", epoch)
            logger.dump_tabular(with_prefix=False, with_timestamp=False)
        else:
            logger.log("Skipping eval for now.")
    def _can_evaluate(self):
        """
        One annoying thing about the logger table is that the keys at each
        iteration need to be the exact same. So unless you can compute
        everything, skip evaluation.

        A common example for why you might want to skip evaluation is that at
        the beginning of training, you may not have enough data for a
        validation and training set.

        :return: always True here — eval collects its own context, so it can
            run at any time.
        """
        # eval collects its own context, so can eval any time
        return True
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
    def _get_action_and_info(self, agent, observation):
        """
        Get an action to take in the environment.
        :param agent: policy to query (also told the current total step count)
        :param observation: current observation
        :return: whatever agent.get_action returns (action, agent info)
        """
        agent.set_num_steps_total(self._n_env_steps_total)
        return agent.get_action(observation,)
    def _start_epoch(self, epoch):
        """Reset per-epoch bookkeeping and push a logging prefix."""
        self._epoch_start_time = time.time()
        self._exploration_paths = []
        self._do_train_time = 0
        logger.push_prefix('Iteration #%d | ' % epoch)
    def _end_epoch(self):
        """Log epoch duration/trainability and pop the logging prefix."""
        logger.log("Epoch Duration: {0}".format(
            time.time() - self._epoch_start_time
        ))
        logger.log("Started Training: {0}".format(self._can_train()))
        logger.pop_prefix()
##### Snapshotting utils #####
    def get_epoch_snapshot(self, epoch):
        """Build the dict of objects to snapshot at the end of *epoch*."""
        # NOTE(review): self.exploration_policy and self.training_env are not
        # assigned anywhere in this class's __init__ (only exploration_agent
        # and env are) — presumably a subclass sets them; confirm before use.
        data_to_save = dict(
            epoch=epoch,
            exploration_policy=self.exploration_policy,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        return data_to_save
    def get_extra_data_to_save(self, epoch):
        """
        Save things that shouldn't be saved every snapshot but rather
        overwritten every time.
        :param epoch: current epoch index
        :return: dict of large objects (buffer/env/algorithm) per save flags
        """
        # NOTE(review): self.training_env is never set in this class's
        # __init__ (only self.env) — presumably supplied by a subclass.
        if self.render:
            self.training_env.render(close=True)
        data_to_save = dict(
            epoch=epoch,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        if self.save_replay_buffer:
            data_to_save['replay_buffer'] = self.replay_buffer
        if self.save_algorithm:
            data_to_save['algorithm'] = self
        return data_to_save
def collect_paths(self, idx, epoch, run):
self.task_idx = idx
self.env.reset_task(idx)
self.agent.clear_z()
paths = []
num_transitions = 0
num_trajs = 0
if self.use_SMM:
if not self.load_SMM:
path, num = self.smm_sampler.obtain_samples(max_samples=self.max_path_length, max_trajs=1,
accum_context=True)
num_transitions += num
self.agent.infer_posterior(self.agent.context)
while num_transitions < self.num_steps_per_eval:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.num_steps_per_eval - num_transitions,
max_trajs=1, accum_context=False)
paths += path
num_transitions += num
num_trajs += 1
if num_trajs >= self.num_exp_traj_eval:
self.agent.infer_posterior(self.agent.context)
else:
while num_transitions < self.num_steps_per_eval:
path, num = self.smm_sampler.obtain_samples(max_samples=self.max_path_length, max_trajs=1,
accum_context=True)
num_transitions += num
#paths+=path
num_trajs += 1
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.num_steps_per_eval - num_transitions,
max_trajs=1, accum_context=False)
paths += path
num_transitions += num
num_trajs += 1
self.agent.infer_posterior(self.agent.context)
else:
while num_transitions < self.num_steps_per_eval:
if self.seed_sample:
path, num = self.seedsampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.num_steps_per_eval - num_transitions,
max_trajs=1, accum_context=True)
else:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.num_steps_per_eval - num_transitions,
max_trajs=1, accum_context=True)
paths += path
num_transitions += num
num_trajs += 1
if num_trajs >= self.num_exp_traj_eval:
self.agent.infer_posterior(self.agent.context)
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack(e['sparse_reward'] for e in p['env_infos']).reshape(-1, 1)
p['rewards'] = sparse_rewards
goal = self.env._goal
for path in paths:
path['goal'] = goal # goal
if hasattr(self.env,"_pitfall"):
pitfall = self.env._pitfall
for path in paths:
path['pitfall'] = pitfall
# save the paths for visualization, only useful for point mass
if self.dump_eval_paths:
logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
return paths
    def _do_eval(self, indices, epoch):
        """Evaluate tasks in *indices*, returning (final_returns, online_returns).

        final_returns: mean (over runs and rollouts) return per task.
        online_returns: per-task mean return of the n-th rollout, truncated
        to the shortest rollout count so all tasks align.
        """
        final_returns = []
        online_returns = []
        for idx in indices:
            all_rets = []
            for r in range(self.num_evals):
                paths = self.collect_paths(idx, epoch, r)
                all_rets.append([eval_util.get_average_returns([p]) for p in paths])
            final_returns.append(np.mean([np.mean(a) for a in all_rets]))
            # record online returns for the first n trajectories
            n = min([len(a) for a in all_rets])
            all_rets = [a[:n] for a in all_rets]
            all_rets = np.mean(np.stack(all_rets), axis=0) # avg return per nth rollout
            online_returns.append(all_rets)
        n = min([len(t) for t in online_returns])
        online_returns = [t[:n] for t in online_returns]
        return final_returns, online_returns
def evaluate(self, epoch):
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
### sample trajectories from prior for debugging / visualization
if self.dump_eval_paths:
# 100 arbitrarily chosen for visualizations of point_robot trajectories
# just want stochasticity of z, not the policy
self.agent.clear_z()
if not self.use_SMM:
prior_paths, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length * 20,
accum_context=False,
resample=1)
else:
prior_paths, _ = self.smm_sampler.obtain_samples(
max_samples=self.max_path_length * 20,
)
logger.save_extra_data(prior_paths, path='eval_trajectories/prior-epoch{}'.format(epoch))
### train tasks
# eval on a subset of train tasks for speed
indices = np.random.choice(self.train_tasks, len(self.eval_tasks))
eval_util.dprint('evaluating on {} train tasks'.format(len(indices)))
### eval train tasks with posterior sampled from the training replay buffer
train_returns = []
for idx in indices:
self.task_idx = idx
self.env.reset_task(idx)
paths = []
for _ in range(self.num_steps_per_eval // self.max_path_length):
context = self.sample_context(idx)
self.agent.infer_posterior(context)
p, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length,
accum_context=False,
max_trajs=1,
resample=np.inf)
paths += p
#for p in paths:
# print(p['actions'],p['rewards'])
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack(e['sparse_reward'] for e in p['env_infos']).reshape(-1, 1)
p['rewards'] = sparse_rewards
train_returns.append(eval_util.get_average_returns(paths))
train_returns = np.mean(train_returns)
### eval train tasks with on-policy data to match eval of test tasks
train_final_returns, train_online_returns = self._do_eval(indices, epoch)
eval_util.dprint('train online returns')
eval_util.dprint(train_online_returns)
### test tasks
eval_util.dprint('evaluating on {} test tasks'.format(len(self.eval_tasks)))
test_final_returns, test_online_returns = self._do_eval(self.eval_tasks, epoch)
eval_util.dprint('test online returns')
eval_util.dprint(test_online_returns)
# save the final posterior
self.agent.log_diagnostics(self.eval_statistics)
#if hasattr(self.env, "log_diagnostics"):
# self.env.log_diagnostics(paths, prefix=None)
avg_train_return = np.mean(train_final_returns)
avg_test_return = np.mean(test_final_returns)
avg_train_online_return = np.mean(np.stack(train_online_returns), axis=0)
avg_test_online_return = np.mean(np.stack(test_online_returns), axis=0)
self.eval_statistics['AverageTrainReturn_all_train_tasks'] = train_returns
self.eval_statistics['AverageReturn_all_train_tasks'] = avg_train_return
self.eval_statistics['AverageReturn_all_test_tasks'] = avg_test_return
logger.save_extra_data(avg_train_online_return, path='online-train-epoch{}'.format(epoch))
logger.save_extra_data(avg_test_online_return, path='online-test-epoch{}'.format(epoch))
for key, value in self.eval_statistics.items():
logger.record_tabular(key, value)
self.eval_statistics = None
if self.render_eval_paths:
self.env.render_paths(paths)
if self.plotter:
self.plotter.draw()
    @abc.abstractmethod
    def training_mode(self, mode):
        """
        Set training mode to `mode`.

        :param mode: If True, training will happen (e.g. set the dropout
        probabilities to not all ones).
        """
        pass
    @abc.abstractmethod
    def _do_training(self):
        """
        Perform some update, e.g. perform one gradient step.
        Must be implemented by subclasses.
        :return:
        """
        pass
class ExpAlgorithm(metaclass=abc.ABCMeta):
def __init__(
self,
env,
agent,
agent_exp,
train_tasks,
eval_tasks,
encoder,
meta_batch=64,
num_iterations=100,
num_train_steps_per_itr=1000,
num_initial_steps=100,
num_tasks_sample=100,
num_steps_prior=100,
num_steps_posterior=100,
num_extra_rl_steps_posterior=100,
num_evals=10,
num_steps_per_eval=1000,
batch_size=1024,
embedding_batch_size=1024,
embedding_mini_batch_size=1024,
max_path_length=1000,
discount=0.99,
replay_buffer_size=1000000,
reward_scale=1,
num_exp_traj_eval=1,
update_post_train=1,
eval_deterministic=True,
render=False,
save_replay_buffer=False,
save_algorithm=False,
save_environment=False,
render_eval_paths=False,
dump_eval_paths=False,
plotter=None,
use_SMM=False,
load_SMM =False,
use_history=False,
SMM_path=None,
num_skills = 1,
seed_sample=False,
snail=False,
meta_episode_len=10,
num_trajs = 2,
num_trajs_eval=1
):
"""
:param env: training env
:param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
:param train_tasks: list of tasks used for training
:param eval_tasks: list of tasks used for eval
see default experiment config file for descriptions of the rest of the arguments
"""
self.env = env
self.agent = agent
self.exploration_agent = agent_exp # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
self.context_encoder = encoder
self.train_tasks = train_tasks
self.eval_tasks = eval_tasks
self.meta_batch = meta_batch
self.num_iterations = num_iterations
self.num_train_steps_per_itr = num_train_steps_per_itr
self.num_initial_steps = num_initial_steps
self.num_tasks_sample = num_tasks_sample
self.num_steps_prior = num_steps_prior
self.num_steps_posterior = num_steps_posterior
self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
self.num_evals = num_evals
self.num_steps_per_eval = num_steps_per_eval
self.batch_size = batch_size
self.embedding_batch_size = embedding_batch_size
self.embedding_mini_batch_size = embedding_mini_batch_size
self.max_path_length = max_path_length
self.discount = discount
self.replay_buffer_size = replay_buffer_size
self.reward_scale = reward_scale
self.update_post_train = update_post_train
self.num_exp_traj_eval = num_exp_traj_eval
self.eval_deterministic = eval_deterministic
self.render = render
self.save_replay_buffer = save_replay_buffer
self.save_algorithm = save_algorithm
self.save_environment = save_environment
self.eval_statistics = None
self.render_eval_paths = render_eval_paths
self.dump_eval_paths = dump_eval_paths
self.plotter = plotter
self.use_SMM = use_SMM
self.load_SMM = load_SMM
self.use_history = use_history,
self.SMM_path = SMM_path
self.num_skills = num_skills
self.seed_sample = seed_sample
self.meta_episode_len = meta_episode_len
self.num_trajs = num_trajs
self.num_trajs_eval = num_trajs_eval
self.sampler = InPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
)
self.expsampler = ExpInPlacePathSampler(
env=env,
policy=self.exploration_agent,
encoder=self.context_encoder,
max_path_length=self.max_path_length,
)
# separate replay buffers for
# - training RL update
# - training encoder update
self.replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self.enc_replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self._n_env_steps_total = 0
self._n_train_steps_total = 0
self._n_rollouts_total = 0
self._do_train_time = 0
self._epoch_start_time = None
self._algo_start_time = None
self._old_table_keys = None
self._current_path_builder = PathBuilder()
self._exploration_paths = []
def make_exploration_policy(self, policy):
return policy
def make_eval_policy(self, policy):
return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
def train(self):
'''
meta-training loop
'''
self.pretrain()
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
gt.reset()
gt.set_def_unique(False)
self._current_path_builder = PathBuilder()
# at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
for it_ in gt.timed_for(
range(self.num_iterations),
save_itrs=True,
):
self._start_epoch(it_)
self.training_mode(True)
if it_ == 0:
print('collecting initial pool of data for train and eval')
# temp for evaluating
for idx in self.train_tasks:
self.task_idx = idx
self.env.reset_task(idx)
for _ in range(self.num_trajs):
self.collect_data_exp(self.meta_episode_len)
self.collect_data(self.num_initial_steps, 1, np.inf,add_to_enc_buffer=True)
# Sample data from train tasks.
for i in range(self.num_tasks_sample):
idx = np.random.randint(len(self.train_tasks))
self.task_idx = idx
self.env.reset_task(idx)
if (it_+1)%5==0:
self.enc_replay_buffer.task_buffers[idx].clear()
for _ in range(self.num_trajs):
self.collect_data_exp(self.meta_episode_len)
if self.num_steps_prior > 0:
self.collect_data(self.num_steps_prior, 1, np.inf,add_to_enc_buffer=True)
# collect some trajectories with z ~ posterior
if self.num_steps_posterior > 0:
self.collect_data(self.num_steps_posterior, 1, self.update_post_train,add_to_enc_buffer=True)
# even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
if self.num_extra_rl_steps_posterior > 0:
self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,)
print('collect over')
# Sample train tasks and compute gradient updates on parameters.
for train_step in range(self.num_train_steps_per_itr):
indices = np.random.choice(self.train_tasks, self.meta_batch)
self._do_training(indices)
self._n_train_steps_total += 1
gt.stamp('train')
self.training_mode(False)
# eval
self._try_to_eval(it_)
gt.stamp('eval')
self._end_epoch()
def pretrain(self):
"""
Do anything before the main training phase.
"""
pass
def sample_eval(self,indices, context):
reward = torch.zeros(context.shape[0],1,1).cuda()
rem = 0
for indice in indices:
self.env.reset_task(indice)
context_i = context[rem,...]
context_i = torch.unsqueeze(context_i,0)
self.agent.clear_z()
self.agent.infer_posterior(context_i)
path,_ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length*5,resample=1)
reward[rem] = eval_util.get_average_returns(path)
rem = rem + 1
return reward
    def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=False):
        '''
        get trajectories from current env in batch mode with given policy
        collect complete trajectories until the number of collected transitions >= num_samples
        :param num_samples: total number of transitions to sample
        :param resample_z_rate: how often to resample latent context z (in units of trajectories)
        :param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
        :param add_to_enc_buffer: whether to add collected data to encoder replay buffer
        '''
        # start from the prior
        self.agent.clear_z()
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                           max_trajs=update_posterior_rate,
                                                           accum_context=False,
                                                           resample=resample_z_rate)
            num_transitions += n_samples
            self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            # re-infer the posterior from freshly sampled context when enabled
            if update_posterior_rate != np.inf:
                context, context_unbatched = self.sample_context(self.task_idx)
                self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')
    def collect_data_exp(self, num_episodes):
        '''
        collect trajectories with the exploration sampler for the current
        task and add them to the encoder replay buffer only

        :param num_episodes: number of exploration episodes to collect
        '''
        paths, n_samples = self.expsampler.obtain_samples(max_trajs=num_episodes)
        self.enc_replay_buffer.add_paths(self.task_idx, paths)
        self._n_env_steps_total += n_samples
        gt.stamp('sample')
    def _try_to_eval(self, epoch):
        """
        Run evaluation for ``epoch`` if possible, then log timing and
        counter statistics for the iteration.
        """
        logger.save_extra_data(self.get_extra_data_to_save(epoch))
        if self._can_evaluate(epoch):
            self.evaluate(epoch,self.num_trajs)
            params = self.get_epoch_snapshot(epoch)
            logger.save_itr_params(epoch, params)
            # the tabular logger requires the same key set every iteration
            table_keys = logger.get_table_key_set()
            if self._old_table_keys is not None:
                assert table_keys == self._old_table_keys, (
                    "Table keys cannot change from iteration to iteration."
                )
            self._old_table_keys = table_keys
            logger.record_tabular(
                "Number of train steps total",
                self._n_train_steps_total,
            )
            logger.record_tabular(
                "Number of env steps total",
                self._n_env_steps_total,
            )
            logger.record_tabular(
                "Number of rollouts total",
                self._n_rollouts_total,
            )
            # per-iteration wall-clock breakdown from gtimer stamps
            times_itrs = gt.get_times().stamps.itrs
            train_time = times_itrs['train'][-1]
            if not self.use_SMM:
                sample_time = times_itrs['sample'][-1]
            else:
                sample_time = times_itrs['policy sample'][-1]
            # no eval stamp exists yet on the first epoch
            eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
            epoch_time = train_time + sample_time + eval_time
            total_time = gt.get_times().total
            logger.record_tabular('Train Time (s)', train_time)
            logger.record_tabular('(Previous) Eval Time (s)', eval_time)
            logger.record_tabular('Sample Time (s)', sample_time)
            logger.record_tabular('Epoch Time (s)', epoch_time)
            logger.record_tabular('Total Train Time (s)', total_time)
            logger.record_tabular("Epoch", epoch)
            logger.dump_tabular(with_prefix=False, with_timestamp=False)
        else:
            logger.log("Skipping eval for now.")
def _can_evaluate(self,epoch):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
:return:
"""
# eval collects its own context, so can eval any time
return True #if (epoch+1)%5==0 else False
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
    def _get_action_and_info(self, agent, observation):
        """
        Get an action to take in the environment.
        :param agent: policy queried for the action
        :param observation: current environment observation
        :return: the result of ``agent.get_action(observation)``
        """
        # let the agent know total env steps (e.g. for exploration schedules)
        agent.set_num_steps_total(self._n_env_steps_total)
        return agent.get_action(observation,)
    def _start_epoch(self, epoch):
        # record the epoch start time and reset per-epoch bookkeeping
        self._epoch_start_time = time.time()
        self._exploration_paths = []
        self._do_train_time = 0
        logger.push_prefix('Iteration #%d | ' % epoch)
    def _end_epoch(self):
        # log the epoch duration and pop the per-iteration logger prefix
        logger.log("Epoch Duration: {0}".format(
            time.time() - self._epoch_start_time
        ))
        logger.log("Started Training: {0}".format(self._can_train()))
        logger.pop_prefix()
##### Snapshotting utils #####
    def get_epoch_snapshot(self, epoch):
        """
        Build the dict of objects to snapshot every epoch.
        NOTE(review): reads ``self.exploration_policy`` and
        ``self.training_env``, which are not assigned anywhere in this
        class's visible code — presumably set by a subclass; verify.
        """
        data_to_save = dict(
            epoch=epoch,
            exploration_policy=self.exploration_policy,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        return data_to_save
    def get_extra_data_to_save(self, epoch):
        """
        Save things that shouldn't be saved every snapshot but rather
        overwritten every time.
        :param epoch: current epoch number, stored in the returned dict
        :return: dict of large/stateful objects to persist once per epoch
        """
        if self.render:
            # NOTE(review): uses self.training_env, not set in visible code
            self.training_env.render(close=True)
        data_to_save = dict(
            epoch=epoch,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        if self.save_replay_buffer:
            data_to_save['replay_buffer'] = self.replay_buffer
        if self.save_algorithm:
            data_to_save['algorithm'] = self
        return data_to_save
def collect_paths(self, idx, epoch, run):
self.task_idx = idx
self.env.reset_task(idx)
self.agent.clear_z()
paths = []
num_transitions = 0
num_trajs = 0
path, num = self.expsampler.obtain_samples(deterministic=self.eval_deterministic,
max_trajs=self.num_exp_traj_eval, accum_context_for_agent=True, context_agent = self.agent,split=True)
num_transitions += num
num_trajs +=self.num_exp_traj_eval
paths+=path
while num_transitions < self.num_steps_per_eval:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.num_steps_per_eval - num_transitions, max_trajs=1, accum_context=True)
paths += path
num_transitions += num
num_trajs += 1
self.agent.infer_posterior(self.agent.context)
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack(e['sparse_reward'] for e in p['env_infos']).reshape(-1, 1)
p['rewards'] = sparse_rewards
goal = self.env._goal
for path in paths:
path['goal'] = goal # goal
# save the paths for visualization, only useful for point mass
if self.dump_eval_paths:
logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
return paths
    def _do_eval(self, indices, epoch):
        """
        Evaluate the agent on each task in ``indices`` over ``num_evals``
        independent runs.
        :return: (final_returns, online_returns) — per-task mean return, and
            per-task online return curves truncated to a common length
        """
        final_returns = []
        online_returns = []
        for idx in indices:
            all_rets = []
            for r in range(self.num_evals):
                paths = self.collect_paths(idx, epoch, r)
                all_rets.append([eval_util.get_average_returns([p]) for p in paths])
            final_returns.append(np.mean([np.mean(a) for a in all_rets]))
            # record online returns for the first n trajectories
            n = min([len(a) for a in all_rets])
            all_rets = [a[:n] for a in all_rets]
            all_rets = np.mean(np.stack(all_rets), axis=0) # avg return per nth rollout
            online_returns.append(all_rets)
        # truncate every task's curve to the shortest, so they stack evenly
        n = min([len(t) for t in online_returns])
        online_returns = [t[:n] for t in online_returns]
        return final_returns, online_returns
def evaluate(self, epoch,num_trajs):
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
### sample trajectories from prior for debugging / visualization
if self.dump_eval_paths:
# 100 arbitrarily chosen for visualizations of point_robot trajectories
# just want stochasticity of z, not the policy
self.agent.clear_z()
#if not self.use_SMM:
prior_paths, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length * 20,
accum_context=False,
resample=1)
#else:
# prior_paths, _ = self.smm_sampler.obtain_samples(
# max_samples=self.max_path_length * 20,
# )
logger.save_extra_data(prior_paths, path='eval_trajectories/prior-epoch{}'.format(epoch))
### train tasks
# eval on a subset of train tasks for speed
indices = np.random.choice(self.train_tasks, len(self.eval_tasks))
eval_util.dprint('evaluating on {} train tasks'.format(len(indices)))
### eval train tasks with posterior sampled from the training replay buffer
train_returns = []
for idx in indices:
self.task_idx = idx
self.env.reset_task(idx)
paths = []
for _ in range(self.num_steps_per_eval // self.max_path_length):
context, context_unbatched = self.sample_context(idx)
self.agent.infer_posterior(context)
p, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length,
accum_context=False,
max_trajs=1,
resample=np.inf)
paths += p
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack(e['sparse_reward'] for e in p['env_infos']).reshape(-1, 1)
p['rewards'] = sparse_rewards
train_returns.append(eval_util.get_average_returns(paths))
train_returns = np.mean(train_returns)
### eval train tasks with on-policy data to match eval of test tasks
train_final_returns, train_online_returns = self._do_eval(indices, epoch)
eval_util.dprint('train online returns')
eval_util.dprint(train_online_returns)
### test tasks
eval_util.dprint('evaluating on {} test tasks'.format(len(self.eval_tasks)))
test_final_returns, test_online_returns = self._do_eval(self.eval_tasks, epoch)
eval_util.dprint('test online returns')
eval_util.dprint(test_online_returns)
# save the final posterior
self.agent.log_diagnostics(self.eval_statistics)
if hasattr(self.env, "log_diagnostics"):
self.env.log_diagnostics(paths, prefix=None)
avg_train_return = np.mean(train_final_returns)
avg_test_return = np.mean(test_final_returns)
avg_train_online_return = np.mean(np.stack(train_online_returns), axis=0)
avg_test_online_return = np.mean(np.stack(test_online_returns), axis=0)
self.eval_statistics['AverageTrainReturn_all_train_tasks'] = train_returns
self.eval_statistics['AverageReturn_all_train_tasks'] = avg_train_return
self.eval_statistics['AverageReturn_all_test_tasks'] = avg_test_return
logger.save_extra_data(avg_train_online_return, path='online-train-epoch{}'.format(epoch))
logger.save_extra_data(avg_test_online_return, path='online-test-epoch{}'.format(epoch))
for key, value in self.eval_statistics.items():
logger.record_tabular(key, value)
self.eval_statistics = None
if self.render_eval_paths:
self.env.render_paths(paths)
if self.plotter:
self.plotter.draw()
    @abc.abstractmethod
    def training_mode(self, mode):
        """
        Set training mode to `mode`.
        Abstract hook implemented by subclasses.
        :param mode: If True, training will happen (e.g. set the dropout
        probabilities to not all ones).
        """
        pass
@abc.abstractmethod
def _do_training(self):
"""
Perform some update, e.g. perform one gradient step.
:return:
"""
pass
class ExpAlgorithmSimple(metaclass=abc.ABCMeta):
    def __init__(
            self,
            env,
            agent,
            agent_exp,
            train_tasks,
            eval_tasks,
            encoder,
            meta_batch=64,
            num_iterations=100,
            num_train_steps_per_itr=1000,
            num_initial_steps=100,
            num_tasks_sample=100,
            num_steps_prior=100,
            num_steps_posterior=100,
            num_extra_rl_steps_posterior=100,
            num_evals=10,
            num_steps_per_eval=1000,
            batch_size=1024,
            embedding_batch_size=1024,
            embedding_mini_batch_size=1024,
            max_path_length=1000,
            discount=0.99,
            replay_buffer_size=1000000,
            reward_scale=1,
            num_exp_traj_eval=1,
            update_post_train=1,
            eval_deterministic=False,
            render=False,
            save_replay_buffer=False,
            save_algorithm=False,
            save_environment=False,
            render_eval_paths=False,
            dump_eval_paths=False,
            plotter=None,
            use_SMM=False,
            load_SMM=False,
            use_history=False,
            SMM_path=None,
            num_skills=1,
            seed_sample=False,
            snail=False,
            meta_episode_len=10,
            num_trajs=2,
            num_trajs_eval=1
    ):
        """
        :param env: training env
        :param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
        :param train_tasks: list of tasks used for training
        :param eval_tasks: list of tasks used for eval
        see default experiment config file for descriptions of the rest of the arguments
        """
        self.env = env
        self.agent = agent
        self.exploration_agent = agent_exp # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
        self.context_encoder = encoder
        self.train_tasks = train_tasks
        self.eval_tasks = eval_tasks
        self.meta_batch = meta_batch
        self.num_iterations = num_iterations
        self.num_train_steps_per_itr = num_train_steps_per_itr
        self.num_initial_steps = num_initial_steps
        self.num_tasks_sample = num_tasks_sample
        self.num_steps_prior = num_steps_prior
        self.num_steps_posterior = num_steps_posterior
        self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
        self.num_evals = num_evals
        self.num_steps_per_eval = num_steps_per_eval
        self.batch_size = batch_size
        self.embedding_batch_size = embedding_batch_size
        self.embedding_mini_batch_size = embedding_mini_batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.replay_buffer_size = replay_buffer_size
        self.reward_scale = reward_scale
        self.update_post_train = update_post_train
        self.num_exp_traj_eval = num_exp_traj_eval
        self.eval_deterministic = eval_deterministic
        self.render = render
        self.save_replay_buffer = save_replay_buffer
        self.save_algorithm = save_algorithm
        self.save_environment = save_environment
        self.eval_statistics = None
        self.render_eval_paths = render_eval_paths
        self.dump_eval_paths = dump_eval_paths
        self.plotter = plotter
        self.use_SMM = use_SMM
        self.load_SMM = load_SMM
        # FIX: was `use_history,` — the trailing comma stored a 1-tuple,
        # which is truthy even when use_history is False
        self.use_history = use_history
        self.SMM_path = SMM_path
        self.num_skills = num_skills
        self.seed_sample = seed_sample
        # NOTE: the `snail` argument is accepted but not stored/used here
        self.meta_episode_len = meta_episode_len
        self.num_trajs = num_trajs
        self.num_trajs_eval = num_trajs_eval
        # samplers: task policy, exploration policy (+encoder), seeded rollouts
        self.sampler = InPlacePathSampler(
            env=env,
            policy=agent,
            max_path_length=self.max_path_length,
        )
        self.expsampler = ExpInPlacePathSamplerSimple(
            env=env,
            policy=self.exploration_agent,
            encoder=self.context_encoder,
            max_path_length=self.max_path_length,
        )
        self.seedsampler = SeedInPlacePathSampler(
            env=env,
            policy=agent,
            max_path_length=self.max_path_length,
        )
        # separate replay buffers for
        # - training RL update
        # - training encoder update
        self.replay_buffer = MultiTaskReplayBuffer(
            self.replay_buffer_size,
            env,
            self.train_tasks,
        )
        self.enc_replay_buffer = MultiTaskReplayBuffer(
            self.replay_buffer_size,
            env,
            self.train_tasks,
        )
        # bookkeeping counters and timing state
        self._n_env_steps_total = 0
        self._n_train_steps_total = 0
        self._n_rollouts_total = 0
        self._do_train_time = 0
        self._epoch_start_time = None
        self._algo_start_time = None
        self._old_table_keys = None
        self._current_path_builder = PathBuilder()
        self._exploration_paths = []
    def make_exploration_policy(self, policy):
        # identity hook: subclasses may wrap the policy for exploration
        return policy
    def make_eval_policy(self, policy):
        # identity hook: subclasses may wrap the policy for evaluation
        return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
    def train(self):
        '''
        meta-training loop

        Each iteration: (1) collect rollouts from training tasks into the RL
        and encoder replay buffers — using the seeded sampler when
        ``seed_sample`` is set — (2) run gradient updates on sampled task
        batches, (3) evaluate and log.
        '''
        self.pretrain()
        params = self.get_epoch_snapshot(-1)
        logger.save_itr_params(-1, params)
        gt.reset()
        gt.set_def_unique(False)
        self._current_path_builder = PathBuilder()
        # at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
        for it_ in gt.timed_for(
                range(self.num_iterations),
                save_itrs=True,
        ):
            self._start_epoch(it_)
            self.training_mode(True)
            if it_ == 0:
                # seed the buffers for every train task before any updates
                print('collecting initial pool of data for train and eval')
                # temp for evaluating
                for idx in self.train_tasks:
                    self.task_idx = idx
                    self.env.reset_task(idx)
                    for _ in range(self.num_trajs):
                        self.collect_data_exp(self.meta_episode_len)
                    if self.seed_sample:
                        self.collect_data_seed(self.num_initial_steps, 1, np.inf, add_to_enc_buffer=False)
                    else:
                        self.collect_data(self.num_initial_steps, 1, np.inf,add_to_enc_buffer=False)
            # Sample data from train tasks.
            for i in range(self.num_tasks_sample):
                idx = np.random.randint(len(self.train_tasks))
                self.task_idx = idx
                self.env.reset_task(idx)
                # every 5th iteration, drop this task's stale encoder data
                if (it_+1)%5==0:
                    self.enc_replay_buffer.task_buffers[idx].clear()
                for _ in range(self.num_trajs):
                    self.collect_data_exp(self.meta_episode_len)
                # trajectories with z ~ prior
                if self.num_steps_prior > 0:
                    if self.seed_sample:
                        self.collect_data_seed(self.num_steps_prior, 1, np.inf, add_to_enc_buffer=False)
                    else:
                        self.collect_data(self.num_steps_prior, 1, np.inf,add_to_enc_buffer=False)
                # collect some trajectories with z ~ posterior
                if self.num_steps_posterior > 0:
                    if self.seed_sample:
                        self.collect_data_seed(self.num_steps_posterior, 1, self.update_post_train, add_to_enc_buffer=False)
                    else:
                        self.collect_data(self.num_steps_posterior, 1, self.update_post_train,add_to_enc_buffer=False)
                # even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
                if self.num_extra_rl_steps_posterior > 0:
                    if self.seed_sample:
                        self.collect_data_seed(self.num_extra_rl_steps_posterior, 1, self.update_post_train,)
                    else:
                        self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,)
            print('collect over')
            # Sample train tasks and compute gradient updates on parameters.
            for train_step in range(self.num_train_steps_per_itr):
                indices = np.random.choice(self.train_tasks, self.meta_batch)
                self._do_training(indices)
                self._n_train_steps_total += 1
            gt.stamp('train')
            self.training_mode(False)
            # eval
            self._try_to_eval(it_)
            gt.stamp('eval')
            self._end_epoch()
    def pretrain(self):
        """
        Do anything before the main training phase.

        Hook for subclasses; the base implementation is a no-op.
        """
        pass
def sample_eval(self,indices, context):
reward = torch.zeros(context.shape[0],1,1).cuda()
rem = 0
for indice in indices:
self.env.reset_task(indice)
context_i = context[rem,...]
context_i = torch.unsqueeze(context_i,0)
self.agent.clear_z()
self.agent.infer_posterior(context_i)
path,_ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length*5,resample=1)
reward[rem] = eval_util.get_average_returns(path)
rem = rem + 1
return reward
    def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=False):
        '''
        get trajectories from current env in batch mode with given policy
        collect complete trajectories until the number of collected transitions >= num_samples
        :param num_samples: total number of transitions to sample
        :param resample_z_rate: how often to resample latent context z (in units of trajectories)
        :param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
        :param add_to_enc_buffer: whether to add collected data to encoder replay buffer
        '''
        # start from the prior
        self.agent.clear_z()
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                           max_trajs=update_posterior_rate,
                                                           accum_context=False,
                                                           resample=resample_z_rate)
            num_transitions += n_samples
            self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            # re-infer the posterior from freshly sampled context when enabled
            if update_posterior_rate != np.inf:
                context, context_unbatched = self.sample_context(self.task_idx,False)
                self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')
    def collect_data_exp(self, num_episodes):
        '''
        collect trajectories with the exploration sampler for the current
        task and add them to the encoder replay buffer only

        :param num_episodes: number of exploration episodes to collect
        '''
        paths, n_samples = self.expsampler.obtain_samples(max_trajs=num_episodes)
        self.enc_replay_buffer.add_paths(self.task_idx, paths)
        self._n_env_steps_total += n_samples
        gt.stamp('sample')
    def collect_data_seed(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True,accumulate_context=True):
        """
        Collect at least ``num_samples`` transitions with the seeded sampler
        (one trajectory at a time) and store them in the selected buffers.
        NOTE: ``resample_z_rate`` and ``update_posterior_rate`` are currently
        unused — the posterior-update branch below is commented out.
        """
        self.agent.clear_z()
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.seedsampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                               max_trajs=1,
                                                               accum_context=accumulate_context
                                                               )
            num_transitions += n_samples
            if add_to_policy_buffer:
                self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            #if update_posterior_rate != np.inf:
            #    context = self.prepare_context(self.task_idx)
            #    self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')
    def _try_to_eval(self, epoch):
        """
        Run evaluation for ``epoch`` if possible, then log timing and
        counter statistics for the iteration.
        """
        logger.save_extra_data(self.get_extra_data_to_save(epoch))
        if self._can_evaluate(epoch):
            self.evaluate(epoch,self.num_trajs)
            params = self.get_epoch_snapshot(epoch)
            logger.save_itr_params(epoch, params)
            # the tabular logger requires the same key set every iteration
            table_keys = logger.get_table_key_set()
            if self._old_table_keys is not None:
                assert table_keys == self._old_table_keys, (
                    "Table keys cannot change from iteration to iteration."
                )
            self._old_table_keys = table_keys
            logger.record_tabular(
                "Number of train steps total",
                self._n_train_steps_total,
            )
            logger.record_tabular(
                "Number of env steps total",
                self._n_env_steps_total,
            )
            logger.record_tabular(
                "Number of rollouts total",
                self._n_rollouts_total,
            )
            # per-iteration wall-clock breakdown from gtimer stamps
            times_itrs = gt.get_times().stamps.itrs
            train_time = times_itrs['train'][-1]
            if not self.use_SMM:
                sample_time = times_itrs['sample'][-1]
            else:
                sample_time = times_itrs['policy sample'][-1]
            # no eval stamp exists yet on the first epoch
            eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
            epoch_time = train_time + sample_time + eval_time
            total_time = gt.get_times().total
            logger.record_tabular('Train Time (s)', train_time)
            logger.record_tabular('(Previous) Eval Time (s)', eval_time)
            logger.record_tabular('Sample Time (s)', sample_time)
            logger.record_tabular('Epoch Time (s)', epoch_time)
            logger.record_tabular('Total Train Time (s)', total_time)
            logger.record_tabular("Epoch", epoch)
            logger.dump_tabular(with_prefix=False, with_timestamp=False)
        else:
            logger.log("Skipping eval for now.")
def _can_evaluate(self,epoch):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
:return:
"""
# eval collects its own context, so can eval any time
return True #if (epoch+1)%5==0 else False
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
    def _get_action_and_info(self, agent, observation):
        """
        Get an action to take in the environment.
        :param agent: policy queried for the action
        :param observation: current environment observation
        :return: the result of ``agent.get_action(observation)``
        """
        # let the agent know total env steps (e.g. for exploration schedules)
        agent.set_num_steps_total(self._n_env_steps_total)
        return agent.get_action(observation,)
    def _start_epoch(self, epoch):
        # record the epoch start time and reset per-epoch bookkeeping
        self._epoch_start_time = time.time()
        self._exploration_paths = []
        self._do_train_time = 0
        logger.push_prefix('Iteration #%d | ' % epoch)
    def _end_epoch(self):
        # log the epoch duration and pop the per-iteration logger prefix
        logger.log("Epoch Duration: {0}".format(
            time.time() - self._epoch_start_time
        ))
        logger.log("Started Training: {0}".format(self._can_train()))
        logger.pop_prefix()
##### Snapshotting utils #####
    def get_epoch_snapshot(self, epoch):
        """
        Build the dict of objects to snapshot every epoch.
        NOTE(review): reads ``self.exploration_policy`` and
        ``self.training_env``, which ``__init__`` does not assign —
        presumably set by a subclass; verify.
        """
        data_to_save = dict(
            epoch=epoch,
            exploration_policy=self.exploration_policy,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        return data_to_save
    def get_extra_data_to_save(self, epoch):
        """
        Save things that shouldn't be saved every snapshot but rather
        overwritten every time.
        :param epoch: current epoch number, stored in the returned dict
        :return: dict of large/stateful objects to persist once per epoch
        """
        if self.render:
            # NOTE(review): uses self.training_env, not set in __init__
            self.training_env.render(close=True)
        data_to_save = dict(
            epoch=epoch,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        if self.save_replay_buffer:
            data_to_save['replay_buffer'] = self.replay_buffer
        if self.save_algorithm:
            data_to_save['algorithm'] = self
        return data_to_save
def collect_paths(self, idx, epoch, run):
self.task_idx = idx
self.env.reset_task(idx)
self.agent.clear_z()
paths = []
num_transitions = 0
num_trajs = 0
path, num = self.expsampler.obtain_samples(deterministic=self.eval_deterministic,
max_trajs=self.num_exp_traj_eval, accum_context_for_agent=True, context_agent = self.agent,split=True)
num_transitions += num
num_trajs +=self.num_exp_traj_eval
paths+=path
while num_transitions < self.num_steps_per_eval:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.num_steps_per_eval - num_transitions, max_trajs=1, accum_context=True)
paths += path
num_transitions += num
num_trajs += 1
self.agent.infer_posterior(self.agent.context)
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack(e['sparse_reward'] for e in p['env_infos']).reshape(-1, 1)
p['rewards'] = sparse_rewards
goal = self.env._goal
for path in paths:
path['goal'] = goal # goal
# save the paths for visualization, only useful for point mass
if self.dump_eval_paths:
logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
return paths
    def _do_eval(self, indices, epoch):
        """
        Evaluate the agent on each task in ``indices`` over ``num_evals``
        independent runs.
        :return: (final_returns, online_returns) — per-task mean return, and
            per-task online return curves truncated to a common length
        """
        final_returns = []
        online_returns = []
        for idx in indices:
            all_rets = []
            for r in range(self.num_evals):
                paths = self.collect_paths(idx, epoch, r)
                all_rets.append([eval_util.get_average_returns([p]) for p in paths])
            final_returns.append(np.mean([np.mean(a) for a in all_rets]))
            # record online returns for the first n trajectories
            n = min([len(a) for a in all_rets])
            all_rets = [a[:n] for a in all_rets]
            all_rets = np.mean(np.stack(all_rets), axis=0) # avg return per nth rollout
            online_returns.append(all_rets)
        # truncate every task's curve to the shortest, so they stack evenly
        n = min([len(t) for t in online_returns])
        online_returns = [t[:n] for t in online_returns]
        return final_returns, online_returns
def evaluate(self, epoch,num_trajs):
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
### sample trajectories from prior for debugging / visualization
if self.dump_eval_paths:
# 100 arbitrarily chosen for visualizations of point_robot trajectories
# just want stochasticity of z, not the policy
self.agent.clear_z()
#if not self.use_SMM:
prior_paths, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length * 20,
accum_context=False,
resample=1)
#else:
# prior_paths, _ = self.smm_sampler.obtain_samples(
# max_samples=self.max_path_length * 20,
# )
logger.save_extra_data(prior_paths, path='eval_trajectories/prior-epoch{}'.format(epoch))
### train tasks
# eval on a subset of train tasks for speed
indices = np.random.choice(self.train_tasks, len(self.eval_tasks))
eval_util.dprint('evaluating on {} train tasks'.format(len(indices)))
### eval train tasks with posterior sampled from the training replay buffer
train_returns = []
for idx in indices:
self.task_idx = idx
self.env.reset_task(idx)
paths = []
for _ in range(self.num_steps_per_eval // self.max_path_length):
context, context_unbatched = self.sample_context(idx)
self.agent.infer_posterior(context)
p, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length,
accum_context=False,
max_trajs=1,
resample=np.inf)
paths += p
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack(e['sparse_reward'] for e in p['env_infos']).reshape(-1, 1)
p['rewards'] = sparse_rewards
train_returns.append(eval_util.get_average_returns(paths))
train_returns = np.mean(train_returns)
### eval train tasks with on-policy data to match eval of test tasks
train_final_returns, train_online_returns = self._do_eval(indices, epoch)
eval_util.dprint('train online returns')
eval_util.dprint(train_online_returns)
### test tasks
eval_util.dprint('evaluating on {} test tasks'.format(len(self.eval_tasks)))
test_final_returns, test_online_returns = self._do_eval(self.eval_tasks, epoch)
eval_util.dprint('test online returns')
eval_util.dprint(test_online_returns)
# save the final posterior
self.agent.log_diagnostics(self.eval_statistics)
if hasattr(self.env, "log_diagnostics"):
self.env.log_diagnostics(paths, prefix=None)
avg_train_return = np.mean(train_final_returns)
avg_test_return = np.mean(test_final_returns)
avg_train_online_return = np.mean(np.stack(train_online_returns), axis=0)
avg_test_online_return = np.mean(np.stack(test_online_returns), axis=0)
self.eval_statistics['AverageTrainReturn_all_train_tasks'] = train_returns
self.eval_statistics['AverageReturn_all_train_tasks'] = avg_train_return
self.eval_statistics['AverageReturn_all_test_tasks'] = avg_test_return
logger.save_extra_data(avg_train_online_return, path='online-train-epoch{}'.format(epoch))
logger.save_extra_data(avg_test_online_return, path='online-test-epoch{}'.format(epoch))
for key, value in self.eval_statistics.items():
logger.record_tabular(key, value)
self.eval_statistics = None
if self.render_eval_paths:
self.env.render_paths(paths)
if self.plotter:
self.plotter.draw()
    @abc.abstractmethod
    def training_mode(self, mode):
        """
        Set training mode to `mode`.
        Abstract hook implemented by subclasses.
        :param mode: If True, training will happen (e.g. set the dropout
        probabilities to not all ones).
        """
        pass
@abc.abstractmethod
def _do_training(self):
"""
Perform some update, e.g. perform one gradient step.
:return:
"""
pass
class ExpAlgorithmIter(metaclass=abc.ABCMeta):
    def __init__(
            self,
            env,
            agent,
            agent_exp,
            train_tasks,
            eval_tasks,
            encoder,
            meta_batch=64,
            num_iterations=100,
            num_train_steps_per_itr=1000,
            num_initial_steps=100,
            num_tasks_sample=100,
            num_steps_prior=100,
            num_steps_posterior=100,
            num_extra_rl_steps_posterior=100,
            num_evals=10,
            num_steps_per_eval=1000,
            batch_size=1024,
            embedding_batch_size=1024,
            embedding_mini_batch_size=1024,
            max_path_length=1000,
            discount=0.99,
            replay_buffer_size=1000000,
            reward_scale=1,
            num_exp_traj_eval=1,
            update_post_train=1,
            eval_deterministic=False,
            render=False,
            save_replay_buffer=False,
            save_algorithm=False,
            save_environment=False,
            render_eval_paths=False,
            dump_eval_paths=False,
            plotter=None,
            use_SMM=False,
            load_SMM=False,
            use_history=False,
            SMM_path=None,
            num_skills=1,
            seed_sample=False,
            snail=False,
            meta_episode_len=10,
            num_trajs=2,
            num_trajs_eval=1
    ):
        """
        :param env: training env
        :param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
        :param train_tasks: list of tasks used for training
        :param eval_tasks: list of tasks used for eval
        see default experiment config file for descriptions of the rest of the arguments
        """
        self.env = env
        self.agent = agent
        self.exploration_agent = agent_exp # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
        self.context_encoder = encoder
        self.train_tasks = train_tasks
        self.eval_tasks = eval_tasks
        self.meta_batch = meta_batch
        self.num_iterations = num_iterations
        self.num_train_steps_per_itr = num_train_steps_per_itr
        self.num_initial_steps = num_initial_steps
        self.num_tasks_sample = num_tasks_sample
        self.num_steps_prior = num_steps_prior
        self.num_steps_posterior = num_steps_posterior
        self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
        self.num_evals = num_evals
        self.num_steps_per_eval = num_steps_per_eval
        self.batch_size = batch_size
        self.embedding_batch_size = embedding_batch_size
        self.embedding_mini_batch_size = embedding_mini_batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.replay_buffer_size = replay_buffer_size
        self.reward_scale = reward_scale
        self.update_post_train = update_post_train
        self.num_exp_traj_eval = num_exp_traj_eval
        self.eval_deterministic = eval_deterministic
        self.render = render
        self.save_replay_buffer = save_replay_buffer
        self.save_algorithm = save_algorithm
        self.save_environment = save_environment
        self.eval_statistics = None
        self.render_eval_paths = render_eval_paths
        self.dump_eval_paths = dump_eval_paths
        self.plotter = plotter
        self.use_SMM = use_SMM
        self.load_SMM = load_SMM
        # FIX: was `use_history,` — the trailing comma stored a 1-tuple,
        # which is truthy even when use_history is False
        self.use_history = use_history
        self.SMM_path = SMM_path
        self.num_skills = num_skills
        self.seed_sample = seed_sample
        # NOTE: the `snail` argument is accepted but not stored/used here
        self.meta_episode_len = meta_episode_len
        self.num_trajs = num_trajs
        self.num_trajs_eval = num_trajs_eval
        # samplers: task policy, exploration policy (+encoder), seeded rollouts
        self.sampler = InPlacePathSampler(
            env=env,
            policy=agent,
            max_path_length=self.max_path_length,
        )
        self.expsampler = ExpInPlacePathSamplerSimple(
            env=env,
            policy=self.exploration_agent,
            encoder=self.context_encoder,
            max_path_length=self.max_path_length,
        )
        self.seedsampler = SeedInPlacePathSampler(
            env=env,
            policy=agent,
            max_path_length=self.max_path_length,
        )
        # separate replay buffers for
        # - training RL update
        # - training encoder update
        self.replay_buffer = MultiTaskReplayBuffer(
            self.replay_buffer_size,
            env,
            self.train_tasks,
        )
        self.enc_replay_buffer = MultiTaskReplayBuffer(
            self.replay_buffer_size,
            env,
            self.train_tasks,
        )
        # bookkeeping counters and timing state
        self._n_env_steps_total = 0
        self._n_train_steps_total = 0
        self._n_rollouts_total = 0
        self._do_train_time = 0
        self._epoch_start_time = None
        self._algo_start_time = None
        self._old_table_keys = None
        self._current_path_builder = PathBuilder()
        self._exploration_paths = []
def make_exploration_policy(self, policy):
return policy
def make_eval_policy(self, policy):
return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
    def train(self):
        '''
        meta-training loop: each iteration collects fresh data from the train
        tasks, performs meta-gradient updates, then attempts an evaluation
        '''
        self.pretrain()
        params = self.get_epoch_snapshot(-1)
        logger.save_itr_params(-1, params)
        gt.reset()
        gt.set_def_unique(False)
        self._current_path_builder = PathBuilder()
        # at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
        for it_ in gt.timed_for(
                range(self.num_iterations),
                save_itrs=True,
        ):
            self._start_epoch(it_)
            self.training_mode(True)
            if it_ == 0:
                print('collecting initial pool of data for train and eval')
                # temp for evaluating
                # seed every train task's buffers once before any updates
                for idx in self.train_tasks:
                    self.task_idx = idx
                    self.env.reset_task(idx)
                    # exploration trajectories feed the encoder replay buffer
                    for _ in range(self.num_trajs):
                        self.collect_data_exp(self.meta_episode_len)
                    # RL transitions feed only the policy replay buffer here
                    if self.seed_sample:
                        self.collect_data_seed(self.num_initial_steps, 1, np.inf, add_to_enc_buffer=False)
                    else:
                        self.collect_data(self.num_initial_steps, 1, np.inf,add_to_enc_buffer=False)
            # Sample data from train tasks.
            for i in range(self.num_tasks_sample):
                idx = np.random.randint(len(self.train_tasks))
                self.task_idx = idx
                self.env.reset_task(idx)
                #if (it_+1)%5==0:
                #    self.enc_replay_buffer.task_buffers[idx].clear()
                for _ in range(self.num_trajs):
                    self.collect_data_exp(self.meta_episode_len)
                # collect some trajectories with z ~ prior
                if self.num_steps_prior > 0:
                    if self.seed_sample:
                        self.collect_data_seed(self.num_steps_prior, 1, np.inf, add_to_enc_buffer=False)
                    else:
                        self.collect_data(self.num_steps_prior, 1, np.inf,add_to_enc_buffer=False)
                # collect some trajectories with z ~ posterior
                if self.num_steps_posterior > 0:
                    if self.seed_sample:
                        self.collect_data_seed(self.num_steps_posterior, 1, self.update_post_train, add_to_enc_buffer=False)
                    else:
                        self.collect_data(self.num_steps_posterior, 1, self.update_post_train,add_to_enc_buffer=False)
                # even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
                if self.num_extra_rl_steps_posterior > 0:
                    if self.seed_sample:
                        self.collect_data_seed(self.num_extra_rl_steps_posterior, 1, self.update_post_train,)
                    else:
                        self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,)
            print('collect over')
            # Sample train tasks and compute gradient updates on parameters.
            # Two update passes are run; the second passes 1000 to _do_training —
            # presumably a training-phase/step offset, confirm against subclass.
            for train_step in range(self.num_train_steps_per_itr):
                indices = np.random.choice(self.train_tasks, self.meta_batch)
                self._do_training(indices,0)
                self._n_train_steps_total += 1
            for train_step in range(self.num_train_steps_per_itr):
                indices = np.random.choice(self.train_tasks, self.meta_batch)
                self._do_training(indices,1000)
                self._n_train_steps_total += 1
            gt.stamp('train')
            self.training_mode(False)
            # eval
            self._try_to_eval(it_)
            gt.stamp('eval')
            self._end_epoch()
def pretrain(self):
"""
Do anything before the main training phase.
"""
pass
def sample_eval(self,indices, context):
reward = torch.zeros(context.shape[0],1,1).cuda()
rem = 0
for indice in indices:
self.env.reset_task(indice)
context_i = context[rem,...]
context_i = torch.unsqueeze(context_i,0)
self.agent.clear_z()
self.agent.infer_posterior(context_i)
path,_ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length*5,resample=1)
reward[rem] = eval_util.get_average_returns(path)
rem = rem + 1
return reward
    def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=False):
        '''
        get trajectories from current env in batch mode with the task policy
        collect complete trajectories until the number of collected transitions >= num_samples
        :param num_samples: total number of transitions to sample
        :param resample_z_rate: how often to resample latent context z (in units of trajectories)
        :param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
        :param add_to_enc_buffer: whether to add collected data to encoder replay buffer
        '''
        # start from the prior
        self.agent.clear_z()
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                           max_trajs=update_posterior_rate,
                                                           accum_context=False,
                                                           resample=resample_z_rate)
            num_transitions += n_samples
            # RL transitions always go to the policy replay buffer
            self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            if update_posterior_rate != np.inf:
                # re-infer q(z|c) from freshly sampled context between batches
                context, context_unbatched = self.sample_context(self.task_idx,False)
                self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')
    def collect_data_exp(self, num_episodes):
        '''
        collect complete exploration trajectories with the exploration policy
        and store them in the encoder replay buffer
        :param num_episodes: number of exploration episodes to roll out
        '''
        paths, n_samples = self.expsampler.obtain_samples(max_trajs=num_episodes)
        self.enc_replay_buffer.add_paths(self.task_idx, paths)
        self._n_env_steps_total += n_samples
        gt.stamp('sample')
    def collect_data_seed(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True,accumulate_context=True):
        '''
        collect one trajectory at a time with the seeded sampler, starting
        from the prior, until num_samples transitions have been gathered
        :param num_samples: total number of transitions to sample
        :param resample_z_rate: unused in this implementation — TODO confirm
        :param update_posterior_rate: unused here (posterior update is commented out below)
        :param add_to_enc_buffer: whether to add collected data to the encoder replay buffer
        :param add_to_policy_buffer: whether to add collected data to the RL replay buffer
        :param accumulate_context: whether the sampler accumulates context during rollout
        '''
        self.agent.clear_z()
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.seedsampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                               max_trajs=1,
                                                               accum_context=accumulate_context
                                                               )
            num_transitions += n_samples
            if add_to_policy_buffer:
                self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            #if update_posterior_rate != np.inf:
            #    context = self.prepare_context(self.task_idx)
            #    self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')
    def _try_to_eval(self, epoch):
        """
        Save snapshot data and, if evaluation is permitted this epoch, run the
        full evaluation and dump the tabular log for this iteration.

        :param epoch: current epoch index
        """
        logger.save_extra_data(self.get_extra_data_to_save(epoch))
        if self._can_evaluate(epoch):
            self.evaluate(epoch,self.num_trajs)
            params = self.get_epoch_snapshot(epoch)
            logger.save_itr_params(epoch, params)
            # the tabular logger requires an identical key set every iteration
            table_keys = logger.get_table_key_set()
            if self._old_table_keys is not None:
                assert table_keys == self._old_table_keys, (
                    "Table keys cannot change from iteration to iteration."
                )
            self._old_table_keys = table_keys
            logger.record_tabular(
                "Number of train steps total",
                self._n_train_steps_total,
            )
            logger.record_tabular(
                "Number of env steps total",
                self._n_env_steps_total,
            )
            logger.record_tabular(
                "Number of rollouts total",
                self._n_rollouts_total,
            )
            times_itrs = gt.get_times().stamps.itrs
            train_time = times_itrs['train'][-1]
            # SMM runs stamp sampling under a different gtimer label
            if not self.use_SMM:
                sample_time = times_itrs['sample'][-1]
            else:
                sample_time = times_itrs['policy sample'][-1]
            # no 'eval' timestamp exists yet on the very first epoch
            eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
            epoch_time = train_time + sample_time + eval_time
            total_time = gt.get_times().total
            logger.record_tabular('Train Time (s)', train_time)
            logger.record_tabular('(Previous) Eval Time (s)', eval_time)
            logger.record_tabular('Sample Time (s)', sample_time)
            logger.record_tabular('Epoch Time (s)', epoch_time)
            logger.record_tabular('Total Train Time (s)', total_time)
            logger.record_tabular("Epoch", epoch)
            logger.dump_tabular(with_prefix=False, with_timestamp=False)
        else:
            logger.log("Skipping eval for now.")
def _can_evaluate(self,epoch):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
:return:
"""
# eval collects its own context, so can eval any time
return True #if (epoch+1)%5==0 else False
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
def _get_action_and_info(self, agent, observation):
"""
Get an action to take in the environment.
:param observation:
:return:
"""
agent.set_num_steps_total(self._n_env_steps_total)
return agent.get_action(observation,)
def _start_epoch(self, epoch):
self._epoch_start_time = time.time()
self._exploration_paths = []
self._do_train_time = 0
logger.push_prefix('Iteration #%d | ' % epoch)
def _end_epoch(self):
logger.log("Epoch Duration: {0}".format(
time.time() - self._epoch_start_time
))
logger.log("Started Training: {0}".format(self._can_train()))
logger.pop_prefix()
##### Snapshotting utils #####
def get_epoch_snapshot(self, epoch):
data_to_save = dict(
epoch=epoch,
exploration_policy=self.exploration_policy,
)
if self.save_environment:
data_to_save['env'] = self.training_env
return data_to_save
def get_extra_data_to_save(self, epoch):
"""
Save things that shouldn't be saved every snapshot but rather
overwritten every time.
:param epoch:
:return:
"""
if self.render:
self.training_env.render(close=True)
data_to_save = dict(
epoch=epoch,
)
if self.save_environment:
data_to_save['env'] = self.training_env
if self.save_replay_buffer:
data_to_save['replay_buffer'] = self.replay_buffer
if self.save_algorithm:
data_to_save['algorithm'] = self
return data_to_save
def collect_paths(self, idx, epoch, run):
self.task_idx = idx
self.env.reset_task(idx)
self.agent.clear_z()
paths = []
num_transitions = 0
num_trajs = 0
path, num = self.expsampler.obtain_samples(deterministic=self.eval_deterministic,
max_trajs=self.num_exp_traj_eval, accum_context_for_agent=True, context_agent = self.agent,split=True)
num_transitions += num
num_trajs +=self.num_exp_traj_eval
paths+=path
while num_transitions < self.num_steps_per_eval:
path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.num_steps_per_eval - num_transitions, max_trajs=1, accum_context=True)
paths += path
num_transitions += num
num_trajs += 1
self.agent.infer_posterior(self.agent.context)
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack(e['sparse_reward'] for e in p['env_infos']).reshape(-1, 1)
p['rewards'] = sparse_rewards
goal = self.env._goal
for path in paths:
path['goal'] = goal # goal
# save the paths for visualization, only useful for point mass
if self.dump_eval_paths:
logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
return paths
    def _do_eval(self, indices, epoch):
        """
        Run on-policy evaluation on each task in `indices`, averaging over
        self.num_evals independent runs per task.

        :param indices: task indices to evaluate
        :param epoch: current epoch (forwarded to collect_paths for labelling)
        :return: (final_returns, online_returns) — per-task mean return, and
                 per-task average return of the n-th rollout, truncated to the
                 shortest run length
        """
        final_returns = []
        online_returns = []
        for idx in indices:
            all_rets = []
            for r in range(self.num_evals):
                paths = self.collect_paths(idx, epoch, r)
                all_rets.append([eval_util.get_average_returns([p]) for p in paths])
            final_returns.append(np.mean([np.mean(a) for a in all_rets]))
            # record online returns for the first n trajectories
            n = min([len(a) for a in all_rets])
            all_rets = [a[:n] for a in all_rets]
            all_rets = np.mean(np.stack(all_rets), axis=0)  # avg return per nth rollout
            online_returns.append(all_rets)
        # truncate all tasks to the shortest online-return sequence
        n = min([len(t) for t in online_returns])
        online_returns = [t[:n] for t in online_returns]
        return final_returns, online_returns
def evaluate(self, epoch,num_trajs):
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
### sample trajectories from prior for debugging / visualization
if self.dump_eval_paths:
# 100 arbitrarily chosen for visualizations of point_robot trajectories
# just want stochasticity of z, not the policy
self.agent.clear_z()
#if not self.use_SMM:
prior_paths, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length * 20,
accum_context=False,
resample=1)
#else:
# prior_paths, _ = self.smm_sampler.obtain_samples(
# max_samples=self.max_path_length * 20,
# )
logger.save_extra_data(prior_paths, path='eval_trajectories/prior-epoch{}'.format(epoch))
### train tasks
# eval on a subset of train tasks for speed
indices = np.random.choice(self.train_tasks, len(self.eval_tasks))
eval_util.dprint('evaluating on {} train tasks'.format(len(indices)))
### eval train tasks with posterior sampled from the training replay buffer
train_returns = []
for idx in indices:
self.task_idx = idx
self.env.reset_task(idx)
paths = []
for _ in range(self.num_steps_per_eval // self.max_path_length):
context, context_unbatched = self.sample_context(idx)
self.agent.infer_posterior(context)
p, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
max_samples=self.max_path_length,
accum_context=False,
max_trajs=1,
resample=np.inf)
paths += p
if self.sparse_rewards:
for p in paths:
sparse_rewards = np.stack(e['sparse_reward'] for e in p['env_infos']).reshape(-1, 1)
p['rewards'] = sparse_rewards
train_returns.append(eval_util.get_average_returns(paths))
train_returns = np.mean(train_returns)
### eval train tasks with on-policy data to match eval of test tasks
train_final_returns, train_online_returns = self._do_eval(indices, epoch)
eval_util.dprint('train online returns')
eval_util.dprint(train_online_returns)
### test tasks
eval_util.dprint('evaluating on {} test tasks'.format(len(self.eval_tasks)))
test_final_returns, test_online_returns = self._do_eval(self.eval_tasks, epoch)
eval_util.dprint('test online returns')
eval_util.dprint(test_online_returns)
# save the final posterior
self.agent.log_diagnostics(self.eval_statistics)
if hasattr(self.env, "log_diagnostics"):
self.env.log_diagnostics(paths, prefix=None)
avg_train_return = np.mean(train_final_returns)
avg_test_return = np.mean(test_final_returns)
avg_train_online_return = np.mean(np.stack(train_online_returns), axis=0)
avg_test_online_return = np.mean(np.stack(test_online_returns), axis=0)
self.eval_statistics['AverageTrainReturn_all_train_tasks'] = train_returns
self.eval_statistics['AverageReturn_all_train_tasks'] = avg_train_return
self.eval_statistics['AverageReturn_all_test_tasks'] = avg_test_return
logger.save_extra_data(avg_train_online_return, path='online-train-epoch{}'.format(epoch))
logger.save_extra_data(avg_test_online_return, path='online-test-epoch{}'.format(epoch))
for key, value in self.eval_statistics.items():
logger.record_tabular(key, value)
self.eval_statistics = None
if self.render_eval_paths:
self.env.render_paths(paths)
if self.plotter:
self.plotter.draw()
    @abc.abstractmethod
    def training_mode(self, mode):
        """
        Set training mode to `mode`.

        Called by train() with True before collecting/updating and with
        False before evaluation each iteration.

        :param mode: If True, training will happen (e.g. set the dropout
            probabilities to not all ones).
        """
        pass
    @abc.abstractmethod
    def _do_training(self):
        """
        Perform some update, e.g. perform one gradient step.

        NOTE(review): train() invokes this as self._do_training(indices, step),
        so concrete subclasses must accept those positional arguments even
        though this abstract stub declares none — confirm against the subclass.

        :return:
        """
        pass
class ExpAlgorithmSimpleOnline(metaclass=abc.ABCMeta):
    """
    Fully-online variant of the exploration-based meta-RL trainer: each
    iteration samples a meta-batch of train tasks, clears and re-collects
    each task's encoder buffer with the exploration policy, then performs a
    single meta-update before evaluating. Subclasses implement
    `training_mode` and `_do_training`.
    """
    def __init__(
            self,
            env,
            agent,
            agent_exp,
            train_tasks,
            eval_tasks,
            encoder,
            meta_batch=64,
            num_iterations=100,
            num_train_steps_per_itr=1000,
            num_initial_steps=100,
            num_tasks_sample=100,
            num_steps_prior=100,
            num_steps_posterior=100,
            num_extra_rl_steps_posterior=100,
            num_evals=10,
            num_steps_per_eval=1000,
            batch_size=1024,
            embedding_batch_size=1024,
            embedding_mini_batch_size=1024,
            max_path_length=1000,
            discount=0.99,
            replay_buffer_size=1000000,
            reward_scale=1,
            num_exp_traj_eval=1,
            update_post_train=1,
            eval_deterministic=True,
            render=False,
            save_replay_buffer=False,
            save_algorithm=False,
            save_environment=False,
            render_eval_paths=False,
            dump_eval_paths=False,
            plotter=None,
            use_SMM=False,
            load_SMM=False,
            use_history=False,
            SMM_path=None,
            num_skills=1,
            seed_sample=False,
            snail=False,
            meta_episode_len=10,
            num_trajs=2,
            num_trajs_eval=1
    ):
        """
        :param env: training env
        :param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
        :param agent_exp: separate policy used purely for exploration
        :param train_tasks: list of tasks used for training
        :param eval_tasks: list of tasks used for eval
        :param encoder: context encoder used by the exploration sampler
        see default experiment config file for descriptions of the rest of the arguments
        """
        self.env = env
        self.agent = agent
        self.exploration_agent = agent_exp  # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
        self.context_encoder = encoder
        self.train_tasks = train_tasks
        self.eval_tasks = eval_tasks
        self.meta_batch = meta_batch
        self.num_iterations = num_iterations
        self.num_train_steps_per_itr = num_train_steps_per_itr
        self.num_initial_steps = num_initial_steps
        self.num_tasks_sample = num_tasks_sample
        self.num_steps_prior = num_steps_prior
        self.num_steps_posterior = num_steps_posterior
        self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
        self.num_evals = num_evals
        self.num_steps_per_eval = num_steps_per_eval
        self.batch_size = batch_size
        self.embedding_batch_size = embedding_batch_size
        self.embedding_mini_batch_size = embedding_mini_batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.replay_buffer_size = replay_buffer_size
        self.reward_scale = reward_scale
        self.update_post_train = update_post_train
        self.num_exp_traj_eval = num_exp_traj_eval
        self.eval_deterministic = eval_deterministic
        self.render = render
        self.save_replay_buffer = save_replay_buffer
        self.save_algorithm = save_algorithm
        self.save_environment = save_environment
        self.eval_statistics = None
        self.render_eval_paths = render_eval_paths
        self.dump_eval_paths = dump_eval_paths
        self.plotter = plotter
        self.use_SMM = use_SMM
        self.load_SMM = load_SMM
        # BUG FIX: a stray trailing comma previously made this a 1-tuple
        # (use_history,) instead of the plain flag value
        self.use_history = use_history
        self.SMM_path = SMM_path
        self.num_skills = num_skills
        self.seed_sample = seed_sample
        self.meta_episode_len = meta_episode_len
        self.num_trajs = num_trajs
        self.num_trajs_eval = num_trajs_eval
        # samplers: task policy, exploration policy (+encoder), seeded
        self.sampler = InPlacePathSampler(
            env=env,
            policy=agent,
            max_path_length=self.max_path_length,
        )
        self.expsampler = ExpInPlacePathSamplerSimple(
            env=env,
            policy=self.exploration_agent,
            encoder=self.context_encoder,
            max_path_length=self.max_path_length,
        )
        self.seedsampler = SeedInPlacePathSampler(
            env=env,
            policy=agent,
            max_path_length=self.max_path_length,
        )
        # only the encoder replay buffer is created here; NOTE(review):
        # collect_data/collect_data_seed/_can_train below reference
        # self.replay_buffer, which this class never creates — presumably a
        # subclass provides it; confirm before calling those methods.
        self.enc_replay_buffer = MultiTaskReplayBuffer(
            self.replay_buffer_size,
            env,
            self.train_tasks,
        )
        self._n_env_steps_total = 0
        self._n_train_steps_total = 0
        self._n_rollouts_total = 0
        self._do_train_time = 0
        self._epoch_start_time = None
        self._algo_start_time = None
        self._old_table_keys = None
        self._current_path_builder = PathBuilder()
        self._exploration_paths = []

    def make_exploration_policy(self, policy):
        """Hook for wrapping a policy for exploration; identity by default."""
        return policy

    def make_eval_policy(self, policy):
        """Hook for wrapping a policy for evaluation; identity by default."""
        return policy

    def sample_task(self, is_eval=False):
        '''
        sample a task index uniformly from the eval or train pool
        '''
        pool = self.eval_tasks if is_eval else self.train_tasks
        return np.random.randint(len(pool))

    def train(self):
        '''
        meta-training loop: per iteration, re-collect fresh exploration data
        for a sampled meta-batch of tasks, run one meta-update, then evaluate
        '''
        self.pretrain()
        params = self.get_epoch_snapshot(-1)
        logger.save_itr_params(-1, params)
        gt.reset()
        gt.set_def_unique(False)
        self._current_path_builder = PathBuilder()
        # at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
        for it_ in gt.timed_for(
                range(self.num_iterations),
                save_itrs=True,
        ):
            self._start_epoch(it_)
            self.training_mode(True)
            indices = np.random.choice(self.train_tasks, self.meta_batch, replace=False)
            # Sample data from train tasks.
            for i in indices:
                idx = i
                self.task_idx = idx
                self.env.reset_task(idx)
                # fully online: drop stale context data before re-collecting
                self.enc_replay_buffer.task_buffers[idx].clear()
                self.collect_data_exp(self.meta_episode_len)
            print('collect over')
            # Sample train tasks and compute gradient updates on parameters.
            self._do_training(indices)
            self._n_train_steps_total += 1
            gt.stamp('train')
            self.training_mode(False)
            # eval
            self._try_to_eval(it_)
            gt.stamp('eval')
            self._end_epoch()

    def pretrain(self):
        """
        Do anything before the main training phase.
        """
        pass

    def sample_eval(self, indices, context):
        """
        Roll out the task policy conditioned on each task's context row and
        return the average return per task as a (len(indices), 1, 1) tensor.
        """
        # NOTE(review): hard-coded .cuda() assumes a GPU run — kept for
        # behavioral parity with the original; confirm for CPU deployments.
        reward = torch.zeros(context.shape[0], 1, 1).cuda()
        for rem, indice in enumerate(indices):
            self.env.reset_task(indice)
            context_i = torch.unsqueeze(context[rem, ...], 0)
            self.agent.clear_z()
            self.agent.infer_posterior(context_i)
            path, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.max_path_length*5, resample=1)
            reward[rem] = eval_util.get_average_returns(path)
        return reward

    def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=False):
        '''
        get trajectories from current env in batch mode with the task policy
        collect complete trajectories until the number of collected transitions >= num_samples
        :param num_samples: total number of transitions to sample
        :param resample_z_rate: how often to resample latent context z (in units of trajectories)
        :param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
        :param add_to_enc_buffer: whether to add collected data to encoder replay buffer
        '''
        # start from the prior
        self.agent.clear_z()
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                           max_trajs=update_posterior_rate,
                                                           accum_context=False,
                                                           resample=resample_z_rate)
            num_transitions += n_samples
            # NOTE(review): self.replay_buffer is not created by this class's
            # __init__ — this raises AttributeError unless a subclass sets it
            self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            if update_posterior_rate != np.inf:
                context, context_unbatched = self.sample_context(self.task_idx)
                self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')

    def collect_data_exp(self, num_episodes, accum_context_for_agent=False, context_agent=None):
        '''
        collect complete exploration trajectories with the exploration policy
        and store them in the encoder replay buffer
        :param num_episodes: number of exploration episodes to roll out
        :param accum_context_for_agent: whether the sampler should also
            accumulate context for `context_agent`
        :param context_agent: agent whose context is accumulated when
            accum_context_for_agent is True
        '''
        paths, n_samples = self.expsampler.obtain_samples(max_trajs=num_episodes, accum_context_for_agent=accum_context_for_agent, context_agent=context_agent)
        self.enc_replay_buffer.add_paths(self.task_idx, paths)
        self._n_env_steps_total += n_samples
        gt.stamp('sample')

    def collect_data_seed(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True, add_to_policy_buffer=True, accumulate_context=True):
        '''
        collect one trajectory at a time with the seeded sampler, starting
        from the prior, until num_samples transitions have been gathered
        :param resample_z_rate: unused in this implementation — TODO confirm
        :param update_posterior_rate: unused here (posterior update commented out)
        '''
        self.agent.clear_z()
        num_transitions = 0
        while num_transitions < num_samples:
            paths, n_samples = self.seedsampler.obtain_samples(max_samples=num_samples - num_transitions,
                                                               max_trajs=1,
                                                               accum_context=accumulate_context
                                                               )
            num_transitions += n_samples
            if add_to_policy_buffer:
                # NOTE(review): self.replay_buffer is not created by this
                # class's __init__ — see collect_data above
                self.replay_buffer.add_paths(self.task_idx, paths)
            if add_to_enc_buffer:
                self.enc_replay_buffer.add_paths(self.task_idx, paths)
            #if update_posterior_rate != np.inf:
            #    context = self.prepare_context(self.task_idx)
            #    self.agent.infer_posterior(context)
        self._n_env_steps_total += num_transitions
        gt.stamp('sample')

    def _try_to_eval(self, epoch):
        """
        Save snapshot data and, if evaluation is permitted this epoch, run
        the full evaluation and dump the tabular log for this iteration.
        """
        logger.save_extra_data(self.get_extra_data_to_save(epoch))
        if self._can_evaluate(epoch):
            self.evaluate(epoch, self.num_trajs)
            params = self.get_epoch_snapshot(epoch)
            logger.save_itr_params(epoch, params)
            # the tabular logger requires an identical key set every iteration
            table_keys = logger.get_table_key_set()
            if self._old_table_keys is not None:
                assert table_keys == self._old_table_keys, (
                    "Table keys cannot change from iteration to iteration."
                )
            self._old_table_keys = table_keys
            logger.record_tabular(
                "Number of train steps total",
                self._n_train_steps_total,
            )
            logger.record_tabular(
                "Number of env steps total",
                self._n_env_steps_total,
            )
            logger.record_tabular(
                "Number of rollouts total",
                self._n_rollouts_total,
            )
            times_itrs = gt.get_times().stamps.itrs
            train_time = times_itrs['train'][-1]
            # SMM runs stamp sampling under a different gtimer label
            if not self.use_SMM:
                sample_time = times_itrs['sample'][-1]
            else:
                sample_time = times_itrs['policy sample'][-1]
            # no 'eval' timestamp exists yet on the very first epoch
            eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
            epoch_time = train_time + sample_time + eval_time
            total_time = gt.get_times().total
            logger.record_tabular('Train Time (s)', train_time)
            logger.record_tabular('(Previous) Eval Time (s)', eval_time)
            logger.record_tabular('Sample Time (s)', sample_time)
            logger.record_tabular('Epoch Time (s)', epoch_time)
            logger.record_tabular('Total Train Time (s)', total_time)
            logger.record_tabular("Epoch", epoch)
            logger.dump_tabular(with_prefix=False, with_timestamp=False)
        else:
            logger.log("Skipping eval for now.")

    def _can_evaluate(self, epoch):
        """
        One annoying thing about the logger table is that the keys at each
        iteration need to be the exact same. So unless you can compute
        everything, skip evaluation.
        A common example for why you might want to skip evaluation is that at
        the beginning of training, you may not have enough data for a
        validation and training set.
        :return: True when evaluation can run this epoch
        """
        # eval collects its own context, so can eval any time
        return True  # if (epoch+1)%5==0 else False

    def _can_train(self):
        """True when every train task has at least one RL batch buffered."""
        # NOTE(review): references self.replay_buffer, which this class never
        # creates — works only if a subclass provides it
        return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])

    def _get_action_and_info(self, agent, observation):
        """
        Get an action to take in the environment.
        :param observation: current observation passed to the agent
        :return: whatever the agent's get_action returns
        """
        agent.set_num_steps_total(self._n_env_steps_total)
        return agent.get_action(observation,)

    def _start_epoch(self, epoch):
        """Reset per-epoch bookkeeping and prefix log lines with the epoch."""
        self._epoch_start_time = time.time()
        self._exploration_paths = []
        self._do_train_time = 0
        logger.push_prefix('Iteration #%d | ' % epoch)

    def _end_epoch(self):
        """Log epoch duration and training readiness, then pop the log prefix."""
        logger.log("Epoch Duration: {0}".format(
            time.time() - self._epoch_start_time
        ))
        logger.log("Started Training: {0}".format(self._can_train()))
        logger.pop_prefix()

    ##### Snapshotting utils #####
    def get_epoch_snapshot(self, epoch):
        """Build the dict of objects saved in this epoch's parameter snapshot."""
        # NOTE(review): relies on self.exploration_policy / self.training_env,
        # which are not assigned in __init__ (it sets exploration_agent/env) —
        # presumably provided by a subclass; confirm before use.
        data_to_save = dict(
            epoch=epoch,
            exploration_policy=self.exploration_policy,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        return data_to_save

    def get_extra_data_to_save(self, epoch):
        """
        Save things that shouldn't be saved every snapshot but rather
        overwritten every time.
        :param epoch:
        :return: dict of extra data, contents gated by the save_* flags
        """
        if self.render:
            self.training_env.render(close=True)
        data_to_save = dict(
            epoch=epoch,
        )
        if self.save_environment:
            data_to_save['env'] = self.training_env
        if self.save_replay_buffer:
            data_to_save['replay_buffer'] = self.replay_buffer
        if self.save_algorithm:
            data_to_save['algorithm'] = self
        return data_to_save

    def collect_paths(self, idx, epoch, run):
        """
        Collect evaluation rollouts on task `idx`: first exploration
        trajectories that build up the agent's context, then task-policy
        rollouts with the posterior re-inferred after every trajectory.
        """
        self.task_idx = idx
        self.env.reset_task(idx)
        self.agent.clear_z()
        paths = []
        num_transitions = 0
        num_trajs = 0
        # exploration phase: accumulate context for the task agent
        path, num = self.expsampler.obtain_samples(deterministic=self.eval_deterministic,
                                                   max_trajs=self.num_exp_traj_eval, accum_context_for_agent=True, context_agent=self.agent, split=True)
        num_transitions += num
        num_trajs += self.num_exp_traj_eval
        paths += path
        # evaluation phase: task-policy rollouts with online posterior updates
        while num_transitions < self.num_steps_per_eval:
            path, num = self.sampler.obtain_samples(deterministic=self.eval_deterministic, max_samples=self.num_steps_per_eval - num_transitions, max_trajs=1, accum_context=True)
            paths += path
            num_transitions += num
            num_trajs += 1
            self.agent.infer_posterior(self.agent.context)
        if self.sparse_rewards:
            for p in paths:
                # BUG FIX: np.stack requires a sequence of arrays; a bare
                # generator raises TypeError on modern NumPy
                sparse_rewards = np.stack([e['sparse_reward'] for e in p['env_infos']]).reshape(-1, 1)
                p['rewards'] = sparse_rewards
        goal = self.env._goal
        for path in paths:
            path['goal'] = goal  # goal
        # save the paths for visualization, only useful for point mass
        if self.dump_eval_paths:
            logger.save_extra_data(paths, path='eval_trajectories/task{}-epoch{}-run{}'.format(idx, epoch, run))
        return paths

    def _do_eval(self, indices, epoch):
        """
        Run on-policy evaluation on each task in `indices`, averaging over
        self.num_evals independent runs per task.
        :return: (final_returns, online_returns) — per-task mean return and
                 per-task average return of the n-th rollout
        """
        final_returns = []
        online_returns = []
        for idx in indices:
            all_rets = []
            for r in range(self.num_evals):
                paths = self.collect_paths(idx, epoch, r)
                all_rets.append([eval_util.get_average_returns([p]) for p in paths])
            final_returns.append(np.mean([np.mean(a) for a in all_rets]))
            # record online returns for the first n trajectories
            n = min([len(a) for a in all_rets])
            all_rets = [a[:n] for a in all_rets]
            all_rets = np.mean(np.stack(all_rets), axis=0)  # avg return per nth rollout
            online_returns.append(all_rets)
        n = min([len(t) for t in online_returns])
        online_returns = [t[:n] for t in online_returns]
        return final_returns, online_returns

    def evaluate(self, epoch, num_trajs):
        """
        Run the full evaluation suite for this epoch: off-policy returns on
        train tasks (posterior from buffered context), on-policy returns on
        train tasks, and on-policy returns on held-out test tasks.
        :param num_trajs: unused here; kept for call-site compatibility
        """
        if self.eval_statistics is None:
            self.eval_statistics = OrderedDict()
        ### sample trajectories from prior for debugging / visualization
        if self.dump_eval_paths:
            # 100 arbitrarily chosen for visualizations of point_robot trajectories
            # just want stochasticity of z, not the policy
            self.agent.clear_z()
            #if not self.use_SMM:
            prior_paths, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
                                                         max_samples=self.max_path_length * 20,
                                                         accum_context=False,
                                                         resample=1)
            #else:
            #    prior_paths, _ = self.smm_sampler.obtain_samples(
            #        max_samples=self.max_path_length * 20,
            #    )
            logger.save_extra_data(prior_paths, path='eval_trajectories/prior-epoch{}'.format(epoch))
        ### train tasks
        # eval on a subset of train tasks for speed
        indices = np.random.choice(self.train_tasks, len(self.eval_tasks))
        eval_util.dprint('evaluating on {} train tasks'.format(len(indices)))
        ### eval train tasks with posterior sampled from the training replay buffer
        train_returns = []
        for idx in indices:
            self.task_idx = idx
            self.env.reset_task(idx)
            paths = []
            for _ in range(self.num_steps_per_eval // self.max_path_length):
                context, context_unbatched = self.sample_context(idx)
                self.agent.infer_posterior(context)
                p, _ = self.sampler.obtain_samples(deterministic=self.eval_deterministic,
                                                   max_samples=self.max_path_length,
                                                   accum_context=False,
                                                   max_trajs=1,
                                                   resample=np.inf)
                paths += p
            if self.sparse_rewards:
                for p in paths:
                    # BUG FIX: np.stack requires a sequence of arrays; a bare
                    # generator raises TypeError on modern NumPy
                    sparse_rewards = np.stack([e['sparse_reward'] for e in p['env_infos']]).reshape(-1, 1)
                    p['rewards'] = sparse_rewards
            train_returns.append(eval_util.get_average_returns(paths))
        train_returns = np.mean(train_returns)
        ### eval train tasks with on-policy data to match eval of test tasks
        train_final_returns, train_online_returns = self._do_eval(indices, epoch)
        eval_util.dprint('train online returns')
        eval_util.dprint(train_online_returns)
        ### test tasks
        eval_util.dprint('evaluating on {} test tasks'.format(len(self.eval_tasks)))
        test_final_returns, test_online_returns = self._do_eval(self.eval_tasks, epoch)
        eval_util.dprint('test online returns')
        eval_util.dprint(test_online_returns)
        # save the final posterior
        self.agent.log_diagnostics(self.eval_statistics)
        if hasattr(self.env, "log_diagnostics"):
            # NOTE(review): `paths` here is whatever the last train-task loop
            # iteration left behind — presumably intentional, but confirm
            self.env.log_diagnostics(paths, prefix=None)
        avg_train_return = np.mean(train_final_returns)
        avg_test_return = np.mean(test_final_returns)
        avg_train_online_return = np.mean(np.stack(train_online_returns), axis=0)
        avg_test_online_return = np.mean(np.stack(test_online_returns), axis=0)
        self.eval_statistics['AverageTrainReturn_all_train_tasks'] = train_returns
        self.eval_statistics['AverageReturn_all_train_tasks'] = avg_train_return
        self.eval_statistics['AverageReturn_all_test_tasks'] = avg_test_return
        logger.save_extra_data(avg_train_online_return, path='online-train-epoch{}'.format(epoch))
        logger.save_extra_data(avg_test_online_return, path='online-test-epoch{}'.format(epoch))
        for key, value in self.eval_statistics.items():
            logger.record_tabular(key, value)
        self.eval_statistics = None
        if self.render_eval_paths:
            self.env.render_paths(paths)
        if self.plotter:
            self.plotter.draw()

    @abc.abstractmethod
    def training_mode(self, mode):
        """
        Set training mode to `mode`.
        :param mode: If True, training will happen (e.g. set the dropout
            probabilities to not all ones).
        """
        pass

    @abc.abstractmethod
    def _do_training(self):
        """
        Perform some update, e.g. perform one gradient step.

        NOTE(review): train() invokes this as self._do_training(indices), so
        concrete subclasses must accept that positional argument.
        :return:
        """
        pass
class RLAlgorithm(metaclass=abc.ABCMeta):
    def __init__(
            self,
            env,
            exploration_policy: ExplorationPolicy,
            training_env=None,
            num_epochs=100,
            num_episodes=100,
            num_steps_per_epoch=10000,
            num_steps_per_eval=1000,
            num_updates_per_episode=50,
            num_updates_per_env_step=1,
            num_updates_per_epoch=None,
            batch_size=1024,
            max_path_length=1000,
            discount=0.99,
            replay_buffer_size=1000000,
            reward_scale=1,
            min_num_steps_before_training=None,
            render=False,
            save_replay_buffer=False,
            save_algorithm=False,
            save_environment=True,
            eval_sampler=None,
            eval_policy=None,
            replay_buffer=None,
            collection_mode='online',
    ):
        """
        Base class for RL Algorithms
        :param env: Environment used to evaluate.
        :param exploration_policy: Policy used to explore
        :param training_env: Environment used by the algorithm. By default, a
        copy of `env` will be made for training, so that training and
        evaluation are completely independent.
        :param num_epochs:
        :param num_episodes: Used by episodic training mode.
        :param num_steps_per_epoch:
        :param num_steps_per_eval:
        :param num_updates_per_env_step: Used by online training mode.
        :param num_updates_per_epoch: Used by batch training mode.
        :param num_updates_per_episode: Used by episodic training mode.
        :param batch_size:
        :param max_path_length:
        :param discount:
        :param replay_buffer_size:
        :param reward_scale:
        :param min_num_steps_before_training:
        :param render:
        :param save_replay_buffer:
        :param save_algorithm:
        :param save_environment:
        :param eval_sampler:
        :param eval_policy: Policy to evaluate with.
        :param replay_buffer:
        :param collection_mode: String determining how training happens
         - 'online': Train after every step taken in the environment.
         - 'batch': Train after every epoch.
         - 'episodic': Train after every episode.
        """
        assert collection_mode in ['online', 'batch', 'episodic']
        if collection_mode == 'batch':
            assert num_updates_per_epoch is not None
        # Deep-copy env via pickle so training and evaluation never share state.
        self.training_env = training_env or pickle.loads(pickle.dumps(env))
        self.exploration_policy = exploration_policy
        self.num_epochs = num_epochs
        self.num_env_steps_per_epoch = num_steps_per_epoch
        self.num_steps_per_eval = num_steps_per_eval
        # How many gradient updates one _try_to_train() call performs depends
        # on the collection mode.
        if collection_mode == 'online':
            self.num_updates_per_train_call = num_updates_per_env_step
        elif collection_mode == 'batch':
            self.num_updates_per_train_call = num_updates_per_epoch
        elif collection_mode == 'episodic':
            self.num_updates_per_train_call = num_updates_per_episode
            self.num_episodes = num_episodes
        else:
            # Unreachable after the assert above; kept as a defensive guard.
            raise TypeError("Invalid collection_mode: {}".format(
                collection_mode
            ))
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.replay_buffer_size = replay_buffer_size
        self.reward_scale = reward_scale
        self.render = render
        self.collection_mode = collection_mode
        self.save_replay_buffer = save_replay_buffer
        self.save_algorithm = save_algorithm
        self.save_environment = save_environment
        if min_num_steps_before_training is None:
            min_num_steps_before_training = self.num_env_steps_per_epoch
        self.min_num_steps_before_training = min_num_steps_before_training
        if eval_sampler is None:
            if eval_policy is None:
                eval_policy = exploration_policy
            # Budget one extra path length so the sampler can finish its
            # final rollout.
            eval_sampler = SMMInPlacePathSampler(
                env=env,
                policy=eval_policy,
                max_samples=self.num_steps_per_eval + self.max_path_length,
                max_path_length=self.max_path_length,
            )
        self.eval_policy = eval_policy
        self.eval_sampler = eval_sampler
        self.eval_statistics = OrderedDict()
        self.need_to_update_eval_statistics = True
        self.action_space = env.action_space
        self.obs_space = env.observation_space
        self.env = env
        if replay_buffer is None:
            replay_buffer = EnvReplayBuffer(
                self.replay_buffer_size,
                self.env,
            )
        self.replay_buffer = replay_buffer
        # Bookkeeping counters and per-epoch scratch state.
        self._n_env_steps_total = 0
        self._n_train_steps_total = 0
        self._n_rollouts_total = 0
        self._do_train_time = 0
        self._epoch_start_time = None
        self._algo_start_time = None
        self._old_table_keys = None
        self._current_path_builder = PathBuilder()
        self._exploration_paths = []
        self.post_epoch_funcs = []
def train(self, start_epoch=0):
self.pretrain()
if start_epoch == 0:
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
self.training_mode(False)
self._n_env_steps_total = start_epoch * self.num_env_steps_per_epoch
gt.reset()
gt.set_def_unique(False)
if self.collection_mode == 'online':
self.train_online(start_epoch=start_epoch)
elif self.collection_mode == 'batch':
self.train_batch(start_epoch=start_epoch)
elif self.collection_mode == 'episodic':
self.train_episodic(start_episode=start_epoch)
else:
raise TypeError("Invalid collection_mode: {}".format(
self.collection_mode
))
    def pretrain(self):
        """Hook executed once before training starts; subclasses may override."""
        pass
    def train_online(self, start_epoch=0):
        """Online mode: train after every environment step within each epoch."""
        self._current_path_builder = PathBuilder()
        for epoch in gt.timed_for(
                range(start_epoch, self.num_epochs),
                save_itrs=True,
        ):
            self._start_epoch(epoch)
            set_to_train_mode(self.training_env)
            observation = self._start_new_rollout()
            for _ in range(self.num_env_steps_per_epoch):
                observation = self._take_step_in_env(observation)
                gt.stamp('sample')
                # 'online' semantics: one training call per environment step.
                self._try_to_train()
                gt.stamp('train')
            set_to_eval_mode(self.env)
            self._try_to_eval(epoch)
            gt.stamp('eval')
            self._end_epoch(epoch)
    def train_batch(self, start_epoch):
        """Batch mode: collect a full epoch of steps, then train once."""
        self._current_path_builder = PathBuilder()
        for epoch in gt.timed_for(
                range(start_epoch, self.num_epochs),
                save_itrs=True,
        ):
            self._start_epoch(epoch)
            set_to_train_mode(self.training_env)
            observation = self._start_new_rollout()
            # This implementation is rather naive. If you want to (e.g.)
            # parallelize data collection, this would be the place to do it.
            for _ in range(self.num_env_steps_per_epoch):
                observation = self._take_step_in_env(observation)
                gt.stamp('sample')
            # 'batch' semantics: one training call per epoch, after sampling.
            self._try_to_train()
            gt.stamp('train')
            set_to_eval_mode(self.env)
            self._try_to_eval(epoch)
            gt.stamp('eval')
            self._end_epoch(epoch)
    def train_episodic(self, start_episode=0):
        """Episodic mode: roll out one full episode, then train and evaluate."""
        self._current_path_builder = PathBuilder()
        eval_paths = []
        for episode in gt.timed_for(
                range(start_episode, self.num_episodes),
                save_itrs=True,
        ):
            self._start_epoch(episode)
            set_to_train_mode(self.training_env)
            observation = self._start_new_rollout()
            done = False
            while not done:
                action, agent_info = self._get_action_and_info(
                    observation,
                )
                if self.render:
                    self.training_env.render()
                next_ob, raw_reward, _terminal, env_info = (
                    self.training_env.step(action)
                )
                next_ob = self._proc_observation(next_ob)
                self._n_env_steps_total += 1
                reward = raw_reward * self.reward_scale
                terminal = np.array([_terminal])
                reward = np.array([reward])
                self._handle_step(
                    observation,
                    action,
                    reward,
                    next_ob,
                    terminal,
                    agent_info=agent_info,
                    env_info=env_info,
                )
                gt.stamp('sample')
                observation = next_ob
                # Episode ends on terminal or when it hits max_path_length.
                done = (_terminal or len(self._current_path_builder) >= self.max_path_length)
            # Get path for the current episode.
            eval_path = self._current_path_builder.get_all_stacked()
            eval_paths.append(eval_path)
            # Empty self._current_path_builder, and perform other algorithm-specific handles.
            self._handle_rollout_ending()
            # Train policy.
            self._try_to_train()
            gt.stamp('train')
            # Evaluate policy.
            self.need_to_update_eval_statistics = False  # Evaluate regardless of whether we trained or not.
            set_to_eval_mode(self.env)
            self._try_to_eval(episode)
            gt.stamp('eval')
            self._end_epoch(episode)
        # Evaluate over all episodes.
        # NOTE: `episode` below is the for-loop variable's final value; this
        # block assumes at least one episode ran.
        print("Evaluating all {} episodes...".format(len(eval_paths)))
        self.need_to_update_eval_statistics = False  # Evaluate eval_path regardless of whether we trained or not.
        set_to_eval_mode(self.env)
        self._try_to_eval(episode, eval_paths=eval_paths)
        gt.stamp('eval')
        self._end_epoch(episode)
        # Save plots.
        if hasattr(self.env, "draw"):
            self.env.draw(eval_paths, save_dir=logger.get_snapshot_dir())
    def _proc_observation(self, ob):
        """Hook for observation post-processing; identity by default."""
        return ob
    def _take_step_in_env(self, observation):
        """Take one exploration step and store the resulting transition.

        :param observation: current (already processed) observation.
        :return: the observation to act from next -- a fresh reset if the
            rollout just ended, otherwise the next observation.
        """
        action, agent_info = self._get_action_and_info(
            observation,
        )
        if self.render:
            self.training_env.render()
        next_ob, raw_reward, terminal, env_info = (
            self.training_env.step(action)
        )
        next_ob = self._proc_observation(next_ob)
        self._n_env_steps_total += 1
        reward = raw_reward * self.reward_scale
        terminal = np.array([terminal])
        reward = np.array([reward])
        self._handle_step(
            observation,
            action,
            reward,
            next_ob,
            terminal,
            agent_info=agent_info,
            env_info=env_info,
        )
        # End the rollout on a terminal signal or when it reaches max length.
        if terminal or len(self._current_path_builder) >= self.max_path_length:
            self._handle_rollout_ending()
            new_observation = self._start_new_rollout()
        else:
            new_observation = next_ob
        return new_observation
def _try_to_train(self):
if self._can_train():
self.training_mode(True)
for i in range(self.num_updates_per_train_call):
self._do_training()
self._n_train_steps_total += 1
self.training_mode(False)
    def _try_to_eval(self, epoch, eval_paths=None):
        """Evaluate and log tabular stats for `epoch`, when evaluation is possible.

        :param epoch: epoch/episode index used for snapshots and logging.
        :param eval_paths: optional pre-collected paths to evaluate instead
            of sampling fresh ones.
        """
        logger.save_extra_data(self.get_extra_data_to_save(epoch))
        if self._can_evaluate():
            self.evaluate(epoch, eval_paths=eval_paths)
            params = self.get_epoch_snapshot(epoch)
            logger.save_itr_params(epoch, params)
            # The tabular logger requires an identical key set every iteration.
            table_keys = logger.get_table_key_set()
            if self._old_table_keys is not None:
                assert table_keys == self._old_table_keys, (
                    "Table keys cannot change from iteration to iteration."
                )
            self._old_table_keys = table_keys
            logger.record_tabular(
                "Number of train steps total",
                self._n_train_steps_total,
            )
            logger.record_tabular(
                "Number of env steps total",
                self._n_env_steps_total,
            )
            logger.record_tabular(
                "Number of rollouts total",
                self._n_rollouts_total,
            )
            times_itrs = gt.get_times().stamps.itrs
            train_time = times_itrs['train'][-1]
            sample_time = times_itrs['sample'][-1]
            # No 'eval' stamp exists yet on the very first epoch.
            eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
            epoch_time = train_time + sample_time + eval_time
            total_time = gt.get_times().total
            logger.record_tabular('Train Time (s)', train_time)
            logger.record_tabular('(Previous) Eval Time (s)', eval_time)
            logger.record_tabular('Sample Time (s)', sample_time)
            logger.record_tabular('Epoch Time (s)', epoch_time)
            logger.record_tabular('Total Train Time (s)', total_time)
            logger.record_tabular("Epoch", epoch)
            logger.dump_tabular(with_prefix=False, with_timestamp=False)
        else:
            logger.log("Skipping eval for now.")
def _can_evaluate(self):
"""
One annoying thing about the logger table is that the keys at each
iteration need to be the exact same. So unless you can compute
everything, skip evaluation.
A common example for why you might want to skip evaluation is that at
the beginning of training, you may not have enough data for a
validation and training set.
"""
return (
len(self._exploration_paths) > 0
and not self.need_to_update_eval_statistics
)
def _can_train(self):
return (
self.replay_buffer.num_steps_can_sample() >=
self.min_num_steps_before_training
)
    def _get_action_and_info(self, observation):
        """
        Get an action to take in the environment.
        :param observation:
        :return: (action, agent_info) pair from the exploration policy.
        """
        # Let schedule-/anneal-aware policies know how far training is.
        self.exploration_policy.set_num_steps_total(self._n_env_steps_total)
        return self.exploration_policy.get_action(
            observation,
        )
    def _start_epoch(self, epoch):
        """Reset per-epoch bookkeeping and push a logging prefix."""
        self._epoch_start_time = time.time()
        self._exploration_paths = []
        self._do_train_time = 0
        logger.push_prefix('Iteration #%d | ' % epoch)
    def _end_epoch(self, epoch):
        """Log epoch duration, pop the logging prefix, run post-epoch hooks."""
        logger.log("Epoch Duration: {0}".format(
            time.time() - self._epoch_start_time
        ))
        logger.log("Started Training: {0}".format(self._can_train()))
        logger.pop_prefix()
        for post_epoch_func in self.post_epoch_funcs:
            post_epoch_func(self, epoch)
def _start_new_rollout(self):
self.exploration_policy.reset()
ob = self.training_env.reset()
return self._proc_observation(ob)
def _handle_path(self, path):
"""
Naive implementation: just loop through each transition.
:param path:
:return:
"""
for (
ob,
action,
reward,
next_ob,
terminal,
agent_info,
env_info
) in zip(
path["observations"],
path["actions"],
path["rewards"],
path["next_observations"],
path["terminals"],
path["agent_infos"],
path["env_infos"],
):
self._handle_step(
ob,
action,
reward,
next_ob,
terminal,
agent_info=agent_info,
env_info=env_info,
)
self._handle_rollout_ending()
    def _handle_step(
            self,
            observation,
            action,
            reward,
            next_observation,
            terminal,
            agent_info,
            env_info,
    ):
        """
        Implement anything that needs to happen after every step
        :return:
        """
        # Record the transition both in the current path builder (used for
        # logging/evaluation) and in the replay buffer (used for training).
        self._current_path_builder.add_all(
            observations=observation,
            actions=action,
            rewards=reward,
            next_observations=next_observation,
            terminals=terminal,
            agent_infos=agent_info,
            env_infos=env_info,
        )
        self.replay_buffer.add_sample(
            observation=observation,
            action=action,
            reward=reward,
            terminal=terminal,
            next_observation=next_observation,
            agent_info=agent_info,
            env_info=env_info,
        )
    def _handle_rollout_ending(self):
        """
        Implement anything that needs to happen after every rollout.
        """
        self.replay_buffer.terminate_episode()
        self._n_rollouts_total += 1
        # Archive the finished path for exploration statistics and start a
        # fresh builder for the next rollout.
        if len(self._current_path_builder) > 0:
            self._exploration_paths.append(
                self._current_path_builder.get_all_stacked()
            )
            self._current_path_builder = PathBuilder()
def get_epoch_snapshot(self, epoch):
data_to_save = dict(
epoch=epoch,
exploration_policy=self.exploration_policy,
eval_policy=self.eval_policy,
)
if self.save_environment:
data_to_save['env'] = self.training_env
return data_to_save
    def get_extra_data_to_save(self, epoch):
        """
        Save things that shouldn't be saved every snapshot but rather
        overwritten every time.
        :param epoch:
        :return:
        """
        if self.render:
            self.training_env.render(close=True)
        data_to_save = dict(
            epoch=epoch,
        )
        # Each of these can be large, so saving them is opt-in.
        if self.save_environment:
            data_to_save['env'] = self.training_env
        if self.save_replay_buffer:
            data_to_save['replay_buffer'] = self.replay_buffer
        if self.save_algorithm:
            data_to_save['algorithm'] = self
        return data_to_save
    @abc.abstractmethod
    def training_mode(self, mode):
        """
        Set training mode to `mode`.
        :param mode: If True, training will happen (e.g. set the dropout
        probabilities to not all ones).
        """
        # Subclasses flip train/eval state on their networks here.
        pass
    def evaluate(self, epoch, eval_paths=None):
        """Compute and log evaluation statistics for this epoch.

        :param epoch: index used when recording tensorboard scalars.
        :param eval_paths: optional pre-collected paths; fresh paths are
            sampled when omitted.
        """
        statistics = OrderedDict()
        statistics.update(self.eval_statistics)
        logger.log("Collecting samples for evaluation")
        if eval_paths:
            test_paths = eval_paths
        else:
            test_paths = self.get_eval_paths()
        statistics.update(eval_util.get_generic_path_information(
            test_paths, stat_prefix="Test",
        ))
        if len(self._exploration_paths) > 0:
            statistics.update(eval_util.get_generic_path_information(
                self._exploration_paths, stat_prefix="Exploration",
            ))
        # Optional per-env diagnostics hooks.
        if hasattr(self.env, "log_diagnostics"):
            self.env.log_diagnostics(test_paths, logger=logger)
        if hasattr(self.env, "get_diagnostics"):
            env_statistics = self.env.get_diagnostics(test_paths)
            if env_statistics is not None:
                statistics.update(env_statistics)
        average_returns = eval_util.get_average_returns(test_paths)
        statistics['AverageReturn'] = average_returns
        for key, value in statistics.items():
            logger.record_tabular(key, value)
            logger.record_tensorboard_scalar(key, value, epoch)
        # Force fresh statistics to be gathered before the next evaluation.
        self.need_to_update_eval_statistics = True
    def get_eval_paths(self):
        """Sample fresh evaluation paths with the configured eval sampler."""
        return self.eval_sampler.obtain_samples()
    @abc.abstractmethod
    def _do_training(self):
        """
        Perform some update, e.g. perform one gradient step.
        :return:
        """
        pass
def set_to_train_mode(env):
    """Switch `env` into training mode when it exposes a `train()` hook."""
    train_hook = getattr(env, 'train', None)
    if train_hook is not None:
        train_hook()
def set_to_eval_mode(env):
    """Switch `env` into evaluation mode when it exposes an `eval()` hook."""
    eval_hook = getattr(env, 'eval', None)
    if eval_hook is not None:
        eval_hook()
| 65,924 | 78,378 | 184 |
036e451730e5775a3e9f11a7159dc78fabeb20d8 | 1,588 | py | Python | iButton.py | sgreene570/cshrgbledsign | 4845b92c3d23346e58cb293d0a4eebb2f1caa2eb | [
"MIT"
] | 3 | 2016-12-01T05:40:00.000Z | 2017-04-19T20:42:16.000Z | iButton.py | sgreene570/cshrgbledsign | 4845b92c3d23346e58cb293d0a4eebb2f1caa2eb | [
"MIT"
] | null | null | null | iButton.py | sgreene570/cshrgbledsign | 4845b92c3d23346e58cb293d0a4eebb2f1caa2eb | [
"MIT"
] | 1 | 2016-12-02T22:43:56.000Z | 2016-12-02T22:43:56.000Z | import os
import time
import csh_ldap as ldap
import login
if __name__ == "__main__":
main()
| 24.060606 | 63 | 0.547229 | import os
import time
import csh_ldap as ldap
import login
def main():
global cshldap
cshldap = ldap.CSHLDAP(login.ldap_user, login.ldap_pass)
get_ibutton()
def find_user(varID):
ibutton = varID.strip()
ibutton = "*" + ibutton[3:] + "01"
try:
member = cshldap.get_member_ibutton(ibutton)
return member.homeDirectory
except Exception as e:
print(e)
return None
def get_ibutton():
base_dir = '/sys/devices/w1_bus_master1/w1_master_slaves'
delete_dir = '/sys/devices/w1_bus_master1/w1_master_remove'
while True:
data = open(base_dir, "r")
ibutton = data.read()
ibutton = ibutton.strip()
data.close()
d = open(delete_dir, "w")
if ibutton != "not found.":
print(ibutton)
d.write(ibutton)
d.flush()
try:
find_colors(find_user(ibutton))
except Exception as e:
print(e)
time.sleep(3)
d.close()
def find_colors(user_dir):
if user_dir is not None:
cfile = os.path.join(user_dir, ".colors")
temp = os.popen("ssh -i /home/sgreen/.ssh/id_rsa "
+ login.file_server + " cat " + cfile)
colors = temp.read().split()
temp.close()
for line in colors:
if(line[:5] is "delay"):
time.sleep(float(line[7:]))
else:
print(line)
os.system("curl -X POST -d 'color=" + line +
"' localhost:80")
if __name__ == "__main__":
main()
| 1,393 | 0 | 92 |
6091acc7b448a2f01f3f747403b3166b9a7b9b41 | 1,616 | py | Python | django/contrib/gis/db/backends/postgis/const.py | uskudnik/django | 828e3b1335e2614d338a630fd5b5f88d38a6b5d2 | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | django/contrib/gis/db/backends/postgis/const.py | uskudnik/django | 828e3b1335e2614d338a630fd5b5f88d38a6b5d2 | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | django/contrib/gis/db/backends/postgis/const.py | uskudnik/django | 828e3b1335e2614d338a630fd5b5f88d38a6b5d2 | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | """
PostGIS to GDAL conversion constant definitions
"""
# Lookup to convert pixel type values from GDAL to PostGIS
GDAL_TO_POSTGIS = [None, 4, 6, 5, 8, 7, 10, 11, None, None, None, None]
# Lookup to convert pixel type values from PostGIS to GDAL
POSTGIS_TO_GDAL = [1, 1, 1, 3, 1, 3, 2, 5, 4, None, 6, 7, None, None]
# Struct pack structure for raster header, the raster header has the
# following structure:
#
# Endianness, PostGIS raster version, number of bands, scale, origin,
# skew, srid, width, and height.
#
# Scale, origin, and skew have x and y values. PostGIS currently uses
# a fixed endianness (1) and there is only one version (0).
POSTGIS_HEADER_STRUCTURE = 'B H H d d d d d d i H H'
# Lookup values to convert GDAL pixel types to struct characters. This is
# used to pack and unpack the pixel values of PostGIS raster bands.
GDAL_TO_STRUCT = [
None, 'B', 'H', 'h', 'L', 'l', 'f', 'd',
None, None, None, None,
]
# Size of the packed value in bytes for different numerical types.
# This is needed to cut chunks of band data out of PostGIS raster strings
# when decomposing them into GDALRasters.
# See https://docs.python.org/library/struct.html#format-characters
STRUCT_SIZE = {
'b': 1, # Signed char
'B': 1, # Unsigned char
'?': 1, # _Bool
'h': 2, # Short
'H': 2, # Unsigned short
'i': 4, # Integer
'I': 4, # Unsigned Integer
'l': 4, # Long
'L': 4, # Unsigned Long
'f': 4, # Float
'd': 8, # Double
}
# See https://trac.osgeo.org/postgis/wiki/WKTRaster/RFC/RFC1_V0SerialFormat#Pixeltypeandstorageflag
BANDTYPE_FLAG_HASNODATA = 1 << 6
| 34.382979 | 99 | 0.67203 | """
PostGIS to GDAL conversion constant definitions
"""
# Lookup to convert pixel type values from GDAL to PostGIS
GDAL_TO_POSTGIS = [None, 4, 6, 5, 8, 7, 10, 11, None, None, None, None]
# Lookup to convert pixel type values from PostGIS to GDAL
POSTGIS_TO_GDAL = [1, 1, 1, 3, 1, 3, 2, 5, 4, None, 6, 7, None, None]
# Struct pack structure for raster header, the raster header has the
# following structure:
#
# Endianness, PostGIS raster version, number of bands, scale, origin,
# skew, srid, width, and height.
#
# Scale, origin, and skew have x and y values. PostGIS currently uses
# a fixed endianness (1) and there is only one version (0).
POSTGIS_HEADER_STRUCTURE = 'B H H d d d d d d i H H'
# Lookup values to convert GDAL pixel types to struct characters. This is
# used to pack and unpack the pixel values of PostGIS raster bands.
GDAL_TO_STRUCT = [
None, 'B', 'H', 'h', 'L', 'l', 'f', 'd',
None, None, None, None,
]
# Size of the packed value in bytes for different numerical types.
# This is needed to cut chunks of band data out of PostGIS raster strings
# when decomposing them into GDALRasters.
# See https://docs.python.org/library/struct.html#format-characters
STRUCT_SIZE = {
'b': 1, # Signed char
'B': 1, # Unsigned char
'?': 1, # _Bool
'h': 2, # Short
'H': 2, # Unsigned short
'i': 4, # Integer
'I': 4, # Unsigned Integer
'l': 4, # Long
'L': 4, # Unsigned Long
'f': 4, # Float
'd': 8, # Double
}
# See https://trac.osgeo.org/postgis/wiki/WKTRaster/RFC/RFC1_V0SerialFormat#Pixeltypeandstorageflag
BANDTYPE_FLAG_HASNODATA = 1 << 6
| 0 | 0 | 0 |
bb106129f0112d84d2e32ff27e94398adec95e67 | 1,426 | py | Python | BinarySearch/81_SearchInRotatedSortedArrayII.py | cls1991/leetcode | 0fd165afa2ec339a6f194bc57f8810e66cd2822b | [
"MIT"
] | 180 | 2018-01-25T01:50:04.000Z | 2021-11-09T11:47:45.000Z | BinarySearch/81_SearchInRotatedSortedArrayII.py | cls1991/leetcode | 0fd165afa2ec339a6f194bc57f8810e66cd2822b | [
"MIT"
] | 6 | 2018-01-25T07:51:51.000Z | 2019-10-26T14:10:53.000Z | BinarySearch/81_SearchInRotatedSortedArrayII.py | cls1991/leetcode | 0fd165afa2ec339a6f194bc57f8810e66cd2822b | [
"MIT"
] | 29 | 2018-01-25T01:55:58.000Z | 2019-09-02T07:19:59.000Z | # coding: utf8
"""
题目链接: https://leetcode.com/problems/search-in-rotated-sorted-array-ii/description.
题目描述:
Follow up for "Search in Rotated Sorted Array":
What if duplicates are allowed?
Would this affect the run-time complexity? How and why?
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Write a function to determine if a given target is in the array.
The array may contain duplicates.
"""
| 26.407407 | 98 | 0.509116 | # coding: utf8
"""
题目链接: https://leetcode.com/problems/search-in-rotated-sorted-array-ii/description.
题目描述:
Follow up for "Search in Rotated Sorted Array":
What if duplicates are allowed?
Would this affect the run-time complexity? How and why?
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Write a function to determine if a given target is in the array.
The array may contain duplicates.
"""
class Solution(object):
    def search(self, nums, target):
        """
        Return True when `target` occurs in a rotated sorted array that may
        contain duplicates, else False.
        :type nums: List[int]
        :type target: int
        :rtype: bool
        """
        if not nums:
            return False
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            pivot = nums[mid]
            if pivot == target:
                return True
            if pivot < nums[hi]:
                # Right half [mid..hi] is sorted.
                if pivot < target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
            elif pivot > nums[hi]:
                # Left half [lo..mid] is sorted.
                if nums[lo] <= target < pivot:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:
                # nums[mid] == nums[hi] != target: only hi can be discarded.
                hi -= 1
        return False
| 0 | 877 | 23 |
f088e0cb4e77b9ae229ddbd452aff797e2a88523 | 253 | py | Python | keepmealive/items/serializers.py | kymy86/keepmealive | fcca7cb825ae947978ca3251dc0331207cec2527 | [
"Apache-2.0"
] | null | null | null | keepmealive/items/serializers.py | kymy86/keepmealive | fcca7cb825ae947978ca3251dc0331207cec2527 | [
"Apache-2.0"
] | null | null | null | keepmealive/items/serializers.py | kymy86/keepmealive | fcca7cb825ae947978ca3251dc0331207cec2527 | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from items.models import Item
| 31.625 | 94 | 0.687747 | from rest_framework import serializers
from items.models import Item
class ItemSerializer(serializers.ModelSerializer):
class Meta:
model = Item
fields = ('id', 'description', 'url', 'name', 'username', 'pwd', 'folder', 'password')
| 0 | 161 | 23 |
1819f0d4ecff30e9287a66fc50d70f8f967d0afb | 14,951 | py | Python | rootfs/api/tests/test_domain.py | jianxiaoguo/controller | 8cc1e11601e5725e583f0fa82cdb2c10872ca485 | [
"Apache-2.0"
] | null | null | null | rootfs/api/tests/test_domain.py | jianxiaoguo/controller | 8cc1e11601e5725e583f0fa82cdb2c10872ca485 | [
"Apache-2.0"
] | 19 | 2020-07-30T06:31:29.000Z | 2022-03-14T07:33:44.000Z | rootfs/api/tests/test_domain.py | jianxiaoguo/controller | 8cc1e11601e5725e583f0fa82cdb2c10872ca485 | [
"Apache-2.0"
] | 9 | 2020-07-30T02:50:12.000Z | 2020-12-11T06:44:19.000Z | """
Unit tests for the Drycc api app.
Run the tests with "./manage.py test api"
"""
from unittest import mock
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.conf import settings
from rest_framework.authtoken.models import Token
from api.models import Domain
from api.tests import DryccTestCase
import idna
User = get_user_model()
class DomainTest(DryccTestCase):
"""Tests creation of domains"""
fixtures = ['tests.json']
    def test_response_data(self):
        """Test that the serialized response contains only relevant data."""
        app_id = self.create_app()
        response = self.client.post(
            '/v2/apps/{}/domains'.format(app_id),
            {'domain': 'test-domain.example.com'}
        )
        self.assertEqual(response.status_code, 201, response.data)
        # The serializer must not leak any fields beyond this whitelist.
        for key in response.data:
            self.assertIn(key, ['uuid', 'owner', 'created', 'updated', 'app', 'domain'])
        expected = {
            'owner': self.user.username,
            'app': app_id,
            'domain': 'test-domain.example.com'
        }
        self.assertDictContainsSubset(expected, response.data)
    def test_strip_dot(self):
        """Test that a dot on the right side of the domain gets stripped"""
        # A trailing dot is valid FQDN syntax; the API should normalize it away.
        domain = 'autotest.127.0.0.1.xip.io.'
        msg = "failed on '{}'".format(domain)
        url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
        # Create
        response = self.client.post(url, {'domain': domain})
        self.assertEqual(response.status_code, 201, msg)
        # Fetch
        domain = 'autotest.127.0.0.1.xip.io'  # stripped version
        url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
        response = self.client.get(url)
        expected = [data['domain'] for data in response.data['results']]
        # The app's default platform domain is always present alongside it.
        self.assertEqual(
            sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), domain]),
            expected, msg)
    def test_delete_domain_does_not_exist(self):
        """Remove a domain that does not exist"""
        # Deleting an unknown domain must 404 rather than silently succeed.
        url = '/v2/apps/{app_id}/domains/{domain}'.format(domain='test-domain.example.com',
                                                          app_id=self.app_id)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 404)
    def test_delete_domain_does_not_remove_latest(self):
        """https://github.com/drycc/drycc/issues/3239"""
        # Regression: deleting one domain must not drop the most recently
        # added one.
        url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
        test_domains = [
            'test-domain.example.com',
            'django.paas-sandbox',
        ]
        for domain in test_domains:
            response = self.client.post(url, {'domain': domain})
            self.assertEqual(response.status_code, 201, response.data)
        url = '/v2/apps/{app_id}/domains/{domain}'.format(domain=test_domains[0],
                                                          app_id=self.app_id)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 204, response.data)
        # Only the explicitly deleted domain should be gone.
        with self.assertRaises(Domain.DoesNotExist):
            Domain.objects.get(domain=test_domains[0])
    def test_delete_domain_does_not_remove_others(self):
        """https://github.com/drycc/drycc/issues/3475"""
        # Re-run the single-delete scenario, then verify the surviving
        # domains (sibling + default app domain) are untouched.
        self.test_delete_domain_does_not_remove_latest()
        self.assertEqual(Domain.objects.all().count(), 2)
    def test_admin_can_add_domains_to_other_apps(self):
        """If a non-admin user creates an app, an administrator should be able to add
        domains to it.
        """
        # Create the app as the non-admin user...
        user = User.objects.get(username='autotest2')
        token = Token.objects.get(user=user).key
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
        app_id = self.create_app()
        # ...then switch back to the admin credentials from setUp.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
        url = '/v2/apps/{}/domains'.format(app_id)
        response = self.client.post(url, {'domain': 'example.drycc.example.com'})
        self.assertEqual(response.status_code, 201, response.data)
    def test_unauthorized_user_cannot_modify_domain(self):
        """
        An unauthorized user should not be able to modify other domains.

        Since an unauthorized user should not know about the application at all, these
        requests should return a 404.
        """
        app_id = self.create_app()
        unauthorized_user = User.objects.get(username='autotest2')
        unauthorized_token = Token.objects.get(user=unauthorized_user).key
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + unauthorized_token)
        url = '/v2/apps/{}/domains'.format(app_id)
        response = self.client.post(url, {'domain': 'example.com'})
        # NOTE(review): the docstring expects 404 but the assertion checks
        # 403 -- confirm which status the permission layer actually returns.
        self.assertEqual(response.status_code, 403)
    def test_kubernetes_service_failure(self):
        """
        Cause an Exception in kubernetes services
        """
        app_id = self.create_app()
        # scheduler.svc.update exception
        # NOTE(review): the patch replaces Service.update with a plain
        # MagicMock and never sets side_effect, so no exception is actually
        # raised here -- confirm this still exercises the failure path the
        # docstring describes.
        with mock.patch('scheduler.resources.service.Service.update'):
            domain = 'foo.com'
            url = '/v2/apps/{}/domains'.format(app_id)
            response = self.client.post(url, {'domain': domain})
            self.assertEqual(response.status_code, 201, response.data)
| 41.301105 | 92 | 0.568858 | """
Unit tests for the Drycc api app.
Run the tests with "./manage.py test api"
"""
from unittest import mock
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.conf import settings
from rest_framework.authtoken.models import Token
from api.models import Domain
from api.tests import DryccTestCase
import idna
User = get_user_model()
class DomainTest(DryccTestCase):
"""Tests creation of domains"""
fixtures = ['tests.json']
    def setUp(self):
        """Authenticate as the fixture user and create an app to attach domains to."""
        self.user = User.objects.get(username='autotest')
        self.token = Token.objects.get(user=self.user).key
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
        self.app_id = self.create_app()
    def tearDown(self):
        """Clear the cache so every test has a clean slate for k8s mocking."""
        cache.clear()
def test_response_data(self):
"""Test that the serialized response contains only relevant data."""
app_id = self.create_app()
response = self.client.post(
'/v2/apps/{}/domains'.format(app_id),
{'domain': 'test-domain.example.com'}
)
self.assertEqual(response.status_code, 201, response.data)
for key in response.data:
self.assertIn(key, ['uuid', 'owner', 'created', 'updated', 'app', 'domain'])
expected = {
'owner': self.user.username,
'app': app_id,
'domain': 'test-domain.example.com'
}
self.assertDictContainsSubset(expected, response.data)
def test_strip_dot(self):
"""Test that a dot on the right side of the domain gets stripped"""
domain = 'autotest.127.0.0.1.xip.io.'
msg = "failed on '{}'".format(domain)
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
domain = 'autotest.127.0.0.1.xip.io' # stripped version
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), domain]),
expected, msg)
def test_manage_idn_domain(self):
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
test_domains = [
'ドメイン.テスト',
'xn--eckwd4c7c.xn--zckzah',
'xn--80ahd1agd.ru',
'домена.ru',
'*.домена.испытание',
'täst.königsgäßchen.de',
'xn--tst-qla.xn--knigsgsschen-lcb0w.de',
'ドメイン.xn--zckzah',
'xn--eckwd4c7c.テスト',
'täst.xn--knigsgsschen-lcb0w.de',
'*.xn--tst-qla.königsgäßchen.de'
]
for domain in test_domains:
msg = "failed on '{}'".format(domain)
# Generate ACE and Unicode variant for domain
if domain.startswith("*."):
ace_domain = "*." + idna.encode(domain[2:]).decode("utf-8", "strict")
unicode_domain = "*." + idna.decode(ace_domain[2:])
else:
ace_domain = idna.encode(domain).decode("utf-8", "strict")
unicode_domain = idna.decode(ace_domain)
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]),
sorted(expected), msg)
# Verify creation failure for same domain with different encoding
if ace_domain != domain:
response = self.client.post(url, {'domain': ace_domain})
self.assertEqual(response.status_code, 400, msg)
# Verify creation failure for same domain with different encoding
if unicode_domain != domain:
response = self.client.post(url, {'domain': unicode_domain})
self.assertEqual(response.status_code, 400, msg)
# Delete
url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, msg)
# Verify removal
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
self.assertEqual(1, response.data['count'], msg)
# verify only app domain is left
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)],
expected, msg)
# Use different encoding for creating and deleting (ACE)
if ace_domain != domain:
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]),
sorted(expected), msg)
# Delete
url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=ace_domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, msg)
# Verify removal
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
self.assertEqual(1, response.data['count'], msg)
# verify only app domain is left
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)],
expected, msg)
# Use different encoding for creating and deleting (Unicode)
if unicode_domain != domain:
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]),
sorted(expected), msg)
# Delete
url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=unicode_domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, msg)
# Verify removal
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
self.assertEqual(1, response.data['count'], msg)
# verify only app domain is left
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)],
expected, msg)
def test_manage_domain(self):
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
test_domains = [
'test-domain.example.com',
'django.paas-sandbox',
'django.paas--sandbox',
'domain',
'not.too.loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong',
'3com.com',
'domain1',
'3333.xyz',
'w3.example.com',
'MYDOMAIN.NET',
'autotest.127.0.0.1.xip.io',
'*.drycc.example.com'
]
for domain in test_domains:
msg = "failed on '{}'".format(domain)
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), domain]),
sorted(expected), msg)
# Delete
url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, msg)
# Verify removal
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
self.assertEqual(1, response.data['count'], msg)
# verify only app domain is left
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)],
expected, msg)
def test_delete_domain_does_not_exist(self):
"""Remove a domain that does not exist"""
url = '/v2/apps/{app_id}/domains/{domain}'.format(domain='test-domain.example.com',
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
def test_delete_domain_does_not_remove_latest(self):
"""https://github.com/drycc/drycc/issues/3239"""
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
test_domains = [
'test-domain.example.com',
'django.paas-sandbox',
]
for domain in test_domains:
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, response.data)
url = '/v2/apps/{app_id}/domains/{domain}'.format(domain=test_domains[0],
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, response.data)
with self.assertRaises(Domain.DoesNotExist):
Domain.objects.get(domain=test_domains[0])
def test_delete_domain_does_not_remove_default(self):
domain = "%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)
url = '/v2/apps/{app_id}/domains/{domain}'.format(domain=domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 403, response.data)
def test_delete_domain_does_not_remove_others(self):
"""https://github.com/drycc/drycc/issues/3475"""
self.test_delete_domain_does_not_remove_latest()
self.assertEqual(Domain.objects.all().count(), 2)
def test_manage_domain_invalid_app(self):
# Create domain
url = '/v2/apps/{app_id}/domains'.format(app_id="this-app-does-not-exist")
response = self.client.post(url, {'domain': 'test-domain.example.com'})
self.assertEqual(response.status_code, 404)
# verify
url = '/v2/apps/{app_id}/domains'.format(app_id='this-app-does-not-exist')
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_manage_domain_invalid_domain(self):
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
test_domains = [
'this_is_an.invalid.domain',
'this-is-an.invalid.1',
'django.pa--assandbox',
'too.looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong',
'foo.*.bar.com',
'test.local.drycc.cc',
'*',
'a' * 300,
'.'.join(['a'] * 128)
]
for domain in test_domains:
msg = "failed on \"{}\"".format(domain)
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 400, msg)
def test_admin_can_add_domains_to_other_apps(self):
"""If a non-admin user creates an app, an administrator should be able to add
domains to it.
"""
user = User.objects.get(username='autotest2')
token = Token.objects.get(user=user).key
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
app_id = self.create_app()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = '/v2/apps/{}/domains'.format(app_id)
response = self.client.post(url, {'domain': 'example.drycc.example.com'})
self.assertEqual(response.status_code, 201, response.data)
def test_unauthorized_user_cannot_modify_domain(self):
"""
An unauthorized user should not be able to modify other domains.
Since an unauthorized user should not know about the application at all, these
requests should return a 404.
"""
app_id = self.create_app()
unauthorized_user = User.objects.get(username='autotest2')
unauthorized_token = Token.objects.get(user=unauthorized_user).key
self.client.credentials(HTTP_AUTHORIZATION='Token ' + unauthorized_token)
url = '/v2/apps/{}/domains'.format(app_id)
response = self.client.post(url, {'domain': 'example.com'})
self.assertEqual(response.status_code, 403)
def test_kubernetes_service_failure(self):
"""
Cause an Exception in kubernetes services
"""
app_id = self.create_app()
# scheduler.svc.update exception
with mock.patch('scheduler.resources.service.Service.update'):
domain = 'foo.com'
url = '/v2/apps/{}/domains'.format(app_id)
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, response.data)
| 9,527 | 0 | 189 |
75c315b7a7ce4f646d9fa116e0081007d2e70f6e | 159 | py | Python | com/LimePencil/Q8674/Tabliczka.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | com/LimePencil/Q8674/Tabliczka.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | com/LimePencil/Q8674/Tabliczka.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | import sys
import sys

# Read the two integers w and h from the first line of stdin.
# Fixes: the original rebound the builtin `input` to sys.stdin.readline
# (shadowing a builtin), wrapped map() in a redundant list(), and split on
# a single literal space; whitespace splitting is equivalent here and more
# robust to the trailing newline / repeated spaces.
w, h = map(int, sys.stdin.readline().split())

# Print 0 if either dimension is even, otherwise the smaller dimension
# (the original `elif h >= w: print(w) else: print(h)` is exactly min(w, h)).
if h % 2 == 0 or w % 2 == 0:
    print(0)
else:
    print(min(w, h))
# Read one line of input containing the two dimensions.
input = sys.stdin.readline
w, h = [int(token) for token in input().split(" ")]

# An even side prints "0"; otherwise the answer is the smaller dimension.
if h % 2 == 0 or w % 2 == 0:
    print("0")
else:
    print(min(w, h))
56ffd2a76d5be6ebe2756fda990f9b8c0be07cba | 3,321 | py | Python | todone/commands/list.py | safnuk/todone | e6373b69b8159cffb8d1b7ed7b1f5810c4e1e7df | [
"Apache-2.0"
] | null | null | null | todone/commands/list.py | safnuk/todone | e6373b69b8159cffb8d1b7ed7b1f5810c4e1e7df | [
"Apache-2.0"
] | null | null | null | todone/commands/list.py | safnuk/todone | e6373b69b8159cffb8d1b7ed7b1f5810c4e1e7df | [
"Apache-2.0"
] | null | null | null | """Module for searching the database based on user-supplied queries, and
display the result to the user.
"""
from todone import backend
from todone import printers
from todone.parser import factory
def list_items(args):
    """
    Print a list of todos matching given search terms.
    usage: todone list [.file] [folder/] [tags and keywords]
    Search criteria can be any string expression.
    Allowed folder keywords are any valid folder name, followed by
    a slash. Examples: today/, next/, inbox/, someday/, done/. Shortened
    versions accepted when unambiguous, so, for example "done/", "don/",
    "do/", and "d/" all indicate the done folder.
    If folder is not specified, the search is over all active
    folders (default is: inbox/, next/, today/).
    If folder is today/, then, in addition to items in the today
    folder, items with a reminder or due date prior to or equal to
    today's date are also included. This behavior may change in future
    versions.
    Allowed tags are:
        due[+N{d|w|m|y}],
        remind[+N{d|w|m|y}],
        [project name]
    The remainder of the search string provides keywords that must
    appear in the todo title. However, searches are always case
    insensitive.
    If .file is specified, then search results are saved to .file.
    If no search criteria is provided, then the todos in the given file
    are listed. If no search criteria and no file is specified, then
    the most recently run search is listed.
    E.g.,
        > todone list .my_search today/ @Work
            Lists all today items containing tag @Work, and saves to .my_search
        > todone list n/due+1w [My Project]
            Lists all next items from project [My Project] due in
            the next week
        > todone list
            Repeats most recent search
        > todone list .my_search
            Repeats list from first search
        > todone list
            Repeats list from first search
    """
    criteria = parse_args(args)
    if is_loading_saved_search(criteria):
        # No search terms given: replay the todos stored under the file key.
        todos = backend.SavedList.get_todos_in_list(criteria['file'])
    else:
        # Fresh search: run the query and persist it under the file key.
        todos = backend.Todo.query(**criteria)
        backend.SavedList.save_search(criteria['file'], todos)
    # Either way, remember this result as the most recent search.
    backend.SavedList.save_most_recent_search(todos)
    printers.print_todo_list(todos)
list_items.short_help = """
usage: todone list [.file] [folder/] [tags and keywords]
"""
| 33.21 | 79 | 0.658838 | """Module for searching the database based on user-supplied queries, and
display the result to the user.
"""
from todone import backend
from todone import printers
from todone.parser import factory
def list_items(args):
    """
    Print a list of todos matching given search terms.
    usage: todone list [.file] [folder/] [tags and keywords]
    Search criteria can be any string expression.
    Allowed folder keywords are any valid folder name, followed by
    a slash. Examples: today/, next/, inbox/, someday/, done/. Shortened
    versions accepted when unambiguous, so, for example "done/", "don/",
    "do/", and "d/" all indicate the done folder.
    If folder is not specified, the search is over all active
    folders (default is: inbox/, next/, today/).
    If folder is today/, then, in addition to items in the today
    folder, items with a reminder or due date prior to or equal to
    today's date are also included. This behavior may change in future
    versions.
    Allowed tags are:
        due[+N{d|w|m|y}],
        remind[+N{d|w|m|y}],
        [project name]
    The remainder of the search string provides keywords that must
    appear in the todo title. However, searches are always case
    insensitive.
    If .file is specified, then search results are saved to .file.
    If no search criteria is provided, then the todos in the given file
    are listed. If no search criteria and no file is specified, then
    the most recently run search is listed.
    E.g.,
        > todone list .my_search today/ @Work
            Lists all today items containing tag @Work, and saves to .my_search
        > todone list n/due+1w [My Project]
            Lists all next items from project [My Project] due in
            the next week
        > todone list
            Repeats most recent search
        > todone list .my_search
            Repeats list from first search
        > todone list
            Repeats list from first search
    """
    parsed = parse_args(args)
    if is_loading_saved_search(parsed):
        # Nothing but a file key was supplied: show that saved list again.
        results = backend.SavedList.get_todos_in_list(parsed['file'])
    else:
        # Run the search and store the result under the given file key.
        results = backend.Todo.query(**parsed)
        backend.SavedList.save_search(parsed['file'], results)
    # Record this as the most recent search in both branches.
    backend.SavedList.save_most_recent_search(results)
    printers.print_todo_list(results)
list_items.short_help = """
usage: todone list [.file] [folder/] [tags and keywords]
"""
def is_loading_saved_search(args):
    """Return True when no search criteria beyond the 'file' key is set."""
    return all(not value for key, value in args.items() if key != 'file')
def parse_args(args=None):
    """Parse the command-line arguments for the list command.

    Builds a parser from the preset argument types (file key, project,
    folder, due/remind dates, free keywords), runs it over ``args`` and
    returns the resulting dict of parsed data.

    Note: the default was previously the mutable ``args=[]``; ``None`` is
    used as the sentinel to avoid sharing one list object across calls,
    while ``parse_args()`` still behaves exactly as before.
    """
    args = [] if args is None else args
    parser_initialization = [
        (factory.PresetArgument.file,
         {'name': 'file'}),
        (factory.PresetArgument.all_matching_projects,
         {'name': 'parent'}),
        (factory.PresetArgument.folder,
         {'name': 'folder',
          'options': [x.name.lower() for x in backend.Folder.all()]}),
        (factory.PresetArgument.due_date,
         {'name': 'due'}),
        (factory.PresetArgument.remind_date,
         {'name': 'remind'}),
        (factory.PresetArgument.all_remaining,
         {'name': 'keywords',
          'format_function': lambda x: x}),
    ]
    parser = factory.ParserFactory.from_arg_list(parser_initialization)
    parser.parse(args)
    return parser.parsed_data
| 847 | 0 | 46 |
a6b19d30a50d1d952c1e3fbe2c0437c393d7bff8 | 6,303 | py | Python | tests/test_csp_analyser.py | crataegustess/csp-tool | 7d998abf22eb8823d59b4ee2446de334ff396a9c | [
"Apache-2.0"
] | null | null | null | tests/test_csp_analyser.py | crataegustess/csp-tool | 7d998abf22eb8823d59b4ee2446de334ff396a9c | [
"Apache-2.0"
] | null | null | null | tests/test_csp_analyser.py | crataegustess/csp-tool | 7d998abf22eb8823d59b4ee2446de334ff396a9c | [
"Apache-2.0"
] | 2 | 2020-06-19T12:39:56.000Z | 2021-04-10T23:43:30.000Z | import pytest
from csp_tool.csp_analyser import get_domains_per_directive, rollup_data_by_header, \
check_unsafe_inline_used_without_domains, extract_policies_without_domains, \
check_keyword_used_without_domains, extract_policies_using_localhost
@pytest.mark.parametrize(
"header,expected_num_rows,biggest_count_row,expected_count",
[
('full_csp', 4, 3, '2'),
('has_unsafe_inline', 1, 0, '5'),
('has_unsafe_eval', 2, 1, '1'),
('refs_localhost', 2, 1, '1'),
],
)
| 33.887097 | 126 | 0.682691 | import pytest
from csp_tool.csp_analyser import get_domains_per_directive, rollup_data_by_header, \
check_unsafe_inline_used_without_domains, extract_policies_without_domains, \
check_keyword_used_without_domains, extract_policies_using_localhost
def generate_known_test_data():
    """Return five hand-crafted CSV rows of CSP analysis data.

    Column order: repo, file, full_csp, has_unsafe_eval,
    has_unsafe_inline, refs_localhost.
    """
    return [
        "repo1, file1, default-src 'self' 'unsafe-inline' www.google-analytics.com data:,'0','1','0'",
        "repo1, file1, default-src 'self' 'unsafe-inline' 'unsafe-eval','1','1','0'",
        "repo1, file1, default-src 'self' 'unsafe-inline' www.google-analytics.com data:,'0','1','0'",
        "repo1, file1, default-src 'self' 'unsafe-inline' data:,'0','1','0'",
        "repo1, file1, default-src 'self' 'unsafe-inline' localhost:9042 data:,'0','1','1'",
    ]
@pytest.mark.parametrize(
    "header,expected_num_rows,biggest_count_row,expected_count",
    [
        ('full_csp', 4, 3, '2'),
        ('has_unsafe_inline', 1, 0, '5'),
        ('has_unsafe_eval', 2, 1, '1'),
        ('refs_localhost', 2, 1, '1'),
    ],
)
def test_rollup_data_by_header(header, expected_num_rows, biggest_count_row, expected_count):
    """Roll up the known data by each column and spot-check the counts."""
    headers = ['repo', 'file', 'full_csp', 'has_unsafe_eval', 'has_unsafe_inline', 'refs_localhost']
    rolled_up = rollup_data_by_header(
        generate_known_test_data(), header, active_only=False, headers=headers)
    assert len(rolled_up) == expected_num_rows
    # Each rollup row is formatted with its occurrence count first.
    assert rolled_up[biggest_count_row].startswith(expected_count)
def test_get_domains_per_directive_single_directive_returns_correct_domains():
    """A single-directive policy yields exactly one domain under default-src."""
    entries = [
        "repo1, file1, default-src 'self' 'unsafe-inline' www.google-analytics.com data:,'0','1','0'",
    ]
    summary = get_domains_per_directive(entries)
    assert len(summary.keys()) == 1
    assert 'default-src' in summary.keys()
    assert len(summary['default-src']) == 1
    assert summary['default-src'][0] == 'www.google-analytics.com'
def test_reported_ok_no_unsafe_inline():
    """A policy without 'unsafe-inline' is not flagged."""
    csp = {
        'default-src': {
            'keywords': ['self'],
            'domains': ['www.google.com'],
            'data': [],
        },
    }
    assert check_unsafe_inline_used_without_domains(csp) == False  # noqa: E712
def test_reported_ok_unsafe_inline_with_domains():
    """'unsafe-inline' accompanied by at least one domain is not flagged."""
    csp = {
        'default-src': {
            'keywords': ['unsafe-inline'],
            'domains': ['www.google.com'],
            'data': [],
        },
    }
    assert check_unsafe_inline_used_without_domains(csp) == False  # noqa: E712
def test_reported_found_unsafe_inline_without_domains():
    """'unsafe-inline' with an empty domain list is flagged."""
    csp = {
        'default-src': {
            'keywords': ['unsafe-inline'],
            'domains': [],
            'data': [],
        },
    }
    assert check_unsafe_inline_used_without_domains(csp) == True  # noqa: E712
def test_extract_domains_unsafe_inline_from_data_entries():
    """No entries are reported when 'unsafe-inline' appears alongside a domain."""
    entries = [
        "repo1,file1,default-src 'self' 'unsafe-inline' www.google-analytics.com data:,'0','1','0'",
    ]
    assert len(extract_policies_without_domains('unsafe-inline', entries)) == 0
def test_extract_policies_with_missing_domains_with_unsafe_inline_from_data_entries():
    """An 'unsafe-inline' policy without domains is reported with its origin."""
    entries = [
        "repo1,file1,default-src 'self' 'unsafe-inline' data:,'0','1','0'",
    ]
    flagged = extract_policies_without_domains('unsafe-inline', entries)
    assert len(flagged) == 1
    parts = flagged[0].rstrip('\n').split(',')
    assert len(parts) == 3
    assert parts[0] == 'repo1'
    assert parts[1].lstrip() == 'file1'
    assert parts[2].lstrip() == "default-src 'self' 'unsafe-inline' data:"
def test_reported_ok_no_unsafe_eval():
    """A policy without 'unsafe-eval' is not flagged."""
    csp = {
        'default-src': {
            'keywords': ['self'],
            'domains': ['www.google.com'],
            'data': [],
        },
    }
    assert check_keyword_used_without_domains('unsafe-eval', csp) == False  # noqa: E712
def test_reported_ok_unsafe_eval_with_domains():
    """'unsafe-eval' accompanied by at least one domain is not flagged."""
    csp = {
        'default-src': {
            'keywords': ['unsafe-eval'],
            'domains': ['www.google.com'],
            'data': [],
        },
    }
    assert check_keyword_used_without_domains('unsafe-eval', csp) == False  # noqa: E712
def test_reported_found_unsafe_eval_without_domains():
    """'unsafe-eval' with an empty domain list is flagged."""
    csp = {
        'default-src': {
            'keywords': ['unsafe-eval'],
            'domains': [],
            'data': [],
        },
    }
    assert check_keyword_used_without_domains('unsafe-eval', csp) == True  # noqa: E712
def test_extract_domains_unsafe_eval_from_data_entries():
    """No entries are reported when 'unsafe-eval' appears alongside a domain."""
    entries = [
        "repo1,file1,default-src 'self' 'unsafe-eval' www.google-analytics.com data:,'0','1','0'",
    ]
    assert len(extract_policies_without_domains('unsafe-eval', entries)) == 0
def test_extract_policies_with_missing_domains_with_domains_unsafe_eval_from_data_entries():
    """An 'unsafe-eval' policy without domains is reported with its origin."""
    entries = [
        "repo1,file1,default-src 'self' 'unsafe-eval' data:,'0','1','0'",
    ]
    flagged = extract_policies_without_domains('unsafe-eval', entries)
    assert len(flagged) == 1
    parts = flagged[0].rstrip('\n').split(',')
    assert len(parts) == 3
    assert parts[0] == 'repo1'
    assert parts[1].lstrip() == 'file1'
    assert parts[2].lstrip() == "default-src 'self' 'unsafe-eval' data:"
def test_extract_entries_containing_localhost():
    """Exactly one of the known rows references localhost."""
    extracted = extract_policies_using_localhost(generate_known_test_data())
    assert len(extracted) == 1
| 5,451 | 0 | 321 |
9bbef3c56a3d69df44fbec3dfb0d6aed70a16946 | 2,773 | py | Python | src/neuronalNetwork.py | ramevaf/AICars | 0996f6f79875246aa23c8fe68e8ca80fe4127f96 | [
"MIT"
] | null | null | null | src/neuronalNetwork.py | ramevaf/AICars | 0996f6f79875246aa23c8fe68e8ca80fe4127f96 | [
"MIT"
] | null | null | null | src/neuronalNetwork.py | ramevaf/AICars | 0996f6f79875246aa23c8fe68e8ca80fe4127f96 | [
"MIT"
] | null | null | null | '''
This module contains the Neuronaletwork class and its methods.
Created on 29.09.2019
@author: D.Ramonat
'''
import numpy as np | 41.38806 | 93 | 0.623512 | '''
This module contains the Neuronaletwork class and its methods.
Created on 29.09.2019
@author: D.Ramonat
'''
import numpy as np
class NeuronalNetwork:
def __init__(self, structure):
'''
Constructor
:param structure: contains the number of neurons in the respective
layers of the network. For example, if the list was [2, 3, 1]
then it would be a three-layer network, with the first layer
containing 2 neurons, the second layer 3 neurons, and the third
layer 1 neuron.
'''
self.numOfLayers = len(structure)
self.structure = structure
# create an array for the biases. There will be no biases for the input layeer.
# If for example the structure of the NN is [2,3,1] then there will be biases
# for the last two layers means the size of the biases array is [3,1]
# The biases are initialized randomly with a normal distribution with variance 1
# so the value are between -1 and 1
self.biases = [np.random.randn(y, 1) for y in structure[1:]]
# store number of biases overall. In the given example this must be 4
self.numOfBiases = sum(structure[1:])
# create an array for the weigths. There are no weights for the input layer. Each
# neuron in a hiden layer n(l) is conneced to each neuron of the layer before n(l-1)
# That means for the example of a net wihh structure [2,3,1] there is 2 weights for
# each neuron in the hidden layer and 3 weights for the neuron in the output layer
# so overall [[2,2,2],3]. Again the weights are initilaized randomly similar to the
# biases
self.weights = [np.random.randn(y, x) for x, y in zip(structure[:-1], structure[1:])]
# store number of weigths for the hidden layers
numOfHiddenLayers = self.numOfLayers-2 # minus input and output
self.numOfWeights = sum([self.weights[i].size for i in range(numOfHiddenLayers)])
def feedforward(self, input):
'''
Returns the output of the network for a given input.
:param input: input values for the first layer of the net
:returns: numpy array with output values of the net
'''
for biases, weights in zip(self.biases, self.weights):
# use result as new input for next cycle
input = self.sigmoid(np.dot(weights,input)+biases)
return input
def sigmoid(self, x):
'''
The sigmoid function used as activation function for the neurons
:param x: input
:returns: output of the sigmoid function
'''
z = 1/(1 + np.exp(-x))
return z | 0 | 2,618 | 23 |
da86ae199461a628bc11e0e67da77a710ff55631 | 1,666 | py | Python | piqe_ocp_lib/tests/monitoring/test_ocp_cluster_stats_prometheus.py | bhavikbhavsar/piqe-ocp-lib | 4aa64e9ebbec422cdf93150fcb451ca7e529b451 | [
"Apache-2.0"
] | 4 | 2020-05-12T13:59:07.000Z | 2021-08-02T19:21:54.000Z | piqe_ocp_lib/tests/monitoring/test_ocp_cluster_stats_prometheus.py | bhavikbhavsar/piqe-ocp-lib | 4aa64e9ebbec422cdf93150fcb451ca7e529b451 | [
"Apache-2.0"
] | 53 | 2020-05-05T16:44:52.000Z | 2021-12-07T07:34:56.000Z | piqe_ocp_lib/tests/monitoring/test_ocp_cluster_stats_prometheus.py | bhavikbhavsar/piqe-ocp-lib | 4aa64e9ebbec422cdf93150fcb451ca7e529b451 | [
"Apache-2.0"
] | 18 | 2020-03-18T18:18:33.000Z | 2021-08-16T10:08:59.000Z | import logging
import pytest
from piqe_ocp_lib import __loggername__
from piqe_ocp_lib.api.monitoring.ocp_cluster_stats_prometheus import OcpClusterStatsPrometheus
logger = logging.getLogger(__loggername__)
@pytest.fixture(scope="session")
| 43.842105 | 100 | 0.770708 | import logging
import pytest
from piqe_ocp_lib import __loggername__
from piqe_ocp_lib.api.monitoring.ocp_cluster_stats_prometheus import OcpClusterStatsPrometheus
logger = logging.getLogger(__loggername__)
@pytest.fixture(scope="session")
def ocp_cluster_stats_prom(get_kubeconfig):
return OcpClusterStatsPrometheus(kube_config_file=get_kubeconfig)
class TestOcpClusterStatsPrometheus:
def test_get_prometheus_labels(self, ocp_cluster_stats_prom):
logger.info("Get prometheus labels")
prometheus_labels = ocp_cluster_stats_prom.get_prometheus_ocp_labels()
assert isinstance(prometheus_labels, list)
if not prometheus_labels and len(prometheus_labels) == 0:
assert False, "Failed to get prometheus openshift labels"
def test_get_prometheus_jobs(self, ocp_cluster_stats_prom):
logger.info("Get prometheus jobs labels")
prometheus_ocp_jobs = ocp_cluster_stats_prom.get_prometheus_ocp_jobs()
assert isinstance(prometheus_ocp_jobs, list)
if not prometheus_ocp_jobs and len(prometheus_ocp_jobs) == 0:
assert False, "Failed to get prometheus openshift jobs"
def test_get_cluster_stats_using_query_param(self, ocp_cluster_stats_prom):
logger.info("Get openshift cluster stats from prometheus")
label = "namespace:container_cpu_usage:sum"
cluster_stats_dict = ocp_cluster_stats_prom.get_cluster_stats_using_query_param(label=label)
assert isinstance(cluster_stats_dict, dict)
if not cluster_stats_dict and len(cluster_stats_dict["result"]) == 0:
assert False, f"Failed to get cluster stats for label {label}"
| 1,280 | 15 | 125 |
aa8eccb8250d50cd4bf852b7cc6d79f38837fac2 | 3,808 | py | Python | backend/swagger_server/models/audit_log_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 6 | 2019-01-29T05:58:37.000Z | 2021-11-02T22:47:02.000Z | backend/swagger_server/models/audit_log_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 9 | 2020-09-09T04:53:01.000Z | 2022-03-08T22:52:18.000Z | backend/swagger_server/models/audit_log_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 4 | 2019-01-29T07:38:55.000Z | 2021-10-16T21:06:42.000Z | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from .base_model_ import Model
from .. import util
class AuditLogEntry(Model):
    """One entry in a document's audit trail (status, timestamp, free-form data).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, status: str=None, timestamp: str=None, data: object=None):  # noqa: E501
        """AuditLogEntry - a model defined in Swagger
        :param status: The status of this AuditLogEntry.  # noqa: E501
        :type status: str
        :param timestamp: The timestamp of this AuditLogEntry.  # noqa: E501
        :type timestamp: str
        :param data: The data of this AuditLogEntry.  # noqa: E501
        :type data: object
        """
        # Declared attribute types; presumably consumed by
        # util.deserialize_model (see from_dict) — TODO confirm.
        self.swagger_types = {
            'status': str,
            'timestamp': str,
            'data': object
        }
        # Maps python attribute names to their JSON field names.
        self.attribute_map = {
            'status': 'status',
            'timestamp': 'timestamp',
            'data': 'data'
        }
        # Values are stored via the private attributes; the property setters
        # below (with their validation) are NOT invoked here.
        self._status = status
        self._timestamp = timestamp
        self._data = data
    @classmethod
    def from_dict(cls, dikt) -> 'AuditLogEntry':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The AuditLogEntry of this AuditLogEntry.  # noqa: E501
        :rtype: AuditLogEntry
        """
        return util.deserialize_model(dikt, cls)
    @property
    def status(self) -> str:
        """Gets the status of this AuditLogEntry.
        The type of the last action that was carried out on the document.  # noqa: E501
        :return: The status of this AuditLogEntry.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status: str):
        """Sets the status of this AuditLogEntry.

        Raises ValueError for any value outside the allowed status set
        (including None).
        The type of the last action that was carried out on the document.  # noqa: E501
        :param status: The status of this AuditLogEntry.
        :type status: str
        """
        allowed_values = ["created", "viewed", "stamp_failed", "stamp_success", "updated"]  # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"
                .format(status, allowed_values)
            )
        self._status = status
    @property
    def timestamp(self) -> str:
        """Gets the timestamp of this AuditLogEntry.
        An ISO format timestamp indicating the date and time that the event occurred at.  # noqa: E501
        :return: The timestamp of this AuditLogEntry.
        :rtype: str
        """
        return self._timestamp
    @timestamp.setter
    def timestamp(self, timestamp: str):
        """Sets the timestamp of this AuditLogEntry.

        ``timestamp`` is a required field and therefore rejects None; the
        string content itself is not validated here.
        An ISO format timestamp indicating the date and time that the event occurred at.  # noqa: E501
        :param timestamp: The timestamp of this AuditLogEntry.
        :type timestamp: str
        """
        if timestamp is None:
            raise ValueError("Invalid value for `timestamp`, must not be `None`")  # noqa: E501
        self._timestamp = timestamp
    @property
    def data(self) -> object:
        """Gets the data of this AuditLogEntry.
        An object containing extra details about the status.  # noqa: E501
        :return: The data of this AuditLogEntry.
        :rtype: object
        """
        return self._data
    @data.setter
    def data(self, data: object):
        """Sets the data of this AuditLogEntry.

        Optional free-form payload; no validation is performed.
        An object containing extra details about the status.  # noqa: E501
        :param data: The data of this AuditLogEntry.
        :type data: object
        """
        self._data = data
| 29.068702 | 104 | 0.600578 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from .base_model_ import Model
from .. import util
class AuditLogEntry(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self, status: str=None, timestamp: str=None, data: object=None):  # noqa: E501
        """AuditLogEntry - a model defined in Swagger

        :param status: The status of this AuditLogEntry.  # noqa: E501
        :type status: str
        :param timestamp: The timestamp of this AuditLogEntry.  # noqa: E501
        :type timestamp: str
        :param data: The data of this AuditLogEntry.  # noqa: E501
        :type data: object
        """
        # Attribute name -> declared type, used by the deserializer.
        self.swagger_types = {
            'status': str,
            'timestamp': str,
            'data': object
        }

        # Attribute name -> JSON key in the wire format.
        self.attribute_map = {
            'status': 'status',
            'timestamp': 'timestamp',
            'data': 'data'
        }
        # NOTE(review): assigns the private fields directly, bypassing the
        # validating property setters below (e.g. status=None is accepted
        # here but would be rejected by the `status` setter).
        self._status = status
        self._timestamp = timestamp
        self._data = data

    @classmethod
    def from_dict(cls, dikt) -> 'AuditLogEntry':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The AuditLogEntry of this AuditLogEntry.  # noqa: E501
        :rtype: AuditLogEntry
        """
        return util.deserialize_model(dikt, cls)

    @property
    def status(self) -> str:
        """Gets the status of this AuditLogEntry.

        The type of the last action that was carried out on the document.  # noqa: E501

        :return: The status of this AuditLogEntry.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status: str):
        """Sets the status of this AuditLogEntry.

        The type of the last action that was carried out on the document.  # noqa: E501

        :param status: The status of this AuditLogEntry.
        :type status: str
        """
        # Closed set of statuses; anything else (including None) is rejected.
        allowed_values = ["created", "viewed", "stamp_failed", "stamp_success", "updated"]  # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"
                .format(status, allowed_values)
            )

        self._status = status

    @property
    def timestamp(self) -> str:
        """Gets the timestamp of this AuditLogEntry.

        An ISO format timestamp indicating the date and time that the event occurred at.  # noqa: E501

        :return: The timestamp of this AuditLogEntry.
        :rtype: str
        """
        return self._timestamp

    @timestamp.setter
    def timestamp(self, timestamp: str):
        """Sets the timestamp of this AuditLogEntry.

        An ISO format timestamp indicating the date and time that the event occurred at.  # noqa: E501

        :param timestamp: The timestamp of this AuditLogEntry.
        :type timestamp: str
        """
        # The timestamp is the only mandatory field of this model.
        if timestamp is None:
            raise ValueError("Invalid value for `timestamp`, must not be `None`")  # noqa: E501

        self._timestamp = timestamp

    @property
    def data(self) -> object:
        """Gets the data of this AuditLogEntry.

        An object containing extra details about the status.  # noqa: E501

        :return: The data of this AuditLogEntry.
        :rtype: object
        """
        return self._data

    @data.setter
    def data(self, data: object):
        """Sets the data of this AuditLogEntry.

        An object containing extra details about the status.  # noqa: E501

        :param data: The data of this AuditLogEntry.
        :type data: object
        """
        # Free-form payload: no validation is performed.
        self._data = data
| 0 | 0 | 0 |
198c7c90c74f82b793e09645bd9dde660e439df6 | 1,064 | py | Python | robot-runner/Plugins/Systems/TurtleBot3/modules/sensors/CameraSensor.py | engelhamer/robot-runner | 468ffe8130ec4c4a880c21e4119407738c0d3082 | [
"MIT"
] | 21 | 2020-11-30T12:21:18.000Z | 2021-12-24T06:16:46.000Z | robot-runner/Plugins/Systems/TurtleBot3/modules/sensors/CameraSensor.py | engelhamer/robot-runner | 468ffe8130ec4c4a880c21e4119407738c0d3082 | [
"MIT"
] | null | null | null | robot-runner/Plugins/Systems/TurtleBot3/modules/sensors/CameraSensor.py | engelhamer/robot-runner | 468ffe8130ec4c4a880c21e4119407738c0d3082 | [
"MIT"
] | 3 | 2021-04-11T14:41:54.000Z | 2021-06-09T11:07:17.000Z | import rospy
from rospy import ServiceProxy
from std_srvs.srv import (Empty, EmptyRequest, EmptyResponse)
from ExperimentOrchestrator.Architecture.Singleton import Singleton | 34.322581 | 83 | 0.75 | import rospy
from rospy import ServiceProxy
from std_srvs.srv import (Empty, EmptyRequest, EmptyResponse)
from ExperimentOrchestrator.Architecture.Singleton import Singleton
class CameraSensor(metaclass=Singleton):
    """Facade over the four '/camera/*' ROS services.

    A Singleton metaclass ensures every caller shares the same set of
    service proxies instead of opening new connections per use.
    """

    __spawn_service_proxy: ServiceProxy
    __despawn_service_proxy: ServiceProxy
    __start_service_proxy: ServiceProxy
    __stop_service_proxy: ServiceProxy

    def __init__(self):
        # One persistent proxy per camera service endpoint; all of them
        # use the std_srvs Empty service type (no payload either way).
        self.__spawn_service_proxy = rospy.ServiceProxy('/camera/spawn', Empty)
        self.__despawn_service_proxy = rospy.ServiceProxy('/camera/despawn', Empty)
        self.__start_service_proxy = rospy.ServiceProxy('/camera/start', Empty)
        self.__stop_service_proxy = rospy.ServiceProxy('/camera/stop', Empty)

    def spawn(self) -> None:
        """Invoke the '/camera/spawn' service."""
        self.__spawn_service_proxy(EmptyRequest())

    def despawn(self) -> None:
        """Invoke the '/camera/despawn' service."""
        self.__despawn_service_proxy(EmptyRequest())

    def start_recording(self) -> None:
        """Invoke the '/camera/start' service."""
        self.__start_service_proxy(EmptyRequest())

    def stop_recording(self) -> None:
        """Invoke the '/camera/stop' service."""
        # Fix: added the `-> None` annotation for consistency with the
        # three sibling methods above (it was the only one missing it).
        self.__stop_service_proxy(EmptyRequest())
979feae2b64e41c35c8f70c11a56566cc76b3fc4 | 1,383 | py | Python | Network Simulation/C language/Pb-Ps/data/Plot.py | shahmari/Prevention_and_Quarantine_on_SIR | 9173fd3feaa86a79d829ee653ec2c70f678e2ac3 | [
"MIT"
] | 1 | 2021-08-25T09:56:10.000Z | 2021-08-25T09:56:10.000Z | Network Simulation/C language/Pb-Ps/data/Plot.py | shahmari/Prevention_and_Quarantine_on_SIR | 9173fd3feaa86a79d829ee653ec2c70f678e2ac3 | [
"MIT"
] | null | null | null | Network Simulation/C language/Pb-Ps/data/Plot.py | shahmari/Prevention_and_Quarantine_on_SIR | 9173fd3feaa86a79d829ee653ec2c70f678e2ac3 | [
"MIT"
] | null | null | null | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import numpy as np
import matplotlib.pyplot as plt
# %%
# %%
# One CSV per quarantine-density index; each holds a 50x50 table of runs.
file_list = ['Ps-Pb{}.csv'.format(i) for i in range(50)]
rdata = np.ndarray((50, 50, 50))   # rdata[Ps_idx, Pb_idx, file_idx]
rd_mean = np.ndarray((50, 50))     # rd_mean[Pb_idx, Ps_idx] = mean over files
for i in range(50):
    # Pass the path directly so numpy opens *and closes* the file itself;
    # the original open(...) handles were never closed. Also avoids
    # shadowing the (py2) builtin name `file`.
    table = np.loadtxt(file_list[i], dtype='str', delimiter=",")
    for j in range(50):
        for k in range(50):
            rdata[j, k, i] = int(table[k, j])
for i in range(50):
    for j in range(50):
        rd_mean[j, i] = np.mean(rdata[i, j])
# %%
# Subtract j * 0.02 * 10000 per column — presumably the initially-removed
# (quarantined) fraction of the 10000 agents; TODO confirm against the
# simulation code.
for i in range(50):
    for j in range(50):
        rd_mean[i, j] -= (j) * 0.02 * 10000
# %%
plt.figure(dpi=150, figsize=(12, 10))
Pslist = np.linspace(0, 0.99, 50)
Pblist = np.linspace(0, 1, 50)
c = plt.pcolormesh(Pslist, Pblist, rd_mean, cmap="plasma")
plt.title('density of Quarantine - density of Prevention (Local)')
plt.colorbar(c)
plt.xlabel('density of Prevention (Ps)')
plt.ylabel('density of Quarantine (Pb)')
plt.savefig('Ps-Pb')
# %%
# Same heat map, zoomed into the low-density corner.
plt.figure(dpi=150, figsize=(12, 10))
plt.xlim((0, 0.5))
plt.ylim((0, 0.5))
Pslist = np.linspace(0, 0.99, 50)
Pblist = np.linspace(0, 1, 50)
c = plt.pcolormesh(Pslist, Pblist, rd_mean, cmap="plasma")
plt.title('density of Quarantine - density of Prevention (Local)')
plt.colorbar(c)
plt.xlabel('density of Prevention (Ps)')
plt.ylabel('density of Quarantine (Pb)')
plt.savefig('Ps-Pb(limited)')
# %%
| 23.05 | 71 | 0.650036 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import numpy as np
import matplotlib.pyplot as plt
# %%
# %%
# One CSV per quarantine-density index; each holds a 50x50 table of runs.
file_list = ['Ps-Pb{}.csv'.format(i) for i in range(50)]
rdata = np.ndarray((50, 50, 50))   # rdata[Ps_idx, Pb_idx, file_idx]
rd_mean = np.ndarray((50, 50))     # rd_mean[Pb_idx, Ps_idx] = mean over files
for i in range(50):
    # Pass the path directly so numpy opens *and closes* the file itself;
    # the original open(...) handles were never closed. Also avoids
    # shadowing the (py2) builtin name `file`.
    table = np.loadtxt(file_list[i], dtype='str', delimiter=",")
    for j in range(50):
        for k in range(50):
            rdata[j, k, i] = int(table[k, j])
for i in range(50):
    for j in range(50):
        rd_mean[j, i] = np.mean(rdata[i, j])
# %%
# Subtract j * 0.02 * 10000 per column — presumably the initially-removed
# (quarantined) fraction of the 10000 agents; TODO confirm against the
# simulation code.
for i in range(50):
    for j in range(50):
        rd_mean[i, j] -= (j) * 0.02 * 10000
# %%
plt.figure(dpi=150, figsize=(12, 10))
Pslist = np.linspace(0, 0.99, 50)
Pblist = np.linspace(0, 1, 50)
c = plt.pcolormesh(Pslist, Pblist, rd_mean, cmap="plasma")
plt.title('density of Quarantine - density of Prevention (Local)')
plt.colorbar(c)
plt.xlabel('density of Prevention (Ps)')
plt.ylabel('density of Quarantine (Pb)')
plt.savefig('Ps-Pb')
# %%
# Same heat map, zoomed into the low-density corner.
plt.figure(dpi=150, figsize=(12, 10))
plt.xlim((0, 0.5))
plt.ylim((0, 0.5))
Pslist = np.linspace(0, 0.99, 50)
Pblist = np.linspace(0, 1, 50)
c = plt.pcolormesh(Pslist, Pblist, rd_mean, cmap="plasma")
plt.title('density of Quarantine - density of Prevention (Local)')
plt.colorbar(c)
plt.xlabel('density of Prevention (Ps)')
plt.ylabel('density of Quarantine (Pb)')
plt.savefig('Ps-Pb(limited)')
# %%
| 0 | 0 | 0 |
43bf3359b94d2b73744ff9f437a8c81bbdefe7bc | 22,576 | py | Python | zaqar/transport/validation.py | ISCAS-VDI/zaqar | 5c1aedbef1930565a46cc60b1a9d5d5e238f174d | [
"Apache-2.0"
] | null | null | null | zaqar/transport/validation.py | ISCAS-VDI/zaqar | 5c1aedbef1930565a46cc60b1a9d5d5e238f174d | [
"Apache-2.0"
] | null | null | null | zaqar/transport/validation.py | ISCAS-VDI/zaqar | 5c1aedbef1930565a46cc60b1a9d5d5e238f174d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Rackspace, Inc.
# Copyright (c) 2015 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
from oslo_config import cfg
from oslo_utils import timeutils
import six
from zaqar.i18n import _
# Lower bounds (seconds) enforced by the Validator; the matching upper
# bounds are operator-tunable via the config options below.
MIN_MESSAGE_TTL = 60
MIN_CLAIM_TTL = 60
MIN_CLAIM_GRACE = 60
MIN_SUBSCRIPTION_TTL = 60


# Transport-level limits, registered under the 'transport' config group.
_TRANSPORT_LIMITS_OPTIONS = (
    cfg.IntOpt('max_queues_per_page', default=20,
               deprecated_name='queue_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of queues per page.'),

    cfg.IntOpt('max_messages_per_page', default=20,
               deprecated_name='message_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of messages per page.'),

    cfg.IntOpt('max_subscriptions_per_page', default=20,
               deprecated_name='subscription_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of subscriptions per page.'),

    cfg.IntOpt('max_messages_per_claim_or_pop', default=20,
               deprecated_name='max_messages_per_claim',
               help='The maximum number of messages that can be claimed (OR) '
                    'popped in a single request'),

    cfg.IntOpt('max_queue_metadata', default=64 * 1024,
               deprecated_name='metadata_size_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum amount of metadata in a queue.'),

    cfg.IntOpt('max_messages_post_size', default=256 * 1024,
               deprecated_name='message_size_uplimit',
               deprecated_group='limits:transport',
               deprecated_opts=[cfg.DeprecatedOpt('max_message_size')],
               help='Defines the maximum size of message posts.'),

    cfg.IntOpt('max_message_ttl', default=1209600,
               deprecated_name='message_ttl_max',
               deprecated_group='limits:transport',
               help='Maximum amount of time a message will be available.'),

    cfg.IntOpt('max_claim_ttl', default=43200,
               deprecated_name='claim_ttl_max',
               deprecated_group='limits:transport',
               help='Maximum length of a message in claimed state.'),

    cfg.IntOpt('max_claim_grace', default=43200,
               deprecated_name='claim_grace_max',
               deprecated_group='limits:transport',
               help='Defines the maximum message grace period in seconds.'),

    cfg.ListOpt('subscriber_types', default=['http', 'https', 'mailto'],
                help='Defines supported subscriber types.'),
)

_TRANSPORT_LIMITS_GROUP = 'transport'

# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match
# only ASCII characters.
QUEUE_NAME_REGEX = re.compile('^[a-zA-Z0-9_\-]+$')
QUEUE_NAME_MAX_LEN = 64
PROJECT_ID_MAX_LEN = 256
# Raised by the validators in this module when a request violates a limit.
class ValidationFailed(ValueError):
    """User input did not follow API restrictions."""
| 40.028369 | 79 | 0.607282 | # Copyright (c) 2013 Rackspace, Inc.
# Copyright (c) 2015 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
from oslo_config import cfg
from oslo_utils import timeutils
import six
from zaqar.i18n import _
# Lower bounds (seconds) enforced by the Validator; the matching upper
# bounds are operator-tunable via the config options below.
MIN_MESSAGE_TTL = 60
MIN_CLAIM_TTL = 60
MIN_CLAIM_GRACE = 60
MIN_SUBSCRIPTION_TTL = 60


# Transport-level limits, registered under the 'transport' config group.
_TRANSPORT_LIMITS_OPTIONS = (
    cfg.IntOpt('max_queues_per_page', default=20,
               deprecated_name='queue_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of queues per page.'),

    cfg.IntOpt('max_messages_per_page', default=20,
               deprecated_name='message_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of messages per page.'),

    cfg.IntOpt('max_subscriptions_per_page', default=20,
               deprecated_name='subscription_paging_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum number of subscriptions per page.'),

    cfg.IntOpt('max_messages_per_claim_or_pop', default=20,
               deprecated_name='max_messages_per_claim',
               help='The maximum number of messages that can be claimed (OR) '
                    'popped in a single request'),

    cfg.IntOpt('max_queue_metadata', default=64 * 1024,
               deprecated_name='metadata_size_uplimit',
               deprecated_group='limits:transport',
               help='Defines the maximum amount of metadata in a queue.'),

    cfg.IntOpt('max_messages_post_size', default=256 * 1024,
               deprecated_name='message_size_uplimit',
               deprecated_group='limits:transport',
               deprecated_opts=[cfg.DeprecatedOpt('max_message_size')],
               help='Defines the maximum size of message posts.'),

    cfg.IntOpt('max_message_ttl', default=1209600,
               deprecated_name='message_ttl_max',
               deprecated_group='limits:transport',
               help='Maximum amount of time a message will be available.'),

    cfg.IntOpt('max_claim_ttl', default=43200,
               deprecated_name='claim_ttl_max',
               deprecated_group='limits:transport',
               help='Maximum length of a message in claimed state.'),

    cfg.IntOpt('max_claim_grace', default=43200,
               deprecated_name='claim_grace_max',
               deprecated_group='limits:transport',
               help='Defines the maximum message grace period in seconds.'),

    cfg.ListOpt('subscriber_types', default=['http', 'https', 'mailto'],
                help='Defines supported subscriber types.'),
)

_TRANSPORT_LIMITS_GROUP = 'transport'

# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match
# only ASCII characters.
QUEUE_NAME_REGEX = re.compile('^[a-zA-Z0-9_\-]+$')
QUEUE_NAME_MAX_LEN = 64
PROJECT_ID_MAX_LEN = 256
def _config_options():
    """Return the (group, options) pairs this module registers with oslo.config."""
    return [(_TRANSPORT_LIMITS_GROUP, _TRANSPORT_LIMITS_OPTIONS)]
class ValidationFailed(ValueError):
    """User input did not follow API restrictions."""

    def __init__(self, msg, *args, **kwargs):
        # The message is a str.format template; expand it eagerly so the
        # exception carries the final, human-readable text.
        formatted = msg.format(*args, **kwargs)
        super(ValidationFailed, self).__init__(formatted)
class Validator(object):
    """Validates transport-layer requests against the configured limits.

    Each public method checks one kind of request (queue, message, claim,
    subscription) and raises ValidationFailed on the first violation.
    """

    def __init__(self, conf):
        # Register the transport limit options and keep a handle on the
        # resolved [transport] group for fast attribute access.
        self._conf = conf
        self._conf.register_opts(_TRANSPORT_LIMITS_OPTIONS,
                                 group=_TRANSPORT_LIMITS_GROUP)
        self._limits_conf = self._conf[_TRANSPORT_LIMITS_GROUP]
        self._supported_operations = ('add', 'remove', 'replace')

    def queue_identification(self, queue, project):
        """Restrictions on a project id & queue name pair.

        :param queue: Name of the queue
        :param project: Project id
        :raises: ValidationFailed if the `name` is longer than 64
            characters or contains anything other than ASCII digits and
            letters, underscores, and dashes.  Also raises if `project`
            is not None but longer than 256 characters.
        """
        if project is not None and len(project) > PROJECT_ID_MAX_LEN:
            msg = _(u'Project ids may not be more than {0} characters long.')
            raise ValidationFailed(msg, PROJECT_ID_MAX_LEN)

        if len(queue) > QUEUE_NAME_MAX_LEN:
            msg = _(u'Queue names may not be more than {0} characters long.')
            raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN)

        if not QUEUE_NAME_REGEX.match(queue):
            raise ValidationFailed(
                _(u'Queue names may only contain ASCII letters, digits, '
                  'underscores, and dashes.'))

    def _get_change_operation_d10(self, raw_change):
        """Extract and validate the `op` member of a draft-10 patch change."""
        op = raw_change.get('op')
        if op is None:
            msg = (_('Unable to find `op` in JSON Schema change. '
                     'It must be one of the following: %(available)s.') %
                   {'available': ', '.join(self._supported_operations)})
            raise ValidationFailed(msg)
        if op not in self._supported_operations:
            msg = (_('Invalid operation: `%(op)s`. '
                     'It must be one of the following: %(available)s.') %
                   {'op': op,
                    'available': ', '.join(self._supported_operations)})
            raise ValidationFailed(msg)
        return op

    def _get_change_path_d10(self, raw_change):
        """Extract the `path` member of a draft-10 patch change."""
        try:
            return raw_change['path']
        except KeyError:
            msg = _("Unable to find '%s' in JSON Schema change") % 'path'
            raise ValidationFailed(msg)

    def _decode_json_pointer(self, pointer):
        """Parse a json pointer.

        Json Pointers are defined in
        http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .
        The pointers use '/' for separation between object attributes, such
        that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character
        in an attribute name is encoded as "~1" and a '~' character is encoded
        as "~0".
        """
        self._validate_json_pointer(pointer)
        ret = []
        for part in pointer.lstrip('/').split('/'):
            ret.append(part.replace('~1', '/').replace('~0', '~').strip())
        return ret

    def _validate_json_pointer(self, pointer):
        """Validate a json pointer.

        We only accept a limited form of json pointers.
        """
        if not pointer.startswith('/'):
            msg = _('Pointer `%s` does not start with "/".') % pointer
            raise ValidationFailed(msg)
        if re.search('/\s*?/', pointer[1:]):
            msg = _('Pointer `%s` contains adjacent "/".') % pointer
            raise ValidationFailed(msg)
        if len(pointer) > 1 and pointer.endswith('/'):
            msg = _('Pointer `%s` end with "/".') % pointer
            raise ValidationFailed(msg)
        if pointer[1:].strip() == '/':
            msg = _('Pointer `%s` does not contains valid token.') % pointer
            raise ValidationFailed(msg)
        if re.search('~[^01]', pointer) or pointer.endswith('~'):
            msg = _('Pointer `%s` contains "~" not part of'
                    ' a recognized escape sequence.') % pointer
            raise ValidationFailed(msg)

    def _get_change_value(self, raw_change, op):
        """Extract the `value` member; required for every op but 'remove'."""
        if 'value' not in raw_change:
            msg = _('Operation "{0}" requires a member named "value".')
            raise ValidationFailed(msg, op)
        return raw_change['value']

    def _validate_change(self, change):
        """Ensure a non-remove change targets /metadata/<key>."""
        if change['op'] == 'remove':
            return
        path_root = change['path'][0]
        if len(change['path']) >= 1 and path_root.lower() != 'metadata':
            msg = _("The root of path must be metadata, e.g /metadata/key.")
            raise ValidationFailed(msg)

    def _validate_path(self, op, path):
        """Ensure the decoded pointer has the expected number of tokens."""
        limits = {'add': 2, 'remove': 2, 'replace': 2}
        if len(path) != limits.get(op, 2):
            msg = _("Invalid JSON pointer for this resource: "
                    "'/%s, e.g /metadata/key'") % '/'.join(path)
            raise ValidationFailed(msg)

    def _parse_json_schema_change(self, raw_change, draft_version):
        """Return (op, path_list) for one raw change of the given draft."""
        if draft_version == 10:
            op = self._get_change_operation_d10(raw_change)
            path = self._get_change_path_d10(raw_change)
        else:
            msg = _('Unrecognized JSON Schema draft version')
            raise ValidationFailed(msg)

        path_list = self._decode_json_pointer(path)
        return op, path_list

    def queue_patching(self, request, changes):
        """Validate a JSON-patch request body and return normalized changes.

        :param request: incoming request (its content_type selects the
            JSON Schema draft version)
        :param changes: decoded request body; must be a list of dicts
        :raises: ValidationFailed on any malformed operation
        """
        washed_changes = []
        content_types = {
            'application/openstack-messaging-v2.0-json-patch': 10,
        }

        json_schema_version = content_types[request.content_type]

        if not isinstance(changes, list):
            msg = _('Request body must be a JSON array of operation objects.')
            raise ValidationFailed(msg)

        for raw_change in changes:
            if not isinstance(raw_change, dict):
                msg = _('Operations must be JSON objects.')
                raise ValidationFailed(msg)

            (op, path) = self._parse_json_schema_change(raw_change,
                                                        json_schema_version)

            # NOTE(flwang): Now the 'path' is a list.
            self._validate_path(op, path)
            change = {'op': op, 'path': path,
                      'json_schema_version': json_schema_version}

            if not op == 'remove':
                change['value'] = self._get_change_value(raw_change, op)

            self._validate_change(change)

            washed_changes.append(change)

        return washed_changes

    def queue_listing(self, limit=None, **kwargs):
        """Restrictions involving a list of queues.

        :param limit: The expected number of queues in the list
        :param kwargs: Ignored arguments passed to storage API
        :raises: ValidationFailed if the limit is exceeded
        """
        uplimit = self._limits_conf.max_queues_per_page
        if limit is not None and not (0 < limit <= uplimit):
            msg = _(u'Limit must be at least 1 and no greater than {0}.')
            raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)

    def queue_metadata_length(self, content_length):
        """Restrictions on queue's length.

        :param content_length: Queue request's length.
        :raises: ValidationFailed if the metadata is oversize.
        """
        if content_length is None:
            return
        if content_length > self._limits_conf.max_queue_metadata:
            msg = _(u'Queue metadata is too large. Max size: {0}')
            raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)

    def queue_metadata_putting(self, queue_metadata):
        """Checking if the reserved attributes of the queue are valid.

        :param queue_metadata: Queue's metadata.
        :raises: ValidationFailed if any reserved attribute is invalid.
        """
        if not queue_metadata:
            return

        # NOTE(review): a falsy value (0) skips both checks below, so a
        # _default_message_ttl of 0 passes silently — preserved as-is.
        queue_default_ttl = queue_metadata.get('_default_message_ttl', None)
        if queue_default_ttl and not isinstance(queue_default_ttl, int):
            msg = _(u'_default_message_ttl must be integer.')
            raise ValidationFailed(msg)

        if queue_default_ttl:
            if not (MIN_MESSAGE_TTL <= queue_default_ttl <=
                    self._limits_conf.max_message_ttl):
                msg = _(u'_default_message_ttl can not exceed {0} '
                        'seconds, and must be at least {1} seconds long.')
                raise ValidationFailed(
                    msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL)

        queue_max_msg_size = queue_metadata.get('_max_messages_post_size',
                                                None)
        if queue_max_msg_size and not isinstance(queue_max_msg_size, int):
            msg = _(u'_max_messages_post_size must be integer.')
            raise ValidationFailed(msg)

        if queue_max_msg_size:
            if not (0 < queue_max_msg_size <=
                    self._limits_conf.max_messages_post_size):
                raise ValidationFailed(
                    _(u'_max_messages_post_size can not exceed {0}, '
                      ' and must be at least greater than 0.'),
                    self._limits_conf.max_messages_post_size)

    def message_posting(self, messages):
        """Restrictions on a list of messages.

        :param messages: A list of messages
        :raises: ValidationFailed if any message has a out-of-range
            TTL.
        """
        if not messages:
            raise ValidationFailed(_(u'No messages to enqueu.'))

        for msg in messages:
            self.message_content(msg)

    def message_length(self, content_length, max_msg_post_size=None):
        """Restrictions on message post length.

        :param content_length: Queue request's length.
        :raises: ValidationFailed if the metadata is oversize.
        """
        if content_length is None:
            return

        if max_msg_post_size:
            try:
                min_max_size = min(max_msg_post_size,
                                   self._limits_conf.max_messages_post_size)
                if content_length > min_max_size:
                    raise ValidationFailed(
                        _(u'Message collection size is too large. The max '
                          'size for current queue is {0}. It is calculated '
                          'by max size = min(max_messages_post_size_config: '
                          '{1}, max_messages_post_size_queue: {2}).'),
                        min_max_size,
                        self._limits_conf.max_messages_post_size,
                        max_msg_post_size)
            except TypeError:
                # NOTE(flwang): If there is a type error when using min(),
                # it only happens in py3.x, it will be skipped and compare
                # the message length with the size defined in config file.
                pass

        if content_length > self._limits_conf.max_messages_post_size:
            raise ValidationFailed(
                _(u'Message collection size is too large. Max size {0}'),
                self._limits_conf.max_messages_post_size)

    def message_content(self, message):
        """Restrictions on each message."""
        ttl = message['ttl']

        if not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl):
            msg = _(u'The TTL for a message may not exceed {0} seconds, and '
                    'must be at least {1} seconds long.')
            raise ValidationFailed(
                msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL)

    def message_listing(self, limit=None, **kwargs):
        """Restrictions involving a list of messages.

        :param limit: The expected number of messages in the list
        :param kwargs: Ignored arguments passed to storage API
        :raises: ValidationFailed if the limit is exceeded
        """
        uplimit = self._limits_conf.max_messages_per_page
        if limit is not None and not (0 < limit <= uplimit):
            msg = _(u'Limit must be at least 1 and may not '
                    'be greater than {0}.')
            raise ValidationFailed(
                msg, self._limits_conf.max_messages_per_page)

    def message_deletion(self, ids=None, pop=None):
        """Restrictions involving deletion of messages.

        :param ids: message ids passed in by the delete request
        :param pop: count of messages to be POPped
        :raises: ValidationFailed if,
            pop AND id params are present together
            neither pop or id params are present
            message count to be popped > maximum allowed
        """
        if pop is not None and ids is not None:
            msg = _(u'pop and id params cannot be present together in the '
                    'delete request.')
            raise ValidationFailed(msg)

        if pop is None and ids is None:
            msg = _(u'The request should have either "ids" or "pop" '
                    'parameter in the request, to be able to delete.')
            raise ValidationFailed(msg)

        pop_uplimit = self._limits_conf.max_messages_per_claim_or_pop
        if pop is not None and not (0 < pop <= pop_uplimit):
            msg = _(u'Pop value must be at least 1 and may not '
                    'be greater than {0}.')
            raise ValidationFailed(msg, pop_uplimit)

        delete_uplimit = self._limits_conf.max_messages_per_page
        if ids is not None and not (0 < len(ids) <= delete_uplimit):
            msg = _(u'ids parameter should have at least 1 and not '
                    'greater than {0} values.')
            raise ValidationFailed(msg, delete_uplimit)

    def claim_creation(self, metadata, limit=None):
        """Restrictions on the claim parameters upon creation.

        :param metadata: The claim metadata
        :param limit: The number of messages to claim
        :raises: ValidationFailed if either TTL or grace is out of range,
            or the expected number of messages exceed the limit.
        """
        self.claim_updating(metadata)

        uplimit = self._limits_conf.max_messages_per_claim_or_pop
        if limit is not None and not (0 < limit <= uplimit):
            msg = _(u'Limit must be at least 1 and may not '
                    'be greater than {0}.')
            raise ValidationFailed(
                msg, self._limits_conf.max_messages_per_claim_or_pop)

        grace = metadata['grace']

        if not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace):
            msg = _(u'The grace for a claim may not exceed {0} seconds, and '
                    'must be at least {1} seconds long.')
            raise ValidationFailed(
                msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE)

    def claim_updating(self, metadata):
        """Restrictions on the claim TTL.

        :param metadata: The claim metadata
        :raises: ValidationFailed if the TTL is out of range
        """
        ttl = metadata['ttl']

        if not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl):
            msg = _(u'The TTL for a claim may not exceed {0} seconds, and '
                    'must be at least {1} seconds long.')
            raise ValidationFailed(
                msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL)

    def subscription_posting(self, subscription):
        """Restrictions on a creation of subscription.

        :param subscription: dict of subscription
        :raises: ValidationFailed if the subscription is invalid.
        """
        for p in ('subscriber',):
            if p not in subscription.keys():
                raise ValidationFailed(_(u'Missing parameter %s in body.') % p)

        self.subscription_patching(subscription)

    def subscription_patching(self, subscription):
        """Restrictions on an update of subscription.

        :param subscription: dict of subscription
        :raises: ValidationFailed if the subscription is invalid.
        """
        if not subscription:
            raise ValidationFailed(_(u'No subscription to create.'))

        subscriber = subscription.get('subscriber', None)
        subscriber_type = None

        if subscriber:
            parsed_uri = six.moves.urllib_parse.urlparse(subscriber)
            subscriber_type = parsed_uri.scheme

            if subscriber_type not in self._limits_conf.subscriber_types:
                msg = _(u'The subscriber type of subscription must be '
                        u'supported in the list {0}.')
                raise ValidationFailed(msg, self._limits_conf.subscriber_types)

        options = subscription.get('options', None)
        if options and not isinstance(options, dict):
            msg = _(u'Options must be a dict.')
            raise ValidationFailed(msg)

        ttl = subscription.get('ttl', None)
        if ttl:
            if not isinstance(ttl, int):
                msg = _(u'TTL must be an integer.')
                raise ValidationFailed(msg)

            if ttl < MIN_SUBSCRIPTION_TTL:
                msg = _(u'The TTL for a subscription '
                        'must be at least {0} seconds long.')
                raise ValidationFailed(msg, MIN_SUBSCRIPTION_TTL)

            # NOTE(flwang): By this change, technically, user can set a very
            # big TTL so as to get a very long subscription.
            now = timeutils.utcnow_ts()
            now_dt = datetime.datetime.utcfromtimestamp(now)
            msg = _(u'The TTL seconds for a subscription plus current time'
                    ' must be less than {0}.')
            try:
                # NOTE(flwang): If below expression works, then we believe the
                # ttl is acceptable otherwise it exceeds the max time of
                # python.
                now_dt + datetime.timedelta(seconds=ttl)
            except OverflowError:
                raise ValidationFailed(msg, datetime.datetime.max)

    def subscription_listing(self, limit=None, **kwargs):
        """Restrictions involving a list of subscriptions.

        :param limit: The expected number of subscriptions in the list
        :param kwargs: Ignored arguments passed to storage API
        :raises: ValidationFailed if the limit is exceeded
        """
        uplimit = self._limits_conf.max_subscriptions_per_page
        if limit is not None and not (0 < limit <= uplimit):
            msg = _(u'Limit must be at least 1 and may not '
                    'be greater than {0}.')
            raise ValidationFailed(
                msg, self._limits_conf.max_subscriptions_per_page)

    def get_limit_conf_value(self, limit_conf_name=None):
        """Return the value of limit configuration.

        :param limit_conf_name: configuration name
        """
        return self._limits_conf[limit_conf_name]
| 3,683 | 15,304 | 73 |
310027775251a25345760cd9ea562fcd1607d396 | 2,438 | py | Python | assignments/ps5-template/solve_tilt.py | tallamjr/mit-6006 | c2aa6bb48edef5800c0779ba2eebd697d44249b5 | [
"MIT"
] | 1 | 2022-02-26T13:52:31.000Z | 2022-02-26T13:52:31.000Z | assignments/ps5-template/solve_tilt.py | tallamjr/mit-6006 | c2aa6bb48edef5800c0779ba2eebd697d44249b5 | [
"MIT"
] | null | null | null | assignments/ps5-template/solve_tilt.py | tallamjr/mit-6006 | c2aa6bb48edef5800c0779ba2eebd697d44249b5 | [
"MIT"
def solve_tilt(B, t):
    '''
    Input:  B | Starting board configuration
            t | Tuple t = (x, y) representing the target square
    Output: M | List of moves that solves B (or None if B not solvable)
    '''
    x, y = t
    start = tuple(tuple(row) for row in B)      # hashable state for the search
    if start[y][x] == 'o':
        return []                               # already solved: no moves needed
    # Breadth-first search over board states: the first time a state with a
    # piece on the target is reached, the move list is provably shortest.
    parent = {start: None}                      # state -> (predecessor, move)
    frontier = [start]
    while frontier:
        next_frontier = []
        for state in frontier:
            for d in ('up', 'down', 'left', 'right'):
                child = move(state, d)
                if child in parent:             # already visited (or no-op move)
                    continue
                parent[child] = (state, d)
                if child[y][x] == 'o':
                    # Reconstruct the move sequence by walking parents back
                    # to the start, then reversing.
                    M = []
                    node = child
                    while parent[node] is not None:
                        node, last_move = parent[node]
                        M.append(last_move)
                    M.reverse()
                    return M
                next_frontier.append(child)
        frontier = next_frontier
    return None                                 # state space exhausted: unsolvable


####################################
# USE BUT DO NOT MODIFY CODE BELOW #
####################################

def move(B, d):
    '''
    Input:  B | Board configuration
            d | Direction: either 'up', 'down', 'left', or 'right'
    Output: B_ | New configuration made by tilting B in direction d
    '''
    n = len(B)
    B_ = list(list(row) for row in B)
    # For each lane, slide pieces ('o') toward the tilt direction; the
    # tracker index (y_ or x_) marks the next free landing cell and is
    # reset past any blocker (non-'.') encountered.
    if d == 'up':
        for x in range(n):
            y_ = 0
            for y in range(n):
                if (B_[y][x] == 'o') and (B_[y_][x] == '.'):
                    B_[y][x], B_[y_][x] = B_[y_][x], B_[y][x]
                    y_ += 1
                if (B_[y][x] != '.') or (B_[y_][x] != '.'):
                    y_ = y
    if d == 'down':
        for x in range(n):
            y_ = n - 1
            for y in range(n - 1, -1, -1):
                if (B_[y][x] == 'o') and (B_[y_][x] == '.'):
                    B_[y][x], B_[y_][x] = B_[y_][x], B_[y][x]
                    y_ -= 1
                if (B_[y][x] != '.') or (B_[y_][x] != '.'):
                    y_ = y
    if d == 'left':
        for y in range(n):
            x_ = 0
            for x in range(n):
                if (B_[y][x] == 'o') and (B_[y][x_] == '.'):
                    B_[y][x], B_[y][x_] = B_[y][x_], B_[y][x]
                    x_ += 1
                if (B_[y][x] != '.') or (B_[y][x_] != '.'):
                    x_ = x
    if d == 'right':
        for y in range(n):
            x_ = n - 1
            for x in range(n - 1, -1, -1):
                if (B_[y][x] == 'o') and (B_[y][x_] == '.'):
                    B_[y][x], B_[y][x_] = B_[y][x_], B_[y][x]
                    x_ -= 1
                if (B_[y][x] != '.') or (B_[y][x_] != '.'):
                    x_ = x
    B_ = tuple(tuple(row) for row in B_)        # back to an immutable state
    return B_

def board_str(B):
    '''
    Input:  B | Board configuration
    Output: S | ASCII string representing configuration B
    '''
    n = len(B)
    rows = ['+' + ('-'*n) + '+']
    for row in B:
        rows.append('|' + ''.join(row) + '|')
    rows.append(rows[0])
    S = '\n'.join(rows)
    return S
| 32.506667 | 71 | 0.350287 | def solve_tilt(B, t):
'''
Input: B | Starting board configuration
t | Tuple t = (x, y) representing the target square
Output: M | List of moves that solves B (or None if B not solvable)
'''
M = []
##################
# YOUR CODE HERE #
##################
return M
####################################
# USE BUT DO NOT MODIFY CODE BELOW #
####################################
def move(B, d):
    '''
    Input:  B | Board configuration
            d | Direction: either 'up', down', 'left', or 'right'
    Output: B_ | New configuration made by tilting B in direction d
    '''
    n = len(B)
    G = [list(row) for row in B]  # mutable working copy; B is left untouched
    if d == 'up':
        for col in range(n):
            slot = 0  # lowest row index a roller may still occupy
            for row in range(n):
                if G[row][col] == 'o' and G[slot][col] == '.':
                    G[slot][col] = 'o'
                    G[row][col] = '.'
                    slot += 1
                if G[row][col] != '.' or G[slot][col] != '.':
                    slot = row  # blocker: nothing may rest before this row
    elif d == 'down':
        for col in range(n):
            slot = n - 1
            for row in range(n - 1, -1, -1):
                if G[row][col] == 'o' and G[slot][col] == '.':
                    G[slot][col] = 'o'
                    G[row][col] = '.'
                    slot -= 1
                if G[row][col] != '.' or G[slot][col] != '.':
                    slot = row
    elif d == 'left':
        for row in range(n):
            slot = 0
            for col in range(n):
                if G[row][col] == 'o' and G[row][slot] == '.':
                    G[row][slot] = 'o'
                    G[row][col] = '.'
                    slot += 1
                if G[row][col] != '.' or G[row][slot] != '.':
                    slot = col
    elif d == 'right':
        for row in range(n):
            slot = n - 1
            for col in range(n - 1, -1, -1):
                if G[row][col] == 'o' and G[row][slot] == '.':
                    G[row][slot] = 'o'
                    G[row][col] = '.'
                    slot -= 1
                if G[row][col] != '.' or G[row][slot] != '.':
                    slot = col
    return tuple(tuple(row) for row in G)
def board_str(B):
    '''
    Input:  B | Board configuration
    Output: S | ASCII string representing configuration B
    '''
    edge = '+{}+'.format('-' * len(B))
    lines = [edge]
    lines.extend('|%s|' % ''.join(row) for row in B)
    lines.append(edge)
    return '\n'.join(lines)
| 0 | 0 | 0 |
0d4c68181df187231c7452e046bd203646f84ebf | 733 | py | Python | Problem 07/advent7b.py | mankybansal/advent-of-code-2020 | 62b389c52d488ea88443b564b6b6b89fd7b5290b | [
"Apache-2.0"
] | null | null | null | Problem 07/advent7b.py | mankybansal/advent-of-code-2020 | 62b389c52d488ea88443b564b6b6b89fd7b5290b | [
"Apache-2.0"
] | null | null | null | Problem 07/advent7b.py | mankybansal/advent-of-code-2020 | 62b389c52d488ea88443b564b6b6b89fd7b5290b | [
"Apache-2.0"
] | null | null | null | ad_list = {}
# Advent of Code 2020, day 7 part 2: how many individual bags must a
# 'shiny gold' bag ultimately contain?
# ad_list maps an outer bag colour (e.g. 'lightred') to a list of
# (quantity, inner bag colour) tuples parsed from the puzzle input.
ad_list = {}


def recurse(key):
    """Return how many bags are (recursively) contained inside bag *key*."""
    # 'otherbags' is the parsed form of "no other bags" - an empty bag.
    if key == 'otherbags':
        return 0
    total = 0
    for q_num, q_key in ad_list[key]:
        # Each inner bag counts itself plus everything nested inside it.
        total += (q_num * recurse(q_key)) + q_num
    return total


# FIX: 'recurse' was called below but never defined in this copy of the
# script (NameError at the final print). The input file is now opened via
# a context manager so the handle is closed deterministically, and the
# outer split token is TWO spaces: after stripping '.', 'contain' and ','
# the rule chunks ("light red bags", "1 bright white bag", ...) are
# separated by double spaces; a single-space split would leave one-word
# chunks and crash on split_rule[1].
with open('input.txt', 'r') as puzzle_input:
    for line in puzzle_input:
        rules = line.strip().replace('.', '').replace('contain', '').replace(',', ' ').split('  ')
        master_key = None
        for i in range(len(rules)):
            if i == 0:
                # First chunk names the outer bag, e.g. 'light red bags'.
                split_rule = rules[i].split(' ')
                key = split_rule[0] + split_rule[1]
                if key not in ad_list:
                    ad_list[key] = []
                master_key = key
            else:
                # Remaining chunks: '2 muted yellow bags' or 'no other bags'.
                split_rule = rules[i].split(' ')
                number_key = 0 if split_rule[0] == 'no' else int(split_rule[0])
                ad_list[master_key].append((number_key, split_rule[1] + split_rule[2]))

print(recurse('shinygold'))
| 25.275862 | 91 | 0.622101 | ad_list = {}
# Build an adjacency list of bag-containment rules from the puzzle input.
# ad_list (initialised to {} just above this loop) maps an outer bag
# colour, e.g. 'lightred', to a list of (quantity, inner bag colour)
# tuples.
# NOTE(review): the outer split token below looks like it should be a run
# of TWO spaces (after removing '.', 'contain' and ',' the rule chunks are
# double-space separated); a single space leaves one-word chunks and
# split_rule[1] would raise IndexError. Whitespace may have been mangled
# in extraction - confirm against the original file.
for line in open('input.txt', 'r').readlines():
    rules = line.strip().replace('.', '').replace('contain', '').replace(',', ' ').split(' ')
    master_key = None
    for i in range(len(rules)):
        if i == 0:
            # First chunk names the outer bag, e.g. 'light red bags'.
            split_rule = rules[i].split(' ')
            key = split_rule[0] + split_rule[1]
            if key not in ad_list:
                ad_list[key] = []
            master_key = key
        else:
            # Remaining chunks: '2 muted yellow bags' or 'no other bags'.
            split_rule = rules[i].split(' ')
            number_key = 0 if split_rule[0] == 'no' else int(split_rule[0])
            ad_list[master_key].append((number_key, split_rule[1] + split_rule[2]))
def recurse(key):
    """Return how many bags are (recursively) contained inside bag *key*.

    'otherbags' is the parsed form of "no other bags" and contributes 0.
    """
    if key == 'otherbags':
        return 0
    total = 0
    for q_num, q_key in ad_list[key]:
        # Each inner bag counts itself plus everything nested within it.
        total += (q_num * recurse(q_key)) + q_num
    return total
print(recurse('shinygold'))
| 135 | 0 | 23 |
398f084171b85e93a22c9f974a994550bcc35634 | 1,019 | py | Python | DAGs/1.x/dag-run-var.py | 094459/blog-mwaa-variables | 4ad92bc778c1acd7afb2d3ba4f03b13c674d1274 | [
"MIT"
] | 1 | 2021-08-02T22:54:30.000Z | 2021-08-02T22:54:30.000Z | DAGs/1.x/dag-run-var.py | 094459/blog-mwaa-variables | 4ad92bc778c1acd7afb2d3ba4f03b13c674d1274 | [
"MIT"
] | null | null | null | DAGs/1.x/dag-run-var.py | 094459/blog-mwaa-variables | 4ad92bc778c1acd7afb2d3ba4f03b13c674d1274 | [
"MIT"
] | null | null | null | from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.utils.dates import days_ago

# DAG demonstrating how a value can be supplied at trigger time via
# dag_run.conf, falling back to an Airflow Variable when no conf payload
# is passed. Manually triggered only (schedule_interval=None).
with DAG(
    dag_id="airflow_variables_atruntime",
    schedule_interval=None,
    catchup=False,
    start_date=days_ago(1)) as dag:

    # FIX: all three operators were assigned to the same local name
    # 'get_var_filename', shadowing one another; each task now has its own
    # variable so it can be referenced (e.g. for dependency wiring) later.
    # task_ids are unchanged, so the DAG's external shape is identical.
    get_var_filename = BashOperator(
        task_id="get_var_filename",
        bash_command="""echo 'You are running this DAG with the following variable file: "{{ dag_run.conf["cli_test"] if dag_run.conf else var.value.cli_test }}"'""",
    )
    get_var_filename2 = BashOperator(
        task_id="get_var_filename2",
        bash_command='echo "You are running this DAG with the following variable file: \'{{ dag_run.conf["cli_test2"] if dag_run.conf else var.value.cli_test2 }}\'"',
    )
    get_var_filename3 = BashOperator(
        task_id="get_var_filename3",
        bash_command='echo "You are running this DAG with the following variable file: \'{{ dag_run.conf["cli_test3"] if dag_run.conf else var.value.cli_test3 }}\'"',
    )

# var.value.cli_test is the fallback Airflow Variable read by the template.
| 42.458333 | 166 | 0.712463 | from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.utils.dates import days_ago

# Demo DAG: each task echoes a value taken from dag_run.conf when the DAG
# is triggered with a payload, otherwise from the matching Airflow
# Variable (cli_test / cli_test2 / cli_test3).
with DAG(
    dag_id="airflow_variables_atruntime",
    schedule_interval=None,
    catchup=False,
    start_date=days_ago(1)) as dag:

    # FIX: the original bound every BashOperator to one variable name,
    # silently shadowing the previous two tasks; use distinct names.
    # task_ids (the tasks' external identity) are untouched.
    echo_cli_test = BashOperator(
        task_id="get_var_filename",
        bash_command="""echo 'You are running this DAG with the following variable file: "{{ dag_run.conf["cli_test"] if dag_run.conf else var.value.cli_test }}"'""",
    )
    echo_cli_test2 = BashOperator(
        task_id="get_var_filename2",
        bash_command='echo "You are running this DAG with the following variable file: \'{{ dag_run.conf["cli_test2"] if dag_run.conf else var.value.cli_test2 }}\'"',
    )
    echo_cli_test3 = BashOperator(
        task_id="get_var_filename3",
        bash_command='echo "You are running this DAG with the following variable file: \'{{ dag_run.conf["cli_test3"] if dag_run.conf else var.value.cli_test3 }}\'"',
    )

# var.value.cli_test is the fallback Airflow Variable read by the template.
| 0 | 0 | 0 |
742d56ff81c4a523a35051ac2e424c33bf54fbb9 | 7,877 | py | Python | sasdocs/project.py | benjamincorcoran/sasdocs | cbcc022d47c4dc4b3b8f37189b951be3cc4b4648 | [
"MIT"
] | 5 | 2020-02-14T12:32:01.000Z | 2021-12-20T10:01:10.000Z | sasdocs/project.py | benjamincorcoran/sasdocs | cbcc022d47c4dc4b3b8f37189b951be3cc4b4648 | [
"MIT"
] | 45 | 2019-12-26T19:00:33.000Z | 2021-04-21T14:16:34.000Z | sasdocs/project.py | benjamincorcoran/sasdocs | cbcc022d47c4dc4b3b8f37189b951be3cc4b4648 | [
"MIT"
] | null | null | null | import re
import datetime
import logging
import pathlib
import datetime
import jinja2
import importlib.resources as pkg_resources
from collections import Counter
from . import templates, format_logger
from .program import sasProgram
class sasProject(object):
    """
    Abstracted SAS project class.
    A SAS project is a collection of individual SAS programs that combine,
    use the same library, include each other, or generally create datasets used by
    each other in such away that they can be considered largly part of the same piece
    of work.
    ...
    Attributes
    ----------
    path : pathlib.Path
        File path to the root directory of the project
    programs : [sasProgram]
        List of parsed .sas programs found in the project root or subfolders
    macroVariables : [macroVariableDefinition]
        List of all macro variable defintions found in all programs in the project
    """
    def __init__(self, path):
        """
        Initialise the project rooted at *path*.

        FIX: this constructor was missing from this copy of the class, so
        instances never acquired the ``logger``, ``programs`` or
        ``documentation`` attributes that every other method relies on.

        Parameters
        ----------
        path : str
            The root file path of the project.
        """
        self.path = path
        self.logger = logging.getLogger(__name__)
        try:
            self.logger = format_logger(self.logger, {'path': self.path})
        except Exception as e:
            self.logger.exception("Unable to format log. {}".format(e))
        self.programs = []
        self.documentation = {}
        # load_project returns False on any failure; skip the summary step
        # then, since path/programs may not have been populated.
        if self.load_project(path) is False:
            return None
        self.get_extended_info()
    def load_project(self, path):
        """
        load_project(path)
        Search the given path recursively to find all .sas files, then generate sasProgram objects
        from any valid sas programs found.
        Sets values of path and programs.
        Parameters
        ----------
        path : str
            The root file path of the project .
        """
        try:
            self.path = pathlib.Path(path).resolve(strict=True)
        except Exception as e:
            self.logger.exception("Unable to resolve path: {}".format(e))
            return False
        try:
            programPaths = self.path.rglob('*.sas')
        except Exception as e:
            self.logger.exception("Unable to search folder: {}".format(e))
            return False
        try:
            self.add_programs_to_project(programPaths)
        except Exception as e:
            self.logger.exception("Unable to add programs to project: {}".format(e))
            return False
        # self.macroVariables = {d.variable:d.value for d in self.get_objects(objectType='macroVariableDefinition')}
    def add_programs_to_project(self, programPaths):
        """
        add_programs_to_project(programPaths)
        For a list of found paths to .sas files in the project directory, generate sasProgram objects. If any sasProgram
        objects contain an include object, where possible follow the path in the %include statement, parse file and add to
        the project's programs list.
        Does not parse the program if the path has already been visited.
        Parameters
        ----------
        programPaths : list
            List of discovered program paths in the project's directories.
        """
        for path in programPaths:
            if path not in [program.path for program in self.programs]:
                self.programs.append(sasProgram(path))
        includePaths = set(include.path for include in self.get_objects(objectType='include'))
        # Programs that fail to parse are still appended (so the set
        # difference above eventually empties) and are filtered out below.
        while includePaths.difference(set([program.path for program in self.programs])):
            for path in includePaths:
                self.programs.append(sasProgram(path))
            includePaths = set(include.path for include in self.get_objects(objectType='include'))
        self.programs = [program for program in self.programs if program.failedLoad != 1]
    def add_addtional_documentation_to_project(self):
        """
        Add any documenation found in the project as an attribute.
        Creates readme and documentation attributes.
        """
        mdPaths = self.path.glob('*.md')
        # Test for README in root directory
        readMe = self.path.joinpath('readme.md')
        if readMe.is_file():
            with self.path.joinpath('readme.md').open() as f:
                self.readme = f.read()
            # Demote every markdown heading one level so the readme nests
            # under the generated documentation's own headings.
            self.readme = re.sub(r'(^#+\s)',r'#\1',self.readme,flags=re.M)
        else:
            self.readme = ''
        docs = {}
        for path in mdPaths:
            with path.open() as f:
                docs[path.relative_to(self.path)] = f.read()
        self.documentation['additional'] = docs
    def summarise_project(self):
        """
        summarise_objects()
        Recursively loop through parsed objects in the project's programs, counting each object by object type.
        This function will count macros and the contents of said macros.
        Returns
        -------
        ObjCounter : Counter
            Collections Counter object for all sasdoc.object types found in all programs in the project.
        ProgramCount : dict
            Dictionary containing a object Counter for each program found in the project
        """
        objectCounter = Counter()
        programCounter = dict()
        for program in self.programs:
            cnt = program.summarise_objects()
            objectCounter += cnt
            programCounter[program] = dict(cnt)
        return objectCounter, programCounter
    def get_objects(self, objectType=None):
        """
        get_objects(objectType=None)
        Recursively loop through parsed programs in the project, yielding each sasdocs object. If the object
        is a macro object, enter and yield sas objects found in the macro's contents.
        This function will never return a macro object.
        If passed with optional objectType, this function will only yield objects of type equal to objectType.
        Parameters
        ----------
        objectType : str
            If not none, only yield objects where the object is of type objectType.
        Yields
        ------
        sasdocs.object
        """
        for program in self.programs:
            yield from program.get_objects(objectType=objectType)
    def get_extended_info(self):
        """
        get_extended_info
        Creates class attributes for information about the SAS project.
        .. code-block:: rst
            name : Filename of the SAS code,
            path : Full path to the SAS code,
            programs : Number of programs found in the project,
            summary : Counter object returned by summarise_objects,
            objects : Dictionary of Counter objects indexed by program
        """
        objSum, prgSum = self.summarise_project()
        self.name = self.path.name
        self.nPrograms = len(self.programs)
        self.summary = dict(objSum)
        self.objects = dict(prgSum)
        self.buildTime = "{:%Y-%m-%d %H:%M}".format(datetime.datetime.now())
    def generate_documentation(self, macroOnly=False):
        """
        generate_documentation(outputDirectory=None)
        Generate documentation for the project using the jinja2 templates
        """
        documentation = {}
        if not macroOnly:
            for program in self.programs:
                documentation[program.name] = program.generate_documentation()
        template = jinja2.Template(pkg_resources.read_text(templates, 'macro.md'))
        documentation['macros']=template.render(program=self)
        return documentation
| 34.700441 | 124 | 0.596166 | import re
import datetime
import logging
import pathlib
import datetime
import jinja2
import importlib.resources as pkg_resources
from collections import Counter
from . import templates, format_logger
from .program import sasProgram
class sasProject(object):
    """
    Abstracted SAS project class.
    A SAS project is a collection of individual SAS programs that combine,
    use the same library, include each other, or generally create datasets used by
    each other in such away that they can be considered largly part of the same piece
    of work.
    ...
    Attributes
    ----------
    path : pathlib.Path
        File path to the root directory of the project
    programs : [sasProgram]
        List of parsed .sas programs found in the project root or subfolders
    macroVariables : [macroVariableDefinition]
        List of all macro variable defintions found in all programs in the project
    """
    def __init__(self, path):
        """
        Initialise the project rooted at *path*.
        Sets up the logger and the programs/documentation containers, then
        parses every .sas file found under *path* and, on success, computes
        the extended summary attributes.
        Parameters
        ----------
        path : str
            The root file path of the project.
        """
        self.path = path
        self.logger = logging.getLogger(__name__)
        try:
            self.logger = format_logger(self.logger,{'path':self.path})
        except Exception as e:
            self.logger.exception("Unable to format log. {}".format(e))
        self.programs = []
        self.documentation = {}
        # load_project returns False on any failure; skip the summary step
        # then, since path/programs may not have been fully populated.
        if self.load_project(path) is False:
            return None
        self.get_extended_info()
    def load_project(self, path):
        """
        load_project(path)
        Search the given path recursively to find all .sas files, then generate sasProgram objects
        from any valid sas programs found.
        Sets values of path and programs.
        Parameters
        ----------
        path : str
            The root file path of the project .
        """
        try:
            self.path = pathlib.Path(path).resolve(strict=True)
        except Exception as e:
            self.logger.exception("Unable to resolve path: {}".format(e))
            return False
        try:
            programPaths = self.path.rglob('*.sas')
        except Exception as e:
            self.logger.exception("Unable to search folder: {}".format(e))
            return False
        try:
            self.add_programs_to_project(programPaths)
        except Exception as e:
            self.logger.exception("Unable to add programs to project: {}".format(e))
            return False
        # self.macroVariables = {d.variable:d.value for d in self.get_objects(objectType='macroVariableDefinition')}
    def add_programs_to_project(self, programPaths):
        """
        add_programs_to_project(programPaths)
        For a list of found paths to .sas files in the project directory, generate sasProgram objects. If any sasProgram
        objects contain an include object, where possible follow the path in the %include statement, parse file and add to
        the project's programs list.
        Does not parse the program if the path has already been visited.
        Parameters
        ----------
        programPaths : list
            List of discovered program paths in the project's directories.
        """
        for path in programPaths:
            if path not in [program.path for program in self.programs]:
                self.programs.append(sasProgram(path))
        includePaths = set(include.path for include in self.get_objects(objectType='include'))
        # Included programs that fail to parse are still appended (so the
        # set difference eventually empties and the loop terminates); they
        # are filtered out of self.programs afterwards.
        while includePaths.difference(set([program.path for program in self.programs])):
            for path in includePaths:
                self.programs.append(sasProgram(path))
            includePaths = set(include.path for include in self.get_objects(objectType='include'))
        self.programs = [program for program in self.programs if program.failedLoad != 1]
    def add_addtional_documentation_to_project(self):
        """
        Add any documenation found in the project as an attribute.
        Creates readme and documentation attributes.
        """
        mdPaths = self.path.glob('*.md')
        # Test for README in root directory
        readMe = self.path.joinpath('readme.md')
        if readMe.is_file():
            with self.path.joinpath('readme.md').open() as f:
                self.readme = f.read()
            # Demote every markdown heading one level so the readme nests
            # under the generated documentation's own headings.
            self.readme = re.sub(r'(^#+\s)',r'#\1',self.readme,flags=re.M)
        else:
            self.readme = ''
        docs = {}
        for path in mdPaths:
            with path.open() as f:
                docs[path.relative_to(self.path)] = f.read()
        self.documentation['additional'] = docs
    def summarise_project(self):
        """
        summarise_objects()
        Recursively loop through parsed objects in the project's programs, counting each object by object type.
        This function will count macros and the contents of said macros.
        Returns
        -------
        ObjCounter : Counter
            Collections Counter object for all sasdoc.object types found in all programs in the project.
        ProgramCount : dict
            Dictionary containing a object Counter for each program found in the project
        """
        objectCounter = Counter()
        programCounter = dict()
        for program in self.programs:
            cnt = program.summarise_objects()
            objectCounter += cnt
            programCounter[program] = dict(cnt)
        return objectCounter, programCounter
    def get_objects(self, objectType=None):
        """
        get_objects(objectType=None)
        Recursively loop through parsed programs in the project, yielding each sasdocs object. If the object
        is a macro object, enter and yield sas objects found in the macro's contents.
        This function will never return a macro object.
        If passed with optional objectType, this function will only yield objects of type equal to objectType.
        Parameters
        ----------
        objectType : str
            If not none, only yield objects where the object is of type objectType.
        Yields
        ------
        sasdocs.object
        """
        for program in self.programs:
            yield from program.get_objects(objectType=objectType)
    def get_extended_info(self):
        """
        get_extended_info
        Creates class attributes for information about the SAS project.
        .. code-block:: rst
            name : Filename of the SAS code,
            path : Full path to the SAS code,
            programs : Number of programs found in the project,
            summary : Counter object returned by summarise_objects,
            objects : Dictionary of Counter objects indexed by program
        """
        objSum, prgSum = self.summarise_project()
        self.name = self.path.name
        self.nPrograms = len(self.programs)
        self.summary = dict(objSum)
        self.objects = dict(prgSum)
        self.buildTime = "{:%Y-%m-%d %H:%M}".format(datetime.datetime.now())
    def generate_documentation(self, macroOnly=False):
        """
        generate_documentation(outputDirectory=None)
        Generate documentation for the project using the jinja2 templates
        """
        documentation = {}
        if not macroOnly:
            for program in self.programs:
                documentation[program.name] = program.generate_documentation()
        template = jinja2.Template(pkg_resources.read_text(templates, 'macro.md'))
        documentation['macros']=template.render(program=self)
        return documentation
| 464 | 0 | 29 |
0192ea60b65fdb8c659fb3e983ac74af93a1b3af | 18,146 | py | Python | web/JPS_EMISSIONS/python/latest/main.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 21 | 2021-03-08T01:58:25.000Z | 2022-03-09T15:46:16.000Z | web/JPS_EMISSIONS/python/latest/main.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 63 | 2021-05-04T15:05:30.000Z | 2022-03-23T14:32:29.000Z | web/JPS_EMISSIONS/python/latest/main.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 15 | 2021-03-08T07:52:03.000Z | 2022-03-29T04:46:20.000Z | # ## PART 2 - read the power plant database, select different type power plant, and reference plant emission for both ccs and baseline
import pandas as pd
import sys
import json
# import time
from screen_ccs import ccs_screen
from emission_ccs import emission_aggregation_ccs
from post_process_ccs import bau_ccs_post, ccs_results
# rootPath = 'C:/Users/WE/WebstormProjects/JPS_EMISSIONS/'
rootPath = json.loads(sys.argv[1])
# load the powerplant database
# df = pd.read_csv(rootPath + 'data/input/powerplant_database.csv', header='infer', sep=',')
# df = pd.read_csv(rootPath + 'data/output/result.csv', header='infer', sep=',')
from world_powerplants_sparql import WorldPowerPlantsSPARQL
from powerplant_sparql_sync import PowerplantSPARQLSync
wPSPARQL = WorldPowerPlantsSPARQL()
powerplants = wPSPARQL.getPowerplants()
# start_time = time.time()
listDict = []
for powerplant in powerplants:
pSPARQL = PowerplantSPARQLSync(powerplant)
powerplantInfo = pSPARQL.getPowerplantInfo()
listDict.append(powerplantInfo)
df = pd.DataFrame(listDict,
columns=['country', 'capacity_MW', 'primary_fuel',
'generation_technology', 'age', 'output_MWh', 'fuel_used'])
# print(df.dtypes)
# df.to_csv(rootPath + 'data/output/result.csv', index=False)
# print("{} seconds".format(time.time() - start_time))
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(df)
### 2.1 coal
### ultrasupercritical
# choose ultrasupercritical PC from the database
df_01 = df[df.generation_technology == 'ultrasupercritical']
# load the emission inventory table for baseline scenario
dfb_1 = pd.read_csv(rootPath + 'data/input/baseplant/base_ultrasupercritical_PC_coal.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_1 = pd.read_csv(rootPath + 'data/input/ccs/capture_ultrasupercritical_PC_coal.csv', header='infer', sep=',')
# add age column to the dataframe
df_tem = pd.read_csv(rootPath + 'data/input/ccs/ages.csv', header='infer', sep=',')
ages = df_tem.loc[:,('age')].values
df_1['age'] = ages
# ### supercritical anthracite
# choose anthracite and supercritical PC from the database
df_m = df[df.generation_technology == 'supercritical']
df_02 = df_m[df_m.primary_fuel == 'anthracite']
# load the emission inventory table for baseline scenario
dfb_2 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_anthracite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_2 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_anthracite.csv', header='infer', sep=',')
# add age column to the dataframe
df_2['age'] = ages
# ### supercritical bituminous and coal
# choose anthracite and supercritical PC from the database
df_03 = df_m[(df_m.primary_fuel == 'bituminous') | (df_m.primary_fuel == 'coal')]
# load the emission inventory table for baseline scenario
dfb_3 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_bituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_3 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_bituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_3['age'] = ages
# ### supercritical subbituminous
# choose anthracite and supercritical PC from the database
df_04 = df_m[df_m.primary_fuel == 'subbituminous']
# load the emission inventory table for baseline scenario
dfb_4 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_subbituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_4 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_subbituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_4['age'] = ages
# ### supercritical lignite
# choose anthracite and supercritical PC from the database
df_05 = df_m[df_m.primary_fuel == 'lignite']
# load the emission table
dfb_5 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_lignite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_5 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_lignite.csv', header='infer', sep=',')
# add age column to the dataframe
df_5['age'] = ages
# ### subcritical anthracite
# choose anthracite and subcritical PC from the database
df_n = df[df.generation_technology == 'subcritical']
df_06 = df_n[df_n.primary_fuel == 'anthracite']
# load the emission table
dfb_6 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_anthracite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_6 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_anthracite.csv', header='infer', sep=',')
# add age column to the dataframe
df_6['age'] = ages
# ### subcritical bituminous and coal
# choose anthracite and supercritical PC from the database
df_coal = df[df.fuel_used == 'coal']
df_07 = df_coal[(df_coal.primary_fuel == 'bituminous') | (df_coal.primary_fuel == 'coal') | (df_coal.generation_technology == 'cogeneration')]
# load the emission table
dfb_7 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_bituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_7 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_bituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_7['age'] = ages
# ### subcritical subbituminous
# choose anthracite and supercritical PC from the database
df_08 = df_n[df_n.primary_fuel == 'subbituminous']
# load the emission table
dfb_8 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_subbituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_8 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_subbituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_8['age'] = ages
# ### subcritical lignite
# choose anthracite and supercritical PC from the database
df_09 = df_n[df_n.primary_fuel == 'lignite']
# load the emission table
dfb_9 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_lignite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_9 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_lignite.csv', header='infer', sep=',')
# add age column to the dataframe
df_9['age'] = ages
# ### subcritical coal_biomass
# choose anthracite and supercritical PC from the database
df_010 = df_n[df_n.primary_fuel == 'coal_biomass']
# load the emission table
dfb_10 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_coal_biomass.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_10 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_coal_biomass.csv', header='infer', sep=',')
# add age column to the dataframe
df_10['age'] = ages
# ### 2.2 natural gas plant
# choose natural gas plant from the database
df_011 = df[df.primary_fuel == 'natural_gas']
# load the emission table
dfb_11 = pd.read_csv(rootPath + 'data/input/baseplant/base_NGCC.csv', header='infer', sep=',')
dfb_11 = dfb_11.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# load the emission inventory table for ccs scenario
df_11 = pd.read_csv(rootPath + 'data/input/ccs/NGCC_capture.csv', header='infer', sep=',')
df_11 = df_11.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# ### 2.3 oil
# choose oil plant from the database
df_012 = df[df.primary_fuel == 'oil']
# load the emission table
dfb_12 = pd.read_csv(rootPath + 'data/input/baseplant/base_NGCC_oil.csv', header='infer', sep=',')
dfb_12 = dfb_12.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# load the emission inventory table for ccs scenario
df_12 = pd.read_csv(rootPath + 'data/input/ccs/NGCC_capture_oil.csv', header='infer', sep=',')
df_12 = df_12.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# ## PART 3 - emission and economics analysis
# ### 3.1 get important parameter input from user interface (e.g. co2 tax and technology learning rate)
# get carbonprice from user interface, choose from 0, 10, 20, 50, 100
# carbonprice = 0
carbonprice = int(json.loads(sys.argv[2]))
# get technology learning rate from user interface, choose from high, middle, low
# coalccslearningrate = 'high'
# gasccslearningrate = 'high'
coalccslearningrate = sys.argv[3] # not needed for string
gasccslearningrate = sys.argv[3]
# ### 3.2 assume the newly added capacity share same decomposition as plant fleet between age 0 and 5
# get the plant list of age between 0 and 5
df_01 = df_01[(df_01.age >0) & (df_01.age <= 5)]
df_02 = df_02[(df_02.age >0) & (df_02.age <= 5)]
df_03 = df_03[(df_03.age >0) & (df_03.age <= 5)]
df_04 = df_04[(df_04.age >0) & (df_04.age <= 5)]
df_05 = df_05[(df_05.age >0) & (df_05.age <= 5)]
df_06 = df_06[(df_06.age >0) & (df_06.age <= 5)]
df_07 = df_07[(df_07.age >0) & (df_07.age <= 5)]
df_08 = df_08[(df_08.age >0) & (df_08.age <= 5)]
df_09 = df_09[(df_09.age >0) & (df_09.age <= 5)]
df_010 = df_010[(df_010.age >0) & (df_010.age <= 5)]
df_011 = df_011[(df_011.age >0) & (df_011.age <= 5)]
df_012 = df_012[(df_012.age >0) & (df_012.age <= 5)]
plant_list = [df_01, df_02, df_03, df_04, df_05, df_06, df_07, df_08, df_09, df_010, df_011, df_012]
emission_list_b = [dfb_1, dfb_2, dfb_3, dfb_4, dfb_5, dfb_6, dfb_7, dfb_8, dfb_9, dfb_10, dfb_11, dfb_12]
emission_list_ccs = [df_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10, df_11, df_12]
# ### 3.3 screen the newly added plant whether ccs is possible or not
# load the CCS learning rate for coal and gas
# ues the test learning rate at constant 0.8
# ccs_learning_coal = pd.read_csv(rootPath + 'data/input/ccs/ccs_technology_learning_test.csv', header='infer', sep=',')
ccs_learning_coal = pd.read_csv(rootPath + 'data/input/ccs/coal_ccs_technology_learning.csv', header='infer', sep=',')
ccs_learning_gas = pd.read_csv(rootPath + 'data/input/ccs/gas_ccs_technology_learning.csv', header='infer', sep=',')
# set the learning rate to high
ccs_learning_coal = ccs_learning_coal[ccs_learning_coal.learning_rate == coalccslearningrate]
ccs_learning_gas = ccs_learning_gas[ccs_learning_gas.learning_rate == gasccslearningrate]
# select year and plant LCOE decrease
ccs_learning_coal = ccs_learning_coal[['year','plant_LCOE_decrease']]
ccs_learning_coal = ccs_learning_coal.reset_index(drop=True)
ccs_learning_gas = ccs_learning_gas[['year','plant_LCOE_decrease']]
ccs_learning_gas = ccs_learning_gas.reset_index(drop=True)
# set the key parameters
# set the refernce cost for coal, gas, and oil plant without carbon price respectively
coal_reference_cost_base = 30
gas_reference_cost_base = 60
oil_reference_cost_base = 60
# set the carbon price
carbon_price = carbonprice
# set the capacity factor
capacity_factor = 0.75
# load the CCS table
ccs = pd.read_csv(rootPath + 'data/output/BAU_CCS/input_bau_ccs_learning_high.csv', header='infer', sep=',')
# set the refernce cost for coal, gas, and oil plant with carbon price respectively
coal_reference_cost = coal_reference_cost_base + (carbon_price * 1144) / (1500 * capacity_factor)
gas_reference_cost = gas_reference_cost_base + (carbon_price * 214) / (600 * capacity_factor)
oil_reference_cost = oil_reference_cost_base + (carbon_price * 214) / (600 * capacity_factor)
# calculate the vintage change of powerplant database during 35 years: from 2015 to 2050
import copy
w = {} # emission inventory of existing plant type j at year i
k = {} # ccs possible plant from newly retired plant type j at year i
plant = {} # newly built plant database for type j
plant_m = {} # exisitng plant database for type j
plant_n = {} # ccs plant database for type j
coal_emission_w = {} # overall coal emission inventory at year i
gas_emission_w = {} # overall gas emission inventory at year i
oil_emission_w = {} # overall oil emission inventory at year i
coal_emission_k = {} # overall coal emission inventory at year i
gas_emission_k = {} # overall gas emission inventory at year i
oil_emission_k = {} # overall oil emission inventory at year i
em_li_ccs = {} #emission list
# load the CCS table
ccs = pd.read_csv(rootPath + 'data/output/BAU_CCS/input_bau_ccs_learning_high.csv', header='infer', sep=',')
# open('bau_ccs_learn.txt', 'w').close()
for i in range(36):
w[i] = {}
k[i] = {}
#emission_list_ccs[i] = emission_list_ccs
em_li_ccs[i] = {}
M = copy.deepcopy(emission_list_ccs)
for j in range(12):
# get the newly built plant database
plant[j] = plant_list[j]
# set the reference cost to screen ccs possible plants based on fuel type
if j >= 0 and j <= 9:
reference_cost = coal_reference_cost
learning_rate = ccs_learning_coal.at[i,'plant_LCOE_decrease']
elif j == 10:
reference_cost = gas_reference_cost
learning_rate = ccs_learning_gas.at[i,'plant_LCOE_decrease']
else:
reference_cost = oil_reference_cost
learning_rate = ccs_learning_gas.at[i,'plant_LCOE_decrease']
em_li_ccs[i][j] = M[j]
# set the carbon price and capacity factor
em_li_ccs[i][j]['carbon_price_ton'] = carbon_price
em_li_ccs[i][j]['capacity_factor'] = capacity_factor
# set the capture plant LCOE at year i according to learning rate
em_li_ccs[i][j]['capture_plant_LCOE_MWh'] = em_li_ccs[i][j]['capture_plant_LCOE_MWh'] * (1-learning_rate)
em_li_ccs[i][j]['carbon_LCOE_MWh'] = (emission_list_ccs[j]['emission_rate_ton_h'] * em_li_ccs[i][j]['carbon_price_ton']) / (emission_list_ccs[j]['capacity_MW'] *em_li_ccs[i][j]['capacity_factor'])
em_li_ccs[i][j]['capture_carbon_LCOE_MWh'] = (emission_list_ccs[j]['capture_emission_rate_ton_h'] * em_li_ccs[i][j]['carbon_price_ton']) / (emission_list_ccs[j]['capacity_MW'] *em_li_ccs[i][j]['capacity_factor'])
em_li_ccs[i][j]['LCOE_MWh'] =em_li_ccs[i][j]['carbon_LCOE_MWh'] +em_li_ccs[i][j]['plant_LCOE_MWh']
em_li_ccs[i][j]['capture_LCOE_MWh'] =em_li_ccs[i][j]['capture_carbon_LCOE_MWh'] + em_li_ccs[i][j]['capture_plant_LCOE_MWh']
# screen ccs possible plant from newly built plant
w[i][j] = ccs_screen (plant[j], em_li_ccs[i][j], reference_cost, i)[0]
k[i][j] = ccs_screen (plant[j], em_li_ccs[i][j], reference_cost, i)[1]
# print("plant df: %s" %(j+1))
# text_file = open("bau_ccs_learn.txt", "a")
# text_file.write("plant type: %s, " %(j+1))
# text_file.write("newly built plant number: %s, capacity: %s " %(plant[j].shape[0], plant[j]['capacity_MW'].sum()))
# text_file.write("newly built ccs possible plant number: %s, capacity: %s " %(w[i][j].shape[0], w[i][j]['capacity_MW'].sum()))
# text_file.write('\n')
# aggregated ccs possible new coal power plant
coal_emission_w[i+2015] = pd.concat([w[i][0],w[i][1],w[i][2],w[i][3],w[i][4],w[i][5],w[i][6],w[i][7],w[i][8],w[i][9]],
ignore_index=True, sort=False)
coal_emission_w[i+2015]['fuel_used'] = 'coal'
# aggregated ccs not possible new coal power plant
coal_emission_k[i+2015] = pd.concat([k[i][0],k[i][1],k[i][2],k[i][3],k[i][4],k[i][5],k[i][6],k[i][7],k[i][8],k[i][9]],
ignore_index=True, sort=False)
coal_emission_k[i+2015]['fuel_used'] = 'coal'
# aggregated ccs possible new gas power plant
gas_emission_w[i+2015] = w[i][10]
gas_emission_w[i+2015]['fuel_used'] = 'gas'
# aggregated ccs not possible new gas power plant
gas_emission_k[i+2015] = k[i][10]
gas_emission_k[i+2015]['fuel_used'] = 'gas'
# aggregated ccs possible new oil power plant
oil_emission_w[i+2015] = w[i][11]
oil_emission_w[i+2015]['fuel_used'] = 'oil'
# aggregated ccs not possible new gas power plant
oil_emission_k[i+2015] = k[i][11]
oil_emission_k[i+2015]['fuel_used'] = 'oil'
# aggregate the emission of year i for different plant types
ccs = emission_aggregation_ccs(coal_emission_w[i+2015], coal_emission_k[i+2015], ccs, i)
ccs = emission_aggregation_ccs(gas_emission_w[i+2015], gas_emission_k[i+2015], ccs, i)
ccs = emission_aggregation_ccs(oil_emission_w[i+2015], oil_emission_k[i+2015], ccs, i)
# print (i+2015)
# text_file.write("year: %s" %(i+2015))
# text_file.write("###################### \n")
# post process the ccs table to calculate overall emission and select useful columns
ccs = bau_ccs_post(ccs)
ccs = ccs_results(ccs)
ccs.to_csv(rootPath + 'data/output/BAU_CCS/results_bau_ccs_learning_high.csv', index=False)
# ## 4 visualize the results
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
x = ccs.loc[:,('year')].values
y = ccs.loc[:,['coal_power_annual_emission_existing','gas_power_annual_emission_existing','oil_power_annual_emission_existing']].values
y = np.transpose(y)
sns.set_style("white")
sns.set_context("paper",font_scale=1)
plt.clf()
f, ax = plt.subplots(1, 1, figsize=(3,2.5))
# create your palette using html codes
pal = ["#F44027", "#CAF91E", "#2B8EF1", "#CAF91E"]
plt.stackplot(x,y, labels=['Coal','Natural Gas','Oil'], colors=pal)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.85])
ax.legend(loc='center left', bbox_to_anchor=(-0.03,1.05), ncol=3)
ax.set(xlabel='Year', ylabel='CO2 annual emission (Gt/year)')
ax.set_xlim([2015, 2050])
xticks = [2015,2020,2030,2040,2050]
yticks = np.arange(0, 22, 2.5)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
plt.savefig(rootPath + 'public/images/annual_baseline.png', bbox_inches='tight', dpi=500)
print(json.dumps("COMPLETE")) | 38.608511 | 327 | 0.705279 | # ## PART 2 - read the power plant database, select different type power plant, and reference plant emission for both ccs and baseline
import pandas as pd
import sys
import json
# import time
from screen_ccs import ccs_screen
from emission_ccs import emission_aggregation_ccs
from post_process_ccs import bau_ccs_post, ccs_results
# rootPath = 'C:/Users/WE/WebstormProjects/JPS_EMISSIONS/'
rootPath = json.loads(sys.argv[1])
# load the powerplant database
# df = pd.read_csv(rootPath + 'data/input/powerplant_database.csv', header='infer', sep=',')
# df = pd.read_csv(rootPath + 'data/output/result.csv', header='infer', sep=',')
from world_powerplants_sparql import WorldPowerPlantsSPARQL
from powerplant_sparql_sync import PowerplantSPARQLSync
wPSPARQL = WorldPowerPlantsSPARQL()
powerplants = wPSPARQL.getPowerplants()
# start_time = time.time()
listDict = []
for powerplant in powerplants:
pSPARQL = PowerplantSPARQLSync(powerplant)
powerplantInfo = pSPARQL.getPowerplantInfo()
listDict.append(powerplantInfo)
df = pd.DataFrame(listDict,
columns=['country', 'capacity_MW', 'primary_fuel',
'generation_technology', 'age', 'output_MWh', 'fuel_used'])
# print(df.dtypes)
# df.to_csv(rootPath + 'data/output/result.csv', index=False)
# print("{} seconds".format(time.time() - start_time))
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(df)
### 2.1 coal
### ultrasupercritical
# choose ultrasupercritical PC from the database
df_01 = df[df.generation_technology == 'ultrasupercritical']
# load the emission inventory table for baseline scenario
dfb_1 = pd.read_csv(rootPath + 'data/input/baseplant/base_ultrasupercritical_PC_coal.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_1 = pd.read_csv(rootPath + 'data/input/ccs/capture_ultrasupercritical_PC_coal.csv', header='infer', sep=',')
# add age column to the dataframe
df_tem = pd.read_csv(rootPath + 'data/input/ccs/ages.csv', header='infer', sep=',')
ages = df_tem.loc[:,('age')].values
df_1['age'] = ages
# ### supercritical anthracite
# choose anthracite and supercritical PC from the database
df_m = df[df.generation_technology == 'supercritical']
df_02 = df_m[df_m.primary_fuel == 'anthracite']
# load the emission inventory table for baseline scenario
dfb_2 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_anthracite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_2 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_anthracite.csv', header='infer', sep=',')
# add age column to the dataframe
df_2['age'] = ages
# ### supercritical bituminous and coal
# choose anthracite and supercritical PC from the database
df_03 = df_m[(df_m.primary_fuel == 'bituminous') | (df_m.primary_fuel == 'coal')]
# load the emission inventory table for baseline scenario
dfb_3 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_bituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_3 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_bituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_3['age'] = ages
# ### supercritical subbituminous
# choose anthracite and supercritical PC from the database
df_04 = df_m[df_m.primary_fuel == 'subbituminous']
# load the emission inventory table for baseline scenario
dfb_4 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_subbituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_4 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_subbituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_4['age'] = ages
# ### supercritical lignite
# choose anthracite and supercritical PC from the database
df_05 = df_m[df_m.primary_fuel == 'lignite']
# load the emission table
dfb_5 = pd.read_csv(rootPath + 'data/input/baseplant/base_supercritical_PC_lignite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_5 = pd.read_csv(rootPath + 'data/input/ccs/capture_supercritical_PC_lignite.csv', header='infer', sep=',')
# add age column to the dataframe
df_5['age'] = ages
# ### subcritical anthracite
# choose anthracite and subcritical PC from the database
df_n = df[df.generation_technology == 'subcritical']
df_06 = df_n[df_n.primary_fuel == 'anthracite']
# load the emission table
dfb_6 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_anthracite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_6 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_anthracite.csv', header='infer', sep=',')
# add age column to the dataframe
df_6['age'] = ages
# ### subcritical bituminous and coal
# choose anthracite and supercritical PC from the database
df_coal = df[df.fuel_used == 'coal']
df_07 = df_coal[(df_coal.primary_fuel == 'bituminous') | (df_coal.primary_fuel == 'coal') | (df_coal.generation_technology == 'cogeneration')]
# load the emission table
dfb_7 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_bituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_7 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_bituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_7['age'] = ages
# ### subcritical subbituminous
# choose anthracite and supercritical PC from the database
df_08 = df_n[df_n.primary_fuel == 'subbituminous']
# load the emission table
dfb_8 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_subbituminous.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_8 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_subbituminous.csv', header='infer', sep=',')
# add age column to the dataframe
df_8['age'] = ages
# ### subcritical lignite
# choose anthracite and supercritical PC from the database
df_09 = df_n[df_n.primary_fuel == 'lignite']
# load the emission table
dfb_9 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_lignite.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_9 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_lignite.csv', header='infer', sep=',')
# add age column to the dataframe
df_9['age'] = ages
# ### subcritical coal_biomass
# choose anthracite and supercritical PC from the database
df_010 = df_n[df_n.primary_fuel == 'coal_biomass']
# load the emission table
dfb_10 = pd.read_csv(rootPath + 'data/input/baseplant/base_subcritical_PC_coal_biomass.csv', header='infer', sep=',')
# load the emission inventory table for ccs scenario
df_10 = pd.read_csv(rootPath + 'data/input/ccs/capture_subcritical_PC_coal_biomass.csv', header='infer', sep=',')
# add age column to the dataframe
df_10['age'] = ages
# ### 2.2 natural gas plant
# choose natural gas plant from the database
df_011 = df[df.primary_fuel == 'natural_gas']
# load the emission table
dfb_11 = pd.read_csv(rootPath + 'data/input/baseplant/base_NGCC.csv', header='infer', sep=',')
dfb_11 = dfb_11.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# load the emission inventory table for ccs scenario
df_11 = pd.read_csv(rootPath + 'data/input/ccs/NGCC_capture.csv', header='infer', sep=',')
df_11 = df_11.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# ### 2.3 oil
# choose oil plant from the database
df_012 = df[df.primary_fuel == 'oil']
# load the emission table
dfb_12 = pd.read_csv(rootPath + 'data/input/baseplant/base_NGCC_oil.csv', header='infer', sep=',')
dfb_12 = dfb_12.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# load the emission inventory table for ccs scenario
df_12 = pd.read_csv(rootPath + 'data/input/ccs/NGCC_capture_oil.csv', header='infer', sep=',')
df_12 = df_12.sort_values(by=['age','capacity_MW'], ascending=[True,True])
# ## PART 3 - emission and economics analysis
# ### 3.1 get important parameter input from user interface (e.g. co2 tax and technology learning rate)
# get carbonprice from user interface, choose from 0, 10, 20, 50, 100
# carbonprice = 0
carbonprice = int(json.loads(sys.argv[2]))
# get technology learning rate from user interface, choose from high, middle, low
# coalccslearningrate = 'high'
# gasccslearningrate = 'high'
coalccslearningrate = sys.argv[3] # not needed for string
gasccslearningrate = sys.argv[3]
# ### 3.2 assume the newly added capacity share same decomposition as plant fleet between age 0 and 5
# get the plant list of age between 0 and 5
df_01 = df_01[(df_01.age >0) & (df_01.age <= 5)]
df_02 = df_02[(df_02.age >0) & (df_02.age <= 5)]
df_03 = df_03[(df_03.age >0) & (df_03.age <= 5)]
df_04 = df_04[(df_04.age >0) & (df_04.age <= 5)]
df_05 = df_05[(df_05.age >0) & (df_05.age <= 5)]
df_06 = df_06[(df_06.age >0) & (df_06.age <= 5)]
df_07 = df_07[(df_07.age >0) & (df_07.age <= 5)]
df_08 = df_08[(df_08.age >0) & (df_08.age <= 5)]
df_09 = df_09[(df_09.age >0) & (df_09.age <= 5)]
df_010 = df_010[(df_010.age >0) & (df_010.age <= 5)]
df_011 = df_011[(df_011.age >0) & (df_011.age <= 5)]
df_012 = df_012[(df_012.age >0) & (df_012.age <= 5)]
plant_list = [df_01, df_02, df_03, df_04, df_05, df_06, df_07, df_08, df_09, df_010, df_011, df_012]
emission_list_b = [dfb_1, dfb_2, dfb_3, dfb_4, dfb_5, dfb_6, dfb_7, dfb_8, dfb_9, dfb_10, dfb_11, dfb_12]
emission_list_ccs = [df_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10, df_11, df_12]
# ### 3.3 screen the newly added plant whether ccs is possible or not
# load the CCS learning rate for coal and gas
# ues the test learning rate at constant 0.8
# ccs_learning_coal = pd.read_csv(rootPath + 'data/input/ccs/ccs_technology_learning_test.csv', header='infer', sep=',')
ccs_learning_coal = pd.read_csv(rootPath + 'data/input/ccs/coal_ccs_technology_learning.csv', header='infer', sep=',')
ccs_learning_gas = pd.read_csv(rootPath + 'data/input/ccs/gas_ccs_technology_learning.csv', header='infer', sep=',')
# set the learning rate to high
ccs_learning_coal = ccs_learning_coal[ccs_learning_coal.learning_rate == coalccslearningrate]
ccs_learning_gas = ccs_learning_gas[ccs_learning_gas.learning_rate == gasccslearningrate]
# select year and plant LCOE decrease
ccs_learning_coal = ccs_learning_coal[['year','plant_LCOE_decrease']]
ccs_learning_coal = ccs_learning_coal.reset_index(drop=True)
ccs_learning_gas = ccs_learning_gas[['year','plant_LCOE_decrease']]
ccs_learning_gas = ccs_learning_gas.reset_index(drop=True)
# set the key parameters
# set the refernce cost for coal, gas, and oil plant without carbon price respectively
coal_reference_cost_base = 30
gas_reference_cost_base = 60
oil_reference_cost_base = 60
# set the carbon price
carbon_price = carbonprice
# set the capacity factor
capacity_factor = 0.75
# load the CCS table
ccs = pd.read_csv(rootPath + 'data/output/BAU_CCS/input_bau_ccs_learning_high.csv', header='infer', sep=',')
# set the refernce cost for coal, gas, and oil plant with carbon price respectively
coal_reference_cost = coal_reference_cost_base + (carbon_price * 1144) / (1500 * capacity_factor)
gas_reference_cost = gas_reference_cost_base + (carbon_price * 214) / (600 * capacity_factor)
oil_reference_cost = oil_reference_cost_base + (carbon_price * 214) / (600 * capacity_factor)
# calculate the vintage change of powerplant database during 35 years: from 2015 to 2050
import copy
w = {} # emission inventory of existing plant type j at year i
k = {} # ccs possible plant from newly retired plant type j at year i
plant = {} # newly built plant database for type j
plant_m = {} # exisitng plant database for type j
plant_n = {} # ccs plant database for type j
coal_emission_w = {} # overall coal emission inventory at year i
gas_emission_w = {} # overall gas emission inventory at year i
oil_emission_w = {} # overall oil emission inventory at year i
coal_emission_k = {} # overall coal emission inventory at year i
gas_emission_k = {} # overall gas emission inventory at year i
oil_emission_k = {} # overall oil emission inventory at year i
em_li_ccs = {} #emission list
# load the CCS table
ccs = pd.read_csv(rootPath + 'data/output/BAU_CCS/input_bau_ccs_learning_high.csv', header='infer', sep=',')
# open('bau_ccs_learn.txt', 'w').close()
for i in range(36):
w[i] = {}
k[i] = {}
#emission_list_ccs[i] = emission_list_ccs
em_li_ccs[i] = {}
M = copy.deepcopy(emission_list_ccs)
for j in range(12):
# get the newly built plant database
plant[j] = plant_list[j]
# set the reference cost to screen ccs possible plants based on fuel type
if j >= 0 and j <= 9:
reference_cost = coal_reference_cost
learning_rate = ccs_learning_coal.at[i,'plant_LCOE_decrease']
elif j == 10:
reference_cost = gas_reference_cost
learning_rate = ccs_learning_gas.at[i,'plant_LCOE_decrease']
else:
reference_cost = oil_reference_cost
learning_rate = ccs_learning_gas.at[i,'plant_LCOE_decrease']
em_li_ccs[i][j] = M[j]
# set the carbon price and capacity factor
em_li_ccs[i][j]['carbon_price_ton'] = carbon_price
em_li_ccs[i][j]['capacity_factor'] = capacity_factor
# set the capture plant LCOE at year i according to learning rate
em_li_ccs[i][j]['capture_plant_LCOE_MWh'] = em_li_ccs[i][j]['capture_plant_LCOE_MWh'] * (1-learning_rate)
em_li_ccs[i][j]['carbon_LCOE_MWh'] = (emission_list_ccs[j]['emission_rate_ton_h'] * em_li_ccs[i][j]['carbon_price_ton']) / (emission_list_ccs[j]['capacity_MW'] *em_li_ccs[i][j]['capacity_factor'])
em_li_ccs[i][j]['capture_carbon_LCOE_MWh'] = (emission_list_ccs[j]['capture_emission_rate_ton_h'] * em_li_ccs[i][j]['carbon_price_ton']) / (emission_list_ccs[j]['capacity_MW'] *em_li_ccs[i][j]['capacity_factor'])
em_li_ccs[i][j]['LCOE_MWh'] =em_li_ccs[i][j]['carbon_LCOE_MWh'] +em_li_ccs[i][j]['plant_LCOE_MWh']
em_li_ccs[i][j]['capture_LCOE_MWh'] =em_li_ccs[i][j]['capture_carbon_LCOE_MWh'] + em_li_ccs[i][j]['capture_plant_LCOE_MWh']
# screen ccs possible plant from newly built plant
w[i][j] = ccs_screen (plant[j], em_li_ccs[i][j], reference_cost, i)[0]
k[i][j] = ccs_screen (plant[j], em_li_ccs[i][j], reference_cost, i)[1]
# print("plant df: %s" %(j+1))
# text_file = open("bau_ccs_learn.txt", "a")
# text_file.write("plant type: %s, " %(j+1))
# text_file.write("newly built plant number: %s, capacity: %s " %(plant[j].shape[0], plant[j]['capacity_MW'].sum()))
# text_file.write("newly built ccs possible plant number: %s, capacity: %s " %(w[i][j].shape[0], w[i][j]['capacity_MW'].sum()))
# text_file.write('\n')
# aggregated ccs possible new coal power plant
coal_emission_w[i+2015] = pd.concat([w[i][0],w[i][1],w[i][2],w[i][3],w[i][4],w[i][5],w[i][6],w[i][7],w[i][8],w[i][9]],
ignore_index=True, sort=False)
coal_emission_w[i+2015]['fuel_used'] = 'coal'
# aggregated ccs not possible new coal power plant
coal_emission_k[i+2015] = pd.concat([k[i][0],k[i][1],k[i][2],k[i][3],k[i][4],k[i][5],k[i][6],k[i][7],k[i][8],k[i][9]],
ignore_index=True, sort=False)
coal_emission_k[i+2015]['fuel_used'] = 'coal'
# aggregated ccs possible new gas power plant
gas_emission_w[i+2015] = w[i][10]
gas_emission_w[i+2015]['fuel_used'] = 'gas'
# aggregated ccs not possible new gas power plant
gas_emission_k[i+2015] = k[i][10]
gas_emission_k[i+2015]['fuel_used'] = 'gas'
# aggregated ccs possible new oil power plant
oil_emission_w[i+2015] = w[i][11]
oil_emission_w[i+2015]['fuel_used'] = 'oil'
# aggregated ccs not possible new gas power plant
oil_emission_k[i+2015] = k[i][11]
oil_emission_k[i+2015]['fuel_used'] = 'oil'
# aggregate the emission of year i for different plant types
ccs = emission_aggregation_ccs(coal_emission_w[i+2015], coal_emission_k[i+2015], ccs, i)
ccs = emission_aggregation_ccs(gas_emission_w[i+2015], gas_emission_k[i+2015], ccs, i)
ccs = emission_aggregation_ccs(oil_emission_w[i+2015], oil_emission_k[i+2015], ccs, i)
# print (i+2015)
# text_file.write("year: %s" %(i+2015))
# text_file.write("###################### \n")
# post process the ccs table to calculate overall emission and select useful columns
ccs = bau_ccs_post(ccs)
ccs = ccs_results(ccs)
ccs.to_csv(rootPath + 'data/output/BAU_CCS/results_bau_ccs_learning_high.csv', index=False)
# ## 4 visualize the results
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
x = ccs.loc[:,('year')].values
y = ccs.loc[:,['coal_power_annual_emission_existing','gas_power_annual_emission_existing','oil_power_annual_emission_existing']].values
y = np.transpose(y)
sns.set_style("white")
sns.set_context("paper",font_scale=1)
plt.clf()
f, ax = plt.subplots(1, 1, figsize=(3,2.5))
# create your palette using html codes
pal = ["#F44027", "#CAF91E", "#2B8EF1", "#CAF91E"]
plt.stackplot(x,y, labels=['Coal','Natural Gas','Oil'], colors=pal)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.85])
ax.legend(loc='center left', bbox_to_anchor=(-0.03,1.05), ncol=3)
ax.set(xlabel='Year', ylabel='CO2 annual emission (Gt/year)')
ax.set_xlim([2015, 2050])
xticks = [2015,2020,2030,2040,2050]
yticks = np.arange(0, 22, 2.5)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
plt.savefig(rootPath + 'public/images/annual_baseline.png', bbox_inches='tight', dpi=500)
print(json.dumps("COMPLETE")) | 0 | 0 | 0 |
7eb78edf8a3f1ed58f4fc20d6c5b9eaba2c53668 | 633 | py | Python | Player.py | hypnotic322/ping_pong | 269d6936bf4cc4e49f7566224670e7d14d063136 | [
"CC0-1.0"
] | null | null | null | Player.py | hypnotic322/ping_pong | 269d6936bf4cc4e49f7566224670e7d14d063136 | [
"CC0-1.0"
] | null | null | null | Player.py | hypnotic322/ping_pong | 269d6936bf4cc4e49f7566224670e7d14d063136 | [
"CC0-1.0"
] | null | null | null | from pygame import *
from GameSprite import GameSprite
#класс-наследник для спрайта-игрока (управляется стрелками)
| 37.235294 | 64 | 0.595577 | from pygame import *
from GameSprite import GameSprite
#класс-наследник для спрайта-игрока (управляется стрелками)
class Player(GameSprite):
    """Paddle sprite controlled from the keyboard.

    ``update_r`` drives the right paddle with the UP/DOWN arrow keys and
    ``update_l`` drives the left paddle with the W/S keys.  Movement is
    clamped so the paddle stays inside the window.
    """

    # Pixel margins that keep the paddle on screen (top edge / bottom edge).
    _TOP_MARGIN = 5
    _BOTTOM_MARGIN = 80

    def _move(self, up_key, down_key):
        """Shift the paddle by ``self.speed`` while ``up_key``/``down_key``
        is held, without leaving the playing field."""
        keys = key.get_pressed()
        if keys[up_key] and self.rect.y > self._TOP_MARGIN:
            self.rect.y -= self.speed
        if keys[down_key] and self.rect.y < self.win_height - self._BOTTOM_MARGIN:
            self.rect.y += self.speed

    def update_r(self):
        """Right player's controls: arrow keys."""
        self._move(K_UP, K_DOWN)

    def update_l(self):
        """Left player's controls: W (up) and S (down)."""
        self._move(K_w, K_s)
| 434 | 4 | 77 |
4f71faa17e8d77725766cd57c42bf5bd25afc17a | 1,883 | py | Python | amos/data_mart_refresher/cron_jobs/test/test_jobs.py | amosproj/2020ws02-computer-vision-for-sights | 66641de397af77f16ee36aa9e860ca7249982cb1 | [
"MIT"
] | 2 | 2021-02-03T23:25:14.000Z | 2021-03-18T15:03:12.000Z | amos/data_mart_refresher/cron_jobs/test/test_jobs.py | amosproj/2020ws02-computer-vision-for-sights | 66641de397af77f16ee36aa9e860ca7249982cb1 | [
"MIT"
] | 2 | 2021-02-03T22:35:12.000Z | 2021-02-12T14:09:31.000Z | amos/data_mart_refresher/cron_jobs/test/test_jobs.py | amosproj/2020ws02-computer-vision-for-sights | 66641de397af77f16ee36aa9e860ca7249982cb1 | [
"MIT"
] | 1 | 2021-03-18T15:03:14.000Z | 2021-03-18T15:03:14.000Z | import pytest
from mock import patch
from time import sleep
from schedule import run_pending
@pytest.mark.parametrize("no_city_label_yet", [True, False])
@pytest.mark.parametrize(
"exec_sql_result, expected_post_called, expected_fct_result", [("Berlin", True, True), (None, False, False)]
)
| 34.87037 | 112 | 0.747212 | import pytest
from mock import patch
from time import sleep
from schedule import run_pending
def test_start_cron_job():
    """A callable scheduled via start_cron_job runs exactly once after its interval."""
    from cron_jobs.jobs import start_cron_job  # import here so environment patches apply first

    calls = []

    def _record_call():
        calls.append("hello!")

    start_cron_job(_record_call, 1)
    sleep(1)
    run_pending()
    assert calls == ["hello!"]
@pytest.mark.parametrize("no_city_label_yet", [True, False])
def test_trigger_city_image_labelling(no_city_label_yet):
    """Triggering the labelling job notifies the external ILS regardless of its reply."""
    from cron_jobs.jobs import trigger_city_image_labelling

    with patch("cron_jobs.jobs._notify_external_ils", return_value=no_city_label_yet) as notifier:
        # The mock must be untouched before, and hit after, the trigger call.
        assert not notifier.called
        trigger_city_image_labelling()
        assert notifier.called
def test_trigger_data_marts_refresh():
    """Refreshing the data marts issues the refresh-all-materialized-views SQL."""
    from cron_jobs.jobs import trigger_data_marts_refresh

    with patch("cron_jobs.jobs.exec_sql") as exec_sql:
        assert not exec_sql.called
        trigger_data_marts_refresh()
        assert exec_sql.called
        # First positional argument of the recorded call is the SQL statement.
        assert exec_sql.call_args[0][0] == "SELECT RefreshAllMaterializedViews('data_mart_layer')"
@pytest.mark.parametrize(
    "exec_sql_result, expected_post_called, expected_fct_result", [("Berlin", True, True), (None, False, False)]
)
def test_notify_external_ils_with_query_result(exec_sql_result, expected_post_called, expected_fct_result):
    """The external ILS is POSTed to only when the SQL query yields a city label."""
    from cron_jobs.jobs import _notify_external_ils

    with patch("cron_jobs.jobs.exec_sql", return_value=exec_sql_result) as exec_sql, patch(
        "cron_jobs.jobs.post"
    ) as post:
        assert not (exec_sql.called or post.called)
        outcome = _notify_external_ils("SELECT * from ABC", "https://www.com.de/")
        # The query always runs; the POST (and a truthy return) only follow a hit.
        assert (exec_sql.called and post.called) is expected_post_called
        assert outcome is expected_fct_result
| 1,492 | 0 | 90 |
c6343f689421655a94b4ca92868e51c7e29c3fd0 | 5,734 | py | Python | CybORG/CybORG/Tests/test_sim/test_Actions/test_MSFActions/test_SSHLoginExploit.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 18 | 2021-08-20T15:07:55.000Z | 2022-03-11T12:05:15.000Z | CybORG/CybORG/Tests/test_sim/test_Actions/test_MSFActions/test_SSHLoginExploit.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 7 | 2021-11-09T06:46:58.000Z | 2022-03-31T12:35:06.000Z | CybORG/CybORG/Tests/test_sim/test_Actions/test_MSFActions/test_SSHLoginExploit.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 13 | 2021-08-17T00:26:31.000Z | 2022-03-29T20:06:45.000Z | import inspect
from datetime import datetime
from ipaddress import IPv4Address
import pytest
from CybORG import CybORG
from CybORG.Shared.Actions import MSFAutoroute
from CybORG.Shared.Actions import SSHLoginExploit
from CybORG.Shared.Enums import SessionType, ProcessState, ProcessType, AppProtocol, Architecture, \
OperatingSystemDistribution, OperatingSystemKernelVersion, OperatingSystemType
from CybORG.Tests.EphemeralPort import LinuxEphemeralPort
| 51.657658 | 116 | 0.525985 | import inspect
from datetime import datetime
from ipaddress import IPv4Address
import pytest
from CybORG import CybORG
from CybORG.Shared.Actions import MSFAutoroute
from CybORG.Shared.Actions import SSHLoginExploit
from CybORG.Shared.Enums import SessionType, ProcessState, ProcessType, AppProtocol, Architecture, \
OperatingSystemDistribution, OperatingSystemKernelVersion, OperatingSystemType
from CybORG.Tests.EphemeralPort import LinuxEphemeralPort
def test_ssh_brute_force_success():
    """SSHLoginExploit against the Gateway's SSH port (22) should succeed and
    produce the full expected observation for both endpoints (the attacked
    Gateway and the attacking host).

    NOTE(review): the expected observation hard-codes scenario details
    (user 'pi'/'raspberry', Ubuntu gateway, session ID 1) — presumably fixed
    by Scenario1.yaml; verify against the scenario file if it changes.
    """
    # Locate Scenario1.yaml relative to the installed CybORG package
    # ([:-10] strips the trailing module filename — TODO confirm the offset).
    path = str(inspect.getfile(CybORG))
    path = path[:-10] + '/Shared/Scenarios/Scenario1.yaml'
    cyborg = CybORG(path, 'sim')
    agent = 'Red'
    initial_result = cyborg.get_observation(agent)
    # create ssh session on pretend pi host
    session = initial_result['Attacker']['Sessions'][0]['ID']
    k_ip_address = initial_result['Attacker']['Interface'][0]['IP Address']
    pp_ip_address = initial_result['Gateway']['Interface'][0]['IP Address']
    action = SSHLoginExploit(session=session, agent=agent, ip_address=pp_ip_address, port=22)
    # Expected observation keyed by IP: the Gateway side reports the SSH
    # service, the new MSF shell session, system and user info; the attacker
    # side reports its outbound connection (ephemeral local port).
    expected_result = {"success": True,
                       str(pp_ip_address): {'Interface': [{'IP Address': pp_ip_address}],
                                            'Processes': [{'Connections': [{'Application Protocol': AppProtocol.SSH,
                                                                            'local_address': pp_ip_address,
                                                                            'local_port': 22,
                                                                            'Status': ProcessState.OPEN}],
                                                           'Process Type': ProcessType.SSH},
                                                          {'Connections': [{'local_address': pp_ip_address,
                                                                            'local_port': 22,
                                                                            'remote_address': k_ip_address,
                                                                            'remote_port': LinuxEphemeralPort()}]}],
                                            'Sessions': [{'Agent': 'Red',
                                                          'ID': 1,
                                                          'Type': SessionType.MSF_SHELL,
                                                          'Username': 'pi'}],
                                            'System info': {'Architecture': Architecture.x64,
                                                            'Hostname': 'Gateway',
                                                            'OSDistribution': OperatingSystemDistribution.UBUNTU,
                                                            'OSType': OperatingSystemType.LINUX},
                                            'User Info': [{'Password': 'raspberry',
                                                           'UID': 1001,
                                                           'Username': 'pi'}]
                                            },
                       str(k_ip_address): {
                           'Interface': [{'IP Address': k_ip_address}],
                           'Processes': [{'Connections': [{'remote_address': pp_ip_address,
                                                           'remote_port': 22,
                                                           'local_address': k_ip_address,
                                                           'local_port': LinuxEphemeralPort()}]}]
                       }}
    results = cyborg.step(agent, action, skip_valid_action_check=True)
    assert results.observation == expected_result
def test_sim_execute_inactive():
    """An SSH exploit launched from an inactive session must fail."""
    scenario_path = str(inspect.getfile(CybORG))[:-10] + '/Shared/Scenarios/Scenario1.yaml'
    cyborg = CybORG(scenario_path, 'sim')
    _ = cyborg.get_observation('Red')
    sim_state = cyborg.environment_controller.state
    red_session = sim_state.sessions["Red"][0]
    red_session.active = False  # deactivate the session before exploiting
    exploit = SSHLoginExploit(session=red_session.ident, agent="Red",
                              ip_address=IPv4Address("10.0.0.2"), port=22)
    outcome = exploit.sim_execute(sim_state)
    assert outcome.get_dict() == {"success": False}
def test_sim_execute_dead():
    """An SSH exploit launched from a session whose process was killed must fail."""
    scenario_path = str(inspect.getfile(CybORG))[:-10] + '/Shared/Scenarios/Scenario1.yaml'
    cyborg = CybORG(scenario_path, 'sim')
    _ = cyborg.get_observation('Red')
    sim_state = cyborg.environment_controller.state
    red_session = sim_state.sessions["Red"][0]
    # Kill the process backing the session, making it dead.
    sim_state.remove_process(red_session.host, red_session.pid)
    exploit = SSHLoginExploit(session=red_session.ident, agent="Red",
                              ip_address=IPv4Address("10.0.0.2"), port=22)
    outcome = exploit.sim_execute(sim_state)
    assert outcome.get_dict() == {"success": False}
def test_ssh_brute_force_nacl_block():
    """Exploiting a port blocked by the network ACL yields a failed observation."""
    scenario_path = str(inspect.getfile(CybORG))[:-10] + '/Shared/Scenarios/Scenario1.yaml'
    cyborg = CybORG(scenario_path, 'sim')
    agent = 'Red'
    first_obs = cyborg.get_observation(agent)
    # create ssh session on pretend pi host
    session_id = first_obs['Attacker']['Sessions'][0]['ID']
    gateway_ip = first_obs['Gateway']['Interface'][0]['IP Address']
    # Port 23 is not reachable through the NACL, so the exploit must fail.
    action = SSHLoginExploit(session=session_id, agent=agent, ip_address=gateway_ip, port=23)
    results = cyborg.step(agent, action)
    assert results.observation == {"success": False}
| 5,181 | 0 | 92 |
2f5e7d4cf2a9e75233b5bc534bfedc2535861d5a | 15,723 | py | Python | src/Sentence_Extraction.py | German-NLP-Group/german-transformer-training | 0e5a53215a1837fdb10cf1a774fa0059069c6c25 | [
"BSD-2-Clause"
] | 15 | 2020-08-15T12:27:02.000Z | 2021-09-17T15:32:22.000Z | src/Sentence_Extraction.py | PhilipMay/german-transformer-training | 0e5a53215a1837fdb10cf1a774fa0059069c6c25 | [
"BSD-2-Clause"
] | 2 | 2020-09-24T12:38:53.000Z | 2021-03-05T17:31:30.000Z | src/Sentence_Extraction.py | PhilipMay/german-transformer-training | 0e5a53215a1837fdb10cf1a774fa0059069c6c25 | [
"BSD-2-Clause"
] | 1 | 2021-03-18T10:45:44.000Z | 2021-03-18T10:45:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 22:57:06 2020
@author: philipp
"""
# =============================================================================
# Data Prep
# =============================================================================
#with open('data/start.txt', 'r') as myfile:
# data = myfile.read()
#
#line_list = data.splitlines()
#Common Crawl Jsonn
import json
import os
import gzip
import pdb
from multiprocessing import Pool
import subprocess
import socket
from bs4 import BeautifulSoup
from tqdm import tqdm
# =============================================================================
# 2nd Approch nearly keep the raw data
# =============================================================================
from somajo import SoMaJo
from tqdm import tqdm
import ray
import pathlib
@ray.remote
def split(list_of_text, thread_number, TMP_DIR):
"""
Splits text in sentences
Writes line for line with leading space (for BPE)
Every document is separated by a free line
"""
print(os.path.join(TMP_DIR, "Splitted_{:05d}.txt".format(thread_number)))
outF = open(os.path.join(TMP_DIR, "Splitted_{:05d}.txt".format(thread_number)), "w")
tokenizer = SoMaJo("de_CMC", split_camel_case=True)
for part in list_of_text:
sentences = tokenizer.tokenize_text([part])
for sentence in sentences:
output = ""
for token in sentence:
#word_list = [token.text for token in sentence]
if (token.space_after and not token.last_in_sentence and not token.first_in_sentence):
output += (token.text + ' ')
elif token.first_in_sentence:
output += (' ' + token.text + ' ')
else:
#output = " ".join(word_list[:-1])
output += token.text
#output += word_list[-1]
#sen_out.append(output)
outF.write(output)
outF.write("\n")
outF.write("\n")
return thread_number
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def read_file(path, TRANSFER_DIR=None, skip_files=None):
"""
Read and extracts all Files in Input Path
"""
#Select right json keyword:
global TYPE
if TYPE == 'CC':
json_key = 'raw_content'
elif TYPE == 'LEGALDUMP':
json_key = 'content'
if path.startswith('gs'):
from google.cloud import storage
p = pathlib.Path(path)
storage_client = storage.Client()
file_list = storage_client.list_blobs(p.parts[1], prefix="/".join(p.parts[2:]))
#print(file_list))
else:
file_list = os.listdir(path)
for file in file_list:
print(file)
if file not in skip_files:
if path.startswith('gs'): #Copy files from storage to local HDD first
pdb.set_trace()
#bucket = storage_client.bucket(p.parts[1])
#blob = bucket.blob("/".join(p.parts[2:]))
file.download_to_file(TRANSFER_DIR)
print("Blob {} downloaded to {}.".format(p.parts[2:],TRANSFER_DIR))
file_path = os.path.join(TRANSFER_DIR, file)
else:
file_path = os.path.join(path, file)
if file.endswith('.gz'):
data = []
with gzip.open(file_path, 'rt', encoding='utf-8', errors='ignore') as zipfile:
if FIX_EXPORT_ERROR:
a = zipfile.readline()
a = a.split("{'url'")
a = [("{'url'" + item) for item in a]
for line in tqdm(a[1:]):
#for line in zipfile:
#pdb.set_trace()
#scraped = json.loads(line)
try:
scraped = eval(line)
except ValueError:
scraped = eval(line.replace(chr(0), ""))
except SyntaxError:
None
#Only take really good parts
if scraped["language_score"] > 0.98:
data.append(scraped[json_key])
else:
None
#print(scraped[json_key])
elif file.endswith('.txt'):
with open(file_path) as f:
raw_text = f.readlines()
data = [x.strip() for x in raw_text]
elif file.endswith('.json'):
data = []
for line in open(file_path, 'r'):
scraped = json.loads(line)
data.append(scraped[json_key])
if TYPE == 'LEGALDUMP': #HTML to Text Conversion
data = [BeautifulSoup(line).text for line in data]
yield data
else:
print('Skipping file', file)
if __name__ == '__main__':
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]=""
os.environ["GCLOUD_PROJECT"]=""
#Define source type here
TYPE = 'CC'
FIX_EXPORT_ERROR = True #Fix weird format which was accidently saved as dedup output
#For local Debugging Purposes
if socket.gethostname() == "philipp-desktop":
VOCAB_FILE = "/media/data/48_BERT/german-transformer-training/src/vocab.txt"
THREADS = 8
#Path from where the txt files are read in
IN_DIR = "/media/data/48_BERT/german-transformer-training/data/head"
#When downloaded from gcp this is the destination folder
#TRANSFER_DIR = "/media/data/48_BERT/german-transformer-training/data/download"
#TMP_DIR: Folder where cleaned and splitted texts are places
TMP_DIR = "/media/data/48_BERT/german-transformer-training/data/tmp"
#TF_OUT_DIR: tf_records file for training BERT
TF_OUT_DIR = "/media/data/48_BERT/german-transformer-training/data/tf_rec"
#PATH TO BERT SRC Code
BERT_PATH = "../../01_BERT_Code/bert/"
else: #For large scale execution on server
VOCAB_FILE = "/mnt/disks/data/mBERT/vocab.txt"
THREADS = os.cpu_count()
IN_DIR = "/mnt/disks/data/data_head_url"
TMP_DIR = "/mnt/disks/data/data_head_url_cleaned"
TF_OUT_DIR = "/home/philipp_reissel/data/data_head_url_mbert_tfrec"
BERT_PATH = "/home/philipp_reissel/bert"
#a = list(files)
global_index = 0
READ_LOG = True
if READ_LOG:
with open('Log_Sentence_Extraction.txt', 'r') as log_file:
logs = log_file.readlines()
global_index = int(logs[-2].split('Splitted_')[1][0:5])
skip_files = [line.strip() for line in logs[:-1] if 'tar.gz' in line]
files = read_file(IN_DIR, skip_files=skip_files)
pdb.set_trace()
for file in files:
ray.init(num_cpus=THREADS)
result_ids = []
#1e4 for json,gz else 1e5
chunksize = int(1e3) #Needs XX GB RAM per Core
for local_index, chunk in enumerate(chunks(file, chunksize)):
index = global_index + local_index
result_ids.append(split.remote(chunk, index, TMP_DIR))
results = ray.get(result_ids)
global_index += local_index + 1
ray.shutdown()
pdb.set_trace()
cmd_var = []
for index, file in enumerate(os.listdir(TMP_DIR)):
file_path = os.path.join(TMP_DIR, file)
#os.system(bert_pretraining_cmd.format(file_path, TF_OUT_DIR, str(index)))
cmd_var.append([BERT_PATH, file_path, TF_OUT_DIR, index])
pool = Pool(processes=THREADS) #22 * 14 mb *dupe=5 (1540mb) equals 30 GB of RAM
pool.map(run_command, cmd_var) #Output: 22*270 Mb = 5940 mb Output Files
pool.close()
pool.join()
"""
filenames = [f"data/tmp/Splitted_{thread_number}.txt" for thread_number in range(0,THREADS)]
with open('data/merged.txt', 'w') as outfile:
for fname in tqdm(filenames):
with open(fname) as infile:
for line in infile:
outfile.write(line)
"""
# Takes 4 Minutes on 22 Cores for 1e6 Wiki lines (300 mb)
# =============================================================================
# DEPRECATED !!!!
# =============================================================================
"""
sen_out = []
#outF = open("data/Splitted.txt", "w")
outF = open("/media/data/47_KISS/11_tika/Sentences.txt", "w")
tokenizer = SoMaJo("de_CMC", split_camel_case=True)
#for part in tqdm(raw_text):
if True:
part = data
sentences = tokenizer.tokenize_text([part])
for sentence in sentences:
output = ""
for token in sentence:
#word_list = [token.text for token in sentence]
if (token.space_after and not token.last_in_sentence and not token.first_in_sentence):
output += (token.text + ' ')
elif token.first_in_sentence:
output += (' ' + token.text + ' ')
else:
#output = " ".join(word_list[:-1])
output += token.text
#output += word_list[-1]
sen_out.append(output)
if len(output) > 3 and len(output) < 300 and not 'ID' in output:
outF.write(output.strip())
outF.write("\n")
#outF.write("\n")data = json.loads(json_str)
outF.close()
"""
# =============================================================================
# Copy pasted from BERT tokenization.py
# =============================================================================
import unicodedata
def _run_split_on_punc(text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
# =============================================================================
# SoMaJo taken from https://github.com/tsproisl/SoMaJo
# =============================================================================
if False:
from tqdm import tqdm
sen_out = []
tokenizer = SoMaJo("de_CMC", split_camel_case=True)
for part in tqdm(raw_text):
sentences = tokenizer.tokenize_text([part])
for sentence in sentences:
word_list = [token.text for token in sentence]
output = " ".join(word_list[:-1])
output += word_list[-1]
sen_out.append(output)
_is_punctuation(raw_text[-1][-1])
stripped = []
for index, part in tqdm(enumerate(sen_out)):
reordered = ""
for char in part:
if not _is_punctuation(char):
reordered += char
else:
reordered += char
break
reordered = reordered.strip()
stripped.append(reordered)
outF = open("data/Splitted.txt", "w")
for line in stripped:
# write line to output file
outF.write(line)
outF.write("\n\n")
outF.close()
# =============================================================================
# Spacy
# =============================================================================
if False:
import spacy
data = "Trinken soll nur, wer's verträgt Jetzt ist's aus mit euch"
nlp = spacy.load("de_core_news_sm")
doc = nlp(data)
for sent in doc.sents:
print(sent.text)
# =============================================================================
# Moses: Used in cc_net https://github.com/luismsgomes/mosestokenizer
# =============================================================================
if False:
from mosestokenizer import *
splitsents = MosesSentenceSplitter('de')
splitsents([data])
# =============================================================================
# https://github.com/bminixhofer/nnsplit
# =============================================================================
if False:
from nnsplit import NNSplit
splitter = NNSplit("de")
res = splitter.split([data])
# =============================================================================
# More advanced: Deepsegment: Does not support German
# =============================================================================
if False:
from deepsegment import DeepSegment
# The default language is 'en'
segmenter = DeepSegment('de')
with open('data/start.txt', 'r') as myfile:
data = myfile.read()
segmenter.segment('I am Batman i live in gotham')
# =============================================================================
# Huggingface tokenizer
# =============================================================================
if False:
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from pathlib import Path
tokenizer = ByteLevelBPETokenizer(
"data/german_old.json",
"data/german_old.txt",)
tokenizer = ByteLevelBPETokenizer(
"data/german_old.json",
"data/german_old.txt",)
tokenizer._tokenizer.post_processor = BertProcessing(
("</s>", tokenizer.token_to_id("</s>")),
("<s>", tokenizer.token_to_id("<s>")),)
tokenizer.enable_truncation(max_length=512)
#print(tokenizer.encode(sen_out[0]))
examples = []
lines = Path('data/Splitted.txt').read_text(encoding="utf-8").splitlines()
examples += [x.ids for x in tokenizer.encode_batch(lines)]
a = tokenizer.encode(sen_out[0])
a.tokens
| 34.180435 | 103 | 0.507092 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 22:57:06 2020
@author: philipp
"""
# =============================================================================
# Data Prep
# =============================================================================
#with open('data/start.txt', 'r') as myfile:
# data = myfile.read()
#
#line_list = data.splitlines()
#Common Crawl Jsonn
import json
import os
import gzip
import pdb
from multiprocessing import Pool
import subprocess
import socket
from bs4 import BeautifulSoup
from tqdm import tqdm
# =============================================================================
# 2nd Approch nearly keep the raw data
# =============================================================================
from somajo import SoMaJo
from tqdm import tqdm
import ray
import pathlib
@ray.remote
def split(list_of_text, thread_number, TMP_DIR):
    """Sentence-split a batch of documents and write them to a shard file.

    Each document in ``list_of_text`` is tokenized with SoMaJo. Every
    sentence is written on its own line; non-initial tokens of a sentence
    keep their separating spaces and each sentence starts with a leading
    space (helps BPE training). Documents are separated by a blank line.

    Parameters
    ----------
    list_of_text : list[str]
        Raw documents to split.
    thread_number : int
        Shard index; names the output file ``Splitted_XXXXX.txt``.
    TMP_DIR : str
        Directory that receives the shard file.

    Returns
    -------
    int
        The shard index, so callers can match results to inputs.
    """
    out_path = os.path.join(TMP_DIR, "Splitted_{:05d}.txt".format(thread_number))
    print(out_path)
    tokenizer = SoMaJo("de_CMC", split_camel_case=True)
    # 'with' guarantees the shard file is flushed and closed even if
    # tokenization raises (the original left the handle open).
    with open(out_path, "w") as outF:
        for part in list_of_text:
            sentences = tokenizer.tokenize_text([part])
            for sentence in sentences:
                output = ""
                for token in sentence:
                    if (token.space_after and not token.last_in_sentence and not token.first_in_sentence):
                        output += (token.text + ' ')
                    elif token.first_in_sentence:
                        output += (' ' + token.text + ' ')
                    else:
                        # Sentence-final token, or one with no trailing
                        # space: append without a separator.
                        output += token.text
                outF.write(output)
                outF.write("\n")
            outF.write("\n")
    return thread_number
def chunks(lst, n):
    """Yield successive n-sized chunks of *lst*; the last may be shorter."""
    stops = range(n, len(lst) + n, n)
    for hi in stops:
        yield lst[hi - n:hi]
def read_file(path, TRANSFER_DIR=None, skip_files=None):
    """Generator: read every file in *path* and yield its documents.

    For each file not listed in *skip_files*, yields a list of document
    strings extracted according to the file extension (.gz / .txt / .json).
    Relies on the module globals TYPE and FIX_EXPORT_ERROR.

    Parameters
    ----------
    path : str
        Local directory, or a 'gs://...' bucket URL (files are then copied
        into TRANSFER_DIR first).
    TRANSFER_DIR : str, optional
        Local download target for the GCS branch.
    skip_files : list[str], optional
        File names to skip. NOTE(review): membership is tested
        unconditionally, so passing None raises TypeError — callers must
        always supply a list.
    """
    #Select right json keyword:
    global TYPE
    if TYPE == 'CC':
        json_key = 'raw_content'
    elif TYPE == 'LEGALDUMP':
        json_key = 'content'
    # NOTE(review): any other TYPE leaves json_key undefined and fails
    # later with NameError.
    if path.startswith('gs'):
        from google.cloud import storage
        p = pathlib.Path(path)
        storage_client = storage.Client()
        file_list = storage_client.list_blobs(p.parts[1], prefix="/".join(p.parts[2:]))
        #print(file_list))
    else:
        file_list = os.listdir(path)
    for file in file_list:
        print(file)
        if file not in skip_files:
            if path.startswith('gs'): #Copy files from storage to local HDD first
                # Deliberate breakpoint left in the (unfinished) GCS path.
                pdb.set_trace()
                #bucket = storage_client.bucket(p.parts[1])
                #blob = bucket.blob("/".join(p.parts[2:]))
                file.download_to_file(TRANSFER_DIR)
                print("Blob {} downloaded to {}.".format(p.parts[2:],TRANSFER_DIR))
                file_path = os.path.join(TRANSFER_DIR, file)
            else:
                file_path = os.path.join(path, file)
            if file.endswith('.gz'):
                data = []
                with gzip.open(file_path, 'rt', encoding='utf-8', errors='ignore') as zipfile:
                    if FIX_EXPORT_ERROR:
                        # The dedup export accidentally wrote all records on
                        # one line as Python-repr dicts; re-split on the
                        # "{'url'" record prefix and re-attach it.
                        a = zipfile.readline()
                        a = a.split("{'url'")
                        a = [("{'url'" + item) for item in a]
                        for line in tqdm(a[1:]):
                            #for line in zipfile:
                            #pdb.set_trace()
                            #scraped = json.loads(line)
                            try:
                                # NOTE(review): eval() on scraped data can
                                # execute arbitrary code — dangerous.
                                scraped = eval(line)
                            except ValueError:
                                # Retry with NUL bytes stripped.
                                scraped = eval(line.replace(chr(0), ""))
                            except SyntaxError:
                                # NOTE(review): leaves `scraped` bound to the
                                # previous record (or unbound on the first),
                                # so the check below may duplicate or raise.
                                None
                            #Only take really good parts
                            if scraped["language_score"] > 0.98:
                                data.append(scraped[json_key])
                            else:
                                None
                                #print(scraped[json_key])
            elif file.endswith('.txt'):
                with open(file_path) as f:
                    raw_text = f.readlines()
                data = [x.strip() for x in raw_text]
            elif file.endswith('.json'):
                data = []
                for line in open(file_path, 'r'):
                    scraped = json.loads(line)
                    data.append(scraped[json_key])
            # NOTE(review): for an extension matching none of the branches,
            # `data` keeps the previous file's contents (or is unbound).
            if TYPE == 'LEGALDUMP': #HTML to Text Conversion
                data = [BeautifulSoup(line).text for line in data]
            yield data
        else:
            print('Skipping file', file)
def run_command(cmd_vars):
    """Run BERT's create_pretraining_data.py for one cleaned text shard.

    Parameters
    ----------
    cmd_vars : sequence
        (BERT_PATH, file_path, TF_OUT_DIR, index): path to the BERT source
        tree (with trailing slash), input shard path, TFRecord output
        directory, and the shard index used in the output file name.
        VOCAB_FILE is read from the module globals set in __main__.

    Returns
    -------
    None
        Failures are not raised, matching the original fire-and-forget
        behavior (the exit code is ignored).
    """
    BERT_PATH, file_path, TF_OUT_DIR, index = cmd_vars
    # Build an argv list instead of a shell=True command string: paths with
    # spaces or shell metacharacters no longer break (or get interpreted by)
    # the shell.
    argv = [
        "python3",
        "{}create_pretraining_data.py".format(BERT_PATH),
        "--input_file={}".format(file_path),
        "--output_file={}/tf_examples.tfrecord_{:05d}".format(TF_OUT_DIR, index),
        "--vocab_file={}".format(VOCAB_FILE),
        "--do_lower_case=True",
        "--max_seq_length=512",
        "--max_predictions_per_seq=20",
        "--masked_lm_prob=0.15",
        "--random_seed=12345",
        "--dupe_factor=5",
        "--do_whole_word_mask=True",
    ]
    # subprocess.run waits for completion, like the old Popen(...).wait().
    subprocess.run(argv, check=False)
    return None
if __name__ == '__main__':
    # GCP credentials are intentionally blanked for these runs.
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"]=""
    os.environ["GCLOUD_PROJECT"]=""
    #Define source type here
    TYPE = 'CC'
    FIX_EXPORT_ERROR = True #Fix weird format which was accidently saved as dedup output
    #For local Debugging Purposes
    if socket.gethostname() == "philipp-desktop":
        VOCAB_FILE = "/media/data/48_BERT/german-transformer-training/src/vocab.txt"
        THREADS = 8
        #Path from where the txt files are read in
        IN_DIR = "/media/data/48_BERT/german-transformer-training/data/head"
        #When downloaded from gcp this is the destination folder
        #TRANSFER_DIR = "/media/data/48_BERT/german-transformer-training/data/download"
        #TMP_DIR: Folder where cleaned and splitted texts are places
        TMP_DIR = "/media/data/48_BERT/german-transformer-training/data/tmp"
        #TF_OUT_DIR: tf_records file for training BERT
        TF_OUT_DIR = "/media/data/48_BERT/german-transformer-training/data/tf_rec"
        #PATH TO BERT SRC Code
        BERT_PATH = "../../01_BERT_Code/bert/"
    else: #For large scale execution on server
        VOCAB_FILE = "/mnt/disks/data/mBERT/vocab.txt"
        THREADS = os.cpu_count()
        IN_DIR = "/mnt/disks/data/data_head_url"
        TMP_DIR = "/mnt/disks/data/data_head_url_cleaned"
        TF_OUT_DIR = "/home/philipp_reissel/data/data_head_url_mbert_tfrec"
        BERT_PATH = "/home/philipp_reissel/bert"
    #a = list(files)
    # Resume support: recover the last shard index and the already-processed
    # archives from the log of a previous run.
    global_index = 0
    READ_LOG = True
    if READ_LOG:
        with open('Log_Sentence_Extraction.txt', 'r') as log_file:
            logs = log_file.readlines()
        global_index = int(logs[-2].split('Splitted_')[1][0:5])
        skip_files = [line.strip() for line in logs[:-1] if 'tar.gz' in line]
    # NOTE(review): if READ_LOG is False, `skip_files` is undefined and the
    # next call raises NameError.
    files = read_file(IN_DIR, skip_files=skip_files)
    # Deliberate breakpoint: operator confirms config before the long run.
    pdb.set_trace()
    for file in files:
        # One fresh ray cluster per input file; chunks are split in parallel.
        ray.init(num_cpus=THREADS)
        result_ids = []
        #1e4 for json,gz else 1e5
        chunksize = int(1e3) #Needs XX GB RAM per Core
        for local_index, chunk in enumerate(chunks(file, chunksize)):
            index = global_index + local_index
            result_ids.append(split.remote(chunk, index, TMP_DIR))
        results = ray.get(result_ids)
        global_index += local_index + 1
        ray.shutdown()
    pdb.set_trace()
    # Convert every cleaned shard to TFRecords with BERT's preprocessing
    # script, one subprocess per shard.
    cmd_var = []
    for index, file in enumerate(os.listdir(TMP_DIR)):
        file_path = os.path.join(TMP_DIR, file)
        #os.system(bert_pretraining_cmd.format(file_path, TF_OUT_DIR, str(index)))
        cmd_var.append([BERT_PATH, file_path, TF_OUT_DIR, index])
    pool = Pool(processes=THREADS) #22 * 14 mb *dupe=5 (1540mb) equals 30 GB of RAM
    pool.map(run_command, cmd_var) #Output: 22*270 Mb = 5940 mb Output Files
    pool.close()
    pool.join()
"""
filenames = [f"data/tmp/Splitted_{thread_number}.txt" for thread_number in range(0,THREADS)]
with open('data/merged.txt', 'w') as outfile:
for fname in tqdm(filenames):
with open(fname) as infile:
for line in infile:
outfile.write(line)
"""
# Takes 4 Minutes on 22 Cores for 1e6 Wiki lines (300 mb)
# =============================================================================
# DEPRECATED !!!!
# =============================================================================
"""
sen_out = []
#outF = open("data/Splitted.txt", "w")
outF = open("/media/data/47_KISS/11_tika/Sentences.txt", "w")
tokenizer = SoMaJo("de_CMC", split_camel_case=True)
#for part in tqdm(raw_text):
if True:
part = data
sentences = tokenizer.tokenize_text([part])
for sentence in sentences:
output = ""
for token in sentence:
#word_list = [token.text for token in sentence]
if (token.space_after and not token.last_in_sentence and not token.first_in_sentence):
output += (token.text + ' ')
elif token.first_in_sentence:
output += (' ' + token.text + ' ')
else:
#output = " ".join(word_list[:-1])
output += token.text
#output += word_list[-1]
sen_out.append(output)
if len(output) > 3 and len(output) < 300 and not 'ID' in output:
outF.write(output.strip())
outF.write("\n")
#outF.write("\n")data = json.loads(json_str)
outF.close()
"""
# =============================================================================
# Copy pasted from BERT tokenization.py
# =============================================================================
import unicodedata
def _run_split_on_punc(text):
    """Split *text* into pieces, breaking at every punctuation character.

    Each punctuation character becomes its own piece; consecutive
    non-punctuation characters are grouped together.
    """
    pieces = []
    begin_new_piece = True
    for ch in text:
        if _is_punctuation(ch):
            # Punctuation stands alone and forces a fresh piece after it.
            pieces.append([ch])
            begin_new_piece = True
        elif begin_new_piece:
            pieces.append([ch])
            begin_new_piece = False
        else:
            pieces[-1].append(ch)
    return ["".join(piece) for piece in pieces]
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
# =============================================================================
# SoMaJo taken from https://github.com/tsproisl/SoMaJo
# =============================================================================
if False:
from tqdm import tqdm
sen_out = []
tokenizer = SoMaJo("de_CMC", split_camel_case=True)
for part in tqdm(raw_text):
sentences = tokenizer.tokenize_text([part])
for sentence in sentences:
word_list = [token.text for token in sentence]
output = " ".join(word_list[:-1])
output += word_list[-1]
sen_out.append(output)
_is_punctuation(raw_text[-1][-1])
stripped = []
for index, part in tqdm(enumerate(sen_out)):
reordered = ""
for char in part:
if not _is_punctuation(char):
reordered += char
else:
reordered += char
break
reordered = reordered.strip()
stripped.append(reordered)
outF = open("data/Splitted.txt", "w")
for line in stripped:
# write line to output file
outF.write(line)
outF.write("\n\n")
outF.close()
# =============================================================================
# Spacy
# =============================================================================
if False:
import spacy
data = "Trinken soll nur, wer's verträgt Jetzt ist's aus mit euch"
nlp = spacy.load("de_core_news_sm")
doc = nlp(data)
for sent in doc.sents:
print(sent.text)
# =============================================================================
# Moses: Used in cc_net https://github.com/luismsgomes/mosestokenizer
# =============================================================================
if False:
from mosestokenizer import *
splitsents = MosesSentenceSplitter('de')
splitsents([data])
# =============================================================================
# https://github.com/bminixhofer/nnsplit
# =============================================================================
if False:
from nnsplit import NNSplit
splitter = NNSplit("de")
res = splitter.split([data])
# =============================================================================
# More advanced: Deepsegment: Does not support German
# =============================================================================
if False:
from deepsegment import DeepSegment
# The default language is 'en'
segmenter = DeepSegment('de')
with open('data/start.txt', 'r') as myfile:
data = myfile.read()
segmenter.segment('I am Batman i live in gotham')
# =============================================================================
# Huggingface tokenizer
# =============================================================================
if False:
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from pathlib import Path
tokenizer = ByteLevelBPETokenizer(
"data/german_old.json",
"data/german_old.txt",)
tokenizer = ByteLevelBPETokenizer(
"data/german_old.json",
"data/german_old.txt",)
tokenizer._tokenizer.post_processor = BertProcessing(
("</s>", tokenizer.token_to_id("</s>")),
("<s>", tokenizer.token_to_id("<s>")),)
tokenizer.enable_truncation(max_length=512)
#print(tokenizer.encode(sen_out[0]))
examples = []
lines = Path('data/Splitted.txt').read_text(encoding="utf-8").splitlines()
examples += [x.ids for x in tokenizer.encode_batch(lines)]
a = tokenizer.encode(sen_out[0])
a.tokens
| 807 | 0 | 23 |
eb8764ee4f65524733f6f787900731f8421530c7 | 2,433 | py | Python | samples/jax/tools/numpy_representation.py | fgolemo/segar | 8e21f8ee01bc72adb84dec7998b014d11d2b1fbe | [
"MIT"
] | 19 | 2022-02-16T18:45:12.000Z | 2022-03-25T10:42:19.000Z | samples/jax/tools/numpy_representation.py | microsoft/segar | 78463968238482ae035121504458dd0909107e10 | [
"MIT"
] | 4 | 2022-02-16T22:58:00.000Z | 2022-03-02T23:11:10.000Z | samples/jax/tools/numpy_representation.py | fgolemo/segar | 8e21f8ee01bc72adb84dec7998b014d11d2b1fbe | [
"MIT"
] | 5 | 2022-02-17T01:46:18.000Z | 2022-03-21T21:21:19.000Z | __author__ = "R Devon Hjelm, Bogdan Mazoure, Florian Golemo"
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI " \
"Institute"
__license__ = "MIT"
from flax.training.train_state import TrainState
from flax.training import checkpoints
import jax.numpy as jnp
import jax
import optax
import numpy as np
from models import TwinHeadModel
class NumpyRepresentation:
"""
A simple interface between pre-trained PPO policies (in Jax) and any other
framework, e.g. PyTorch, Tensorflow, etc. Converts all vectors to NumPy.
"""
def __call__(self,
x_: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Input:
x_: n_batch x resolution x resolution x 3, float32 array [0.,1.]
Output:
representation: n_batch x n_d
pi_logits: n_batch x n_actions
v: n_batch
"""
x_ = jnp.array(x_)
v, pi = self.train_state.apply_fn(self.train_state.params, x_)
z = self.train_state.apply_fn(self.train_state.params,
x_,
method=self.model_ppo.encode)
return np.array(z), np.array(pi.distribution.loc), np.array(v)
if __name__ == '__main__':
np_model = NumpyRepresentation(n_action=2)
obs = jnp.ones(shape=(64, 64, 3))
z, _, _ = np_model(obs)
print(z.shape)
| 36.313433 | 79 | 0.561036 | __author__ = "R Devon Hjelm, Bogdan Mazoure, Florian Golemo"
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI " \
"Institute"
__license__ = "MIT"
from flax.training.train_state import TrainState
from flax.training import checkpoints
import jax.numpy as jnp
import jax
import optax
import numpy as np
from models import TwinHeadModel
class NumpyRepresentation:
    """
    A simple interface between pre-trained PPO policies (in Jax) and any other
    framework, e.g. PyTorch, Tensorflow, etc. Converts all vectors to NumPy.
    """
    def __init__(self,
                 n_action: int,
                 resolution: int = 64,
                 model_dir: str = './model_weights'):
        """Build the TwinHead PPO model and restore its checkpoint.

        Parameters
        ----------
        n_action : int
            Dimensionality of the action space.
        resolution : int
            Height/width of the (square) observation images.
        model_dir : str
            Directory holding the Flax checkpoint to restore.
        """
        self.model_ppo = TwinHeadModel(action_dim=n_action,
                                       prefix_critic='vfunction',
                                       prefix_actor="policy",
                                       action_scale=1.)
        key = jax.random.PRNGKey(123)
        # Dummy batched observation, used only to initialize param shapes.
        state = jnp.ones(shape=(1, resolution, resolution, 3),
                         dtype=jnp.float32)
        tx = optax.chain(optax.clip_by_global_norm(2),
                         optax.adam(3e-4, eps=1e-5))
        params_model = self.model_ppo.init(key, state)
        train_state_ppo = TrainState.create(apply_fn=self.model_ppo.apply,
                                            params=params_model,
                                            tx=tx)
        # Use model_dir as-is: the previous "'./%s' % model_dir" formatting
        # double-prefixed the relative default and broke absolute paths.
        self.train_state = checkpoints.restore_checkpoint(
            model_dir, target=train_state_ppo)
    def __call__(self,
                 x_: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Input:
            x_: n_batch x resolution x resolution x 3, float32 array [0.,1.]
        Output:
            representation: n_batch x n_d
            pi_logits: n_batch x n_actions
            v: n_batch
        """
        x_ = jnp.array(x_)
        # Forward pass gives value and policy; encode() gives the latent.
        v, pi = self.train_state.apply_fn(self.train_state.params, x_)
        z = self.train_state.apply_fn(self.train_state.params,
                                      x_,
                                      method=self.model_ppo.encode)
        return np.array(z), np.array(pi.distribution.loc), np.array(v)
if __name__ == '__main__':
    # Smoke test: restore the checkpointed policy and embed one observation.
    np_model = NumpyRepresentation(n_action=2)
    # NOTE(review): __call__'s docstring describes a batch dimension
    # (n_batch x res x res x 3) but this passes a single rank-3 frame —
    # confirm the model tolerates unbatched input.
    obs = jnp.ones(shape=(64, 64, 3))
    z, _, _ = np_model(obs)
    print(z.shape)
| 982 | 0 | 27 |
e1251476b3853c06125fbc5d0140d6f62c8ca04f | 4,803 | py | Python | Scrapers/maverik_scraper.py | chanson02/ny-bb-scrapers | 4239872653d239ef19dc377fe0adb1f1548cd875 | [
"MIT"
] | null | null | null | Scrapers/maverik_scraper.py | chanson02/ny-bb-scrapers | 4239872653d239ef19dc377fe0adb1f1548cd875 | [
"MIT"
] | null | null | null | Scrapers/maverik_scraper.py | chanson02/ny-bb-scrapers | 4239872653d239ef19dc377fe0adb1f1548cd875 | [
"MIT"
] | null | null | null | import json
import time
import pdb
import re
import traceback
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.common import exceptions
driver = webdriver.Chrome('../chromedriver.exe')
driver.maximize_window()
driver.get("https://loyalty.maverik.com/locations/map")
chain = {"name": "Maverik", "stores": []}
default_delay = 2
time.sleep(3)
with open("../Outputs/maverik.json", "r") as f:
chain = json.load(f)
# Close popup
driver.find_element_by_class_name("alert-button-inner").click()
# Select 'Search by Store #'
store_number_type = driver.find_element_by_xpath(
"/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/div/ion-radio-group/div/div[4]/ion-radio")
shadow_click(store_number_type)
time.sleep(3)
for i in range(694, 999):
search_bar = driver.find_element_by_xpath(
"/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/div/div[2]/ion-searchbar/div/input")
search_bar.send_keys(Keys.BACKSPACE * 3)
search_bar.send_keys(str(i))
time.sleep(default_delay)
for location_index in range(len(get_locations())):
location = get_locations()[location_index]
scrape(location)
while len(get_clusters()) > 0:
cluster = get_clusters()[0]
try:
ActionChains(driver).move_to_element(
cluster).click().click().perform()
except:
pass
time.sleep(default_delay)
for location_index in range(len(get_locations())):
location = get_locations()[location_index]
scrape(location)
print("Done")
| 38.119048 | 897 | 0.736623 | import json
import time
import pdb
import re
import traceback
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.common import exceptions
# Launch Chrome and open the Maverik store-locator map.
driver = webdriver.Chrome('../chromedriver.exe')
driver.maximize_window()
driver.get("https://loyalty.maverik.com/locations/map")
# Accumulator for scraped stores: {"name": ..., "stores": [...]}.
chain = {"name": "Maverik", "stores": []}
# Seconds to pause after UI interactions so the SPA can re-render.
default_delay = 2
time.sleep(3)
# Resume from a previous run. NOTE(review): the default dict above is
# always overwritten here, and a missing file raises FileNotFoundError.
with open("../Outputs/maverik.json", "r") as f:
    chain = json.load(f)
def write_output():
    """Persist the scraped `chain` dict to the shared JSON output file."""
    with open("../Outputs/maverik.json", "w") as out_file:
        json.dump(chain, out_file, indent=2)
def shadow_click(element):
    """Click the <button> hidden inside *element*'s shadow DOM.

    Ionic components wrap their clickable button in a shadow root, which
    plain Selenium selectors cannot reach, so the root is fetched via JS.
    """
    root = driver.execute_script(
        'return arguments[0].shadowRoot', element)
    button = root.find_element_by_tag_name("button")
    button.click()
def scrape(location):
    """Open one store marker, extract its details, and record the store.

    Clicks *location* to open the detail panel, parses store name/number,
    address, and phone from fixed XPaths, appends the store to the global
    `chain` if unseen, then navigates back to the map.
    """
    ActionChains(driver).move_to_element(location).click().perform()
    time.sleep(default_delay)
    # Panel title contains the store name; digits in it are the store number.
    name = driver.find_element_by_xpath(
        "/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/app-desktop-detail/ion-header/ion-toolbar/ion-buttons/ion-title").text
    remote_id = re.sub("[^0-9]", "", name)
    # Detail block: lines 0-1 are the address, line 2 the phone number.
    data = driver.find_element_by_xpath(
        "/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/app-desktop-detail/ion-content/div/div[2]").text.split("\n")
    address = ", ".join(data[:2])
    # "(555) 123-4567" -> "555-123-4567".
    phone = data[2][1:].replace(") ", "-")
    store = {"address": address, "phone": phone, "id": remote_id}
    if store not in chain["stores"]:
        chain["stores"].append(store)
        print("Added", store)
    back_button()
def get_clusters():
    """Return the map <div> markers that represent clustered locations.

    Clusters are recognized by their background-image URL in the inline
    style. Any Selenium failure (stale DOM mid-refresh) yields [].
    """
    marker_png = "https://s3.us-west-2.amazonaws.com/imagesms-dev.maverik.com/media/ac/assets/cluster_markers/m1.png"
    try:
        found = []
        for div in driver.find_elements_by_tag_name("div"):
            if marker_png in div.get_attribute("style"):
                found.append(div)
        return found
    except Exception as e:
        print(e)
        return []
def get_locations():
    """Return the individual store-marker <img> elements on the map.

    Markers are matched by their exact inline-SVG data URI. Selenium
    errors (e.g. stale DOM during a map refresh) yield an empty list.
    """
    try:
        images = driver.find_elements_by_tag_name("img")
        location_images = [e for e in images if e.get_attribute("src") == "data:image/svg+xml;charset=UTF-8;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyMyAyNyI+PHBhdGggZD0iTTE3LjQ3MSwwYzMuMDUyLDAgNS41MjksMi40ODIgNS41MjksNS41NGwwLDExLjA4YzAsMy4wNTcgLTIuNDc3LDUuNTM5IC01LjUyOSw1LjUzOWwtMi44MjEsMGwtMy4xNSw0Ljg0MWwtMy4xNSwtNC44NDFsLTIuODIxLDBjLTMuMDUyLDAgLTUuNTI5LC0yLjQ4MiAtNS41MjksLTUuNTM5bDAsLTExLjA4YzAsLTMuMDU4IDIuNDc3LC01LjU0IDUuNTI5LC01LjU0bDExLjk0MiwwWiIgc3R5bGU9ImZpbGw6I2FmMjIzMzsiLz48Zz48cGF0aCBkPSJNNy44MjgsMTEuNzY0bDIuNjIzLC00LjQ3N2wyLjU4OCw0LjQ3N2wtNS4yMTEsMFoiIHN0eWxlPSJmaWxsOiNmZmY7Ii8+PHBhdGggZD0iTTQuNDcsMTEuNzY0bDQuMjY3LC03LjQxNWwxLjE4OSwyLjA2M2wtMy4wNzgsNS4zNTJsLTIuMzc4LDBaIiBzdHlsZT0iZmlsbDojZmZmOyIvPjxwYXRoIGQ9Ik0xNC4wMTgsMTEuNzY0bC0zLjA3OCwtNS4zNTJsMi4yMzksLTMuOTE3bDUuMzUxLDkuMjY5bC00LjUxMiwwWiIgc3R5bGU9ImZpbGw6I2ZmZjsiLz48L2c+PC9zdmc+"]
        return location_images
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made Ctrl-C unreliable.
        return []
def back_button():
    """Click the detail view's back button (inside a shadow DOM wrapper)."""
    try:
        # Renamed the local (was `back_button`) so it no longer shadows
        # this function's own name.
        button = driver.find_element_by_xpath(
            "/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/app-desktop-detail/ion-header/ion-toolbar/ion-buttons/ion-button")
        shadow_click(button)
    except Exception:
        # Narrowed from a bare `except:`; navigation failure is non-fatal.
        print("Back failed")
# --- Main scraping flow (top-level script code; relies on the globals
# ``driver``, ``default_delay``, ``shadow_click`` and ``scrape`` defined
# earlier in this file) ---
time.sleep(default_delay)
# Close popup
driver.find_element_by_class_name("alert-button-inner").click()
# Select 'Search by Store #'
store_number_type = driver.find_element_by_xpath(
    "/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/div/ion-radio-group/div/div[4]/ion-radio")
shadow_click(store_number_type)
time.sleep(3)
# Brute-force store numbers 694..998; each search repopulates the map.
for i in range(694, 999):
    search_bar = driver.find_element_by_xpath(
        "/html/body/app-root/ion-app/ion-router-outlet/app-locations/div/div[2]/div/div[2]/ion-searchbar/div/input")
    # Clear the previous (up to 3-digit) number before typing the next one.
    search_bar.send_keys(Keys.BACKSPACE * 3)
    search_bar.send_keys(str(i))
    time.sleep(default_delay)
    # Re-query get_locations() each iteration: scraping navigates away and
    # back, so previously found elements go stale.
    for location_index in range(len(get_locations())):
        location = get_locations()[location_index]
        scrape(location)
    # Clusters hide individual pins; double-click zooms in until every
    # cluster has been expanded into scrapeable location pins.
    while len(get_clusters()) > 0:
        cluster = get_clusters()[0]
        try:
            ActionChains(driver).move_to_element(
                cluster).click().click().perform()
        except:
            pass
        time.sleep(default_delay)
        for location_index in range(len(get_locations())):
            location = get_locations()[location_index]
            scrape(location)
print("Done")
| 2,749 | 0 | 138 |
86f134f3daad7dc53e0dde0677ce31a0c76762b2 | 3,321 | py | Python | k8s/monitoring/kubemon.py | open-covid-data/healthmap-gdo-temp | 5af5c9e2f7dcefa9039dc6b3a2e2c5094566fc2e | [
"MIT"
] | 5 | 2020-04-16T13:54:24.000Z | 2020-07-06T13:42:10.000Z | k8s/monitoring/kubemon.py | open-covid-data/healthmap-gdo-temp | 5af5c9e2f7dcefa9039dc6b3a2e2c5094566fc2e | [
"MIT"
] | 487 | 2020-04-16T00:56:09.000Z | 2020-07-27T09:09:29.000Z | k8s/monitoring/kubemon.py | open-covid-data/healthmap-gdo-temp | 5af5c9e2f7dcefa9039dc6b3a2e2c5094566fc2e | [
"MIT"
] | null | null | null | """
Monitor Global.health Kubernetes pods to see if any
are stuck in Pending status for a while. If such pods are
present, notify on Slack.
IMPORTANT:
The kubernetes Python package needs to be kept in sync
with the EKS Kubernetes version. The compatibility matrix
for the client Python library is at
https://github.com/kubernetes-client/python#compatibility
NOTE:
This will need to be updated once we move to separate
clusters for different environments.
"""
import os
import sys
import base64
import logging
import datetime
from typing import Any
from pathlib import Path
import requests
import kubernetes
SERVICES = ["curator", "data", "location"]
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def last_status(pod: Any) -> tuple[str, datetime.datetime]:
    """Return (type, last_transition_time) of the pod's newest condition.

    ``pod`` is a kubernetes ``V1Pod``-like object; each entry of
    ``pod.status.conditions`` exposes ``type`` and ``last_transition_time``.
    Raises IndexError if the pod has no conditions (unchanged behaviour).
    """
    # sorted() avoids mutating a copy via list()+.sort(). The original
    # annotations were wrong: the argument is an API object, not a dict,
    # and "(str, datetime)" is not valid typing syntax for a tuple.
    statuses = sorted(
        pod.status.conditions,
        key=lambda cond: cond.last_transition_time,
        reverse=True,
    )
    return statuses[0].type, statuses[0].last_transition_time
def is_service(pod: dict[str, Any]) -> bool:
    """Return True when the pod belongs to one of the monitored SERVICES."""
    # str.startswith accepts a tuple of prefixes, so one call covers all
    # monitored services ("curator-", "data-", "location-").
    prefixes = tuple(service + "-" for service in SERVICES)
    return pod.metadata.name.startswith(prefixes)
def get_pods() -> list[dict[str, Any]]:
    "Returns list of pods in the cluster"
    # NOTE(review): despite the annotation this returns a lazy ``filter``
    # object over kubernetes V1Pod objects, not a list of dicts; callers
    # only iterate it once, so behaviour is unaffected.
    v1 = kubernetes.client.CoreV1Api()
    # watch=False -> a one-shot listing across every namespace.
    return filter(is_service, v1.list_pod_for_all_namespaces(watch=False).items)
def is_not_ready(pod: dict[str, Any]) -> bool:
    """Return True unless the pod's most recent condition is "Ready"."""
    status_type, _ = last_status(pod)
    return status_type != "Ready"
def summary(pod: dict[str, Any]) -> str:
    """Return a one-line Markdown bullet describing the pod's latest condition."""
    cond_type, cond_time = last_status(pod)
    return "- *{}* ({}, {})".format(pod.metadata.name, cond_type, cond_time)
def notify(text: str):
    """Log the message and, when non-empty, post it to the Slack webhook.

    Exits the process with status 1 if the webhook rejects the request.
    """
    message = text.strip()
    if message:
        message = "⚠ Some pods are stuck in pending!\n" + message
    logging.info(message)
    # Guard clause: nothing to send, or no webhook configured.
    if not (SLACK_WEBHOOK_URL and message):
        return
    response = requests.post(SLACK_WEBHOOK_URL, json={"text": message})
    if response.status_code != 200:
        logging.error(
            f"Slack notification failed with {response.status_code}: {response.text}"
        )
        sys.exit(1)
if __name__ == "__main__":
    # NOTE(review): ensure_kubeconfig_exists is not defined anywhere in this
    # copy of the module -- presumably stripped; confirm against full source.
    ensure_kubeconfig_exists()
    config = kubernetes.config.load_config()
    # Pipeline: list pods -> keep non-Ready ones -> one summary line each ->
    # join and send to Slack (notify() is a no-op on an empty message).
    notify("\n".join(map(summary, filter(is_not_ready, get_pods()))))
| 31.628571 | 89 | 0.694369 | """
Monitor Global.health Kubernetes pods to see if any
are stuck in Pending status for a while. If such pods are
present, notify on Slack.
IMPORTANT:
The kubernetes Python package needs to be kept in sync
with the EKS Kubernetes version. The compatibility matrix
for the client Python library is at
https://github.com/kubernetes-client/python#compatibility
NOTE:
This will need to be updated once we move to separate
clusters for different environments.
"""
import os
import sys
import base64
import logging
import datetime
from typing import Any
from pathlib import Path
import requests
import kubernetes
def setup_logger():
    """Attach a stdout stream handler to the root logger at INFO level."""
    root = logging.getLogger()
    root.addHandler(logging.StreamHandler(sys.stdout))
    root.setLevel(logging.INFO)
SERVICES = ["curator", "data", "location"]
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def ensure_kubeconfig_exists():
    """Make sure ~/.kube/config is usable, creating it from the environment.

    Resolution order: an existing non-empty ~/.kube/config wins; otherwise a
    base64-encoded KUBECONFIG_VALUE env var is decoded and written out; in
    ENV=test the step is skipped; with no source at all the process aborts
    with exit status 1.
    """
    if (
        kubeconfig_path := Path.home() / ".kube" / "config"
    ).exists() and kubeconfig_path.stat().st_size > 0:
        logging.info("Using existing ~/.kube/config")
    elif KUBECONFIG_VALUE := os.getenv("KUBECONFIG_VALUE"):
        logging.info("Writing base64 encoded KUBECONFIG_VALUE to ~/.kube/config")
        # Parent directory may not exist on a fresh container image.
        (Path.home() / ".kube").mkdir(exist_ok=True)
        logging.info(kubeconfig_path)
        kubeconfig_path.write_text(base64.b64decode(KUBECONFIG_VALUE).decode("utf-8"))
    elif os.getenv("ENV") == "test":
        logging.info("Skipping kubernetes configuration in testing")
    else:
        logging.error("Neither ~/.kube/config or KUBECONFIG_VALUE exists, aborting")
        sys.exit(1)
def last_status(pod: Any) -> tuple[str, datetime.datetime]:
    """Return the (type, last_transition_time) pair of the newest condition.

    ``pod`` is a kubernetes ``V1Pod``-like object whose
    ``status.conditions`` entries carry ``type`` and ``last_transition_time``.
    """
    # Fixed annotations: the argument is an API object (not a dict) and the
    # return type must be spelled tuple[str, datetime.datetime] -- the old
    # "(str, datetime.datetime)" is not valid typing syntax.
    conditions = sorted(
        pod.status.conditions,
        key=lambda cond: cond.last_transition_time,
        reverse=True,
    )
    newest = conditions[0]
    return newest.type, newest.last_transition_time
def is_service(pod: dict[str, Any]) -> bool:
    """Return True when the pod name carries a monitored service prefix."""
    name = pod.metadata.name
    for service in SERVICES:
        if name.startswith(service + "-"):
            return True
    return False
def get_pods() -> list[dict[str, Any]]:
    "Returns list of pods in the cluster"
    # NOTE(review): actually yields a lazy ``filter`` of V1Pod objects rather
    # than a list of dicts as annotated; the single downstream iteration in
    # __main__ is unaffected.
    v1 = kubernetes.client.CoreV1Api()
    # watch=False performs a one-shot listing over all namespaces.
    return filter(is_service, v1.list_pod_for_all_namespaces(watch=False).items)
def is_not_ready(pod: dict[str, Any]) -> bool:
    """Return True when the most recent condition type is anything but Ready."""
    latest_type = last_status(pod)[0]
    return latest_type != "Ready"
def summary(pod: dict[str, Any]) -> str:
    """One-line, human-readable status bullet for a pod."""
    latest = last_status(pod)
    return f"- *{pod.metadata.name}* ({latest[0]}, {latest[1]})"
def notify(text: str):
    "Notifies Slack with message"
    # Empty input (every pod healthy) must stay empty so the Slack post is
    # skipped below; only a non-empty report gets the warning banner.
    text = text.strip()
    if text:
        text = "⚠ Some pods are stuck in pending!\n" + text
    logging.info(text)
    if SLACK_WEBHOOK_URL and text:
        response = requests.post(SLACK_WEBHOOK_URL, json={"text": text})
        if response.status_code != 200:
            # A rejected webhook call is treated as fatal for the monitor run.
            logging.error(
                f"Slack notification failed with {response.status_code}: {response.text}"
            )
            sys.exit(1)
if __name__ == "__main__":
    # Prepare ~/.kube/config, load it, then report all non-Ready pods:
    # list pods -> filter unhealthy -> one summary bullet per pod ->
    # notify() (which no-ops when everything is healthy).
    ensure_kubeconfig_exists()
    config = kubernetes.config.load_config()
    notify("\n".join(map(summary, filter(is_not_ready, get_pods()))))
| 873 | 0 | 46 |
ec4ea648bbf85751e473866e84f114644fa23de4 | 826 | py | Python | setup.py | ninjaotoko/djblog_wordpress_importer | 2c59310d5b69e5cad4063656137d330918898bd8 | [
"BSD-3-Clause"
] | null | null | null | setup.py | ninjaotoko/djblog_wordpress_importer | 2c59310d5b69e5cad4063656137d330918898bd8 | [
"BSD-3-Clause"
] | null | null | null | setup.py | ninjaotoko/djblog_wordpress_importer | 2c59310d5b69e5cad4063656137d330918898bd8 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf8 -*-
#
# Copyright (c) 2014 Xavier Lesa <xavierlesa@gmail.com>.
# All rights reserved.
# Distributed under the BSD license, see LICENSE
from setuptools import setup, find_packages
import sys, os
# Packaging metadata for the djblog WordPress importer.
setup(name='djblog_wordpress_importer',
      version='0.1',
      description="Herramienta para migrar wordpress a djblog, a través de wordpress-json",
      # Exclude scaffolding and test packages from the distribution.
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      install_requires=[
          'wordpress-json',
      ],
      # djblog itself is pulled straight from GitHub.
      dependency_links=[
          'git+https://github.com/ninjaotoko/djblog.git',
      ],
      zip_safe=False,
      author='Xavier Lesa',
      author_email='xavierlesa@gmail.com',
      url='http://github.com/ninjaotoko/djblog_wordpress_importer'
      )
| 33.04 | 93 | 0.646489 | # -*- coding:utf8 -*-
#
# Copyright (c) 2014 Xavier Lesa <xavierlesa@gmail.com>.
# All rights reserved.
# Distributed under the BSD license, see LICENSE
from setuptools import setup, find_packages
import sys, os
# Packaging metadata for the djblog WordPress importer.
setup(name='djblog_wordpress_importer',
      version='0.1',
      description="Herramienta para migrar wordpress a djblog, a través de wordpress-json",
      # Exclude scaffolding and test packages from the distribution.
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      install_requires=[
          'wordpress-json',
      ],
      # djblog itself is pulled straight from GitHub.
      dependency_links=[
          'git+https://github.com/ninjaotoko/djblog.git',
      ],
      zip_safe=False,
      author='Xavier Lesa',
      author_email='xavierlesa@gmail.com',
      url='http://github.com/ninjaotoko/djblog_wordpress_importer'
      )
| 0 | 0 | 0 |
001f7b5ae0aac0c2f4c3ee4e5d5ef0b0ed859700 | 723 | py | Python | pythonmorsels/tags_equal/tags_equal.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | pythonmorsels/tags_equal/tags_equal.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | pythonmorsels/tags_equal/tags_equal.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | import shlex
| 23.322581 | 45 | 0.608575 | import shlex
def parse_attrs(tag_dict: dict) -> dict:
    """Normalize ``tag_dict["attrs"]`` from "key=value" strings to a dict.

    Values are kept as (possibly empty) lists so valueless attributes
    ("checked") and "key=value" pairs compare consistently; on duplicate
    keys the first occurrence wins. Mutates and returns ``tag_dict``.
    """
    attrs = {}
    for element in tag_dict["attrs"]:
        # Split on the first '=' only, so values may themselves contain '='.
        name, *rest = element.split("=", 1)
        name = name.strip()
        if name not in attrs:  # was `not name in` -- fixed to the idiom
            attrs[name] = rest
    tag_dict["attrs"] = attrs
    return tag_dict


def parse_tag(tag: str) -> dict:
    """Parse one tag string like "<img width=100>" into {"name", "attrs"}.

    The tag is casefolded (comparison is case-insensitive) and tokenized
    with shlex so quoted attribute values may contain spaces.
    """
    tokens = shlex.split(tag[1:-1].casefold())
    tag_data = {"name": tokens[0].strip(), "attrs": tokens[1:]}
    return parse_attrs(tag_data)


def tags_equal(tag1: str, tag2: str) -> bool:
    """Return True when two tags match, ignoring case, attribute order,
    surrounding quotes, and duplicate attributes (first wins)."""
    return parse_tag(tag1) == parse_tag(tag2)
| 638 | 0 | 69 |
7c33daca24e569da0a231459053482e72ffd3c2e | 10,607 | py | Python | cogdl/data/data.py | xssstory/cogdl | ae8de495c365993f19f04774f083960fd282c2a3 | [
"MIT"
] | null | null | null | cogdl/data/data.py | xssstory/cogdl | ae8de495c365993f19f04774f083960fd282c2a3 | [
"MIT"
] | null | null | null | cogdl/data/data.py | xssstory/cogdl | ae8de495c365993f19f04774f083960fd282c2a3 | [
"MIT"
] | null | null | null | import re
import torch
import numpy as np
import scipy.sparse as sparse
from torch_geometric.data import Data as pyg_data
class Data(object):
r"""A plain old python object modeling a single graph with various
(optional) attributes:
Args:
x (Tensor, optional): Node feature matrix with shape :obj:`[num_nodes,
num_node_features]`. (default: :obj:`None`)
edge_index (LongTensor, optional): Graph connectivity in COO format
with shape :obj:`[2, num_edges]`. (default: :obj:`None`)
edge_attr (Tensor, optional): Edge feature matrix with shape
:obj:`[num_edges, num_edge_features]`. (default: :obj:`None`)
y (Tensor, optional): Graph or node targets with arbitrary shape.
(default: :obj:`None`)
pos (Tensor, optional): Node position matrix with shape
:obj:`[num_nodes, num_dimensions]`. (default: :obj:`None`)
The data object is not restricted to these attributes and can be extented
by any other additional data.
"""
@staticmethod
def from_dict(dictionary):
r"""Creates a data object from a python dictionary."""
data = Data()
for key, item in dictionary.items():
data[key] = item
return data
def __getitem__(self, key):
r"""Gets the data of the attribute :obj:`key`."""
return getattr(self, key)
def __setitem__(self, key, value):
"""Sets the attribute :obj:`key` to :obj:`value`."""
setattr(self, key, value)
@property
def keys(self):
r"""Returns all names of graph attributes."""
keys = [key for key in self.__dict__.keys() if self[key] is not None]
keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']
return keys
def __len__(self):
r"""Returns the number of all present attributes."""
return len(self.keys)
def __contains__(self, key):
r"""Returns :obj:`True`, if the attribute :obj:`key` is present in the
data."""
return key in self.keys
def __iter__(self):
r"""Iterates over all present attributes in the data, yielding their
attribute names and content."""
for key in sorted(self.keys):
yield key, self[key]
def __call__(self, *keys):
r"""Iterates over all attributes :obj:`*keys` in the data, yielding
their attribute names and content.
If :obj:`*keys` is not given this method will iterative over all
present attributes."""
for key in sorted(self.keys) if not keys else keys:
if self[key] is not None:
yield key, self[key]
def cat_dim(self, key, value):
r"""Returns the dimension in which the attribute :obj:`key` with
content :obj:`value` gets concatenated when creating batches.
.. note::
This method is for internal use only, and should only be overridden
if the batch concatenation process is corrupted for a specific data
attribute.
"""
# `*index*` and `*face*` should be concatenated in the last dimension,
# everything else in the first dimension.
return -1 if bool(re.search("(index|face)", key)) else 0
def __inc__(self, key, value):
r""""Returns the incremental count to cumulatively increase the value
of the next attribute of :obj:`key` when creating batches.
.. note::
This method is for internal use only, and should only be overridden
if the batch concatenation process is corrupted for a specific data
attribute.
"""
# Only `*index*` and `*face*` should be cumulatively summed up when
# creating batches.
return self.num_nodes if bool(re.search('(index|face)', key)) else 0
@property
def num_edges(self):
r"""Returns the number of edges in the graph."""
for key, item in self("edge_index", "edge_attr"):
return item.size(self.cat_dim(key, item))
return None
@property
def num_features(self):
r"""Returns the number of features per node in the graph."""
return 1 if self.x.dim() == 1 else self.x.size(1)
@property
@num_nodes.setter
def is_coalesced(self):
r"""Returns :obj:`True`, if edge indices are ordered and do not contain
duplicate entries."""
row, col = self.edge_index
index = self.num_nodes * row + col
return row.size(0) == torch.unique(index).size(0)
def apply(self, func, *keys):
r"""Applies the function :obj:`func` to all attributes :obj:`*keys`.
If :obj:`*keys` is not given, :obj:`func` is applied to all present
attributes.
"""
for key, item in self(*keys):
self[key] = func(item)
return self
def contiguous(self, *keys):
r"""Ensures a contiguous memory layout for all attributes :obj:`*keys`.
If :obj:`*keys` is not given, all present attributes are ensured to
have a contiguous memory layout."""
return self.apply(lambda x: x.contiguous(), *keys)
def to(self, device, *keys):
r"""Performs tensor dtype and/or device conversion to all attributes
:obj:`*keys`.
If :obj:`*keys` is not given, the conversion is applied to all present
attributes."""
return self.apply(lambda x: x.to(device), *keys)
def subgraph(self, node_idx):
"""Return the induced node subgraph."""
if self.__adj is None:
self._build_adj_()
if isinstance(node_idx, torch.Tensor):
node_idx = node_idx.cpu().numpy()
node_idx = np.unique(node_idx)
adj = self.__adj[node_idx, :][:, node_idx]
adj_coo = sparse.coo_matrix(adj)
row, col = adj_coo.row, adj_coo.col
edge_attr = torch.from_numpy(adj_coo.data).to(self.x.device)
edge_index = torch.from_numpy(np.concatenate([row, col], axis=0)).to(self.x.device)
keys = self.keys
attrs = {key: self[key][node_idx] for key in keys if "edge" not in key}
attrs["edge_attr"] = edge_attr
attrs["edge_index"] = edge_index
return Data(**attrs)
def edge_subgraph(self, edge_idx):
"""Return the induced edge subgraph."""
if isinstance(edge_idx, torch.Tensor):
edge_idx = edge_idx.cpu().numpy()
edge_index = self.edge_index.T[edge_idx].cpu().numpy()
node_idx = np.unique(edge_index)
idx_dict = {val: key for key, val in enumerate(node_idx)}
func = lambda x: [idx_dict[x[0]], idx_dict[x[1]]]
edge_index = np.array([func(x) for x in edge_index]).transpose()
edge_index = torch.from_numpy(edge_index).to(self.x.device)
edge_attr = self.edge_attr[edge_idx]
keys = self.keys
attrs = {key: self[key][node_idx] for key in keys if "edge" not in key}
attrs["edge_attr"] = edge_attr
attrs["edge_index"] = edge_index
return Data(**attrs)
@staticmethod
| 38.017921 | 104 | 0.601112 | import re
import torch
import numpy as np
import scipy.sparse as sparse
from torch_geometric.data import Data as pyg_data
class Data(object):
    r"""A plain old python object modeling a single graph with various
    (optional) attributes:
    Args:
        x (Tensor, optional): Node feature matrix with shape :obj:`[num_nodes,
            num_node_features]`. (default: :obj:`None`)
        edge_index (LongTensor, optional): Graph connectivity in COO format
            with shape :obj:`[2, num_edges]`. (default: :obj:`None`)
        edge_attr (Tensor, optional): Edge feature matrix with shape
            :obj:`[num_edges, num_edge_features]`. (default: :obj:`None`)
        y (Tensor, optional): Graph or node targets with arbitrary shape.
            (default: :obj:`None`)
        pos (Tensor, optional): Node position matrix with shape
            :obj:`[num_nodes, num_dimensions]`. (default: :obj:`None`)
    The data object is not restricted to these attributes and can be extented
    by any other additional data.
    """
    def __init__(self, x=None, edge_index=None, edge_attr=None, y=None, pos=None, **kwargs):
        self.x = x
        self.edge_index = edge_index
        self.edge_attr = edge_attr
        self.y = y
        self.pos = pos
        # Extra keyword arguments become ad-hoc attributes; 'num_nodes' is
        # special-cased into the dunder-style cache used by the setter below.
        for key, item in kwargs.items():
            if key == 'num_nodes':
                self.__num_nodes__ = item
            else:
                self[key] = item
        # Lazily-built scipy CSR adjacency; created on first use by
        # _build_adj_() for subgraph/sampling operations.
        self.__adj = None
    @staticmethod
    def from_dict(dictionary):
        r"""Creates a data object from a python dictionary."""
        data = Data()
        for key, item in dictionary.items():
            data[key] = item
        return data
    def __getitem__(self, key):
        r"""Gets the data of the attribute :obj:`key`."""
        return getattr(self, key)
    def __setitem__(self, key, value):
        """Sets the attribute :obj:`key` to :obj:`value`."""
        setattr(self, key, value)
    @property
    def keys(self):
        r"""Returns all names of graph attributes."""
        keys = [key for key in self.__dict__.keys() if self[key] is not None]
        # Dunder-style bookkeeping attributes are not graph data.
        keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']
        return keys
    def __len__(self):
        r"""Returns the number of all present attributes."""
        return len(self.keys)
    def __contains__(self, key):
        r"""Returns :obj:`True`, if the attribute :obj:`key` is present in the
        data."""
        return key in self.keys
    def __iter__(self):
        r"""Iterates over all present attributes in the data, yielding their
        attribute names and content."""
        for key in sorted(self.keys):
            yield key, self[key]
    def __call__(self, *keys):
        r"""Iterates over all attributes :obj:`*keys` in the data, yielding
        their attribute names and content.
        If :obj:`*keys` is not given this method will iterative over all
        present attributes."""
        for key in sorted(self.keys) if not keys else keys:
            if self[key] is not None:
                yield key, self[key]
    def cat_dim(self, key, value):
        r"""Returns the dimension in which the attribute :obj:`key` with
        content :obj:`value` gets concatenated when creating batches.
        .. note::
            This method is for internal use only, and should only be overridden
            if the batch concatenation process is corrupted for a specific data
            attribute.
        """
        # `*index*` and `*face*` should be concatenated in the last dimension,
        # everything else in the first dimension.
        return -1 if bool(re.search("(index|face)", key)) else 0
    def __inc__(self, key, value):
        r"""Returns the incremental count to cumulatively increase the value
        of the next attribute of :obj:`key` when creating batches.
        .. note::
            This method is for internal use only, and should only be overridden
            if the batch concatenation process is corrupted for a specific data
            attribute.
        """
        # Only `*index*` and `*face*` should be cumulatively summed up when
        # creating batches.
        return self.num_nodes if bool(re.search('(index|face)', key)) else 0
    @property
    def num_edges(self):
        r"""Returns the number of edges in the graph."""
        for key, item in self("edge_index", "edge_attr"):
            return item.size(self.cat_dim(key, item))
        return None
    @property
    def num_features(self):
        r"""Returns the number of features per node in the graph."""
        return 1 if self.x.dim() == 1 else self.x.size(1)
    @property
    def num_nodes(self):
        """Number of nodes: row count of ``x``, else max edge index + 1."""
        if self.x is not None:
            return self.x.shape[0]
        return torch.max(self.edge_index)+1
    @num_nodes.setter
    def num_nodes(self, num_nodes):
        # NOTE(review): the getter above never reads __num_nodes__, so this
        # override only records the value -- confirm intended behaviour.
        self.__num_nodes__ = num_nodes
    def is_coalesced(self):
        r"""Returns :obj:`True`, if edge indices are ordered and do not contain
        duplicate entries."""
        row, col = self.edge_index
        index = self.num_nodes * row + col
        return row.size(0) == torch.unique(index).size(0)
    def apply(self, func, *keys):
        r"""Applies the function :obj:`func` to all attributes :obj:`*keys`.
        If :obj:`*keys` is not given, :obj:`func` is applied to all present
        attributes.
        """
        for key, item in self(*keys):
            self[key] = func(item)
        return self
    def contiguous(self, *keys):
        r"""Ensures a contiguous memory layout for all attributes :obj:`*keys`.
        If :obj:`*keys` is not given, all present attributes are ensured to
        have a contiguous memory layout."""
        return self.apply(lambda x: x.contiguous(), *keys)
    def to(self, device, *keys):
        r"""Performs tensor dtype and/or device conversion to all attributes
        :obj:`*keys`.
        If :obj:`*keys` is not given, the conversion is applied to all present
        attributes."""
        return self.apply(lambda x: x.to(device), *keys)
    def cuda(self, *keys):
        """Moves all (or the given) attributes to the default CUDA device."""
        return self.apply(lambda x: x.cuda(), *keys)
    def clone(self):
        """Returns a deep copy with every present attribute tensor cloned."""
        return Data.from_dict({k: v.clone() for k, v in self})
    def _build_adj_(self):
        """Builds the cached scipy CSR adjacency matrix (all edge weights 1)."""
        if self.__adj is not None:
            return
        num_edges = self.edge_index.shape[1]
        edge_index_np = self.edge_index.cpu().numpy()
        num_nodes = self.x.shape[0]
        edge_attr_np = np.ones(num_edges)
        self.__adj = sparse.csr_matrix(
            (edge_attr_np, (edge_index_np[0], edge_index_np[1])),
            shape=(num_nodes, num_nodes)
        )
    def subgraph(self, node_idx):
        """Return the induced node subgraph."""
        if self.__adj is None:
            self._build_adj_()
        if isinstance(node_idx, torch.Tensor):
            node_idx = node_idx.cpu().numpy()
        node_idx = np.unique(node_idx)
        # Slice the adjacency on both axes, then convert back to COO to
        # recover the (row, col) edge list of the induced subgraph.
        adj = self.__adj[node_idx, :][:, node_idx]
        adj_coo = sparse.coo_matrix(adj)
        row, col = adj_coo.row, adj_coo.col
        edge_attr = torch.from_numpy(adj_coo.data).to(self.x.device)
        edge_index = torch.from_numpy(np.concatenate([row, col], axis=0)).to(self.x.device)
        keys = self.keys
        # Node-level attributes are indexed down; edge attributes rebuilt above.
        attrs = {key: self[key][node_idx] for key in keys if "edge" not in key}
        attrs["edge_attr"] = edge_attr
        attrs["edge_index"] = edge_index
        return Data(**attrs)
    def edge_subgraph(self, edge_idx):
        """Return the induced edge subgraph."""
        if isinstance(edge_idx, torch.Tensor):
            edge_idx = edge_idx.cpu().numpy()
        edge_index = self.edge_index.T[edge_idx].cpu().numpy()
        node_idx = np.unique(edge_index)
        # Relabel the surviving nodes to a compact 0..n-1 range.
        idx_dict = {val: key for key, val in enumerate(node_idx)}
        func = lambda x: [idx_dict[x[0]], idx_dict[x[1]]]
        edge_index = np.array([func(x) for x in edge_index]).transpose()
        edge_index = torch.from_numpy(edge_index).to(self.x.device)
        edge_attr = self.edge_attr[edge_idx]
        keys = self.keys
        attrs = {key: self[key][node_idx] for key in keys if "edge" not in key}
        attrs["edge_attr"] = edge_attr
        attrs["edge_index"] = edge_index
        return Data(**attrs)
    def sample_adj(self, batch, size=-1, replace=True):
        """Samples (up to) ``size`` neighbors per node in ``batch``.

        size=-1 keeps every neighbor. Returns (node_idx, edge_index) where
        the target nodes of ``batch`` occupy the first positions of node_idx
        and edge_index is expressed in that local numbering.
        """
        assert size != 0
        if self.__adj is None:
            self._build_adj_()
        if isinstance(batch, torch.Tensor):
            batch = batch.cpu().numpy()
        adj = self.__adj[batch].tocsr()
        batch_size = len(batch)
        if size == -1:
            # Keep the full neighborhood.
            adj = adj.tocoo()
            row, col = torch.from_numpy(adj.row), torch.from_numpy(adj.col)
            node_idx = torch.unique(col)
        else:
            # Sample ``size`` columns per row (with replacement).
            indices = torch.from_numpy(adj.indices)
            indptr = torch.from_numpy(adj.indptr)
            node_idx, (row, col) = self._sample_adj(batch_size, indices, indptr, size)
        col = col.numpy()
        _node_idx = node_idx.numpy()
        # Reindexing: target nodes are always put at the front
        _node_idx = list(batch) + list(set(_node_idx).difference(set(batch)))
        node_dict = {val: key for key, val in enumerate(_node_idx)}
        new_col = torch.LongTensor([node_dict[i] for i in col])
        edge_index = torch.stack([row.long(), new_col])
        node_idx = torch.Tensor(_node_idx).long().to(self.x.device)
        edge_index = edge_index.long().to(self.x.device)
        return node_idx, edge_index
    def _sample_adj(self, batch_size, indices, indptr, size):
        """Uniformly samples ``size`` neighbors per CSR row, with replacement."""
        indptr = indptr
        # Per-row neighbor counts derived from CSR row pointers.
        row_counts = torch.Tensor([indptr[i] - indptr[i - 1] for i in range(1, len(indptr))])
        # if not replace:
        #     edge_cols = [col[indptr[i]: indptr[i+1]] for i in range(len(indptr)-1)]
        #     edge_cols = [np.random.choice(x, min(size, len(x)), replace=False) for x in edge_cols]
        # else:
        # Draw uniform positions inside each row's index range.
        rand = torch.rand(batch_size, size)
        rand = rand * row_counts.view(-1, 1)
        rand = rand.long()
        rand = rand + indptr[:-1].view(-1, 1)
        edge_cols = indices[rand].view(-1)
        row = torch.arange(0, batch_size).view(-1, 1).repeat(1, size).view(-1)
        node_idx = torch.unique(edge_cols)
        return node_idx, (row, edge_cols)
    @staticmethod
    def from_pyg_data(data: pyg_data):
        """Builds a cogdl Data object from a torch_geometric Data object."""
        val = {k: v for k, v in data}
        return Data(**val)
    def __repr__(self):
        """Readable summary listing each public attribute and its shape."""
        info = ["{}={}".format(key, list(item.size())) for key, item in self if not key.startswith("_")]
        return "{}({})".format(self.__class__.__name__, ", ".join(info))
| 3,264 | 0 | 267 |
90183bb1e23adc8c3232b56ac3c0c0c9c8012620 | 5,155 | py | Python | plugin/plugins/nrft/src/check_nrft_nb_batch.py | crazy-canux/xPlugin_Monitoring | 4a66d26f9d2982609489eaa0f57d6afb16aca37c | [
"Apache-2.0"
] | 2 | 2015-12-17T04:00:30.000Z | 2015-12-22T11:49:01.000Z | plugin/plugins/nrft/src/check_nrft_nb_batch.py | crazy-canux/xPlugin_Monitoring | 4a66d26f9d2982609489eaa0f57d6afb16aca37c | [
"Apache-2.0"
] | null | null | null | plugin/plugins/nrft/src/check_nrft_nb_batch.py | crazy-canux/xPlugin_Monitoring | 4a66d26f9d2982609489eaa0f57d6afb16aca37c | [
"Apache-2.0"
] | 1 | 2017-02-20T22:57:17.000Z | 2017-02-20T22:57:17.000Z | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Canux CHENG <canuxcheng@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import traceback
from monitoring.nagios.plugin import NagiosPluginSSH
logger = logging.getLogger('plugin.nrftfm')
# define new args
# Init plugin
# NOTE(review): the PluginNRFTFM class is not defined in this copy of the
# module -- presumably stripped; confirm against the full source file.
plugin = PluginNRFTFM(version="1.0",
                      description="check Oracle table")
plugin.shortoutput = "check lines from frmentprop table"
# Final status exit for the plugin
status = None
# ORACLE_HOME="/opt/oracle/ora11"
# Remote command: source the nrft env, run a COUNT(*) under sqlplus as the
# oracle user, and sed out only the line after the '--' column separator.
cmd = """echo ". /usr/local/bin/nrft.env ;echo \\"select count(*) from \
nrftfmfi.fmentprop;\\" \
|{0}/bin/sqlplus -s nagios/nagios_nrft" \
|sudo -u oracle -i \
|sed -n '/--/{{n; p;}}""".format(plugin.options.oracle_home)
logger.debug("cmd : {0}".format(cmd))
try:
    command = plugin.ssh.execute(cmd)
    output = command.output
    errors = command.errors
    logger.debug("Received output: %s", output)
    logger.debug("Received errors: %s", errors)
except:
    # NOTE(review): bare except -- any SSH failure exits UNKNOWN; consider
    # narrowing to Exception so KeyboardInterrupt is not swallowed.
    plugin.shortoutput = "Something unexpected happened ! " \
                         "Please investigate..."
    plugin.longoutput = traceback.format_exc().splitlines()
    plugin.unknown(plugin.output())
if errors:
    plugin.unknown("Errors found:\n{}".format("\n".join(errors)))
# The sed filter leaves a single numeric line: the row count.
for line in output:
    result = int(line)
status = plugin.ok
logger.debug("Result: %d", result)
# Check threshold
if plugin.options.warning:
    if result >= plugin.options.warning:
        status = plugin.warning
        plugin.shortoutput = "The number of lines in the fmentprop " \
                             "table is {}" .format(result)
if plugin.options.critical:
    if result >= plugin.options.critical:
        status = plugin.critical
        plugin.shortoutput = "The number of lines in the fmentprop " \
                             "table is {}" .format(result)
# Return status with message to Nagios
logger.debug("Return status and exit to Nagios.")
if status:
    plugin.shortoutput = "The number of lines in the fmentprop " \
                         "table is {}" .format(result)
    status(plugin.output())
else:
    plugin.unknown('Unexpected error during plugin execution, '
                   'please investigate with debug mode on.')
| 38.185185 | 78 | 0.564694 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Canux CHENG <canuxcheng@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import traceback
from monitoring.nagios.plugin import NagiosPluginSSH
logger = logging.getLogger('plugin.nrftfm')
# define new args
class PluginNRFTFM(NagiosPluginSSH):
    """Nagios SSH plugin checking the row count of the nrftfmfi.fmentprop Oracle table."""
    def define_plugin_arguments(self):
        """Register Oracle connection options and warning/critical thresholds."""
        super(PluginNRFTFM, self).define_plugin_arguments()
        self.required_args.add_argument('-O', '--Oracle_home',
                                        dest="oracle_home",
                                        default='/opt/oracle/ora11',
                                        help="Oracle Home",
                                        required=False)
        self.required_args.add_argument('-S', '--Oracle_SID',
                                        dest="oracle_sid",
                                        help="Oracle SID",
                                        required=False)
        self.required_args.add_argument('-o', '--User',
                                        dest="oracle_user",
                                        help="Oracle user",
                                        required=False)
        self.required_args.add_argument('-OP', '--Password',
                                        dest="oracle_password",
                                        help="Oracle Password",
                                        required=False)
        # Thresholds default to 0 (falsy), which disables the checks below.
        self.required_args.add_argument('-w', '--warn',
                                        type=int,
                                        dest='warning',
                                        default=0,
                                        help='Warning threshold.',
                                        required=False)
        self.required_args.add_argument('-c', '--crit',
                                        type=int,
                                        dest='critical',
                                        default=0,
                                        help='Critical threshold.',
                                        required=False)
# Init plugin
plugin = PluginNRFTFM(version="1.0",
                      description="check Oracle table")
plugin.shortoutput = "check lines from frmentprop table"
# Final status exit for the plugin
status = None
# ORACLE_HOME="/opt/oracle/ora11"
# Remote command: source the nrft env, run a COUNT(*) under sqlplus as the
# oracle user, and sed out only the line after the '--' column separator.
cmd = """echo ". /usr/local/bin/nrft.env ;echo \\"select count(*) from \
nrftfmfi.fmentprop;\\" \
|{0}/bin/sqlplus -s nagios/nagios_nrft" \
|sudo -u oracle -i \
|sed -n '/--/{{n; p;}}""".format(plugin.options.oracle_home)
logger.debug("cmd : {0}".format(cmd))
try:
    command = plugin.ssh.execute(cmd)
    output = command.output
    errors = command.errors
    logger.debug("Received output: %s", output)
    logger.debug("Received errors: %s", errors)
except:
    # NOTE(review): bare except -- any SSH failure exits UNKNOWN; consider
    # narrowing to Exception so KeyboardInterrupt is not swallowed.
    plugin.shortoutput = "Something unexpected happened ! " \
                         "Please investigate..."
    plugin.longoutput = traceback.format_exc().splitlines()
    plugin.unknown(plugin.output())
if errors:
    plugin.unknown("Errors found:\n{}".format("\n".join(errors)))
# The sed filter leaves a single numeric line: the row count.
for line in output:
    result = int(line)
status = plugin.ok
logger.debug("Result: %d", result)
# Check threshold
if plugin.options.warning:
    if result >= plugin.options.warning:
        status = plugin.warning
        plugin.shortoutput = "The number of lines in the fmentprop " \
                             "table is {}" .format(result)
if plugin.options.critical:
    if result >= plugin.options.critical:
        status = plugin.critical
        plugin.shortoutput = "The number of lines in the fmentprop " \
                             "table is {}" .format(result)
# Return status with message to Nagios
logger.debug("Return status and exit to Nagios.")
if status:
    plugin.shortoutput = "The number of lines in the fmentprop " \
                         "table is {}" .format(result)
    status(plugin.output())
else:
    plugin.unknown('Unexpected error during plugin execution, '
                   'please investigate with debug mode on.')
| 1,774 | 15 | 48 |
edeec0965d66e9775459654b8010223ab2b38ced | 1,591 | py | Python | main.py | Smaniac1/01-Guessing | 19f09031bd2907929f2e2ac70e45b793ea4a87eb | [
"MIT"
] | null | null | null | main.py | Smaniac1/01-Guessing | 19f09031bd2907929f2e2ac70e45b793ea4a87eb | [
"MIT"
] | null | null | null | main.py | Smaniac1/01-Guessing | 19f09031bd2907929f2e2ac70e45b793ea4a87eb | [
"MIT"
] | null | null | null | import random
# Number-guessing game: the computer picks a number in [1, max]; the player
# guesses with high/low hints until correct, then may play another round.
quit_game = False
while not quit_game:
    # Start up message to the user
    print("Hello user, this program allows you to play a guessing game"
          " with the computer. The computer will select a number between 1 and"
          " your max number and you will have to guess which number the computer has"
          " selected.")
    print("The program will also keep a running total on how many times you guessed")
    # Have the program select a number between 1 and the player's number.
    max_number = int(input("Please enter the maximum number you want:"))
    secret = random.randint(1, max_number)
    # Count every guess including the winning one (the old counter only
    # counted wrong guesses, reporting 0 on a first-try win).
    guess_count = 1
    print("The computer has chosen a number")  # typo fix: "choosen"
    guess = int(input("Please guess a number:"))
    while guess != secret:
        guess_count += 1
        print("You guessed wrong")
        if guess > secret:
            print("It was too high")  # typo fix: "to high"
        else:
            print("It was too low")   # typo fix: "to low"
        guess = int(input("Please try a different number:"))  # typo fix
    # Typo fix: "Congradulations" -> "Congratulations".
    print("Congratulations user, you guessed the right number which was",
          guess, "you guessed a total of", guess_count, "times!")
    play_again = input("Would you like to play again (yes or no)?").lower()
    if play_again in ("yes", "y"):
        quit_game = False
    else:
        quit_game = True
        print("Thanks for playing, hope you come back again!")
| 34.586957 | 85 | 0.636706 | import random
# Guessing game: the computer picks a number between 1 and a user-chosen
# maximum; the player guesses with high/low hints until correct, and the
# total number of guesses (first attempt included) is reported.
Quit = False
while not Quit:
    # Start up message to the user
    print("Hello user, this program allows you to play a guessing game"
          " with the computer. The computer will select a number between 1 and"
          " your max number and you will have to guess which number the computer has"
          " selected.")
    print("The program will also keep a running total on how many times you guessed")
    # Have the program select a number between 1 and the players number
    Max = int(input("Please enter the maximum number you want:"))
    Computer = random.randint(1, Max)
    # Fix: start at 1 so the first guess counts as an attempt.  The original
    # started at 0 and incremented only on wrong guesses, so the reported
    # total was always one short (e.g. "0 times" on a first-try win).
    Counter = 1
    # Tell the user that the computer has choosen and it is time to guess
    print("The computer has choosen a number")
    User = int(input("Please guess a number:"))
    # Keep prompting until the guess matches, hinting high or low.
    while User != Computer:
        print("You guessed wrong")
        if User > Computer:
            print("It was to high")
        else:
            print("It was to low")
        User = int(input("Please try a diffrent number:"))
        Counter += 1
    # Once they guessed right
    print("Congradulations user, you guessed the right number which was",
          User, "you guessed a total of", Counter, "times!")
    PlayAgain = input("Would you like to play again (yes or no)?")
    PlayAgain = PlayAgain.lower()
    # Anything other than yes/y ends the session.
    Quit = not (PlayAgain == "yes" or PlayAgain == "y")
print("Thanks for playing, hope you come back again!")
| 0 | 0 | 0 |
bb72db10ba90b749fc7d7afca4fca6836a5f88e1 | 107 | py | Python | app/model/__init__.py | daniilsavchuk/flask-restplus-min-boilerplate | 398d897d73400954bd1c1bd5eb58a478f40a29c8 | [
"MIT"
] | null | null | null | app/model/__init__.py | daniilsavchuk/flask-restplus-min-boilerplate | 398d897d73400954bd1c1bd5eb58a478f40a29c8 | [
"MIT"
] | null | null | null | app/model/__init__.py | daniilsavchuk/flask-restplus-min-boilerplate | 398d897d73400954bd1c1bd5eb58a478f40a29c8 | [
"MIT"
] | null | null | null | from .db import db
from .entity import *
from .relation import *
| 13.375 | 23 | 0.700935 | from .db import db
from .entity import *
from .relation import *
def init_app(app):
    """Bind the shared SQLAlchemy ``db`` instance (from ``.db``) to *app*.

    Intended to be called once during application start-up.
    """
    db.init_app(app)
| 18 | 0 | 23 |
619a0830c73d2526e60ce4d6b6445ebb1a0e8f1c | 156 | py | Python | lino_xl/lib/online/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | 1 | 2018-01-12T14:09:48.000Z | 2018-01-12T14:09:48.000Z | lino_xl/lib/online/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | 1 | 2019-09-10T05:03:47.000Z | 2019-09-10T05:03:47.000Z | lino_xl/lib/online/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | null | null | null | """A package to differentiate between the "standard" `users`
module and the version with online registration.
.. autosummary::
:toctree:
users
"""
| 15.6 | 60 | 0.705128 | """A package to differentiate between the "standard" `users`
module and the version with online registration.
.. autosummary::
:toctree:
users
"""
| 0 | 0 | 0 |
514621a0235804c4c7a393dc5ca7be87092a87f3 | 1,231 | py | Python | development/migrations/0003_auto_20171006_1429.py | damgambit/james-gattuso-realty | f1ef70a2d4e8d4cbc2d5774f92891560b89ec63b | [
"Apache-2.0"
] | null | null | null | development/migrations/0003_auto_20171006_1429.py | damgambit/james-gattuso-realty | f1ef70a2d4e8d4cbc2d5774f92891560b89ec63b | [
"Apache-2.0"
] | 4 | 2020-02-12T00:07:36.000Z | 2021-12-13T19:46:18.000Z | development/migrations/0003_auto_20171006_1429.py | damgambit/james-gattuso-realty | f1ef70a2d4e8d4cbc2d5774f92891560b89ec63b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-06 14:29
from __future__ import unicode_literals
from django.db import migrations, models
| 30.02439 | 67 | 0.595451 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-06 14:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare five CommonwealthAuction columns with a 'NULL' default."""

    dependencies = [
        ('development', '0002_auto_20171005_1911'),
    ]

    # Every altered field gets the identical CharField definition, so the
    # operation list is generated from the field names rather than spelling
    # out five copies of the same literal.  Order matches the original.
    operations = [
        migrations.AlterField(
            model_name='commonwealthauction',
            name=field_name,
            field=models.CharField(default='NULL', max_length=100),
        )
        for field_name in ('auction_date', 'auction_time',
                           'country', 'deposit', 'status')
    ]
| 0 | 1,052 | 23 |
b28de891c603a05a76b0cf095b7e5769bfbfc711 | 79 | py | Python | examples/all_of_model_models.py | n0npax/OpenAlchemy | ba1ff4e38c5721ff2a5f1a24e3db58792869d20c | [
"Apache-2.0"
] | null | null | null | examples/all_of_model_models.py | n0npax/OpenAlchemy | ba1ff4e38c5721ff2a5f1a24e3db58792869d20c | [
"Apache-2.0"
] | null | null | null | examples/all_of_model_models.py | n0npax/OpenAlchemy | ba1ff4e38c5721ff2a5f1a24e3db58792869d20c | [
"Apache-2.0"
] | null | null | null | from open_alchemy import init_yaml
# Import-time side effect: OpenAlchemy's init_yaml builds the models
# declared in the example "allOf" YAML spec file.
init_yaml("all-of-model-example-spec.yml")
| 19.75 | 42 | 0.810127 | from open_alchemy import init_yaml
# Import-time side effect: OpenAlchemy's init_yaml builds the models
# declared in the example "allOf" YAML spec file.
init_yaml("all-of-model-example-spec.yml")
| 0 | 0 | 0 |
73698cea73e0ee40d719751cf0efe4cef0bdb076 | 466 | py | Python | Python/Rock_paper-scissor/paper.py | janvi16/-HACKTOBERFEST2K20 | aa9c8b6f7feb245793c0a003ba6fbea3fca9ca22 | [
"Apache-2.0"
] | 30 | 2020-10-07T09:16:29.000Z | 2020-10-19T06:50:37.000Z | Python/Rock_paper-scissor/paper.py | janvi16/-HACKTOBERFEST2K20 | aa9c8b6f7feb245793c0a003ba6fbea3fca9ca22 | [
"Apache-2.0"
] | 70 | 2020-10-07T03:26:13.000Z | 2020-10-25T06:58:07.000Z | Python/Rock_paper-scissor/paper.py | janvi16/-HACKTOBERFEST2K20 | aa9c8b6f7feb245793c0a003ba6fbea3fca9ca22 | [
"Apache-2.0"
] | 280 | 2020-10-07T03:39:21.000Z | 2020-10-25T07:16:33.000Z | from random import randint
import utils

# Rock-paper-scissors against the computer; utils.getWinner decides rounds.
# The three moves the computer can choose from.
objs = ["Rock", "Paper", "Scissor"]

playing = True
while playing:
    player = input("Rock, Paper or Scissor ? ")
    # Pick a fresh random move for the computer every round.  (The original
    # also assigned `computer` once before the loop; that value was never
    # read, so the dead store is removed.)
    computer = objs[randint(0, 2)]
    print(utils.getWinner(player, computer))
    key = input(
        """
1. To keep Playing Press Enter
2. To Quit Press input Q
"""
    )
    # Fix: the prompt advertises an uppercase "Q", but the original only
    # matched lowercase "q".  Normalize (and tolerate stray whitespace).
    if key.strip().lower() == "q":
        playing = False
        print("Thank You For Playing!")
| 17.259259 | 50 | 0.583691 | from random import randint
import utils

# Rock-paper-scissors against the computer; utils.getWinner decides rounds.
# The three moves the computer can choose from.
objs = ["Rock", "Paper", "Scissor"]

playing = True
while playing:
    player = input("Rock, Paper or Scissor ? ")
    # Pick a fresh random move for the computer every round.  (The original
    # also assigned `computer` once before the loop; that value was never
    # read, so the dead store is removed.)
    computer = objs[randint(0, 2)]
    print(utils.getWinner(player, computer))
    key = input(
        """
1. To keep Playing Press Enter
2. To Quit Press input Q
"""
    )
    # Fix: the prompt advertises an uppercase "Q", but the original only
    # matched lowercase "q".  Normalize (and tolerate stray whitespace).
    if key.strip().lower() == "q":
        playing = False
        print("Thank You For Playing!")
| 0 | 0 | 0 |
ff7d0c70b0e0d57d9f039062e7a17d260ac50ee9 | 561 | py | Python | theisle/Server.py | gquarles/theisle | 5170ebcbfbe1ec58d63c44385f008b830be037e7 | [
"MIT"
] | null | null | null | theisle/Server.py | gquarles/theisle | 5170ebcbfbe1ec58d63c44385f008b830be037e7 | [
"MIT"
] | null | null | null | theisle/Server.py | gquarles/theisle | 5170ebcbfbe1ec58d63c44385f008b830be037e7 | [
"MIT"
] | null | null | null | from .Player import Player
import os
| 26.714286 | 65 | 0.552585 | from .Player import Player
import os
class Server:
    """Accessor for a directory of per-player ``<steamid>.json`` save files."""

    def __init__(self, path):
        """Remember *path*, guaranteeing exactly one trailing slash."""
        self.path = path if path.endswith('/') else path + '/'

    def getplayer(self, steamid):
        """Return the Player stored for *steamid*, or False if no file exists."""
        steamid = str(steamid)
        player_file = self.path + steamid + '.json'
        if os.path.exists(player_file):
            return Player(player_file, steamid)
        return False

    def players(self):
        """Yield a Player for every ``.json`` file found in the directory."""
        for entry in os.listdir(self.path):
            if entry.endswith('.json'):
                # Strip the ".json" suffix to recover the steamid.
                yield Player(self.path + entry, entry[:-5])
| 428 | -8 | 103 |
95bce5642c150f60930c8e4077dd574b4bdd9a30 | 357 | py | Python | mod_users/forms.py | parryc/record_beer | 1c87cc0083f1d556620a0562fc75a0612697fcd2 | [
"MIT"
] | null | null | null | mod_users/forms.py | parryc/record_beer | 1c87cc0083f1d556620a0562fc75a0612697fcd2 | [
"MIT"
] | 5 | 2019-12-16T03:24:39.000Z | 2021-12-13T19:45:36.000Z | mod_users/forms.py | parryc/record_beer | 1c87cc0083f1d556620a0562fc75a0612697fcd2 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import TextField
from wtforms.validators import Required, NumberRange, Optional
from mod_tags.models import *
| 27.461538 | 63 | 0.80112 | from flask_wtf import FlaskForm
from wtforms import TextField
from wtforms.validators import Required, NumberRange, Optional
from mod_tags.models import *
class UserForm(FlaskForm):
    """User preference form: default drink location and drink date.

    Declaration order determines rendered field order in WTForms, so keep
    these in this order.  ``TextField`` is the legacy WTForms alias for
    ``StringField``.
    """

    # Free-text default location where the user drinks.
    default_drink_location_country = TextField("Drink Country")
    default_drink_location_city = TextField("Drink City")
    # NOTE(review): stored as free text rather than a DateField --
    # presumably parsed/validated elsewhere; confirm against the model.
    default_drink_date = TextField("Drink Date")
| 0 | 178 | 23 |
a2c198e5b3eef3213a7a50764454138d1797856d | 3,156 | py | Python | brainspell/deploy.py | akeshavan/brainspell-neo | 05259770a9928d01adaaa028e9d2115750c4fe03 | [
"MIT"
] | 7 | 2017-06-25T18:44:30.000Z | 2019-06-06T20:05:36.000Z | brainspell/deploy.py | akeshavan/brainspell-neo | 05259770a9928d01adaaa028e9d2115750c4fe03 | [
"MIT"
] | 57 | 2017-03-25T01:54:37.000Z | 2019-06-05T18:08:16.000Z | brainspell/deploy.py | akeshavan/brainspell-neo | 05259770a9928d01adaaa028e9d2115750c4fe03 | [
"MIT"
] | 6 | 2017-04-15T16:18:15.000Z | 2018-01-19T00:22:24.000Z | """
A deployment script that accomplishes the following:
Sets up a handler for the endpoint /deploy, which when triggered on a production port:
1) Navigates into a "debug" directory, where it pulls the latest copy of Brainspell from GitHub (or clones a copy, if one doesn't already exist)
2) Starts the "debug" Brainspell server at port 5858.
3) Triggers the /deploy endpoint on the debug server locally, which:
a) Navigates out of the "debug" directory, and pulls a fresh copy of the GitHub repo for the production server.
This process ensures that a GitHub push is deployed to production only if:
i) The server can successfully run and
ii) The deploy endpoint on the server still exists.
There is no guarantee that the deploy endpoint is functioning properly.
"""
# TODO: set up SupervisorD
import argparse
import os
import subprocess
from time import sleep
import tornado.ioloop
import brainspell
from base_handler import *
def subprocess_cmd_sync(command):
    """Synchronously run a shell command and return its stripped stdout.

    Fix: the original computed ``proc_stdout`` and discarded it; returning
    it is backward compatible (existing callers ignore the return value).
    ``shell=True`` means *command* must come from trusted code only.
    """
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    return proc_stdout
def subprocess_cmd_async(command):
    """Asynchronously run a shell command and return the Popen handle.

    Fix: the original discarded the handle, making it impossible to wait on
    or inspect the spawned process; returning it is backward compatible.
    ``shell=True`` means *command* must come from trusted code only.
    """
    return subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
class DeployHandler(BaseHandler):
    """ Implement the protocol described above: on GET, the behavior depends
    on whether the server runs on the debug or a production port (see the
    module docstring for the two-phase deploy flow). """
| 42.08 | 161 | 0.682826 | """
A deployment script that accomplishes the following:
Sets up a handler for the endpoint /deploy, which when triggered on a production port:
1) Navigates into a "debug" directory, where it pulls the latest copy of Brainspell from GitHub (or clones a copy, if one doesn't already exist)
2) Starts the "debug" Brainspell server at port 5858.
3) Triggers the /deploy endpoint on the debug server locally, which:
a) Navigates out of the "debug" directory, and pulls a fresh copy of the GitHub repo for the production server.
This process ensures that a GitHub push is deployed to production only if:
i) The server can successfully run and
ii) The deploy endpoint on the server still exists.
There is no guarantee that the deploy endpoint is functioning properly.
"""
# TODO: set up SupervisorD
import argparse
import os
import subprocess
from time import sleep
import tornado.ioloop
import brainspell
from base_handler import *
def subprocess_cmd_sync(command):
    """Synchronously run a shell command and return its stripped stdout.

    Fix: the original computed ``proc_stdout`` and discarded it; returning
    it is backward compatible (existing callers ignore the return value).
    ``shell=True`` means *command* must come from trusted code only.
    """
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    return proc_stdout
def subprocess_cmd_async(command):
    """Asynchronously run a shell command and return the Popen handle.

    Fix: the original discarded the handle, making it impossible to wait on
    or inspect the spawned process; returning it is backward compatible.
    ``shell=True`` means *command* must come from trusted code only.
    """
    return subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
class DeployHandler(BaseHandler):
    """ Implement the protocol described above. """

    def get(self):
        """Handle GET /deploy; behavior depends on the port this process serves."""
        # get the port that is currently running
        port_to_run = brainspell.get_port_to_run()
        if port_to_run == 5858:  # potentially change this to a flag, and make the port choice arbitrary
            # if the server is running on a debug port:
            # pull a fresh checkout two directories up and run the production
            # entry point, then stop this debug instance's IOLoop.
            print(
                "Debug server: Navigating into the parent directory, and pulling from the git repo.")
            subprocess_cmd_sync(
                "cd ../../; git pull origin master; python3 json_api/brainspell.py")
            print("Debug server: Finished. Closing debug server.")
            tornado.ioloop.IOLoop.instance().stop()
        else:
            # if this is a production port
            """
            Note that all shell commands executed are idempotent. If a folder already exists, for example, mkdir does nothing.
            """
            print("Production server: Cloning the Brainspell git repo into a debug folder, if one doesn't already exist, then pulling...")
            init_commands = "mkdir debug &>/dev/null; cd debug; git clone https://github.com/OpenNeuroLab/brainspell-neo.git &>/dev/null; git pull origin master"
            # clone/pull the debug server
            subprocess_cmd_sync(init_commands)
            subprocess_cmd_sync(
                "cd debug/brainspell-neo; pip install -r requirements.txt")
            print("Production server: Starting debug server...")
            # Launch the debug copy on port 5858 in the background, ...
            subprocess_cmd_async(
                "cd debug/brainspell-neo; python3 json_api/brainspell.py -p 5858 &")
            # ... give it a moment to bind, then ping its /deploy endpoint,
            # which triggers the actual production pull (module docstring).
            sleep(.5)
            print("Production server: Pinging /deploy endpoint of debug server...")
            subprocess_cmd_async("wget -qO- localhost:5858/deploy &>/dev/null")
            print("Production server: Done.")
| 1,743 | 0 | 27 |
118b7a5fdf0fc04ab5868e2097e62ce3a5802e90 | 1,842 | py | Python | packages/OpenCV/nodes/OpenCV___SaveImage0/OpenCV___SaveImage0___METACODE.py | lidong1266/Ryven-Switch | 68d1f71e81d6564196f44ca49d5903f06db6a4d9 | [
"MIT"
] | 18 | 2021-01-18T09:52:41.000Z | 2022-03-22T10:48:44.000Z | packages/OpenCV/nodes/OpenCV___SaveImage0/OpenCV___SaveImage0___METACODE.py | xamofb-xsk/Ryven | 8c3dcc613098863ae9fb747e62c0bb9d9eb4cef1 | [
"MIT"
] | null | null | null | packages/OpenCV/nodes/OpenCV___SaveImage0/OpenCV___SaveImage0___METACODE.py | xamofb-xsk/Ryven | 8c3dcc613098863ae9fb747e62c0bb9d9eb4cef1 | [
"MIT"
] | 3 | 2021-01-18T09:49:42.000Z | 2022-03-22T10:48:47.000Z | from NIENV import *
# API METHODS
# self.main_widget <- access to main widget
# Ports
# self.input(index) <- access to input data
# set_output_val(index, val) <- set output data port value
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index or input)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index or output)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', 'global')
# self.log_message('that\'s not good', 'error')
# ------------------------------------------------------------------------------
import cv2
| 27.909091 | 83 | 0.60532 | from NIENV import *
# API METHODS
# self.main_widget <- access to main widget
# Ports
# self.input(index) <- access to input data
# set_output_val(index, val) <- set output data port value
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index or input)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index or output)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', 'global')
# self.log_message('that\'s not good', 'error')
# ------------------------------------------------------------------------------
import cv2
# NOTE: "%CLASS%" is a placeholder replaced by the metacode/node-generation
# tooling; this file is a template and is not importable as-is.
class %CLASS%(NodeInstance):
    """Node that writes the image arriving on input port 2 to a chosen path."""

    def __init__(self, params):
        super(%CLASS%, self).__init__(params)

        # self.special_actions['action name'] = {'method': M(self.action_method)}

        # Target path for cv2.imwrite; set via path_chosen() from the widget.
        self.image_filepath = ''
        # Last image received on data input port 2.
        self.img = None

    def update_event(self, input_called=-1):
        # Only react when execution input 0 fires; other triggers are ignored.
        if input_called != 0:
            return
        self.img = self.input(2)
        try:
            self.log_message('Saving pic to '+self.image_filepath, 'global')
            cv2.imwrite(self.image_filepath, self.img)
            self.main_widget.set_path_text(self.image_filepath)
            self.main_widget.setText('Success')
        except Exception as e:
            # Any failure (empty/invalid path, unwritable target, bad image
            # data) is surfaced on the widget instead of being raised.
            self.main_widget.setText('Error')

    def get_data(self):
        # Serialized node state; restored by set_data() on project load.
        data = {'image file path': self.image_filepath}
        return data

    def set_data(self, data):
        self.image_filepath = data['image file path']

    def path_chosen(self, file_path):
        # Callback from the main widget's file chooser; clears status text
        # and re-runs the node with the new target path.
        self.image_filepath = file_path
        self.main_widget.setText('')
        self.update()

    def remove_event(self):
        # No resources to release when the node is removed.
        pass
| 886 | 7 | 184 |
c54a81616f05eb2e66465abf762e6472dcb2c69f | 1,032 | py | Python | src_doit/js.py | hat-open/hat-util | 88729b163bfa0dd6b3e4eed9a089895aa6a684d6 | [
"Apache-2.0"
] | 1 | 2022-02-01T13:42:09.000Z | 2022-02-01T13:42:09.000Z | src_doit/js.py | hat-open/hat-util | 88729b163bfa0dd6b3e4eed9a089895aa6a684d6 | [
"Apache-2.0"
] | null | null | null | src_doit/js.py | hat-open/hat-util | 88729b163bfa0dd6b3e4eed9a089895aa6a684d6 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from hat.doit import common
from hat.doit.js import (build_npm,
run_eslint)
__all__ = ['task_js_build',
'task_js_check',
'task_js_deps']
build_js_dir = Path('build/js')
src_js_dir = Path('src_js')
readme_path = Path('README.rst')
def task_js_build():
    """JavaScript - build"""
    # NOTE(review): `build` is not defined in this excerpt; it is presumably
    # the action callable (a nested function invoking build_npm) defined in
    # the complete module -- confirm against the full file.
    return {'actions': [build],
            'task_dep': ['js_deps']}
def task_js_check():
    """JavaScript - check with eslint"""
    # doit task: lint the JS sources after dependencies are installed.
    task = {
        'actions': [(run_eslint, [src_js_dir])],
        'task_dep': ['js_deps'],
    }
    return task
def task_js_deps():
    """JavaScript - install dependencies"""
    # doit task: install node modules quietly via yarn.
    install_cmd = 'yarn install --silent'
    return {'actions': [install_cmd]}
| 23.454545 | 60 | 0.584302 | from pathlib import Path
from hat.doit import common
from hat.doit.js import (build_npm,
run_eslint)
__all__ = ['task_js_build',
'task_js_check',
'task_js_deps']
build_js_dir = Path('build/js')
src_js_dir = Path('src_js')
readme_path = Path('README.rst')
def task_js_build():
    """JavaScript - build

    doit task: package the JS sources into ``build/js`` via ``build_npm``.
    Depends on ``js_deps`` so node modules are installed first.
    """
    def build():
        # Deferred into a closure so the build only runs when the task
        # executes, not when the task definition is collected.
        build_npm(
            src_dir=src_js_dir,
            dst_dir=build_js_dir,
            name='@hat-open/util',
            description='Hat utility module',
            license=common.License.APACHE2,
            homepage='https://github.com/hat-open/hat-util',
            repository='hat-open/hat-util')

    return {'actions': [build],
            'task_dep': ['js_deps']}
def task_js_check():
    """JavaScript - check with eslint"""
    # doit task: lint the JS sources after dependencies are installed.
    task = {
        'actions': [(run_eslint, [src_js_dir])],
        'task_dep': ['js_deps'],
    }
    return task
def task_js_deps():
    """JavaScript - install dependencies"""
    # doit task: install node modules quietly via yarn.
    install_cmd = 'yarn install --silent'
    return {'actions': [install_cmd]}
| 306 | 0 | 27 |
7056942e80018b443e8d3dc005bbac35db071d61 | 3,339 | py | Python | mdssdk/fcns.py | akshatha-s13/mdssdk | 615a5528d0af1201e8fe8f305c62b258e5433990 | [
"Apache-2.0"
] | null | null | null | mdssdk/fcns.py | akshatha-s13/mdssdk | 615a5528d0af1201e8fe8f305c62b258e5433990 | [
"Apache-2.0"
] | null | null | null | mdssdk/fcns.py | akshatha-s13/mdssdk | 615a5528d0af1201e8fe8f305c62b258e5433990 | [
"Apache-2.0"
] | null | null | null | import logging
import re
from .connection_manager.errors import CLIError
log = logging.getLogger(__name__)
| 29.8125 | 79 | 0.545373 | import logging
import re
from .connection_manager.errors import CLIError
log = logging.getLogger(__name__)
class Fcns(object):
    """FCNS (Fibre Channel Name Server) wrapper around a switch object.

    Builds NX-OS ``show``/config command strings and delegates execution to
    the switch object's ``show()``/``config()`` methods.
    """

    def __init__(self, switch):
        # `switch` is expected to expose .show(cmd, raw_text=False) and
        # .config(cmd) (SDK switch object created elsewhere in this package).
        self.__swobj = switch

    def database(self, vsan=None, fcid=None, detail=False):
        """Run ``show fcns database`` (optionally per fcid/vsan, optionally
        detailed) and return its row data, or None when output is empty."""
        cmd = "show fcns database"
        # Keyword order in the CLI matters: fcid, then detail, then vsan.
        if fcid is not None:
            cmd += " fcid " + str(fcid)
        if detail:
            cmd += " detail"
        if vsan is not None:
            cmd += " vsan " + str(vsan)
        out = self.__swobj.show(cmd)
        if out:
            return out["TABLE_fcns_vsan"]["ROW_fcns_vsan"]
        else:
            return None

    def statistics(self, vsan=None, detail=False):
        """Run ``show fcns statistics`` and return its row data, or None."""
        cmd = "show fcns statistics"
        if detail:
            cmd += " detail"
        if vsan is not None:
            cmd += " vsan " + str(vsan)
        out = self.__swobj.show(cmd)
        if out:
            return out["TABLE_fcns_vsan"]["ROW_fcns_vsan"]
        else:
            return None

    @property
    def no_bulk_notify(self):
        """True if ``fcns no-bulk-notify`` appears in the running config."""
        cmd = "show running section fcns"
        out = self.__swobj.show(cmd, raw_text=True)
        pat = "fcns no-bulk-notify"
        # `out` is a list of raw config lines; join before searching.
        match = re.search(pat, "".join(out))
        if match:
            return True
        else:
            return False

    @no_bulk_notify.setter
    def no_bulk_notify(self, value):
        """Enable/disable bulk-notification suppression.

        Enabling prefixes ``terminal dont-ask`` to suppress the confirmation
        prompt; the switch's informational "optimization is necessary"
        CLIError is logged rather than re-raised.
        """
        if type(value) is not bool:
            raise TypeError("Only bool value(true/false) supported.")
        if value:
            cmd = "terminal dont-ask ; fcns no-bulk-notify"
        else:
            cmd = "no fcns no-bulk-notify"
        try:
            out = self.__swobj.config(cmd)
        except CLIError as c:
            if "FCNS bulk notification optimization is necessary" in c.message:
                log.debug(c.message)
            else:
                raise CLIError(cmd, c.message)
        # Always restore interactive confirmation afterwards.
        self.__swobj.config("no terminal dont-ask")

    @property
    def zone_lookup_cache(self):
        """True if ``fcns zone-lookup-cache`` appears in the running config."""
        cmd = "show running section fcns"
        out = self.__swobj.show(cmd, raw_text=True)
        pat = "fcns zone-lookup-cache"
        match = re.search(pat, "".join(out))
        if match:
            return True
        else:
            return False

    @zone_lookup_cache.setter
    def zone_lookup_cache(self, value):
        """Enable (True) or disable (False) the FCNS zone lookup cache."""
        if type(value) is not bool:
            raise TypeError("Only bool value(true/false) supported.")
        cmd = "fcns zone-lookup-cache"
        if not value:
            cmd = "no " + cmd
        out = self.__swobj.config(cmd)
        # A non-empty result from config() signals an error message.
        if out:
            raise CLIError(cmd, out[0]["msg"])

    def proxy_port(self, pwwn, vsan):
        """Configure *pwwn* as the FCNS proxy port on *vsan*."""
        cmd = "fcns proxy-port " + str(pwwn) + " vsan " + str(vsan)
        out = self.__swobj.config(cmd)
        if out:
            raise CLIError(cmd, out[0]["msg"])

    def no_auto_poll(self, vsan=None, pwwn=None):
        """Disable FCNS auto-polling globally, for a vsan, or for a pwwn.

        If both are given, *vsan* wins (elif); with neither, the global
        form of the command is sent.
        """
        cmd = "fcns no-auto-poll"
        if vsan is not None:
            cmd += " vsan " + str(vsan)
        elif pwwn is not None:
            cmd += " wwn " + str(pwwn)
        out = self.__swobj.config(cmd)
        if out:
            raise CLIError(cmd, out[0]["msg"])

    def reject_duplicate_pwwn(self, vsan):
        """Enable rejection of duplicate pWWN registrations on *vsan*."""
        cmd = "fcns reject-duplicate-pwwn vsan " + str(vsan)
        out = self.__swobj.config(cmd)
        if out:
            raise CLIError(cmd, out[0]["msg"])

    # TODO
| 2,842 | 364 | 23 |
2d1423a2493ad24edbd8cd119bd11281a783501e | 6,816 | py | Python | sleuth_backend/solr/connection.py | ubclaunchpad/sleuth | 7b7be0b7097a26169e17037f4220fd0ce039bde1 | [
"MIT"
] | 12 | 2017-09-17T02:14:35.000Z | 2022-01-09T10:14:59.000Z | sleuth_backend/solr/connection.py | ubclaunchpad/sleuth | 7b7be0b7097a26169e17037f4220fd0ce039bde1 | [
"MIT"
] | 92 | 2017-09-16T23:50:45.000Z | 2018-01-02T01:56:33.000Z | sleuth_backend/solr/connection.py | ubclaunchpad/sleuth | 7b7be0b7097a26169e17037f4220fd0ce039bde1 | [
"MIT"
] | 5 | 2017-12-26T01:47:36.000Z | 2021-12-31T11:15:07.000Z | import pysolr
import json
import requests
class SolrConnection(object):
    '''
    Connection to a Solr database.

    Fixes applied: the five ``is not ""`` checks in query() compared strings
    by identity, which only worked through CPython string interning and is a
    SyntaxWarning since Python 3.8 -- replaced with ``!= ""`` (equality keeps
    the original semantics, e.g. start=0 is still sent).  optimize() used a
    list comprehension purely for side effects -- replaced with a loop.
    '''

    # The number of documents held in a core's insert queue before
    # the documents in the core are automatically inserted.
    QUEUE_THRESHOLD = 100

    def __init__(self, url):
        '''
        Creates a SolrConnection from the given base Solr url of the form
        'http://<solrhostname>:<port>/solr'.
        '''
        self.url = url
        self.solr = pysolr.Solr(url, timeout=10)
        self.solr_admin = pysolr.SolrCoreAdmin(url + '/admin/cores')
        self.cores = {}
        self.queues = {}
        # One pysolr client and one pending-document queue per core.
        for core_name in self.fetch_core_names():
            self.cores[core_name] = pysolr.Solr(self.url + '/' + core_name)
            self.queues[core_name] = list()

    def fetch_core_names(self):
        '''
        Makes a request to Solr and returns an array of strings where each
        string is the name of a core in the response from Solr.
        '''
        status_response = self.solr_admin.status()
        status = json.loads(status_response)
        return [core_name for core_name in status['status']]

    def core_names(self):
        '''
        Returns a list of known valid cores in the Solr instance without
        making a request to Solr - this request excludes cores used for testing.
        '''
        valid_cores = list(self.cores.keys())
        if 'test' in valid_cores:
            valid_cores.remove('test')
        return valid_cores

    def fetch_core_schema(self, name):
        '''
        Returns the schema of the core with the given name as a dictionary.

        Raises ValueError when Solr's response carries no schema (e.g. the
        core does not exist).
        '''
        response = self._get_url("{}/{}/schema".format(self.url, name), {})
        if 'schema' not in response:
            raise ValueError('Solr did not return a schema. Are you sure ' + \
                'the core named "{}" is an existing core?'.format(name))
        return response['schema']

    def queue_document(self, core, doc):
        '''
        Queues a document for insertion into the specified core and returns None.
        If the number of documents in the queue exceeds a certain threshold,
        this function will insert them all the documents held in the queue of the
        specified core and return the response from Solr.

        All values in 'doc' must be strings.
        '''
        if core not in self.cores:
            raise ValueError("A core for the document type {} was not found".format(core))
        self.queues[core].append(doc)
        if len(self.queues[core]) >= self.QUEUE_THRESHOLD:
            # Snapshot and clear the queue before inserting so a failure in
            # insert_documents does not leave duplicates queued.
            docs = list(self.queues[core])
            del self.queues[core][:]
            return self.insert_documents(core, docs)
        return None

    def insert_documents(self, core_name, docs):
        '''
        Inserts given list of documents into specified core. Returns Solr response.
        '''
        if core_name not in self.cores:
            raise ValueError('No Solr core with the name "{}" was found'.format(core_name))
        print('Inserting {} items into core {}'.format(str(len(docs)), core_name))
        return self.cores[core_name].add(docs)

    def insert_queued(self):
        '''
        Inserts all queued documents across all cores. Returns an object
        containing the Solr response from each core.
        '''
        response = {}
        for core in self.cores:
            docs = list(self.queues[core])
            del self.queues[core][:]
            response[core] = self.insert_documents(core, docs)
        return response

    def query(self, core, query, sort="", start="", rows="", default_field="",
              search_fields="", return_fields="", highlight_fields="", omit_header=True):
        '''
        Returns the response body from Solr corresponding to the given query.

        See https://lucene.apache.org/solr/guide/6_6/common-query-parameters.html
        and https://lucene.apache.org/solr/guide/6_6/highlighting.html
        for common query parameters and parameter formatting.

        Params (See Solr docs link above for details):
            core (str): The name of the Solr core to search in.
            query (str): The string to search the core for.
            sort (str): The field to sort results on, and the sort order (see
                Solr docs for details).
            start (int): Specifies an offset into a query’s result set and instructs
                Solr to begin displaying results from this offset.
            rows (int): The maximum number of documents from the complete result
                set that Solr should return.
            default_field (str): The default field to search in.
            search_fields (str): Defines a query that can be used to restrict
                the superset of documents that can be returned, without
                influencing score.
            return_fields (str): Limits the information included in a query
                response to a specified list of fields.
            highlight_fields (str): Specifies a list of fields to highlight.
            omit_header (bool): Whether or not Solr should include a header with
                metadata about the query in its response.
        '''
        params = {
            "q": query,
            "wt": "json",
            "df": default_field,
            "omitHeader": "true" if omit_header else "false",
            "hl.fragsize": 200
        }
        # "" is the "not provided" sentinel; compare with equality, not
        # identity, so 0/False arguments are still forwarded.
        if sort != "":
            params["sort"] = sort
        if start != "":
            params["start"] = start
        if rows != "":
            params["rows"] = rows
        if search_fields != "":
            params["fq"] = search_fields
        if return_fields != "":
            params["fl"] = return_fields
        if highlight_fields != "":
            params["hl"] = "on"
            params["hl.fl"] = highlight_fields
        return self._get_url("{}/{}/select".format(self.url, core), params)

    def optimize(self, core_name=None):
        '''
        Performs defragmentation of specified core in Solr database.
        If no core is specified, defragments all cores.
        '''
        if core_name:
            if core_name not in self.cores:
                raise ValueError('No Solr core with the name "{}" was found'.format(core_name))
            self.cores[core_name].optimize()
        else:
            # Plain loop: we only want the side effect, not a result list.
            for core in self.cores:
                self.cores[core].optimize()

    def _get_url(self, url, params):
        '''
        Makes a request to the given url relative to the base url with the given
        parameters and returns the response as a JSON string.
        '''
        response = requests.get(url, params=pysolr.safe_urlencode(params))
        return response.json()
| 40.094118 | 95 | 0.596831 | import pysolr
import json
import requests
class SolrConnection(object):
'''
Connection to Solr database
'''
# The number of documents held in a core's insert queue before
# the documents in the core are automatically inserted.
QUEUE_THRESHOLD = 100
def __init__(self, url):
'''
Creates a SolrConnection form the given base Solr url of the form
'http://<solrhostname>:<port>/solr'.
'''
self.url = url
self.solr = pysolr.Solr(url, timeout=10)
self.solr_admin = pysolr.SolrCoreAdmin(url + '/admin/cores')
self.cores = {}
self.queues = {}
for core_name in self.fetch_core_names():
self.cores[core_name] = pysolr.Solr(self.url + '/' + core_name)
self.queues[core_name] = list()
def fetch_core_names(self):
'''
Makes a request to Solr and returns an array of strings where each
string is the name of a core in the response from Solr.
'''
status_response = self.solr_admin.status()
status = json.loads(status_response)
return [core_name for core_name in status['status']]
def core_names(self):
'''
Returns a list of known valid cores in the Solr instance without
making a request to Solr - this request excludes cores used for testing.
'''
valid_cores = list(self.cores.keys())
if 'test' in valid_cores:
valid_cores.remove('test')
return valid_cores
def fetch_core_schema(self, name):
'''
Returns the schema of the core with the given name as a dictionary.
'''
response = self._get_url("{}/{}/schema".format(self.url, name), {})
if 'schema' not in response:
raise ValueError('Solr did not return a schema. Are you sure ' + \
'the core named "{}" is an existing core?'.format(name))
return response['schema']
def queue_document(self, core, doc):
'''
Queues a document for insertion into the specified core and returns None.
If the number of documents in the queue exceeds a certain threshold,
this function will insert them all the documents held in the queue of the
specified core and return the response from Solr.
All values in 'doc' must be strings.
'''
if core not in self.cores:
raise ValueError("A core for the document type {} was not found".format(core))
self.queues[core].append(doc)
if len(self.queues[core]) >= self.QUEUE_THRESHOLD:
docs = list(self.queues[core].copy())
del self.queues[core][:]
return self.insert_documents(core, docs)
return None
def insert_documents(self, core_name, docs):
'''
Inserts given list of documents into specified core. Returns Solr response.
'''
if core_name not in self.cores:
raise ValueError('No Solr core with the name "{}" was found'.format(core_name))
print('Inserting {} items into core {}'.format(str(len(docs)), core_name))
return self.cores[core_name].add(docs)
def insert_queued(self):
'''
Inserts all queued documents across all cores. Returns an object
containing the Solr response from each core.
'''
response = {}
for core in self.cores:
docs = list(self.queues[core].copy())
del self.queues[core][:]
response[core] = self.insert_documents(core, docs)
return response
def query(self, core, query, sort="", start="", rows="", default_field="",
search_fields="", return_fields="", highlight_fields="", omit_header=True):
'''
Returns the response body from Solr corresponding to the given query.
See https://lucene.apache.org/solr/guide/6_6/common-query-parameters.html
and https://lucene.apache.org/solr/guide/6_6/highlighting.html
for common query parameters and parameter formatting.
Params (See Solr docs link above for details):
core (str): The name of the Solr core to search in.
query (str): The string to search the core for.
sort (str): The field to sort results on, and the sort order (see
Solr docs for details).
start (int): Specifies an offset into a query’s result set and instructs
Solr to begin displaying results from this offset.
rows (int): The maximum number of documents from the complete result
set that Solr should return.
default_field (str): The default field to search in.
search_fields (str): Defines a query that can be used to restrict
the superset of documents that can be returned, without
influencing score.
return_fields (str): Limits the information included in a query
response to a specified list of fields.
highlight_fields (str): Specifies a list of fields to highlight.
omit_header (bool): Whether or not Solr should include a header with
metadata about the query in its response.
'''
params = {
"q": query,
"wt": "json",
"df": default_field,
"omitHeader": "true" if omit_header else "false",
"hl.fragsize": 200
}
if sort is not "":
params["sort"] = sort
if start is not "":
params["start"] = start
if rows is not "":
params["rows"] = rows
if search_fields is not "":
params["fq"] = search_fields
if return_fields is not "":
params["fl"] = return_fields
if highlight_fields is not "":
params["hl"] = "on"
params["hl.fl"] = highlight_fields
return self._get_url("{}/{}/select".format(self.url, core), params)
def optimize(self, core_name=None):
'''
Performs defragmentation of specified core in Solr database.
If no core is specified, defragments all cores.
'''
if core_name:
if core_name not in self.cores:
raise ValueError('No Solr core with the name "{}" was found'.format(core_name))
self.cores[core_name].optimize()
else:
[self.cores[core].optimize() for core in self.cores]
    def _get_url(self, url, params):
        '''
        Makes a request to the given url relative to the base url with the given
        parameters and returns the response as a JSON string.

        NOTE(review): despite the wording above, this returns the *parsed*
        JSON body (dict/list), not a string, and response.json() raises if
        Solr responds with a non-JSON body -- confirm callers expect that.
        '''
        # Parameters are pre-encoded with pysolr's helper before the request.
        response = requests.get(url, params=pysolr.safe_urlencode(params))
        return response.json()
| 0 | 0 | 0 |
4891533bd7a0213b27acb0112898f0244531468b | 3,410 | py | Python | src/ThaiPersonalCardExtract/utils.py | ggafiled/ThaiPersonalCardExtract | 7747086ff4d28ad9ec04f8ca7d68ac6e6f393cd4 | [
"Apache-2.0"
] | 12 | 2021-08-11T08:48:48.000Z | 2022-02-10T03:06:34.000Z | src/ThaiPersonalCardExtract/utils.py | ggafiled/ThaiPersonalCardExtract | 7747086ff4d28ad9ec04f8ca7d68ac6e6f393cd4 | [
"Apache-2.0"
] | 2 | 2021-08-21T16:14:39.000Z | 2022-02-17T15:31:07.000Z | src/ThaiPersonalCardExtract/utils.py | ggafiled/ThaiPersonalCardExtract | 7747086ff4d28ad9ec04f8ca7d68ac6e6f393cd4 | [
"Apache-2.0"
] | 7 | 2021-08-11T09:32:02.000Z | 2022-03-26T13:56:46.000Z | from enum import Enum
import cv2
import numpy as np
def convertScale(img, alpha, beta):
    """Add bias and gain to an image with saturation arithmetics. Unlike
    cv2.convertScaleAbs, it does not take an absolute value, which would lead to
    nonsensical results (e.g., a pixel at 44 with alpha = 3 and beta = -210
    becomes 78 with OpenCV, when in fact it should become 0).
    """
    # Scale, clamp to the valid 8-bit range, then cast in a single pass.
    scaled = img * alpha + beta
    return np.clip(scaled, 0, 255).astype(np.uint8)
# Automatic brightness and contrast optimization with optional histogram clipping
| 31.869159 | 120 | 0.676833 | from enum import Enum
import cv2
import numpy as np
class Language(Enum):
    """OCR language options; str() yields the underlying language code."""
    THAI = 'tha'
    ENGLISH = 'eng'
    MIX = 'mix'
    def __str__(self):
        # Render as the bare code (e.g. 'tha') rather than 'Language.THAI'.
        return self.value
class Provider(Enum):
    """OCR engine options; str() yields the underlying provider name."""
    EASYOCR = 'easyocr'
    TESSERACT = 'tesseract'
    DEFAULT = 'default'
    def __str__(self):
        # Render as the bare provider name rather than 'Provider.EASYOCR'.
        return self.value
class Card(Enum):
    """Card side options; str() yields the underlying template name."""
    FRONT_TEMPLATE = 'front'
    BACK_TEMPLATE = 'back'
    def __str__(self):
        # Render as the bare template name rather than 'Card.FRONT_TEMPLATE'.
        return self.value
def convertScale(img, alpha, beta):
    """Add bias and gain to an image with saturation arithmetics. Unlike
    cv2.convertScaleAbs, it does not take an absolute value, which would lead to
    nonsensical results (e.g., a pixel at 44 with alpha = 3 and beta = -210
    becomes 78 with OpenCV, when in fact it should become 0).
    """
    # Scale, clamp to the valid 8-bit range, then cast in a single pass.
    scaled = img * alpha + beta
    return np.clip(scaled, 0, 255).astype(np.uint8)
# Automatic brightness and contrast optimization with optional histogram clipping
def automatic_brightness_and_contrast(image, clip_hist_percent=25):
    """Derive gain/bias that stretch the image histogram and apply them.

    Clips `clip_hist_percent` percent of the histogram mass (split evenly
    between both tails) before stretching, so outlier pixels do not skew
    the chosen range.  Returns (adjusted_image, alpha, beta).

    NOTE(review): assumes `image` is a single-channel 8-bit grayscale array
    (calcHist is called on one channel over [0, 256]) -- confirm at call sites.
    """
    gray = image
    # Calculate grayscale histogram
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    hist_size = len(hist)
    # Calculate cumulative distribution from the histogram
    accumulator = []
    accumulator.append(float(hist[0]))
    for index in range(1, hist_size):
        accumulator.append(accumulator[index - 1] + float(hist[index]))
    # Locate points to clip
    # Convert percentage to pixel-count mass, half per histogram tail.
    maximum = accumulator[-1]
    clip_hist_percent *= (maximum / 100.0)
    clip_hist_percent /= 2.0
    # Locate left cut
    minimum_gray = 0
    while accumulator[minimum_gray] < clip_hist_percent:
        minimum_gray += 1
    # Locate right cut
    maximum_gray = hist_size - 1
    while accumulator[maximum_gray] >= (maximum - clip_hist_percent):
        maximum_gray -= 1
    # Calculate alpha and beta values
    # Linear map [minimum_gray, maximum_gray] -> [0, 255].
    alpha = 255 / (maximum_gray - minimum_gray)
    beta = -minimum_gray * alpha
    auto_result = convertScale(image, alpha=alpha, beta=beta)
    return (auto_result, alpha, beta)
def remove_horizontal_line(image):
    """Erase long horizontal lines from a grayscale image, then repair gaps.

    Detects lines by opening an inverted-Otsu binarization with a wide,
    1-pixel-tall kernel, paints the detected contours white, and closes
    small vertical gaps left behind.
    """
    thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    # Remove horizontal
    # A 25x1 kernel keeps only runs that are long and flat (i.e. lines).
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns 2 values in some OpenCV versions, 3 in others.
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        cv2.drawContours(image, [c], -1, (255, 255, 255), 2)
    # Repair image
    repair_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 6))
    result = 255 - cv2.morphologyEx(255 - image, cv2.MORPH_CLOSE, repair_kernel, iterations=1)
    return result
def remove_dot_noise(image):
    """Remove small dot noise from a grayscale image via connected components.

    Keeps only components with area >= 8 pixels, inverts back to the original
    polarity, and applies a light erosion.
    """
    _, blackAndWhite = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(blackAndWhite, None, None, None, 4, cv2.CV_32S)
    sizes = stats[1:, -1] # get CC_STAT_AREA component
    img2 = np.zeros((labels.shape), np.uint8)
    # Label 0 is the background; component i corresponds to label i + 1.
    for i in range(0, nlabels - 1):
        if sizes[i] >= 8: # filter small dotted regions
            img2[labels == i + 1] = 255
    res = cv2.bitwise_not(img2)
    kernel = np.ones((3, 3), np.uint8)
    res = cv2.erode(res,kernel, iterations=1)
    return res
| 2,403 | 262 | 137 |
ec1e83b61d34e793bf15866f24a7fc264fe80cfc | 4,096 | py | Python | adv_train/scripts/test_langevin.py | ChiaraRgnz/Adversarial-Training | 637c4d2a1fea5e05294735952146dde2ebc1263c | [
"MIT"
] | null | null | null | adv_train/scripts/test_langevin.py | ChiaraRgnz/Adversarial-Training | 637c4d2a1fea5e05294735952146dde2ebc1263c | [
"MIT"
] | null | null | null | adv_train/scripts/test_langevin.py | ChiaraRgnz/Adversarial-Training | 637c4d2a1fea5e05294735952146dde2ebc1263c | [
"MIT"
] | 1 | 2022-01-31T06:14:41.000Z | 2022-01-31T06:14:41.000Z | # Should test if the langevin actually produces adversarial against a pretrained classifier
from adv_train.launcher import Launcher
from adv_train.model import (
DatasetType,
MnistModel,
CifarModel,
load_classifier,
load_dataset,
)
from adv_train.dynamic import Attacker
from adv_train.dataset import AdversarialDataset
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import tqdm
if __name__ == "__main__":
parser = Attacker.add_arguments()
parser = LangevinAttack.add_arguments(parser)
args = parser.parse_args()
torch.manual_seed(1234)
attack = LangevinAttack(args)
attack.launch()
| 30.117647 | 91 | 0.591797 | # Should test if the langevin actually produces adversarial against a pretrained classifier
from adv_train.launcher import Launcher
from adv_train.model import (
DatasetType,
MnistModel,
CifarModel,
load_classifier,
load_dataset,
)
from adv_train.dynamic import Attacker
from adv_train.dataset import AdversarialDataset
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import tqdm
class LangevinAttack(Launcher):
    """Evaluate an attacker against a pretrained classifier on a test set.

    Loads a classifier and an adversarial copy of the dataset, then runs the
    configured attacker for a number of epochs, tracking error and loss.
    """
    @staticmethod
    def add_arguments(parser=None):
        """Build (or extend) the CLI parser for this launcher.

        The --type choices depend on --dataset, so --dataset is pre-parsed
        with parse_known_args before the model-type argument is added.
        """
        if parser is None:
            parser = argparse.ArgumentParser()
        parser.add_argument(
            "--dataset",
            default=DatasetType.MNIST,
            type=DatasetType,
            choices=DatasetType,
        )
        args, _ = parser.parse_known_args()
        if args.dataset == DatasetType.MNIST:
            parser.add_argument(
                "--type",
                default=MnistModel.MODEL_A,
                type=MnistModel,
                choices=MnistModel,
            )
        elif args.dataset == DatasetType.CIFAR:
            parser.add_argument(
                "--type",
                default=CifarModel.RESNET_18,
                type=CifarModel,
                choices=CifarModel,
            )
        parser.add_argument("--n_epochs", default=1, type=int)
        parser.add_argument("--batch_size", default=100, type=int)
        parser.add_argument("--model_path", default=None, type=str)
        parser.add_argument("--name", default="train_0", type=str)
        parser.add_argument(
            "--model_dir",
            default="/checkpoint/hberard/OnlineAttack/pretained_models",
            type=str,
        )
        parser.add_argument("--n_adv", default=1, type=int)
        return parser
    def __init__(self, args):
        # Fixed seed for reproducibility (also set in the __main__ guard).
        torch.manual_seed(1234)
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # Attack is evaluated on the *test* split, wrapped so each example
        # carries n_adv adversarial copies.
        dataset = load_dataset(args.dataset, train=False)
        self.dataset = AdversarialDataset(dataset, n_adv=args.n_adv)
        self.dataloader = DataLoader(
            self.dataset, batch_size=args.batch_size, shuffle=True, num_workers=4
        )
        self.model = load_classifier(
            args.dataset,
            args.type,
            model_path=args.model_path,
            name=args.name,
            model_dir=args.model_dir,
            device=self.device,
            eval=True,
        )
        self.loss_fn = nn.CrossEntropyLoss()
        self.attacker = Attacker.load_attacker(self.model, args)
        self.n_epochs = args.n_epochs
    def forward(self, x, y, return_pred=False):
        """Classifier forward pass; returns loss (and logits if requested)."""
        pred = self.model(x)
        loss = self.loss_fn(pred, y)
        if return_pred:
            return loss, pred
        return loss
    def epoch_langevin(self):
        """Run one attack epoch; returns (error_rate, mean_loss).

        Each batch's adversarial examples are perturbed, projected back into
        the valid neighbourhood of the clean inputs, and written back into
        the dataset so the attack accumulates across epochs.
        """
        total_loss, total_err = 0.0, 0.0
        for x, y, x_adv, idx in tqdm.tqdm(self.dataloader):
            x, x_adv, y = (
                x.to(self.device),
                x_adv.to(self.device),
                y.to(self.device).long(),
            )
            x_adv = self.attacker.perturb(x_adv, y)
            x_adv = self.attacker.projection(x_adv, x)
            # Sanity check: projection must keep x_adv inside the allowed set.
            if not self.attacker.projection.is_valid(x, x_adv):
                raise ValueError()
            loss, pred = self.forward(x_adv, y, return_pred=True)
            total_err += (pred.max(dim=1)[1] != y).sum().item()
            total_loss += loss.item()
            self.dataset.update_adv(x_adv, idx)
        return total_err / len(self.dataset), total_loss / len(self.dataset)
    def launch(self):
        """Run n_epochs attack epochs, printing error/loss after each."""
        for _ in range(self.n_epochs):
            train_err, train_loss = self.epoch_langevin()
            print(
                "Train error: %.2f%%, Train Loss: %.4f" % (train_err * 100, train_loss)
            )  # TODO: Replace this with a Logger interface
if __name__ == "__main__":
parser = Attacker.add_arguments()
parser = LangevinAttack.add_arguments(parser)
args = parser.parse_args()
torch.manual_seed(1234)
attack = LangevinAttack(args)
attack.launch()
| 3,235 | 162 | 23 |
67d03d4275ac805b177fc497641efceef96e964a | 955 | py | Python | ITE-428/Databases/pracSqlExtra12.py | richeyphu/ITE-428-LAB | c68fdddb7bbfa8b74a58a0b26bdcb4565af3b08b | [
"MIT"
] | 1 | 2021-07-14T11:12:19.000Z | 2021-07-14T11:12:19.000Z | ITE-428/Databases/pracSqlExtra12.py | richeyphu/ITE-428-LAB | c68fdddb7bbfa8b74a58a0b26bdcb4565af3b08b | [
"MIT"
] | null | null | null | ITE-428/Databases/pracSqlExtra12.py | richeyphu/ITE-428-LAB | c68fdddb7bbfa8b74a58a0b26bdcb4565af3b08b | [
"MIT"
] | null | null | null | import sqlite3
if __name__ == '__main__':
dbname = "myDatabase/Sqlite_Northwind.sqlite3"
sales = [int(input("กรอกจำนวนที่สินค้าขายได้มากกว่ากี่ครั้ง : "))]
sqlQuery(dbname, sales)
| 27.285714 | 88 | 0.542408 | import sqlite3
def sqlQuery(db, args=()):
    """Print products that sold more than N times.

    Params:
        db: path to the SQLite database file.
        args: one-element sequence [min_sales]; products with a strictly
            greater sales count are listed via display().

    Note: the default was changed from the mutable `[]` to the immutable
    `()` (a classic Python pitfall); call sites are unaffected.
    """
    with (sqlite3.connect(db)) as conn:
        # Row factory lets display() access columns by name.
        conn.row_factory = sqlite3.Row
        sql_command = '''SELECT p.ProductId, ProductName, count(p.ProductId) as NoSales
                            FROM Products p
                            JOIN OrdersDetails o ON p.ProductId = o.ProductId
                            GROUP BY p.ProductId
                            HAVING NoSales > ?'''
        cursor = conn.execute(sql_command, args).fetchall()
        display(cursor)
def display(cursor):
    """Print the result rows as a small two-column table (id, product name)."""
    print("Found = {}".format(len(cursor)))
    line()
    print("รหัส สินค้า")
    line()
    for row in cursor:
        print("{:<3} {}".format(row['ProductId'], row['ProductName']))
def line():
    """Print a 50-character horizontal rule."""
    rule = '-' * 50
    print(rule)
if __name__ == '__main__':
    dbname = "myDatabase/Sqlite_Northwind.sqlite3"
    # Prompt (Thai): "enter the minimum number of times a product was sold".
    sales = [int(input("กรอกจำนวนที่สินค้าขายได้มากกว่ากี่ครั้ง : "))]
    sqlQuery(dbname, sales)
| 689 | 0 | 75 |
9c9eb60291265597b2eadc48b9e32d0f236e3856 | 1,027 | py | Python | xontrib/ipysh.py | c6401/xontrib-ipython-shell-magic | 933545e7d5f7091ce61d9b115c5386b10450312a | [
"MIT"
] | null | null | null | xontrib/ipysh.py | c6401/xontrib-ipython-shell-magic | 933545e7d5f7091ce61d9b115c5386b10450312a | [
"MIT"
] | 1 | 2021-08-18T16:20:51.000Z | 2021-08-18T16:20:51.000Z | xontrib/ipysh.py | c6401/xontrib-ipython-shell-magic | 933545e7d5f7091ce61d9b115c5386b10450312a | [
"MIT"
] | null | null | null | import re
import subprocess
from IPython.utils.text import SList
__all__ = (
'ish_out',
'ish_run',
)
expression_str = r'\!([^\(\[].*)'
expression = re.compile(expression_str)
assignment = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*' + expression_str)
@events.on_transform_command
| 27.026316 | 114 | 0.591042 | import re
import subprocess
from IPython.utils.text import SList
__all__ = (
'ish_out',
'ish_run',
)
expression_str = r'\!([^\(\[].*)'
expression = re.compile(expression_str)
assignment = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*' + expression_str)
def ish_out(command):
    """Run `command` in a shell and print its captured stdout."""
    result = subprocess.run(command, capture_output=True, shell=True, text=True)
    print(result.stdout)
def ish_run(command):
    """Run `command` in a shell and return stdout lines as an IPython SList."""
    return SList(subprocess.run(f'''{command}''', capture_output=True, shell=True, text=True).stdout.splitlines())
@events.on_transform_command
def ipypreproc(cmd, **kw):
    """Command-transform hook: rewrite `!cmd` lines into Python calls.

    `!cmd` becomes an ish_out(...) call (print output) and `var = !cmd`
    becomes an ish_run(...) assignment (capture output lines).  Each rewrite
    first tries an f-string form, so local variables interpolate into the
    command, and falls back to a plain string when that raises NameError.
    Lines matching neither pattern pass through unchanged.
    """
    if matched := expression.match(cmd):
        cmd, = matched.groups()
        return (
            f"try: ish_out(f''' {cmd} ''')\n"
            f"except NameError: ish_out(''' {cmd} ''')\n"
        )
    elif matched := assignment.match(cmd):
        var, cmd = matched.groups()
        return (
            f"try: {var} = ish_run(f''' {cmd} ''')\n"
            f"except NameError: {var} = ish_run(''' {cmd} ''')\n"
        )
    else:
        return cmd
| 665 | 0 | 68 |
a6c2ae05084aff02094ef04e7173c8bf632f93c6 | 1,011 | py | Python | astrality/tests/module/test_setup_action_block.py | fossabot/astrality | 25666adf3519a7fc8389e43a974eb8e7d1e60b35 | [
"MIT"
] | 111 | 2018-03-19T12:56:35.000Z | 2022-02-05T11:19:04.000Z | astrality/tests/module/test_setup_action_block.py | fossabot/astrality | 25666adf3519a7fc8389e43a974eb8e7d1e60b35 | [
"MIT"
] | 120 | 2018-02-22T11:23:08.000Z | 2021-03-25T22:13:47.000Z | astrality/tests/module/test_setup_action_block.py | fossabot/astrality | 25666adf3519a7fc8389e43a974eb8e7d1e60b35 | [
"MIT"
] | 7 | 2018-04-06T14:28:33.000Z | 2020-03-18T20:25:59.000Z | """Tests for handling setup action blocks in ModuleManager."""
from pathlib import Path
from astrality.module import ModuleManager
def test_that_setup_block_is_only_executed_once(tmpdir):
    """Setup blocks in modules should only be performed once.

    Uses a shell `touch` as an observable side effect: the file must appear
    during the first ModuleManager lifetime and must NOT reappear during a
    second lifetime, showing on_setup state persists across object lifetimes.
    """
    touched = Path(tmpdir, 'touched.tmp')
    modules = {
        'A': {
            'on_setup': {
                'run': {
                    'shell': f'touch {touched}',
                },
            },
        },
    }
    module_manager = ModuleManager(modules=modules)
    # The touched file should not exist before we have done anything
    assert not touched.exists()
    # After finishing tasks, the file should be touched
    module_manager.finish_tasks()
    assert touched.exists()
    # We now create a new object lifetime
    del module_manager
    touched.unlink()
    # The setup block should now *not* be executed
    module_manager = ModuleManager(modules=modules)
    module_manager.finish_tasks()
    assert not touched.exists()
| 27.324324 | 68 | 0.644906 | """Tests for handling setup action blocks in ModuleManager."""
from pathlib import Path
from astrality.module import ModuleManager
def test_that_setup_block_is_only_executed_once(tmpdir):
"""Setup blocks in modules should only be performed once."""
touched = Path(tmpdir, 'touched.tmp')
modules = {
'A': {
'on_setup': {
'run': {
'shell': f'touch {touched}',
},
},
},
}
module_manager = ModuleManager(modules=modules)
# The touched file should not exist before we have done anything
assert not touched.exists()
# After finishing tasks, the file should be touched
module_manager.finish_tasks()
assert touched.exists()
# We now create a new object lifetime
del module_manager
touched.unlink()
# The setup block should now *not* be executed
module_manager = ModuleManager(modules=modules)
module_manager.finish_tasks()
assert not touched.exists()
| 0 | 0 | 0 |
8a3f2cb9064e15f830500157c18768b3d4e91e6d | 6,934 | py | Python | examples/pytorch/kg_completion/spodernet/backends/torchmodels.py | stjordanis/graph4nlp | c6ebde32bc77d3a7b78f86a93f19b1c057963ffa | [
"Apache-2.0"
] | 121 | 2021-06-06T02:26:16.000Z | 2022-03-24T09:59:09.000Z | examples/pytorch/kg_completion/spodernet/backends/torchmodels.py | stjordanis/graph4nlp | c6ebde32bc77d3a7b78f86a93f19b1c057963ffa | [
"Apache-2.0"
] | 9 | 2021-06-07T09:45:02.000Z | 2022-03-29T07:21:28.000Z | examples/pytorch/kg_completion/spodernet/backends/torchmodels.py | stjordanis/graph4nlp | c6ebde32bc77d3a7b78f86a93f19b1c057963ffa | [
"Apache-2.0"
] | 25 | 2021-06-06T02:26:28.000Z | 2022-03-17T13:20:44.000Z | from torch.nn import LSTM
from torch.autograd import Variable
import torch
import torch.nn.functional as F
from spodernet.frontend import AbstractModel
from spodernet.utils.global_config import Config
| 38.73743 | 150 | 0.638881 | from torch.nn import LSTM
from torch.autograd import Variable
import torch
import torch.nn.functional as F
from spodernet.frontend import AbstractModel
from spodernet.utils.global_config import Config
class TorchEmbedding(torch.nn.Module, AbstractModel):
    """Embedding lookup for the 'input' and/or 'support' index sequences."""
    def __init__(self, embedding_size, num_embeddings):
        super(TorchEmbedding, self).__init__()
        # Index 0 is reserved as padding and always embeds to zeros.
        self.emb= torch.nn.Embedding(num_embeddings,
                embedding_size, padding_idx=0)#, scale_grad_by_freq=True, padding_idx=0)
    def forward(self, str2var, *args):
        """Embed whichever of 'input'/'support' is present; returns a list."""
        self.expected_str2var_keys_oneof(str2var, ['input', 'support'])
        self.expected_args('None', 'None')
        self.generated_outputs('input idx, support idx', 'both sequences have shape = [batch, timesteps, embedding dim]')
        embedded_results = []
        # Order is fixed: embedded input (if any) first, then support.
        if 'input' in str2var:
            embedded_results.append(self.emb(str2var['input']))
        if 'support' in str2var:
            embedded_results.append(self.emb(str2var['support']))
        return embedded_results
class TorchBiDirectionalLSTM(torch.nn.Module, AbstractModel):
    """Single (bi)directional LSTM with preallocated zero initial states."""
    def __init__(self, input_size, hidden_size,
            dropout=0.0, layers=1,
            bidirectional=True, to_cuda=False, conditional_encoding=True):
        super(TorchBiDirectionalLSTM, self).__init__()
        use_bias = True
        num_directions = (1 if not bidirectional else 2)
        # NOTE(review): the LSTM is built with a hard-coded dropout of 0.2;
        # the `dropout` constructor parameter is ignored -- confirm intent.
        self.lstm = LSTM(input_size,hidden_size,layers,
                use_bias,True,0.2,bidirectional)
        # states of both LSTMs
        # Initial (h0, c0), sized for Config.batch_size; zeroed each forward.
        self.h0 = None
        self.c0 = None
        self.h0 = Variable(torch.FloatTensor(num_directions*layers, Config.batch_size, hidden_size))
        self.c0 = Variable(torch.FloatTensor(num_directions*layers, Config.batch_size, hidden_size))
        if Config.cuda:
            self.h0 = self.h0.cuda()
            self.c0 = self.c0.cuda()
    def forward(self, str2var, *args):
        """Run the LSTM over the embedded sequence; returns [output, hidden]."""
        self.expected_str2var_keys(str2var, [])
        self.expected_args('embedded seq', 'size [batch, time steps, embedding dim]')
        self.generated_outputs('LSTM output seq', 'size [batch, time steps, 2x hidden dim]')
        # NOTE(review): `args` is a tuple, so `seq` is the whole tuple here
        # (not an unpacked tensor as in the paired variant) -- confirm callers.
        seq = args
        self.h0.data.zero_()
        self.c0.data.zero_()
        out, hid = self.lstm(seq, (self.h0, self.c0))
        return [out, hid]
class TorchPairedBiDirectionalLSTM(torch.nn.Module, AbstractModel):
    """Two (bi)directional LSTMs for an input/support sequence pair.

    With conditional_encoding=True, the second LSTM is initialized with the
    first LSTM's final hidden state; otherwise both start from zero states.
    """
    def __init__(self, input_size, hidden_size,
            dropout=0.0, layers=1,
            bidirectional=True, to_cuda=False, conditional_encoding=True):
        super(TorchPairedBiDirectionalLSTM, self).__init__()
        self.conditional_encoding = conditional_encoding
        use_bias = True
        num_directions = (1 if not bidirectional else 2)
        # NOTE(review): duplicate assignment of self.conditional_encoding;
        # also the `dropout` parameter is ignored in favor of Config.dropout.
        self.conditional_encoding = conditional_encoding
        self.lstm1 = LSTM(input_size,hidden_size,layers,
                use_bias,True,Config.dropout,bidirectional)
        self.lstm2 = LSTM(input_size,hidden_size,layers,
                use_bias,True,Config.dropout,bidirectional)
        # states of both LSTMs
        # Second LSTM only needs its own initial state when NOT conditioning
        # on the first LSTM's final state.
        self.h01 = None
        self.c01 = None
        self.h02 = None
        self.c02 = None
        self.h01 = Variable(torch.FloatTensor(num_directions*layers, Config.batch_size, hidden_size))
        self.c01 = Variable(torch.FloatTensor(num_directions*layers, Config.batch_size, hidden_size))
        if Config.cuda:
            self.h01 = self.h01.cuda()
            self.c01 = self.c01.cuda()
        if not self.conditional_encoding:
            self.h02 = Variable(torch.FloatTensor(num_directions*layers, Config.batch_size, hidden_size))
            self.c02 = Variable(torch.FloatTensor(num_directions*layers, Config.batch_size, hidden_size))
            if Config.cuda:
                self.h02 = self.h02.cuda()
                self.c02 = self.c02.cuda()
    def forward(self, str2var, *args):
        """Encode both sequences; returns [output_seq1, output_seq2]."""
        self.expected_str2var_keys(str2var, [])
        self.expected_args('embedded input seq, embedded seq support', 'both of size [batch, time steps, embedding dim]')
        self.generated_outputs('LSTM output seq inputs, LSTM output seq support', 'both of size [batch, time steps, 2x hidden dim]')
        seq1, seq2 = args
        if self.conditional_encoding:
            self.h01.data.zero_()
            self.c01.data.zero_()
            out1, hid1 = self.lstm1(seq1, (self.h01, self.c01))
            # Condition the support encoder on the input encoder's final state.
            out2, hid2 = self.lstm2(seq2, hid1)
        else:
            self.h01.data.zero_()
            self.c01.data.zero_()
            self.h02.data.zero_()
            self.c02.data.zero_()
            out1, hid1 = self.lstm1(seq1, (self.h01, self.c01))
            out2, hid2 = self.lstm2(seq2, (self.h02, self.c02))
        return [out1, out2]
class TorchVariableLengthOutputSelection(torch.nn.Module, AbstractModel):
    """Select each example's last valid timestep from two LSTM output
    sequences (per-example lengths) and concatenate them.

    Output: [batch, 4 x hidden] (2 directions x 2 sequences).
    """
    def __init__(self):
        super(TorchVariableLengthOutputSelection, self).__init__()
        # Kept for interface compatibility; masks are rebuilt every forward
        # pass because their shape follows the (variable) sequence lengths.
        self.b1 = None
        self.b2 = None
    def forward(self, str2var, *args):
        """Gather last-timestep outputs per example and concatenate.

        Bugfix: the original guarded mask creation with `self.b1 == None`
        (identity test via `==`); since the attribute was never reassigned
        the branch always ran, and had it ever been skipped, the local
        `b1`/`b2` would have been unbound (NameError). Masks are now built
        unconditionally, which matches the original's actual behavior.
        """
        self.expected_str2var_keys(str2var, ['input_length', 'support_length'])
        self.expected_args('LSTM output sequence input , LSTM output sequence support', 'dimension of both: [batch, time steps, 2x LSTM hidden size]')
        self.generated_outputs('stacked bidirectional outputs of last timestep', 'dim is [batch_size, 4x hidden size]')
        output_lstm1, output_lstm2 = args
        l1, l2 = str2var['input_length'], str2var['support_length']
        # Boolean masks selecting position (length - 1) for every example.
        b1 = torch.ByteTensor(output_lstm1.size())
        b2 = torch.ByteTensor(output_lstm2.size())
        if Config.cuda:
            b1 = b1.cuda()
            b2 = b2.cuda()
        b1.fill_(0)
        for i, num in enumerate(l1.data):
            b1[i,num-1,:] = 1
        out1 = output_lstm1[b1].view(Config.batch_size, -1)
        b2.fill_(0)
        for i, num in enumerate(l2.data):
            b2[i,num-1,:] = 1
        out2 = output_lstm2[b2].view(Config.batch_size, -1)
        out = torch.cat([out1,out2], 1)
        return [out]
class TorchSoftmaxCrossEntropy(torch.nn.Module, AbstractModel):
    """Linear projection to label logits, log-softmax, and NLL loss."""
    def __init__(self, input_dim, num_labels):
        super(TorchSoftmaxCrossEntropy, self).__init__()
        self.num_labels = num_labels
        self.projection_to_labels = torch.nn.Linear(input_dim, num_labels)
    def forward(self, str2var, *args):
        """Compute logits, loss against str2var['target'], and argmax preds."""
        self.expected_str2var_keys(str2var, ['target'])
        self.expected_args('some inputs', 'dimension: [batch, any]')
        self.generated_outputs('logits, loss, argmax', 'dimensions: logits = [batch, labels], loss = 1x1, argmax = [batch, 1]')
        outputs_prev_layer = args[0]
        t = str2var['target']
        logits = self.projection_to_labels(outputs_prev_layer)
        # NOTE(review): log_softmax is called without an explicit `dim`;
        # newer PyTorch warns and infers it -- for [batch, labels] logits
        # the intended dim is 1.
        out = F.log_softmax(logits)
        loss = F.nll_loss(out, t)
        # top-1 over log-probabilities = predicted label per example.
        maximum, argmax = torch.topk(out.data, 1)
        return [logits, loss, argmax]
| 6,135 | 212 | 381 |
3af0e54e86b1cd1c97fe3918f5e4a671bc620564 | 1,413 | py | Python | tests/api/test_chats.py | evansloan/Groupy | 33f81a6cfe5b76f21680d5ee9451be1d0bf506ad | [
"Apache-2.0"
] | 65 | 2015-07-27T20:54:07.000Z | 2022-02-09T01:20:09.000Z | tests/api/test_chats.py | evansloan/Groupy | 33f81a6cfe5b76f21680d5ee9451be1d0bf506ad | [
"Apache-2.0"
] | 51 | 2015-04-20T00:22:09.000Z | 2021-09-18T20:31:53.000Z | tests/api/test_chats.py | evansloan/Groupy | 33f81a6cfe5b76f21680d5ee9451be1d0bf506ad | [
"Apache-2.0"
] | 29 | 2015-05-30T18:02:38.000Z | 2020-02-20T22:33:17.000Z | import unittest
from unittest import mock
from groupy import pagers
from groupy.api import chats
| 28.26 | 77 | 0.626327 | import unittest
from unittest import mock
from groupy import pagers
from groupy.api import chats
class ChatsTests(unittest.TestCase):
    """Shared fixture: a chats.Chats manager backed by a mocked session."""
    def setUp(self):
        self.m_session = mock.Mock()
        self.chats = chats.Chats(self.m_session)
class ListChatsTests(ChatsTests):
    """Tests for Chats.list(): result element types and pager type."""
    def setUp(self):
        super().setUp()
        # Minimal chat payload as returned by the API for a direct chat.
        m_chat = {
            'other_user': {'id': 42},
            'created_at': 123457890,
            'updated_at': 123457891,
        }
        self.m_session.get.return_value = mock.Mock(data=[m_chat])
        self.results = self.chats.list()
    def test_results_contains_chats(self):
        self.assertTrue(all(isinstance(c, chats.Chat) for c in self.results))
    def test_results_is_a_ChatList(self):
        self.assertTrue(isinstance(self.results, pagers.ChatList))
class ChatTests(unittest.TestCase):
    """Tests for the Chat model: repr format and post() delegation."""
    def setUp(self):
        self.m_manager = mock.Mock()
        m_chat = {
            'other_user': {'id': 42, 'name': 'foo'},
            'created_at': 123457890,
            'updated_at': 123457891,
        }
        self.chat = chats.Chat(self.m_manager, **m_chat)
    def test_repr(self):
        representation = repr(self.chat)
        self.assertEqual(representation, "<Chat(other_user='foo')>")
    def test_post_uses_create(self):
        # post() should delegate to the messages manager's create().
        self.chat.messages = mock.Mock()
        self.chat.post()
        self.assertTrue(self.chat.messages.create.called)
| 1,016 | 41 | 255 |
2eb9b1394a4644af3c8cacdca8f50c5ceeb578e7 | 26,419 | py | Python | experiments/higgs/higgs/source/runtime/layout.py | fabianmuehlboeck/monnom | 609b307d9fa9e6641443f34dbcc0b035b34d3044 | [
"BSD-3-Clause",
"MIT"
] | 2 | 2021-11-16T20:58:02.000Z | 2021-12-05T18:15:41.000Z | experiments/higgs/higgs/source/runtime/layout.py | fabianmuehlboeck/monnom | 609b307d9fa9e6641443f34dbcc0b035b34d3044 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | experiments/higgs/higgs/source/runtime/layout.py | fabianmuehlboeck/monnom | 609b307d9fa9e6641443f34dbcc0b035b34d3044 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | import sys
import string
from copy import deepcopy
D_OUT_FILE = 'runtime/layout.d'
JS_OUT_FILE = 'runtime/layout.js'
JS_DEF_PREFIX = '$rt_'
# Type sizes in bytes
typeSize = {
'uint8':1,
'uint16':2,
'uint32':4,
'uint64':8,
'int8':1,
'int16':2,
'int32':4,
'int64':8,
'float64':8,
'rawptr':8,
'refptr':8,
'funptr':8,
'shapeptr':8,
}
typeShortName = {
'uint8':'u8',
'uint16':'u16',
'uint32':'u32',
'uint64':'u64',
'int8':'i8',
'int16':'i16',
'int32':'i32',
'int64':'i64',
'float64':'f64',
'rawptr':'rawptr',
'refptr':'refptr',
'funptr':'funptr',
'shapeptr':'shapeptr',
}
# Layout declarations
layouts = [
# String layout
{
'name':'str',
'tag':'string',
'fields':
[
# String length
{ 'name': "len" , 'tag':'uint32' },
# Hash code
{ 'name': 'hash', 'tag':'uint32' },
# UTF-16 character data
{ 'name': 'data', 'tag':'uint16', 'szField':'len' }
]
},
# String table layout (for hash consing)
{
'name':'strtbl',
'tag':'refptr',
'fields':
[
# Capacity, total number of slots
{ 'name':'cap', 'tag':'uint32' },
# Number of strings
{ 'name':'num_strs', 'tag':'uint32', 'init':"0" },
# Array of strings
{ 'name':'str', 'tag':'refptr', 'szField':'cap', 'init':'null' },
]
},
# Rope
{
'name':'rope',
'tag':'rope',
'fields':
[
# Total length
{ 'name':'len', 'tag':'uint32' },
# Left string (rope or string)
{ 'name':'left', 'tag':'refptr' },
# Right string (always a string)
{ 'name':'right', 'tag':'refptr' },
]
},
# Object layout
{
'name':'obj',
'tag':'object',
'fields':
[
# Capacity, number of property slots
{ 'name':"cap" , 'tag':"uint32" },
# Object shape index
{ 'name':"shape_idx", 'tag':"uint32" },
# Property words
{ 'name':"word", 'tag':"uint64", 'szField':"cap", 'tpField':'tag' },
# Property types
{ 'name':"tag", 'tag':"uint8", 'szField':"cap" }
]
},
# Function/closure layout (extends object)
{
'name':'clos',
'tag':'closure',
'extends':'obj',
'fields':
[
# Note: the function pointer is stored in the first object slot
# Number of closure cells
{ 'name':"num_cells" , 'tag':"uint32" },
# Closure cell pointers
{ 'name':"cell", 'tag':"refptr", 'szField':"num_cells", 'init':"null" },
]
},
# Closure cell
{
'name':'cell',
'tag':'refptr',
'fields':
[
# Value word
{ 'name':"word", 'tag':"uint64", 'init':'undef_word', 'tpField':'tag' },
# Value type
{ 'name':"tag", 'tag':"uint8", 'init':'undef_type' },
]
},
# Array layout (extends object)
{
'name':'arr',
'tag':'array',
'extends':'obj',
'fields':
[
]
},
# Array table layout (contains array elements)
{
'name':'arrtbl',
'tag':'refptr',
'fields':
[
# Array capacity
{ 'name':"cap" , 'tag':"uint32" },
# Element words
{ 'name':"word", 'tag':"uint64", 'szField':"cap", 'tpField':'tag' },
# Element types
{ 'name':"tag", 'tag':"uint8", 'szField':"cap" },
]
},
]
# NOTE(review): a text-indent helper this comment once described is not
# present in this chunk of the file.
# Perform basic validation
for layout in layouts:
    # Check for duplicate field names -- names must be unique per layout
    # because they become the generated accessor-function names below.
    for fieldIdx, field in enumerate(layout['fields']):
        for prev in layout['fields'][:fieldIdx]:
            if prev['name'] == field['name']:
                raise Exception('duplicate field name ' + field['name'])
# Perform layout extensions
# A layout with an 'extends' key inherits deep copies of its parent's fields
# (prepended), so later per-layout mutation never aliases the parent's dicts.
for layoutIdx, layout in enumerate(layouts):
    # If this layout does not extend another, skip it
    if 'extends' not in layout:
        continue
    # Find the parent layout
    # Only earlier layouts are searched: parents must be declared first.
    parent = None
    for prev in layouts[:layoutIdx]:
        if prev['name'] == layout['extends']:
            parent = prev
            break
    # NOTE(review): prefer `parent is None` for None checks.
    if parent == None:
        raise Exception("parent not found")
    # Add the parent fields (except type) to this layout
    fieldCopies = []
    for field in parent['fields']:
        fieldCopies += [deepcopy(field)]
    layout['fields'] = fieldCopies + layout['fields']
# Assign layout ids, add the next and header fields
# Every layout is given a sequential numeric id ('typeId') and two fields
# prepended to its field list: a 'next' reference pointer (initialized to
# null) and a 'header' word initialized to the layout id.
nextLayoutId = 0
for layout in layouts:
    layoutId = nextLayoutId
    layout['typeId'] = layoutId
    nextLayoutId += 1
    nextField = [{ 'name':'next', 'tag':'refptr', 'init':"null" }]
    typeField = [{ 'name':'header', 'tag':'uint32', 'init':str(layoutId) }]
    layout['fields'] = nextField + typeField + layout['fields']
# Find/resolve size fields
# Replace each field's 'szField' *name* with a reference to the actual field
# dict, and collect the distinct size fields per layout under 'szFields'.
for layout in layouts:
    # List of size fields for this layout
    layout['szFields'] = []
    for fieldIdx, field in enumerate(layout['fields']):
        # If this field has no size field, skip it
        if 'szField' not in field:
            continue
        # Find the size field and add it to the size field list
        # Only *preceding* fields are searched: a dynamic field's size must
        # be stored before the field itself.
        szName = field['szField']
        field['szField'] = None
        for prev in layout['fields'][:fieldIdx]:
            if prev['name'] == szName:
                field['szField'] = prev
                # Add the field to the size field list
                if prev not in layout['szFields']:
                    layout['szFields'] += [prev]
                break
        # If the size field was not found, raise an exception
        if field['szField'] == None:
            raise Exception('size field "%s" of "%s" not found' % (szName, field['name']))
# Find/resolve word type fields
# Replace each field's 'tpField' *name* with a reference to the actual field
# dict.  Unlike size fields, the whole field list is searched, since a type
# field may be declared after the word field it describes (e.g. obj's 'tag').
for layout in layouts:
    for field in layout['fields']:
        # If this field has no type field, skip it
        if 'tpField' not in field:
            continue
        # Find the type field
        tpName = field['tpField']
        field['tpField'] = None
        for prev in layout['fields']:
            if prev['name'] == tpName:
                field['tpField'] = prev
        # If the type field was not found, raise an exception
        if field['tpField'] == None:
            raise Exception('type field "%s" of "%s" not found' % (tpName, field['name']))
# Compute field alignment requirements
# For each field, record either static padding ('alignPad') relative to the
# running offset, or mark it 'dynAlign' when it follows a dynamically sized
# field of a *smaller* element type -- its offset is then only known at run
# time and must be rounded up to pointer alignment there.
for layout in layouts:
    #print('');
    #print(layout['name'])
    # Current offset since last dynamic alignment
    curOfs = 0
    # For each field of this layout
    for fieldIdx, field in enumerate(layout['fields']):
        # Field type size
        fSize = typeSize[field['tag']]
        # If the previous field was dynamically sized and of smaller type size
        if fieldIdx > 0 and 'szField' in layout['fields'][fieldIdx-1] and \
           typeSize[layout['fields'][fieldIdx-1]['tag']] < fSize:
            # This field will be dynamically aligned
            field['dynAlign'] = True
            field['alignPad'] = 0
            # Reset the current offset
            # (offsets after this point are relative to the dynamic boundary)
            curOfs = 0
        else:
            # Compute the padding required for alignment
            # Pad up to the next multiple of the field's own type size.
            alignRem = curOfs % fSize
            if alignRem != 0:
                alignPad = fSize - alignRem
            else:
                alignPad = 0
            field['dynAlign'] = False
            field['alignPad'] = alignPad
            # Update the current offset
            curOfs += alignPad + fSize
        #print(field['name'])
        #print('  fSize: ' + str(fSize))
        #print('  align: ' + str(field['alignPad']))
# List of generated functions and declarations
decls = []

# For each layout, generate the accessor/size/alloc/GC-visit functions
for layout in layouts:

    ofsPref = layout['name'] + '_ofs_';
    setPref = layout['name'] + '_set_';
    getPref = layout['name'] + '_get_';

    # Define the layout type constant
    decls += [ConstDef(
        'uint32',
        'LAYOUT_' + layout['name'].upper(),
        layout['typeId']
    )]

    # Generate offset computation functions
    # (offset of a field = sum of sizes/padding of all preceding fields;
    # variable-size fields multiply their element size by a count read
    # back from the object itself)
    for fieldIdx, field in enumerate(layout['fields']):
        fun = Function('uint32', ofsPref + field['name'], [Var('refptr', 'o')])
        if 'szField' in field:
            fun.params += [Var('uint32', 'i')]
        sumExpr = Cst(0)
        for prev in layout['fields'][:fieldIdx]:
            # If this field must be dynamically aligned
            if prev['dynAlign']:
                ptrSize = typeSize['rawptr']
                sumExpr = AndExpr(AddExpr(sumExpr, Cst(ptrSize - 1)), Cst(-ptrSize))
            elif prev['alignPad'] > 0:
                sumExpr = AddExpr(sumExpr, Cst(prev['alignPad']))
            # Compute the previous field size
            termExpr = Cst(typeSize[prev['tag']])
            if 'szField' in prev:
                szCall = CallExpr(getPref + prev['szField']['name'], [fun.params[0]])
                termExpr = MulExpr(termExpr, szCall)
            sumExpr = AddExpr(sumExpr, termExpr)
        # If this field must be dynamically aligned
        if field['dynAlign']:
            ptrSize = typeSize['rawptr']
            sumExpr = AndExpr(AddExpr(sumExpr, Cst(ptrSize - 1)), Cst(-ptrSize))
        elif field['alignPad'] > 0:
            sumExpr = AddExpr(sumExpr, Cst(field['alignPad']))
        # Compute the index into the last field
        if 'szField' in field:
            fieldSize = Cst(typeSize[field['tag']])
            sumExpr = AddExpr(sumExpr, MulExpr(fieldSize , fun.params[1]))
        fun.stmts += [RetStmt(sumExpr)]
        decls += [fun]

    # Generate getter methods
    for fieldIdx, field in enumerate(layout['fields']):
        fun = Function(field['tag'], getPref + field['name'], [Var('refptr', 'o')])
        if 'szField' in field:
            fun.params += [Var('uint32', 'i')]
        ofsCall = CallExpr(ofsPref + field['name'], [fun.params[0]])
        if 'szField' in field:
            ofsCall.args += [fun.params[1]]
        fun.stmts += [RetStmt(LoadExpr(field['tag'], fun.params[0], ofsCall))]
        decls += [fun]

    # Generate setter methods
    for fieldIdx, field in enumerate(layout['fields']):
        fun = Function('void', setPref + field['name'], [Var('refptr', 'o')])
        if 'szField' in field:
            fun.params += [Var('uint32', 'i')]
        fun.params += [Var(field['tag'], 'v')]
        ofsCall = CallExpr(ofsPref + field['name'], [fun.params[0]])
        if 'szField' in field:
            ofsCall.args += [fun.params[1]]
        fun.stmts += [ExprStmt(StoreExpr(field['tag'], fun.params[0], ofsCall, fun.params[-1]))]
        decls += [fun]

    # Generate the layout size computation function
    # (takes the count of each variable-size field as a parameter)
    fun = Function('uint32', layout['name'] + '_comp_size', [])
    szVars = {}
    for szField in layout['szFields']:
        szVar = Var(szField['tag'], szField['name'])
        szVars[szVar.name] = szVar
        fun.params += [szVar]
    szSum = Cst(0)
    for field in layout['fields']:
        # If this field must be dynamically aligned
        if field['dynAlign']:
            ptrSize = typeSize['rawptr']
            szSum = AndExpr(AddExpr(szSum, Cst(ptrSize - 1)), Cst(-ptrSize))
        elif field['alignPad'] > 0:
            szSum = AddExpr(szSum, Cst(field['alignPad']))
        szTerm = Cst(typeSize[field['tag']])
        if 'szField' in field:
            szTerm = MulExpr(szTerm, szVars[field['szField']['name']])
        szSum = AddExpr(szSum, szTerm)
    fun.stmts += [RetStmt(szSum)]
    decls += [fun]

    # Generate the sizeof method
    # (reads the size-field counts from an existing object and delegates
    # to the _comp_size function generated above)
    fun = Function('uint32', layout['name'] + '_sizeof', [Var('refptr', 'o')])
    callExpr = CallExpr(layout['name'] + '_comp_size', [])
    for szField in layout['szFields']:
        getCall = CallExpr(getPref + szField['name'], [fun.params[0]])
        callExpr.args += [getCall]
    fun.stmts += [RetStmt(callExpr)]
    decls += [fun]

    # Generate the allocation function
    fun = Function('refptr', layout['name'] + '_alloc', [Var('VM', 'vm')])
    szVars = {}
    for szField in layout['szFields']:
        szVar = Var(szField['tag'], szField['name'])
        szVars[szVar.name] = szVar
        fun.params += [szVar]
    szCall = CallExpr(layout['name'] + '_comp_size', [])
    for szField in layout['szFields']:
        szCall.args += [szVars[szField['name']]]
    objVar = Var('refptr', 'o')
    fun.stmts += [DeclStmt(objVar, AllocExpr(szCall, layout['tag']))]
    for szField in layout['szFields']:
        setCall = CallExpr(setPref + szField['name'], [objVar, szVars[szField['name']]])
        fun.stmts += [ExprStmt(setCall)]
    for field in layout['fields']:
        if 'init' not in field:
            continue
        initVal = field['init']
        # Some init values map to zero and do not need to be written
        # (freshly allocated memory is assumed zeroed -- confirm against
        # the runtime allocator)
        if initVal == '0':
            continue
        if initVal == 'null':
            continue
        if initVal == 'undef_type':
            continue
        if 'szField' in field:
            loopVar = Var('uint32', 'i')
            szVar = szVars[field['szField']['name']]
            setCall = CallExpr(setPref + field['name'], [objVar, loopVar, Cst(field['init'])])
            fun.stmts += [ForLoop(loopVar, szVar, [ExprStmt(setCall)])]
        else:
            setCall = CallExpr(setPref + field['name'], [objVar, Cst(field['init'])])
            fun.stmts += [ExprStmt(setCall)]
    fun.stmts += [RetStmt(objVar)]
    decls += [fun]

    # Generate the GC visit function
    # (forwards every heap-reference field and every word/type pair
    # through gcForward, writing the forwarded value back)
    fun = Function('void', layout['name'] + '_visit_gc', [Var('VM', 'vm'), Var('refptr', 'o')])
    vmVar = fun.params[0]
    objVar = fun.params[1]
    for field in layout['fields']:
        # If this is not a heap reference field, skip it
        if field['tag'] != 'refptr' and (not 'tpField' in field):
            continue
        # If this is a variable-size field
        if 'szField' in field:
            szVar = Var('uint32', field['szField']['name'])
            szStmt = DeclStmt(szVar, CallExpr(getPref + field['szField']['name'], [objVar]))
            fun.stmts += [szStmt]
            loopVar = Var('uint32', 'i')
            # If this is a word/type pair
            if 'tpField' in field:
                getWCall = CallExpr(getPref + field['name'], [objVar, loopVar])
                getTCall = CallExpr(getPref + field['tpField']['name'], [objVar, loopVar])
                fwdCall = CallExpr('gcForward', [vmVar, getWCall, getTCall])
            else:
                getCall = CallExpr(getPref + field['name'], [objVar, loopVar])
                fwdCall = CallExpr('gcForward', [vmVar, getCall])
            setCall = CallExpr(setPref + field['name'], [objVar, loopVar, fwdCall])
            fun.stmts += [ForLoop(loopVar, szVar, [ExprStmt(setCall)])]
        else:
            # If this is a word/type pair
            if 'tpField' in field:
                getWCall = CallExpr(getPref + field['name'], [objVar])
                getTCall = CallExpr(getPref + field['tpField']['name'], [objVar])
                fwdCall = CallExpr('gcForward', [vmVar, getWCall, getTCall])
            else:
                getCall = CallExpr(getPref + field['name'], [objVar])
                fwdCall = CallExpr('gcForward', [vmVar, getCall])
            setCall = CallExpr(setPref + field['name'], [objVar, fwdCall])
            fun.stmts += [ExprStmt(setCall)]
    decls += [fun]
# Generate the sizeof dispatch method
# (switches on the object's header word, which holds the layout type id)
fun = Function('uint32', 'layout_sizeof', [Var('refptr', 'o')])
typeVar = Var('uint32', 't')
fun.stmts += [DeclStmt(typeVar, CallExpr('obj_get_header', [fun.params[0]]))]
for layout in layouts:
    cmpExpr = EqExpr(typeVar, Var('uint32', 'LAYOUT_' + layout['name'].upper()))
    retStmt = RetStmt(CallExpr(layout['name'] + '_sizeof', [fun.params[0]]))
    fun.stmts += [IfStmt(cmpExpr, [retStmt])]
fun.stmts += [ExprStmt(CallExpr('assert', [Cst('false'), Cst('"invalid layout in layout_sizeof"')]))]
decls += [fun]

# Generate the GC visit dispatch method
fun = Function('void', 'layout_visit_gc', [Var('VM', 'vm'), Var('refptr', 'o')])
typeVar = Var('uint32', 't')
fun.stmts += [DeclStmt(typeVar, CallExpr('obj_get_header', [fun.params[1]]))]
for layout in layouts:
    cmpExpr = EqExpr(typeVar, Var('uint32', 'LAYOUT_' + layout['name'].upper()))
    callStmt = ExprStmt(CallExpr(layout['name'] + '_visit_gc', [fun.params[0], fun.params[1]]))
    retStmt = RetStmt()
    fun.stmts += [IfStmt(cmpExpr, [callStmt, retStmt])]
fun.stmts += [ExprStmt(CallExpr('assert', [Cst('false'), Cst('"invalid layout in layout_visit_gc"')]))]
decls += [fun]
# Open the output files for writing
DFile = open(D_OUT_FILE, 'w')
JSFile = open(JS_OUT_FILE, 'w')

comment = \
'//\n' + \
'// Code auto-generated from "' + sys.argv[0] + '". Do not modify.\n' + \
'//\n\n'

DFile.write(comment)
JSFile.write(comment)

# D module header: imports plus aliases matching the tag names used above
DFile.write('module runtime.layout;\n')
DFile.write('\n');
DFile.write('import runtime.vm;\n')
DFile.write('import runtime.gc;\n')
DFile.write('\n');
DFile.write('alias ubyte* funptr;\n');
DFile.write('alias ubyte* shapeptr;\n');
DFile.write('alias ubyte* rawptr;\n');
DFile.write('alias ubyte* refptr;\n');
DFile.write('alias byte int8;\n');
DFile.write('alias short int16;\n');
DFile.write('alias int int32;\n');
DFile.write('alias long int64;\n');
DFile.write('alias ubyte uint8;\n');
DFile.write('alias ushort uint16;\n');
DFile.write('alias uint uint32;\n');
DFile.write('alias ulong uint64;\n');
DFile.write('alias double float64;\n');
DFile.write('\n');

# Output D and JS code, write to file
for decl in decls:
    JSFile.write(decl.genJS() + '\n\n')
    DFile.write(decl.genD() + '\n\n')

DFile.close()
JSFile.close()
| 28.105319 | 137 | 0.523828 | import sys
import string
from copy import deepcopy
# Output paths for the generated D and JS sources
D_OUT_FILE = 'runtime/layout.d'
JS_OUT_FILE = 'runtime/layout.js'

# Prefix prepended to every name generated on the JS side
JS_DEF_PREFIX = '$rt_'

# Type sizes in bytes
typeSize = {
    'uint8':1,
    'uint16':2,
    'uint32':4,
    'uint64':8,
    'int8':1,
    'int16':2,
    'int32':4,
    'int64':8,
    'float64':8,
    'rawptr':8,
    'refptr':8,
    'funptr':8,
    'shapeptr':8,
}

# Short type codes used to name the JS load/store intrinsics
# (e.g. '$ir_load_u32')
typeShortName = {
    'uint8':'u8',
    'uint16':'u16',
    'uint32':'u32',
    'uint64':'u64',
    'int8':'i8',
    'int16':'i16',
    'int32':'i32',
    'int64':'i64',
    'float64':'f64',
    'rawptr':'rawptr',
    'refptr':'refptr',
    'funptr':'funptr',
    'shapeptr':'shapeptr',
}
# Layout declarations
#
# Each layout is a dict with:
#   name    - prefix used for all generated accessor functions
#   tag     - allocation tag passed to the generated alloc expression
#   extends - optional: name of a parent layout whose fields are prepended
#   fields  - list of field dicts:
#       name    - field name
#       tag     - field type (a key of typeSize)
#       szField - for variable-size fields, name of the field holding the count
#       tpField - name of the companion type-tag field for word/type pairs
#       init    - optional initial value written by the generated _alloc
layouts = [

    # String layout
    {
        'name':'str',
        'tag':'string',
        'fields':
        [
            # String length
            { 'name': "len" , 'tag':'uint32' },

            # Hash code
            { 'name': 'hash', 'tag':'uint32' },

            # UTF-16 character data
            { 'name': 'data', 'tag':'uint16', 'szField':'len' }
        ]
    },

    # String table layout (for hash consing)
    {
        'name':'strtbl',
        'tag':'refptr',
        'fields':
        [
            # Capacity, total number of slots
            { 'name':'cap', 'tag':'uint32' },

            # Number of strings
            { 'name':'num_strs', 'tag':'uint32', 'init':"0" },

            # Array of strings
            { 'name':'str', 'tag':'refptr', 'szField':'cap', 'init':'null' },
        ]
    },

    # Rope
    {
        'name':'rope',
        'tag':'rope',
        'fields':
        [
            # Total length
            { 'name':'len', 'tag':'uint32' },

            # Left string (rope or string)
            { 'name':'left', 'tag':'refptr' },

            # Right string (always a string)
            { 'name':'right', 'tag':'refptr' },
        ]
    },

    # Object layout
    {
        'name':'obj',
        'tag':'object',
        'fields':
        [
            # Capacity, number of property slots
            { 'name':"cap" , 'tag':"uint32" },

            # Object shape index
            { 'name':"shape_idx", 'tag':"uint32" },

            # Property words
            { 'name':"word", 'tag':"uint64", 'szField':"cap", 'tpField':'tag' },

            # Property types
            { 'name':"tag", 'tag':"uint8", 'szField':"cap" }
        ]
    },

    # Function/closure layout (extends object)
    {
        'name':'clos',
        'tag':'closure',
        'extends':'obj',
        'fields':
        [
            # Note: the function pointer is stored in the first object slot

            # Number of closure cells
            { 'name':"num_cells" , 'tag':"uint32" },

            # Closure cell pointers
            { 'name':"cell", 'tag':"refptr", 'szField':"num_cells", 'init':"null" },
        ]
    },

    # Closure cell
    {
        'name':'cell',
        'tag':'refptr',
        'fields':
        [
            # Value word
            { 'name':"word", 'tag':"uint64", 'init':'undef_word', 'tpField':'tag' },

            # Value type
            { 'name':"tag", 'tag':"uint8", 'init':'undef_type' },
        ]
    },

    # Array layout (extends object)
    {
        'name':'arr',
        'tag':'array',
        'extends':'obj',
        'fields':
        [
        ]
    },

    # Array table layout (contains array elements)
    {
        'name':'arrtbl',
        'tag':'refptr',
        'fields':
        [
            # Array capacity
            { 'name':"cap" , 'tag':"uint32" },

            # Element words
            { 'name':"word", 'tag':"uint64", 'szField':"cap", 'tpField':'tag' },

            # Element types
            { 'name':"tag", 'tag':"uint8", 'szField':"cap" },
        ]
    },
]
# Indent a text string
def indent(input, indentStr = ' '):
    """Return *input* with each line prefixed for block indentation.

    The first line is always prefixed with a single space; continuation
    lines (after embedded newlines) are prefixed with *indentStr*. A
    newline that terminates the string receives no trailing indent.
    """
    if not input:
        return ''
    pieces = [' ']
    lastIdx = len(input) - 1
    for i, ch in enumerate(input):
        pieces.append(ch)
        # Indent the next line, unless the newline ends the string
        if ch == '\n' and i != lastIdx:
            pieces.append(indentStr)
    return ''.join(pieces)
def sepList(lst, sep = ', '):
    """Join the string items of *lst* with separator *sep*.

    Bug fix: the original used ``reduce``, which is not a builtin in
    Python 3, and called ``len()`` on its argument, which fails for the
    ``map()`` iterators the callers pass in. ``str.join`` handles the
    empty case and arbitrary iterables directly and produces the exact
    same result.
    """
    return sep.join(lst)
class Var:
    """A named variable reference, usable in both JS and D output."""

    def __init__(self, type, name):
        self.type = type
        self.name = name

    def genJS(self):
        # Variables are referenced by bare name in JS
        return self.name

    def genD(self):
        # Variables are referenced by bare name in D
        return self.name

    def genDeclD(self):
        # D declarations carry an explicit type
        return '%s %s' % (self.type, self.name)
class Cst:
    """A constant value.

    The special markers 'undef_word' and 'undef_type' map to the
    target-specific expressions for the undefined value's word and tag.
    """

    # Special-case renderings per target language
    _JS_SPECIAL = {
        'undef_word': '$ir_get_word($undef)',
        'undef_type': '$ir_get_tag($undef)',
    }
    _D_SPECIAL = {
        'undef_word': 'UNDEF.word.uint8Val',
        'undef_type': 'Type.CONST',
    }

    def __init__(self, val):
        self.val = val

    def genJS(self):
        if self.val in self._JS_SPECIAL:
            return self._JS_SPECIAL[self.val]
        return str(self.val)

    def genD(self):
        if self.val in self._D_SPECIAL:
            return self._D_SPECIAL[self.val]
        return str(self.val)
class ConstDef:
    """A named constant definition emitted in both target languages."""

    def __init__(self, type, name, val):
        self.type = type
        self.name = name
        self.val = val

    def genJS(self):
        #return 'var ' + JS_DEF_PREFIX + self.name + ' = ' + str(self.val) + ';'
        # Constants are registered on the JS global object at init time
        qualName = JS_DEF_PREFIX + self.name
        return '$ir_obj_def_const(this, "%s", %s, false);' % (qualName, str(self.val))

    def genD(self):
        # D gets a plain module-level const declaration
        return 'const %s %s = %s;' % (self.type, self.name, str(self.val))
class Function:
    """A generated function, emitted as plain JS or as an extern(C) D function.

    Statements are appended to ``stmts`` after construction; parameters
    may also be appended to ``params``.
    """

    def __init__(self, type, name, params):
        self.type = type
        self.name = name
        self.params = params
        self.stmts = []

    def genJS(self):
        # The VM argument is implicit on the JS side, so drop a leading 'vm'
        params = self.params
        if params and params[0].name == 'vm':
            params = params[1:]
        header = 'function %s(%s)\n' % (
            JS_DEF_PREFIX + self.name,
            ', '.join([p.genJS() for p in params]))
        body = ''
        for stmt in self.stmts:
            body += '\n' + stmt.genJS()
        return header + '{' + indent(body) + '\n}'

    def genD(self):
        header = 'extern (C) %s %s(%s)\n' % (
            self.type, self.name,
            ', '.join([p.genDeclD() for p in self.params]))
        body = ''
        for stmt in self.stmts:
            body += '\n' + stmt.genD()
        return header + '{' + indent(body) + '\n}'
class RetStmt:
    """A return statement with an optional return-value expression."""

    def __init__(self, expr = None):
        self.expr = expr

    def genJS(self):
        if not self.expr:
            return 'return;'
        return 'return %s;' % self.expr.genJS()

    def genD(self):
        if not self.expr:
            return 'return;'
        return 'return %s;' % self.expr.genD()
class ExprStmt:
    """Wraps a bare expression as a statement (adds the trailing semicolon)."""

    def __init__(self, expr):
        self.expr = expr

    def genJS(self):
        return '%s;' % self.expr.genJS()

    def genD(self):
        return '%s;' % self.expr.genD()
class DeclStmt:
    """A local variable declaration with an initializer.

    The declared type is inferred by the target language ('var' in JS,
    'auto' in D), so only the variable and its initial value are stored.
    """

    def __init__(self, var, val):
        # Bug fix: the original also did `self.type = type`, which stored
        # the *builtin* `type` function (a copy/paste leftover from the
        # other node classes). The attribute was never read, so it has
        # been dropped.
        self.var = var
        self.val = val

    def genJS(self):
        return 'var ' + self.var.genJS() + ' = ' + self.val.genJS() + ';'

    def genD(self):
        return 'auto ' + self.var.genD() + ' = ' + self.val.genD() + ';'
class IfStmt:
    """An if statement with an optional else branch.

    Bug fix: the else branch used to be emitted as '}else{' on a single
    line (the newlines around 'else' were missing). The output was still
    syntactically valid in the target languages, but it is now formatted
    like the true branch. Note the else branch is unused by this script.
    """

    def __init__(self, expr, trueStmts, falseStmts = None):
        self.expr = expr
        self.trueStmts = trueStmts
        self.falseStmts = falseStmts

    def genJS(self):
        out = 'if (' + self.expr.genJS() + ')\n'
        out += '{'
        stmts = ''
        for stmt in self.trueStmts:
            stmts += '\n' + stmt.genJS()
        out += indent(stmts)
        out += '\n}'
        if self.falseStmts:
            # Fixed formatting: newline before and after 'else'
            out += '\nelse\n{'
            stmts = ''
            for stmt in self.falseStmts:
                stmts += '\n' + stmt.genJS()
            out += indent(stmts)
            out += '\n}'
        return out

    def genD(self):
        out = 'if (' + self.expr.genD() + ')\n'
        out += '{'
        stmts = ''
        for stmt in self.trueStmts:
            stmts += '\n' + stmt.genD()
        out += indent(stmts)
        out += '\n}'
        if self.falseStmts:
            # Fixed formatting: newline before and after 'else'
            out += '\nelse\n{'
            stmts = ''
            for stmt in self.falseStmts:
                stmts += '\n' + stmt.genD()
            out += indent(stmts)
            out += '\n}'
        return out
class AddExpr:
    """32-bit integer addition of two sub-expressions."""

    def __init__(self, lExpr, rExpr):
        self.lExpr = lExpr
        self.rExpr = rExpr

    def genJS(self):
        # JS uses an IR intrinsic to get int32 semantics
        return '$ir_add_i32(%s, %s)' % (self.lExpr.genJS(), self.rExpr.genJS())

    def genD(self):
        return '(%s + %s)' % (self.lExpr.genD(), self.rExpr.genD())
class MulExpr:
    """32-bit integer multiplication of two sub-expressions."""

    def __init__(self, lExpr, rExpr):
        self.lExpr = lExpr
        self.rExpr = rExpr

    def genJS(self):
        # JS uses an IR intrinsic to get int32 semantics
        return '$ir_mul_i32(%s, %s)' % (self.lExpr.genJS(), self.rExpr.genJS())

    def genD(self):
        return '(%s * %s)' % (self.lExpr.genD(), self.rExpr.genD())
class AndExpr:
    """32-bit bitwise AND of two sub-expressions (used for alignment masks)."""

    def __init__(self, lExpr, rExpr):
        self.lExpr = lExpr
        self.rExpr = rExpr

    def genJS(self):
        return '$ir_and_i32(%s, %s)' % (self.lExpr.genJS(), self.rExpr.genJS())

    def genD(self):
        return '(%s & %s)' % (self.lExpr.genD(), self.rExpr.genD())
class EqExpr:
    """32-bit integer equality comparison of two sub-expressions."""

    def __init__(self, lExpr, rExpr):
        self.lExpr = lExpr
        self.rExpr = rExpr

    def genJS(self):
        return '$ir_eq_i32(%s, %s)' % (self.lExpr.genJS(), self.rExpr.genJS())

    def genD(self):
        return '(%s == %s)' % (self.lExpr.genD(), self.rExpr.genD())
class LoadExpr:
    """A typed memory load from ptr + ofs."""

    def __init__(self, type, ptr, ofs):
        self.type = type
        self.ptr = ptr
        self.ofs = ofs

    def genJS(self):
        # JS load intrinsics are named by short type code (e.g. u32)
        return '$ir_load_%s(%s, %s)' % (
            typeShortName[self.type], self.ptr.genJS(), self.ofs.genJS())

    def genD(self):
        # D does a raw pointer cast and dereference
        return '*cast(%s*)(%s + %s)' % (
            self.type, self.ptr.genD(), self.ofs.genD())
class StoreExpr:
    """A typed memory store of val at ptr + ofs."""

    def __init__(self, type, ptr, ofs, val):
        self.type = type
        self.ptr = ptr
        self.ofs = ofs
        self.val = val

    def genJS(self):
        # JS store intrinsics are named by short type code (e.g. u32)
        return '$ir_store_%s(%s, %s, %s)' % (
            typeShortName[self.type], self.ptr.genJS(), self.ofs.genJS(), self.val.genJS())

    def genD(self):
        # D does a raw pointer cast, dereference and assignment
        return '*cast(%s*)(%s + %s) = %s' % (
            self.type, self.ptr.genD(), self.ofs.genD(), self.val.genD())
class AllocExpr:
    """A heap allocation of `size` bytes, tagged with a layout tag name."""

    def __init__(self, size, tag):
        self.size = size
        self.tag = tag

    def genJS(self):
        # The JS intrinsic is selected by tag name
        return '$ir_alloc_%s(%s)' % (self.tag, self.size.genJS())

    def genD(self):
        # On the D side the tag does not appear in the call
        return 'vm.heapAlloc(%s)' % self.size.genD()
class CallExpr:
    """A call to a generated function with a list of argument expressions."""

    def __init__(self, fName, args):
        self.fName = fName
        self.args = args

    def genJS(self):
        # Generated JS functions carry the runtime name prefix
        argList = sepList(map(lambda v: v.genJS(), self.args))
        return JS_DEF_PREFIX + self.fName + '(' + argList + ')'

    def genD(self):
        argList = sepList(map(lambda v: v.genD(), self.args))
        return self.fName + '(' + argList + ')'
class ForLoop:
    """A counted loop from 0 (inclusive) up to endVar (exclusive)."""

    def __init__(self, loopVar, endVar, stmts):
        self.loopVar = loopVar
        self.endVar = endVar
        self.stmts = stmts

    def genJS(self):
        i = self.loopVar.genJS()
        n = self.endVar.genJS()
        # int32 arithmetic via IR intrinsics, as elsewhere on the JS side
        out = 'for (var %s = 0; $ir_lt_i32(%s, %s); %s = $ir_add_i32(%s, 1))\n' % (i, i, n, i, i)
        out += '{'
        body = ''
        for stmt in self.stmts:
            body += '\n' + stmt.genJS()
        out += indent(body)
        out += '\n}'
        return out

    def genD(self):
        i = self.loopVar.genD()
        out = 'for (%s %s = 0; %s < %s; ++%s)\n' % (
            self.loopVar.type, i, i, self.endVar.genD(), i)
        out += '{'
        body = ''
        for stmt in self.stmts:
            body += '\n' + stmt.genD()
        out += indent(body)
        out += '\n}'
        return out
# Perform basic validation
for layout in layouts:
    # Check for duplicate field names
    for fieldIdx, field in enumerate(layout['fields']):
        for prev in layout['fields'][:fieldIdx]:
            if prev['name'] == field['name']:
                raise Exception('duplicate field name ' + field['name'])

# Perform layout extensions
# (a child layout receives deep copies of its parent's fields, prepended
# before its own, so parent accessors and offsets remain compatible)
for layoutIdx, layout in enumerate(layouts):

    # If this layout does not extend another, skip it
    if 'extends' not in layout:
        continue

    # Find the parent layout
    parent = None
    for prev in layouts[:layoutIdx]:
        if prev['name'] == layout['extends']:
            parent = prev
            break
    if parent == None:
        raise Exception("parent not found")

    # Add the parent fields (except type) to this layout
    fieldCopies = []
    for field in parent['fields']:
        fieldCopies += [deepcopy(field)]
    layout['fields'] = fieldCopies + layout['fields']

# Assign layout ids, add the next and header fields
# ('header' is initialized to this layout's id and read back by the
# dispatch functions via obj_get_header; 'next' is presumably used by
# the GC/heap internals -- confirm against the runtime)
nextLayoutId = 0
for layout in layouts:

    layoutId = nextLayoutId
    layout['typeId'] = layoutId
    nextLayoutId += 1

    nextField = [{ 'name':'next', 'tag':'refptr', 'init':"null" }]
    typeField = [{ 'name':'header', 'tag':'uint32', 'init':str(layoutId) }]
    layout['fields'] = nextField + typeField + layout['fields']
# Find/resolve size fields
# (each 'szField' entry is rewritten in place from a field *name* to a
# reference to that field's dict; the referenced fields are also
# collected in layout['szFields'] for the size/alloc generators)
for layout in layouts:

    # List of size fields for this layout
    layout['szFields'] = []

    for fieldIdx, field in enumerate(layout['fields']):

        # If this field has no size field, skip it
        if 'szField' not in field:
            continue

        # Find the size field and add it to the size field list
        szName = field['szField']
        field['szField'] = None
        for prev in layout['fields'][:fieldIdx]:
            if prev['name'] == szName:
                field['szField'] = prev
                # Add the field to the size field list
                if prev not in layout['szFields']:
                    layout['szFields'] += [prev]
                break

        # If the size field was not found, raise an exception
        if field['szField'] == None:
            raise Exception('size field "%s" of "%s" not found' % (szName, field['name']))

# Find/resolve word type fields
# (same in-place name-to-dict resolution as above, for 'tpField')
for layout in layouts:
    for field in layout['fields']:
        # If this field has no type field, skip it
        if 'tpField' not in field:
            continue
        # Find the type field
        tpName = field['tpField']
        field['tpField'] = None
        for prev in layout['fields']:
            if prev['name'] == tpName:
                field['tpField'] = prev
        # If the type field was not found, raise an exception
        if field['tpField'] == None:
            raise Exception('type field "%s" of "%s" not found' % (tpName, field['name']))

# Compute field alignment requirements
for layout in layouts:
    #print('');
    #print(layout['name'])
    # Current offset since last dynamic alignment
    curOfs = 0
    # For each field of this layout
    for fieldIdx, field in enumerate(layout['fields']):
        # Field type size
        fSize = typeSize[field['tag']]
        # If the previous field was dynamically sized and of smaller type size
        if fieldIdx > 0 and 'szField' in layout['fields'][fieldIdx-1] and \
           typeSize[layout['fields'][fieldIdx-1]['tag']] < fSize:
            # This field will be dynamically aligned: its offset is only
            # known at run time, so generated code rounds up to pointer
            # alignment instead of adding a static pad
            field['dynAlign'] = True
            field['alignPad'] = 0
            # Reset the current offset
            curOfs = 0
        else:
            # Compute the padding required for alignment
            alignRem = curOfs % fSize
            if alignRem != 0:
                alignPad = fSize - alignRem
            else:
                alignPad = 0
            field['dynAlign'] = False
            field['alignPad'] = alignPad
            # Update the current offset
            curOfs += alignPad + fSize
        #print(field['name'])
        #print(' fSize: ' + str(fSize))
        #print(' align: ' + str(field['alignPad']))
# List of generated functions and declarations
decls = []

# For each layout, generate the accessor/size/alloc/GC-visit functions
for layout in layouts:

    ofsPref = layout['name'] + '_ofs_';
    setPref = layout['name'] + '_set_';
    getPref = layout['name'] + '_get_';

    # Define the layout type constant
    decls += [ConstDef(
        'uint32',
        'LAYOUT_' + layout['name'].upper(),
        layout['typeId']
    )]

    # Generate offset computation functions
    # (offset of a field = sum of sizes/padding of all preceding fields;
    # variable-size fields multiply their element size by a count read
    # back from the object itself)
    for fieldIdx, field in enumerate(layout['fields']):
        fun = Function('uint32', ofsPref + field['name'], [Var('refptr', 'o')])
        if 'szField' in field:
            fun.params += [Var('uint32', 'i')]
        sumExpr = Cst(0)
        for prev in layout['fields'][:fieldIdx]:
            # If this field must be dynamically aligned
            if prev['dynAlign']:
                ptrSize = typeSize['rawptr']
                sumExpr = AndExpr(AddExpr(sumExpr, Cst(ptrSize - 1)), Cst(-ptrSize))
            elif prev['alignPad'] > 0:
                sumExpr = AddExpr(sumExpr, Cst(prev['alignPad']))
            # Compute the previous field size
            termExpr = Cst(typeSize[prev['tag']])
            if 'szField' in prev:
                szCall = CallExpr(getPref + prev['szField']['name'], [fun.params[0]])
                termExpr = MulExpr(termExpr, szCall)
            sumExpr = AddExpr(sumExpr, termExpr)
        # If this field must be dynamically aligned
        if field['dynAlign']:
            ptrSize = typeSize['rawptr']
            sumExpr = AndExpr(AddExpr(sumExpr, Cst(ptrSize - 1)), Cst(-ptrSize))
        elif field['alignPad'] > 0:
            sumExpr = AddExpr(sumExpr, Cst(field['alignPad']))
        # Compute the index into the last field
        if 'szField' in field:
            fieldSize = Cst(typeSize[field['tag']])
            sumExpr = AddExpr(sumExpr, MulExpr(fieldSize , fun.params[1]))
        fun.stmts += [RetStmt(sumExpr)]
        decls += [fun]

    # Generate getter methods
    for fieldIdx, field in enumerate(layout['fields']):
        fun = Function(field['tag'], getPref + field['name'], [Var('refptr', 'o')])
        if 'szField' in field:
            fun.params += [Var('uint32', 'i')]
        ofsCall = CallExpr(ofsPref + field['name'], [fun.params[0]])
        if 'szField' in field:
            ofsCall.args += [fun.params[1]]
        fun.stmts += [RetStmt(LoadExpr(field['tag'], fun.params[0], ofsCall))]
        decls += [fun]

    # Generate setter methods
    for fieldIdx, field in enumerate(layout['fields']):
        fun = Function('void', setPref + field['name'], [Var('refptr', 'o')])
        if 'szField' in field:
            fun.params += [Var('uint32', 'i')]
        fun.params += [Var(field['tag'], 'v')]
        ofsCall = CallExpr(ofsPref + field['name'], [fun.params[0]])
        if 'szField' in field:
            ofsCall.args += [fun.params[1]]
        fun.stmts += [ExprStmt(StoreExpr(field['tag'], fun.params[0], ofsCall, fun.params[-1]))]
        decls += [fun]

    # Generate the layout size computation function
    # (takes the count of each variable-size field as a parameter)
    fun = Function('uint32', layout['name'] + '_comp_size', [])
    szVars = {}
    for szField in layout['szFields']:
        szVar = Var(szField['tag'], szField['name'])
        szVars[szVar.name] = szVar
        fun.params += [szVar]
    szSum = Cst(0)
    for field in layout['fields']:
        # If this field must be dynamically aligned
        if field['dynAlign']:
            ptrSize = typeSize['rawptr']
            szSum = AndExpr(AddExpr(szSum, Cst(ptrSize - 1)), Cst(-ptrSize))
        elif field['alignPad'] > 0:
            szSum = AddExpr(szSum, Cst(field['alignPad']))
        szTerm = Cst(typeSize[field['tag']])
        if 'szField' in field:
            szTerm = MulExpr(szTerm, szVars[field['szField']['name']])
        szSum = AddExpr(szSum, szTerm)
    fun.stmts += [RetStmt(szSum)]
    decls += [fun]

    # Generate the sizeof method
    # (reads the size-field counts from an existing object and delegates
    # to the _comp_size function generated above)
    fun = Function('uint32', layout['name'] + '_sizeof', [Var('refptr', 'o')])
    callExpr = CallExpr(layout['name'] + '_comp_size', [])
    for szField in layout['szFields']:
        getCall = CallExpr(getPref + szField['name'], [fun.params[0]])
        callExpr.args += [getCall]
    fun.stmts += [RetStmt(callExpr)]
    decls += [fun]

    # Generate the allocation function
    fun = Function('refptr', layout['name'] + '_alloc', [Var('VM', 'vm')])
    szVars = {}
    for szField in layout['szFields']:
        szVar = Var(szField['tag'], szField['name'])
        szVars[szVar.name] = szVar
        fun.params += [szVar]
    szCall = CallExpr(layout['name'] + '_comp_size', [])
    for szField in layout['szFields']:
        szCall.args += [szVars[szField['name']]]
    objVar = Var('refptr', 'o')
    fun.stmts += [DeclStmt(objVar, AllocExpr(szCall, layout['tag']))]
    for szField in layout['szFields']:
        setCall = CallExpr(setPref + szField['name'], [objVar, szVars[szField['name']]])
        fun.stmts += [ExprStmt(setCall)]
    for field in layout['fields']:
        if 'init' not in field:
            continue
        initVal = field['init']
        # Some init values map to zero and do not need to be written
        # (freshly allocated memory is assumed zeroed -- confirm against
        # the runtime allocator)
        if initVal == '0':
            continue
        if initVal == 'null':
            continue
        if initVal == 'undef_type':
            continue
        if 'szField' in field:
            loopVar = Var('uint32', 'i')
            szVar = szVars[field['szField']['name']]
            setCall = CallExpr(setPref + field['name'], [objVar, loopVar, Cst(field['init'])])
            fun.stmts += [ForLoop(loopVar, szVar, [ExprStmt(setCall)])]
        else:
            setCall = CallExpr(setPref + field['name'], [objVar, Cst(field['init'])])
            fun.stmts += [ExprStmt(setCall)]
    fun.stmts += [RetStmt(objVar)]
    decls += [fun]

    # Generate the GC visit function
    # (forwards every heap-reference field and every word/type pair
    # through gcForward, writing the forwarded value back)
    fun = Function('void', layout['name'] + '_visit_gc', [Var('VM', 'vm'), Var('refptr', 'o')])
    vmVar = fun.params[0]
    objVar = fun.params[1]
    for field in layout['fields']:
        # If this is not a heap reference field, skip it
        if field['tag'] != 'refptr' and (not 'tpField' in field):
            continue
        # If this is a variable-size field
        if 'szField' in field:
            szVar = Var('uint32', field['szField']['name'])
            szStmt = DeclStmt(szVar, CallExpr(getPref + field['szField']['name'], [objVar]))
            fun.stmts += [szStmt]
            loopVar = Var('uint32', 'i')
            # If this is a word/type pair
            if 'tpField' in field:
                getWCall = CallExpr(getPref + field['name'], [objVar, loopVar])
                getTCall = CallExpr(getPref + field['tpField']['name'], [objVar, loopVar])
                fwdCall = CallExpr('gcForward', [vmVar, getWCall, getTCall])
            else:
                getCall = CallExpr(getPref + field['name'], [objVar, loopVar])
                fwdCall = CallExpr('gcForward', [vmVar, getCall])
            setCall = CallExpr(setPref + field['name'], [objVar, loopVar, fwdCall])
            fun.stmts += [ForLoop(loopVar, szVar, [ExprStmt(setCall)])]
        else:
            # If this is a word/type pair
            if 'tpField' in field:
                getWCall = CallExpr(getPref + field['name'], [objVar])
                getTCall = CallExpr(getPref + field['tpField']['name'], [objVar])
                fwdCall = CallExpr('gcForward', [vmVar, getWCall, getTCall])
            else:
                getCall = CallExpr(getPref + field['name'], [objVar])
                fwdCall = CallExpr('gcForward', [vmVar, getCall])
            setCall = CallExpr(setPref + field['name'], [objVar, fwdCall])
            fun.stmts += [ExprStmt(setCall)]
    decls += [fun]
# Generate the sizeof dispatch method
# (switches on the object's header word, which holds the layout type id)
fun = Function('uint32', 'layout_sizeof', [Var('refptr', 'o')])
typeVar = Var('uint32', 't')
fun.stmts += [DeclStmt(typeVar, CallExpr('obj_get_header', [fun.params[0]]))]
for layout in layouts:
    cmpExpr = EqExpr(typeVar, Var('uint32', 'LAYOUT_' + layout['name'].upper()))
    retStmt = RetStmt(CallExpr(layout['name'] + '_sizeof', [fun.params[0]]))
    fun.stmts += [IfStmt(cmpExpr, [retStmt])]
fun.stmts += [ExprStmt(CallExpr('assert', [Cst('false'), Cst('"invalid layout in layout_sizeof"')]))]
decls += [fun]

# Generate the GC visit dispatch method
fun = Function('void', 'layout_visit_gc', [Var('VM', 'vm'), Var('refptr', 'o')])
typeVar = Var('uint32', 't')
fun.stmts += [DeclStmt(typeVar, CallExpr('obj_get_header', [fun.params[1]]))]
for layout in layouts:
    cmpExpr = EqExpr(typeVar, Var('uint32', 'LAYOUT_' + layout['name'].upper()))
    callStmt = ExprStmt(CallExpr(layout['name'] + '_visit_gc', [fun.params[0], fun.params[1]]))
    retStmt = RetStmt()
    fun.stmts += [IfStmt(cmpExpr, [callStmt, retStmt])]
fun.stmts += [ExprStmt(CallExpr('assert', [Cst('false'), Cst('"invalid layout in layout_visit_gc"')]))]
decls += [fun]
# Open the output files for writing
DFile = open(D_OUT_FILE, 'w')
JSFile = open(JS_OUT_FILE, 'w')

comment = \
'//\n' + \
'// Code auto-generated from "' + sys.argv[0] + '". Do not modify.\n' + \
'//\n\n'

DFile.write(comment)
JSFile.write(comment)

# D module header: imports plus aliases matching the tag names used above
DFile.write('module runtime.layout;\n')
DFile.write('\n');
DFile.write('import runtime.vm;\n')
DFile.write('import runtime.gc;\n')
DFile.write('\n');
DFile.write('alias ubyte* funptr;\n');
DFile.write('alias ubyte* shapeptr;\n');
DFile.write('alias ubyte* rawptr;\n');
DFile.write('alias ubyte* refptr;\n');
DFile.write('alias byte int8;\n');
DFile.write('alias short int16;\n');
DFile.write('alias int int32;\n');
DFile.write('alias long int64;\n');
DFile.write('alias ubyte uint8;\n');
DFile.write('alias ushort uint16;\n');
DFile.write('alias uint uint32;\n');
DFile.write('alias ulong uint64;\n');
DFile.write('alias double float64;\n');
DFile.write('\n');

# Output D and JS code, write to file
for decl in decls:
    JSFile.write(decl.genJS() + '\n\n')
    DFile.write(decl.genD() + '\n\n')

DFile.close()
JSFile.close()
54ca825ceaa589ccd5a09e3f4b4eda87fbe86de1 | 3,280 | py | Python | seisflows/system/tiger_md_gpu.py | chukren/seisflows | c4a5a8a9411b365c9bba818f6ed3ba03f24e681b | [
"BSD-2-Clause"
] | 1 | 2017-08-31T09:11:39.000Z | 2017-08-31T09:11:39.000Z | seisflows/system/tiger_md_gpu.py | chukren/seisflows | c4a5a8a9411b365c9bba818f6ed3ba03f24e681b | [
"BSD-2-Clause"
] | null | null | null | seisflows/system/tiger_md_gpu.py | chukren/seisflows | c4a5a8a9411b365c9bba818f6ed3ba03f24e681b | [
"BSD-2-Clause"
] | 1 | 2020-04-16T08:38:49.000Z | 2020-04-16T08:38:49.000Z |
import os
import sys
from os.path import abspath, join
from seisflows.tools import unix
from seisflows.tools.code import call, findpath, saveobj
from seisflows.tools.config import ParameterError, custom_import, \
SeisflowsObjects, SeisflowsParameters, SeisflowsPaths
# Module-level handles to the active parameter and path sets for this run
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class tiger_md_gpu(custom_import('system', 'tiger_md')):
    """ An interface through which to submit workflows, run tasks in serial or
      parallel, and perform other system functions.

      By hiding environment details behind a python interface layer, these
      classes provide a consistent command set across different computing
      environments.

      For important additional information, please see
      http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-configuration
    """

    def check(self):
        """ Checks parameters and paths
        """
        # Contributed code that was never fully validated; fail loudly
        # rather than misbehave silently. The code below is unreachable
        # until this raise is removed.
        raise NotImplementedError('Provided by Etienne Bachmann. Not recently testested and not likely to work out of the box.')

        # why does Etienne have it this way?
        if 'NGPU' not in PAR:
            setattr(PAR, 'NGPU', 4)

        super(tiger_md_gpu, self).check()

    def submit(self, workflow):
        """ Submits workflow
        """
        unix.mkdir(PATH.OUTPUT)
        unix.cd(PATH.OUTPUT)

        self.checkpoint()

        # Bug fix: `exists` was referenced without being imported; use the
        # fully-qualified os.path.exists (os is imported at module level).
        if not os.path.exists(PATH.SUBMIT + '/' + 'scratch'):
            unix.ln(PATH.SCRATCH, PATH.SUBMIT + '/' + 'scratch')

        call('sbatch '
             + '--job-name=%s ' % PAR.SUBTITLE
             + '--output=%s ' % (PATH.SUBMIT + '/' + 'output.log')
             + '--nodes 1 '
             # Bug fix: '--ntasks=% ' was an invalid %-format string and
             # raised ValueError at run time; '%d' was clearly intended.
             + '--ntasks=%d ' % PAR.NGPU
             + '--ntasks-per-socket=%d ' % PAR.NGPU
             + '--gres=gpu:%d ' % PAR.NGPU
             + '--time=%d ' % PAR.WALLTIME
             + findpath('seisflows.system') + '/' + 'wrappers/submit '
             + PATH.OUTPUT)

    def run(self, classname, funcname, hosts='all', **kwargs):
        """ Runs tasks in serial or parallel on specified hosts
        """
        self.checkpoint()
        self.save_kwargs(classname, funcname, kwargs)

        if hosts == 'all':
            call('srun '
                 + '--wait=0 '
                 + join(findpath('seisflows.system'), 'wrappers/run ')
                 + PATH.OUTPUT + ' '
                 + classname + ' '
                 + funcname)

        elif hosts == 'head':
            # run on head node
            call('srun '
                 + '--wait=0 '
                 + join(findpath('seisflows.system'), 'wrappers/run_head ')
                 + PATH.OUTPUT + ' '
                 + classname + ' '
                 + funcname)

    def getnode(self):
        """ Gets the id of the task running on this process, from the
            SLURM_GTIDS/SLURM_LOCALID environment variables.
        """
        gid = os.getenv('SLURM_GTIDS').split(',')
        lid = int(os.getenv('SLURM_LOCALID'))
        return int(gid[lid])
| 31.538462 | 128 | 0.558537 |
import os
import sys
from os.path import abspath, exists, join

from seisflows.tools import unix
from seisflows.tools.code import call, findpath, saveobj
from seisflows.tools.config import ParameterError, custom_import, \
    SeisflowsObjects, SeisflowsParameters, SeisflowsPaths
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class tiger_md_gpu(custom_import('system', 'tiger_md')):
    """ An interface through which to submit workflows, run tasks in serial or
      parallel, and perform other system functions.
      By hiding environment details behind a python interface layer, these
      classes provide a consistent command set across different computing
      environments.

      This variant runs on a single GPU-equipped SLURM node: one sbatch
      allocation with PAR.NGPU tasks and PAR.NGPU GPUs on one socket.

      For important additional information, please see
      http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-configuration
    """
    def check(self):
        """ Checks parameters and paths.

        Raises NotImplementedError unconditionally: the class is flagged
        as untested, so the defaults below are intentionally unreachable
        until someone validates it.
        """
        raise NotImplementedError('Provided by Etienne Bachmann. Not recently testested and not likely to work out of the box.')
        # why does Etienne have it this way?
        # Default: 4 GPUs per node unless the user configured NGPU.
        if 'NGPU' not in PAR:
            setattr(PAR, 'NGPU', 4)
        super(tiger_md_gpu, self).check()
    def submit(self, workflow):
        """ Submits workflow to the SLURM scheduler via sbatch.
        """
        unix.mkdir(PATH.OUTPUT)
        unix.cd(PATH.OUTPUT)
        # Persist current state so the remote job can pick it up.
        self.checkpoint()
        # `exists` comes from os.path (see module imports).
        if not exists(PATH.SUBMIT + '/' + 'scratch'):
            unix.ln(PATH.SCRATCH, PATH.SUBMIT + '/' + 'scratch')
        call('sbatch '
             + '--job-name=%s ' % PAR.SUBTITLE
             + '--output=%s ' % (PATH.SUBMIT + '/' + 'output.log')
             + '--nodes 1 '
             # fixed: was '--ntasks=% ', a malformed %-format that raised
             # ValueError ("unsupported format character") at submit time
             + '--ntasks=%d ' % PAR.NGPU
             + '--ntasks-per-socket=%d ' % PAR.NGPU
             + '--gres=gpu:%d ' % PAR.NGPU
             + '--time=%d ' % PAR.WALLTIME
             + findpath('seisflows.system') + '/' + 'wrappers/submit '
             + PATH.OUTPUT)
    def run(self, classname, funcname, hosts='all', **kwargs):
        """ Runs tasks in serial or parallel on specified hosts.

        classname/funcname name the method the srun wrapper invokes;
        kwargs are pickled first (save_kwargs) so the wrapper can reload
        them on the compute node.
        """
        self.checkpoint()
        self.save_kwargs(classname, funcname, kwargs)
        if hosts == 'all':
            # One task per allocated slot.
            call('srun '
                 + '--wait=0 '
                 + join(findpath('seisflows.system'), 'wrappers/run ')
                 + PATH.OUTPUT + ' '
                 + classname + ' '
                 + funcname)
        elif hosts == 'head':
            # run on head node only
            call('srun '
                 + '--wait=0 '
                 + join(findpath('seisflows.system'), 'wrappers/run_head ')
                 + PATH.OUTPUT + ' '
                 + classname + ' '
                 + funcname)
    def getnode(self):
        """ Gets number of running task from SLURM environment variables.
        """
        gid = os.getenv('SLURM_GTIDS').split(',')
        lid = int(os.getenv('SLURM_LOCALID'))
        return int(gid[lid])
    def mpiexec(self):
        """ MPI run command prefix for a single node. """
        return 'mpirun -np %d ' % PAR.NPROC
    def save_kwargs(self, classname, funcname, kwargs):
        """ Pickles keyword arguments so remote wrappers can reload them. """
        kwargspath = join(PATH.OUTPUT, 'SeisflowsObjects', classname+'_kwargs')
        kwargsfile = join(kwargspath, funcname+'.p')
        unix.mkdir(kwargspath)
        saveobj(kwargsfile, kwargs)
| 269 | 0 | 54 |
b1eceb58e10379816e758b6eb289db3e703e794c | 14,441 | py | Python | archived/v0/code/ext_genepheno_candidates.py | HazyResearch/dd-genomics | 3e3f272327d4c29daa53347b7ddb20ea1a9ae78a | [
"Apache-2.0"
] | 13 | 2015-04-03T04:55:54.000Z | 2021-11-08T06:19:09.000Z | archived/v0/code/ext_genepheno_candidates.py | HazyResearch/dd-genomics | 3e3f272327d4c29daa53347b7ddb20ea1a9ae78a | [
"Apache-2.0"
] | 85 | 2015-04-29T02:35:42.000Z | 2015-08-03T18:37:41.000Z | archived/v0/code/ext_genepheno_candidates.py | HazyResearch/dd-genomics | 3e3f272327d4c29daa53347b7ddb20ea1a9ae78a | [
"Apache-2.0"
] | 7 | 2015-04-02T23:38:25.000Z | 2022-02-18T12:22:44.000Z | #! /usr/bin/env python3
import fileinput
import random
import re
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from dstruct.Relation import Relation
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, no_op, TSVstring2list
# Load the gene<->hpoterm dictionary
genehpoterms_dict = load_dict("genehpoterms")
# Supervise the candidates
if __name__ == "__main__":
# Process input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
"ners", "lemmas", "dep_paths", "dep_parents",
"bounding_boxes", "gene_entities", "gene_wordidxss",
"gene_is_corrects", "gene_types",
"hpoterm_entities", "hpoterm_wordidxss",
"hpoterm_is_corrects", "hpoterm_types"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list,
TSVstring2list, lambda x: TSVstring2list(x, int),
TSVstring2list, # these are for the sentence
TSVstring2list, lambda x: TSVstring2list(x, sep="!~!"),
TSVstring2list, TSVstring2list, # these are for the genes
TSVstring2list, lambda x: TSVstring2list(x, sep="!~!"),
TSVstring2list, TSVstring2list, # these are for the HPO
])
# Remove the genes that are unsupervised copies or duplicates
supervised_idxs = set()
unsupervised_idxs = set()
for i in range(len(line_dict["gene_is_corrects"])):
if line_dict["gene_is_corrects"][i] == "n":
unsupervised_idxs.add(i)
else:
if line_dict["gene_types"][i] != "GENE_SUP_contr_2":
# The above condition is to avoid duplicates
supervised_idxs.add(i)
survived_unsuperv_idxs = set()
for i in unsupervised_idxs:
wordidxs = line_dict["gene_wordidxss"][i]
found = False
for j in supervised_idxs:
if line_dict["gene_wordidxss"][j] == wordidxs:
found = True
break
if not found:
survived_unsuperv_idxs.add(i)
to_keep = sorted(survived_unsuperv_idxs | supervised_idxs)
new_gene_entities = []
new_gene_wordidxss = []
new_gene_is_corrects = []
new_gene_types = []
for i in to_keep:
new_gene_entities.append(line_dict["gene_entities"][i])
new_gene_wordidxss.append(line_dict["gene_wordidxss"][i])
new_gene_is_corrects.append(line_dict["gene_is_corrects"][i])
new_gene_types.append(line_dict["gene_types"][i])
line_dict["gene_entities"] = new_gene_entities
line_dict["gene_wordidxss"] = new_gene_wordidxss
line_dict["gene_is_corrects"] = new_gene_is_corrects
line_dict["gene_types"] = new_gene_types
# Remove the hpoterms that are unsupervised copies
supervised_idxs = set()
unsupervised_idxs = set()
for i in range(len(line_dict["hpoterm_is_corrects"])):
if line_dict["hpoterm_is_corrects"][i] == "n":
unsupervised_idxs.add(i)
else:
supervised_idxs.add(i)
survived_unsuperv_idxs = set()
for i in unsupervised_idxs:
wordidxs = line_dict["hpoterm_wordidxss"][i]
found = False
for j in supervised_idxs:
if line_dict["hpoterm_wordidxss"][j] == wordidxs:
found = True
break
if not found:
survived_unsuperv_idxs.add(i)
to_keep = sorted(survived_unsuperv_idxs | supervised_idxs)
new_hpoterm_entities = []
new_hpoterm_wordidxss = []
new_hpoterm_is_corrects = []
new_hpoterm_types = []
for i in to_keep:
new_hpoterm_entities.append(line_dict["hpoterm_entities"][i])
new_hpoterm_wordidxss.append(line_dict["hpoterm_wordidxss"][i])
new_hpoterm_is_corrects.append(
line_dict["hpoterm_is_corrects"][i])
new_hpoterm_types.append(line_dict["hpoterm_types"][i])
line_dict["hpoterm_entities"] = new_hpoterm_entities
line_dict["hpoterm_wordidxss"] = new_hpoterm_wordidxss
line_dict["hpoterm_is_corrects"] = new_hpoterm_is_corrects
line_dict["hpoterm_types"] = new_hpoterm_types
# Create the sentence object where the two mentions appear
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
line_dict["dep_parents"], line_dict["bounding_boxes"])
# Skip weird sentences
if sentence.is_weird():
continue
gene_mentions = []
hpoterm_mentions = []
positive_relations = []
gene_wordidxs = set()
hpoterm_wordidxs = set()
# Iterate over each pair of (gene,phenotype) mentions
for g_idx in range(len(line_dict["gene_is_corrects"])):
g_wordidxs = TSVstring2list(
line_dict["gene_wordidxss"][g_idx], int)
for idx in g_wordidxs:
gene_wordidxs.add(idx)
gene_mention = Mention(
"GENE", line_dict["gene_entities"][g_idx],
[sentence.words[j] for j in g_wordidxs])
if line_dict["gene_is_corrects"][g_idx] == "n":
gene_mention.is_correct = None
elif line_dict["gene_is_corrects"][g_idx] == "f":
gene_mention.is_correct = False
elif line_dict["gene_is_corrects"][g_idx] == "t":
gene_mention.is_correct = True
else:
assert False
gene_mention.type = line_dict["gene_types"][g_idx]
assert not gene_mention.type.endswith("_UNSUP")
gene_mentions.append(gene_mention)
for h_idx in range(len(line_dict["hpoterm_is_corrects"])):
h_wordidxs = TSVstring2list(
line_dict["hpoterm_wordidxss"][h_idx], int)
for idx in h_wordidxs:
hpoterm_wordidxs.add(idx)
hpoterm_mention = Mention(
"hpoterm", line_dict["hpoterm_entities"][h_idx],
[sentence.words[j] for j in h_wordidxs])
if line_dict["hpoterm_is_corrects"][h_idx] == "n":
hpoterm_mention.is_correct = None
elif line_dict["hpoterm_is_corrects"][h_idx] == "f":
hpoterm_mention.is_correct = False
elif line_dict["hpoterm_is_corrects"][h_idx] == "t":
hpoterm_mention.is_correct = True
else:
assert False
hpoterm_mention.type = line_dict["hpoterm_types"][h_idx]
assert not hpoterm_mention.type.endswith("_UNSUP")
hpoterm_mentions.append(hpoterm_mention)
# Skip if the word indexes overlab
if set(g_wordidxs) & set(h_wordidxs):
continue
# Skip if the mentions are too far away
gene_start = gene_mention.wordidxs[0]
hpoterm_start = hpoterm_mention.wordidxs[0]
gene_end = gene_mention.wordidxs[-1]
hpoterm_end = hpoterm_mention.wordidxs[-1]
limits = sorted(
(gene_start, hpoterm_start, gene_end, hpoterm_end))
start = limits[0]
betw_start = limits[1]
betw_end = limits[2]
if betw_end - betw_start > 50:
continue
relation = Relation(
"GENEPHENO", gene_mention, hpoterm_mention)
# Supervise
supervise(relation, gene_mention, hpoterm_mention,
sentence)
if relation.is_correct:
positive_relations.append(
(gene_mention, hpoterm_mention))
# Print!
print(relation.tsv_dump())
# Create some artificial negative examples:
# for each (gene, phenotype) pair that is labelled as positive
# example, select one word w in the same sentence that (1) is not a
# gene mention candidate and (2) is not a phenotype mention
# candidate, add (gene, w) and (w, phenotype) as negative example
avail_wordidxs = (
set(line_dict["wordidxs"]) - set(hpoterm_wordidxs)) - \
set(gene_wordidxs)
avail_wordidxs = list(avail_wordidxs)
if len(avail_wordidxs) > 0:
fake_rels = []
for (gene_mention, hpoterm_mention) in positive_relations:
other_word = sentence.words[random.choice(avail_wordidxs)]
fake_gene_mention = Mention(
"FAKE_GENE", other_word.lemma, [other_word, ])
fake_hpo_mention = Mention(
"FAKE_HPOTERM", other_word.lemma, [other_word, ])
fake_rel_1 = Relation(
"GENEPHENO_SUP_POSFAKEGENE", fake_gene_mention,
hpoterm_mention)
fake_rel_2 = Relation(
"GENEPHENO_SUP_POSFAKEHPO", gene_mention,
fake_hpo_mention)
fake_rel_1.is_correct = False
fake_rel_2.is_correct = False
# Print!
print(fake_rel_1.tsv_dump())
print(fake_rel_2.tsv_dump())
# Create more artificial negative examples:
# for each gene candidate G in the sentence, if the pattern G
# <Verb> X appears in the same sentence and X is not a phenotype
# mention candidate, add (gene, X) as negative examples
for gene_mention in gene_mentions:
try:
next_word = sentence.words[gene_mention.wordidxs[-1] + 1]
except IndexError:
continue
if re.search('^VB[A-Z]*$', next_word.pos) and \
next_word.word not in ["{", "}", "(", ")", "[", "]"]:
try:
after_next_word = sentence.words[
next_word.in_sent_idx + 1]
except IndexError:
continue
if after_next_word.in_sent_idx in hpoterm_wordidxs:
continue
fake_hpo_mention = Mention(
"FAKE_HPOTERM", after_next_word.lemma,
[after_next_word, ])
fake_rel = Relation(
"GENEPHENO_SUP_FAKEHPO", gene_mention,
fake_hpo_mention)
fake_rel.is_correct = False
print(fake_rel.tsv_dump())
# Create more artificial negative examples:
# as before but for phenotypes
for hpo_mention in hpoterm_mentions:
try:
next_word = sentence.words[hpo_mention.wordidxs[-1] + 1]
except IndexError:
continue
if re.search('^VB[A-Z]*$', next_word.pos) and \
next_word.word not in ["{", "}", "(", ")", "[", "]"]:
try:
after_next_word = sentence.words[
next_word.in_sent_idx + 1]
except IndexError:
continue
if after_next_word.in_sent_idx in gene_wordidxs:
continue
fake_gene_mention = Mention(
"FAKE_GENE", after_next_word.lemma,
[after_next_word, ])
fake_rel = Relation(
"GENEPHENO_SUP_FAKEGENE", fake_gene_mention,
hpo_mention)
fake_rel.is_correct = False
print(fake_rel.tsv_dump())
| 49.119048 | 79 | 0.532096 | #! /usr/bin/env python3
import fileinput
import random
import re
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from dstruct.Relation import Relation
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, no_op, TSVstring2list
# Load the gene<->hpoterm dictionary
genehpoterms_dict = load_dict("genehpoterms")
# Supervise the candidates
def supervise(relation, gene_mention, hpoterm_mention, sentence):
    """Apply distant-supervision labels to a gene-phenotype relation.

    Mutates `relation` in place: marks it False (with a type recording
    which side was wrong) when either mention is labelled False, and
    marks it True when the (gene, HPO id) pair appears in the known
    gene<->hpoterm mapping. Otherwise the relation stays unsupervised.
    `sentence` is currently unused -- kept for signature compatibility
    with other supervise() helpers in this codebase.
    """
    # One of the two mentions is labelled as False
    if gene_mention.is_correct is False and \
            hpoterm_mention.is_correct is not False:
        relation.is_correct = False
        relation.type = "GENEPHENO_SUP_F_G"
    elif hpoterm_mention.is_correct is False and \
            gene_mention.is_correct is not False:
        relation.is_correct = False
        relation.type = "GENEPHENO_SUP_F_H"
    elif hpoterm_mention.is_correct is False and \
            gene_mention.is_correct is False:
        # Both sides labelled False.
        relation.is_correct = False
        relation.type = "GENEPHENO_SUP_F_GH"
    else:
        # Present in the existing HPO mapping
        in_mapping = False
        # HPO entity strings are "HP_id|name|..."; the id is field 0.
        hpo_entity_id = hpoterm_mention.entity.split("|")[0]
        # First try the literal surface word of the gene mention ...
        if frozenset([gene_mention.words[0].word, hpo_entity_id]) in \
                genehpoterms_dict:
            in_mapping = True
        else:
            # ... then every gene symbol listed in the entity string.
            for gene in gene_mention.entity.split("|"):
                if frozenset([gene, hpo_entity_id]) in \
                        genehpoterms_dict:
                    in_mapping = True
                    break
        if in_mapping:
            relation.is_correct = True
            relation.type = "GENEPHENO_SUP_MAP"
if __name__ == "__main__":
    # Streaming extractor: reads TSV rows (one sentence + its gene/HPO
    # mention candidates per line) on stdin, emits supervised
    # gene-phenotype relation candidates (plus artificial negatives)
    # as TSV on stdout.
    # Process input
    with fileinput.input() as input_files:
        for line in input_files:
            # Parse the TSV line
            line_dict = get_dict_from_TSVline(
                line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
                       "ners", "lemmas", "dep_paths", "dep_parents",
                       "bounding_boxes", "gene_entities", "gene_wordidxss",
                       "gene_is_corrects", "gene_types",
                       "hpoterm_entities", "hpoterm_wordidxss",
                       "hpoterm_is_corrects", "hpoterm_types"],
                [no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
                 TSVstring2list, TSVstring2list, TSVstring2list,
                 TSVstring2list, lambda x: TSVstring2list(x, int),
                 TSVstring2list,  # these are for the sentence
                 TSVstring2list, lambda x: TSVstring2list(x, sep="!~!"),
                 TSVstring2list, TSVstring2list,  # these are for the genes
                 TSVstring2list, lambda x: TSVstring2list(x, sep="!~!"),
                 TSVstring2list, TSVstring2list,  # these are for the HPO
                 ])
            # Remove the genes that are unsupervised copies or duplicates
            # ("n" = unsupervised). An unsupervised candidate survives only
            # if no supervised candidate covers the same word indexes.
            supervised_idxs = set()
            unsupervised_idxs = set()
            for i in range(len(line_dict["gene_is_corrects"])):
                if line_dict["gene_is_corrects"][i] == "n":
                    unsupervised_idxs.add(i)
                else:
                    if line_dict["gene_types"][i] != "GENE_SUP_contr_2":
                        # The above condition is to avoid duplicates
                        supervised_idxs.add(i)
            survived_unsuperv_idxs = set()
            for i in unsupervised_idxs:
                wordidxs = line_dict["gene_wordidxss"][i]
                found = False
                for j in supervised_idxs:
                    if line_dict["gene_wordidxss"][j] == wordidxs:
                        found = True
                        break
                if not found:
                    survived_unsuperv_idxs.add(i)
            to_keep = sorted(survived_unsuperv_idxs | supervised_idxs)
            new_gene_entities = []
            new_gene_wordidxss = []
            new_gene_is_corrects = []
            new_gene_types = []
            for i in to_keep:
                new_gene_entities.append(line_dict["gene_entities"][i])
                new_gene_wordidxss.append(line_dict["gene_wordidxss"][i])
                new_gene_is_corrects.append(line_dict["gene_is_corrects"][i])
                new_gene_types.append(line_dict["gene_types"][i])
            line_dict["gene_entities"] = new_gene_entities
            line_dict["gene_wordidxss"] = new_gene_wordidxss
            line_dict["gene_is_corrects"] = new_gene_is_corrects
            line_dict["gene_types"] = new_gene_types
            # Remove the hpoterms that are unsupervised copies
            # (same filtering as for genes, without the _contr_2 special case)
            supervised_idxs = set()
            unsupervised_idxs = set()
            for i in range(len(line_dict["hpoterm_is_corrects"])):
                if line_dict["hpoterm_is_corrects"][i] == "n":
                    unsupervised_idxs.add(i)
                else:
                    supervised_idxs.add(i)
            survived_unsuperv_idxs = set()
            for i in unsupervised_idxs:
                wordidxs = line_dict["hpoterm_wordidxss"][i]
                found = False
                for j in supervised_idxs:
                    if line_dict["hpoterm_wordidxss"][j] == wordidxs:
                        found = True
                        break
                if not found:
                    survived_unsuperv_idxs.add(i)
            to_keep = sorted(survived_unsuperv_idxs | supervised_idxs)
            new_hpoterm_entities = []
            new_hpoterm_wordidxss = []
            new_hpoterm_is_corrects = []
            new_hpoterm_types = []
            for i in to_keep:
                new_hpoterm_entities.append(line_dict["hpoterm_entities"][i])
                new_hpoterm_wordidxss.append(line_dict["hpoterm_wordidxss"][i])
                new_hpoterm_is_corrects.append(
                    line_dict["hpoterm_is_corrects"][i])
                new_hpoterm_types.append(line_dict["hpoterm_types"][i])
            line_dict["hpoterm_entities"] = new_hpoterm_entities
            line_dict["hpoterm_wordidxss"] = new_hpoterm_wordidxss
            line_dict["hpoterm_is_corrects"] = new_hpoterm_is_corrects
            line_dict["hpoterm_types"] = new_hpoterm_types
            # Create the sentence object where the two mentions appear
            sentence = Sentence(
                line_dict["doc_id"], line_dict["sent_id"],
                line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
                line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
                line_dict["dep_parents"], line_dict["bounding_boxes"])
            # Skip weird sentences
            if sentence.is_weird():
                continue
            gene_mentions = []
            hpoterm_mentions = []
            positive_relations = []
            gene_wordidxs = set()
            hpoterm_wordidxs = set()
            # Iterate over each pair of (gene,phenotype) mentions
            # NOTE(review): the hpoterm loop below is a *sibling* of this
            # gene loop, not nested inside it, so each hpoterm is paired
            # only with the last gene_mention built here -- the comment
            # above suggests it was meant to be nested; confirm intent.
            for g_idx in range(len(line_dict["gene_is_corrects"])):
                g_wordidxs = TSVstring2list(
                    line_dict["gene_wordidxss"][g_idx], int)
                for idx in g_wordidxs:
                    gene_wordidxs.add(idx)
                gene_mention = Mention(
                    "GENE", line_dict["gene_entities"][g_idx],
                    [sentence.words[j] for j in g_wordidxs])
                if line_dict["gene_is_corrects"][g_idx] == "n":
                    gene_mention.is_correct = None
                elif line_dict["gene_is_corrects"][g_idx] == "f":
                    gene_mention.is_correct = False
                elif line_dict["gene_is_corrects"][g_idx] == "t":
                    gene_mention.is_correct = True
                else:
                    assert False
                gene_mention.type = line_dict["gene_types"][g_idx]
                assert not gene_mention.type.endswith("_UNSUP")
                gene_mentions.append(gene_mention)
            for h_idx in range(len(line_dict["hpoterm_is_corrects"])):
                h_wordidxs = TSVstring2list(
                    line_dict["hpoterm_wordidxss"][h_idx], int)
                for idx in h_wordidxs:
                    hpoterm_wordidxs.add(idx)
                hpoterm_mention = Mention(
                    "hpoterm", line_dict["hpoterm_entities"][h_idx],
                    [sentence.words[j] for j in h_wordidxs])
                if line_dict["hpoterm_is_corrects"][h_idx] == "n":
                    hpoterm_mention.is_correct = None
                elif line_dict["hpoterm_is_corrects"][h_idx] == "f":
                    hpoterm_mention.is_correct = False
                elif line_dict["hpoterm_is_corrects"][h_idx] == "t":
                    hpoterm_mention.is_correct = True
                else:
                    assert False
                hpoterm_mention.type = line_dict["hpoterm_types"][h_idx]
                assert not hpoterm_mention.type.endswith("_UNSUP")
                hpoterm_mentions.append(hpoterm_mention)
                # Skip if the word indexes overlab
                if set(g_wordidxs) & set(h_wordidxs):
                    continue
                # Skip if the mentions are too far away
                gene_start = gene_mention.wordidxs[0]
                hpoterm_start = hpoterm_mention.wordidxs[0]
                gene_end = gene_mention.wordidxs[-1]
                hpoterm_end = hpoterm_mention.wordidxs[-1]
                limits = sorted(
                    (gene_start, hpoterm_start, gene_end, hpoterm_end))
                start = limits[0]  # NOTE(review): `start` is never used
                betw_start = limits[1]
                betw_end = limits[2]
                if betw_end - betw_start > 50:
                    continue
                relation = Relation(
                    "GENEPHENO", gene_mention, hpoterm_mention)
                # Supervise
                supervise(relation, gene_mention, hpoterm_mention,
                          sentence)
                if relation.is_correct:
                    positive_relations.append(
                        (gene_mention, hpoterm_mention))
                # Print!
                print(relation.tsv_dump())
            # Create some artificial negative examples:
            # for each (gene, phenotype) pair that is labelled as positive
            # example, select one word w in the same sentence that (1) is not a
            # gene mention candidate and (2) is not a phenotype mention
            # candidate, add (gene, w) and (w, phenotype) as negative example
            avail_wordidxs = (
                set(line_dict["wordidxs"]) - set(hpoterm_wordidxs)) - \
                set(gene_wordidxs)
            avail_wordidxs = list(avail_wordidxs)
            if len(avail_wordidxs) > 0:
                fake_rels = []  # NOTE(review): never appended to; dead variable
                for (gene_mention, hpoterm_mention) in positive_relations:
                    other_word = sentence.words[random.choice(avail_wordidxs)]
                    fake_gene_mention = Mention(
                        "FAKE_GENE", other_word.lemma, [other_word, ])
                    fake_hpo_mention = Mention(
                        "FAKE_HPOTERM", other_word.lemma, [other_word, ])
                    fake_rel_1 = Relation(
                        "GENEPHENO_SUP_POSFAKEGENE", fake_gene_mention,
                        hpoterm_mention)
                    fake_rel_2 = Relation(
                        "GENEPHENO_SUP_POSFAKEHPO", gene_mention,
                        fake_hpo_mention)
                    fake_rel_1.is_correct = False
                    fake_rel_2.is_correct = False
                    # Print!
                    print(fake_rel_1.tsv_dump())
                    print(fake_rel_2.tsv_dump())
            # Create more artificial negative examples:
            # for each gene candidate G in the sentence, if the pattern G
            # <Verb> X appears in the same sentence and X is not a phenotype
            # mention candidate, add (gene, X) as negative examples
            for gene_mention in gene_mentions:
                try:
                    next_word = sentence.words[gene_mention.wordidxs[-1] + 1]
                except IndexError:
                    continue
                if re.search('^VB[A-Z]*$', next_word.pos) and \
                        next_word.word not in ["{", "}", "(", ")", "[", "]"]:
                    try:
                        after_next_word = sentence.words[
                            next_word.in_sent_idx + 1]
                    except IndexError:
                        continue
                    if after_next_word.in_sent_idx in hpoterm_wordidxs:
                        continue
                    fake_hpo_mention = Mention(
                        "FAKE_HPOTERM", after_next_word.lemma,
                        [after_next_word, ])
                    fake_rel = Relation(
                        "GENEPHENO_SUP_FAKEHPO", gene_mention,
                        fake_hpo_mention)
                    fake_rel.is_correct = False
                    print(fake_rel.tsv_dump())
            # Create more artificial negative examples:
            # as before but for phenotypes
            for hpo_mention in hpoterm_mentions:
                try:
                    next_word = sentence.words[hpo_mention.wordidxs[-1] + 1]
                except IndexError:
                    continue
                if re.search('^VB[A-Z]*$', next_word.pos) and \
                        next_word.word not in ["{", "}", "(", ")", "[", "]"]:
                    try:
                        after_next_word = sentence.words[
                            next_word.in_sent_idx + 1]
                    except IndexError:
                        continue
                    if after_next_word.in_sent_idx in gene_wordidxs:
                        continue
                    fake_gene_mention = Mention(
                        "FAKE_GENE", after_next_word.lemma,
                        [after_next_word, ])
                    fake_rel = Relation(
                        "GENEPHENO_SUP_FAKEGENE", fake_gene_mention,
                        hpo_mention)
                    fake_rel.is_correct = False
                    print(fake_rel.tsv_dump())
| 1,257 | 0 | 22 |
acabce901b355b7c739c040859fb887508e7978a | 5,806 | py | Python | exercises/adaboost_scenario.py | noamwino/IML.HUJI | 0b1b6f333a16200fa7717af1be12e5f38694b74c | [
"MIT"
] | null | null | null | exercises/adaboost_scenario.py | noamwino/IML.HUJI | 0b1b6f333a16200fa7717af1be12e5f38694b74c | [
"MIT"
] | null | null | null | exercises/adaboost_scenario.py | noamwino/IML.HUJI | 0b1b6f333a16200fa7717af1be12e5f38694b74c | [
"MIT"
] | null | null | null | import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from IMLearn.metrics.loss_functions import accuracy
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Samples are uniform over [-1, 1]^2; the label is -1 inside the circle
    of radius 0.5 around the origin and +1 outside, after which roughly
    `noise_ratio` of labels are flipped.

    Parameters
    ----------
    n: int
        Number of samples to generate

    noise_ratio: float
        Ratio of labels to invert

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples

    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    '''
    generate samples X with shape: (num_samples, 2) and labels y with shape (num_samples).
    num_samples: the number of samples to generate
    noise_ratio: invert the label for this ratio of the samples
    '''
    X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
    y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
    # np.random.choice samples with replacement, so slightly fewer than
    # noise_ratio * n distinct labels may actually be flipped.
    y[np.random.choice(n, int(noise_ratio * n))] *= -1
    return X, y
if __name__ == '__main__':
    np.random.seed(0)  # fixed seed for reproducible figures
    # Run the exercise once without noise and once with 40% label noise.
    fit_and_evaluate_adaboost(noise=0)
    fit_and_evaluate_adaboost(noise=0.4)
| 42.379562 | 117 | 0.629866 | import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from IMLearn.metrics.loss_functions import accuracy
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Points are drawn uniformly from the square [-1, 1]^2. A point gets
    label -1 when it lies strictly inside the circle of radius 0.5
    centered at the origin, and +1 otherwise; afterwards roughly
    `noise_ratio` of the labels are flipped at random.

    Parameters
    ----------
    n: int
        Number of samples to generate

    noise_ratio: float
        Ratio of labels to invert

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples

    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    # Uniform samples over [-1, 1]^2.
    samples = np.random.rand(n, 2) * 2 - 1
    labels = np.ones(n)
    # -1 inside the circle of radius 0.5 (radius^2 = 0.25).
    inside_circle = (samples ** 2).sum(axis=1) < 0.25
    labels[inside_circle] = -1
    # Flip ~noise_ratio of the labels (sampling with replacement).
    flip_idx = np.random.choice(n, int(noise_ratio * n))
    labels[flip_idx] *= -1
    return samples, labels
def question_1(train_X, train_y, test_X, test_y, n_learners, noise):
    """Fit AdaBoost over decision stumps and plot error vs. ensemble size.

    Returns the fitted AdaBoost model and the array of test errors,
    where index t holds the misclassification error of the first t
    learners. Index 0 is set to inf so that an empty ensemble can never
    be selected as "optimal" by a later argmin.
    """
    adaboost = AdaBoost(wl=lambda: DecisionStump(), iterations=n_learners).fit(train_X, train_y)

    train_errors, test_errors = np.zeros(n_learners + 1), np.zeros(n_learners + 1)
    train_errors[0], test_errors[0] = np.inf, np.inf  # sentinel: 0 learners is not a model
    for t in range(1, n_learners + 1):
        train_errors[t] = adaboost.partial_loss(train_X, train_y, t)
        test_errors[t] = adaboost.partial_loss(test_X, test_y, t)

    # Fixed: x was np.arange(1, n_learners + 2), one element longer than
    # the y arrays (errors[1:] has n_learners entries); x must be 1..n_learners.
    ensemble_sizes = np.arange(1, n_learners + 1)
    fig = go.Figure([
        go.Scatter(x=ensemble_sizes, y=train_errors[1:], name="train errors"),
        go.Scatter(x=ensemble_sizes, y=test_errors[1:], name="test errors")
    ],
        layout=go.Layout(title=f"Q1 - Train and test errors as a function of the number of learners in the "
                               f"ensemble (noise={noise})".title(),
                         xaxis=dict(title=r"Number of weak learners", tick0=0, dtick=25),
                         yaxis=dict(title=r"Misclassification error")
                         )
    )
    fig.show()
    return adaboost, test_errors
def question_2(adaboost, test_X, test_y, lims, noise):
    """Plot a 2x2 grid of decision surfaces for ensembles of 5/50/100/250 stumps,
    overlaying the test samples colored by their true label."""
    T = [5, 50, 100, 250]
    fig = make_subplots(rows=2, cols=2, subplot_titles=[rf"$\textbf{{{t} Weak Learners}}$" for t in T],
                        horizontal_spacing=0.03, vertical_spacing=.03)
    for i, weak_models_count in enumerate(T):
        # The lambda is consumed by decision_surface within this iteration,
        # so the usual late-binding closure pitfall does not apply here.
        fig.add_traces([
            decision_surface(lambda X: adaboost.partial_predict(X, weak_models_count), lims[0], lims[1],
                             showscale=False),
            go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers", showlegend=False,
                       marker=dict(color=test_y, colorscale=[custom[0], custom[-1]],
                                   line=dict(color="black", width=1)))
        ],
            rows=(i // 2) + 1, cols=(i % 2) + 1  # fill the grid row-major
        )
    # NOTE(review): "week learners" in the title is a typo for "weak learners"
    # (left unchanged here -- it is a user-visible runtime string).
    fig.update_layout(title=f"Q2 - Decision boundaries per number of week learners (noise={noise})".title(),
                      margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(visible=False)
    fig.show()
def question_3(adaboost, test_X, test_y, test_errors, lims, noise):
    """Plot the decision surface of the ensemble size minimizing test error.

    test_errors[0] is inf (set in question_1), so argmin always selects a
    ensemble of at least one stump. Accuracy of that ensemble is shown in
    the title.
    """
    optimal_learners_count = int(np.argmin(test_errors))
    fig = go.Figure([
        decision_surface(lambda X: adaboost.partial_predict(X, optimal_learners_count), lims[0], lims[1],
                         showscale=False),
        go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers", showlegend=False,
                   marker=dict(color=test_y, colorscale=[custom[0], custom[-1]],
                               line=dict(color="black", width=1)))
    ],
    )
    accuracy_score = accuracy(test_y, adaboost.partial_predict(test_X, optimal_learners_count))
    fig.update_layout(title=f"Q3 - Decision Boundaries of the Optimal Ensemble - {optimal_learners_count} "
                            f"Decision Stumps, Accuracy: {accuracy_score} (noise={noise})")
    fig.show()
def question_4(adaboost, train_X, train_y, lims, noise):
    """Plot the full ensemble's decision surface with training points sized
    proportionally to their final AdaBoost sample weights D_."""
    # Normalize weights to [0, 1] and scale to a visible marker size;
    # the larger factor for noise == 0 compensates for smaller weights there.
    D = adaboost.D_ / np.max(adaboost.D_) * (20 if noise == 0 else 10)
    fig = go.Figure([
        decision_surface(lambda X: adaboost.partial_predict(X, adaboost.iterations_), lims[0], lims[1],
                         showscale=False),
        go.Scatter(x=train_X[:, 0], y=train_X[:, 1], mode='markers', showlegend=False,
                   marker=dict(color=train_y, colorscale=class_colors(2), size=D))
    ],
    )
    fig.update_layout(title=f"Q4 - Decision boundaries of the full ensemble (noise={noise})".title())
    fig.show()
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
    """Run the full AdaBoost exercise (questions 1-4) for one noise level."""
    train_X, train_y = generate_data(train_size, noise)
    test_X, test_y = generate_data(test_size, noise)

    # Question 1: Train- and test errors of AdaBoost in noiseless case
    adaboost, test_errors = question_1(train_X, train_y, test_X, test_y, n_learners, noise)

    # Plot limits: bounding box of all samples, padded by 0.1 on each side.
    all_samples = np.r_[train_X, test_X]
    lims = np.array([all_samples.min(axis=0), all_samples.max(axis=0)]).T + np.array([-.1, .1])

    # Question 2: Plotting decision surfaces
    question_2(adaboost, test_X, test_y, lims, noise)

    # Question 3: Decision surface of best performing ensemble
    question_3(adaboost, test_X, test_y, test_errors, lims, noise)

    # Question 4: Decision surface with weighted samples
    question_4(adaboost, train_X, train_y, lims, noise)
if __name__ == '__main__':
    np.random.seed(0)  # fixed seed for reproducible figures
    # Run the exercise once without noise and once with 40% label noise.
    fit_and_evaluate_adaboost(noise=0)
    fit_and_evaluate_adaboost(noise=0.4)
| 4,416 | 0 | 115 |
2ed83bf71183bf748436ac2bee2e9d5ee525fe75 | 3,548 | py | Python | src/sas/sasgui/perspectives/fitting/resultpanel.py | andyfaff/sasview | c00a797ab9c4ddc60f0fa8a64ae8a2067c225921 | [
"BSD-3-Clause"
] | null | null | null | src/sas/sasgui/perspectives/fitting/resultpanel.py | andyfaff/sasview | c00a797ab9c4ddc60f0fa8a64ae8a2067c225921 | [
"BSD-3-Clause"
] | null | null | null | src/sas/sasgui/perspectives/fitting/resultpanel.py | andyfaff/sasview | c00a797ab9c4ddc60f0fa8a64ae8a2067c225921 | [
"BSD-3-Clause"
] | null | null | null | """
FitPanel class contains fields allowing to fit models and data
:note: For Fit to be performed the user should check at least one parameter
on fit Panel window.
"""
import wx
import wx.lib.newevent
from wx.aui import AuiNotebook as Notebook
import datetime
from bumps.gui.convergence_view import ConvergenceView
from bumps.gui.uncertainty_view import UncertaintyView, CorrelationView, TraceView
from bumps.dream.stats import var_stats, format_vars
from sas.sasgui.guiframe.panel_base import PanelBase
from sas.sasgui.guiframe.events import StatusEvent
(PlotResultEvent, EVT_PLOT_RESULT) = wx.lib.newevent.NewEvent()
class ResultPanel(Notebook, PanelBase):
    """
    FitPanel class contains fields allowing to fit models and data

    :note: For Fit to be performed the user should check at least one parameter
        on fit Panel window.
    """
    ## Internal name for the AUI manager
    window_name = "Result panel"
    ## Title to appear on top of the window
    window_caption = "Result Panel"
    CENTER_PANE = True

    def __init__(self, parent, manager=None, *args, **kwargs):
        """Create the notebook inside `parent` and wire result/close events.

        `manager` is accepted for interface compatibility but not stored;
        self._manager stays None until assigned externally.
        """
        style = ((wx.aui.AUI_NB_WINDOWLIST_BUTTON
                  | wx.aui.AUI_NB_DEFAULT_STYLE
                  | wx.CLIP_CHILDREN)
                 & ~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
        Notebook.__init__(self, parent, wx.ID_ANY, style=style)
        PanelBase.__init__(self, parent)
        self.frame = parent
        # Fit results are delivered asynchronously via PlotResultEvent.
        self.Bind(EVT_PLOT_RESULT, self.on_plot_results)
        self.frame.Bind(wx.EVT_CLOSE, self.on_close)
        self._manager = None
| 35.48 | 98 | 0.644307 | """
FitPanel class contains fields allowing to fit models and data
:note: For Fit to be performed the user should check at least one parameter
on fit Panel window.
"""
import wx
import wx.lib.newevent
from wx.aui import AuiNotebook as Notebook
import datetime
from bumps.gui.convergence_view import ConvergenceView
from bumps.gui.uncertainty_view import UncertaintyView, CorrelationView, TraceView
from bumps.dream.stats import var_stats, format_vars
from sas.sasgui.guiframe.panel_base import PanelBase
from sas.sasgui.guiframe.events import StatusEvent
(PlotResultEvent, EVT_PLOT_RESULT) = wx.lib.newevent.NewEvent()
class ResultPanel(Notebook, PanelBase):
    """
    FitPanel class contains fields allowing to fit models and data

    :note: For Fit to be performed the user should check at least one parameter
        on fit Panel window.
    """
    ## Internal name for the AUI manager
    window_name = "Result panel"
    ## Title to appear on top of the window
    window_caption = "Result Panel"
    CENTER_PANE = True

    def __init__(self, parent, manager=None, *args, **kwargs):
        """Create the notebook inside `parent` and wire result/close events.

        `manager` is accepted for interface compatibility but not stored;
        self._manager stays None until assigned externally.
        """
        style = ((wx.aui.AUI_NB_WINDOWLIST_BUTTON
                  | wx.aui.AUI_NB_DEFAULT_STYLE
                  | wx.CLIP_CHILDREN)
                 & ~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
        Notebook.__init__(self, parent, wx.ID_ANY, style=style)
        PanelBase.__init__(self, parent)
        self.frame = parent
        # Fit results are delivered asynchronously via PlotResultEvent.
        self.Bind(EVT_PLOT_RESULT, self.on_plot_results)
        self.frame.Bind(wx.EVT_CLOSE, self.on_close)
        self._manager = None

    def on_close(self, event):
        """Hide (rather than destroy) the frame when the user closes it,
        unless the close cannot be vetoed."""
        if event.CanVeto():
            self.frame.Hide()
            event.Veto()
        else:
            event.Skip()

    def on_plot_results(self, event):
        """Populate the notebook tabs from a finished fit result.

        Shows convergence / correlation / uncertainty / trace views when the
        result carries the corresponding bumps state, and removes stale tabs
        otherwise.
        """
        self.frame.Show(True)
        result = event.result[0][0]
        filename = result.data.sas_data.filename
        current_time = datetime.datetime.now().strftime("%I:%M%p, %B %d, %Y")
        # NOTE(review): self.parent is presumably provided by PanelBase
        # (only self.frame is set in __init__) -- confirm.
        self.parent.SetTitle(self.window_name + " - " + filename + " - " + current_time)
        if hasattr(result, 'convergence'):
            # Column 0 is the best chisq per step; remaining columns the population.
            best, pop = result.convergence[:, 0], result.convergence[:, 1:]
            self._get_view(ConvergenceView).update(best, pop)
        else:
            self._del_view(ConvergenceView)
        if hasattr(result, 'uncertainty_state'):
            stats = var_stats(result.uncertainty_state.draw())
            msg = format_vars(stats)
            self._get_view(CorrelationView).update(result.uncertainty_state)
            self._get_view(UncertaintyView).update((result.uncertainty_state, stats))
            self._get_view(TraceView).update(result.uncertainty_state)
            # TODO: stats should be stored in result rather than computed in bumps UncertaintyView
            wx.PostEvent(self.frame.parent,
                         StatusEvent(status=msg, info="info"))
        else:
            for view in (CorrelationView, UncertaintyView, TraceView):
                self._del_view(view)

    def get_frame(self):
        """Return the frame hosting this panel."""
        return self.frame

    def _get_view(self, view_class):
        """Return the existing page for `view_class`, creating and adding
        it (keyed by the view's title) if not present."""
        for idx in range(self.PageCount):
            if self.GetPageText(idx) == view_class.title:
                return self.GetPage(idx)
        else:
            panel = view_class(self)
            self.AddPage(panel, panel.title)
            return panel

    def _del_view(self, view_class):
        """Remove the page for `view_class` if present (first match only)."""
        for idx in range(self.PageCount):
            if self.GetPageText(idx) == view_class.title:
                self.DeletePage(idx)
| 1,817 | 0 | 135 |
29ecccaeaaefda909eced886e9f6ccfb802c9ab1 | 1,360 | py | Python | binary_search_unsorted.py | paigelarson/Lessons | 842a6df67551c781acf4243bb40ee8dc04a932cb | [
"MIT"
] | null | null | null | binary_search_unsorted.py | paigelarson/Lessons | 842a6df67551c781acf4243bb40ee8dc04a932cb | [
"MIT"
] | null | null | null | binary_search_unsorted.py | paigelarson/Lessons | 842a6df67551c781acf4243bb40ee8dc04a932cb | [
"MIT"
] | null | null | null | from random import randint
from random_word import RandomWords
random_num_list=[]
for i in range(200):
random_num_list.append(randint(0,1000))
r = RandomWords()
random_word_list=r.get_random_words()
list_type=input("Which type of list would you like to sort and search, words or numbers?")
if list_type=="words":
print(random_word_list)
search=input("What word do you want to find?")
my_list=mysort(random_word_list)
if list_type=="numbers":
print(random_num_list)
search=input("What number do you want to find?")
search=int(search)
my_list=mysort(random_num_list)
print(my_list)
result=binary(my_list,search)
if result != -1:
print("Element is present at index "+ str(result)+". This means this is element number "+str(result+1)+" in the list.")
else:
print("Element is not present in this list")
| 19.428571 | 122 | 0.633824 | from random import randint
from random_word import RandomWords
def mysort(my_list):
new_list=[]
while my_list:
min=my_list[0]
for i in my_list:
if i<min:
min=i
new_list.append(min)
my_list.remove(min)
return new_list
def binary(arr, x):
low=0
high=len(arr)-1
while low<=high:
mid=(high+low)//2
print(low, high, mid,arr[mid], x)
if arr[mid]>x:
high=mid-1
elif arr[mid]<x:
low=mid+1
else:
return mid
return -1
random_num_list=[]
for i in range(200):
random_num_list.append(randint(0,1000))
r = RandomWords()
random_word_list=r.get_random_words()
list_type=input("Which type of list would you like to sort and search, words or numbers?")
if list_type=="words":
print(random_word_list)
search=input("What word do you want to find?")
my_list=mysort(random_word_list)
if list_type=="numbers":
print(random_num_list)
search=input("What number do you want to find?")
search=int(search)
my_list=mysort(random_num_list)
print(my_list)
result=binary(my_list,search)
if result != -1:
print("Element is present at index "+ str(result)+". This means this is element number "+str(result+1)+" in the list.")
else:
print("Element is not present in this list")
| 413 | 0 | 50 |
5bb069bab349991d6869642252a999be8287805a | 4,513 | py | Python | integrationtest/vm/virtualrouter/destroy_none_vr_vms.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | 2 | 2016-03-23T08:45:44.000Z | 2017-06-26T02:40:46.000Z | integrationtest/vm/virtualrouter/destroy_none_vr_vms.py | KevinDavidMitnick/zstack-woodpecker | 96257faaf3c362168d008bdb47002025ad669b24 | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/virtualrouter/destroy_none_vr_vms.py | KevinDavidMitnick/zstack-woodpecker | 96257faaf3c362168d008bdb47002025ad669b24 | [
"Apache-2.0"
] | 1 | 2017-05-19T06:40:40.000Z | 2017-05-19T06:40:40.000Z | '''
Cleanup all VMs (find from ZStack database). It is mainly for debuging.
@author: Youyk
'''
import threading
import time
import sys
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import apibinding.inventory as inventory
thread_threshold=1000
session_uuid = None
session_to = None
session_mc = None
| 35.81746 | 105 | 0.671837 | '''
Cleanup all VMs (find from ZStack database). It is mainly for debuging.
@author: Youyk
'''
import threading
import time
import sys
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import apibinding.inventory as inventory
thread_threshold=1000
session_uuid = None
session_to = None
session_mc = None
def destroy_vips(vips):
for vip in vips:
thread = threading.Thread(target=net_ops.delete_vip, args=(vip.uuid,))
while threading.active_count() > thread_threshold:
time.sleep(0.5)
exc = sys.exc_info()
if exc[0]:
raise info1, None, info2
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
if exc[0]:
raise info1, None, info2
time.sleep(0.1)
def destroy_vms(vms):
for vm in vms:
thread = threading.Thread(target=vm_ops.destroy_vm, args=(vm.uuid,))
while threading.active_count() > thread_threshold:
time.sleep(0.5)
exc = sys.exc_info()
if exc[0]:
raise info1, None, info2
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
if exc[0]:
raise info1, None, info2
time.sleep(0.1)
def test():
global session_to
global session_mc
global session_uuid
session_uuid = acc_ops.login_as_admin()
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
cond = res_ops.gen_query_conditions('type', '=', inventory.USER_VM_TYPE)
cond = res_ops.gen_query_conditions('state', '!=', 'Destroyed', cond)
num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
if num <= thread_threshold:
vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond, session_uuid)
destroy_vms(vms)
else:
start = 0
limit = thread_threshold - 1
curr_num = start
vms = []
while curr_num < num:
vms_temp = res_ops.query_resource_fields(res_ops.VM_INSTANCE, \
cond, session_uuid, ['uuid'], start, limit)
vms.extend(vms_temp)
curr_num += limit
start += limit
destroy_vms(vms)
vip_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
if vip_num <= thread_threshold:
vips = res_ops.query_resource(res_ops.VIP, [], session_uuid)
destroy_vips(vips)
else:
start = 0
limit = thread_threshold - 1
curr_num = start
vms = []
while curr_num < vip_num:
vips_temp = res_ops.query_resource_fields(res_ops.VIP, \
[], session_uuid, ['uuid'], start, limit)
vips.extend(vips_temp)
curr_num += limit
start += limit
destroy_vips(vips)
#con_ops.change_global_config('identity', 'session.timeout', session_to)
#con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc)
left_num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
if left_num == 0:
test_util.test_pass('None VR VMs destroy Success. Destroy %d VMs.' % num)
else:
test_util.test_fail('None VR VMs destroy Fail. %d VMs are not Destroied.' % left_num)
left_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
if left_num == 0:
test_util.test_pass('VIP destroy Success. Destroy %d VIP.' % num)
else:
test_util.test_fail('VIP destroy Fail. %d VIP are not Destroied.' % left_num)
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
acc_ops.logout(session_uuid)
def error_cleanup():
if session_to:
con_ops.change_global_config('identity', 'session.timeout', session_to)
if session_mc:
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc)
if session_uuid:
acc_ops.logout(session_uuid)
| 3,755 | 0 | 92 |
0a9dad405f706381771e6ea0364a9fb2d5566d96 | 8,201 | py | Python | workshop3/src/NEL_Jacques-212588109-workshop3/mcts.py | jmnel/combinatorial-optimization | c921dee6cb0febc47a8a791f8220b02b35caf0cf | [
"MIT"
] | null | null | null | workshop3/src/NEL_Jacques-212588109-workshop3/mcts.py | jmnel/combinatorial-optimization | c921dee6cb0febc47a8a791f8220b02b35caf0cf | [
"MIT"
] | null | null | null | workshop3/src/NEL_Jacques-212588109-workshop3/mcts.py | jmnel/combinatorial-optimization | c921dee6cb0febc47a8a791f8220b02b35caf0cf | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Tuple, List
from random import randint
from time import perf_counter
import numpy as np
from common.util import argmax
from common.ticktack import *
def uct_search(init_state,
util_fn: Callable[[Tuple[int, ...]], float],
selection_criteria: str = 'max_child',
exploration_bias: float = 1. / np.sqrt(2.),
max_epochs: int = 200,
max_robust_min_epochs: int = 20):
"""
Performs Monte Carlo tree search using UCT method to find optimal next state.
Args:
init_state: initial root state from which to make choice
util_fn: score calculation function
selection_criteria: choice of 'max_child', 'robust_child', 'max_rebust_child',
or 'secure_child'
exploration_bias: bias which determines propensity to expand nodes
max_epochs: maximum number of epochs to perform
max_robust_min_epochs: number of epochs before testing max-robust selection criteria;
only applies to max-robust method
Returns:
Tuple with optimal state, estimated score, and profiling statistics
"""
# This will store some profiling statistics.
stats = {'explored_count': 0,
'visit_count': 0,
'util_fn_evals': 0,
'simulation_time': 0}
# Start profiling timer.
t_start = perf_counter()
def tree_policy(v: Node) -> Node:
"""
Traverses tree by expanding unexplored nodes, and / or selects children with maximal
UCT until terminal state is encountered.
"""
# Loop while v is non-terminal.
while len(v.state) < 9:
# Expand and return v if it is unexplored.
if len(v.a_collapsed) > 0:
return expand(v)
# Otherwise, return v's most promising child.
else:
v = best_child(v, exploration_bias)
return v
def expand(v):
"""
Expands a given node with available action and adds new child to parent.
"""
# Update profiling statistics.
stats['explored_count'] += 1
assert(len(v.a_collapsed) > 0)
# Pick a unexplored action from v.
a = v.a_collapsed.pop(0)
# Create new child for v with action a.
v_new = Node(state=v.state + (a,),
parent=v,
a_incoming=a)
# Append new node to parent's list of children.
v.children.append(v_new)
return v_new
def best_child(v: Node, c):
"""Selects child node which maximises UCT function."""
best_uct_child = (float('-inf'), None)
for child in v.children:
# Calculate average expected reward for child.
q_bar = child.q / child.n
# Calculate UCT function for child.
uct = q_bar + c * np.sqrt(2.0 * np.log(v.n) / child.n)
# Update best child.
if (uct,) > best_uct_child:
best_uct_child = (uct, child)
return best_uct_child[1]
def backup(v: Node, delta):
"""Traverses tree from child to parent, propogating count and score."""
# Iterate until root node is encountered.
while v is not None:
# Increment visit count.
v.n += 1
# Update profiling statistics.
stats['visit_count'] += 1
# Propogate score.
v.q += delta
# Go to parent next.
v = v.parent
# Handle selection criteria a.1 max-child, a.2 robust-child, and a.3 secure-child.
if selection_criteria == 'max' or 'robust' or 'secure':
root = Node(state=init_state)
# Perform MCTS algorithm main loop.
for epoch in range(max_epochs):
vl = tree_policy(root)
sim_t_start = perf_counter()
delta = default_policy(vl.state)
stats['simulation_time'] += perf_counter() - sim_t_start
backup(vl, delta)
# This helper extracts appropriate value from child depending on selection criteria.
# Return state of optimal child to caller.
optim_child = root.children[
argmax(
(crit_selector(selection_criteria, c)
for c in root.children))
]
optim_score = crit_selector(selection_criteria, optim_child)
stats['t_elapsed'] = t_start - perf_counter()
return (optim_child.state, optim_score, stats)
# Handle selection criteria b max-robust-child.
elif selection_criteria == 'max_robust':
root = Node(state=init_state)
# Perform MCTS algorithm main loop; max-robust variant.
for epoch in range(max_epochs):
vl = tree_policy(root)
delta = 1.0 - default_policy(vl.state)
backup(vl, delta)
# Start testing max-robust selection criteria after minimum epochs.
if epoch > max_robust_min_epochs:
# Determine index of child with maximum reward.
q_max = argmax((c.q for c in root.children))
# Determine index of child with maximum visit count.
n_max = argmax((c.n for c in root.children))
# If above 2 indices agree, return state of optimal node to caller.
if q_max == n_max:
optim_child = root.children[q_max]
optim_score = (optim_child.q, optim_child.n)
stats['t_elapsed'] = t_start - perf_counter()
return (optim_child.state, optim_score, stats)
# Selection criteria is invalid.
else:
# Throw exception.
raise ValueError(
'selection_criteria must be one of \'max\', \'robust\', \'max_robust\', or \'secure\'.')
| 32.41502 | 100 | 0.556883 | from __future__ import annotations
from typing import Tuple, List
from random import randint
from time import perf_counter
import numpy as np
from common.util import argmax
from common.ticktack import *
class Node:
parent: Node = None # node's parent
state: Tuple[int, ...] = None # state of node
q: float = 0.0 # simulated reward of node
n: int = 0 # visit count of node
a_incoming: int = None # incoming action which created node
a_collapsed: List[int] = list() # unexplored actions at node
children: List[Node] = list() # explored children of node
def __init__(self,
state: Tuple[int, ...],
parent: type(self) = None,
a_incoming: int = None):
"""Constructs node for MCTS tree."""
self.parent = parent
self.state = state
self.a_incoming = a_incoming
self.children = list()
# Unexplored actions are the set difference {0,...9} - {x for x in state}.
self.a_collapsed = list([a for a in set(range(9)) - set(state)])
def __str__(self):
s = 'Node object:\n'
s += f' parent={self.parent.__repr__()}\n'
s += f' state={self.state}\n'
s += f' a_incoming={self.a_incoming}\n'
s += f' a_collapsed={self.a_collapsed}\n'
s += f' children={len(self.children)}\n'
s += f' q={self.q}\n'
return s
def uct_search(init_state,
util_fn: Callable[[Tuple[int, ...]], float],
selection_criteria: str = 'max_child',
exploration_bias: float = 1. / np.sqrt(2.),
max_epochs: int = 200,
max_robust_min_epochs: int = 20):
"""
Performs Monte Carlo tree search using UCT method to find optimal next state.
Args:
init_state: initial root state from which to make choice
util_fn: score calculation function
selection_criteria: choice of 'max_child', 'robust_child', 'max_rebust_child',
or 'secure_child'
exploration_bias: bias which determines propensity to expand nodes
max_epochs: maximum number of epochs to perform
max_robust_min_epochs: number of epochs before testing max-robust selection criteria;
only applies to max-robust method
Returns:
Tuple with optimal state, estimated score, and profiling statistics
"""
# This will store some profiling statistics.
stats = {'explored_count': 0,
'visit_count': 0,
'util_fn_evals': 0,
'simulation_time': 0}
# Start profiling timer.
t_start = perf_counter()
def tree_policy(v: Node) -> Node:
"""
Traverses tree by expanding unexplored nodes, and / or selects children with maximal
UCT until terminal state is encountered.
"""
# Loop while v is non-terminal.
while len(v.state) < 9:
# Expand and return v if it is unexplored.
if len(v.a_collapsed) > 0:
return expand(v)
# Otherwise, return v's most promising child.
else:
v = best_child(v, exploration_bias)
return v
def default_policy(state: Tuple[int, ...]):
while True:
# Evaluate utility function.
scr = util_fn(state)
# Update profiling statistics.
stats['util_fn_evals'] += 1
if scr != None:
break
a = tuple(set(range(9)) - set(state))[randint(0, 8 - len(state))]
state = state + (a,)
return scr
def expand(v):
"""
Expands a given node with available action and adds new child to parent.
"""
# Update profiling statistics.
stats['explored_count'] += 1
assert(len(v.a_collapsed) > 0)
# Pick a unexplored action from v.
a = v.a_collapsed.pop(0)
# Create new child for v with action a.
v_new = Node(state=v.state + (a,),
parent=v,
a_incoming=a)
# Append new node to parent's list of children.
v.children.append(v_new)
return v_new
def best_child(v: Node, c):
"""Selects child node which maximises UCT function."""
best_uct_child = (float('-inf'), None)
for child in v.children:
# Calculate average expected reward for child.
q_bar = child.q / child.n
# Calculate UCT function for child.
uct = q_bar + c * np.sqrt(2.0 * np.log(v.n) / child.n)
# Update best child.
if (uct,) > best_uct_child:
best_uct_child = (uct, child)
return best_uct_child[1]
def backup(v: Node, delta):
"""Traverses tree from child to parent, propogating count and score."""
# Iterate until root node is encountered.
while v is not None:
# Increment visit count.
v.n += 1
# Update profiling statistics.
stats['visit_count'] += 1
# Propogate score.
v.q += delta
# Go to parent next.
v = v.parent
# Handle selection criteria a.1 max-child, a.2 robust-child, and a.3 secure-child.
if selection_criteria == 'max' or 'robust' or 'secure':
root = Node(state=init_state)
# Perform MCTS algorithm main loop.
for epoch in range(max_epochs):
vl = tree_policy(root)
sim_t_start = perf_counter()
delta = default_policy(vl.state)
stats['simulation_time'] += perf_counter() - sim_t_start
backup(vl, delta)
# This helper extracts appropriate value from child depending on selection criteria.
def crit_selector(crit: str, child: Node):
if crit == 'max':
# Return reward for max-child criteria.
return child.q
elif crit == 'robust':
# Return visit-count for robust-child criteria.
return child.n
else:
# Calculate and return UCT for secure-child criteria.
uct = child.q / child.n + exploration_bias * \
np.sqrt(2. * np.log(root.n) / child.n)
return uct
# Return state of optimal child to caller.
optim_child = root.children[
argmax(
(crit_selector(selection_criteria, c)
for c in root.children))
]
optim_score = crit_selector(selection_criteria, optim_child)
stats['t_elapsed'] = t_start - perf_counter()
return (optim_child.state, optim_score, stats)
# Handle selection criteria b max-robust-child.
elif selection_criteria == 'max_robust':
root = Node(state=init_state)
# Perform MCTS algorithm main loop; max-robust variant.
for epoch in range(max_epochs):
vl = tree_policy(root)
delta = 1.0 - default_policy(vl.state)
backup(vl, delta)
# Start testing max-robust selection criteria after minimum epochs.
if epoch > max_robust_min_epochs:
# Determine index of child with maximum reward.
q_max = argmax((c.q for c in root.children))
# Determine index of child with maximum visit count.
n_max = argmax((c.n for c in root.children))
# If above 2 indices agree, return state of optimal node to caller.
if q_max == n_max:
optim_child = root.children[q_max]
optim_score = (optim_child.q, optim_child.n)
stats['t_elapsed'] = t_start - perf_counter()
return (optim_child.state, optim_score, stats)
# Selection criteria is invalid.
else:
# Throw exception.
raise ValueError(
'selection_criteria must be one of \'max\', \'robust\', \'max_robust\', or \'secure\'.')
| 1,208 | 950 | 80 |
ad9998df6946a5a9ff557cf6ac6a1e0aa4cac608 | 2,545 | py | Python | src/app/server/model/network_model.py | CWebkas/collective-movement-exploration | d0c572643485cb293df9d16ae12c8011d4c6eaa7 | [
"MIT"
] | null | null | null | src/app/server/model/network_model.py | CWebkas/collective-movement-exploration | d0c572643485cb293df9d16ae12c8011d4c6eaa7 | [
"MIT"
] | null | null | null | src/app/server/model/network_model.py | CWebkas/collective-movement-exploration | d0c572643485cb293df9d16ae12c8011d4c6eaa7 | [
"MIT"
] | null | null | null | from sqlalchemy.orm import backref
from sqlalchemy.orm import deferred
from db import db
from model.dataset_model import Dataset
from sqlalchemy.dialects.postgresql import JSONB
class Network(db.Model):
"""
Network data class
"""
# db table name
__tablename__ = 'network'
# realtionship to dataset table
dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id'), primary_key=True, nullable=False)
dataset = db.relationship(Dataset, backref=backref("network", cascade="all, delete-orphan"))
# columns
network_id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(255), nullable=False)
network = deferred(db.Column(JSONB))
metric_distance = db.Column(db.Float)
speed = db.Column(db.Float)
acceleration = db.Column(db.Float)
distance_centroid = db.Column(db.Float)
direction = db.Column(db.Float)
euclidean_distance = db.Column(db.Float)
finished = db.Column(db.Boolean, default=False)
error = db.Column(db.Boolean, default=False)
status = db.Column(db.String(255))
metric = db.Column(db.String(255), nullable=False, default='euclidean')
hierarchy = deferred(db.Column(JSONB))
| 32.628205 | 102 | 0.598821 | from sqlalchemy.orm import backref
from sqlalchemy.orm import deferred
from db import db
from model.dataset_model import Dataset
from sqlalchemy.dialects.postgresql import JSONB
class Network(db.Model):
"""
Network data class
"""
# db table name
__tablename__ = 'network'
# realtionship to dataset table
dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id'), primary_key=True, nullable=False)
dataset = db.relationship(Dataset, backref=backref("network", cascade="all, delete-orphan"))
# columns
network_id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(255), nullable=False)
network = deferred(db.Column(JSONB))
metric_distance = db.Column(db.Float)
speed = db.Column(db.Float)
acceleration = db.Column(db.Float)
distance_centroid = db.Column(db.Float)
direction = db.Column(db.Float)
euclidean_distance = db.Column(db.Float)
finished = db.Column(db.Boolean, default=False)
error = db.Column(db.Boolean, default=False)
status = db.Column(db.String(255))
metric = db.Column(db.String(255), nullable=False, default='euclidean')
hierarchy = deferred(db.Column(JSONB))
def __init__(self, network_id, **kwargs):
self.network_id = network_id
self.network = {}
self.hierarchy = {}
self.__dict__.update(kwargs)
def __repr__(self):
return '(' + str(self.dataset_id) + ',' + self.name + ')'
def as_dict(self):
return {
'dataset_name': self.dataset.name,
'name': self.name,
'finished': self.finished,
'error': self.error,
'status': self.status,
'metric_distance': self.metric_distance,
'speed': self.speed,
'acceleration': self.acceleration,
'distance_centroid': self.distance_centroid,
'direction': self.direction,
'euclidean_distance': self.euclidean_distance,
'metric': self.metric
}
def as_info_dict(self):
return {
'name': self.name,
'finished': self.finished,
'network_id': self.network_id
}
def network_as_data_dict(self):
return {
'name': self.name,
'data': self.network
}
def hierarchy_as_data_dict(self):
return {
'name': self.name,
'hierarchy': self.hierarchy
} | 1,129 | 0 | 173 |
f46a0453bc721f21093cb4d73085465742749467 | 28,995 | py | Python | warden/api/routes.py | pathtoknowhere/warden | 3cff5378bf5e47132c3ddf38b9b9caf1085c7e46 | [
"MIT"
] | null | null | null | warden/api/routes.py | pathtoknowhere/warden | 3cff5378bf5e47132c3ddf38b9b9caf1085c7e46 | [
"MIT"
] | null | null | null | warden/api/routes.py | pathtoknowhere/warden | 3cff5378bf5e47132c3ddf38b9b9caf1085c7e46 | [
"MIT"
] | null | null | null | from flask import (Blueprint, flash, request, current_app, jsonify, Response)
from warden_modules import (warden_metadata,
positions_dynamic,
generatenav, specter_df,
current_path, regenerate_nav,
home_path, transactions_fx)
from connections import tor_request
from pricing_engine.engine import price_ondate, historical_prices
from flask_login import login_required, current_user
from random import randrange
from pricing_engine.engine import fx_rate, realtime_price
from utils import heatmap_generator, pickle_it
from models import Trades, AccountInfo, TickerInfo
from datetime import datetime, timedelta
from dateutil import parser
from dateutil.relativedelta import relativedelta
import mhp as mrh
import simplejson
import logging
import pandas as pd
import numpy as np
import json
import os
import math
import csv
import requests
api = Blueprint('api', __name__)
@api.route("/gitreleases", methods=["GET"])
@login_required
@api.route("/txs_json", methods=['GET'])
@login_required
@api.route("/satoshi_quotes_json", methods=['GET'])
@login_required
# API End Point checks for wallet activity
# Gets a local pickle file and dumps - does not work with pandas df
# Do not include extension pkl on argument
@api.route("/get_pickle", methods=['GET'])
@login_required
@api.route("/check_activity", methods=['GET'])
@login_required
# API End Point with all WARden metadata
@api.route("/warden_metadata", methods=['GET'])
@login_required
# Returns a JSON with Test Response on TOR
@api.route("/testtor", methods=["GET"])
@login_required
# API End point
# Json for main page with realtime positions
@api.route("/positions_json", methods=["GET"])
@login_required
# Returns current BTC price and FX rate for current user
# This is the function used at the layout navbar to update BTC price
# Please note that the default is to update every 20s (MWT(20) above)
@ api.route("/realtime_btc", methods=["GET"])
@login_required
# API end point - cleans notifications and creates a new checkpoint
@api.route("/dismiss_notification", methods=["POST"])
@login_required
# API end point to return Specter data
# args: ?load=True (True = loads saved json, False = refresh data)
@api.route("/specter", methods=["GET"])
@login_required
# Latest Traceback message
@api.route("/traceback_error", methods=["GET"])
@login_required
# API end point
# Function returns summary statistics for portfolio NAV and values
# Main function for portfolio page
@api.route("/portstats", methods=["GET", "POST"])
@login_required
# API end point - returns a json with NAV Chartdata
@api.route("/navchartdatajson", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
# API end point - returns a json with NAV Chartdata
@api.route("/stackchartdatajson", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
# Return the price of a ticker on a given date
# Takes arguments:
# ticker: Single ticker for filter (default = NAV)
# date: date to get price
@api.route("/getprice_ondate", methods=["GET"])
@login_required
@api.route("/fx_lst", methods=["GET"])
@login_required
# Receiver argument ?term to return a list of fx (fiat and digital)
# Searches the list both inside the key as well as value of dict
@api.route("/heatmapbenchmark_json", methods=["GET"])
@login_required
# Return Monthly returns for Benchmark and Benchmark difference from NAV
# Takes arguments:
# ticker - single ticker for filter
@api.route("/histvol", methods=["GET", "POST"])
@login_required
# Returns a json with data to create the vol chart
# takes inputs from get:
# ticker, meta (true returns only metadata), rolling (in days)
# metadata (max, mean, etc)
@api.route("/mempool_json", methods=["GET", "POST"])
@login_required
@api.route("/portfolio_compare_json", methods=["GET"])
@login_required
# Compare portfolio performance to a list of assets
# Takes arguments:
# tickers - (comma separated. ex: BTC,ETH,AAPL)
# start - start date in the format YYMMDD
# end - end date in the format YYMMDD
# method - "chart": returns NAV only data for charts
# - "all": returns all data (prices and NAV)
# - "meta": returns metadata information
@api.route('/log')
@login_required
@api.route('/broadcaster')
@login_required
@api.route("/assetlist", methods=["GET", "POST"])
# List of available tickers. Also takes argument {term} so this can be used
# in autocomplete forms
@api.route("/aclst", methods=["GET", "POST"])
@login_required
# Returns JSON for autocomplete on account names.
# Gathers account names from trades and account_info tables
# Takes on input ?term - which is the string to be found
@api.route("/portfolio_tickers_json", methods=["GET", "POST"])
@login_required
# Returns a list of all tickers ever traded in this portfolio
@api.route("/generatenav_json", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
# Takes 2 arguments:
# force=False (default) : Forces the NAV generation without reading saved file
# filter=None (default): Filter to be applied to Pandas df (df.query(filter))
| 34.807923 | 143 | 0.613002 | from flask import (Blueprint, flash, request, current_app, jsonify, Response)
from warden_modules import (warden_metadata,
positions_dynamic,
generatenav, specter_df,
current_path, regenerate_nav,
home_path, transactions_fx)
from connections import tor_request
from pricing_engine.engine import price_ondate, historical_prices
from flask_login import login_required, current_user
from random import randrange
from pricing_engine.engine import fx_rate, realtime_price
from utils import heatmap_generator, pickle_it
from models import Trades, AccountInfo, TickerInfo
from datetime import datetime, timedelta
from dateutil import parser
from dateutil.relativedelta import relativedelta
import mhp as mrh
import simplejson
import logging
import pandas as pd
import numpy as np
import json
import os
import math
import csv
import requests
api = Blueprint('api', __name__)
@api.route("/gitreleases", methods=["GET"])
@login_required
def gitreleases():
    """Fetch the list of WARden GitHub releases (over Tor) and return it as JSON.

    Returns a JSON string: either the GitHub API payload or a plain
    error message when the request cannot be parsed.
    """
    url = 'https://api.github.com/repos/pxsocs/warden/releases'
    request = tor_request(url)
    try:
        data = request.json()
    except Exception:
        try:  # Try again - some APIs return a json already
            data = json.loads(request)
        except Exception:
            # BUG FIX: the fallback string was json.dumps-encoded here AND
            # again at the return below, yielding a double-encoded JSON
            # string ('"\\"Error..."\\""'). Keep it plain; encode once on return.
            data = "Error getting request"
    return json.dumps(data)
@api.route("/txs_json", methods=['GET'])
@login_required
def txs_json():
    """Return the saved portfolio transactions DataFrame as a JSON table."""
    saved_df = pickle_it(action='load', filename='warden/txs_pf.pkl')
    return saved_df.to_json(orient='table')
@api.route("/satoshi_quotes_json", methods=['GET'])
@login_required
def satoshi_quotes_json():
    """Return one randomly chosen Satoshi Nakamoto quote.

    Downloads the Nakamoto Institute quote list over Tor; on any failure
    returns a JSON-encoded retry message instead.
    """
    url = 'https://raw.githubusercontent.com/NakamotoInstitute/nakamotoinstitute.org/0bf08c48cd21655c76e8db06da39d16036a88594/data/quotes.json'
    try:
        all_quotes = tor_request(url).json()
    except Exception:
        return (json.dumps(' >> Error contacting server. Retrying... '))
    # Pick a random quote from the downloaded list.
    return all_quotes[randrange(len(all_quotes))]
def alert_activity():
    """Report whether new wallet activity has been detected.

    Returns False while transactions are still downloading, and also when
    the diff checkpoint file is missing or unreadable.
    """
    # Don't send any alerts as activity still being downloaded
    if current_app.downloading:
        return False
    try:
        diff_data = pickle_it(action='load', filename='txs_diff.pkl')
        if diff_data == 'file not found':
            raise FileNotFoundError
        return diff_data['changes_detected_on'] is not None
    except Exception:
        return False
# API End Point - gets a local pickle file and dumps it as JSON
# (does not work with pandas df). Do not include extension pkl on argument.
@api.route("/get_pickle", methods=['GET'])
@login_required
def get_pickle():
    """Load a local pickle file and return its contents as JSON.

    Query args:
        filename: pickle file name WITHOUT the .pkl extension.
    """
    filename = request.args.get("filename")
    if not filename:
        # BUG FIX: returning None from a Flask view raises a TypeError
        # (HTTP 500); return a well-formed JSON null instead.
        return json.dumps(None)
    filename += ".pkl"
    data_loader = pickle_it(action='load', filename=filename)
    return (json.dumps(data_loader, default=lambda o: '<not serializable>'))
@api.route("/check_activity", methods=['GET'])
@login_required
def check_activity():
    """API endpoint wrapping alert_activity(); returns a JSON boolean."""
    has_alerts = alert_activity()
    return json.dumps(has_alerts)
# API End Point with all WARden metadata
@api.route("/warden_metadata", methods=['GET'])
@login_required
def metadata_json():
    """Return all WARden metadata with DataFrames converted to dicts."""
    meta = warden_metadata()
    # jsonify transactions (one DataFrame per wallet)
    for wallet in meta['txs']:
        try:
            meta['txs'][wallet] = meta['txs'][wallet].to_dict()
        except Exception:
            pass
    # Convert the remaining top-level DataFrames in place. This replaces
    # four copy-pasted try/except blocks; keys that are missing or not
    # DataFrames are left untouched, exactly as before.
    for key in ('df_old', 'full_df', 'old_new_df_old'):
        try:
            meta[key] = meta[key].to_dict()
        except Exception:
            pass
    return (json.dumps(meta, default=lambda o: '<not serializable>'))
# Returns a JSON with Test Response on TOR
@api.route("/testtor", methods=["GET"])
@login_required
def testtor():
    """Run the Tor connectivity self-test and return the result as JSON."""
    from connections import test_tor
    tor_status = test_tor()
    return json.dumps(tor_status)
# API End point
# Json for main page with realtime positions
@api.route("/positions_json", methods=["GET"])
@login_required
def positions_json():
    """Serve realtime portfolio positions and pie-chart data (main page)."""
    try:
        dfdyn, piedata = positions_dynamic()
        btc_price = realtime_price("BTC")['price'] * fx_rate()['fx_rate']
        dfdyn = dfdyn.to_dict(orient='index')
    except Exception:
        dfdyn = piedata = None
        btc_price = 0
    # NOTE: a second realtime_price("BTC") fetch used to happen here, but
    # its result (`btc`) was never included in the response - removed.
    json_dict = {
        'positions': dfdyn,
        'piechart': piedata,
        'user': current_app.fx,
        'btc': btc_price
    }
    return simplejson.dumps(json_dict, ignore_nan=True)
# Returns current BTC price and FX rate for current user
# This is the function used at the layout navbar to update BTC price
# Please note that the default is to update every 20s (MWT(20) above)
@api.route("/realtime_btc", methods=["GET"])
@login_required
def realtime_btc():
    """Return the user's FX cross, FX rate and realtime BTC prices as JSON."""
    try:
        fx_details = fx_rate()
        fx_r = {'cross': fx_details['symbol'], 'fx_rate': fx_details['fx_rate']}
        fx_r['btc_usd'] = realtime_price("BTC", fx='USD')['price']
        fx_r['btc_fx'] = fx_r['btc_usd'] * fx_r['fx_rate']
    except Exception as e:
        # logging.warn is a deprecated alias of logging.warning
        logging.warning(f"There was an error while getting realtime prices. Error: {e}")
        fx_r = 0
    return json.dumps(fx_r)
# API end point - cleans notifications and creates a new checkpoint
@api.route("/dismiss_notification", methods=["POST"])
@login_required
def dismiss_notification():
    """Clear pending notifications and create a fresh checkpoint."""
    # delete_files=True rebuilds the df and removes the diff files
    specter_df(delete_files=True)
    message = "Notification dismissed. New CheckPoint created."
    flash(message, "success")
    return json.dumps("Done")
# API end point to return Specter data
# args: ?load=True (True = loads saved json, False = refresh data)
@api.route("/specter", methods=["GET"])
@login_required
def specter_json():
    """Return the parsed Specter home data as JSON."""
    # BUG FIX: a trailing comma after home_parser() wrapped the payload
    # in a 1-tuple, serializing it as a one-element JSON array.
    data = current_app.specter.home_parser()
    return simplejson.dumps(data, ignore_nan=True)
# Latest Traceback message
@api.route("/traceback_error", methods=["GET"])
@login_required
def traceback_error():
    """Return the most recent traceback (if any) as a JSON string."""
    import traceback
    last_trace = traceback.format_exc()
    return simplejson.dumps(last_trace, ignore_nan=True)
# API end point
# Function returns summary statistics for portfolio NAV and values
# Main function for portfolio page
@api.route("/portstats", methods=["GET", "POST"])
@login_required
def portstats():
    """Summary statistics (NAV, port value, period returns) as JSON.

    Consumed by the portfolio page's AJAX call. NAV_fx / PORT_fx_pos are
    daily series produced by generatenav().
    """
    meta = {}
    # Looking to generate the following data here and return as JSON
    # for AJAX query on front page:
    # Start date, End Date, Start NAV, End NAV, Returns (1d, 1wk, 1mo, 1yr,
    # YTD), average daily return. Best day, worse day. Std dev of daily ret,
    # Higher NAV, Lower NAV + dates. Higher Port Value (date).
    data = generatenav()
    # Dates formatted for direct display, e.g. "January 01, 2021"
    meta["start_date"] = (data.index.min()).date().strftime("%B %d, %Y")
    meta["end_date"] = data.index.max().date().strftime("%B %d, %Y")
    meta["start_nav"] = data["NAV_fx"][0]
    meta["end_nav"] = float(data["NAV_fx"][-1])
    meta["max_nav"] = float(data["NAV_fx"].max())
    meta["max_nav_date"] = data[
        data["NAV_fx"] == data["NAV_fx"].max()].index.strftime("%B %d, %Y")[0]
    meta["min_nav"] = float(data["NAV_fx"].min())
    meta["min_nav_date"] = data[
        data["NAV_fx"] == data["NAV_fx"].min()].index.strftime("%B %d, %Y")[0]
    meta["end_portvalue"] = data["PORT_fx_pos"][-1].astype(float)
    meta["end_portvalue_usd"] = meta["end_portvalue"] / fx_rate()['fx_rate']
    meta["max_portvalue"] = data["PORT_fx_pos"].astype(float).max()
    meta["max_port_date"] = data[data["PORT_fx_pos"] == data["PORT_fx_pos"].
                                 max()].index.strftime("%B %d, %Y")[0]
    meta["min_portvalue"] = round(data["PORT_fx_pos"].min(), 0)
    meta["min_port_date"] = data[data["PORT_fx_pos"] == data["PORT_fx_pos"].
                                 min()].index.strftime("%B %d, %Y")[0]
    meta["return_SI"] = (meta["end_nav"] / meta["start_nav"]) - 1
    # Temporary fix for an issue with portfolios that are just too new:
    # the positional lookbacks below ([-2], [-7], ...) raise IndexError
    # when the series is shorter than the window.
    # Create a function to handle this
    try:
        meta["return_1d"] = (meta["end_nav"] / data["NAV_fx"][-2]) - 1
    except IndexError:
        meta["return_1d"] = "-"
    try:
        meta["return_1wk"] = (meta["end_nav"] / data["NAV_fx"][-7]) - 1
    except IndexError:
        meta["return_1wk"] = "-"
    try:
        meta["return_30d"] = (meta["end_nav"] / data["NAV_fx"][-30]) - 1
    except IndexError:
        meta["return_30d"] = "-"
    try:
        meta["return_90d"] = (meta["end_nav"] / data["NAV_fx"][-90]) - 1
    except IndexError:
        meta["return_90d"] = "-"
    try:
        # NOTE(review): return vs. all-time-high; this division cannot
        # raise IndexError, so the fallback branch looks unreachable.
        meta["return_ATH"] = (meta["end_nav"] / meta["max_nav"]) - 1
    except IndexError:
        meta["return_ATH"] = "-"
    try:
        # Nearest NAV observation to exactly one year ago
        yr_ago = pd.to_datetime(datetime.today() - relativedelta(years=1))
        yr_ago_NAV = data.NAV_fx[data.index.get_loc(yr_ago, method="nearest")]
        meta["return_1yr"] = meta["end_nav"] / yr_ago_NAV - 1
    except IndexError:
        meta["return_1yr"] = "-"
    # Create data for summary page
    meta["fx"] = current_app.settings['PORTFOLIO']['base_fx']
    # Last 7 daily observations for the summary table (1 = most recent)
    meta["daily"] = {}
    for days in range(1, 8):
        meta["daily"][days] = {}
        meta["daily"][days]["date"] = data.index[days * -1].date().strftime(
            "%A <br> %m/%d")
        meta["daily"][days]["nav"] = data["NAV_fx"][days * -1]
        meta["daily"][days]["nav_prev"] = data["NAV_fx"][(days + 1) * -1]
        meta["daily"][days]["perc_chg"] = (meta["daily"][days]["nav"] /
                                           meta["daily"][days]["nav_prev"]) - 1
        meta["daily"][days]["port"] = data["PORT_fx_pos"][days * -1]
        meta["daily"][days]["port_prev"] = data["PORT_fx_pos"][(days + 1) * -1]
        meta["daily"][days]["port_chg"] = (meta["daily"][days]["port"] -
                                           meta["daily"][days]["port_prev"])
    # Removes Numpy type from json - returns int instead
    def convert(o):
        if isinstance(o, np.int64):
            return int(o)
        else:
            return (o)
    # create chart data for a small NAV chart
    return simplejson.dumps(meta, ignore_nan=True, default=convert)
# API end point - returns a json with NAV Chartdata
@api.route("/navchartdatajson", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
def navchartdatajson():
    """Return {epoch_ms: NAV_fx} pairs, sorted by date, for Highcharts."""
    import collections
    nav_df = generatenav()[["NAV_fx"]]
    # Highcharts expects epoch timestamps in milliseconds
    epoch_seconds = (nav_df.index - datetime(1970, 1, 1)).total_seconds()
    nav_df.index = (epoch_seconds * 1000).astype(np.int64)
    points = nav_df.to_dict()["NAV_fx"]
    # Sort for HighCharts
    ordered = collections.OrderedDict(sorted(points.items()))
    return json.dumps(ordered)
# API end point - returns a json with NAV Chartdata
@api.route("/stackchartdatajson", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
def stackchartdatajson():
    """Return {epoch_ms: BTC stack value} pairs, sorted, for Highcharts."""
    import collections
    data = generatenav()
    try:
        # Only the cumulative BTC position is charted
        data['BTC_cum'] = data['PORT_VALUE_BTC']
        stack_df = data[["BTC_cum"]]
        # Highcharts expects epoch timestamps in milliseconds
        epoch_seconds = (stack_df.index - datetime(1970, 1, 1)).total_seconds()
        stack_df.index = (epoch_seconds * 1000).astype(np.int64)
        points = stack_df.to_dict()["BTC_cum"]
        # Sort for HighCharts
        ordered = collections.OrderedDict(sorted(points.items()))
        return json.dumps(ordered)
    except Exception as e:
        return (json.dumps({"Error": str(e)}))
# Return the price of a ticker on a given date
# Takes arguments:
# ticker: Single ticker for filter (default = NAV)
# date: date to get price
@api.route("/getprice_ondate", methods=["GET"])
@login_required
def getprice_ondate():
    """Return a ticker's closing price on a given date, as a string."""
    # Get the arguments and store
    if request.method == "GET":
        date_input = request.args.get("date")
        ticker = request.args.get("ticker")
        if (not ticker) or (not date_input):
            # BUG FIX: returning the int 0 from a Flask view raises a
            # TypeError (HTTP 500); return a string body instead.
            return "0"
        ticker = ticker.upper()
        get_date = datetime.strptime(date_input, "%Y-%m-%d")
        # Create price object
        try:
            price = str(price_ondate(ticker, get_date).close)
        except Exception as e:
            price = "Not Found. Error: " + str(e)
        return price
@api.route("/fx_lst", methods=["GET"])
@login_required
# Receiver argument ?term to return a list of fx (fiat and digital)
# Searches the list both inside the key as well as value of dict
def fx_list():
    """Autocomplete endpoint: currencies whose code or name contains ?term."""
    filename = os.path.join(current_path(),
                            'static/csv_files/physical_currency_list.csv')
    with open(filename, newline='') as csvfile:
        reader = csv.reader(csvfile)
        fx_dict = {rows[0]: rows[1] for rows in reader}
    q = request.args.get("term")
    if q is None:
        q = ""
    # Match the term against both the code (key) and the long name (value).
    # Renamed the result from `list` to avoid shadowing the builtin.
    key_matches = {
        key: value
        for key, value in fx_dict.items() if q.upper() in key.upper()
    }
    value_matches = {
        key: value
        for key, value in fx_dict.items() if q.upper() in value.upper()
    }
    matches = {**key_matches, **value_matches}
    return json.dumps(matches)
@api.route("/heatmapbenchmark_json", methods=["GET"])
@login_required
# Return Monthly returns for Benchmark and Benchmark difference from NAV
# Takes arguments:
# ticker - single ticker for filter
def heatmapbenchmark_json():
    """Monthly-returns heatmap for a benchmark ticker plus the difference
    from the portfolio NAV heatmap. Defaults to BTC when no ?ticker given.
    """
    # Get portfolio data first
    heatmap_gen, heatmap_stats, years, cols = heatmap_generator()
    # Now get the ticker information and run comparison
    if request.method == "GET":
        ticker = request.args.get("ticker")
        # Defaults to king BTC
        if not ticker:
            ticker = "BTC"
    # Gather the first trade date in portfolio and store
    # used to match the matrixes later
    # Panda dataframe with transactions
    df = transactions_fx()
    # Filter the df acccoring to filter passed as arguments
    df["trade_date"] = pd.to_datetime(df["trade_date"])
    start_date = df["trade_date"].min()
    start_date -= timedelta(days=1)  # start on t-1 of first trade
    # Generate price Table now for the ticker and trim to match portfolio
    fx = current_app.settings['PORTFOLIO']['base_fx']
    data = historical_prices(ticker, fx)
    mask = data.index >= start_date
    data = data.loc[mask]
    # If notification is an error, skip this ticker
    # NOTE(review): this guard looks dead - if data were None the .loc
    # above would already have raised, and data.errors would fail on None.
    if data is None:
        messages = data.errors
        return jsonify(messages)
    data = data.rename(columns={'close_converted': ticker + '_price'})
    data = data[[ticker + '_price']]
    data.sort_index(ascending=True, inplace=True)
    # Daily percentage change feeds the monthly-returns heatmap
    data["pchange"] = (data / data.shift(1)) - 1
    # Run the mrh function to generate heapmap table
    heatmap = mrh.get(data["pchange"], eoy=True)
    # NOTE(review): heatmap_stats ALIASES heatmap (no copy) - the summary
    # columns added below are therefore also added to `heatmap` itself.
    heatmap_stats = heatmap
    cols = [
        "Jan",
        "Feb",
        "Mar",
        "Apr",
        "May",
        "Jun",
        "Jul",
        "Aug",
        "Sep",
        "Oct",
        "Nov",
        "Dec",
        "eoy",
    ]
    cols_months = [
        "Jan",
        "Feb",
        "Mar",
        "Apr",
        "May",
        "Jun",
        "Jul",
        "Aug",
        "Sep",
        "Oct",
        "Nov",
        "Dec",
    ]
    years = heatmap.index.tolist()
    # Create summary stats for the Ticker (months equal to 0 are treated
    # as "no data" and excluded from max/min/mean)
    heatmap_stats["MAX"] = heatmap_stats[heatmap_stats[cols_months] != 0].max(
        axis=1)
    heatmap_stats["MIN"] = heatmap_stats[heatmap_stats[cols_months] != 0].min(
        axis=1)
    heatmap_stats["POSITIVES"] = heatmap_stats[
        heatmap_stats[cols_months] > 0].count(axis=1)
    heatmap_stats["NEGATIVES"] = heatmap_stats[
        heatmap_stats[cols_months] < 0].count(axis=1)
    heatmap_stats["POS_MEAN"] = heatmap_stats[
        heatmap_stats[cols_months] > 0].mean(axis=1)
    heatmap_stats["NEG_MEAN"] = heatmap_stats[
        heatmap_stats[cols_months] < 0].mean(axis=1)
    heatmap_stats["MEAN"] = heatmap_stats[
        heatmap_stats[cols_months] != 0].mean(axis=1)
    # Create the difference between the 2 df - Pandas is cool!
    heatmap_difference = heatmap_gen - heatmap
    # return (heatmap, heatmap_stats, years, cols, ticker, heatmap_diff)
    return simplejson.dumps(
        {
            "heatmap": heatmap.to_dict(),
            "heatmap_stats": heatmap_stats.to_dict(),
            "cols": cols,
            "years": years,
            "ticker": ticker,
            "heatmap_diff": heatmap_difference.to_dict(),
        },
        ignore_nan=True,
        default=datetime.isoformat,
    )
@api.route("/histvol", methods=["GET", "POST"])
@login_required
# Returns a json with data to create the vol chart
# takes inputs from get:
# ticker, meta (true returns only metadata), rolling (in days)
# metadata (max, mean, etc)
def histvol():
    """Return rolling annualized volatility data (or its summary stats).

    Query args:
        ticker: optional ticker; defaults to the whole portfolio NAV.
        rolling: rolling window in days (default 30).
        meta: when present, return only summary statistics.
    """
    # if there's a rolling argument, use it, otherwise default to 30
    if request.method == "GET":
        try:
            # BUG FIX: int(None) raises TypeError (not ValueError) when
            # the argument is missing, so catch both.
            q = int(request.args.get("rolling"))
        except (TypeError, ValueError):
            q = 30
    else:
        q = 30
    ticker = request.args.get("ticker")
    metadata = request.args.get("meta")
    vollist = None
    # When ticker is not sent, calculate for the whole portfolio NAV
    if not ticker:
        data = generatenav()
        data["vol"] = (data["NAV_fx"].pct_change().rolling(q).std() *
                       (365**0.5) * 100)
        vollist = data[["vol"]]
        vollist.index = vollist.index.strftime("%Y-%m-%d")
        datajson = vollist.to_json()
    if ticker:
        filename = "thewarden/historical_data/" + ticker + ".json"
        filename = os.path.join(current_path(), filename)
        try:
            with open(filename) as data_file:
                local_json = json.loads(data_file.read())
            prices = pd.DataFrame(
                local_json["Time Series (Digital Currency Daily)"]).T
            # np.float was removed in NumPy 1.24; it was always an alias
            # of the builtin float.
            prices["4b. close (USD)"] = prices["4b. close (USD)"].astype(
                float)
            prices["vol"] = (
                prices["4b. close (USD)"].pct_change().rolling(q).std() *
                (365**0.5) * 100)
            vollist = prices[["vol"]]
            datajson = vollist.to_json()
        except (FileNotFoundError, KeyError):
            datajson = "Ticker Not Found"
    if metadata is not None and vollist is not None:
        # BUG FIX: `vollist` was previously only assigned in the
        # portfolio branch, so ?ticker=X&meta=1 raised UnboundLocalError.
        metatable = {}
        metatable["mean"] = vollist.vol.mean()
        metatable["max"] = vollist.vol.max()
        metatable["min"] = vollist.vol.min()
        metatable["last"] = vollist.vol[-1]
        metatable["lastvsmean"] = (
            (vollist.vol[-1] / vollist.vol.mean()) - 1) * 100
        return json.dumps(metatable)
    return datajson
@api.route("/mempool_json", methods=["GET", "POST"])
@login_required
def mempool_json():
    """Return recommended fees and latest blocks from a mempool API."""
    try:
        mempool_url = current_app.settings['MEMPOOL'].get('url')
        # Normalize the configured URL to 'scheme://host/' form
        from urllib.parse import urlparse
        parsed = urlparse(mempool_url)
        scheme = parsed.scheme if parsed.scheme != '' else 'http'
        if parsed.netloc != '':
            mempool_url = scheme + '://' + parsed.netloc + '/'
        if not mempool_url.startswith('http'):
            mempool_url = 'http://' + mempool_url
        if mempool_url[-1] != '/':
            mempool_url += '/'
        # Get recommended fees and latest blocks over Tor
        fees = tor_request(mempool_url + 'api/v1/fees/recommended').json()
        blocks = tor_request(mempool_url + 'api/blocks').json()
        return json.dumps(
            {'mp_fee': fees, 'mp_blocks': blocks, 'mp_url': mempool_url})
    except Exception:
        return json.dumps(
            {'mp_fee': '-', 'mp_blocks': '-', 'mp_url': 'Error: Retrying...'})
@api.route("/portfolio_compare_json", methods=["GET"])
@login_required
# Compare portfolio performance to a list of assets
# Takes arguments:
# tickers - (comma separated. ex: BTC,ETH,AAPL)
# start - start date in the format YYMMDD
# end - end date in the format YYMMDD
# method - "chart": returns NAV only data for charts
#        - "all": returns all data (prices and NAV)
#        - "meta": returns metadata information
def portfolio_compare_json():
    """Compare portfolio NAV against a list of benchmark tickers.

    Builds a merged daily table (NAV + each ticker's price), normalized
    series, per-asset summary stats and a correlation matrix.
    """
    if request.method == "GET":
        tickers = request.args.get("tickers").upper()
        tickers = tickers.split(",")
        start_date = request.args.get("start")
        method = request.args.get("method")
    # Check if start and end dates exist, if not assign values
    try:
        start_date = datetime.strptime(start_date, "%Y-%m-%d")
    except (ValueError, TypeError) as e:
        logging.info(f"[portfolio_compare_json] Error: {e}, " +
                     "setting start_date to zero")
        start_date = datetime.strptime('2011-01-01', "%Y-%m-%d")
    end_date = request.args.get("end")
    try:
        end_date = datetime.strptime(end_date, "%Y-%m-%d")
    except (ValueError, TypeError) as e:
        logging.info(f"[portfolio_compare_json] Error: {e}, " +
                     "setting end_date to now")
        end_date = datetime.now()
    data = {}
    logging.info("[portfolio_compare_json] NAV requested in list of " +
                 "tickers, requesting generatenav.")
    nav = generatenav()
    nav_only = nav["NAV_fx"]
    # Now go over tickers and merge into nav_only df
    messages = {}
    meta_data = {}
    fx = current_app.settings['PORTFOLIO']['base_fx']
    if fx is None:
        fx = 'USD'
    for ticker in tickers:
        if ticker == "NAV":
            # Ticker was NAV, skipped
            # NOTE(review): a skipped NAV ticker gets no entry in
            # `messages`, so the second loop below would raise KeyError
            # for it - confirm callers never pass NAV explicitly.
            continue
        # Generate price Table now for the ticker and trim to match portfolio
        data = historical_prices(ticker, fx=fx)
        data.index = data.index.astype('datetime64[ns]')
        # If notification is an error, skip this ticker
        # NOTE(review): this guard is dead - if data were None the line
        # above would already have raised.
        if data is None:
            messages = data.errors
            return jsonify(messages)
        data = data.rename(columns={'close_converted': ticker + '_price'})
        data = data[ticker + '_price']
        nav_only = pd.merge(nav_only, data, on="date", how="left")
        nav_only[ticker + "_price"].fillna(method="bfill", inplace=True)
        messages[ticker] = "ok"
        logging.info(f"[portfolio_compare_json] {ticker}: Success - Merged OK")
    nav_only.fillna(method="ffill", inplace=True)
    # Trim this list only to start_date to end_date:
    mask = (nav_only.index >= start_date) & (nav_only.index <= end_date)
    nav_only = nav_only.loc[mask]
    # Now create the list of normalized Returns for the available period
    # Plus create a table with individual analysis for each ticker and NAV
    nav_only["NAV_norm"] = (nav_only["NAV_fx"] / nav_only["NAV_fx"][0]) * 100
    nav_only["NAV_ret"] = nav_only["NAV_norm"].pct_change()
    table = {}
    table["meta"] = {}
    table["meta"]["start_date"] = nav_only.index[0].strftime("%m-%d-%Y")
    table["meta"]["end_date"] = nav_only.index[-1].strftime("%m-%d-%Y")
    table["meta"]["number_of_days"] = ((nav_only.index[-1] -
                                        nav_only.index[0])).days
    table["meta"]["count_of_points"] = nav_only["NAV_fx"].count().astype(float)
    table["NAV"] = {}
    table["NAV"]["start"] = nav_only["NAV_fx"][0]
    table["NAV"]["end"] = nav_only["NAV_fx"][-1]
    table["NAV"]["return"] = (nav_only["NAV_fx"][-1] /
                              nav_only["NAV_fx"][0]) - 1
    table["NAV"]["avg_return"] = nav_only["NAV_ret"].mean()
    table["NAV"]["ann_std_dev"] = nav_only["NAV_ret"].std() * math.sqrt(365)
    for ticker in tickers:
        if messages[ticker] == "ok":
            # Include new columns for return and normalized data
            nav_only[ticker + "_norm"] = (nav_only[ticker + "_price"] /
                                          nav_only[ticker + "_price"][0]) * 100
            nav_only[ticker + "_ret"] = nav_only[ticker + "_norm"].pct_change()
            # Create Metadata
            table[ticker] = {}
            table[ticker]["start"] = nav_only[ticker + "_price"][0]
            table[ticker]["end"] = nav_only[ticker + "_price"][-1]
            table[ticker]["return"] = (nav_only[ticker + "_price"][-1] /
                                       nav_only[ticker + "_price"][0]) - 1
            table[ticker]["comp2nav"] = table[ticker]["return"] - \
                table["NAV"]["return"]
            table[ticker]["avg_return"] = nav_only[ticker + "_ret"].mean()
            table[ticker]["ann_std_dev"] = nav_only[
                ticker + "_ret"].std() * math.sqrt(365)
    logging.info("[portfolio_compare_json] Success")
    # Create Correlation Matrix
    filter_col = [col for col in nav_only if col.endswith("_ret")]
    nav_matrix = nav_only[filter_col]
    corr_matrix = nav_matrix.corr(method="pearson").round(2)
    corr_html = corr_matrix.to_html(classes="table small text-center",
                                    border=0,
                                    justify="center")
    # Now, let's return the data in the correct format as requested
    if method == "chart":
        return_data = {
            "data": nav_only.to_json(),
            "messages": messages,
            "meta_data": meta_data,
            "table": table,
            "corr_html": corr_html,
        }
        return jsonify(return_data)
    return nav_only.to_json()
@api.route('/log')
@login_required
def progress_log():
    """Return the last 200 lines of the debug log file as JSON."""
    from config import Config
    from warden_modules import tail
    log_tail = tail(Config.debug_file, 200)
    return json.dumps(str(log_tail))
@api.route('/broadcaster')
@login_required
def broadcaster():
    """Return broadcast messages as JSON, optionally filtered by category."""
    category = request.args.get("category")
    # The client sends the literal text 'None' to mean "no filter"
    if category == 'None':
        category = None
    return current_app.message_handler.to_json(category=category)
@api.route("/assetlist", methods=["GET", "POST"])
# List of available tickers. Also takes argument {term} so this can be used
# in autocomplete forms
def assetlist():
    """Autocomplete endpoint listing tickers matching ?term (min 2 chars)."""
    q = request.args.get("term")
    jsonlist = []
    # BUG FIX: guard against a missing ?term argument - len(None) raised
    # a TypeError (HTTP 500).
    if q is None or len(q) < 2:
        return jsonify(jsonlist)
    # Get list of available tickers from pricing APIs
    from pricing_engine.alphavantage import asset_list
    jsonlist.extend(asset_list(q))
    # jsonlist.extend(asset_list_cc(q))
    # jsonlist.extend(asset_list_fp(q))
    return jsonify(jsonlist)
@api.route("/aclst", methods=["GET", "POST"])
@login_required
# Returns JSON for autocomplete on account names.
# Gathers account names from trades and account_info tables
# Takes on input ?term - which is the string to be found
def aclst():
    """Autocomplete endpoint for account names (trades + account_info)."""
    # Renamed from `list` to avoid shadowing the builtin.
    matches = []
    if request.method == "GET":
        tradeaccounts = Trades.query.filter_by(
            user_id=current_user.username).group_by(Trades.trade_account)
        accounts = AccountInfo.query.filter_by(
            user_id=current_user.username).group_by(
                AccountInfo.account_longname)
        # BUG FIX: guard against a missing ?term argument (q.upper() on
        # None raised AttributeError).
        q = (request.args.get("term") or "").upper()
        for item in tradeaccounts:
            if q in item.trade_account.upper():
                matches.append(item.trade_account)
        for item in accounts:
            if q in item.account_longname.upper():
                matches.append(item.account_longname)
    return json.dumps(matches)
@api.route("/portfolio_tickers_json", methods=["GET", "POST"])
@login_required
# Returns a list of all tickers ever traded in this portfolio
def portfolio_tickers_json():
    """List every ticker the current user has traded (always incl. BTC)."""
    if request.method == "GET":
        trades_df = pd.read_sql_table("trades", current_app.db.engine)
        trades_df = trades_df[(trades_df.user_id == current_user.username)]
        tickers = trades_df.trade_asset_ticker.unique().tolist()
        if 'BTC' not in tickers:
            tickers.append('BTC')
        return jsonify(tickers)
@api.route("/generatenav_json", methods=["GET", "POST"])
@login_required
# Creates a table with dates and NAV values
# Takes 2 arguments:
# force=False (default) : Forces the NAV generation without reading saved file
# filter=None (default): Filter to be applied to Pandas df (df.query(filter))
def generatenav_json():
    """Return the generated NAV table for the current user as JSON."""
    if request.method == "GET":
        # Fall back to defaults when the query args are absent/falsy
        nav_filter = request.args.get("filter") or ""
        force = request.args.get("force") or False
        nav = generatenav(current_user.username, force, nav_filter)
        return nav.to_json()
| 23,133 | 0 | 617 |
b8b90853e75374d2c2f02d73b3181e8fc358fe59 | 438 | py | Python | demo/security.py | bogdan-veliscu/learn-flask | bc587abaa6049ca15bc390bb37356d657c01550a | [
"Apache-2.0"
] | null | null | null | demo/security.py | bogdan-veliscu/learn-flask | bc587abaa6049ca15bc390bb37356d657c01550a | [
"Apache-2.0"
] | null | null | null | demo/security.py | bogdan-veliscu/learn-flask | bc587abaa6049ca15bc390bb37356d657c01550a | [
"Apache-2.0"
] | null | null | null | from user import User
# In-memory demo user store; passwords are plain text (demo code only)
users = [
    User(1, 'bogdan', 's3cret'),
    User(2, 'georgi', "pass")
]
# Lookup tables keyed by username and by id for the auth callbacks
user_mapping = {u.username: u for u in users}
userid_mapping = {u.id: u for u in users}
| 20.857143 | 45 | 0.666667 | from user import User
# In-memory demo user store; passwords are plain text (demo code only)
users = [
    User(1, 'bogdan', 's3cret'),
    User(2, 'georgi', "pass")
]
# Lookup tables keyed by username and by id for the auth callbacks
user_mapping = {u.username: u for u in users}
userid_mapping = {u.id: u for u in users}
def authenticate(username, password):
    """Return the User for a correct username/password pair, else None.

    Appears to be a flask-jwt style authenticate callback - confirm.
    """
    user = user_mapping.get(username, None)
    # NOTE(review): plain-text password comparison - demo code only
    if user and user.password == password:
        return user
def identity(payload):
    """Map a decoded JWT payload to its User (flask-jwt style callback)."""
    user_id = payload['identity']
    return userid_mapping.get(user_id, None)
| 203 | 0 | 46 |
cd1b8f7052e978aa6ae994c255ef1fa0d50e8c47 | 1,938 | py | Python | third_party/ffmpeg/PRESUBMIT.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/ffmpeg/PRESUBMIT.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/ffmpeg/PRESUBMIT.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for FFmpeg repository.
Does the following:
- Warns users that changes must be submitted via Gerrit.
- Warns users when a change is made without updating the README file.
"""
import re
import subprocess
def _WarnIfReadmeIsUnchanged(input_api, output_api):
"""Warn if the README file hasn't been updated with change notes."""
has_ffmpeg_changes = False
chromium_re = re.compile(r'.*[/\\]?chromium.*|PRESUBMIT.py$|.*\.chromium$')
readme_re = re.compile(r'.*[/\\]?chromium[/\\]patches[/\\]README$')
for f in input_api.AffectedFiles():
if readme_re.match(f.LocalPath()):
return []
if not has_ffmpeg_changes and not chromium_re.match(f.LocalPath()):
has_ffmpeg_changes = True
if not has_ffmpeg_changes:
return []
return [output_api.PresubmitPromptWarning('\n'.join([
'FFmpeg changes detected without any update to chromium/patches/README,',
'it\'s good practice to update this file with a note about your changes.'
]))]
def _WarnIfGenerateGnTestsFail(input_api, output_api):
"""Error if generate_gn.py was changed and tests are now failing."""
should_run_tests = False
generate_gn_re = re.compile(r'.*generate_gn.*\.py$')
for f in input_api.AffectedFiles():
if generate_gn_re.match(f.LocalPath()):
should_run_tests = True
break;
errors = []
if should_run_tests:
errors += input_api.RunTests(
input_api.canned_checks.GetUnitTests(
input_api, output_api,
['chromium/scripts/generate_gn_unittest.py']))
return errors
| 32.847458 | 79 | 0.720846 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for FFmpeg repository.
Does the following:
- Warns users that changes must be submitted via Gerrit.
- Warns users when a change is made without updating the README file.
"""
import re
import subprocess
def _WarnIfReadmeIsUnchanged(input_api, output_api):
  """Warn if the README file hasn't been updated with change notes."""
  has_ffmpeg_changes = False
  # Paths on the Chromium side of the tree (not FFmpeg sources themselves)
  chromium_re = re.compile(r'.*[/\\]?chromium.*|PRESUBMIT.py$|.*\.chromium$')
  # The patch-notes README whose presence silences the warning
  readme_re = re.compile(r'.*[/\\]?chromium[/\\]patches[/\\]README$')
  for f in input_api.AffectedFiles():
    # Any touched README means the author already documented the change
    if readme_re.match(f.LocalPath()):
      return []
    if not has_ffmpeg_changes and not chromium_re.match(f.LocalPath()):
      has_ffmpeg_changes = True
  if not has_ffmpeg_changes:
    return []
  return [output_api.PresubmitPromptWarning('\n'.join([
      'FFmpeg changes detected without any update to chromium/patches/README,',
      'it\'s good practice to update this file with a note about your changes.'
  ]))]
def _WarnIfGenerateGnTestsFail(input_api, output_api):
  """Error if generate_gn.py was changed and tests are now failing."""
  should_run_tests = False
  generate_gn_re = re.compile(r'.*generate_gn.*\.py$')
  for f in input_api.AffectedFiles():
    if generate_gn_re.match(f.LocalPath()):
      should_run_tests = True
      break;
  errors = []
  if should_run_tests:
    # Run the generate_gn unit tests via the canned presubmit checks
    errors += input_api.RunTests(
        input_api.canned_checks.GetUnitTests(
            input_api, output_api,
            ['chromium/scripts/generate_gn_unittest.py']))
  return errors
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked on upload; aggregates all warnings."""
  results = []
  results.extend(_WarnIfReadmeIsUnchanged(input_api, output_api))
  results.extend(_WarnIfGenerateGnTestsFail(input_api, output_api))
  return results
| 192 | 0 | 23 |
f0bf069ce8f4876da4dd3cdfba27081e2694dd0d | 1,540 | py | Python | graphics/screen.py | lokopak/snake-game | 332dcc0677380b6ffac65a35e245b90194daa3ee | [
"MIT"
] | null | null | null | graphics/screen.py | lokopak/snake-game | 332dcc0677380b6ffac65a35e245b90194daa3ee | [
"MIT"
] | null | null | null | graphics/screen.py | lokopak/snake-game | 332dcc0677380b6ffac65a35e245b90194daa3ee | [
"MIT"
] | null | null | null | from os.path import join
from typing import Tuple
from constants import PATH, SCREEN_HEIGHT, SCREEN_WIDTH, WHITE
from pygame import Rect, Surface, display, image, mouse
from graphics.font import Font, FontType
class Screen:
    """ This class is responsible for drawing elements on the screen """

    # Mouse cursor texture, loaded once in __load_resources()
    __cursor: Surface = None

    def __init__(self, game) -> None:
        """ Creates the display surface, hides the OS cursor and loads
        resources.

        BUG FIX: this class used self.__canvas and self.__font without
        ever assigning them - the initializer was missing entirely.
        `game` is accepted for interface compatibility but not stored.
        """
        self.__canvas = display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
        display.set_caption('The Snake')
        mouse.set_visible(False)
        self.__font = Font()
        self.__load_resources()

    def __load_resources(self) -> None:
        """ Pre-loads required resources like cursor texture """
        self.__cursor = image.load(join(PATH, 'res', 'images', 'mouse_icon.png'))

    def fill(self, color, rect: Tuple = None) -> Rect:
        """ Fills screen surface with a given solid color """
        return self.__canvas.fill(color, rect)

    def blit(self, surface: Surface, dest: Tuple) -> Rect:
        """ Draws an image onto screen surface """
        return self.__canvas.blit(surface, dest)

    def update_cursor(self):
        """ Updates mouse cursor position """
        self.blit(self.__cursor, mouse.get_pos())
        return

    def draw_string(self, string: str, dest: Tuple, font: FontType = FontType.MD, color: Tuple = WHITE) -> None:
        """ Draws a text string onto screen surface """
        text_surface = self.__font.draw_string(string, font, color)
        self.blit(text_surface, dest)
| 35.813953 | 112 | 0.647403 | from os.path import join
from typing import Tuple
from constants import PATH, SCREEN_HEIGHT, SCREEN_WIDTH, WHITE
from pygame import Rect, Surface, display, image, mouse
from graphics.font import Font, FontType
class Screen:
    """ This class is responsible for drawing elements on the screen """

    # Mouse cursor texture, loaded once in __load_resources()
    __cursor: Surface = None

    def __init__(self, game) -> None:
        # Create the main display surface and hide the OS cursor so the
        # custom cursor texture can be drawn instead.
        # NOTE(review): `game` is accepted but never stored/used here.
        self.__canvas = display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
        display.set_caption('The Snake')
        mouse.set_visible(False)
        self.__font = Font()
        self.__load_resources()

    def __load_resources(self) -> None:
        """ Pre-loads required resources like cursor texture """
        self.__cursor = image.load(join(PATH, 'res', 'images', 'mouse_icon.png'))

    def fill(self, color, rect: Tuple = None) -> Rect:
        """ Fills screen surface with a given solid color """
        return self.__canvas.fill(color, rect)

    def blit(self, surface: Surface, dest: Tuple) -> Rect:
        """ Draws an image onto screen surface """
        return self.__canvas.blit(surface, dest)

    def update_cursor(self):
        """ Updates mouse cursor position """
        self.blit(self.__cursor, mouse.get_pos())
        return

    def draw_string(self, string: str, dest: Tuple, font: FontType = FontType.MD, color: Tuple = WHITE) -> None:
        """ Draws a text string onto screen surface """
        text_surface = self.__font.draw_string(string, font, color)
        self.blit(text_surface, dest)
| 219 | 0 | 31 |
3409745f720b9add32aa4e381d9c2ec267bda62d | 1,456 | py | Python | Query Optimization/algorithms/ikkbz_hist/qoalgo/graphviz.py | mrahtapot/TUM | b736fc4ae065612dc988b6cb220fcf2f6119a138 | [
"Apache-2.0"
] | 225 | 2019-10-02T10:49:41.000Z | 2022-03-29T22:25:38.000Z | Query Optimization/algorithms/ikkbz_hist/qoalgo/graphviz.py | mrahtapot/TUM | b736fc4ae065612dc988b6cb220fcf2f6119a138 | [
"Apache-2.0"
] | 6 | 2021-02-16T12:22:43.000Z | 2021-07-31T19:35:57.000Z | Query Optimization/algorithms/ikkbz_hist/qoalgo/graphviz.py | mrahtapot/TUM | b736fc4ae065612dc988b6cb220fcf2f6119a138 | [
"Apache-2.0"
] | 69 | 2019-10-02T21:46:57.000Z | 2022-03-17T19:27:50.000Z | from qoalgo.base import JoinTree, Relation, QueryGraph
| 25.103448 | 83 | 0.56044 | from qoalgo.base import JoinTree, Relation, QueryGraph
def _precedence_graph_to_graphviz_inner(pg):
dot = f'"{str(pg)}" [label="{str(pg)}"];\n'
for child in pg.children:
dot += f'"{str(pg)}" -> "{str(child)}"; \n'
dot += _precedence_graph_to_graphviz_inner(child)
return dot
def precedence_graph_to_graphviz(pg):
    """Render a full precedence graph as a DOT digraph string."""
    body = _precedence_graph_to_graphviz_inner(pg)
    return 'digraph G {\n' + body + '}'
def join_tree_to_graphviz_inner(t):
    """Emit graphviz node/edge lines for a join-tree node and its subtrees.

    Leaf relations get their name as label; inner nodes show the join
    operator and accumulated cost.
    """
    if isinstance(t, Relation):
        return f'"{hash(t)}" [label="{str(t)}"];\n'
    pieces = [f'"{hash(t)}" [label="{str(t.operator)} c:{t.cost:.2f}"];\n']
    for child in (t.left, t.right):
        if child is None:
            continue
        pieces.append(f'"{hash(t)}" -> "{hash(child)}"; \n')
        pieces.append(join_tree_to_graphviz_inner(child))
    return ''.join(pieces)
def join_tree_to_graphviz(t):
    """Render a complete graphviz digraph for the join tree rooted at *t*."""
    return 'digraph G {\n' + join_tree_to_graphviz_inner(t) + '}'
def querygraph_to_graphviz(qg: QueryGraph, cardinalities=True, selectivities=True):
    """Render a query graph as an undirected graphviz digraph.

    :param qg: the query graph (relations + joins)
    :param cardinalities: append each relation's cardinality to its label
    :param selectivities: label each join edge with its selectivity (2 d.p.)
    """
    pieces = ['digraph G {\n']
    for rel in qg.relations:
        label = str(rel)
        if cardinalities:
            label += f' ({rel.cardinality})'
        pieces.append(f'"{hash(rel)}" [label="{label}"];\n')
    for join in qg.joins:
        attrs = 'dir=none'
        if selectivities:
            attrs += f', label="{join.selectivity:.2f}"'
        pieces.append(f'"{hash(join.r1)}" -> "{hash(join.r2)}" [{attrs}];\n')
    pieces.append('}')
    return ''.join(pieces)
| 1,281 | 0 | 115 |
115598580691b7652be83137464d81aa5043395d | 12,235 | py | Python | hsstock/utils/date_util.py | hsstock/hsstock | f8841331022e8844537a5c5b08d047e2cc328856 | [
"Apache-2.0"
] | 2 | 2018-10-04T08:04:24.000Z | 2021-01-21T06:58:30.000Z | hsstock/utils/date_util.py | hsstock/hsstock | f8841331022e8844537a5c5b08d047e2cc328856 | [
"Apache-2.0"
] | null | null | null | hsstock/utils/date_util.py | hsstock/hsstock | f8841331022e8844537a5c5b08d047e2cc328856 | [
"Apache-2.0"
] | 1 | 2018-10-20T09:39:50.000Z | 2018-10-20T09:39:50.000Z | import time
from datetime import datetime, date, timedelta
import six as six
"""默认的时间日期格式,项目中金融时间序列等时间相关默认格式"""
K_DEFAULT_DT_FMT2 = "%Y-%m-%d"
def str_to_datetime(date_str, fmt=K_DEFAULT_DT_FMT2, fix=True):
    """Parse a date string into a datetime.datetime.

    '2016-01-01' -> datetime.datetime(2016, 1, 1, 0, 0).

    :param date_str: date string, '%Y-%m-%d' by default
    :param fmt: strptime format when date_str is not '%Y-%m-%d'
    :param fix: normalise sloppy spellings, e.g. 2016-1-1 -> 2016-01-01
    :return: the parsed datetime.datetime
    """
    normalised = date_str
    if fix and fmt == K_DEFAULT_DT_FMT2:
        # Normalisation only applies to the default '%Y-%m-%d' format.
        normalised = fix_date(normalised)
    return datetime.strptime(normalised, fmt)
def date_str_to_int(date_str, split='-', fix=True):
    """Convert '2016-01-01' to 20160101 by plain string manipulation
    (no datetime API, for speed).

    :param date_str: date string in '%Y-%m-%d'-like form
    :param split: separator between year/month/day, default '-'
    :param fix: normalise sloppy spellings, e.g. 2016-1-1 -> 2016-01-01
    :return: the date as an int
    """
    if fix and split == '-':
        # Normalisation only applies to the standard '-' separated form.
        date_str = fix_date(date_str)
    return int(date_str.replace(split, ''))
def fix_date(date_str):
    """
    Normalise sloppy date spellings into '%Y-%m-%d', e.g.:
        2016-1-1    -> 2016-01-01
        2016:01-01  -> 2016-01-01
        2016,01 01  -> 2016-01-01
        2016/01/01  -> 2016-01-01
        2016 1 1    -> 2016-01-01
    Pure string parsing (no datetime API) for speed; uses fmt_date internally.
    :param date_str: date to repair, str or int object (None passes through)
    :return: repaired '%Y-%m-%d' str object
    """
    if date_str is not None:
        # For strings, first drop every non-digit character so that forms
        # like 2016:01-01, 201601-01, 2016,01 01, 2016/01-01 become 20160101.
        if isinstance(date_str, six.string_types):
            date_str = ''.join(list(filter(lambda c: c.isdigit(), date_str)))
        # Then normalise to the '%Y-%m-%d' shape.
        date_str = fmt_date(date_str)
        y, m, d = date_str.split('-')
        if len(m) == 1:
            # zero-pad the month
            m = '0{}'.format(m)
        if len(d) == 1:
            # zero-pad the day
            d = '0{}'.format(d)
        date_str = "%s-%s-%s" % (y, m, d)
    return date_str
def fmt_date(convert_date):
"""
将时间格式如20160101转换为2016-01-01日期格式, 注意没有对如 201611
这样的做fix适配,外部需要明确知道参数的格式,针对特定格式,不使用时间api,
直接进行字符串解析,执行效率高
:param convert_date: 时间格式如20160101所示,int类型或者str类型对象
:return: %Y-%m-%d日期格式str类型对象
"""
if isinstance(convert_date, float):
# float先转换int
convert_date = int(convert_date)
convert_date = str(convert_date)
if len(convert_date) > 8 and convert_date.startswith('20'):
# eg '20160310000000000'
convert_date = convert_date[:8]
if '-' not in convert_date:
if len(convert_date) == 8:
# 20160101 to 2016-01-01
convert_date = "%s-%s-%s" % (convert_date[0:4],
convert_date[4:6], convert_date[6:8])
elif len(convert_date) == 6:
# 201611 to 2016-01-01
convert_date = "%s-0%s-0%s" % (convert_date[0:4],
convert_date[4:5], convert_date[5:6])
else:
raise ValueError('fmt_date: convert_date fmt error {}'.format(convert_date))
return convert_date
# e.g. converts '03:10AM' / '10:35PM' style 12-hour strings into a date/datetime
if __name__ == "__main__":
    # Ad-hoc smoke run of the date helpers.
    # NOTE(review): DateUtil and abbr_to_normal are referenced here but are
    # not defined above in this copy of the module — confirm they are in scope.
    print(DateUtil.getTodayStr() )
    print(DateUtil.getDatetimeToday())
    print(DateUtil.getDatetimeYesterdayStr( DateUtil.getDatetimeToday()))
    print(DateUtil.format_date('07-02 06:00'))
    print(DateUtil.datetime_toString(datetime.now()))
    print(DateUtil.string_toDatetime(DateUtil.format_date('07-02 06:00')))
    print(DateUtil.string_toTimestamp(DateUtil.format_date('07-02 06:00')))
    print(DateUtil.timestamp_toString(DateUtil.string_toTimestamp(DateUtil.format_date('07-02 06:00'))))
    print(DateUtil.date_str_to_int('2007-07-07'))
    print(DateUtil.date_to_millisecond(str(DateUtil.date_str_to_int('2007-07-07'))))
    print(abbr_to_normal('03:35AM'))
    # Walk half-year steps between two dates until the generator is exhausted.
    gen = DateUtil.getNextHalfYear( DateUtil.string_toDate('2016-01-01'),DateUtil.string_toDate('2018-01-01') )
    while True:
        try:
            end = next(gen)
            print( end )
            print( DateUtil.getDatetimeFutureStr(DateUtil.string_toDate(end),1) )
        except StopIteration as e:
            print(e)
            break
| 30.81864 | 111 | 0.58586 | import time
from datetime import datetime, date, timedelta
import six as six
class DateUtil():
    """Static helper collection for date/time parsing, formatting and
    arithmetic.

    All helpers are stateless; the class is only used as a namespace.
    """

    def __init__(self):
        pass

    @staticmethod
    def getDatetimeToday():
        """Return today's date as a datetime.date."""
        return date.today()

    @staticmethod
    def getDatetimeToday3():
        """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        today = datetime.today()
        return today.strftime('%Y-%m-%d %H:%M:%S')

    @staticmethod
    def getDatetimeToday4():
        """Duplicate of getDatetimeToday3, kept for backward compatibility."""
        today = datetime.today()
        return today.strftime('%Y-%m-%d %H:%M:%S')

    @staticmethod
    def getDatetimeToday2():
        """Return today's date as a datetime at midnight.

        Bug fix: the original parsed str(date.today()) ('YYYY-MM-DD') with
        the format '%Y-%m-%d %H:%M:%S', which always raised ValueError.
        """
        t = date.today()
        dt = datetime.strptime(str(t), '%Y-%m-%d')  # date -> str -> datetime
        return dt

    @staticmethod
    def getDatetimeYesterdayStr(today):
        """Return the ISO string of the day before *today* (date/datetime)."""
        yesterday = today + timedelta(days=-1)  # minus one day
        return yesterday.isoformat()

    @staticmethod
    def getDatetimePastStr(today, ndays):
        """Return 'YYYY-MM-DD' for *ndays* days before *today*."""
        past = today + timedelta(days=-ndays)  # minus ndays days
        return DateUtil.date_toString(past)

    @staticmethod
    def getDatetimeFutureStr(today, ndays):
        """Return 'YYYY-MM-DD' for *ndays* days after *today*."""
        past = today + timedelta(days=ndays)  # plus ndays days
        return DateUtil.date_toString(past)

    @staticmethod
    def getNextHalfYear(start, end, ndays=180):
        """Yield 'YYYY-MM-DD' strings stepping *ndays* days at a time from
        *start*; the final value yielded is *end* itself."""
        future = start
        while future < end:
            if future + timedelta(days=ndays) < end:
                future += timedelta(days=ndays)
                yield DateUtil.date_toString(future)
            else:
                yield DateUtil.date_toString(end)
                break

    @staticmethod
    def getTodayStr():
        """Return today's local date as 'YYYY-MM-DD'."""
        return time.strftime('%Y-%m-%d', time.localtime())

    @staticmethod
    def format_date(strDate):
        """Expand an 'MM-DD[ HH:MM]' fragment into 'YYYY-MM-DD HH:MM:SS'
        using the current year; already-valid strings pass through."""
        if DateUtil.isVaildDate(strDate):
            return strDate
        if " " in strDate:
            tupDate = strDate.partition(" ")
            md = tupDate[0]
            hms = tupDate[2] + ":00"
        else:
            md = strDate
            hms = "00:00:00"
        ymdhms = str(datetime.now().year) + "-" + md + " " + hms
        return ymdhms

    @staticmethod
    def isVaildDate(date):
        """Return True when *date* matches 'YYYY-MM-DD HH:MM:SS'.

        (The misspelled name is kept: callers depend on 'isVaildDate'.)
        """
        try:
            time.strptime(date, "%Y-%m-%d %H:%M:%S")
            return True
        except (ValueError, TypeError):
            return False

    @staticmethod
    def format_date_us_history(strDate):
        """Convert a 'prefix|YYYY年MM月DD日 ...' fragment into
        'YYYY-MM-DD ...:00'; already-valid strings pass through."""
        if DateUtil.isVaildDate(strDate):
            return strDate
        tupDate = strDate.partition("|")
        chineseDate = tupDate[2] + ":00"
        date = str(chineseDate)
        date = date.replace("年", "-")
        date = date.replace("月", "-")
        date = date.replace("日", "")
        date = date.strip()
        return date

    # datetime -> 'YYYY-MM-DD HH:MM:SS' string
    @staticmethod
    def datetime_toString(dt):
        return dt.strftime("%Y-%m-%d %H:%M:%S")

    # string -> datetime; returns None (after printing '1') on parse failure,
    # e.g. '2018-06-14\xa014:09\xa0\xa0</a>:00' does not match the format
    @staticmethod
    def string_toDatetime(string):
        dt = None
        try:
            dt = datetime.strptime(string, "%Y-%m-%d %H:%M:%S")
        except (ValueError, TypeError):
            print('1')
        return dt

    # string -> datetime without a seconds field
    @staticmethod
    def string_toDatetime2(string):
        dt = None
        try:
            dt = datetime.strptime(string, "%Y-%m-%d %H:%M")
        except (ValueError, TypeError):
            print('11')
        return dt

    @staticmethod
    def date_toString(dt):
        """Format a date/datetime as 'YYYY-MM-DD'."""
        return dt.strftime("%Y-%m-%d")

    @staticmethod
    def string_toDate(string):
        """Parse 'YYYY-MM-DD' into a datetime."""
        return datetime.strptime(string, "%Y-%m-%d")

    # string -> unix timestamp (seconds, local time)
    @staticmethod
    def string_toTimestamp(strTime):
        return time.mktime(DateUtil.string_toDatetime(strTime).timetuple())

    # unix timestamp -> 'YYYY-MM-DD HH:MM:SS' string
    @staticmethod
    def timestamp_toString(stamp):
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(stamp))

    # datetime -> unix timestamp; @staticmethod was missing in the original
    @staticmethod
    def datetime_toTimestamp(dateTim):
        return time.mktime(dateTim.timetuple())

    @staticmethod
    def millisecond_to_date(millisecond, format):
        """Format a millisecond epoch value with strftime *format*."""
        return time.strftime(format, time.localtime(millisecond / 1000))

    @staticmethod
    def date_to_millisecond(date="20100101", format='%Y%m%d'):
        """Parse *date* with *format* and return its epoch in milliseconds."""
        return int(time.mktime(time.strptime(date, format)) * 1000)

    @staticmethod
    def date_str_to_int(date="2010-01-01"):
        """'2010-01-01' -> 20100101."""
        return int(date.replace("-", ""))

    @staticmethod
    def week_today():
        """Weekday of today: 0=Monday .. 6=Sunday."""
        d = datetime.now()
        return d.weekday()

    K_DEFAULT_DT_FMT = "%Y-%m-%d %H:%M:%S"

    @staticmethod
    def week_of_date(date_str, fmt=K_DEFAULT_DT_FMT):
        """Weekday of *date_str* parsed with *fmt*: 0=Monday .. 6=Sunday."""
        return datetime.strptime(date_str, fmt).weekday()

    @staticmethod
    def year_of_date(date_str, fmt=K_DEFAULT_DT_FMT):
        """Year of *date_str* parsed with *fmt*."""
        return datetime.strptime(date_str, fmt).year

    @staticmethod
    def diff(start_date, end_date, check_order=True):
        """Number of days between two dates given as str or int.

        With check_order=True the arguments may come in either order (they
        are compared as ints and swapped if needed); str inputs with
        check_order=False are the most efficient path.
        """
        # Normalise both dates first (handles int and sloppy str forms).
        start_date = fix_date(start_date)
        end_date = fix_date(end_date)
        if check_order and isinstance(start_date, six.string_types):
            # compare as ints when order has to be checked
            start_date = date_str_to_int(start_date)
        if check_order and isinstance(end_date, six.string_types):
            end_date = date_str_to_int(end_date)
        # Swap so start_date <= end_date when ordering is not guaranteed.
        if check_order and start_date > end_date:
            tmp = end_date
            end_date = start_date
            start_date = tmp
        # Convert back to 'YYYY-MM-DD' strings; these branches are no-ops
        # when check_order is False and strs were supplied.
        if isinstance(start_date, int):
            # noinspection PyTypeChecker
            start_date = fmt_date(start_date)
        if isinstance(end_date, int):
            # noinspection PyTypeChecker
            end_date = fmt_date(end_date)
        sd = str_to_datetime(start_date)
        ed = str_to_datetime(end_date)
        return (ed - sd).days
"""默认的时间日期格式,项目中金融时间序列等时间相关默认格式"""
K_DEFAULT_DT_FMT2 = "%Y-%m-%d"
def str_to_datetime(date_str, fmt=K_DEFAULT_DT_FMT2, fix=True):
    """Parse a date string into a datetime.datetime.

    '2016-01-01' -> datetime.datetime(2016, 1, 1, 0, 0).

    :param date_str: date string, '%Y-%m-%d' by default
    :param fmt: strptime format when date_str is not '%Y-%m-%d'
    :param fix: normalise sloppy spellings, e.g. 2016-1-1 -> 2016-01-01
    :return: the parsed datetime.datetime
    """
    normalised = date_str
    if fix and fmt == K_DEFAULT_DT_FMT2:
        # Normalisation only applies to the default '%Y-%m-%d' format.
        normalised = fix_date(normalised)
    return datetime.strptime(normalised, fmt)
def date_str_to_int(date_str, split='-', fix=True):
    """Convert '2016-01-01' to 20160101 by plain string manipulation
    (no datetime API, for speed).

    :param date_str: date string in '%Y-%m-%d'-like form
    :param split: separator between year/month/day, default '-'
    :param fix: normalise sloppy spellings, e.g. 2016-1-1 -> 2016-01-01
    :return: the date as an int
    """
    if fix and split == '-':
        # Normalisation only applies to the standard '-' separated form.
        date_str = fix_date(date_str)
    return int(date_str.replace(split, ''))
def fix_date(date_str):
    """
    Normalise sloppy date spellings into '%Y-%m-%d', e.g.:
        2016-1-1    -> 2016-01-01
        2016:01-01  -> 2016-01-01
        2016,01 01  -> 2016-01-01
        2016/01/01  -> 2016-01-01
        2016 1 1    -> 2016-01-01
    Pure string parsing (no datetime API) for speed; uses fmt_date internally.
    :param date_str: date to repair, str or int object (None passes through)
    :return: repaired '%Y-%m-%d' str object
    """
    if date_str is not None:
        # For strings, first drop every non-digit character so that forms
        # like 2016:01-01, 201601-01, 2016,01 01, 2016/01-01 become 20160101.
        if isinstance(date_str, six.string_types):
            date_str = ''.join(list(filter(lambda c: c.isdigit(), date_str)))
        # Then normalise to the '%Y-%m-%d' shape.
        date_str = fmt_date(date_str)
        y, m, d = date_str.split('-')
        if len(m) == 1:
            # zero-pad the month
            m = '0{}'.format(m)
        if len(d) == 1:
            # zero-pad the day
            d = '0{}'.format(d)
        date_str = "%s-%s-%s" % (y, m, d)
    return date_str
def fmt_date(convert_date):
"""
将时间格式如20160101转换为2016-01-01日期格式, 注意没有对如 201611
这样的做fix适配,外部需要明确知道参数的格式,针对特定格式,不使用时间api,
直接进行字符串解析,执行效率高
:param convert_date: 时间格式如20160101所示,int类型或者str类型对象
:return: %Y-%m-%d日期格式str类型对象
"""
if isinstance(convert_date, float):
# float先转换int
convert_date = int(convert_date)
convert_date = str(convert_date)
if len(convert_date) > 8 and convert_date.startswith('20'):
# eg '20160310000000000'
convert_date = convert_date[:8]
if '-' not in convert_date:
if len(convert_date) == 8:
# 20160101 to 2016-01-01
convert_date = "%s-%s-%s" % (convert_date[0:4],
convert_date[4:6], convert_date[6:8])
elif len(convert_date) == 6:
# 201611 to 2016-01-01
convert_date = "%s-0%s-0%s" % (convert_date[0:4],
convert_date[4:5], convert_date[5:6])
else:
raise ValueError('fmt_date: convert_date fmt error {}'.format(convert_date))
return convert_date
# e.g. converts '03:10AM' / '10:35PM' style 12-hour strings into a date/datetime
def abbr_to_normal(time):
    """Convert a 12-hour clock string such as '03:10AM' or '10:35PM' into a
    datetime anchored on today's date.

    Original author's note, kept verbatim:
        12:24:35AM -> 0:24:35
        12:24:35PM -> 12:24:35
    """
    time = time.strip()
    is_am = time.find("AM") >= 0
    if is_am:
        clock = time.strip("AM")
        # Special-case exact midnight written as '12:00AM'.
        if clock == "12:00":
            clock = "00:00"
    else:
        clock = time.strip("PM")
    pieces = clock.split(':')
    hh = int(pieces[0])
    mm = int(pieces[1])
    ss = 0
    if is_am:
        # 12:xxAM (xx > 0) is 00:xx on the 24-hour clock.
        if hh == 12 and (mm > 0 or ss > 0):
            hh = 0
    elif hh != 12:
        # Every PM hour except 12 shifts forward by 12 hours.
        hh = hh + 12
    today = date.today()
    timestr = ("%04d-%02d-%02d %02d:%02d:%02d" % (today.year, today.month, today.day, hh, mm, ss))
    return datetime.strptime(timestr, "%Y-%m-%d %H:%M:%S")
if __name__ == "__main__":
    # Ad-hoc smoke run of the DateUtil helpers and abbr_to_normal.
    print(DateUtil.getTodayStr() )
    print(DateUtil.getDatetimeToday())
    print(DateUtil.getDatetimeYesterdayStr( DateUtil.getDatetimeToday()))
    print(DateUtil.format_date('07-02 06:00'))
    print(DateUtil.datetime_toString(datetime.now()))
    print(DateUtil.string_toDatetime(DateUtil.format_date('07-02 06:00')))
    print(DateUtil.string_toTimestamp(DateUtil.format_date('07-02 06:00')))
    print(DateUtil.timestamp_toString(DateUtil.string_toTimestamp(DateUtil.format_date('07-02 06:00'))))
    print(DateUtil.date_str_to_int('2007-07-07'))
    print(DateUtil.date_to_millisecond(str(DateUtil.date_str_to_int('2007-07-07'))))
    print(abbr_to_normal('03:35AM'))
    # Walk half-year steps between two dates until the generator is exhausted.
    gen = DateUtil.getNextHalfYear( DateUtil.string_toDate('2016-01-01'),DateUtil.string_toDate('2018-01-01') )
    while True:
        try:
            end = next(gen)
            print( end )
            print( DateUtil.getDatetimeFutureStr(DateUtil.string_toDate(end),1) )
        except StopIteration as e:
            print(e)
            break
| 4,028 | 4,343 | 46 |
a5af6c0ce2e69540230f8212a70815a1f85ad977 | 2,158 | py | Python | rapport/models.py | amineadmane/AutoCare | f4339cdc2d78c985b77b7b1cc469bae67728086b | [
"MIT"
] | null | null | null | rapport/models.py | amineadmane/AutoCare | f4339cdc2d78c985b77b7b1cc469bae67728086b | [
"MIT"
] | null | null | null | rapport/models.py | amineadmane/AutoCare | f4339cdc2d78c985b77b7b1cc469bae67728086b | [
"MIT"
] | null | null | null | from django.db import models
from mission.models import Vehicule, Conducteur
from users.models import User
# Create your models here.
| 39.236364 | 87 | 0.696478 | from django.db import models
from mission.models import Vehicule, Conducteur
from users.models import User
# Create your models here.
class RapportSignalProbleme(models.Model):
    """Report flagging a problem (breakdown or logbook issue) on a vehicle."""
    # Free-text description of the problem.
    description = models.TextField(null=False, blank=False)
    # Vehicle the problem concerns.
    vehicule = models.ForeignKey(Vehicule,on_delete=models.CASCADE, null=False)
    # Date the problem was observed (entered manually, not auto-set).
    date = models.DateField(auto_now=False,auto_now_add=False, null=False, blank=False)
    # User who wrote the report.
    redacteur = models.ForeignKey(User,on_delete=models.CASCADE, null=False)
    GRAVITE_CHOICES = (
        ('FAIBLE', 'FAIBLE'),
        ('MOYEN', 'MOYEN'),
        ('FORT', 'FORT'),
        ('CRITIQUE', 'CRITIQUE'),
    )
    gravite = models.CharField(max_length=20,choices=GRAVITE_CHOICES)
    TYPE_PROBLEME_CHOICES = (
        ('PANNE', 'PANNE'),
        ('JCP', 'JCP'),
    )
    type_probleme = models.CharField(max_length=30,choices=TYPE_PROBLEME_CHOICES)
    # None = not yet reviewed; True/False once confirmed or rejected.
    confirmed = models.BooleanField(default=None,null=True)
class RapportSignalChauffeur(models.Model):
    """Report flagging an issue about a driver (conducteur)."""
    # Free-text description of the incident.
    description = models.TextField(null=False, blank=False)
    # Driver the report concerns.
    conducteur = models.ForeignKey(Conducteur,on_delete=models.CASCADE, null=False)
    # Date of the incident (entered manually, not auto-set).
    date = models.DateField(auto_now=False,auto_now_add=False, null=False, blank=False)
    # User who wrote the report.
    redacteur = models.ForeignKey(User,on_delete=models.CASCADE, null=False)
    GRAVITE_CHOICES = (
        ('FAIBLE', 'FAIBLE'),
        ('MOYEN', 'MOYEN'),
        ('FORT', 'FORT'),
        ('CRITIQUE', 'CRITIQUE'),
    )
    gravite = models.CharField(max_length=20,choices=GRAVITE_CHOICES)
class RapportSignalSinistre(models.Model):
    """Report of an accident (sinistre) involving a vehicle and a driver."""
    # Free-text description of the accident.
    description = models.TextField(null=False, blank=False)
    # Vehicle involved in the accident.
    vehicule = models.ForeignKey(Vehicule,on_delete=models.CASCADE, null=False)
    # Driver involved in the accident.
    conducteur = models.ForeignKey(Conducteur,on_delete=models.CASCADE, null=False)
    # Date of the accident (entered manually, not auto-set).
    date = models.DateField(auto_now=False,auto_now_add=False, null=False, blank=False)
    # User who wrote the report.
    redacteur = models.ForeignKey(User,on_delete=models.CASCADE, null=False)
    GRAVITE_CHOICES = (
        ('FAIBLE', 'FAIBLE'),
        ('MOYEN', 'MOYEN'),
        ('FORT', 'FORT'),
        ('CRITIQUE', 'CRITIQUE'),
    )
    gravite = models.CharField(max_length=20,choices=GRAVITE_CHOICES)
| 0 | 1,951 | 69 |
e659798959ccc9578bd6057f02b19c89f7c9f6ea | 5,763 | py | Python | blocking_cache/test/MultiCacheTestCases.py | cornell-brg/pymtl3-mem | 458cbc0c667cf8645aad0e33489458eb696f6928 | [
"BSD-3-Clause"
] | 4 | 2020-03-13T11:35:30.000Z | 2020-04-21T03:01:34.000Z | blocking_cache/test/MultiCacheTestCases.py | cornell-brg/pymtl3-mem | 458cbc0c667cf8645aad0e33489458eb696f6928 | [
"BSD-3-Clause"
] | 4 | 2020-01-07T20:58:32.000Z | 2020-04-01T22:02:31.000Z | blocking_cache/test/MultiCacheTestCases.py | cornell-brg/pymtl3-mem | 458cbc0c667cf8645aad0e33489458eb696f6928 | [
"BSD-3-Clause"
] | 2 | 2020-09-08T19:34:52.000Z | 2021-06-18T14:58:47.000Z | """
=========================================================================
MultiCacheTestCases.py
=========================================================================
Test cases for multicache configs
Author : Xiaoyu Yan (xy97), Eric Tang (et396)
Date : 13 April 2020
"""
import pytest
from test.sim_utils import (
mreq, resp, CacheReqType, CacheRespType, MemReqType, MemRespType, CacheTestParams
)
# Main test memory for dmapped tests
| 44.330769 | 94 | 0.490023 | """
=========================================================================
MultiCacheTestCases.py
=========================================================================
Test cases for multicache configs
Author : Xiaoyu Yan (xy97), Eric Tang (et396)
Date : 13 April 2020
"""
import pytest
from test.sim_utils import (
mreq, resp, CacheReqType, CacheRespType, MemReqType, MemRespType, CacheTestParams
)
# Main test memory for dmapped tests
def multicache_mem():
  """Flat (address, data) pair list used to preload the test memory."""
  return [
    0x00000000, 0,
    0x00000004, 1,
    0x00000008, 2,
    0x0000000c, 3,
    0x00000010, 4,
    0x00000014, 5,
    0x00000018, 6,
    0x0000001c, 7,
    0x00000020, 8,
    0x00000024, 9,
    0x00000028, 0xa,
    0x0000002c, 0xb,
    0x00020000, 0xc,
    0x00020004, 0xd,
    0x00020008, 0xe,
    0x0002000c, 0xf,
    0x00020010, 0x10,
    0x00020014, 0x11,
    0x00001000, 0x01020304,
    0x00001004, 0x05060708,
    0x00001008, 0x090a0b0c,
    0x0000100c, 0x0d0e0f10,
    0x00002000, 0x00facade,
    0x00002004, 0x05ca1ded,
    0x00002070, 0x70facade,
    0x00002074, 0x75ca1ded,
  ]
def rd_wr_2c():
  """Two direct-mapped caches: write from core 0, then read from core 1.

  Read from 1 core and then writeback to mem. Read from second core -> mem
  should be updated.
  """
  associativities = [ 1, 1 ]
  cache_sizes = [ 32, 32 ]
  msgs = [
    #     cache ord  type  opq  addr        len  data               type  opq  test len  data
    mreq( 0,    0,   'rd', 0,   0x00000000, 0,   0x00), resp( 'rd', 0,    0,   0,   0    ),
    mreq( 0,    0,   'wr', 0,   0x00000000, 0,   0x01), resp( 'wr', 0,    1,   0,   0    ),
    mreq( 0,    0,   'rd', 0,   0x00020004, 0,   0x00), resp( 'rd', 0,    0,   0,   0xd  ),
    mreq( 1,    1,   'rd', 0,   0x00000000, 0,   0x00), resp( 'rd', 0,    0,   0,   0x01 ),
  ]
  return associativities, cache_sizes, msgs
def amo_2c():
  """Test AMO transactions for 2 caches: AMO-add on core 0, both cores read."""
  associativities = [ 1, 1 ]
  cache_sizes = [ 32, 32 ]
  msgs = [
    #     cache ord  type  opq  addr        len  data               type  opq  test len  data
    mreq( 0,    0,   'ad', 0,   0x00000000, 0,   0x01), resp( 'ad', 0,    0,   0,   0    ),
    mreq( 0,    1,   'rd', 1,   0x00000000, 0,   0x00), resp( 'rd', 1,    0,   0,   0x01 ),
    mreq( 1,    1,   'rd', 0,   0x00000000, 0,   0x00), resp( 'rd', 0,    0,   0,   0x01 ),
  ]
  return associativities, cache_sizes, msgs
def inv_fl_2c():
  """Flush on one cache then invalidate the other; reads must see the
  flushed/updated memory contents."""
  associativities = [ 1, 1 ]
  cache_sizes = [ 32, 32 ]
  msgs = [
    #     cache ord  type   opq  addr        len  data                type   opq  test len  data
    mreq( 0,    0,   'wr',  0,   0x00000000, 0,   0x01), resp( 'wr',  0,    0,   0,   0    ),
    mreq( 0,    0,   'wr',  1,   0x00000008, 0,   0x03), resp( 'wr',  1,    1,   0,   0    ),
    mreq( 1,    0,   'wr',  0,   0x00000004, 0,   0x12), resp( 'wr',  0,    0,   0,   0    ),
    mreq( 1,    0,   'fl',  1,   0x00000000, 0,   0x00), resp( 'fl',  1,    0,   0,   0    ),
    mreq( 0,    0,   'inv', 2,   0x00000000, 0,   0x00), resp( 'inv', 2,    0,   0,   0x0  ),
    mreq( 0,    1,   'rd',  3,   0x00000008, 0,   0x00), resp( 'rd',  3,    0,   0,   0x03 ),
    mreq( 0,    1,   'rd',  4,   0x00000004, 0,   0x00), resp( 'rd',  4,    1,   0,   0x12 ),
  ]
  return associativities, cache_sizes, msgs
def inv_fl_4c():
  """Four 2-way caches: each core writes a distinct word, all flush, all
  invalidate, then each core reads a word another core wrote."""
  associativities = [ 2, 2, 2, 2 ]
  cache_sizes = [ 64, 64, 64, 64 ]
  msgs = [
    #     cache ord  type   opq  addr        len  data                type   opq  test len  data
    mreq( 0,    0,   'wr',  0,   0x00000000, 0,   0x01), resp( 'wr',  0,    0,   0,   0    ),
    mreq( 1,    0,   'wr',  0,   0x00000004, 0,   0x12), resp( 'wr',  0,    0,   0,   0    ),
    mreq( 2,    0,   'wr',  0,   0x00000008, 0,   0x23), resp( 'wr',  0,    0,   0,   0    ),
    mreq( 3,    0,   'wr',  0,   0x0000000c, 0,   0x34), resp( 'wr',  0,    0,   0,   0    ),
    mreq( 0,    0,   'fl',  1,   0x00000000, 0,   0x00), resp( 'fl',  1,    0,   0,   0    ),
    mreq( 1,    0,   'fl',  1,   0x00000000, 0,   0x00), resp( 'fl',  1,    0,   0,   0    ),
    mreq( 2,    0,   'fl',  1,   0x00000000, 0,   0x00), resp( 'fl',  1,    0,   0,   0    ),
    mreq( 3,    0,   'fl',  1,   0x00000000, 0,   0x00), resp( 'fl',  1,    0,   0,   0    ),
    mreq( 0,    0,   'inv', 2,   0x00000000, 0,   0x00), resp( 'inv', 2,    0,   0,   0    ),
    mreq( 1,    0,   'inv', 2,   0x00000000, 0,   0x00), resp( 'inv', 2,    0,   0,   0    ),
    mreq( 2,    0,   'inv', 2,   0x00000000, 0,   0x00), resp( 'inv', 2,    0,   0,   0    ),
    mreq( 3,    0,   'inv', 2,   0x00000000, 0,   0x00), resp( 'inv', 2,    0,   0,   0    ),
    mreq( 0,    1,   'rd',  3,   0x0000000c, 0,   0x00), resp( 'rd',  3,    0,   0,   0x34 ),
    mreq( 1,    1,   'rd',  3,   0x00000008, 0,   0x00), resp( 'rd',  3,    0,   0,   0x23 ),
    mreq( 2,    1,   'rd',  3,   0x00000004, 0,   0x00), resp( 'rd',  3,    0,   0,   0x12 ),
    mreq( 3,    1,   'rd',  3,   0x00000000, 0,   0x00), resp( 'rd',  3,    0,   0,   0x01 ),
  ]
  return associativities, cache_sizes, msgs
class MultiCacheTestCases:
  """Parametrized multi-cache test cases; a concrete subclass supplies
  run_test()."""

  @pytest.mark.parametrize(
    " name,  test,     stall_prob,latency,src_delay,sink_delay", [
    ("SIMP",  rd_wr_2c,  0.0,      1,      0,        0   ),
    ("AMO",   amo_2c,    0.0,      1,      0,        0   ),
    ("INVFL", inv_fl_2c, 0.0,      1,      0,        0   ),
    ("INVFL", inv_fl_4c, 0.0,      1,      0,        0   ),
    ("SIMP",  rd_wr_2c,  0.0,      2,      1,        1   ),
  ])
  # defaults to 4 word cache line by importing from sim_utils
  def test_generic( s, name, test, stall_prob, latency, src_delay, sink_delay,
                    cmdline_opts, line_trace ):
    """Build CacheTestParams from the chosen message generator and delay
    settings, then delegate to run_test (defined by the concrete class)."""
    mem = multicache_mem()
    associativities, cache_sizes, msgs = test()
    tp = CacheTestParams( msgs, mem, CacheReqType, CacheRespType, MemReqType,
                          MemRespType, associativities, cache_sizes, stall_prob,
                          latency, src_delay, sink_delay )
    s.run_test( tp, cmdline_opts, line_trace )
| 4,673 | 501 | 137 |
96ed53e2484dd4c575af3c463e44f6ef77c182be | 2,641 | py | Python | services/weather.py | aalcala07/home_dashboard | 3a6606f4afc457d44fc238972bc7e8d36a64ba7b | [
"MIT"
] | null | null | null | services/weather.py | aalcala07/home_dashboard | 3a6606f4afc457d44fc238972bc7e8d36a64ba7b | [
"MIT"
] | 6 | 2022-02-11T14:14:08.000Z | 2022-02-26T05:15:29.000Z | services/weather.py | aalcala07/home_dashboard | 3a6606f4afc457d44fc238972bc7e8d36a64ba7b | [
"MIT"
] | 1 | 2022-02-11T01:19:02.000Z | 2022-02-11T01:19:02.000Z | import requests, json
from os.path import exists
from decouple import config
import logging
# Base URL of the OpenWeatherMap API.
OPEN_WEATHER_MAP_API_URL = "https://api.openweathermap.org"
# Load the shared services config at import time.
with open('cache/services.json') as json_file:
    service_config = json.load(json_file)
# Config entries for the 'weather' service (IndexError if the entry is absent).
configs = [service['configs'] for service in service_config['services'] if service['service'] == 'weather'][0]
# Request data from API
# Get data from cache file
# Get the current weather
# Get the daily forecast
| 31.819277 | 183 | 0.664142 | import requests, json
from os.path import exists
from decouple import config
import logging
# Base URL of the OpenWeatherMap API.
OPEN_WEATHER_MAP_API_URL = "https://api.openweathermap.org"
# Load the shared services config at import time.
with open('cache/services.json') as json_file:
    service_config = json.load(json_file)
# Config entries for the 'weather' service (IndexError if the entry is absent).
configs = [service['configs'] for service in service_config['services'] if service['service'] == 'weather'][0]
# Request data from API
def update():
    """Fetch One Call weather data for every configured location and write it
    to cache/.weather_data_<name>."""
    # NOTE(review): `config` resolves to the module-level config() helper
    # defined below, which shadows `from decouple import config`, so this
    # reads from the service config list rather than the environment —
    # confirm that is the intent.
    api_key = config('open_weather_map_api_key')
    if not api_key or api_key == '':
        logging.error('Cannot fetch weather. Missing OPEN_WEATHER_MAP_API_KEY')
        return
    for location in locations():
        logging.info('Fetching weather data for ' + location['name'])
        try:
            r = requests.get(f'{OPEN_WEATHER_MAP_API_URL}/data/2.5/onecall?lat={location["lat"]}&lon={location["long"]}&appid={api_key}&exclude=minutely,hourly,alerts&units=imperial')
            weather_data = r.json()
            if 'current' in weather_data:
                # Cache the raw payload for get()/current()/daily() to read.
                weather_data_file = open("cache/.weather_data_" + location['name'], "w")
                weather_data_file.write(json.dumps(weather_data, indent = 4))
                weather_data_file.close()
                logging.info('Weather data saved for ' + location['name'])
            else:
                # Rate limit reached or other error
                logging.error('Weather was not provided. Check API rate limit.')
        except requests.exceptions.JSONDecodeError:
            logging.error('Weather data not properly formed JSON.')
        except requests.exceptions.RequestException as e:
            logging.error('Connection error while trying to retrieve weather data.')
# Get data from cache file
def get(location_name):
    """Load cached weather data for a location; '' means the local location.

    Returns the parsed JSON dict, or None when no cache file exists.
    """
    if location_name == '':
        location_name = 'local'
    filepath = 'cache/.weather_data_' + location_name
    if not exists(filepath):
        return None
    with open(filepath) as cache_file:
        return json.load(cache_file)
# Get the current weather
def current(location_name):
    """Return the 'current' section of the cached weather data, or None."""
    data = get(location_name)
    return data['current'] if data else None
# Get the daily forecast
def daily(location_name):
    """Return the 'daily' forecast section of the cached weather data, or None."""
    data = get(location_name)
    return data['daily'] if data else None
def config(name):
    """Look up a value in this service's configs; None when *name* is absent.

    Bug fix: the original indexed [0] unconditionally and raised IndexError
    for unknown names, while callers such as update() treat a falsy result
    as "not configured" — so return None instead of crashing.
    NOTE(review): this function shadows `from decouple import config` at
    module level — confirm that is intentional.
    """
    matches = [entry['value'] for entry in configs if entry['name'] == name]
    return matches[0] if matches else None
def locations():
    """Return the configured locations enabled for the weather service."""
    with open('cache/locations.json') as cfg_file:
        all_locations = json.load(cfg_file)['locations']
    enabled = config('locations')
    return [loc for loc in all_locations if loc['name'] in enabled]
018efbf54b9a358bd0c9109793e8c9841607e645 | 1,157 | py | Python | MTandHJ/p17.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null | MTandHJ/p17.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null | MTandHJ/p17.py | MTandHJ/leetcode | f3832ed255d259cb881666ec8bd3de090d34e883 | [
"MIT"
] | null | null | null |
from typing import List
from base import version
| 24.617021 | 77 | 0.452895 |
from typing import List
from base import version
class Solution:
    """LeetCode 17: letter combinations of a phone number.

    NOTE(review): letterCombinations is defined twice; the second
    (iterative) definition shadows the first (recursive), so only the
    second is ever called at runtime.
    """
    # Phone-keypad digit -> letters mapping ('1' maps to no letters).
    mapping = {
        '1': '',
        '2': 'abc',
        '3': 'def',
        '4': 'ghi',
        '5': 'jkl',
        '6': 'mno',
        '7': 'pqrs',
        '8': 'tuv',
        '9': 'wxyz'
    }
    @version("40ms, 15.1mb")
    def letterCombinations(self, digits: str) -> List[str]:
        # Recursive variant (shadowed by the definition below).
        if len(digits) == 0:
            return []
        def search(start, digits):
            # Build all combinations for the remaining digits, each
            # prefixed with the partial combination *start*.
            if len(digits) == 0:
                return [start]
            elif len(digits) == 1:
                return [start + item for item in self.mapping[digits[0]]]
            ans = []
            for letter in self.mapping[digits[0]]:
                for item in search(letter, digits[1:]):
                    ans.append(start + item)
            return ans
        return search('', digits)
    @version("36ms, 15.1mb")
    def letterCombinations(self, digits: str) -> List[str]:
        # Iterative variant: extend every partial combination per digit.
        if len(digits) == 0:
            return []
        ans = ['']
        for digit in digits:
            ans = [prev + cur for prev in ans for cur in self.mapping[digit]]
        return ans
| 753 | 307 | 23 |
7ce4af0cf9f76cffec16110c3f2022883f828de9 | 204 | py | Python | mini-scripts/Python_RegEx_metacharacters__end_with.txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | mini-scripts/Python_RegEx_metacharacters__end_with.txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | mini-scripts/Python_RegEx_metacharacters__end_with.txt.py | Web-Dev-Collaborative/PYTHON_PRAC | 856f902fb43dcccae168d34ee6aacc02427a7ac6 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | import re
txt = "hello world"
# Check if the string ends with 'world':
# findall returns a (possibly empty) list; non-empty means the $-anchored
# pattern matched at the end of the string.
x = re.findall("world$", txt)
if x:
    print("Yes, the string ends with 'world'")
else:
    print("No match")
# Author: Bryan G
| 15.692308 | 46 | 0.632353 | import re
txt = "hello world"
# Check if the string ends with 'world':
# findall returns a (possibly empty) list; non-empty means the $-anchored
# pattern matched at the end of the string.
x = re.findall("world$", txt)
if x:
    print("Yes, the string ends with 'world'")
else:
    print("No match")
# Author: Bryan G
| 0 | 0 | 0 |
df8d267fc858dc0e92de5c35f582ec7e8b0a3e39 | 605 | py | Python | src/messagetypes/vote.py | coffeedogs/PyBitmessage | 634a49cd6d8fc2f52504586be4c4766340641b25 | [
"MIT"
] | 1 | 2018-07-17T09:51:23.000Z | 2018-07-17T09:51:23.000Z | src/messagetypes/vote.py | coffeedogs/PyBitmessage | 634a49cd6d8fc2f52504586be4c4766340641b25 | [
"MIT"
] | 4 | 2019-11-30T14:01:12.000Z | 2020-01-03T13:20:57.000Z | src/messagetypes/vote.py | coffeedogs/PyBitmessage | 634a49cd6d8fc2f52504586be4c4766340641b25 | [
"MIT"
] | 1 | 2018-04-25T08:08:48.000Z | 2018-04-25T08:08:48.000Z | from debug import logger
from messagetypes import MsgBase
| 25.208333 | 50 | 0.563636 | from debug import logger
from messagetypes import MsgBase
class Vote(MsgBase):
    """Message type carrying a vote for a given message id."""

    def __init__(self):
        return

    def decode(self, data):
        """Populate this Vote from a decoded payload dict (msgid + vote)."""
        self.msgid = data["msgid"]
        self.vote = data["vote"]

    def encode(self, data):
        """Serialise *data* into self.data; a missing key is logged, not fatal."""
        super(Vote, self).encode()
        try:
            self.data["msgid"] = data["msgid"]
            self.data["vote"] = data["vote"]
        except KeyError as e:
            # Bug fix: KeyError has no .name attribute, so the original
            # `e.name` raised AttributeError here instead of logging.
            logger.error("Missing key %s", e)
        return self.data

    def process(self):
        """Log the decoded fields."""
        logger.debug("msgid: %s", self.msgid)
        logger.debug("vote: %s", self.vote)
| 418 | -1 | 130 |
e284bdf8197d5c25272d53dac5816af3341fd2f8 | 1,446 | py | Python | dlk/python/dlk/frontend/base.py | toohsk/blueoil | 596922caa939db9c5ecbac3286fbf6f703865ee6 | [
"Apache-2.0"
] | null | null | null | dlk/python/dlk/frontend/base.py | toohsk/blueoil | 596922caa939db9c5ecbac3286fbf6f703865ee6 | [
"Apache-2.0"
] | 1 | 2018-11-21T07:06:17.000Z | 2018-11-21T07:06:17.000Z | dlk/python/dlk/frontend/base.py | toohsk/blueoil | 596922caa939db9c5ecbac3286fbf6f703865ee6 | [
"Apache-2.0"
] | 2 | 2019-02-08T10:03:34.000Z | 2019-03-20T06:25:55.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Base IO module."""
from abc import ABCMeta, abstractmethod
from core.model import Model
class BaseIO(metaclass=ABCMeta):
    """Base class for model IO."""
    @abstractmethod
    def read(self, path: str) -> Model:
        """Read a model.
        Parameters
        ----------
        path : str
            Path to the file to be read
        Returns
        -------
        model : Model
            The model
        """
        # Abstract: concrete reader implementations must override.
        pass
    @abstractmethod
    def write(self, model: Model, path: str) -> None:
        """Write the model to a file.
        Parameters
        ----------
        model : Model
            Model to be written
        path : str
            Path to the written file
        """
        # Abstract: concrete writer implementations must override.
        pass
| 26.290909 | 79 | 0.578838 | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Base IO module."""
from abc import ABCMeta, abstractmethod
from core.model import Model
class BaseIO(metaclass=ABCMeta):
"""Base class for model IO."""
@abstractmethod
def read(self, path: str) -> Model:
"""Read a model.
Parameters
----------
path : str
Path to the file to be read
Returns
-------
model : Model
The model
"""
pass
@abstractmethod
def write(self, model: Model, path: str) -> None:
"""Write the model to a file.
Parameters
----------
model : Model
Model to be written
path : str
Path to the written file
"""
pass
| 0 | 0 | 0 |
4c646e04d014d1da78010596fc641c62e6c33206 | 203 | py | Python | Lambda/FileAddedInS3.py | nihitjain11/DevOps | 44fa0dc3a18cbcae8de635bf7e77836edf1fb5e6 | [
"MIT"
] | null | null | null | Lambda/FileAddedInS3.py | nihitjain11/DevOps | 44fa0dc3a18cbcae8de635bf7e77836edf1fb5e6 | [
"MIT"
] | null | null | null | Lambda/FileAddedInS3.py | nihitjain11/DevOps | 44fa0dc3a18cbcae8de635bf7e77836edf1fb5e6 | [
"MIT"
] | null | null | null | import boto3
import json | 25.375 | 51 | 0.684729 | import boto3
import json
def lambda_handler(event, context):
s3 = boto3.resource('s3')
bucket = s3.Bucket('terraform-devops-bootcamp')
for obj in bucket.objects.all():
print(obj.key) | 156 | 0 | 23 |
9eab3df60bae334b85cf23312305a03c5374fbad | 364 | py | Python | backend/GDVoteSys/apps/migrations/0008_auto_20201218_1104.py | isatisroot/GDTPS | 92a599751d46c1a36149de3e283d8c63aa66d1f9 | [
"Xnet",
"X11"
] | null | null | null | backend/GDVoteSys/apps/migrations/0008_auto_20201218_1104.py | isatisroot/GDTPS | 92a599751d46c1a36149de3e283d8c63aa66d1f9 | [
"Xnet",
"X11"
] | null | null | null | backend/GDVoteSys/apps/migrations/0008_auto_20201218_1104.py | isatisroot/GDTPS | 92a599751d46c1a36149de3e283d8c63aa66d1f9 | [
"Xnet",
"X11"
] | null | null | null | # Generated by Django 3.0.6 on 2020-12-18 03:04
from django.db import migrations
| 19.157895 | 47 | 0.585165 | # Generated by Django 3.0.6 on 2020-12-18 03:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('apps', '0007_auto_20201218_1039'),
]
operations = [
migrations.RenameField(
model_name='meeting',
old_name='addrress',
new_name='address',
),
]
| 0 | 258 | 23 |