blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3905f5f1c9b86468bc04cf0f37ce790e482ffe99 | 7c0026e4e6e47114c93bd8a49c4d2759cf7a8613 | /starter_code_section_7/tests/system/models/test_system_user.py | 0cc5b14a8a835e16cc7eb267ebbef77c11866c73 | [] | no_license | ikostan/automation_with_python | 5edf9bca971629fc7621af2957d26c3f0f73fe9e | 131a0699f927ac40ea625a3526c9193863b1cc1c | refs/heads/master | 2020-05-05T09:42:14.360826 | 2019-07-03T01:49:38 | 2019-07-03T01:49:38 | 179,913,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | from starter_code_section_7.tests.base_test import BaseTest
from starter_code_section_7.models.user import UserModel
import json
class UserSystemTest(BaseTest):
# Test User registration:
def test_register_user(self):
with self.app() as client:
with self.app_context():
username = 'uname'
response = client.post('/register',
data={'username': username,
'password': 'password'})
# Assert response
self.assertEqual(response.status_code, 201)
self.assertDictEqual(json.loads(response.data),
{'message': 'User created successfully.'})
# Assert user in DB
self.assertIsNotNone(UserModel.find_by_username(username))
self.assertIsNotNone(UserModel.find_by_id(1))
def test_register_and_login(self):
with self.app() as client:
with self.app_context():
username = 'uname'
password = 'password'
client.post('/register',
data={'username': username,
'password': password})
auth_response = client.post('/auth',
data=json.dumps({'username': username,
'password': password}),
headers={'Content-Type': 'application/json'})
self.assertIn('access_token', json.loads(auth_response.data).keys()) # 'access_token'
def test_register_duplicate_user(self):
with self.app() as client:
with self.app_context():
username = 'uname'
password = 'password'
client.post('/register',
data={'username': username,
'password': password})
response = client.post('/register',
data={'username': username,
'password': password})
self.assertEqual(response.status_code, 400)
self.assertDictEqual(json.loads(response.data),
{'message': 'A user with that username already exists.'})
| [
"igorkostan@gmail.com"
] | igorkostan@gmail.com |
83d01be9053a9d3d262a71ece694582b6fa24bb6 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/C/colby_neeb/swr3_twitter_search_1.py | 03b363ad975f75f06e90c4374e6f58115dd8bf9c | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,430 | py | ###############################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###############################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'swr3'
RESULTS_PER_PAGE = '100'
LANGUAGE = ''
NUM_PAGES = 50
UNTIL = '2012-12-27'
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s&until=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page, UNTIL)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['to_user'] = result['to_user']
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['from_user_id'] = result['from_user_id']
data['to_user_id'] = result['to_user_id']
data['source'] = result['source']
data['iso_language_code'] = result['iso_language_code']
data['profile_image_url'] = result['profile_image_url']
data['created_at'] = result['created_at']
data['geo'] = result['geo']
print data ['created_at'], ['iso_language_code'], ['from_user'], ['from_user_id'], ['to_user'], ['to_user_id'], ['source'], ['profile_image_url'], ['geo'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
###############################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###############################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'swr3'
RESULTS_PER_PAGE = '100'
LANGUAGE = ''
NUM_PAGES = 50
UNTIL = '2012-12-27'
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s&until=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page, UNTIL)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['to_user'] = result['to_user']
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['from_user_id'] = result['from_user_id']
data['to_user_id'] = result['to_user_id']
data['source'] = result['source']
data['iso_language_code'] = result['iso_language_code']
data['profile_image_url'] = result['profile_image_url']
data['created_at'] = result['created_at']
data['geo'] = result['geo']
print data ['created_at'], ['iso_language_code'], ['from_user'], ['from_user_id'], ['to_user'], ['to_user_id'], ['source'], ['profile_image_url'], ['geo'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
45f489be1aa74e7d787ad35306662e37defc136b | e1c900578859ab537d0ca626b8446152b140d0e7 | /ntds/views.py | e2876669a41e303609eecb4f6a21cca83edfe99e | [] | no_license | apavluck/SMS | 521202865e240b365a2e9425415b897ba475b419 | 5da660391a3788c3858eb51c82f4cd729fdc90b5 | refs/heads/master | 2020-04-09T10:16:44.814700 | 2015-01-27T09:25:21 | 2015-01-27T09:25:21 | 29,913,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,348 | py | import os
from django.template import RequestContext
from django.shortcuts import redirect, get_object_or_404, render_to_response
from django.conf import settings
from rapidsms_xforms.models import XForm, XFormSubmission
from generic.views import generic
from django.contrib.auth.decorators import user_passes_test
from django.views.decorators.csrf import csrf_exempt
from django.utils.safestring import mark_safe
from .models import NTDReport,Reporter
from django.db.models import Sum
from rapidsms.contrib.locations.models import Location
from skipdict import SkipDict
from generic.views import generic
from generic.sorters import SimpleSorter
from .forms import *
from rapidsms_httprouter.models import Message
def get_prevelance(reports,rep_toret,pdata,ldata):
toret={}
for d in reports:
loc=Location.objects.get(pk=d.pop("reporter__district"))
v_dict=SkipDict(d)
toret["name"]="%s-%s"%(str(loc.name),v_dict.keys()[-1].split("__")[0])
toret["style"]={"fill": '#007ACC',"fill-opacity": 0.8,"stroke":'rgba(29,210,175,0.3)',"stroke-width": 3}
toret["latLng"]=[float(loc.point.latitude),float(loc.point.longitude)]
rep_toret.append(toret)
pdata[str(loc.name).upper()]=v_dict.values()[-1]
ldata[str(loc.name).upper()]=v_dict.keys()[-1].split("__")[0]
def dashboard(request):
bubbles_to_ret=[]
pdata={}
ldata={}
report_q = NTDReport.objects.values("reporter__district", "trachoma", "helminthiasis", "schistosomiasis",
"onchocerciasis", "filariasis", "lymphatic").annotate(Sum("lymphatic"),
Sum("filariasis"),
Sum("onchocerciasis"),
Sum("schistosomiasis"),
Sum("trachoma"),
Sum("helminthiasis"))
get_prevelance(report_q,bubbles_to_ret,pdata,ldata)
context = {
'bubbles_to_ret':mark_safe(bubbles_to_ret),
'pdata':mark_safe(pdata),
'ldata':mark_safe(ldata)
}
return render_to_response("ntds/dashboard.html",context,context_instance=RequestContext(request))
def view_analytics(request):
bubbles_to_ret=[]
pdata={}
ldata={}
report_q = NTDReport.objects.values("reporter__district", "trachoma", "helminthiasis", "schistosomiasis",
"onchocerciasis", "filariasis", "lymphatic").annotate(Sum("lymphatic"),
Sum("filariasis"),
Sum("onchocerciasis"),
Sum("schistosomiasis"),
Sum("trachoma"),
Sum("helminthiasis"))
get_prevelance(report_q,bubbles_to_ret,pdata,ldata)
context = {
'bubbles_to_ret':mark_safe(bubbles_to_ret),
'pdata':mark_safe(pdata),
'ldata':mark_safe(ldata)
}
return render_to_response("ntds/view_analytics.html",context,context_instance=RequestContext(request))
def manage_reporters(request):
queryset=Reporter.objects.all(),
columns = [('Name', True, 'title', SimpleSorter()),
('Parish', True, 'decription', SimpleSorter()),
('Subcounty', True, 'decription', SimpleSorter()),
('District', True, 'decription', SimpleSorter()),
('Mobile', True, 'questions__name', SimpleSorter()),
('Status', True, 'enabled', SimpleSorter()),
('Submissions', False, '', ''),
]
filter_forms = [FreeSearchForm,MultipleDistictFilterForm]
action_forms = [DownloadForm, SendTextForm]
if not request.POST.get("page_num") and request.method =="POST":
return ExcelResponse(queryset)
return generic(
request,
model=Reporter,
queryset=queryset,
filter_forms=filter_forms,
action_forms=action_forms,
objects_per_page=25,
partial_row='ntds/partials/reporter_row.html',
base_template='ntds/reporter_base.html',
columns=columns,
sort_column='pk',
sort_ascending=False,
)
def reports(request):
columns = [('Name', True, 'title', SimpleSorter()),
('Parish', True, 'decription', SimpleSorter()),
('Mobile', True, 'questions__name', SimpleSorter()),
('Status', True, 'enabled', SimpleSorter()),
('Submissions', False, '', ''),
('Last Submission', False, '', ''),
]
filter_forms = [FreeSearchForm,MultipleDistictFilterForm]
action_forms = [DownloadForm, SendTextForm]
return generic(
request,
model=Reporter,
queryset=Reporter.objects.all(),
filter_forms=filter_forms,
action_forms=action_forms,
objects_per_page=25,
partial_row='ntds/partials/report_row.html',
base_template='ntds/report_base.html',
columns=columns,
sort_column='pk',
sort_ascending=False,
)
def view_messages(request):
context={}
columns = [('Name', True, 'title', SimpleSorter()),
('Parish', True, 'decription', SimpleSorter()),
('Mobile', True, 'questions__name', SimpleSorter()),
('Status', True, 'enabled', SimpleSorter()),
('Submissions', False, '', ''),
('Last Submission', False, '', ''),
]
messages=Message.objects.filter(connection__pk__in= Reporter.objects.values("connection")).order_by("-pk")
return generic(
request,
model=Message,
queryset=Reporter.objects.all(),
filter_forms=[],
action_forms=[],
objects_per_page=25,
partial_row='ntds/partials/reporter_row.html',
base_template='ntds/reporter_base.html',
columns=columns,
sort_column='pk',
sort_ascending=False,
current='survey'
)
def disease_report(request):
context={}
columns = [('Name', True, 'title', SimpleSorter()),
('Parish', True, 'decription', SimpleSorter()),
('Mobile', True, 'questions__name', SimpleSorter()),
('Status', True, 'enabled', SimpleSorter()),
('Submissions', False, '', ''),
('Last Submission', False, '', ''),
]
return generic(
request,
model=NTDReport,
queryset=NTDReport.objects.all(),
filter_forms=[],
action_forms=[],
objects_per_page=25,
partial_row='ntds/partials/reporter_row.html',
base_template='ntds/reporter_base.html',
columns=columns,
sort_column='pk',
sort_ascending=False,
current='survey'
)
def drug_report(request):
context={}
columns = [('Name', True, 'title', SimpleSorter()),
('Parish', True, 'decription', SimpleSorter()),
('Mobile', True, 'questions__name', SimpleSorter()),
('Status', True, 'enabled', SimpleSorter()),
('Submissions', False, '', ''),
('Last Submission', False, '', ''),
]
return generic(
request,
model=NTDReport,
queryset=NTDReport.objects.all(),
filter_forms=[],
action_forms=[],
objects_per_page=25,
partial_row='ntds/partials/reporter_row.html',
base_template='ntds/reporter_base.html',
columns=columns,
sort_column='pk',
sort_ascending=False,
current='survey'
)
def view_submissions(request, reporter=None):
columns = [('Name', True, 'title', SimpleSorter()),
('Parish', True, 'decription', SimpleSorter()),
('Mobile', True, 'questions__name', SimpleSorter()),
('Status', True, 'enabled', SimpleSorter()),
('Submissions', False, '', ''),
('Last Submission', False, '', ''),
]
return generic(
request,
model=XFormSubmission,
queryset=XFormSubmission.objects.all(),
filter_forms=[],
action_forms=[],
objects_per_page=25,
partial_row='ntds/partials/submission_row.html',
base_template='ntds/submissions_base.html',
columns=columns,
sort_column='pk',
sort_ascending=False,
current='survey'
)
def edit_reporter(request, pk):
instance = Patient.objects.get(pk=pk)
patient_form = PatientForm(instance=instance, mission=instance.mission)
phase1_form = Phase1SessionForm(instance=instance.phase1)
#missions=Mission.objects.filter(active=True)
if request.method == "POST":
patient_form = PatientForm(request.POST, instance=instance, mission=instance.mission)
phase1_form = Phase1SessionForm(request.POST, instance=instance.phase1)
if patient_form.is_valid() and phase1_form.is_valid():
patient = patient_form.save()
phase1_form.save()
messages.add_message(request, messages.SUCCESS, 'Successfully Updated Patient %s' % patient.identifier)
if request.POST.get("next", None):
return HttpResponseRedirect("/patients/%d/phase2/edit/" % patient.pk)
return HttpResponseRedirect("/patients/new/")
return render_to_response("mission/edit_patient.html",
{"patient_form": patient_form, "patient": instance, "phase1_form": phase1_form,
'tab': 'patient', "identifier": instance.identifier, 'country': instance.country},
context_instance=RequestContext(request))
@user_passes_test(lambda u: u.has_perm('accounts.can_upload_excel'))
def excel_reports(request):
upload_form = ExcelUploadForm()
countries = Location.objects.filter(pk__in=MissionSite.objects.values("country")).order_by("name")
missions = Mission.objects.filter(active=True).order_by("country__name")
if request.method == "POST" :
upload_form = ExcelUploadForm(request.POST, request.FILES)
if upload_form.is_valid():
excel = request.FILES['excel_file']
format=request.FILES['excel_file'].name.split('.')[-1]
if format in ["xlsx","xls"]:
message = upload_mission_excel_xls.delay(excel.temporary_file_path(),request.POST.get("phase"),request.POST.get("mission"),request.POST.get("site"),format=format)
#upload_mission_excel_xls.delay(excel, request.POST.get("phase"), request.POST.get("mission"),request.POST.get("site"),format=format)
messages.add_message(request, messages.SUCCESS, 'Successfully Uploaded Excel sheet.')
else:
messages.add_message(request, messages.ERROR, 'Invalid File format')
else:
messages.add_message(request, messages.ERROR, str(upload_form.errors))
return render_to_response("mission/excel.html",
dict(upload_form=upload_form, countries=countries, missions=missions),
context_instance=RequestContext(request))
@csrf_exempt
@user_passes_test(lambda u: u.has_perm('accounts.can_view_patients'))
def patients(request):
from generic.views import generic
from generic.sorters import SimpleSorter
filter_forms = [SearchPatientsForm, AgeFilterForm]
action_forms = [DownloadForm, SendTextForm, SendEmailForm]
if not request.user.is_superuser or not request.user.is_staff:
country=request.user.get_profile().country
title="Patient Listing For %s" %(" ".join(country.values_list("name",flat=True)))
patients=Patient.objects.filter(mission__country__in=country.values("pk")).prefetch_related("phase1","phase2","phase3","mission")
filter_forms.append(SiteFilterForm)
else:
title="All Patients"
patients=Patient.objects.all().prefetch_related("phase1","phase2","phase3","mission")
filter_forms.append(CountryFilterForm)
partial_row = 'mission/partials/patient_row.html'
base_template = 'mission/partials/patients_base.html'
paginator_template = 'mission/partials/pagination.html'
columns = [('Name', True, 'first_name', SimpleSorter()),
('Age', True, 'age', SimpleSorter()),
('Gender', True, 'gender', SimpleSorter()),
('Country', True, 'mission__country__name', SimpleSorter()),
('Mobile', True, 'mobile', SimpleSorter()),
('Email', True, 'email', SimpleSorter()),
('User', True, 'user', SimpleSorter()),
('Actions', False, '', '')]
return generic(
request,
model=Patient,
queryset=patients,
filter_forms=filter_forms,
action_forms=action_forms,
objects_per_page=25,
partial_row=partial_row,
results_title="Patients",
title=title,
base_template=base_template,
paginator_template=paginator_template,
paginator_func=paginate,
columns=columns,
sort_column='pk',
show_unfiltered=False,
sort_ascending=False,
)
@csrf_exempt
@user_passes_test(lambda u: u.is_staff or u.is_superuser)
def view_messages(request):
#filter_forms = []
#action_forms = []
partial_row = 'mission/partials/messages_row.html'
base_template = 'mission/partials/messages_base.html'
paginator_template = 'mission/partials/pagination.html'
columns = [('Message', True, 'text', SimpleSorter()),
('Type', True, 'type', SimpleSorter()),
('sender', True, 'sender__username', SimpleSorter()),
('Destination', True, 'identifier', SimpleSorter()),
('Date', True, 'created', SimpleSorter()),
('Status', True, 'delivered', SimpleSorter()),
]
return generic(
request,
model=Message,
queryset=Message.objects.all(),
objects_per_page=25,
partial_row=partial_row,
results_title="Messages",
title="All SMS and Emails",
base_template=base_template,
paginator_template=paginator_template,
paginator_func=paginate,
columns=columns,
sort_column='pk',
show_unfiltered=False,
sort_ascending=False,
)
def missions_json(request):
feature_collection = {"type": "FeatureCollection",
"features": []
}
qdict = Patient.objects.exclude(mission__country=None).values("mission__country__name",
"mission__country__pk").annotate(
patients=Count("mission__country__name"), number_of_aids=Sum('aids_received')).order_by("-patients")
features = []
for data in qdict:
feature = {
"type": "Feature",
"properties": {
"name": "name",
"amenity": "Mission",
"popupContent": "popup"
},
"geometry": {
"type": "Point",
"coordinates": []
}
}
feature["properties"]["name"] = data["mission__country__name"]
feature["properties"]["popupContent"] = "<h3>%s</h3>%d hearing aids given to %d People" % (
data["mission__country__name"], data["number_of_aids"], data["patients"])
location = Location.objects.get(pk=data["mission__country__pk"])
feature["geometry"]["coordinates"] = [float(location.longitude), float(location.latitude)]
feature_collection["features"].append(feature)
return JSONResponse(feature_collection)
| [
"mossplix@gmail.com"
] | mossplix@gmail.com |
e931234a89b9968113423e19b604d36d8cbe450c | 4da471a4f93d4365547078dc370d5b2fc627f909 | /K in S.py | 4d2c9ae87b5e4757e7dd80df2eabe9675ac98b54 | [] | no_license | divyamadhurikalluri/pythonprogramming | 2b7b54d355213a44213e59f9137dfba1184cc548 | f7eaf97c7c2aa47358e6494da064d390329971a7 | refs/heads/master | 2020-03-26T23:26:12.631881 | 2019-11-22T14:37:23 | 2019-11-22T14:37:23 | 145,541,952 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | S=raw_input()
K=input()
print(S[:K])
| [
"noreply@github.com"
] | divyamadhurikalluri.noreply@github.com |
f0b7e9b2949c6da520a21ecbd4335a424a92f82d | 20b1642035f3d52607ccde30cfdb3ee89552c9f1 | /backend/detector/service.py | 694ae0e3ad9a4bd835afb863ebc19b4642961e4b | [] | no_license | ScrollPage/Course-work | 26341fc194a8f5acb0b2fa33e3725d72ce09d5e5 | e5de9c6afa393da7065a6468b92a7e48620cb8de | refs/heads/master | 2023-02-13T18:04:32.158034 | 2021-01-05T14:40:31 | 2021-01-05T14:40:31 | 317,627,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from django.conf import settings
import json
from random import uniform, randint
def get_data(id):
lower_limit = settings.LOWER_DETECTOR_DATA_LIMIT
higher_limit = settings.HIGHER_DETECTOR_DATA_LIMIT
d = {
'id': id,
'temp': round(uniform(lower_limit, higher_limit), 2),
'Co2': round(uniform(lower_limit, higher_limit), 2),
'humidity': round(uniform(lower_limit, higher_limit), 2),
'lightning': round(uniform(lower_limit, higher_limit), 2),
'pH': round(uniform(lower_limit, higher_limit), 2),
}
return json.dumps(d) | [
"54814200+reqww@users.noreply.github.com"
] | 54814200+reqww@users.noreply.github.com |
4cafa17a8364e9198707ce77a3f2512935633502 | 38144f6bf69812ee7cde65c4c1cd1748a0711352 | /scripts/pyperfprof | e42eff1cf844ad4772e4ca42948c4f857d57bcef | [
"MIT"
] | permissive | ogrisel/pyperfprof | d1ecbb3ed6ca28b367189a291281845aa9c0ce18 | 6a29278857d86814b4f96cf0c90853eec80fa4d6 | refs/heads/master | 2020-06-09T02:27:53.628483 | 2013-10-29T13:53:22 | 2013-10-29T13:53:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | #!/usr/bin/env python
from pyperfprof.commandline import main
if __name__ == '__main__':
main()
| [
"olivier.grisel@ensta.org"
] | olivier.grisel@ensta.org | |
420485c92ef6a53b2f95460695bd7fac891eeb19 | c026581b6c3855c75e7c9f9c6397acadc7833fb7 | /idm_core/organization/migrations/0005_auto_20170730_0837.py | 4b1297ed480e1e8fa7b7f304ecc27dae6d827fe2 | [] | no_license | mans0954/idm-core | 5734fd08a3c8c5deaec62167c9470336f0c6c6ef | 2a3cf326e0bb3db469e2b318b122033a7dd92b83 | refs/heads/master | 2021-07-24T04:13:47.021951 | 2017-11-02T22:09:25 | 2017-11-02T22:09:25 | 109,317,967 | 1 | 0 | null | 2017-11-02T20:56:01 | 2017-11-02T20:55:58 | null | UTF-8 | Python | false | false | 878 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-30 07:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization', '0004_auto_20170730_0835'),
]
operations = [
migrations.AlterField(
model_name='affiliationtype',
name='edu_person_affiliation_value',
field=models.CharField(blank=True, max_length=64),
),
migrations.AlterField(
model_name='affiliationtype',
name='id',
field=models.CharField(max_length=64, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='roletype',
name='id',
field=models.CharField(max_length=64, primary_key=True, serialize=False),
),
]
| [
"alexander.dutton@it.ox.ac.uk"
] | alexander.dutton@it.ox.ac.uk |
b3db4e93af61bb05a18da0a3896c467c3c863720 | 4589a9ea76e458793ad78059839b81d365f433de | /athena_automation/athenataf/tests/user_management/user_account_settings/UserAccountSettings.py | a655eab509ba12f775b6cb6e5fdba1f6d8c0f8eb | [] | no_license | cash2one/reautomation_handoff | 5e2c4c432d8f658d1b57211782744bd0b56c52f6 | 7ef83572d659db35036189eb394f99de1369db5a | refs/heads/master | 2020-05-22T17:56:33.214080 | 2015-07-13T07:51:18 | 2015-07-13T07:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | import logging
logger = logging.getLogger('athenataf')
from athenataf.lib.functionality.test.AthenaGUITestCase import AthenaGUITestCase
class UserAccountSettings(AthenaGUITestCase):
'''
Test class to validate the user maintenance info
'''
def test_ath_8214_validate_user_interface(self):
self.TopPanel.validate_user_interface()
self.TopPanel.setting_default_value()
def test_ath_8219_validate_time_zone(self):
self.TopPanel.validate_time_zone()
def test_ath_8220_validate_idle_timeout(self):
self.TopPanel.validate_idle_timeout()
self.TopPanel.setting_default_value()
def test_ath_8217_login_Logout(self):
self.logout()
self.login('default')
def test_ath_8218_login_Logout(self):
conf = self.config.config_vars
user_management_page=self.LeftPanel.go_to_user_management()
user_management_page.delete_if_any_user_present()
user_management_page.create_new_user(conf.email_read_write,conf.user_setting_group_value,conf.user_access_level_read_write)
self.logout()
self.login('read_write')
inner_left_panel = self.TopPanel.click_slider_icon()
inner_left_panel.assert_virtual_controller()
inner_left_panel.click_on_close_icon()
| [
"raju_set@testmile.com"
] | raju_set@testmile.com |
33ee35caf546fe61d1dea7525568c641b2011f77 | bc6b561958649c391c159d4dd3363c60eeabc7e4 | /mayan/apps/common/migrations/0013_auto_20190725_0452.py | 1a8d0b92cd3b1fc9fc3b6c322f3f6323185be7d8 | [
"Apache-2.0"
] | permissive | chrisranjana/Mayan-EDMS | 37deb105cda268768fea502491ae875ff905e0e9 | 34b414ce49a2eb156e27dc1a2915e52121c9d1b7 | refs/heads/master | 2020-12-22T13:50:41.263625 | 2020-01-28T18:45:24 | 2020-01-28T18:45:24 | 236,804,825 | 0 | 1 | NOASSERTION | 2020-01-28T18:12:53 | 2020-01-28T18:12:52 | null | UTF-8 | Python | false | false | 18,413 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-07-25 04:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0012_auto_20190711_0548'),
]
operations = [
migrations.AlterField(
model_name='userlocaleprofile',
name='timezone',
field=models.CharField(choices=[('Africa/Abidjan', 'Africa/Abidjan'), ('Africa/Accra', 'Africa/Accra'), ('Africa/Addis_Ababa', 'Africa/Addis_Ababa'), ('Africa/Algiers', 'Africa/Algiers'), ('Africa/Asmara', 'Africa/Asmara'), ('Africa/Bamako', 'Africa/Bamako'), ('Africa/Bangui', 'Africa/Bangui'), ('Africa/Banjul', 'Africa/Banjul'), ('Africa/Bissau', 'Africa/Bissau'), ('Africa/Blantyre', 'Africa/Blantyre'), ('Africa/Brazzaville', 'Africa/Brazzaville'), ('Africa/Bujumbura', 'Africa/Bujumbura'), ('Africa/Cairo', 'Africa/Cairo'), ('Africa/Casablanca', 'Africa/Casablanca'), ('Africa/Ceuta', 'Africa/Ceuta'), ('Africa/Conakry', 'Africa/Conakry'), ('Africa/Dakar', 'Africa/Dakar'), ('Africa/Dar_es_Salaam', 'Africa/Dar_es_Salaam'), ('Africa/Djibouti', 'Africa/Djibouti'), ('Africa/Douala', 'Africa/Douala'), ('Africa/El_Aaiun', 'Africa/El_Aaiun'), ('Africa/Freetown', 'Africa/Freetown'), ('Africa/Gaborone', 'Africa/Gaborone'), ('Africa/Harare', 'Africa/Harare'), ('Africa/Johannesburg', 'Africa/Johannesburg'), ('Africa/Juba', 'Africa/Juba'), ('Africa/Kampala', 'Africa/Kampala'), ('Africa/Khartoum', 'Africa/Khartoum'), ('Africa/Kigali', 'Africa/Kigali'), ('Africa/Kinshasa', 'Africa/Kinshasa'), ('Africa/Lagos', 'Africa/Lagos'), ('Africa/Libreville', 'Africa/Libreville'), ('Africa/Lome', 'Africa/Lome'), ('Africa/Luanda', 'Africa/Luanda'), ('Africa/Lubumbashi', 'Africa/Lubumbashi'), ('Africa/Lusaka', 'Africa/Lusaka'), ('Africa/Malabo', 'Africa/Malabo'), ('Africa/Maputo', 'Africa/Maputo'), ('Africa/Maseru', 'Africa/Maseru'), ('Africa/Mbabane', 'Africa/Mbabane'), ('Africa/Mogadishu', 'Africa/Mogadishu'), ('Africa/Monrovia', 'Africa/Monrovia'), ('Africa/Nairobi', 'Africa/Nairobi'), ('Africa/Ndjamena', 'Africa/Ndjamena'), ('Africa/Niamey', 'Africa/Niamey'), ('Africa/Nouakchott', 'Africa/Nouakchott'), ('Africa/Ouagadougou', 'Africa/Ouagadougou'), ('Africa/Porto-Novo', 'Africa/Porto-Novo'), ('Africa/Sao_Tome', 'Africa/Sao_Tome'), ('Africa/Tripoli', 'Africa/Tripoli'), ('Africa/Tunis', 
'Africa/Tunis'), ('Africa/Windhoek', 'Africa/Windhoek'), ('America/Adak', 'America/Adak'), ('America/Anchorage', 'America/Anchorage'), ('America/Anguilla', 'America/Anguilla'), ('America/Antigua', 'America/Antigua'), ('America/Araguaina', 'America/Araguaina'), ('America/Argentina/Buenos_Aires', 'America/Argentina/Buenos_Aires'), ('America/Argentina/Catamarca', 'America/Argentina/Catamarca'), ('America/Argentina/Cordoba', 'America/Argentina/Cordoba'), ('America/Argentina/Jujuy', 'America/Argentina/Jujuy'), ('America/Argentina/La_Rioja', 'America/Argentina/La_Rioja'), ('America/Argentina/Mendoza', 'America/Argentina/Mendoza'), ('America/Argentina/Rio_Gallegos', 'America/Argentina/Rio_Gallegos'), ('America/Argentina/Salta', 'America/Argentina/Salta'), ('America/Argentina/San_Juan', 'America/Argentina/San_Juan'), ('America/Argentina/San_Luis', 'America/Argentina/San_Luis'), ('America/Argentina/Tucuman', 'America/Argentina/Tucuman'), ('America/Argentina/Ushuaia', 'America/Argentina/Ushuaia'), ('America/Aruba', 'America/Aruba'), ('America/Asuncion', 'America/Asuncion'), ('America/Atikokan', 'America/Atikokan'), ('America/Bahia', 'America/Bahia'), ('America/Bahia_Banderas', 'America/Bahia_Banderas'), ('America/Barbados', 'America/Barbados'), ('America/Belem', 'America/Belem'), ('America/Belize', 'America/Belize'), ('America/Blanc-Sablon', 'America/Blanc-Sablon'), ('America/Boa_Vista', 'America/Boa_Vista'), ('America/Bogota', 'America/Bogota'), ('America/Boise', 'America/Boise'), ('America/Cambridge_Bay', 'America/Cambridge_Bay'), ('America/Campo_Grande', 'America/Campo_Grande'), ('America/Cancun', 'America/Cancun'), ('America/Caracas', 'America/Caracas'), ('America/Cayenne', 'America/Cayenne'), ('America/Cayman', 'America/Cayman'), ('America/Chicago', 'America/Chicago'), ('America/Chihuahua', 'America/Chihuahua'), ('America/Costa_Rica', 'America/Costa_Rica'), ('America/Creston', 'America/Creston'), ('America/Cuiaba', 'America/Cuiaba'), ('America/Curacao', 
'America/Curacao'), ('America/Danmarkshavn', 'America/Danmarkshavn'), ('America/Dawson', 'America/Dawson'), ('America/Dawson_Creek', 'America/Dawson_Creek'), ('America/Denver', 'America/Denver'), ('America/Detroit', 'America/Detroit'), ('America/Dominica', 'America/Dominica'), ('America/Edmonton', 'America/Edmonton'), ('America/Eirunepe', 'America/Eirunepe'), ('America/El_Salvador', 'America/El_Salvador'), ('America/Fort_Nelson', 'America/Fort_Nelson'), ('America/Fortaleza', 'America/Fortaleza'), ('America/Glace_Bay', 'America/Glace_Bay'), ('America/Godthab', 'America/Godthab'), ('America/Goose_Bay', 'America/Goose_Bay'), ('America/Grand_Turk', 'America/Grand_Turk'), ('America/Grenada', 'America/Grenada'), ('America/Guadeloupe', 'America/Guadeloupe'), ('America/Guatemala', 'America/Guatemala'), ('America/Guayaquil', 'America/Guayaquil'), ('America/Guyana', 'America/Guyana'), ('America/Halifax', 'America/Halifax'), ('America/Havana', 'America/Havana'), ('America/Hermosillo', 'America/Hermosillo'), ('America/Indiana/Indianapolis', 'America/Indiana/Indianapolis'), ('America/Indiana/Knox', 'America/Indiana/Knox'), ('America/Indiana/Marengo', 'America/Indiana/Marengo'), ('America/Indiana/Petersburg', 'America/Indiana/Petersburg'), ('America/Indiana/Tell_City', 'America/Indiana/Tell_City'), ('America/Indiana/Vevay', 'America/Indiana/Vevay'), ('America/Indiana/Vincennes', 'America/Indiana/Vincennes'), ('America/Indiana/Winamac', 'America/Indiana/Winamac'), ('America/Inuvik', 'America/Inuvik'), ('America/Iqaluit', 'America/Iqaluit'), ('America/Jamaica', 'America/Jamaica'), ('America/Juneau', 'America/Juneau'), ('America/Kentucky/Louisville', 'America/Kentucky/Louisville'), ('America/Kentucky/Monticello', 'America/Kentucky/Monticello'), ('America/Kralendijk', 'America/Kralendijk'), ('America/La_Paz', 'America/La_Paz'), ('America/Lima', 'America/Lima'), ('America/Los_Angeles', 'America/Los_Angeles'), ('America/Lower_Princes', 'America/Lower_Princes'), ('America/Maceio', 
'America/Maceio'), ('America/Managua', 'America/Managua'), ('America/Manaus', 'America/Manaus'), ('America/Marigot', 'America/Marigot'), ('America/Martinique', 'America/Martinique'), ('America/Matamoros', 'America/Matamoros'), ('America/Mazatlan', 'America/Mazatlan'), ('America/Menominee', 'America/Menominee'), ('America/Merida', 'America/Merida'), ('America/Metlakatla', 'America/Metlakatla'), ('America/Mexico_City', 'America/Mexico_City'), ('America/Miquelon', 'America/Miquelon'), ('America/Moncton', 'America/Moncton'), ('America/Monterrey', 'America/Monterrey'), ('America/Montevideo', 'America/Montevideo'), ('America/Montserrat', 'America/Montserrat'), ('America/Nassau', 'America/Nassau'), ('America/New_York', 'America/New_York'), ('America/Nipigon', 'America/Nipigon'), ('America/Nome', 'America/Nome'), ('America/Noronha', 'America/Noronha'), ('America/North_Dakota/Beulah', 'America/North_Dakota/Beulah'), ('America/North_Dakota/Center', 'America/North_Dakota/Center'), ('America/North_Dakota/New_Salem', 'America/North_Dakota/New_Salem'), ('America/Ojinaga', 'America/Ojinaga'), ('America/Panama', 'America/Panama'), ('America/Pangnirtung', 'America/Pangnirtung'), ('America/Paramaribo', 'America/Paramaribo'), ('America/Phoenix', 'America/Phoenix'), ('America/Port-au-Prince', 'America/Port-au-Prince'), ('America/Port_of_Spain', 'America/Port_of_Spain'), ('America/Porto_Velho', 'America/Porto_Velho'), ('America/Puerto_Rico', 'America/Puerto_Rico'), ('America/Punta_Arenas', 'America/Punta_Arenas'), ('America/Rainy_River', 'America/Rainy_River'), ('America/Rankin_Inlet', 'America/Rankin_Inlet'), ('America/Recife', 'America/Recife'), ('America/Regina', 'America/Regina'), ('America/Resolute', 'America/Resolute'), ('America/Rio_Branco', 'America/Rio_Branco'), ('America/Santarem', 'America/Santarem'), ('America/Santiago', 'America/Santiago'), ('America/Santo_Domingo', 'America/Santo_Domingo'), ('America/Sao_Paulo', 'America/Sao_Paulo'), ('America/Scoresbysund', 
'America/Scoresbysund'), ('America/Sitka', 'America/Sitka'), ('America/St_Barthelemy', 'America/St_Barthelemy'), ('America/St_Johns', 'America/St_Johns'), ('America/St_Kitts', 'America/St_Kitts'), ('America/St_Lucia', 'America/St_Lucia'), ('America/St_Thomas', 'America/St_Thomas'), ('America/St_Vincent', 'America/St_Vincent'), ('America/Swift_Current', 'America/Swift_Current'), ('America/Tegucigalpa', 'America/Tegucigalpa'), ('America/Thule', 'America/Thule'), ('America/Thunder_Bay', 'America/Thunder_Bay'), ('America/Tijuana', 'America/Tijuana'), ('America/Toronto', 'America/Toronto'), ('America/Tortola', 'America/Tortola'), ('America/Vancouver', 'America/Vancouver'), ('America/Whitehorse', 'America/Whitehorse'), ('America/Winnipeg', 'America/Winnipeg'), ('America/Yakutat', 'America/Yakutat'), ('America/Yellowknife', 'America/Yellowknife'), ('Antarctica/Casey', 'Antarctica/Casey'), ('Antarctica/Davis', 'Antarctica/Davis'), ('Antarctica/DumontDUrville', 'Antarctica/DumontDUrville'), ('Antarctica/Macquarie', 'Antarctica/Macquarie'), ('Antarctica/Mawson', 'Antarctica/Mawson'), ('Antarctica/McMurdo', 'Antarctica/McMurdo'), ('Antarctica/Palmer', 'Antarctica/Palmer'), ('Antarctica/Rothera', 'Antarctica/Rothera'), ('Antarctica/Syowa', 'Antarctica/Syowa'), ('Antarctica/Troll', 'Antarctica/Troll'), ('Antarctica/Vostok', 'Antarctica/Vostok'), ('Arctic/Longyearbyen', 'Arctic/Longyearbyen'), ('Asia/Aden', 'Asia/Aden'), ('Asia/Almaty', 'Asia/Almaty'), ('Asia/Amman', 'Asia/Amman'), ('Asia/Anadyr', 'Asia/Anadyr'), ('Asia/Aqtau', 'Asia/Aqtau'), ('Asia/Aqtobe', 'Asia/Aqtobe'), ('Asia/Ashgabat', 'Asia/Ashgabat'), ('Asia/Atyrau', 'Asia/Atyrau'), ('Asia/Baghdad', 'Asia/Baghdad'), ('Asia/Bahrain', 'Asia/Bahrain'), ('Asia/Baku', 'Asia/Baku'), ('Asia/Bangkok', 'Asia/Bangkok'), ('Asia/Barnaul', 'Asia/Barnaul'), ('Asia/Beirut', 'Asia/Beirut'), ('Asia/Bishkek', 'Asia/Bishkek'), ('Asia/Brunei', 'Asia/Brunei'), ('Asia/Chita', 'Asia/Chita'), ('Asia/Choibalsan', 'Asia/Choibalsan'), 
('Asia/Colombo', 'Asia/Colombo'), ('Asia/Damascus', 'Asia/Damascus'), ('Asia/Dhaka', 'Asia/Dhaka'), ('Asia/Dili', 'Asia/Dili'), ('Asia/Dubai', 'Asia/Dubai'), ('Asia/Dushanbe', 'Asia/Dushanbe'), ('Asia/Famagusta', 'Asia/Famagusta'), ('Asia/Gaza', 'Asia/Gaza'), ('Asia/Hebron', 'Asia/Hebron'), ('Asia/Ho_Chi_Minh', 'Asia/Ho_Chi_Minh'), ('Asia/Hong_Kong', 'Asia/Hong_Kong'), ('Asia/Hovd', 'Asia/Hovd'), ('Asia/Irkutsk', 'Asia/Irkutsk'), ('Asia/Jakarta', 'Asia/Jakarta'), ('Asia/Jayapura', 'Asia/Jayapura'), ('Asia/Jerusalem', 'Asia/Jerusalem'), ('Asia/Kabul', 'Asia/Kabul'), ('Asia/Kamchatka', 'Asia/Kamchatka'), ('Asia/Karachi', 'Asia/Karachi'), ('Asia/Kathmandu', 'Asia/Kathmandu'), ('Asia/Khandyga', 'Asia/Khandyga'), ('Asia/Kolkata', 'Asia/Kolkata'), ('Asia/Krasnoyarsk', 'Asia/Krasnoyarsk'), ('Asia/Kuala_Lumpur', 'Asia/Kuala_Lumpur'), ('Asia/Kuching', 'Asia/Kuching'), ('Asia/Kuwait', 'Asia/Kuwait'), ('Asia/Macau', 'Asia/Macau'), ('Asia/Magadan', 'Asia/Magadan'), ('Asia/Makassar', 'Asia/Makassar'), ('Asia/Manila', 'Asia/Manila'), ('Asia/Muscat', 'Asia/Muscat'), ('Asia/Nicosia', 'Asia/Nicosia'), ('Asia/Novokuznetsk', 'Asia/Novokuznetsk'), ('Asia/Novosibirsk', 'Asia/Novosibirsk'), ('Asia/Omsk', 'Asia/Omsk'), ('Asia/Oral', 'Asia/Oral'), ('Asia/Phnom_Penh', 'Asia/Phnom_Penh'), ('Asia/Pontianak', 'Asia/Pontianak'), ('Asia/Pyongyang', 'Asia/Pyongyang'), ('Asia/Qatar', 'Asia/Qatar'), ('Asia/Qostanay', 'Asia/Qostanay'), ('Asia/Qyzylorda', 'Asia/Qyzylorda'), ('Asia/Riyadh', 'Asia/Riyadh'), ('Asia/Sakhalin', 'Asia/Sakhalin'), ('Asia/Samarkand', 'Asia/Samarkand'), ('Asia/Seoul', 'Asia/Seoul'), ('Asia/Shanghai', 'Asia/Shanghai'), ('Asia/Singapore', 'Asia/Singapore'), ('Asia/Srednekolymsk', 'Asia/Srednekolymsk'), ('Asia/Taipei', 'Asia/Taipei'), ('Asia/Tashkent', 'Asia/Tashkent'), ('Asia/Tbilisi', 'Asia/Tbilisi'), ('Asia/Tehran', 'Asia/Tehran'), ('Asia/Thimphu', 'Asia/Thimphu'), ('Asia/Tokyo', 'Asia/Tokyo'), ('Asia/Tomsk', 'Asia/Tomsk'), ('Asia/Ulaanbaatar', 'Asia/Ulaanbaatar'), 
('Asia/Urumqi', 'Asia/Urumqi'), ('Asia/Ust-Nera', 'Asia/Ust-Nera'), ('Asia/Vientiane', 'Asia/Vientiane'), ('Asia/Vladivostok', 'Asia/Vladivostok'), ('Asia/Yakutsk', 'Asia/Yakutsk'), ('Asia/Yangon', 'Asia/Yangon'), ('Asia/Yekaterinburg', 'Asia/Yekaterinburg'), ('Asia/Yerevan', 'Asia/Yerevan'), ('Atlantic/Azores', 'Atlantic/Azores'), ('Atlantic/Bermuda', 'Atlantic/Bermuda'), ('Atlantic/Canary', 'Atlantic/Canary'), ('Atlantic/Cape_Verde', 'Atlantic/Cape_Verde'), ('Atlantic/Faroe', 'Atlantic/Faroe'), ('Atlantic/Madeira', 'Atlantic/Madeira'), ('Atlantic/Reykjavik', 'Atlantic/Reykjavik'), ('Atlantic/South_Georgia', 'Atlantic/South_Georgia'), ('Atlantic/St_Helena', 'Atlantic/St_Helena'), ('Atlantic/Stanley', 'Atlantic/Stanley'), ('Australia/Adelaide', 'Australia/Adelaide'), ('Australia/Brisbane', 'Australia/Brisbane'), ('Australia/Broken_Hill', 'Australia/Broken_Hill'), ('Australia/Currie', 'Australia/Currie'), ('Australia/Darwin', 'Australia/Darwin'), ('Australia/Eucla', 'Australia/Eucla'), ('Australia/Hobart', 'Australia/Hobart'), ('Australia/Lindeman', 'Australia/Lindeman'), ('Australia/Lord_Howe', 'Australia/Lord_Howe'), ('Australia/Melbourne', 'Australia/Melbourne'), ('Australia/Perth', 'Australia/Perth'), ('Australia/Sydney', 'Australia/Sydney'), ('Canada/Atlantic', 'Canada/Atlantic'), ('Canada/Central', 'Canada/Central'), ('Canada/Eastern', 'Canada/Eastern'), ('Canada/Mountain', 'Canada/Mountain'), ('Canada/Newfoundland', 'Canada/Newfoundland'), ('Canada/Pacific', 'Canada/Pacific'), ('Europe/Amsterdam', 'Europe/Amsterdam'), ('Europe/Andorra', 'Europe/Andorra'), ('Europe/Astrakhan', 'Europe/Astrakhan'), ('Europe/Athens', 'Europe/Athens'), ('Europe/Belgrade', 'Europe/Belgrade'), ('Europe/Berlin', 'Europe/Berlin'), ('Europe/Bratislava', 'Europe/Bratislava'), ('Europe/Brussels', 'Europe/Brussels'), ('Europe/Bucharest', 'Europe/Bucharest'), ('Europe/Budapest', 'Europe/Budapest'), ('Europe/Busingen', 'Europe/Busingen'), ('Europe/Chisinau', 'Europe/Chisinau'), 
('Europe/Copenhagen', 'Europe/Copenhagen'), ('Europe/Dublin', 'Europe/Dublin'), ('Europe/Gibraltar', 'Europe/Gibraltar'), ('Europe/Guernsey', 'Europe/Guernsey'), ('Europe/Helsinki', 'Europe/Helsinki'), ('Europe/Isle_of_Man', 'Europe/Isle_of_Man'), ('Europe/Istanbul', 'Europe/Istanbul'), ('Europe/Jersey', 'Europe/Jersey'), ('Europe/Kaliningrad', 'Europe/Kaliningrad'), ('Europe/Kiev', 'Europe/Kiev'), ('Europe/Kirov', 'Europe/Kirov'), ('Europe/Lisbon', 'Europe/Lisbon'), ('Europe/Ljubljana', 'Europe/Ljubljana'), ('Europe/London', 'Europe/London'), ('Europe/Luxembourg', 'Europe/Luxembourg'), ('Europe/Madrid', 'Europe/Madrid'), ('Europe/Malta', 'Europe/Malta'), ('Europe/Mariehamn', 'Europe/Mariehamn'), ('Europe/Minsk', 'Europe/Minsk'), ('Europe/Monaco', 'Europe/Monaco'), ('Europe/Moscow', 'Europe/Moscow'), ('Europe/Oslo', 'Europe/Oslo'), ('Europe/Paris', 'Europe/Paris'), ('Europe/Podgorica', 'Europe/Podgorica'), ('Europe/Prague', 'Europe/Prague'), ('Europe/Riga', 'Europe/Riga'), ('Europe/Rome', 'Europe/Rome'), ('Europe/Samara', 'Europe/Samara'), ('Europe/San_Marino', 'Europe/San_Marino'), ('Europe/Sarajevo', 'Europe/Sarajevo'), ('Europe/Saratov', 'Europe/Saratov'), ('Europe/Simferopol', 'Europe/Simferopol'), ('Europe/Skopje', 'Europe/Skopje'), ('Europe/Sofia', 'Europe/Sofia'), ('Europe/Stockholm', 'Europe/Stockholm'), ('Europe/Tallinn', 'Europe/Tallinn'), ('Europe/Tirane', 'Europe/Tirane'), ('Europe/Ulyanovsk', 'Europe/Ulyanovsk'), ('Europe/Uzhgorod', 'Europe/Uzhgorod'), ('Europe/Vaduz', 'Europe/Vaduz'), ('Europe/Vatican', 'Europe/Vatican'), ('Europe/Vienna', 'Europe/Vienna'), ('Europe/Vilnius', 'Europe/Vilnius'), ('Europe/Volgograd', 'Europe/Volgograd'), ('Europe/Warsaw', 'Europe/Warsaw'), ('Europe/Zagreb', 'Europe/Zagreb'), ('Europe/Zaporozhye', 'Europe/Zaporozhye'), ('Europe/Zurich', 'Europe/Zurich'), ('GMT', 'GMT'), ('Indian/Antananarivo', 'Indian/Antananarivo'), ('Indian/Chagos', 'Indian/Chagos'), ('Indian/Christmas', 'Indian/Christmas'), ('Indian/Cocos', 
'Indian/Cocos'), ('Indian/Comoro', 'Indian/Comoro'), ('Indian/Kerguelen', 'Indian/Kerguelen'), ('Indian/Mahe', 'Indian/Mahe'), ('Indian/Maldives', 'Indian/Maldives'), ('Indian/Mauritius', 'Indian/Mauritius'), ('Indian/Mayotte', 'Indian/Mayotte'), ('Indian/Reunion', 'Indian/Reunion'), ('Pacific/Apia', 'Pacific/Apia'), ('Pacific/Auckland', 'Pacific/Auckland'), ('Pacific/Bougainville', 'Pacific/Bougainville'), ('Pacific/Chatham', 'Pacific/Chatham'), ('Pacific/Chuuk', 'Pacific/Chuuk'), ('Pacific/Easter', 'Pacific/Easter'), ('Pacific/Efate', 'Pacific/Efate'), ('Pacific/Enderbury', 'Pacific/Enderbury'), ('Pacific/Fakaofo', 'Pacific/Fakaofo'), ('Pacific/Fiji', 'Pacific/Fiji'), ('Pacific/Funafuti', 'Pacific/Funafuti'), ('Pacific/Galapagos', 'Pacific/Galapagos'), ('Pacific/Gambier', 'Pacific/Gambier'), ('Pacific/Guadalcanal', 'Pacific/Guadalcanal'), ('Pacific/Guam', 'Pacific/Guam'), ('Pacific/Honolulu', 'Pacific/Honolulu'), ('Pacific/Kiritimati', 'Pacific/Kiritimati'), ('Pacific/Kosrae', 'Pacific/Kosrae'), ('Pacific/Kwajalein', 'Pacific/Kwajalein'), ('Pacific/Majuro', 'Pacific/Majuro'), ('Pacific/Marquesas', 'Pacific/Marquesas'), ('Pacific/Midway', 'Pacific/Midway'), ('Pacific/Nauru', 'Pacific/Nauru'), ('Pacific/Niue', 'Pacific/Niue'), ('Pacific/Norfolk', 'Pacific/Norfolk'), ('Pacific/Noumea', 'Pacific/Noumea'), ('Pacific/Pago_Pago', 'Pacific/Pago_Pago'), ('Pacific/Palau', 'Pacific/Palau'), ('Pacific/Pitcairn', 'Pacific/Pitcairn'), ('Pacific/Pohnpei', 'Pacific/Pohnpei'), ('Pacific/Port_Moresby', 'Pacific/Port_Moresby'), ('Pacific/Rarotonga', 'Pacific/Rarotonga'), ('Pacific/Saipan', 'Pacific/Saipan'), ('Pacific/Tahiti', 'Pacific/Tahiti'), ('Pacific/Tarawa', 'Pacific/Tarawa'), ('Pacific/Tongatapu', 'Pacific/Tongatapu'), ('Pacific/Wake', 'Pacific/Wake'), ('Pacific/Wallis', 'Pacific/Wallis'), ('US/Alaska', 'US/Alaska'), ('US/Arizona', 'US/Arizona'), ('US/Central', 'US/Central'), ('US/Eastern', 'US/Eastern'), ('US/Hawaii', 'US/Hawaii'), ('US/Mountain', 'US/Mountain'), 
('US/Pacific', 'US/Pacific'), ('UTC', 'UTC')], max_length=48, verbose_name='Timezone'),
),
]
| [
"roberto.rosario@mayan-edms.com"
] | roberto.rosario@mayan-edms.com |
f2d40a5292c310b764bec1b7376a825777ec117b | d7b3b2bb269934deee2ff49078ab602d642f5c88 | /atspy/ssa.py | 98a39e01a0606e4d9f6a0629277d1995f90e92b1 | [] | no_license | MindaugasVaitkus2/atspy | 4b7239f95b17ba3f59eae756611f096fe9c7138a | 48ca00db115c3de1a99b3dc57cffb5339dfa46c7 | refs/heads/master | 2020-12-28T01:15:52.993999 | 2020-02-04T05:52:43 | 2020-02-04T05:52:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,205 | py | import numpy as np
import pandas as pd
from numpy import matrix as m
from scipy import linalg
try:
    # seaborn is optional: importing it only restyles matplotlib output.
    import seaborn
except ImportError:
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and hid unrelated errors.
    pass
from matplotlib.pylab import rcParams
# Default figure size (inches) for every plot produced by this module.
rcParams['figure.figsize'] = 11, 4
class mySSA(object):
    '''Singular Spectrum Analysis object'''
    # Pipeline: embed() builds the trajectory matrix, decompose() runs the
    # SVD, then view_reconstruction()/forecast_recurrent() consume the parts.
    def __init__(self, time_series):
        # Accepts anything pandas can wrap (Series, list, ndarray, DataFrame).
        self.ts = pd.DataFrame(time_series)
        self.ts_name = self.ts.columns.tolist()[0]
        # pandas names the column 0 when the input had no name; use 'ts' instead.
        if self.ts_name==0:
            self.ts_name = 'ts'
        self.ts_v = self.ts.values        # raw values as a 2-D ndarray
        self.ts_N = self.ts.shape[0]      # series length N
        self.freq = self.ts.index.inferred_freq  # None unless the index is datetime-like
    @staticmethod
    def _printer(name, *args):
        '''Helper function to print messages neatly'''
        print('-'*40)
        print(name+':')
        for msg in args:
            print(msg)
    @staticmethod
    def _dot(x,y):
        '''Alternative formulation of dot product to allow missing values in arrays/matrices'''
        # NOTE(review): unimplemented stub; nothing in this class calls it.
        pass
    @staticmethod
    def get_contributions(X=None, s=None, plot=True):
        '''Calculate the relative contribution of each of the singular values

        X is the trajectory matrix and s its singular values;
        Contribution_i = s_i**2 / ||X||_F**2.  Returns a bar-chart Axes when
        plot=True, otherwise a DataFrame of the non-zero contributions.'''
        lambdas = np.power(s,2)
        frob_norm = np.linalg.norm(X)   # Frobenius norm; its square equals sum(lambdas)
        ret = pd.DataFrame(lambdas/(frob_norm**2), columns=['Contribution'])
        ret['Contribution'] = ret.Contribution.round(4)
        if plot:
            ax = ret[ret.Contribution!=0].plot.bar(legend=False)
            ax.set_xlabel("Lambda_i")
            ax.set_title('Non-zero contributions of Lambda_i')
            vals = ax.get_yticks()
            ax.set_yticklabels(['{:3.2f}%'.format(x*100) for x in vals])
            return ax
        return ret[ret.Contribution>0]
    @staticmethod
    def diagonal_averaging(hankel_matrix):
        '''Performs anti-diagonal averaging from given hankel matrix
        Returns: Pandas DataFrame object containing the reconstructed series'''
        mat = m(hankel_matrix)
        L, K = mat.shape
        L_star, K_star = min(L,K), max(L,K)
        new = np.zeros((L,K))
        # NOTE(review): `new` above is never used.
        if L > K:
            # Work on the transpose so the loop below can assume rows <= columns.
            mat = mat.T
        ret = []
        #Diagonal Averaging
        for k in range(1-K_star, L_star):
            # Boolean mask selecting the k-th anti-diagonal of the matrix.
            mask = np.eye(K_star, k=k, dtype='bool')[::-1][:L_star,:]
            mask_n = sum(sum(mask))          # number of elements on this anti-diagonal
            # mask=1-mask keeps only the anti-diagonal visible to the masked sum.
            ma = np.ma.masked_array(mat.A, mask=1-mask)
            ret+=[ma.sum()/mask_n]           # average of the anti-diagonal
        return pd.DataFrame(ret).rename(columns={0:'Reconstruction'})
    def view_time_series(self):
        '''Plot the time series'''
        self.ts.plot(title='Original Time Series')
    def embed(self, embedding_dimension=None, suspected_frequency=None, verbose=False, return_df=False):
        '''Embed the time series with embedding_dimension window size.
        Optional: suspected_frequency changes embedding_dimension such that it is divisible by suspected frequency'''
        # Default window length: half the series length.
        if not embedding_dimension:
            self.embedding_dimension = self.ts_N//2
        else:
            self.embedding_dimension = embedding_dimension
        if suspected_frequency:
            self.suspected_frequency = suspected_frequency
            # Round the window length down to a multiple of the suspected period.
            self.embedding_dimension = (self.embedding_dimension//self.suspected_frequency)*self.suspected_frequency
        # K = number of lagged windows (columns of the trajectory matrix).
        self.K = self.ts_N-self.embedding_dimension+1
        # Build the L x K trajectory (Hankel) matrix of lagged windows.
        self.X = m(linalg.hankel(self.ts, np.zeros(self.embedding_dimension))).T[:,:self.K]
        self.X_df = pd.DataFrame(self.X)
        # Split columns containing NaNs (missing data) from fully observed ones.
        self.X_complete = self.X_df.dropna(axis=1)
        self.X_com = m(self.X_complete.values)
        self.X_missing = self.X_df.drop(self.X_complete.columns, axis=1)
        self.X_miss = m(self.X_missing.values)
        # NOTE(review): 'trajectory_dimentions' (sic) is kept — it is a public attribute.
        self.trajectory_dimentions = self.X_df.shape
        self.complete_dimensions = self.X_complete.shape
        self.missing_dimensions = self.X_missing.shape
        self.no_missing = self.missing_dimensions[1]==0
        if verbose:
            msg1 = 'Embedding dimension\t: {}\nTrajectory dimensions\t: {}'
            msg2 = 'Complete dimension\t: {}\nMissing dimension \t: {}'
            msg1 = msg1.format(self.embedding_dimension, self.trajectory_dimentions)
            msg2 = msg2.format(self.complete_dimensions, self.missing_dimensions)
            self._printer('EMBEDDING SUMMARY', msg1, msg2)
        if return_df:
            return self.X_df
    def decompose(self, verbose=False):
        '''Perform the Singular Value Decomposition and identify the rank of the embedding subspace
        Characteristic of projection: the proportion of variance captured in the subspace'''
        X = self.X_com
        # Decompose S = X X^T; sqrt of its eigenvalues gives the singular values of X.
        self.S = X*X.T
        self.U, self.s, self.V = linalg.svd(self.S)
        self.U, self.s, self.V = m(self.U), np.sqrt(self.s), m(self.V)
        self.d = np.linalg.matrix_rank(X)
        Vs, Xs, Ys, Zs = {}, {}, {}, {}
        # Build the rank-one elementary matrices X_i = s_i * U_i * V_i^T per component.
        for i in range(self.d):
            Zs[i] = self.s[i]*self.V[:,i]
            Vs[i] = X.T*(self.U[:,i]/self.s[i])
            Ys[i] = self.s[i]*self.U[:,i]
            Xs[i] = Ys[i]*(m(Vs[i]).T)
        self.Vs, self.Xs = Vs, Xs
        self.s_contributions = self.get_contributions(X, self.s, False)
        # r = number of components whose rounded contribution is non-zero.
        self.r = len(self.s_contributions[self.s_contributions>0])
        # Share of total variance captured by the first r components.
        self.r_characteristic = round((self.s[:self.r]**2).sum()/(self.s**2).sum(),4)
        self.orthonormal_base = {i:self.U[:,i] for i in range(self.r)}
        if verbose:
            msg1 = 'Rank of trajectory\t\t: {}\nDimension of projection space\t: {}'
            msg1 = msg1.format(self.d, self.r)
            msg2 = 'Characteristic of projection\t: {}'.format(self.r_characteristic)
            self._printer('DECOMPOSITION SUMMARY', msg1, msg2)
    def view_s_contributions(self, adjust_scale=False, cumulative=False, return_df=False):
        '''View the contribution to variance of each singular value and its corresponding signal'''
        contribs = self.s_contributions.copy()
        contribs = contribs[contribs.Contribution!=0]
        if cumulative:
            contribs['Contribution'] = contribs.Contribution.cumsum()
        if adjust_scale:
            # Inverted rescale so the smaller contributions become visible.
            contribs = (1/contribs).max()*1.1-(1/contribs)
        ax = contribs.plot.bar(legend=False)
        ax.set_xlabel("Singular_i")
        ax.set_title('Non-zero{} contribution of Singular_i {}'.\
            format(' cumulative' if cumulative else '', '(scaled)' if adjust_scale else ''))
        if adjust_scale:
            # The rescaled values are not meaningful percentages; hide the axis.
            ax.axes.get_yaxis().set_visible(False)
        vals = ax.get_yticks()
        ax.set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
        if return_df:
            return contribs
    @classmethod
    def view_reconstruction(cls, *hankel, names=None, return_df=False, plot=True, symmetric_plots=False):
        '''Visualise the reconstruction of the hankel matrix/matrices passed to *hankel'''
        hankel_mat = None
        # Sum all supplied elementary matrices into a single Hankel matrix.
        for han in hankel:
            if isinstance(hankel_mat,m):
                hankel_mat = hankel_mat + han
            else:
                hankel_mat = han.copy()  # first matrix: copy so the caller's data is untouched
        hankel_full = cls.diagonal_averaging(hankel_mat)
        title = 'Reconstruction of signal'
        # `names or names==0` keeps a bare index 0 from being treated as "no names".
        if names or names==0:
            title += ' associated with singular value{}: {}'
            title = title.format('' if len(str(names))==1 else 's', names)
        if plot:
            ax = hankel_full.plot(legend=False, title=title)
            if symmetric_plots:
                # Centre the y-axis around zero using the largest magnitude.
                velocity = hankel_full.abs().max()[0]
                ax.set_ylim(bottom=-velocity, top=velocity)
        if return_df:
            return hankel_full
    def _forecast_prep(self, singular_values=None):
        # Precompute the recurrent-forecast coefficient vector R and the
        # projected trajectory matrix from the chosen orthonormal basis.
        self.X_com_hat = np.zeros(self.complete_dimensions)
        self.verticality_coefficient = 0
        self.forecast_orthonormal_base = {}
        # NOTE(review): a bare 0 is falsy, so singular_values=0 actually takes
        # the else branch below (full basis); the ==0 check here is unreachable.
        if singular_values:
            try:
                for i in singular_values:
                    self.forecast_orthonormal_base[i] = self.orthonormal_base[i]
            except:
                # NOTE(review): bare except; also `raise('...')` raises a
                # TypeError (strings are not exceptions) — should be
                # `raise ValueError(...)`.
                if singular_values==0:
                    self.forecast_orthonormal_base[0] = self.orthonormal_base[0]
                else:
                    raise('Please pass in a list/array of singular value indices to use for forecast')
        else:
            self.forecast_orthonormal_base = self.orthonormal_base
        self.R = np.zeros(self.forecast_orthonormal_base[0].shape)[:-1]
        for Pi in self.forecast_orthonormal_base.values():
            self.X_com_hat += Pi*Pi.T*self.X_com   # projection of X onto this component
            pi = np.ravel(Pi)[-1]                  # last coordinate of the basis vector
            self.verticality_coefficient += pi**2
            self.R += pi*Pi[:-1]
        # Recurrent-SSA coefficients, normalised by (1 - verticality coefficient).
        self.R = m(self.R/(1-self.verticality_coefficient))
        self.X_com_tilde = self.diagonal_averaging(self.X_com_hat)
    def forecast_recurrent(self, steps_ahead=12, singular_values=None, plot=False, return_df=False, **plotargs):
        '''Forecast from last point of original time series up to steps_ahead using recurrent methodology
        This method also fills any missing data from the original time series.'''
        try:
            self.X_com_hat
        except(AttributeError):
            # Lazily compute the forecast coefficients on first call.
            self._forecast_prep(singular_values)
        self.ts_forecast = np.array(self.ts_v[0])
        for i in range(1, self.ts_N+steps_ahead):
            try:
                if np.isnan(self.ts_v[i]):
                    # Missing observation: fill it via the recurrent formula
                    # applied to the last len(R) forecast values.
                    x = self.R.T*m(self.ts_forecast[max(0,i-self.R.shape[0]): i]).T
                    self.ts_forecast = np.append(self.ts_forecast,x[0])
                else:
                    self.ts_forecast = np.append(self.ts_forecast,self.ts_v[i])
            except(IndexError):
                # Past the end of the observed series: pure forecasting.
                x = self.R.T*m(self.ts_forecast[i-self.R.shape[0]: i]).T
                self.ts_forecast = np.append(self.ts_forecast, x[0])
        self.forecast_N = i+1
        # Rebuild a date index covering both history and the forecast horizon.
        new_index = pd.date_range(start=self.ts.index.min(),periods=self.forecast_N, freq=self.freq)
        forecast_df = pd.DataFrame(self.ts_forecast, columns=['Forecast'], index=new_index)
        # Pad the original with NaNs so both columns share the same index.
        forecast_df['Original'] = np.append(self.ts_v, [np.nan]*steps_ahead)
        if plot:
            forecast_df.plot(title='Forecasted vs. original time series', **plotargs)
        if return_df:
            return forecast_df
| [
"islashires@gmail.com"
] | islashires@gmail.com |
f9a01659bf39a63cdd339d284da8f4b70134e253 | a564b8277e33eb27009089ec2e216a4d266a8861 | /官方配套代码/11/11.5/ttk_test.py | 5be23b3a31f839e1487702c786002ba43db633c6 | [
"Unlicense"
] | permissive | yifengyou/crazy-python | 3cb50f462e4ddb921c365e2f0cb3e846e6539383 | 28099bd5011de6981a7c5412783952cc7601ae0c | refs/heads/main | 2023-06-18T18:10:52.691245 | 2021-07-18T14:21:03 | 2021-07-18T14:21:03 | 387,088,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
from tkinter import *
# import the themed-widget (ttk) submodule
from tkinter import ttk
class App:
    """Demo window contrasting ttk themed widgets with their classic
    tkinter counterparts (Combobox, Frame, Label, Button)."""

    def __init__(self, master):
        self.master = master
        self.initWidgets()

    def initWidgets(self):
        # ttk replaces the classic Listbox with a Combobox; populate its
        # drop-down list via the 'values' option instead of insert().
        combo = ttk.Combobox(self.master, font=24)
        combo['values'] = ('Python', 'Swift', 'Kotlin')
        combo.pack(side=LEFT, fill=X, expand=YES)
        # Container frame filling the right-hand side of the window.
        frame = ttk.Frame(self.master)
        frame.pack(side=RIGHT, fill=BOTH, expand=YES)
        # Themed label and button (text kept in the original locale).
        label = ttk.Label(self.master, text='我的标签', font=24)
        label.pack(side=TOP, fill=BOTH, expand=YES)
        button = ttk.Button(self.master, text='我的按钮')
        button.pack()
# Build the main window, attach the demo widgets and enter the Tk event loop.
root = Tk()
root.title("简单事件处理")
App(root)
root.mainloop()
"842056007@qq.com"
] | 842056007@qq.com |
479d1aeeff3bb58b0fe996c995e56889a34455b3 | 6bdb32ddbd72c4337dab12002ff05d6966538448 | /gridpack_folder/mc_request/LHEProducer/Spin-1/Wprime_Wh_Whadhbb/Wprime_Wh_Whadhbb_narrow_M3500_13TeV-madgraph_cff.py | 4a14e9796316d1a471cfe37d24b515494d0bcdc6 | [] | no_license | cyrilbecot/DibosonBSMSignal_13TeV | 71db480de274c893ba41453025d01bfafa19e340 | d8e685c40b16cde68d25fef9af257c90bee635ba | refs/heads/master | 2021-01-11T10:17:05.447035 | 2016-08-17T13:32:12 | 2016-08-17T13:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/master/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-1/Wprime_Wh_Whadhbb/Wprime_Wh_Whadhbb_narrow_M3500
# Generate LHE events by running the pre-built MadGraph gridpack from CVMFS
# (spin-1 W' -> W h with W->qq, h->bb; narrow width; M = 3500 GeV; 13 TeV).
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-1/Wprime_Wh_Whadhbb/narrow/v2/Wprime_Wh_Whadhbb_narrow_M3500_tarball.tar.xz'),
    # Number of LHE events produced per job.
    nEvents = cms.untracked.uint32(5000),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    # Standard CMSSW helper script that unpacks and runs the gridpack tarball.
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
    )
| [
"syu@cern.ch"
] | syu@cern.ch |
08dfd039fb10960d1b3682f7d5d8df927decb3e6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_207/ch147_2020_04_12_19_01_16_048371.py | 8523a46d0bf284b44d23c7a6f18a43e0bf7b00f6 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py |
def conta_ocorrencias(lista_palavras):
    """Return a dict mapping each word in *lista_palavras* to how many
    times it occurs in the list."""
    contagem = {}
    for palavra in lista_palavras:
        # dict.get with a default collapses the original if/else branches.
        contagem[palavra] = contagem.get(palavra, 0) + 1
    return contagem
# NOTE(review): `lista_palavras` is never defined in this file, so this
# module-level line raises NameError at import time — confirm where the
# word list is supposed to come from.
new_dicio = conta_ocorrencias(lista_palavras)
def mais_frequente(conta_ocorrencias):
    """Return the key with the highest value in the *conta_ocorrencias* dict.

    Bug fixes vs. the original:
    * operate on the parameter instead of the module-level ``new_dicio``
      (the original ignored its argument entirely);
    * stop shadowing the builtin ``max``;
    * return None for an empty dict instead of raising UnboundLocalError.

    On ties, the last key (in insertion order) holding the maximum value is
    returned, matching the original's final-scan behaviour.
    """
    maior = 0
    chave = None
    for k, v in conta_ocorrencias.items():
        # '>=' keeps the LAST key that reaches the maximum, like the
        # original's second loop did.
        if v >= maior:
            maior = v
            chave = k
    return chave
| [
"you@example.com"
] | you@example.com |
5b3db41f2da38a3972e9b3c5e8d65841ed63c755 | 8afc9a84162f82b8afb586c56befca0703e5c4cc | /mercurius/core/http/HTTPParser.py | 5fc4e05df93d1861c5e9a110e8a1152f829ef2c0 | [
"BSD-2-Clause"
] | permissive | bossiernesto/mercurius | 70028d4fdc360f7dcd51df5084efea8e60202463 | 9c4bc26f45a317d4e22137b412f63f18976461fe | refs/heads/master | 2021-01-19T05:46:58.848795 | 2017-02-26T20:19:43 | 2017-02-26T20:19:43 | 27,944,892 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | class HTTPHeader(object):
pass
class HTTPParser(object):
    """Minimal parser for RFC 822 style HTTP header blocks."""

    def parse_header(self, rfile):
        """Read header lines from *rfile* up to (and consuming) the blank
        line that terminates the header block.

        Returns a dict mapping lower-cased header names to their values.
        Continuation lines (starting with space/tab) are folded onto the
        previous header; duplicate headers are merged with ', '.

        Fixes vs. the original:
        * stops at EOF (readline() returning '') instead of crashing on
          line[0] or looping forever on a truncated stream;
        * merges only the VALUE of a duplicate header (the original
          appended the whole 'Name: value' line);
        * raises ValueError on a malformed line instead of ``assert``,
          which is stripped under ``python -O``.
        """
        headers = {}
        name = ''
        while True:
            line = rfile.readline()
            # Blank line ends the header block; '' means EOF (truncated input).
            if not line or line in ('\r\n', '\n'):
                break
            if line[0] in ' \t':
                # Continuation of the previous header (obsolete line folding).
                headers[name] = headers[name] + '\r\n ' + line.strip()
            else:
                i = line.find(':')
                if i == -1:
                    raise ValueError('malformed header line: %r' % (line,))
                name = line[:i].lower()
                if name in headers:
                    # Repeated header field: merge values with a comma.
                    headers[name] = headers[name] + ', ' + line[i+1:].strip()
                else:
                    headers[name] = line[i+1:].strip()
        return headers
"bossi.ernestog@gmail.com"
] | bossi.ernestog@gmail.com |
c169615d4389cd23ed594af719981407e753774b | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/Amazon/EC2/RunInstances.py | e62821a18b03d9fa0cdd7880b3f414ee896991aa | [] | no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,722 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# RunInstances
# Launches the specified number of instances of an AMI for which you have permissions.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RunInstances(Choreography):
    # Choreo that launches the specified number of instances of an AMI
    # for which the caller has permissions (see file header).

    def __init__(self, temboo_session):
        """
        Create a new instance of the RunInstances Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Amazon/EC2/RunInstances')

    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return RunInstancesInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set defined below.
        return RunInstancesResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Track an in-flight execution of this Choreo.
        return RunInstancesChoreographyExecution(session, exec_id, path)
class RunInstancesInputSet(InputSet):
    """
    Input container for the RunInstances Choreo.

    Each ``set_*`` method records one named input parameter for a
    subsequent execution of the Choreo.
    """

    def set_AWSAccessKeyId(self, value):
        """(required, string) The Access Key ID provided by Amazon Web Services."""
        self._set_input('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """(required, string) The Secret Key ID provided by Amazon Web Services."""
        self._set_input('AWSSecretKeyId', value)

    def set_DeleteOnTermination(self, value):
        """(optional, boolean) Whether the volume is deleted on instance termination. Defaults to "true"."""
        self._set_input('DeleteOnTermination', value)

    def set_DeviceName(self, value):
        """(optional, string) The device name exposed to the instance (i.e. /dev/sdh or xvdh)."""
        self._set_input('DeviceName', value)

    def set_ImageId(self, value):
        """(required, string) The ID of the AMI."""
        self._set_input('ImageId', value)

    def set_InstanceType(self, value):
        """(optional, string) The instance type (i.e. t1.micro, m1.small, ...). Default is m1.small."""
        self._set_input('InstanceType', value)

    def set_Iops(self, value):
        """(optional, integer) The number of IOPS the volume supports. Valid range is 100 to 2000."""
        self._set_input('Iops', value)

    def set_KernelId(self, value):
        """(optional, string) The ID of the kernel with which to launch the instance."""
        self._set_input('KernelId', value)

    def set_KeyName(self, value):
        """(optional, string) The name of the key pair to use."""
        self._set_input('KeyName', value)

    def set_MaxCount(self, value):
        """(required, integer) The maximum number of instances to launch."""
        self._set_input('MaxCount', value)

    def set_MinCount(self, value):
        """(required, integer) The minimum number of instances to launch."""
        self._set_input('MinCount', value)

    def set_MonitoringEnabled(self, value):
        """(optional, boolean) Enables monitoring for the instance. Defaults to false."""
        self._set_input('MonitoringEnabled', value)

    def set_NoDevice(self, value):
        """(optional, boolean) Suppresses a device mapping."""
        self._set_input('NoDevice', value)

    def set_PlacementAvailabilityZone(self, value):
        """(optional, string) The Availability Zone to launch the instance into."""
        self._set_input('PlacementAvailabilityZone', value)

    def set_PlacementGroupName(self, value):
        """(optional, string) The name of an existing placement group (for cluster instances)."""
        self._set_input('PlacementGroupName', value)

    def set_PlacementTenancy(self, value):
        """(optional, string) The tenancy of the instance ("dedicated" runs on single-tenant hardware in a VPC)."""
        self._set_input('PlacementTenancy', value)

    def set_RamdiskId(self, value):
        """(optional, string) The ID of the RAM disk."""
        self._set_input('RamdiskId', value)

    def set_ResponseFormat(self, value):
        """(optional, string) Response format: "xml" (the default) or "json"."""
        self._set_input('ResponseFormat', value)

    def set_SecurityGroupId(self, value):
        """(optional, string) Comma-separated list of up to 10 security group IDs."""
        self._set_input('SecurityGroupId', value)

    def set_SecurityGroup(self, value):
        """(optional, string) Comma-separated list of up to 10 security group names."""
        self._set_input('SecurityGroup', value)

    def set_ShutdownBehavior(self, value):
        """(optional, string) Instance-initiated shutdown behavior: "stop" or "terminate"."""
        self._set_input('ShutdownBehavior', value)

    def set_SnapshotId(self, value):
        """(optional, string) The ID of the snapshot."""
        self._set_input('SnapshotId', value)

    def set_SubnetId(self, value):
        """(optional, string) The ID of the subnet to launch the instance into (i.e. subnet-dea63cb7)."""
        self._set_input('SubnetId', value)

    def set_UserData(self, value):
        """(optional, string) Base64-encoded MIME user data made available to the instance(s)."""
        self._set_input('UserData', value)

    def set_VirtualName(self, value):
        """(optional, string) The name of the virtual device."""
        self._set_input('VirtualName', value)

    def set_VolumeSize(self, value):
        """(optional, string) The size of the volume in GiBs (required unless created from a snapshot)."""
        self._set_input('VolumeSize', value)

    def set_VolumeType(self, value):
        """(optional, string) The volume type: "standard" (the default) or "io1"."""
        self._set_input('VolumeType', value)
class RunInstancesResultSet(ResultSet):
    """
    Typed access to the values returned by an execution of the
    RunInstances Choreo.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; it is
        # kept unchanged for backward compatibility with keyword callers.
        return json.loads(str)

    def get_Response(self):
        """Retrieve the "Response" output (the raw response from Amazon), or None."""
        response = self._output.get('Response', None)
        return response
class RunInstancesChoreographyExecution(ChoreographyExecution):
    # Execution handle for RunInstances; produces the typed result set above.

    def _make_result_set(self, response, path):
        # Wrap the raw response of a finished execution in the typed result set.
        return RunInstancesResultSet(response, path)
| [
"cedric.warny@gmail.com"
] | cedric.warny@gmail.com |
55b3249472d35e55db8967d38f1ac71a1ec55abd | 1b5630e4fd275540aac3b5653ebccacd4067a124 | /stpipeline/common/clustering.py | f9e6ea870c6af674eafcc5819b1a7772a25602ba | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dhtc/st_pipeline | aa25845d3b787bdc3e32a78d14ec96fa1565076a | 766ad6f039ac27609934d1c7e7d70c34a0f3df73 | refs/heads/master | 2023-04-15T19:35:14.464233 | 2021-05-06T12:51:15 | 2021-05-06T12:51:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,455 | py | """
This module contains some functions to cluster
molecular barcodes (UMIs) by hamming distance
"""
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from sklearn.cluster import AffinityPropagation
from collections import defaultdict
from stpipeline.common.cdistance import hamming_distance
import random
from collections import Counter
def countUMIHierarchical(molecular_barcodes,
                         allowed_mismatches,
                         method="single"):
    """
    Clusters UMIs by hamming distance with agglomerative (hierarchical)
    clustering, cutting the dendrogram at the given distance threshold,
    and returns one randomly chosen representative UMI per cluster.

    :param molecular_barcodes: a list of UMIs
    :param allowed_mismatches: how much distance we allow between clusters
    :param method: the type of distance algorithm when clustering
                   (single more restrictive or complete less restrictive)
    :type allowed_mismatches: integer
    :type method: str
    :return: a list of unique UMIs
    :rtype: list
    """
    # scipy's linkage cannot handle 1x1 or 2x2 distance matrices, so for
    # these rare tiny inputs fall back to the naive clustering
    if len(molecular_barcodes) <= 2:
        return countUMINaive(molecular_barcodes, allowed_mismatches)

    def pairwise_distance(coord):
        a, b = coord
        return hamming_distance(molecular_barcodes[a].encode("UTF-8"),
                                molecular_barcodes[b].encode("UTF-8"))

    # Condensed distance vector over all unordered UMI pairs
    pair_indices = np.triu_indices(len(molecular_barcodes), 1)
    condensed = np.apply_along_axis(pairwise_distance, 0, pair_indices)
    # Build the dendrogram and cut it at the mismatch threshold
    tree = linkage(condensed, method=method)
    labels = fcluster(tree, allowed_mismatches, criterion='distance')
    # Group member indices by flat-cluster label and pick one at random
    clusters = defaultdict(list)
    for index, label in enumerate(labels):
        clusters[label].append(index)
    return [molecular_barcodes[random.choice(group)] for group in list(clusters.values())]
def countUMINaive(molecular_barcodes, allowed_mismatches):
    """
    Clusters UMIs with a naive proximity approach: the UMIs are sorted
    and each one joins the current cluster when its hamming distance to
    the cluster's most recent member is within *allowed_mismatches*,
    otherwise it starts a new cluster. One random member represents
    each cluster.

    :param molecular_barcodes: a list of UMIs
    :param allowed_mismatches: how much distance we allow between clusters
    :type allowed_mismatches: integer
    :return: a list of unique UMIs
    :rtype: list
    """
    clusters = []
    for umi in sorted(molecular_barcodes):
        # Extend the current cluster when this UMI is close enough to the
        # previously clustered (sorted) UMI; otherwise open a new cluster
        if clusters and hamming_distance(clusters[-1][-1].encode("UTF-8"),
                                         umi.encode("UTF-8")) <= allowed_mismatches:
            clusters[-1].append(umi)
        else:
            clusters.append([umi])
    # One randomly chosen representative per cluster
    return [random.choice(members) for members in clusters]
def breadth_first_search(node, adj_list):
    """
    Returns the set of all nodes reachable from *node* in the graph
    described by *adj_list* (a mapping node -> iterable of neighbours).

    This function has been obtained from
    https://github.com/CGATOxford/UMI-tools
    The logic behind the algorithm to cluster UMIs using
    an adjacent distance matrix is described in
    http://genome.cshlp.org/content/early/2017/01/18/gr.209601.116.abstract
    """
    expanded = set()
    reachable = {node}
    frontier = {node}
    while frontier:
        # Take an arbitrary node from the frontier and expand it
        current = next(iter(frontier))
        neighbours = adj_list[current]
        reachable.update(neighbours)
        frontier.update(neighbours)
        expanded.add(current)
        # Never re-expand a node we have already processed
        frontier.difference_update(expanded)
    return reachable
def remove_umis(adj_list, cluster, nodes):
    """
    Removes the given nodes, together with every node adjacent to one of
    them, from the cluster and returns the remaining nodes.
    """
    doomed = set(nodes)
    for target in nodes:
        doomed.update(adj_list[target])
    return cluster - doomed
def dedup_adj(molecular_barcodes, allowed_mismatches):
    """
    Clusters UMIs with the "adjacency" method and returns one (or more)
    representative UMIs per connected component.

    This function has been obtained from
    https://github.com/CGATOxford/UMI-tools
    The logic behind the algorithm to cluster UMIs using
    an adjacent distance matrix is described in
    http://genome.cshlp.org/content/early/2017/01/18/gr.209601.116.abstract

    :param molecular_barcodes: a list of UMIs (duplicates allowed; counts matter)
    :param allowed_mismatches: max hamming distance for two UMIs to be adjacent
    :return: a list of unique UMIs
    :rtype: list
    """
    # UMI -> observation count; counts drive which node "absorbs" the others
    c = Counter(molecular_barcodes)

    def get_adj_list_adjacency(umis):
        # UMI -> all UMIs (including itself) within the allowed distance
        return {umi: [umi2 for umi2 in umis if hamming_distance(umi.encode("UTF-8"),
                                                                umi2.encode("UTF-8")) \
                      <= allowed_mismatches] for umi in umis}

    def get_connected_components_adjacency(graph, Counter):
        # BFS from the most frequent unvisited node to collect components
        found = list()
        components = list()
        for node in sorted(graph, key=lambda x: Counter[x], reverse=True):
            if node not in found:
                component = breadth_first_search(node, graph)
                found.extend(component)
                components.append(component)
        return components

    def get_best_adjacency(cluster, adj_list, counts):
        # Smallest count-ordered prefix of nodes whose neighbourhoods
        # jointly cover the whole cluster
        if len(cluster) == 1: return list(cluster)
        sorted_nodes = sorted(cluster, key=lambda x: counts[x], reverse=True)
        for i in range(len(sorted_nodes) - 1):
            if len(remove_umis(adj_list, cluster, sorted_nodes[:i + 1])) == 0:
                return sorted_nodes[:i + 1]

    def reduce_clusters_adjacency(adj_list, clusters, counts):
        # TS - the "adjacency" variant of this function requires an adjacency
        # list to identify the best umi, whereas the other variants don't
        # As temporary solution, pass adj_list to all variants
        unique_umis = list()
        for cluster in clusters:
            parent_umis = get_best_adjacency(cluster, adj_list, counts)
            unique_umis += parent_umis
        return unique_umis

    adj_list = get_adj_list_adjacency(c.keys())
    clusters = get_connected_components_adjacency(adj_list, c)
    unique_umis = reduce_clusters_adjacency(adj_list, clusters, c)
    return unique_umis
def dedup_dir_adj(molecular_barcodes, allowed_mismatches):
    """
    Clusters UMIs with the "directional adjacency" method and returns one
    representative UMI per connected component.

    This function has been obtained from
    https://github.com/CGATOxford/UMI-tools
    The logic behind the algorithm to cluster UMIs using
    an adjacent distance matrix is described in
    http://genome.cshlp.org/content/early/2017/01/18/gr.209601.116.abstract

    :param molecular_barcodes: a list of UMIs (duplicates allowed; counts matter)
    :param allowed_mismatches: max hamming distance for two UMIs to be adjacent
    :return: a list of unique UMIs
    :rtype: list
    """
    # UMI -> observation count; edges are directional based on counts
    c = Counter(molecular_barcodes)

    def get_adj_list_directional_adjacency(umis, counts):
        # Edge umi -> umi2 only when umi is within distance AND at least
        # roughly twice as abundant (counts[umi] >= 2*counts[umi2] - 1)
        return {umi: [umi2 for umi2 in umis if hamming_distance(umi.encode("UTF-8"),
                                                                umi2.encode("UTF-8")) <= allowed_mismatches and
                      counts[umi] >= (counts[umi2] * 2) - 1] for umi in umis}

    def get_connected_components_adjacency(graph, Counter):
        # BFS from the most frequent unvisited node to collect components
        found = list()
        components = list()
        for node in sorted(graph, key=lambda x: Counter[x], reverse=True):
            if node not in found:
                component = breadth_first_search(node, graph)
                found.extend(component)
                components.append(component)
        return components

    def reduce_clusters_directional_adjacency(clusters):
        # One arbitrary member (set.pop) represents each component
        return [cluster.pop() for cluster in clusters]

    adj_list = get_adj_list_directional_adjacency(c.keys(), c)
    clusters = get_connected_components_adjacency(adj_list, c)
    unique_umis = reduce_clusters_directional_adjacency(clusters)
    return unique_umis
def affinity_umi_removal(molecular_barcodes, _):
    """
    Tries to find clusters of similar UMIs with Affinity Propagation over
    the pairwise (negated) hamming distances.
    It returns a list with one randomly chosen UMI per cluster.

    :param molecular_barcodes: a list of UMIs
    :param _: unused; kept so the signature matches the other clustering functions
    :return: a list of unique UMIs
    :rtype: list
    """
    # Affinity propagation needs more than a couple of samples; fall back
    # to the naive approach (zero allowed mismatches) for tiny inputs
    if len(molecular_barcodes) <= 2:
        return countUMINaive(molecular_barcodes, 0)
    words = np.asarray(molecular_barcodes)
    # Negated hamming distances serve as similarities
    # (AffinityPropagation maximizes similarity)
    lev_similarity = -1 * np.array([[hamming_distance(w1.encode("UTF-8"),
                                                      w2.encode("UTF-8")) for w1 in words] for w2 in words])
    affprop = AffinityPropagation(affinity="precomputed", damping=0.5)
    affprop.fit(lev_similarity)
    unique_clusters = list()
    for cluster_id in np.unique(affprop.labels_):
        # Pick a random member of each cluster as its representative.
        # (The previously computed model exemplar was never used.)
        cluster = np.unique(words[np.nonzero(affprop.labels_ == cluster_id)])
        unique_clusters.append(random.choice(cluster))
    return unique_clusters
| [
"jc.fernandez.navarro@gmail.com"
] | jc.fernandez.navarro@gmail.com |
e66349eb51113b722d1e45f10283c997625af004 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_job_target_groups_operations.py | 32caa1fd6c61fe42ac7c638deda512b1f0c2103f | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 19,269 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._job_target_groups_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_agent_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class JobTargetGroupsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.sql.aio.SqlManagementClient`'s
:attr:`job_target_groups` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Generated operation groups receive (client, config, serializer,
        # deserializer) either positionally or as keyword arguments.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_by_agent(
        self, resource_group_name: str, server_name: str, job_agent_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.JobTargetGroup"]:
        """Gets all target groups in an agent.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param job_agent_name: The name of the job agent. Required.
        :type job_agent_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either JobTargetGroup or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.JobTargetGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-11-01-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.JobTargetGroupListResult]

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the request from the operation template;
            # subsequent pages: follow the server-provided next link.
            if not next_link:
                request = build_list_by_agent_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    job_agent_name=job_agent_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_agent.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                request = HttpRequest("GET", next_link)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items).
            deserialized = self._deserialize("JobTargetGroupListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page through the client pipeline, raising on non-200.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list_by_agent.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/targetGroups"}  # type: ignore
    @distributed_trace_async
    async def get(
        self, resource_group_name: str, server_name: str, job_agent_name: str, target_group_name: str, **kwargs: Any
    ) -> _models.JobTargetGroup:
        """Gets a target group.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param job_agent_name: The name of the job agent. Required.
        :type job_agent_name: str
        :param target_group_name: The name of the target group. Required.
        :type target_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: JobTargetGroup or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.JobTargetGroup
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-11-01-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.JobTargetGroup]

        # Build the GET request from the operation's URL template.
        request = build_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            job_agent_name=job_agent_name,
            target_group_name=target_group_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("JobTargetGroup", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/targetGroups/{targetGroupName}"}  # type: ignore
    # Typed overload: model-typed body, serialized as JSON.
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        job_agent_name: str,
        target_group_name: str,
        parameters: _models.JobTargetGroup,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.JobTargetGroup:
        """Creates or updates a target group.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param job_agent_name: The name of the job agent. Required.
        :type job_agent_name: str
        :param target_group_name: The name of the target group. Required.
        :type target_group_name: str
        :param parameters: The requested state of the target group. Required.
        :type parameters: ~azure.mgmt.sql.models.JobTargetGroup
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: JobTargetGroup or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.JobTargetGroup
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # Typed overload: raw stream/bytes body, sent as-is.
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        job_agent_name: str,
        target_group_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.JobTargetGroup:
        """Creates or updates a target group.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param job_agent_name: The name of the job agent. Required.
        :type job_agent_name: str
        :param target_group_name: The name of the target group. Required.
        :type target_group_name: str
        :param parameters: The requested state of the target group. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: JobTargetGroup or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.JobTargetGroup
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # Runtime implementation shared by both overloads.
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        job_agent_name: str,
        target_group_name: str,
        parameters: Union[_models.JobTargetGroup, IO],
        **kwargs: Any
    ) -> _models.JobTargetGroup:
        """Creates or updates a target group.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param job_agent_name: The name of the job agent. Required.
        :type job_agent_name: str
        :param target_group_name: The name of the target group. Required.
        :type target_group_name: str
        :param parameters: The requested state of the target group. Is either a model type or a IO
         type. Required.
        :type parameters: ~azure.mgmt.sql.models.JobTargetGroup or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: JobTargetGroup or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.JobTargetGroup
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-11-01-preview"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.JobTargetGroup]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw stream/bytes payloads are sent as-is; models are serialized.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "JobTargetGroup")

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            job_agent_name=job_agent_name,
            target_group_name=target_group_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # 200 = updated, 201 = created; both carry a JobTargetGroup body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize("JobTargetGroup", pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize("JobTargetGroup", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/targetGroups/{targetGroupName}"}  # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, server_name: str, job_agent_name: str, target_group_name: str, **kwargs: Any
) -> None:
"""Deletes a target group.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param job_agent_name: The name of the job agent. Required.
:type job_agent_name: str
:param target_group_name: The name of the target group. Required.
:type target_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-11-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
server_name=server_name,
job_agent_name=job_agent_name,
target_group_name=target_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/targetGroups/{targetGroupName}"} # type: ignore
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
b5b0eda2ff81030eeb4431d1b82a28006d6c50e1 | 81cac5d646fc14e52b3941279d59fdd957b10f7e | /homeassistant/components/motioneye/config_flow.py | 84a6d0771e6ed5972d3e9aa1ba08fceffc0e0d28 | [
"Apache-2.0"
] | permissive | arsaboo/home-assistant | 6b6617f296408a42874a67a71ad9bc6074acd000 | 554e51017e7b1b6949783d9684c4a0e8ca21e466 | refs/heads/dev | 2023-07-27T20:56:52.656891 | 2022-01-19T19:30:57 | 2022-01-19T19:30:57 | 207,046,472 | 2 | 0 | Apache-2.0 | 2019-09-08T01:35:16 | 2019-09-08T01:35:16 | null | UTF-8 | Python | false | false | 8,630 | py | """Config flow for motionEye integration."""
from __future__ import annotations
from typing import Any, cast
from motioneye_client.client import (
MotionEyeClientConnectionError,
MotionEyeClientInvalidAuthError,
MotionEyeClientRequestError,
)
import voluptuous as vol
from homeassistant.components.hassio import HassioServiceInfo
from homeassistant.config_entries import (
SOURCE_REAUTH,
ConfigEntry,
ConfigFlow,
OptionsFlow,
)
from homeassistant.const import CONF_SOURCE, CONF_URL, CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from . import create_motioneye_client
from .const import (
CONF_ADMIN_PASSWORD,
CONF_ADMIN_USERNAME,
CONF_STREAM_URL_TEMPLATE,
CONF_SURVEILLANCE_PASSWORD,
CONF_SURVEILLANCE_USERNAME,
CONF_WEBHOOK_SET,
CONF_WEBHOOK_SET_OVERWRITE,
DEFAULT_WEBHOOK_SET,
DEFAULT_WEBHOOK_SET_OVERWRITE,
DOMAIN,
)
class MotionEyeConfigFlow(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for motionEye."""
VERSION = 1
_hassio_discovery: dict[str, Any] | None = None
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the initial step."""
def _get_form(
user_input: dict[str, Any], errors: dict[str, str] | None = None
) -> FlowResult:
"""Show the form to the user."""
url_schema: dict[vol.Required, type[str]] = {}
if not self._hassio_discovery:
# Only ask for URL when not discovered
url_schema[
vol.Required(CONF_URL, default=user_input.get(CONF_URL, ""))
] = str
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
**url_schema,
vol.Optional(
CONF_ADMIN_USERNAME,
default=user_input.get(CONF_ADMIN_USERNAME),
): str,
vol.Optional(
CONF_ADMIN_PASSWORD,
default=user_input.get(CONF_ADMIN_PASSWORD),
): str,
vol.Optional(
CONF_SURVEILLANCE_USERNAME,
default=user_input.get(CONF_SURVEILLANCE_USERNAME),
): str,
vol.Optional(
CONF_SURVEILLANCE_PASSWORD,
default=user_input.get(CONF_SURVEILLANCE_PASSWORD),
): str,
}
),
errors=errors,
)
reauth_entry = None
if self.context.get("entry_id"):
reauth_entry = self.hass.config_entries.async_get_entry(
self.context["entry_id"]
)
if user_input is None:
return _get_form(
cast(dict[str, Any], reauth_entry.data) if reauth_entry else {}
)
if self._hassio_discovery:
# In case of Supervisor discovery, use pushed URL
user_input[CONF_URL] = self._hassio_discovery[CONF_URL]
try:
# Cannot use cv.url validation in the schema itself, so
# apply extra validation here.
cv.url(user_input[CONF_URL])
except vol.Invalid:
return _get_form(user_input, {"base": "invalid_url"})
client = create_motioneye_client(
user_input[CONF_URL],
admin_username=user_input.get(CONF_ADMIN_USERNAME),
admin_password=user_input.get(CONF_ADMIN_PASSWORD),
surveillance_username=user_input.get(CONF_SURVEILLANCE_USERNAME),
surveillance_password=user_input.get(CONF_SURVEILLANCE_PASSWORD),
session=async_get_clientsession(self.hass),
)
errors = {}
try:
await client.async_client_login()
except MotionEyeClientConnectionError:
errors["base"] = "cannot_connect"
except MotionEyeClientInvalidAuthError:
errors["base"] = "invalid_auth"
except MotionEyeClientRequestError:
errors["base"] = "unknown"
finally:
await client.async_client_close()
if errors:
return _get_form(user_input, errors)
if self.context.get(CONF_SOURCE) == SOURCE_REAUTH and reauth_entry is not None:
# Persist the same webhook id across reauths.
if CONF_WEBHOOK_ID in reauth_entry.data:
user_input[CONF_WEBHOOK_ID] = reauth_entry.data[CONF_WEBHOOK_ID]
self.hass.config_entries.async_update_entry(reauth_entry, data=user_input)
# Need to manually reload, as the listener won't have been
# installed because the initial load did not succeed (the reauth
# flow will not be initiated if the load succeeds).
await self.hass.config_entries.async_reload(reauth_entry.entry_id)
return self.async_abort(reason="reauth_successful")
# Search for duplicates: there isn't a useful unique_id, but
# at least prevent entries with the same motionEye URL.
self._async_abort_entries_match({CONF_URL: user_input[CONF_URL]})
title = user_input[CONF_URL]
if self._hassio_discovery:
title = "Add-on"
return self.async_create_entry(
title=title,
data=user_input,
)
async def async_step_reauth(
self,
config_data: dict[str, Any] | None = None,
) -> FlowResult:
"""Handle a reauthentication flow."""
return await self.async_step_user(config_data)
async def async_step_hassio(self, discovery_info: HassioServiceInfo) -> FlowResult:
"""Handle Supervisor discovery."""
self._hassio_discovery = discovery_info.config
await self._async_handle_discovery_without_unique_id()
return await self.async_step_hassio_confirm()
async def async_step_hassio_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Confirm Supervisor discovery."""
if user_input is None and self._hassio_discovery is not None:
return self.async_show_form(
step_id="hassio_confirm",
description_placeholders={"addon": self._hassio_discovery["addon"]},
)
return await self.async_step_user()
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> MotionEyeOptionsFlow:
"""Get the Hyperion Options flow."""
return MotionEyeOptionsFlow(config_entry)
class MotionEyeOptionsFlow(OptionsFlow):
"""motionEye options flow."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize a motionEye options flow."""
self._config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
schema: dict[vol.Marker, type] = {
vol.Required(
CONF_WEBHOOK_SET,
default=self._config_entry.options.get(
CONF_WEBHOOK_SET,
DEFAULT_WEBHOOK_SET,
),
): bool,
vol.Required(
CONF_WEBHOOK_SET_OVERWRITE,
default=self._config_entry.options.get(
CONF_WEBHOOK_SET_OVERWRITE,
DEFAULT_WEBHOOK_SET_OVERWRITE,
),
): bool,
}
if self.show_advanced_options:
# The input URL is not validated as being a URL, to allow for the possibility
# the template input won't be a valid URL until after it's rendered.
schema.update(
{
vol.Required(
CONF_STREAM_URL_TEMPLATE,
default=self._config_entry.options.get(
CONF_STREAM_URL_TEMPLATE,
"",
),
): str
}
)
return self.async_show_form(step_id="init", data_schema=vol.Schema(schema))
| [
"noreply@github.com"
] | arsaboo.noreply@github.com |
9a99a3346de89b4d3b489a277a7773de3b3a22ae | b7d6b8918a5d32ee2e22fc2e97c25b96ffe24110 | /project/users/views.py | 966ae886f3753c49b76bf66d8d8b40eaa5915bdb | [] | no_license | Briankr33/FlaskTaskr | d935d2a97b2a36288c2aec297f6d1fbf21a9ca1e | 18e0290b08cef973c5f67bd3fb50970b104a534a | refs/heads/master | 2020-04-14T23:13:32.879566 | 2019-02-05T20:43:03 | 2019-02-05T20:43:03 | 164,194,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | # project/users/views.py
#################
#### imports ####
#################
from functools import wraps
from flask import flash, redirect, render_template, \
request, session, url_for, Blueprint
from sqlalchemy.exc import IntegrityError
from .forms import RegisterForm, LoginForm
from project import db, bcrypt
from project.models import User
################
#### config ####
################
users_blueprint = Blueprint('users', __name__)
##########################
#### helper functions ####
##########################
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('users.login'))
return wrap
################
#### routes ####
################
@users_blueprint.route('/logout/')
@login_required
def logout():
session.pop('logged_in', None)
session.pop('user_id', None)
session.pop('role', None)
session.pop('name', None)
flash('Goodbye!')
return redirect(url_for('users.login'))
@users_blueprint.route('/', methods=['GET', 'POST'])
def login():
error = None
form = LoginForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(name=request.form['name']).first()
if user is not None and bcrypt.check_password_hash(user.password, request.form['password']):
session['logged_in'] = True
session['user_id'] = user.id
session['role'] = user.role
session['name'] = user.name
flash('Welcome!')
return redirect(url_for('tasks.tasks'))
else:
error = 'Invalid username or password.'
return render_template('login.html', form=form, error=error)
@users_blueprint.route('/register/', methods=['GET', 'POST'])
def register():
error = None
form = RegisterForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
new_user = User(
form.name.data,
form.email.data,
bcrypt.generate_password_hash(form.password.data),
)
try:
db.session.add(new_user)
db.session.commit()
flash('Thanks for registering. Please login.')
return redirect(url_for('users.login'))
except IntegrityError:
error = 'That username and/or email already exists.'
return render_template('register.html', form=form, error=error)
return render_template('register.html', form=form, error=error)
| [
"you@example.com"
] | you@example.com |
fc5dd4af74160a03bd97cc7c32cd9f983cfa7f43 | a4f3e7f4f0d28f2c072a6378487760a067e838e6 | /Array Values From User.py | cdd6dcee4acd5a44c0a567747aa9ae2c9296aed5 | [] | no_license | algebra-det/Python | 50cacdb517a863ef914dd8ce8b835091f28aa9f6 | c4506f4df0c2ec103b1bcebcfedca78cbe705541 | refs/heads/master | 2020-12-14T15:35:26.727430 | 2020-05-01T10:21:09 | 2020-05-01T10:21:09 | 234,789,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | """
Here we will learn how to take array values from the user
"""
from array import *
arr = array('i',[])
y = int(input("Enter the length of the array "))
for z in range(y):
a = int(input("Enter the next value "))
arr.append(a)
print(arr)
b = int(input("which number do you want to search "))
# Searching manually i.e. without inbuilt function
d=0
for c in arr:
if c==b:
print("its at index ",d)
break
d+=1
else:
print("It does not match any value you entered")
print()
print()
print()
# searching with function
print("It's at : ",arr.index(b)) | [
"noreply@github.com"
] | algebra-det.noreply@github.com |
6c7260b28424b01279f297879ffc39b9b925ae24 | 0fd7a471a63e2bed2857976d8bf2c28bb7f6d1bb | /小练习/557.反转字符串中的单词.py | 826b71429220ffeb669e00d1cd3967030832822e | [] | no_license | zjf201811/LeetCode__exercises | 0432f97d314303a5b2305d745cae9d998b92a851 | 6e5975172dfd17d71b0c6bacc34d51e9e96b6a36 | refs/heads/master | 2020-04-18T01:47:52.285975 | 2019-01-27T00:28:58 | 2019-01-27T00:28:58 | 167,134,587 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | # Author:ZJF
class Solution:
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
str_list=s.split(" ")
for i in range(len(str_list)):
str_list[i]=str_list[i][-1::-1]
return " ".join(str_list)
print(Solution().reverseWords("12 12 12")) | [
"thor201105@163.com"
] | thor201105@163.com |
7a0a3e64463202ee168755c2e84375ca5feb28fd | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/permutations_20200723154606.py | 437d1c04ed157dfb596f458593ff9ff7960a1e5c | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | def perm(arr):
# sort the array
arr.sort()
perm = set(arr)
print(arr)
perm([4,1,3,2]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
aeb0cf39d35aac50a81a46cef5ddeaaf6d247c96 | be3d301bf8c502bb94149c76cc09f053c532d87a | /python/GafferTest/PathFilterTest.py | e2c2749e516f4438ce09f52a932096caaf67d2a0 | [
"BSD-3-Clause"
] | permissive | ljkart/gaffer | 28be401d04e05a3c973ef42d29a571aba6407665 | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | refs/heads/master | 2021-01-18T08:30:19.763744 | 2014-08-10T13:48:10 | 2014-08-10T13:48:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,264 | py | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import glob
import IECore
import Gaffer
import GafferTest
class PathFilterTest( GafferTest.TestCase ) :
def test( self ) :
path = Gaffer.FileSystemPath( "test/data/scripts" )
children = path.children()
self.assertEqual( len( children ), len( glob.glob( "test/data/scripts/*" ) ) )
# attach a filter
gfrFilter = Gaffer.FileNamePathFilter( [ "*.gfr" ] )
path.addFilter( gfrFilter )
children = path.children()
self.assertEqual( len( children ), len( glob.glob( "test/data/scripts/*.gfr" ) ) )
# copy the path and check the filter is working on the copy
pathCopy = path.copy()
self.assertEqual( len( pathCopy.children() ), len( children ) )
# detach the filter and check that behaviour has reverted
path.removeFilter( gfrFilter )
children = path.children()
self.assertEqual( len( children ), len( glob.glob( "test/data/scripts/*" ) ) )
def testEnabledState( self ) :
path = Gaffer.FileSystemPath( "test/data/scripts" )
f = Gaffer.FileNamePathFilter( [ "*.gfr" ] )
self.assertEqual( f.getEnabled(), True )
path.setFilter( f )
self.assertEqual( len( path.children() ), len( glob.glob( "test/data/scripts/*.gfr" ) ) )
f.setEnabled( False )
self.assertEqual( f.getEnabled(), False )
self.assertEqual( len( path.children() ), len( glob.glob( "test/data/scripts/*" ) ) )
f.setEnabled( True )
self.assertEqual( f.getEnabled(), True )
self.assertEqual( len( path.children() ), len( glob.glob( "test/data/scripts/*.gfr" ) ) )
def testChangedSignal( self ) :
pathFilter = Gaffer.FileNamePathFilter( [ "*.gfr" ] )
enabledStates = []
def f( pf ) :
self.failUnless( pf is pathFilter )
enabledStates.append( pf.getEnabled() )
c = pathFilter.changedSignal().connect( f )
pathFilter.setEnabled( False )
pathFilter.setEnabled( False )
pathFilter.setEnabled( True )
pathFilter.setEnabled( True )
pathFilter.setEnabled( False )
self.assertEqual( enabledStates, [ False, True, False ] )
def testUserData( self ) :
pathFilter = Gaffer.FileNamePathFilter( [ "*.gfr" ] )
self.assertEqual( pathFilter.userData(), {} )
ud = { "a" : "a" }
pathFilter = Gaffer.FileNamePathFilter( [ "*.gfr" ], userData = ud )
self.assertEqual( pathFilter.userData(), ud )
self.failIf( pathFilter.userData() is ud )
if __name__ == "__main__":
unittest.main()
| [
"thehaddonyoof@gmail.com"
] | thehaddonyoof@gmail.com |
bd4a60acea7ad6f199bcb3f98e075600430ce483 | e7ba4626bd239c20f48a49e8b198dace1391b403 | /Plotter/test/testPlot.py | 8d7cb251cad176a82b7f5cde5fb9d603af112911 | [] | no_license | ArturAkh/TauFW | b6952edb7ce6f1e29ee8c9f4501a035a7bd1729e | df209f865d3aacb72ffecb2e02126d57e4646181 | refs/heads/master | 2023-02-19T18:14:40.269908 | 2021-01-20T23:02:50 | 2021-01-20T23:04:15 | 293,569,311 | 0 | 12 | null | 2020-09-07T15:47:06 | 2020-09-07T15:47:06 | null | UTF-8 | Python | false | false | 2,640 | py | #! /usr/bin/env python
# Author: Izaak Neutelings (June 2020)
# Description: Test script for Plot class
# test/testPlot.py -v2 && eog plots/testPlot*.png
from TauFW.Plotter.plot.utils import LOG, ensuredir
from TauFW.Plotter.plot.Plot import Plot, CMSStyle
from ROOT import TH1D, gRandom
def plothist(xtitle,hists,ratio=False,logy=False,norm=False):
# SETTING
outdir = ensuredir("plots/")
fname = outdir+"testPlot"
if ratio:
fname += "_ratio"
if logy:
fname += "_logy"
if norm:
fname += "_norm" # normalize each histogram
rrange = 0.5
width = 0.2 # legend width
position = 'topright' # legend position
header = "Gaussians" # legend header
text = "#mu#tau_{h}" # corner text
grid = True #and False
staterr = True and False # add uncertainty band to first histogram
lstyle = 1 # solid lines
# PLOT
LOG.header(fname)
plot = Plot(xtitle,hists,norm=norm)
plot.draw(ratio=ratio,logy=logy,ratiorange=rrange,lstyle=lstyle,grid=grid,staterr=staterr)
plot.drawlegend(position,header=header,width=width)
plot.drawtext(text)
plot.saveas(fname+".png")
plot.saveas(fname+".pdf")
#plot.saveas(fname+".C")
#plot.saveas(fname+".png",fname+".C")
#plot.saveas(fname,ext=['png','pdf'])
plot.close()
print
def createhists(nhist=3):
nbins = 50
xmin = 0
xmax = 100
nevts = 10000
rrange = 0.5
hists = [ ]
gRandom.SetSeed(1777)
for i in xrange(1,nhist+1):
mu = 48+i
sigma = 10
hname = "hist%d"%(i)
htitle = "#mu = %s, #sigma = %s"%(mu,sigma)
hist = TH1D(hname,hname,nbins,xmin,xmax)
for j in xrange(nevts):
hist.Fill(gRandom.Gaus(mu,sigma))
hists.append(hist)
return hists
def main():
CMSStyle.setCMSEra(2018)
xtitle = "p_{T}^{MET} [GeV]"
#xtitle = "Leading jet p_{T} [GeV]"
#plothist(variable,hists,ratio=False,logy=False)
for ratio in [True,False]:
for logy in [True,False]:
for norm in [True,False]:
hists = createhists()
plothist(xtitle,hists,ratio=ratio,logy=logy,norm=norm)
if __name__ == "__main__":
import sys
from argparse import ArgumentParser
argv = sys.argv
description = '''Script to test the Plot class for comparing histograms.'''
parser = ArgumentParser(prog="testPlot",description=description,epilog="Good luck!")
parser.add_argument('-v', '--verbose', dest='verbosity', type=int, nargs='?', const=1, default=0, action='store',
help="set verbosity" )
args = parser.parse_args()
LOG.verbosity = args.verbosity
main()
| [
"iwn_@hotmail.com"
] | iwn_@hotmail.com |
ce40f7f0feca28259d678a1a8d0daf5153c8a968 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02796/s921715617.py | f4b6921f3b9803e4c79bddb581b27d145fb52df9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | N = int(input())
robots = [tuple(map(int,input().split())) for _ in range(N)]
robots = [(x+l,x-l) for x,l in robots]
robots.sort()
cnt = 0
last = -float('inf')
for r,l in robots:
if last <= l:
cnt += 1
last = r
print(cnt) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
eac6d6b3556237519c304e69f5902010cad91848 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/plugwise/switch.py | 8639826e37a7e890708b33604b2e0102551bff17 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 3,593 | py | """Plugwise Switch component for HomeAssistant."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from plugwise.constants import SwitchType
from homeassistant.components.switch import (
SwitchDeviceClass,
SwitchEntity,
SwitchEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import PlugwiseDataUpdateCoordinator
from .entity import PlugwiseEntity
from .util import plugwise_command
@dataclass
class PlugwiseSwitchEntityDescription(SwitchEntityDescription):
"""Describes Plugwise switch entity."""
key: SwitchType
SWITCHES: tuple[PlugwiseSwitchEntityDescription, ...] = (
PlugwiseSwitchEntityDescription(
key="dhw_cm_switch",
translation_key="dhw_cm_switch",
icon="mdi:water-plus",
entity_category=EntityCategory.CONFIG,
),
PlugwiseSwitchEntityDescription(
key="lock",
translation_key="lock",
icon="mdi:lock",
entity_category=EntityCategory.CONFIG,
),
PlugwiseSwitchEntityDescription(
key="relay",
translation_key="relay",
device_class=SwitchDeviceClass.SWITCH,
),
PlugwiseSwitchEntityDescription(
key="cooling_ena_switch",
translation_key="cooling_ena_switch",
name="Cooling",
icon="mdi:snowflake-thermometer",
entity_category=EntityCategory.CONFIG,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Smile switches from a config entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
entities: list[PlugwiseSwitchEntity] = []
for device_id, device in coordinator.data.devices.items():
if not (switches := device.get("switches")):
continue
for description in SWITCHES:
if description.key not in switches:
continue
entities.append(PlugwiseSwitchEntity(coordinator, device_id, description))
async_add_entities(entities)
class PlugwiseSwitchEntity(PlugwiseEntity, SwitchEntity):
"""Representation of a Plugwise plug."""
entity_description: PlugwiseSwitchEntityDescription
def __init__(
self,
coordinator: PlugwiseDataUpdateCoordinator,
device_id: str,
description: PlugwiseSwitchEntityDescription,
) -> None:
"""Set up the Plugwise API."""
super().__init__(coordinator, device_id)
self.entity_description = description
self._attr_unique_id = f"{device_id}-{description.key}"
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self.device["switches"][self.entity_description.key]
@plugwise_command
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the device on."""
await self.coordinator.api.set_switch_state(
self._dev_id,
self.device.get("members"),
self.entity_description.key,
"on",
)
@plugwise_command
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the device off."""
await self.coordinator.api.set_switch_state(
self._dev_id,
self.device.get("members"),
self.entity_description.key,
"off",
)
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
79c18c16a152bb69f091ecc1823996773ca60131 | 64ee2a144acbaf7565560aa51ba6dd7eb4f6b8b8 | /小甲鱼练习/小甲鱼练习题辅助文档/习题051/bb测试.py | 9d0394cb977db9f079b5071b42305fc611fd1d6d | [] | no_license | HimriZngz/Code | c61c5ecd9b18157089b49089f22bb7d335a6f4f5 | a7eb93ddcde88075bb2217fc285c19ca349af8d7 | refs/heads/master | 2020-09-14T01:32:19.819942 | 2020-03-24T06:16:49 | 2020-03-24T06:16:49 | 222,969,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | import Pack_a.a as a
a.x()
| [
"yingqi1561@qq.com"
] | yingqi1561@qq.com |
72d53a8968b10d535de42aabd8740462fb1ca955 | 77e5ed0d08a5187ca323a30f0d41591c38e82963 | /src/lib/reprlib.py | 827e2561295f8c01c797981d7ccbe4f1df130be6 | [
"MIT",
"Python-2.0"
] | permissive | maliaoMJ/skulpt | 9f40012dc234c58017531bc278b0753d485ea9ad | f812dc1f0d0c58855478bd8b4afbde70886a9180 | refs/heads/master | 2022-03-29T02:19:24.718295 | 2020-01-27T17:34:43 | 2020-01-27T17:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,272 | py | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
from itertools import islice
from _thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__qualname__ = getattr(user_function, '__qualname__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
if not x:
return "array('%s')" % x.typecode
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
if not x:
return 'set()'
x = _possibly_sorted(x)
return self._repr_iterable(x, level, '{', '}', self.maxset)
def repr_frozenset(self, x, level):
if not x:
return 'frozenset()'
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset({', '})',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = original_repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = original_repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = original_repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = original_repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %#x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
original_repr = repr
aRepr = Repr()
repr = aRepr.repr
| [
"acbart@vt.edu"
] | acbart@vt.edu |
a4b92b26eeaf9e77e86e6cd45ae5432c84e829cf | bc441bb06b8948288f110af63feda4e798f30225 | /artifact_sdk/api/version/get_clean_version_list_pb2.py | d08e1aead6b5fefc2296fc8e4603377aa5af8c3c | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 9,239 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: get_clean_version_list.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from artifact_sdk.model.artifact import version_pb2 as artifact__sdk_dot_model_dot_artifact_dot_version__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='get_clean_version_list.proto',
package='version',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1cget_clean_version_list.proto\x12\x07version\x1a)artifact_sdk/model/artifact/version.proto\"]\n\x1aGetCleanVersionListRequest\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x10\n\x08pageSize\x18\x02 \x01(\x05\x12\x11\n\tpackageId\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\"n\n\x1bGetCleanVersionListResponse\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x1f\n\x04list\x18\x04 \x03(\x0b\x32\x11.artifact.Version\"\x8a\x01\n\"GetCleanVersionListResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x32\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32$.version.GetCleanVersionListResponseb\x06proto3')
,
dependencies=[artifact__sdk_dot_model_dot_artifact_dot_version__pb2.DESCRIPTOR,])
_GETCLEANVERSIONLISTREQUEST = _descriptor.Descriptor(
name='GetCleanVersionListRequest',
full_name='version.GetCleanVersionListRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='version.GetCleanVersionListRequest.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pageSize', full_name='version.GetCleanVersionListRequest.pageSize', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='version.GetCleanVersionListRequest.packageId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='version.GetCleanVersionListRequest.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=177,
)
_GETCLEANVERSIONLISTRESPONSE = _descriptor.Descriptor(
name='GetCleanVersionListResponse',
full_name='version.GetCleanVersionListResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='version.GetCleanVersionListResponse.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='version.GetCleanVersionListResponse.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='version.GetCleanVersionListResponse.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='version.GetCleanVersionListResponse.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=179,
serialized_end=289,
)
_GETCLEANVERSIONLISTRESPONSEWRAPPER = _descriptor.Descriptor(
name='GetCleanVersionListResponseWrapper',
full_name='version.GetCleanVersionListResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='version.GetCleanVersionListResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='version.GetCleanVersionListResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='version.GetCleanVersionListResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='version.GetCleanVersionListResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=292,
serialized_end=430,
)
# Resolve cross-message field types now that all descriptors exist
# (protoc-generated boilerplate; do not edit by hand).
_GETCLEANVERSIONLISTRESPONSE.fields_by_name['list'].message_type = artifact__sdk_dot_model_dot_artifact_dot_version__pb2._VERSION
_GETCLEANVERSIONLISTRESPONSEWRAPPER.fields_by_name['data'].message_type = _GETCLEANVERSIONLISTRESPONSE
DESCRIPTOR.message_types_by_name['GetCleanVersionListRequest'] = _GETCLEANVERSIONLISTREQUEST
DESCRIPTOR.message_types_by_name['GetCleanVersionListResponse'] = _GETCLEANVERSIONLISTRESPONSE
DESCRIPTOR.message_types_by_name['GetCleanVersionListResponseWrapper'] = _GETCLEANVERSIONLISTRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Build the concrete message classes from their descriptors and register
# them with the default symbol database.
GetCleanVersionListRequest = _reflection.GeneratedProtocolMessageType('GetCleanVersionListRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETCLEANVERSIONLISTREQUEST,
  '__module__' : 'get_clean_version_list_pb2'
  # @@protoc_insertion_point(class_scope:version.GetCleanVersionListRequest)
  })
_sym_db.RegisterMessage(GetCleanVersionListRequest)

GetCleanVersionListResponse = _reflection.GeneratedProtocolMessageType('GetCleanVersionListResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETCLEANVERSIONLISTRESPONSE,
  '__module__' : 'get_clean_version_list_pb2'
  # @@protoc_insertion_point(class_scope:version.GetCleanVersionListResponse)
  })
_sym_db.RegisterMessage(GetCleanVersionListResponse)

GetCleanVersionListResponseWrapper = _reflection.GeneratedProtocolMessageType('GetCleanVersionListResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _GETCLEANVERSIONLISTRESPONSEWRAPPER,
  '__module__' : 'get_clean_version_list_pb2'
  # @@protoc_insertion_point(class_scope:version.GetCleanVersionListResponseWrapper)
  })
_sym_db.RegisterMessage(GetCleanVersionListResponseWrapper)
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
ff84efeb02608cd2c2eb9a7672ddf1a671751511 | 4879f75dc2cfe3e983bdec07782cb0986b61103e | /util/time_multi.py | 7020446cb4c0f7b15d391a7de194d53657f9b1aa | [
"MIT",
"CC0-1.0"
] | permissive | haosu1987/duktape | befe386c330e20c32e5bb8221a33af9c6062924d | 4c1d4bfc12f16bb389a12bbc930568afe12a71e8 | refs/heads/master | 2016-09-06T12:14:09.269999 | 2015-08-04T09:06:59 | 2015-08-04T09:06:59 | 39,752,467 | 1 | 0 | null | 2015-08-04T09:07:00 | 2015-07-27T03:07:59 | JavaScript | UTF-8 | Python | false | false | 759 | py | #!/usr/bin/python
#
# Small helper for perftest runs.
#
import os
import sys
import subprocess
def main():
    """Run a benchmark command repeatedly and print the fastest user time.

    Usage: time_multi.py <count> <cmd> <testcase>

    Executes ``time -f %U --quiet <cmd> <testcase>`` <count> times and
    prints the minimum user CPU time observed, with two decimals.  Prints
    "n/a" if any run exits non-zero or if no runs were made.
    """
    count = int(sys.argv[1])
    time_min = None
    # range (not the Py2-only xrange): works on Python 2 and 3.
    for _ in range(count):
        cmd = [
            'time',
            '-f', '%U',     # report user CPU time only
            '--quiet',
            sys.argv[2],    # cmd
            sys.argv[3]     # testcase
        ]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        retval = p.wait()
        if retval != 0:
            # Any failed run invalidates the whole measurement.
            # print() call form: the original Py2 print statement here was
            # a SyntaxError under Python 3.
            print('n/a')
            return
        duration = float(stderr)  # GNU time writes %U to stderr
        if time_min is None or duration < time_min:
            time_min = duration
    if time_min is None:
        # count == 0: nothing measured (the original crashed formatting None).
        print('n/a')
        return
    # /usr/bin/time has only two digits of resolution
    print('%.02f' % time_min)
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| [
"sami.vaarala@iki.fi"
] | sami.vaarala@iki.fi |
00aac13e36808410e6f5c29082c0af2fadedbab6 | 1bdc746ecd775dcd3cf0965deb23a9c86f826706 | /bcbiovm/shared/retriever.py | 06e7c8e895a073df4ee0e518acc7e505731e80e4 | [
"MIT"
] | permissive | bcbio/bcbio-nextgen-vm | 84b681a258ddacd8656f8cdbca7a2ec8d3ed7a0b | cd703e9d0dd5c6e25ddefc39efd9fd87ec815615 | refs/heads/master | 2021-06-08T13:32:03.854484 | 2020-06-03T14:37:01 | 2020-06-03T14:37:01 | 15,653,572 | 10 | 6 | MIT | 2020-06-03T14:37:03 | 2014-01-05T15:37:31 | Python | UTF-8 | Python | false | false | 8,045 | py | """Shared code for retrieving resources from external integrations.
"""
import os
import yaml
import six
import toolz as tz
from bcbio import utils
def get_resources(genome_build, fasta_ref, config, data, open_fn, list_fn, find_fn=None,
                  normalize_fn=None):
    """Add genome resources defined in configuration file to data object.

    Reads the "<fasta base>-resources.yaml" file next to the reference
    FASTA, rewrites any relative ("../") file references it contains to
    paths that exist in the remote store (dropping those that do not),
    then layers on auxiliary references (snpeff indices, versions file,
    viral QC reference, genome context files).

    Args (the callables abstract over the storage backend):
        genome_build: genome name (unreferenced in this body).
        fasta_ref: path/key of the reference FASTA.
        config: run configuration (unreferenced in this body).
        data: sample data dictionary; updated and returned.
        open_fn: opens a remote file, usable as a context manager.
        list_fn: lists files under a prefix.
        find_fn: optional; resolves a path to its remote form or None.
        normalize_fn: optional; canonicalizes remote paths.
    """
    resources_file = "%s-resources.yaml" % (os.path.splitext(fasta_ref)[0])
    if find_fn:
        resources_file = find_fn(resources_file)
    base_dir = os.path.dirname(resources_file)
    with open_fn(resources_file) as in_handle:
        resources = yaml.safe_load(in_handle)
    cfiles = list_fn(os.path.dirname(base_dir))
    # Iterate over list(...) copies because entries may be deleted below.
    for k1, v1 in list(resources.items()):
        if isinstance(v1, dict):
            for k2, v2 in list(v1.items()):
                if isinstance(v2, six.string_types) and v2.startswith("../"):
                    test_v2 = _normpath_remote(os.path.join(base_dir, v2), normalize_fn=normalize_fn)
                    if find_fn and find_fn(test_v2) is not None:
                        resources[k1][k2] = find_fn(test_v2)
                    elif test_v2 in cfiles:
                        resources[k1][k2] = test_v2
                    else:
                        # Referenced file is not available remotely; drop it.
                        del resources[k1][k2]
    data["genome_resources"] = _ensure_annotations(resources, cfiles, data, normalize_fn)
    data = _add_configured_indices(base_dir, cfiles, data, normalize_fn)
    data = _add_data_versions(base_dir, cfiles, data, normalize_fn)
    data = _add_viral(base_dir, cfiles, data, normalize_fn)
    return _add_genome_context(base_dir, cfiles, data, normalize_fn)
def _add_data_versions(base_dir, cfiles, data, norm_fn=None):
    """Record the "versions.csv" data-version file, if present.

    Stores the first file in *cfiles* matching versions.csv next to the
    genome directory under data["reference"]["versions"], or None when no
    such file exists.
    """
    target = _normpath_remote(os.path.join(os.path.dirname(base_dir), "versions.csv"),
                              normalize_fn=norm_fn)
    found = None
    for fname in cfiles:
        candidate = norm_fn(fname) if norm_fn else fname
        if candidate == target:
            found = fname
            break
    data["reference"]["versions"] = found
    return data
def _add_viral(base_dir, cfiles, data, norm_fn=None):
    """Attach the viral QC reference (fasta + indexes), if present.

    Any file under the sibling "viral" directory counts: the first *.fa
    file becomes the base sequence and all other files its indexes.  Sets
    data["reference"]["viral"] to None when no such files exist.
    """
    viral_prefix = _normpath_remote(os.path.join(os.path.dirname(base_dir), "viral"),
                                    normalize_fn=norm_fn)
    matches = [f for f in cfiles if f.startswith(viral_prefix)]
    if not matches:
        data["reference"]["viral"] = None
        return data
    fastas = [f for f in matches if f.endswith(".fa")]
    indexes = [f for f in matches if not f.endswith(".fa")]
    data["reference"]["viral"] = {"base": fastas[0], "indexes": indexes}
    return data
def _ensure_annotations(resources, cfiles, data, normalize_fn):
    """Point rnaseq resources at the gene BED derived from the transcript GFF.

    Mirrors functionality in bcbio.pipeline.run_info.ensure_annotations:
    when a transcripts GFF is configured and a sibling ".bed" file exists
    in *cfiles*, record it as resources["rnaseq"]["gene_bed"].
    """
    transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
    if not transcript_gff:
        return resources
    gene_bed = utils.splitext_plus(transcript_gff)[0] + ".bed"
    wanted = normalize_fn(gene_bed) if normalize_fn else gene_bed
    for fname in cfiles:
        candidate = normalize_fn(fname) if normalize_fn else fname
        if candidate == wanted:
            resources["rnaseq"]["gene_bed"] = fname
            break
    return resources
def _add_configured_indices(base_dir, cfiles, data, norm_fn=None):
    """Add additional resource indices defined in genome_resources: snpeff

    Looks first for a single packed "snpeff--<db>-wf.tar.gz" workflow
    tarball; failing that, collects the unpacked snpeff/<db>/ directory,
    with snpEffectPredictor.bin as the base file and the remaining files
    as indexes.  Leaves data untouched when no snpeff alias is configured.
    """
    snpeff_db = tz.get_in(["genome_resources", "aliases", "snpeff"], data)
    if snpeff_db:
        tarball = _normpath_remote(os.path.join(os.path.dirname(base_dir), "snpeff--%s-wf.tar.gz" % snpeff_db),
                                   normalize_fn=norm_fn)
        snpeff_files = [x for x in cfiles if tarball == (norm_fn(x) if norm_fn else x)]
        if len(snpeff_files) == 1:
            data["reference"]["snpeff"] = {snpeff_db: snpeff_files[0]}
        else:
            index_dir = _normpath_remote(os.path.join(os.path.dirname(base_dir), "snpeff", snpeff_db),
                                         normalize_fn=norm_fn)
            # Trailing slash so prefix matching does not pick up sibling
            # databases sharing a common name prefix.
            if not index_dir.endswith("/"):
                index_dir += "/"
            snpeff_files = [x for x in cfiles if x.startswith(index_dir)]
            if len(snpeff_files) > 0:
                base_files = [x for x in snpeff_files if x.endswith("/snpEffectPredictor.bin")]
                assert len(base_files) == 1, base_files
                # Remove the base file so "indexes" holds only the others.
                del snpeff_files[snpeff_files.index(base_files[0])]
                data["reference"]["snpeff"] = {"base": base_files[0], "indexes": snpeff_files}
    return data
def _add_genome_context(base_dir, cfiles, data, norm_fn=None):
    """Attach genome context files (problem region annotations), if any.

    Picks up gzipped files under coverage/problem_regions next to the
    genome directory and stores them, sorted by base file name, under
    data["reference"]["genome_context"].
    """
    context_dir = _normpath_remote(os.path.join(os.path.dirname(base_dir), "coverage", "problem_regions"),
                                   normalize_fn=norm_fn)
    found = [f for f in cfiles if f.startswith(context_dir) and f.endswith(".gz")]
    if found:
        data["reference"]["genome_context"] = sorted(found, key=os.path.basename)
    return data
def _normpath_remote(orig, normalize_fn=None):
"""Normalize a path, avoiding removing initial s3:// style keys
"""
if normalize_fn:
return os.path.normpath(normalize_fn(orig))
elif orig.find("://") > 0:
key, curpath = orig.split(":/")
return key + ":/" + os.path.normpath(curpath)
else:
return os.path.normpath(orig)
def standard_genome_refs(genome_build, aligner, ref_prefix, list_fn):
    """Retrieve standard genome references: sequence, rtg and aligner.

    For each reference directory ("seq", "rtg" and the aligner name) the
    result is one of:
      - a single packed "<dirname>...-wf.tar.gz" tarball (a dict with the
        tarball as both base and only index when dirname is the aligner);
      - a {"base": ..., "indexes": [...]} mapping when a main file
        ("/<genome_build>.fa" or "/mainIndex") is present;
      - the lone file when the directory holds exactly one file;
      - otherwise {"indexes": [...]}.
    Output keys are "fasta" for seq, "twobit" for ucsc and the directory
    name itself otherwise.

    Fix over the previous version: the list returned by *list_fn* is no
    longer mutated in place (the old code deleted the base file from it),
    so callers that reuse their listing are unaffected.
    """
    out = {}
    base_targets = ("/%s.fa" % genome_build, "/mainIndex")
    key_names = {"seq": "fasta", "ucsc": "twobit"}
    for dirname in [d for d in ["seq", "rtg", aligner] if d]:
        key = key_names.get(dirname, dirname)
        tarballs = [f for f in list_fn(ref_prefix)
                    if os.path.basename(f).startswith(dirname) and f.endswith("-wf.tar.gz")]
        if tarballs:
            assert len(tarballs) == 1, tarballs
            if dirname == aligner:
                out[key] = {"base": tarballs[0], "indexes": tarballs}
            else:
                out[key] = tarballs[0]
            continue
        dir_files = list_fn(os.path.join(ref_prefix, dirname))
        base_files = [f for f in dir_files if f.endswith(base_targets)]
        if base_files:
            assert len(base_files) == 1, base_files
            base_file = base_files[0]
            # Build a fresh index list rather than deleting from dir_files,
            # which would mutate the caller's list.
            indexes = [f for f in dir_files if f != base_file]
            out[key] = {"base": base_file, "indexes": indexes}
        elif len(dir_files) == 1:
            out[key] = dir_files[0]
        else:
            out[key] = {"indexes": dir_files}
    return out
def find_ref_prefix(genome_build, find_fn):
    """Identify reference prefix in folders for genome build.

    Probes "<build>" then "genomes/<build>" via *find_fn* and returns the
    first truthy hit; raises ValueError when neither location exists.
    """
    for candidate in (genome_build, "genomes/%s" % genome_build):
        remote_dir = find_fn(candidate)
        if remote_dir:
            return remote_dir
    raise ValueError("Did not find genome files for %s" % (genome_build))
def fill_remote(cur, find_fn, is_remote_fn):
    """Swap local-style file references for remote ones when available.

    Recursively walks lists/tuples (returned as lists) and dicts.  Any
    string that looks like a file (has an extension), does not exist
    locally and is not already remote is looked up with *find_fn* and
    replaced when a remote match is found; everything else passes through
    unchanged.
    """
    if isinstance(cur, (list, tuple)):
        return [fill_remote(item, find_fn, is_remote_fn) for item in cur]
    if isinstance(cur, dict):
        return {key: fill_remote(val, find_fn, is_remote_fn)
                for key, val in cur.items()}
    if (isinstance(cur, six.string_types) and os.path.splitext(cur)[-1]
            and not os.path.exists(cur) and not is_remote_fn(cur)):
        remote = find_fn(cur)
        return remote if remote else cur
    return cur
| [
"chapmanb@50mail.com"
] | chapmanb@50mail.com |
9180db9fbe7b19367e0b3ec313c3e0e46559ccf1 | f2c6ecbb99e8e606cd47a02045ad1bb6c881cfbd | /castjeeves/sqltables/TableLoader.py | d301f68460220e1e51f5aa8cda5a7bd17a74a8d7 | [
"BSD-3-Clause"
] | permissive | dkauf42/bayota | 36dc37f5f6fc8072567d970b1d8a379d8fb55b99 | 104ba91746231be10419390a9d7ed8f2593a21bc | refs/heads/master | 2023-04-10T18:22:56.979432 | 2021-11-17T15:54:48 | 2021-11-17T15:54:48 | 180,859,770 | 2 | 0 | NOASSERTION | 2021-11-17T15:54:49 | 2019-04-11T19:03:09 | Python | UTF-8 | Python | false | false | 2,238 | py | import pandas as pd
class TableLoader(object):
    """Expose a fixed set of pandas tables as write-once attributes.

    The permitted table names are fixed at construction.  Each table is
    attached exactly once via ``addTable`` and afterwards read back as a
    plain attribute; the permitted names are listed via ``getTblList``.
    Accessing a declared-but-unset table raises AttributeError with a
    pointer to ``addTable``.
    """

    def __init__(self, tableSet):
        # object.__setattr__ bypasses our own __setattr__, which rejects
        # direct writes to "tableSet".
        object.__setattr__(self, "tableSet", set(tableSet))

    def __getattribute__(self, attr):
        """Intercept reads: keep "tableSet" private and give helpful
        errors for declared tables that have not been added yet."""
        if attr == "tableSet":
            raise AttributeError("instance <attr>:tableSet is not directly accessible, use <method>:getTblList instead")
        # NOTE(review): "__dict__" is special-cased with a dummy fallback,
        # presumably because it can be requested before __init__ has run
        # (when "tableSet" does not exist yet) -- confirm before changing.
        if attr == '__dict__':
            try:
                tableSet = object.__getattribute__(self, "tableSet")
            except AttributeError:
                tableSet = ['', '']
        else:
            tableSet = object.__getattribute__(self, "tableSet")
        try:
            item = object.__getattribute__(self, attr)
        except AttributeError:
            if attr in tableSet:
                # Known table name that has not been populated yet.
                raise AttributeError("use <method>:addTable to add <attr>:{:s}".format(attr))
            raise AttributeError("invalid attribute specification")
        return item

    def __setattr__(self, attr, value):
        """Allow each declared table name to be assigned exactly once."""
        if attr == "tableSet":
            raise AttributeError("<attr>:tableSet cannot be changed")
        tableSet = object.__getattribute__(self, "tableSet")
        if attr not in tableSet:
            raise AttributeError("invalid attribute specification")
        # hasattr() routes through __getattribute__ above; an unset table
        # raises AttributeError there, so hasattr is False until assigned.
        if hasattr(self, attr):
            raise AttributeError("attribute has already been set and may not be changed")
        object.__setattr__(self, attr, value)

    def getTblList(self):
        """Return the permitted table names as a sorted list."""
        # sorted() accepts the set directly; no intermediate list needed.
        return sorted(object.__getattribute__(self, "tableSet"))

    def addTable(self, tblName, tbl):
        """Attach DataFrame *tbl* under the declared name *tblName*.

        Raises:
            TypeError: if *tbl* is not a pandas.DataFrame.
            AttributeError: if the name is unknown or already populated.
        """
        if not isinstance(tbl, pd.DataFrame):
            raise TypeError("<arg>:tbl should be of type pandas.DataFrame")
        # __setattr__ performs the name/uniqueness validation; the previous
        # try/except that immediately re-raised AttributeError was a no-op.
        self.__setattr__(tblName, tbl)
| [
"dkauf42@gmail.com"
] | dkauf42@gmail.com |
21c11c3fb01b55fd8c0c50c00c4455cbb7d1f743 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v11/services/services/ad_group_criterion_label_service/client.py | 22d5a89e5ec3805b8bfcfc64cdc041016290e1a3 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 22,120 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v11.services.types import (
ad_group_criterion_label_service,
)
from google.rpc import status_pb2 # type: ignore
from .transports.base import (
AdGroupCriterionLabelServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import AdGroupCriterionLabelServiceGrpcTransport
class AdGroupCriterionLabelServiceClientMeta(type):
    """Metaclass for the AdGroupCriterionLabelService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Maps transport label -> transport class; "grpc" is the only (and
    # therefore the default) registered transport.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[AdGroupCriterionLabelServiceTransport]]
    _transport_registry["grpc"] = AdGroupCriterionLabelServiceGrpcTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[AdGroupCriterionLabelServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class AdGroupCriterionLabelServiceClient(
metaclass=AdGroupCriterionLabelServiceClientMeta
):
"""Service to manage labels on ad group criteria."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupCriterionLabelServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupCriterionLabelServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> AdGroupCriterionLabelServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            AdGroupCriterionLabelServiceTransport: The transport used by the client
                instance.
        """
        # NOTE(review): self._transport is presumably assigned in __init__
        # (its tail is outside this chunk) -- confirm before relying on it.
        return self._transport
    def __enter__(self):
        # Context-manager entry: the client itself is the managed resource.
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # No value returned, so any in-flight exception propagates.
        self.transport.close()
@staticmethod
def ad_group_criterion_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Returns a fully-qualified ad_group_criterion string."""
return "customers/{customer_id}/adGroupCriteria/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_ad_group_criterion_path(path: str) -> Dict[str, str]:
"""Parses a ad_group_criterion path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupCriteria/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def ad_group_criterion_label_path(
customer_id: str, ad_group_id: str, criterion_id: str, label_id: str,
) -> str:
"""Returns a fully-qualified ad_group_criterion_label string."""
return "customers/{customer_id}/adGroupCriterionLabels/{ad_group_id}~{criterion_id}~{label_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
label_id=label_id,
)
@staticmethod
def parse_ad_group_criterion_label_path(path: str) -> Dict[str, str]:
"""Parses a ad_group_criterion_label path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupCriterionLabels/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)~(?P<label_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def label_path(customer_id: str, label_id: str,) -> str:
"""Returns a fully-qualified label string."""
return "customers/{customer_id}/labels/{label_id}".format(
customer_id=customer_id, label_id=label_id,
)
@staticmethod
def parse_label_path(path: str) -> Dict[str, str]:
"""Parses a label path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/labels/(?P<label_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[
            str, AdGroupCriterionLabelServiceTransport, None
        ] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the ad group criterion label service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, AdGroupCriterionLabelServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Accept a plain dict for convenience and normalize it (and the
        # None case) to a ClientOptions instance.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
            "true",
            "false",
        ):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        use_client_cert = (
            os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
        )
        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            # An explicitly supplied cert source wins; otherwise fall back
            # to the environment's default client certificate, if any.
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None
        # Figure out which api endpoint to use.
        # An explicit api_endpoint always takes precedence over the
        # GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                # "auto": use mTLS only when a client certificate was found.
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT
                    if is_mtls
                    else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, AdGroupCriterionLabelServiceTransport):
            # transport is a AdGroupCriterionLabelServiceTransport instance.
            # A pre-built transport already carries its own credentials and
            # scopes, so conflicting client_options are rejected here.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
    def mutate_ad_group_criterion_labels(
        self,
        request: Union[
            ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest,
            dict,
        ] = None,
        *,
        customer_id: str = None,
        operations: Sequence[
            ad_group_criterion_label_service.AdGroupCriterionLabelOperation
        ] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> ad_group_criterion_label_service.MutateAdGroupCriterionLabelsResponse:
        r"""Creates and removes ad group criterion labels. Operation
        statuses are returned.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
        `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__
        `RequestError <>`__
        Args:
            request (Union[google.ads.googleads.v11.services.types.MutateAdGroupCriterionLabelsRequest, dict]):
                The request object. Request message for
                [AdGroupCriterionLabelService.MutateAdGroupCriterionLabels][google.ads.googleads.v11.services.AdGroupCriterionLabelService.MutateAdGroupCriterionLabels].
            customer_id (str):
                Required. ID of the customer whose ad
                group criterion labels are being
                modified.
                This corresponds to the ``customer_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            operations (Sequence[google.ads.googleads.v11.services.types.AdGroupCriterionLabelOperation]):
                Required. The list of operations to
                perform on ad group criterion labels.
                This corresponds to the ``operations`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.ads.googleads.v11.services.types.MutateAdGroupCriterionLabelsResponse:
                Response message for an ad group
                criterion labels mutate.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        # The two calling conventions (request object vs. flattened
        # arguments) are mutually exclusive.
        has_flattened_params = any([customer_id, operations])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request,
            ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest,
        ):
            request = ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest(
                request
            )
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if customer_id is not None:
                request.customer_id = customer_id
            if operations is not None:
                request.operations = operations
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.mutate_ad_group_criterion_labels
        ]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("customer_id", request.customer_id),)
            ),
        )
        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,
        )
        # Done; return the response.
        return response
# Resolve the installed google-ads distribution version for the
# user-agent header; fall back to an unversioned ClientInfo when the
# package metadata cannot be found.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-ads",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("AdGroupCriterionLabelServiceClient",)
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
7aed7b9ed202249c59c62c752ec52f0d75b18f27 | f1738cd603e0b2e31143f4ebf7eba403402aecd6 | /ucs/management/univention-directory-manager-modules/scripts/proof_kerberos_deactivation | 7c56f2eaf92fa5f45cb3010e4ca890f1c778bc7b | [] | no_license | m-narayan/smart | 92f42bf90d7d2b24f61915fac8abab70dd8282bc | 1a6765deafd8679079b64dcc35f91933d37cf2dd | refs/heads/master | 2016-08-05T17:29:30.847382 | 2013-01-04T04:50:26 | 2013-01-04T04:50:26 | 7,079,786 | 8 | 6 | null | 2015-04-29T08:54:12 | 2012-12-09T14:56:27 | Python | UTF-8 | Python | false | false | 4,758 | #!/usr/bin/python2.6
# -*- coding: utf-8 -*-
#
# Univention Directory Manager Modules
# sync posix flags to kerberos flags
#
# Copyright 2004-2012 Univention GmbH
#
# http://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <http://www.gnu.org/licenses/>.
# set activation state of kerberos account to same state as posix account
# set password and account exiration date to the same value as shadowExpiry and shadowLastChange
import ldap, re, time
import univention.baseconfig
# Read the LDAP base DN from the Univention configuration registry.
baseConfig=univention.baseconfig.baseConfig()
baseConfig.load()
baseDN=baseConfig['ldap/base']
print "using baseDN",baseDN
# Bind to the local directory as cn=admin using the machine secret,
# stripping a trailing newline from the password file if present.
lo=ldap.open('localhost', 7389)
bindpw=open('/etc/ldap.secret').read()
if bindpw[-1] == '\n':
	bindpw=bindpw[0:-1]
lo.simple_bind_s("cn=admin,"+baseDN, bindpw)
count_changes = 0
warning = 0
# passwords will only be found in posixAccount
res_pA=lo.search_s(baseDN, ldap.SCOPE_SUBTREE, 'objectClass=posixAccount')
for i in range(0,len(res_pA)):
	dn_pA=res_pA[i][0]
	print dn_pA
	if res_pA[i][1].has_key('objectClass'):
		if 'krb5KDCEntry' in res_pA[i][1]['objectClass']:
			if res_pA[i][1].has_key('userPassword'):
				# A "{crypt}!..." hash marks a disabled posix account.
				_re = re.compile('^\{crypt\}!.*$')
				disabled = _re.match(res_pA[i][1]['userPassword'][0])
				if res_pA[i][1].has_key('krb5KDCFlags'):
					# Mirror the posix state into the kerberos flags:
					# '254' is written for disabled, '126' for enabled.
					if disabled and not res_pA[i][1]['krb5KDCFlags'][0] == '254':
						modlist = [(ldap.MOD_REPLACE,'krb5KDCFlags','254')]
						lo.modify_s(dn_pA,modlist)
						print " - kerberos disabled"
					elif not disabled and not res_pA[i][1]['krb5KDCFlags'][0] == '126':
						modlist = [(ldap.MOD_REPLACE,'krb5KDCFlags','126')]
						lo.modify_s(dn_pA,modlist)
						print " - kerberos enabled"
					else:
						print " - enable/disable OK"
				else:
					# No krb5KDCFlags attribute yet: add it for the first time.
					if disabled:
						modlist = [(ldap.MOD_ADD,'krb5KDCFlags','254')]
						lo.modify_s(dn_pA,modlist)
						print " - kerberos initial disabled"
					else:
						modlist = [(ldap.MOD_ADD,'krb5KDCFlags','126')]
						lo.modify_s(dn_pA,modlist)
						print " - kerberos initial enabled"
			else:
				print " - user password not set"
			# shadowExpire is a day count since the epoch; convert it to the
			# generalized-time string kerberos expects for krb5ValidEnd.
			if res_pA[i][1].has_key('shadowExpire') and res_pA[i][1]['shadowExpire'][0]:
				userexpiry=time.strftime("%d.%m.%y",time.gmtime((long(res_pA[i][1]['shadowExpire'][0]))*3600*24))
				krb5ValidEnd="%s" % "20"+userexpiry[6:8]+userexpiry[3:5]+userexpiry[0:2]+"000000Z"
				if not res_pA[i][1].has_key('krb5ValidEnd'):
					modlist = [(ldap.MOD_ADD,'krb5ValidEnd',krb5ValidEnd)]
					lo.modify_s(dn_pA,modlist)
					print " - kerberos expiry initial set"
				elif not res_pA[i][1]['krb5ValidEnd'][0] == krb5ValidEnd:
					modlist = [(ldap.MOD_REPLACE,'krb5ValidEnd',krb5ValidEnd)]
					lo.modify_s(dn_pA,modlist)
					print " - kerberos expiry set"
				else:
					print " - kerberos expiry OK"
			else:
				print " - account expire not set"
			# Password expiry = last change day + maximum age, converted the
			# same way for krb5PasswordEnd.
			if res_pA[i][1].has_key('shadowLastChange') and res_pA[i][1].has_key('shadowMax'):
				passwordexpiry=time.strftime("%d.%m.%y",time.gmtime((long(res_pA[i][1]['shadowLastChange'][0])+long(res_pA[i][1]['shadowMax'][0]))*3600*24))
				krb5PasswordEnd="%s" % "20"+passwordexpiry[6:8]+passwordexpiry[3:5]+passwordexpiry[0:2]+"000000Z"
				if not res_pA[i][1].has_key('krb5PasswordEnd'):
					modlist = [(ldap.MOD_ADD,'krb5PasswordEnd',krb5PasswordEnd)]
					lo.modify_s(dn_pA,modlist)
					print "kerberos password end initial set"
				elif not res_pA[i][1]['krb5PasswordEnd'][0] == krb5PasswordEnd:
					modlist = [(ldap.MOD_REPLACE,'krb5PasswordEnd',krb5PasswordEnd)]
					lo.modify_s(dn_pA,modlist)
					print " - kerberos password end set"
				else:
					print " - kerberos password end OK"
			else:
				print " - Password expire not set"
		else:
			print " - no kerberos account"
	else:
		print " - WARNING: no key objectClass found !"
| [
"kartik@debian.org"
] | kartik@debian.org | |
561784c981e82de125085710eae2d409b6574fe5 | 3529ecaa44a53172094ba13498097057c8972723 | /Questiondir/761.employee-free-time/761.employee-free-time_134959127.py | 4ab403cf8042979a9028ef45e7132ecc3206df1a | [] | no_license | cczhong11/Leetcode-contest-code-downloader | 0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6 | db64a67869aae4f0e55e78b65a7e04f5bc2e671c | refs/heads/master | 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | # Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
    def employeeFreeTime(self, avails):
        """Return the common free intervals across all employees' schedules.

        :type avails: List[List[Interval]] - each inner list is one
            employee's busy intervals, sorted by start.
        :rtype: list of (start, end) tuples describing gaps during which
            every employee is free (finite gaps only).
        """
        import heapq  # local import: this module has no top-level imports
        # Min-heap of (next start, tie-break index, remaining intervals).
        # The index guarantees heap comparisons never fall through to the
        # Interval objects themselves, which are not orderable - the
        # original (start, list) entries raised TypeError in Python 3
        # whenever two employees' next intervals shared a start time.
        schedule = []
        for idx, lst in enumerate(avails):
            if lst:
                schedule.append((lst[0].start, idx, lst))
        heapq.heapify(schedule)
        freetimes = []
        lastend = 0
        if schedule:
            # Start tracking from the earliest busy interval.
            lastend = schedule[0][0]
        while schedule:
            newstart, idx, newlist = heapq.heappop(schedule)
            if newstart > lastend:
                # Gap between the merged busy span and the next interval.
                freetimes.append((lastend, newstart))
            lastsch = newlist.pop(0)
            lastend = max(lastend, lastsch.end)
            if newlist:
                heapq.heappush(schedule, (newlist[0].start, idx, newlist))
        return freetimes
"tczhong24@gmail.com"
] | tczhong24@gmail.com |
3a63964aaa86ddd207c4953e4157339d539b2660 | e121dcc5d23e225891420e730549b9cc7ebe8e88 | /python/lib/direct/extensions/NurbsSurfaceEvaluator.py | 7cdf5e0a01a26643840d05f6d9cb94eaf7642328 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | PlumpMath/panda3d-3 | 4f4cf7627eddae9b7f30795e0a0657b01fdf670d | 5c0be0e1cd46b422d28d5b81ffb1e8b28c3ac914 | refs/heads/master | 2021-01-25T06:55:36.209044 | 2014-09-29T14:24:53 | 2014-09-29T14:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py |
"""
NurbsSurfaceEvaluator-extensions module: contains methods to extend
functionality of the NurbsSurfaceEvaluator class
"""
def getUKnots(self):
    """Returns the U knot vector as a Python list of floats"""
    return [self.getUKnot(i) for i in range(self.getNumUKnots())]
def getVKnots(self):
    """Returns the V knot vector as a Python list of floats"""
    return [self.getVKnot(i) for i in range(self.getNumVKnots())]
def getVertices(self, relTo = None):
    """Returns the vertices as a 2-d Python list of Vec4's, relative
    to the indicated space if given."""
    rows = []
    for ui in range(self.getNumUVertices()):
        if relTo:
            row = [self.getVertex(ui, vi, relTo)
                   for vi in range(self.getNumVVertices())]
        else:
            row = [self.getVertex(ui, vi)
                   for vi in range(self.getNumVVertices())]
        rows.append(row)
    return rows
| [
"ralf.kaestner@gmail.com"
] | ralf.kaestner@gmail.com |
ceda5f266f4d61d019be258bfcb20c68883dd20a | 648f5af4f4e95b0f7ad4943254abcacfe520c685 | /Labs_4/Scripting labs/cast_list.py | b1643ca7af1b9d7e220d663f19e3e835548a1647 | [
"MIT"
] | permissive | damiso15/we_japa_data_science_lab | 64a1ccbcff10554505dc55172991a9ed920f1295 | ada2a358753e1f1db087d410808524e7546284f6 | refs/heads/master | 2022-11-30T12:57:55.920142 | 2020-08-13T21:20:01 | 2020-08-13T21:20:01 | 283,690,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # You're going to create a list of the actors who appeared in the television programme Monty
# Python's Flying Circus.
# Write a function called create_cast_list that takes a filename as input and returns a list of
# actors' names. It will be run on the file flying_circus_cast.txt (this information was collected
# from imdb.com). Each line of that file consists of an actor's name, a comma, and then some
# (messy) information about roles they played in the programme. You'll need to extract only the
# name and add it to a list. You might use the .split() method to process each line.
# def create_cast_list(filename):
def create_cast_list(filename):
    """Return the list of actor names found in *filename*.

    Each line is expected to look like "Name, role information"; only the
    text before the first comma is kept.
    """
    with open(filename, 'r') as file:
        return [line.split(',')[0] for line in file.readlines()]
# Script body: extract and print the cast from the bundled data file.
cast_list = create_cast_list('flying_circus_cast.txt')
print(cast_list)
| [
"damiso15@yahoo.com"
] | damiso15@yahoo.com |
28a7e27fedcf4fc6f018822c08f203e5a447e126 | c85aede0797e73dd719646a0f7671594b0d4e4e9 | /docs/support/pcsv_example_2.py | 80e6f2247fa5e47cd8f9fa880a93077fd2edcb53 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mtiid/putil | c0493535ed5ee7694546ee9193cad0a764c440fc | a99c84ee781aa9eb6e45272f95b82ac35648ba4b | refs/heads/master | 2021-01-18T09:05:50.437577 | 2016-01-20T16:01:12 | 2016-01-20T16:01:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | # pcsv_example_2.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0410,W0104
import putil.misc, putil.pcsv
def main():
    """Demonstrate putil.pcsv.replace using three temporary CSV files."""
    tmpfile = putil.misc.TmpFile
    with tmpfile() as ifname, tmpfile() as rfname, tmpfile() as ofname:
        # First (input) data file.
        input_data = [
            ['Item', 'Cost'],
            [1, 9.99],
            [2, 10000],
            [3, 0.10]
        ]
        putil.pcsv.write(ifname, input_data, append=False)
        # Second (replacement) data file.
        replacement_data = [
            ['Staff', 'Rate', 'Days'],
            ['Joe', 10, 'Sunday'],
            ['Sue', 20, 'Thursday'],
            ['Pat', 15, 'Tuesday']
        ]
        putil.pcsv.write(rfname, replacement_data, append=False)
        # Replace the "Cost" column of the input file for "Items" 1 and 3
        # with the "Rate" values of Joe's and Pat's "Staff" rows from the
        # replacement file, saving the result to a third file.
        putil.pcsv.replace(
            ifname=ifname,
            idfilter=('Cost', {'Item': [1, 3]}),
            rfname=rfname,
            rdfilter=('Rate', {'Staff': ['Joe', 'Pat']}),
            ofname=ofname
        )
        # Confirm the resulting file holds the expected data.
        ref_data = [
            ['Item', 'Cost'],
            [1, 10],
            [2, 10000],
            [3, 15]
        ]
        obj = putil.pcsv.CsvFile(ofname)
        assert obj.header() == ref_data[0]
        assert obj.data() == ref_data[1:]
# Run the demonstration only when executed as a script.
if __name__ == '__main__':
    main()
| [
"pmasdev@gmail.com"
] | pmasdev@gmail.com |
2904bf6014e3068e49cc729e4052857b4387ca52 | d99ac626d62c663704444a9cce7e7fc793a9e75e | /crypto_implementations/virgil-crypto-c/wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_hybrid_private_key.py | 8947131a381161d91493cb49fb7178371e71ba0a | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Experiment5X/CryptoFunctionDetection | 3ab32d5573a249d24db1faf772721bc80b8d905d | dac700193e7e84963943593e36844b173211a8a1 | refs/heads/master | 2023-04-19T09:12:35.828268 | 2021-05-13T22:39:27 | 2021-05-13T22:39:27 | 355,299,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,617 | py | # Copyright (C) 2015-2020 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
class vscf_hybrid_private_key_t(Structure):
    # Opaque handle for the C `vscf_hybrid_private_key_t` context; no
    # fields are declared because it is only ever passed around by pointer.
    pass
class VscfHybridPrivateKey(object):
    """Handles a hybrid private key.
    The hybrid private key contains 2 private keys."""
    def __init__(self):
        """Create underlying C context."""
        # Bind to the shared "foundation" library; every method below
        # configures argtypes/restype on a function from this library and
        # forwards the call.
        self._ll = LowLevelLibs()
        self._lib = self._ll.foundation
    def vscf_hybrid_private_key_new(self):
        """Allocate and return a pointer to a new C context."""
        vscf_hybrid_private_key_new = self._lib.vscf_hybrid_private_key_new
        vscf_hybrid_private_key_new.argtypes = []
        vscf_hybrid_private_key_new.restype = POINTER(vscf_hybrid_private_key_t)
        return vscf_hybrid_private_key_new()
    def vscf_hybrid_private_key_delete(self, ctx):
        """Destroy the given C context."""
        vscf_hybrid_private_key_delete = self._lib.vscf_hybrid_private_key_delete
        vscf_hybrid_private_key_delete.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_delete.restype = None
        return vscf_hybrid_private_key_delete(ctx)
    def vscf_hybrid_private_key_alg_id(self, ctx):
        """Algorithm identifier the key belongs to."""
        vscf_hybrid_private_key_alg_id = self._lib.vscf_hybrid_private_key_alg_id
        vscf_hybrid_private_key_alg_id.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_alg_id.restype = c_int
        return vscf_hybrid_private_key_alg_id(ctx)
    def vscf_hybrid_private_key_alg_info(self, ctx):
        """Return algorithm information that can be used for serialization."""
        vscf_hybrid_private_key_alg_info = self._lib.vscf_hybrid_private_key_alg_info
        vscf_hybrid_private_key_alg_info.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_alg_info.restype = POINTER(vscf_impl_t)
        return vscf_hybrid_private_key_alg_info(ctx)
    def vscf_hybrid_private_key_len(self, ctx):
        """Length of the key in bytes."""
        vscf_hybrid_private_key_len = self._lib.vscf_hybrid_private_key_len
        vscf_hybrid_private_key_len.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_len.restype = c_size_t
        return vscf_hybrid_private_key_len(ctx)
    def vscf_hybrid_private_key_bitlen(self, ctx):
        """Length of the key in bits."""
        vscf_hybrid_private_key_bitlen = self._lib.vscf_hybrid_private_key_bitlen
        vscf_hybrid_private_key_bitlen.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_bitlen.restype = c_size_t
        return vscf_hybrid_private_key_bitlen(ctx)
    def vscf_hybrid_private_key_is_valid(self, ctx):
        """Check that key is valid.
        Note, this operation can be slow."""
        vscf_hybrid_private_key_is_valid = self._lib.vscf_hybrid_private_key_is_valid
        vscf_hybrid_private_key_is_valid.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_is_valid.restype = c_bool
        return vscf_hybrid_private_key_is_valid(ctx)
    def vscf_hybrid_private_key_extract_public_key(self, ctx):
        """Extract public key from the private key."""
        vscf_hybrid_private_key_extract_public_key = self._lib.vscf_hybrid_private_key_extract_public_key
        vscf_hybrid_private_key_extract_public_key.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_extract_public_key.restype = POINTER(vscf_impl_t)
        return vscf_hybrid_private_key_extract_public_key(ctx)
    def vscf_hybrid_private_key_first_key(self, ctx):
        """Return first private key."""
        vscf_hybrid_private_key_first_key = self._lib.vscf_hybrid_private_key_first_key
        vscf_hybrid_private_key_first_key.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_first_key.restype = POINTER(vscf_impl_t)
        return vscf_hybrid_private_key_first_key(ctx)
    def vscf_hybrid_private_key_second_key(self, ctx):
        """Return second private key."""
        vscf_hybrid_private_key_second_key = self._lib.vscf_hybrid_private_key_second_key
        vscf_hybrid_private_key_second_key.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_second_key.restype = POINTER(vscf_impl_t)
        return vscf_hybrid_private_key_second_key(ctx)
    def vscf_hybrid_private_key_shallow_copy(self, ctx):
        """Proxy to the C `vscf_hybrid_private_key_shallow_copy` function."""
        vscf_hybrid_private_key_shallow_copy = self._lib.vscf_hybrid_private_key_shallow_copy
        vscf_hybrid_private_key_shallow_copy.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_shallow_copy.restype = POINTER(vscf_hybrid_private_key_t)
        return vscf_hybrid_private_key_shallow_copy(ctx)
    def vscf_hybrid_private_key_impl(self, ctx):
        """Proxy to the C `vscf_hybrid_private_key_impl` function."""
        vscf_hybrid_private_key_impl = self._lib.vscf_hybrid_private_key_impl
        vscf_hybrid_private_key_impl.argtypes = [POINTER(vscf_hybrid_private_key_t)]
        vscf_hybrid_private_key_impl.restype = POINTER(vscf_impl_t)
        return vscf_hybrid_private_key_impl(ctx)
| [
"xmeadamx@gmail.com"
] | xmeadamx@gmail.com |
fdbd8ab6dd8434081f22a6b4b6f184edcf9ca47e | 95d34e34c000710b000f094b0b51f91c9637f928 | /.circleci/cimodel/data/pytorch_build_data.py | ba5510a9c2558b934a4f456535c9cb5d555041be | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | kerbowa/pytorch | ead4f726b1cbf3646755dbf5ac2bf9ec3cc74be2 | 655960460ccca936fa5c06df6bbafd25b5582115 | refs/heads/master | 2023-08-14T22:30:56.911535 | 2021-10-06T05:48:42 | 2021-10-06T05:48:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,574 | py | from cimodel.lib.conf_tree import ConfigNode, X, XImportant
# Nested (key, subtree) pairs consumed by the *ConfigNode classes below to
# expand the CI build matrix: distro -> compiler -> version -> python
# version -> experimental feature flags.
CONFIG_TREE_DATA = [
    ("xenial", [
        ("gcc", [
            ("5.4", [  # All this subtree rebases to master and then build
                ("3.6", [
                    ("important", [X(True)]),
                ]),
            ]),
            # TODO: bring back libtorch test
            ("7", [X("3.6")]),
        ]),
        ("clang", [
            ("7", [
                ("3.6", [
                    ("asan", [
                        (True, [
                            ("shard_test", [XImportant(True)]),
                        ]),
                    ]),
                    ("onnx", [XImportant(True)]),
                ]),
            ]),
        ]),
        ("cuda", [
            ("10.2", [
                ("3.6", [
                    # Build are needed for slow_gradcheck
                    ('build_only', [X(True)]),
                    ("slow_gradcheck", [
                        # If you update this slow gradcheck, you should
                        # also update docker_definitions.py to make sure
                        # the docker image match the config used here
                        (True, [
                            ('shard_test', [XImportant(True)]),
                        ]),
                    ]),
                    # UNCOMMENT THE BELOW TO REENABLE LIBTORCH
                    # ("libtorch", [
                    #     (True, [
                    #         ('build_only', [X(True)]),
                    #     ]),
                    # ]),
                ]),
            ]),
        ]),
    ]),
    ("bionic", [
        ("clang", [
            ("9", [
                ("3.6", [
                    ("xla", [XImportant(True)]),
                    ("vulkan", [XImportant(True)]),
                ]),
            ]),
        ]),
        # @jithunnair-amd believes Jenkins builds are sufficient
        # ("rocm", [
        #     ("3.9", [
        #         ("3.6", [
        #             ('build_only', [XImportant(True)]),
        #         ]),
        #     ]),
        # ]),
    ]),
]
def get_major_pyver(dotted_version):
    """Map a dotted version such as "3.6" to its major tag, e.g. "py3"."""
    major, _, _ = dotted_version.partition(".")
    return "py" + major
class TreeConfigNode(ConfigNode):
    """Base node of the build-config tree: stores its subtree and expands
    children through the subclass-provided child_constructor()."""
    def __init__(self, parent, node_name, subtree):
        super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
        self.subtree = subtree
        self.init2(node_name)
    def modify_label(self, label):
        # Hook: subclasses override to decorate the displayed label.
        return label
    def init2(self, node_name):
        # Hook: subclasses override to record per-node properties.
        pass
    def get_children(self):
        # Every (key, subtree) pair becomes a child of the subclass-chosen type.
        return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]
class TopLevelNode(TreeConfigNode):
    """Root of the config tree; has no parent and fans out to distros."""
    def __init__(self, node_name, subtree):
        super(TopLevelNode, self).__init__(None, node_name, subtree)
    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return DistroConfigNode
class DistroConfigNode(TreeConfigNode):
    """Records the distro name and dispatches to the matching compiler node."""
    def init2(self, node_name):
        self.props["distro_name"] = node_name
    def child_constructor(self):
        distro = self.find_prop("distro_name")
        # KeyError here means a distro was added to CONFIG_TREE_DATA
        # without a corresponding compiler node type.
        next_nodes = {
            "xenial": XenialCompilerConfigNode,
            "bionic": BionicCompilerConfigNode,
        }
        return next_nodes[distro]
class PyVerConfigNode(TreeConfigNode):
    """Records the python version plus its abbreviated "pyX" form."""
    def init2(self, node_name):
        self.props["pyver"] = node_name
        self.props["abbreviated_pyver"] = get_major_pyver(node_name)
        # 3.9 keeps the full version in its abbreviation, unlike other versions.
        if node_name == "3.9":
            self.props["abbreviated_pyver"] = "py3.9"
    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return ExperimentalFeatureConfigNode
class ExperimentalFeatureConfigNode(TreeConfigNode):
    """Dispatches a feature key (asan, xla, ...) to its dedicated node type."""
    def init2(self, node_name):
        self.props["experimental_feature"] = node_name
    def child_constructor(self):
        experimental_feature = self.find_prop("experimental_feature")
        next_nodes = {
            "asan": AsanConfigNode,
            "xla": XlaConfigNode,
            "mlc": MLCConfigNode,
            "vulkan": VulkanConfigNode,
            "parallel_tbb": ParallelTBBConfigNode,
            "noarch": NoarchConfigNode,
            "parallel_native": ParallelNativeConfigNode,
            "onnx": ONNXConfigNode,
            "libtorch": LibTorchConfigNode,
            "important": ImportantConfigNode,
            "build_only": BuildOnlyConfigNode,
            "shard_test": ShardTestConfigNode,
            "cuda_gcc_override": CudaGccOverrideConfigNode,
            "pure_torch": PureTorchConfigNode,
            "slow_gradcheck": SlowGradcheckConfigNode,
        }
        return next_nodes[experimental_feature]
class SlowGradcheckConfigNode(TreeConfigNode):
    """Flags the config as a slow-gradcheck build."""
    def init2(self, node_name):
        self.props["is_slow_gradcheck"] = True
    def child_constructor(self):
        return ExperimentalFeatureConfigNode
class PureTorchConfigNode(TreeConfigNode):
    """Flags a pure-torch config (label "PURE_TORCH=<val>")."""
    def modify_label(self, label):
        return "PURE_TORCH=" + str(label)
    def init2(self, node_name):
        self.props["is_pure_torch"] = node_name
    def child_constructor(self):
        return ImportantConfigNode
class XlaConfigNode(TreeConfigNode):
    """Flags an XLA config (label "XLA=<val>")."""
    def modify_label(self, label):
        return "XLA=" + str(label)
    def init2(self, node_name):
        self.props["is_xla"] = node_name
    def child_constructor(self):
        return ImportantConfigNode
class MLCConfigNode(TreeConfigNode):
    """Flags an MLC config (label "MLC=<val>")."""
    def modify_label(self, label):
        return "MLC=" + str(label)
    def init2(self, node_name):
        self.props["is_mlc"] = node_name
    def child_constructor(self):
        return ImportantConfigNode
class AsanConfigNode(TreeConfigNode):
    """Flags an ASAN config (label "Asan=<val>")."""
    def modify_label(self, label):
        return "Asan=" + str(label)
    def init2(self, node_name):
        self.props["is_asan"] = node_name
    def child_constructor(self):
        return ExperimentalFeatureConfigNode
class ONNXConfigNode(TreeConfigNode):
    """Flags an ONNX config (label "Onnx=<val>")."""
    def modify_label(self, label):
        return "Onnx=" + str(label)
    def init2(self, node_name):
        self.props["is_onnx"] = node_name
    def child_constructor(self):
        return ImportantConfigNode
class VulkanConfigNode(TreeConfigNode):
    """Flags a Vulkan config (label "Vulkan=<val>")."""
    def modify_label(self, label):
        return "Vulkan=" + str(label)
    def init2(self, node_name):
        self.props["is_vulkan"] = node_name
    def child_constructor(self):
        return ImportantConfigNode
class ParallelTBBConfigNode(TreeConfigNode):
    """Selects the TBB parallel backend (label "PARALLELTBB=<val>")."""
    def modify_label(self, label):
        return "PARALLELTBB=" + str(label)
    def init2(self, node_name):
        self.props["parallel_backend"] = "paralleltbb"
    def child_constructor(self):
        return ImportantConfigNode
class NoarchConfigNode(TreeConfigNode):
    """Flags a noarch config."""
    def init2(self, node_name):
        self.props["is_noarch"] = node_name
    def child_constructor(self):
        return ImportantConfigNode
class ParallelNativeConfigNode(TreeConfigNode):
    """Selects the native parallel backend (label "PARALLELNATIVE=<val>")."""
    def modify_label(self, label):
        return "PARALLELNATIVE=" + str(label)
    def init2(self, node_name):
        self.props["parallel_backend"] = "parallelnative"
    def child_constructor(self):
        return ImportantConfigNode
class LibTorchConfigNode(TreeConfigNode):
    """Flags a libtorch config (label "BUILD_TEST_LIBTORCH=<val>")."""
    def modify_label(self, label):
        return "BUILD_TEST_LIBTORCH=" + str(label)
    def init2(self, node_name):
        self.props["is_libtorch"] = node_name
    def child_constructor(self):
        return ExperimentalFeatureConfigNode
class CudaGccOverrideConfigNode(TreeConfigNode):
    """Records a gcc override for CUDA builds."""
    def init2(self, node_name):
        self.props["cuda_gcc_override"] = node_name
    def child_constructor(self):
        return ExperimentalFeatureConfigNode
class BuildOnlyConfigNode(TreeConfigNode):
    """Marks a config as build-only (no test jobs)."""
    def init2(self, node_name):
        self.props["build_only"] = node_name
    def child_constructor(self):
        return ExperimentalFeatureConfigNode
class ShardTestConfigNode(TreeConfigNode):
    """Marks a config whose test jobs are sharded."""
    def init2(self, node_name):
        self.props["shard_test"] = node_name
    def child_constructor(self):
        return ImportantConfigNode
class ImportantConfigNode(TreeConfigNode):
    """Leaf node: records the "important" flag and ends the tree."""
    def modify_label(self, label):
        return "IMPORTANT=" + str(label)
    def init2(self, node_name):
        self.props["is_important"] = node_name
    def get_children(self):
        # Leaves have no children.
        return []
class XenialCompilerConfigNode(TreeConfigNode):
    """Compiler choice on xenial; falls back to "<unspecified>" labels."""
    def modify_label(self, label):
        return label or "<unspecified>"
    def init2(self, node_name):
        self.props["compiler_name"] = node_name
    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        # With no compiler name, skip straight to python-version children.
        return XenialCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class BionicCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return BionicCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class XenialCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
class BionicCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
ca04af47cfd93b5d7ecf1ff6e78f5cbdec860b7e | d485ac12220d6febfe383bde45d55b3160cdc930 | /info_system/migrations/0009_auto_20170130_2303.py | 9f8f2f82e021335ef285819d5d7a7138f7db4229 | [] | no_license | argon2008-aiti/lcidarkuman | 03ef2b2c200ca21b57f7b8089976c8b3a1c03612 | 3e54fffdf9605edd87e7bfce134d0c5203dc72a9 | refs/heads/master | 2021-01-13T03:09:32.388512 | 2019-05-19T14:16:48 | 2019-05-19T14:16:48 | 77,407,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('info_system', '0008_auto_20170127_2125'),
]
operations = [
migrations.AlterField(
model_name='member',
name='date_joined',
field=models.DateField(default=datetime.date(2017, 1, 30)),
),
migrations.AlterField(
model_name='member',
name='date_of_birth',
field=models.DateField(default=datetime.date(2017, 1, 30)),
),
migrations.AlterField(
model_name='member',
name='profile',
field=models.ImageField(default=b'static/profiles/banner1.png', upload_to=b'static/profiles/', blank=True),
),
]
| [
"yunguta@gmail.com"
] | yunguta@gmail.com |
c3359088dd949acb690856b2887ae91fd9aee11e | 9a8d9125009e24b883c6883030c9cffa31d46ad6 | /vim.py | 5a787256dc52786928b71f55af4bbf654796aaaa | [
"MIT"
] | permissive | liveforeverx/actualvim | f2c43fa56bde6eab53babca082d707f324012fec | 694f7b6ce13fe551c054d9765650c5de17a536ec | refs/heads/master | 2021-01-22T00:19:40.917273 | 2013-11-07T01:32:00 | 2013-11-07T01:32:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,264 | py | #!/usr/bin/env python2
# vim.py
# launches and manages a headless vim instance
import itertools
import os
import pty
import select
import socket
import sublime
import subprocess
import threading
from .edit import Edit
from .term import VT100
VISUAL_MODES = ('V', 'v', '^V', '\x16')
replace = [
('\\', '\\\\'),
('"', '\\"'),
('\n', '\\n'),
('\r', '\\r'),
('\t', '\\t'),
]
def encode(s, t=None):
types = [
(str, 'string'),
((int, float), 'number'),
(bool, 'boolean'),
]
if t is None:
for typ, b in types:
if isinstance(s, typ):
t = b
break
else:
return ''
if t == 'string':
for a, b in replace:
s = s.replace(a, b)
return '"' + s + '"'
elif t == 'number':
return str(s)
elif t == 'boolean':
return 'T' if s else 'F'
elif t == 'color':
if isinstance(s, (int, float)) or s:
return str(s)
else:
return encode('none')
def decode(s, t=None):
if t is None:
if s.startswith('"'):
t = 'string'
elif s.replace('.', '', 1).isdigit():
t = 'number'
elif s in 'TF':
t = 'boolean'
else:
return s
if t == 'string':
s = s[1:-1]
lookup = {r[1]: r[0] for r in replace}
i = 0
while i < len(s) - 1:
cur = s[i:i+2]
if cur in lookup:
rep = lookup[cur]
s = s[:i] + rep + s[i+2:]
i += len(rep)
continue
i += 1
return s
elif t == 'number':
return float(s)
elif t == 'boolean':
return True if s == 'T' else False
else:
return s
class VimSocket:
def __init__(self, vim, view, callback=None):
self.vim = vim
self.view = view
self.server = socket.socket()
self.server.bind(('localhost', 0))
self.server.listen(1)
self.client = None
self.extra = ''
self.port = self.server.getsockname()[1]
self.serial = itertools.count(start=2)
self.callbacks = {}
self.callback = callback
self.preload = []
def spawn(self):
threading.Thread(target=self.loop).start()
def active(self):
return self.view.buffer_id() != 0 and self.server.fileno() >= 0
def handle(self, data):
view = self.view
data = self.extra + data
commands = data.split('\n')
self.extra = commands.pop()
edits = []
for cmd in commands:
if ':' in cmd:
buf, cmd = cmd.split(':', 1)
cmd, args = cmd.split('=', 1)
if ' ' in args:
seq, args = args.split(' ', 1)
else:
seq, args = args, None
seq = int(seq)
if cmd == 'insert':
pos, text = args.split(' ', 1)
text = decode(text, 'string')
pos = decode(pos)
edits.append(('insert', pos, text))
elif cmd == 'remove':
pos, length = args.split(' ', 1)
pos, length = int(pos), int(length)
if length > 0:
edits.append(('erase', sublime.Region(pos, pos+length)))
elif cmd == 'disconnect':
view.set_scratch(True)
raise socket.error
else:
if ' ' in cmd:
seq, cmd = cmd.split(' ', 1)
else:
seq, cmd = cmd, ''
if seq.isdigit():
seq = int(seq)
callback = self.callbacks.pop(seq, None)
if callback:
callback(cmd)
if edits:
def cursor(args):
buf, lnum, col, off = [int(a) for a in args.split(' ')]
with Edit(view) as edit:
for args in edits:
edit.step(*args)
edit.reselect(off)
self.callback(self.vim)
self.get_cursor(cursor)
def send(self, data):
if not self.client:
self.preload.append(data)
return
try:
data = (data + '\r\n').encode('utf8')
self.client.send(data)
except socket.error:
self.close()
def close(self, disconnect=False):
self.view.close()
if self.client:
if disconnect:
self.send('1:disconnect!1')
self.client.close()
def loop(self):
sockets = [self.server]
try:
while self.active():
try:
ready, _, _ = select.select(sockets, [], [], 0.1)
except ValueError:
raise socket.error
if not self.client:
if self.server in ready:
print('client connection')
self.client, addr = self.server.accept()
sockets = [self.client]
self.send('1:create!1')
for line in self.preload:
self.send(line)
else:
continue
elif self.client in ready:
# we're willing to wait up to 1/120 of a second
# for a delete following an erase
# this and a big buffer prevent flickering.
data = self.client.recv(102400).decode('utf8')
if 'remove' in data and not 'insert' in data:
more, _, _ = select.select([self.client], [], [], 1.0 / 120)
if more:
data += self.client.recv(102400).decode('utf8')
# print('data:', data)
if data:
self.handle(data)
else:
break
except socket.error:
pass
finally:
self.close(disconnect=True)
def cmd(self, buf, name, *args, **kwargs):
seq = kwargs.get('seq', 1)
sep = kwargs.get('sep', '!')
cmd = '{}:{}{}{}'.format(buf, name, sep, seq)
if args is not None:
cmd += ' ' + ' '.join(encode(a) for a in args)
self.send(cmd)
def func(self, *args, **kwargs):
return self.cmd(*args, sep='/', **kwargs)
def add_callback(self, callback):
if not callback:
return None
serial = next(self.serial)
self.callbacks[serial] = callback
return serial
def get_cursor(self, callback):
serial = self.add_callback(callback)
self.func('1', 'getCursor', seq=serial)
def set_cursor(self, offset, callback=None):
serial = self.add_callback(callback)
self.cmd('1', 'setDot', offset, seq=serial)
def insert(self, offset, text):
self.func('1', 'insert', offset, str(text or ''))
def init_done(self):
self.cmd('1', 'initDone')
class Vim:
DEFAULT_CMD = ('vim',)
@property
def vimrc(self):
return (
'--cmd', 'set fileformat=unix',
'--cmd', 'set lines={} columns={}'.format(self.rows, self.cols),
'--cmd', '''set statusline=%{printf(\\"%d+%d,%s,%d+%d\\",line(\\".\\"),col(\\".\\"),mode(),line(\\"v\\"),col(\\"v\\"))}''',
'--cmd', 'set laststatus=2',
'--cmd', 'set shortmess=aoOtTWAI',
)
def __init__(self, view, rows=24, cols=80, monitor=None, cmd=None, update=None, modify=None):
self.view = view
self.monitor = monitor
self.rows = rows
self.cols = cols
self.cmd = cmd or self.DEFAULT_CMD
self.update_callback = update
self.modify_callback = modify
self.proc = None
self.input = None
self.output = None
self.row = self.col = 0
self.mode = 'n'
self.visual = (0, 0)
self.visual_selected = False
self.panel = None
self.tty = None
self.__serve()
self.__spawn()
def __spawn(self):
master, slave = pty.openpty()
devnul = open(os.devnull, 'r')
cmd = self.cmd + ('-nb::{}'.format(self.port),) + self.vimrc
self.proc = subprocess.Popen(
cmd, stdin=slave, stdout=slave,
stderr=devnul, close_fds=True)
self.output = os.fdopen(master, 'rb')
self.input = os.fdopen(master, 'wb')
def pump():
self.tty = v = VT100(self.cols, self.rows, callback=self._update)
while True:
b = self.output.read(1)
if not b:
# TODO: subprocess closed tty. recover somehow?
break
v.append(b)
threading.Thread(target=pump).start()
def __serve(self):
self.socket = VimSocket(self, self.view, callback=self.modify_callback)
self.port = self.socket.port
self.socket.spawn()
def _update(self, v, dirty, moved):
data = v.dump()
self.status, self.cmdline = [
s.strip() for s in data.rsplit('\n')[-3:-1]
]
try:
if self.status.count('+') >= 2:
pos, rest = self.status.split(',', 1)
row, col = pos.split('+', 1)
self.row, self.col = int(row), int(col)
self.mode, rest = rest.split(',', 1)
a, b = rest.split('+', 1)
self.visual = (int(a), int(b))
# print(self.status)
except ValueError:
pass
if self.monitor:
with Edit(self.monitor) as edit:
if dirty:
edit.erase(sublime.Region(0, self.monitor.size()))
edit.insert(0, data)
edit.reselect(
lambda view: view.text_point(v.row - 1, v.col - 1))
def update_cursor(view, edit):
row, col = (self.row - 1, self.col + 1)
# see if it's prompting for input
if v.row == self.rows and v.col > 0:
char = v.buf[v.row - 1][0]
if char in ':/':
row, col = (v.row - 1, v.col - 1)
pos = view.text_point(row, col)
sel = sublime.Region(pos, pos)
view.add_regions(
'cursor', [sel], 'comment',
'', sublime.DRAW_EMPTY,
)
if moved:
edit.callback(update_cursor)
if self.update_callback:
self.update_callback(self, dirty, moved)
def send(self, b):
# send input
if self.input:
self.input.write(b.encode('utf8'))
self.input.flush()
def press(self, *keys):
for key in keys:
b = VT100.map(key)
self.send(b)
def type(self, text):
self.press(*list(text))
def close(self):
print('ending Vim')
self.view.close()
if self.panel:
self.panel.close()
if self.monitor:
self.monitor.close()
self.proc.kill()
self.socket.close()
def update_cursor(self, *args, **kwargs):
def callback(args):
buf, lnum, col, off = [int(a) for a in args.split(' ')]
with Edit(self.view) as edit:
edit.reselect(off)
self.socket.get_cursor(callback)
def get_cursor(self, callback):
self.socket.get_cursor(callback)
def set_cursor(self, offset, callback=None):
self.socket.set_cursor(offset, callback=callback)
def insert(self, offset, text):
self.socket.insert(offset, text)
def init_done(self):
self.socket.init_done()
if __name__ == '__main__':
import time
v = Vim()
time.sleep(3)
v.send('i')
while True:
v.send('asdfjkl ')
time.sleep(1)
| [
"lunixbochs@gmail.com"
] | lunixbochs@gmail.com |
77fad9c6a8296a55f65e79ad21de98f7209e2477 | f324dba8769c8fb0f23693faa0cf8cba0dd66e0b | /setup.py | 5912fc516eee8aed5bbe84c6e3e67b05b0dc469f | [
"BSD-3-Clause"
] | permissive | sourcery-ai-bot/roles | e3587829ca2870c4f5848cbea4605330cc985383 | e945d4d4a265cc41216cf46b2325d0bba15f03ad | refs/heads/master | 2022-11-05T11:33:27.802375 | 2020-06-18T10:53:16 | 2020-06-18T10:53:16 | 273,213,490 | 0 | 0 | null | 2020-06-18T10:53:09 | 2020-06-18T10:53:09 | null | UTF-8 | Python | false | false | 1,013 | py | """
Setup script for roles module.
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from contextlib import closing
import glob
VERSION = '0.10'
with closing(open('README.txt')) as f:
doc = f.read()
setup(
name='roles',
version=VERSION,
description='Role based development',
long_description=doc,
author='Arjan Molenaar',
author_email='gaphor@gmail.com',
url='http://github.com/amolenaar/roles',
license="BSD License",
packages = [ 'roles' ],
keywords="role DCI data context interaction",
platforms=["All"],
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries']
)
#vim:sw=4:et:ai
| [
"gaphor@gmail.com"
] | gaphor@gmail.com |
53592dc4b0ddb1d02dce8614f5215fc71d2939bb | e1429633ac8989e9cee6089c43c4f54e3553cab3 | /UefiTestingPkg/AuditTests/UefiVarLockAudit/Windows/UefiVarAudit.py | c97d8b1543b2852d24a92f95699fbe94f0bcf1ac | [
"BSD-2-Clause"
] | permissive | Perry31/mu_plus | 7bfd4a3c773384ff44df53a794382c7a047b1702 | 4cee2caffa0344517ce713c4066629160f1968d8 | refs/heads/release/20180529 | 2022-07-09T09:08:27.770548 | 2018-10-22T19:51:46 | 2018-10-22T19:51:46 | 154,810,652 | 0 | 0 | BSD-2-Clause | 2022-07-04T15:08:56 | 2018-10-26T09:30:49 | C | UTF-8 | Python | false | false | 5,383 | py | #
# Script to iterate thru an xml file and
# check the UEFI variable read/write properties of a given variable
#
# Copyright (c) 2016, Microsoft Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##
import os, sys
import argparse
import logging
import datetime
import struct
import hashlib
import shutil
import time
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
from UefiVariablesSupportLib import UefiVariable
#
#main script function
#
def main():
parser = argparse.ArgumentParser(description='Variable Audit Tool')
#Output debug log
parser.add_argument("-l", dest="OutputLog", help="Create an output log file: ie -l out.txt", default=None)
parser.add_argument("--OutputXml", dest="OutputXml", help="Output Xml file that contains final results", default=None)
parser.add_argument("--InputXml", dest="InputXml", help="Input Xml file", default=None)
#Turn on dubug level logging
parser.add_argument("--debug", action="store_true", dest="debug", help="turn on debug logging level for file log", default=False)
options = parser.parse_args()
#setup file based logging if outputReport specified
if(options.OutputLog):
if(len(options.OutputLog) < 2):
logging.critical("the output log file parameter is invalid")
return -2
else:
#setup file based logging
filelogger = logging.FileHandler(filename=options.OutputLog, mode='w')
if(options.debug):
filelogger.setLevel(logging.DEBUG)
else:
filelogger.setLevel(logging.INFO)
filelogger.setFormatter(formatter)
logging.getLogger('').addHandler(filelogger)
logging.info("Log Started: " + datetime.datetime.strftime(datetime.datetime.now(), "%A, %B %d, %Y %I:%M%p" ))
#Check for required input parameters
if(not options.InputXml) or (not os.path.isfile(options.InputXml)):
logging.critical("No Input Xml file specified")
return -1
if(not options.OutputXml):
logging.critical("Output Xml file path not specified")
return -2
Uefi = UefiVariable()
#read in XML file as doc
XmlFile = ET.parse(options.InputXml)
XmlRoot = XmlFile.getroot()
for var in XmlRoot.findall("Variable"):
name = var.get("Name")
guid = var.get("Guid")
(ReadStatus, Data, ReadErrorString) = Uefi.GetUefiVar(name, guid)
(WriteSuccess, ErrorCode, WriteErrorString)= Uefi.SetUefiVar(name, guid)
if(WriteSuccess != 0):
logging.info("Must Restore Var %s:%s" % (name, guid))
(RestoreSuccess, RestoreEC, RestoreErrorString) = Uefi.SetUefiVar(name, guid, Data)
if (RestoreSuccess == 0):
logging.critical("Restoring failed for Var %s:%s 0x%X ErrorCode: 0x%X %s" % (name, guid, RestoreSuccess, RestoreEC, RestoreErrorString))
#append
#<FromOs>
#<ReadStatus>0x0 Success</ReadStatus>
#<WriteStatus>0x8000000000000002 Invalid Parameter</WriteStatus>
ele = Element("FromOs")
rs = Element("ReadStatus")
ws = Element("WriteStatus")
rs.text = "0x%lX" % (ReadStatus)
if(ReadErrorString is not None):
rs.text = rs.text + " %s" % ReadErrorString
ws.text = "0x%lX" % ErrorCode
if(WriteErrorString is not None):
ws.text = ws.text + " %s" % WriteErrorString
ele.append(rs)
ele.append(ws)
var.append(ele)
XmlFile.write(options.OutputXml)
return 0
if __name__ == '__main__':
#setup main console as logger
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s - %(message)s")
console = logging.StreamHandler()
console.setLevel(logging.CRITICAL)
console.setFormatter(formatter)
logger.addHandler(console)
#call main worker function
retcode = main()
if retcode != 0:
logging.critical("Failed. Return Code: %i" % retcode)
#end logging
logging.shutdown()
sys.exit(retcode)
| [
"brbarkel@microsoft.com"
] | brbarkel@microsoft.com |
d81dbf76ee1a57f5b2cbec719ab8285289bca57e | a7926ba10e6c3717c27884eaacacfd02e86e0e7e | /0x04-python-more_data_structures/10-best_score.py | db9c1ecd229810b012783f95512468c924d4266a | [] | no_license | Yosri-ctrl/holbertonschool-higher_level_programming | e7516d33a49b7001eab1c33ca3ec236025a81fe5 | 4654b00f8ea7e8e013b131ffd4fc835de2f986fa | refs/heads/master | 2023-01-05T17:03:59.350837 | 2020-11-04T17:40:46 | 2020-11-04T17:40:46 | 259,329,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | #!/usr/bin/python3
def best_score(a_dictionary):
max = 0
maxi = ""
if a_dictionary is None:
return None
for i, j in a_dictionary.items():
if max <= j:
max = j
maxi = i
if j is None:
max = None
maxi = None
return maxi
| [
"yosribouabid@gmail.com"
] | yosribouabid@gmail.com |
a08d5e055b20803571c91a862d0c579d8e4518f4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2188/60634/257261.py | 03165fc942fd1f7a8effcb92592e622dbddad9a9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | def equal(s1,s2):
if len(s1) == len(s2):
size = len(s1)
i = 0
while i <= size:
if i == size:
return True
if s1[i] != s2[i]:
return False
i += 1
return False
temp = input().split(" ")
n = int(temp[0])
k = int(temp[1])
A = input()
B = input()
problems = int(input())
for p in range(problems):
temp = input().split(" ")
T = A[int(temp[0])-1:int(temp[1])]
P = B[int(temp[2])-1:int(temp[3])]
count = 0
i = 0
while i <= len(T) - len(P):
if equal(T[i:i+len(P)],P):
count += k - (i + int(temp[0]))
i += len(P) - 1
i += 1
print(count)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
1bbdce025b29c7652d4da8243f6310d769b29477 | a799a105ab2aba39a475bf2ce086405def0351c2 | /src/gluonts/transform/_base.py | 15545292fb649a4c9b0dd15bdcaea3571bfd2db0 | [
"Apache-2.0"
] | permissive | mbohlkeschneider/gluon-ts | d663750d13798624eca5c9d6f12a87e321ce7334 | df4256b0e67120db555c109a1bf6cfa2b3bd3cd8 | refs/heads/master | 2021-11-24T06:09:49.905352 | 2021-10-14T09:30:38 | 2021-10-14T09:30:38 | 192,546,557 | 54 | 10 | Apache-2.0 | 2022-08-31T18:36:44 | 2019-06-18T13:33:36 | Python | UTF-8 | Python | false | false | 6,305 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import abc
from typing import Callable, Iterable, Iterator, List
from gluonts.core.component import validated
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.env import env
class Transformation(metaclass=abc.ABCMeta):
"""
Base class for all Transformations.
A Transformation processes works on a stream (iterator) of dictionaries.
"""
@abc.abstractmethod
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterable[DataEntry]:
pass
def chain(self, other: "Transformation") -> "Chain":
return Chain([self, other])
def __add__(self, other: "Transformation") -> "Chain":
return self.chain(other)
def apply(
self, dataset: Dataset, is_train: bool = True
) -> "TransformedDataset":
return TransformedDataset(dataset, self, is_train=is_train)
class Chain(Transformation):
"""
Chain multiple transformations together.
"""
@validated()
def __init__(self, trans: List[Transformation]) -> None:
self.transformations: List[Transformation] = []
for transformation in trans:
# flatten chains
if isinstance(transformation, Chain):
self.transformations.extend(transformation.transformations)
else:
self.transformations.append(transformation)
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterable[DataEntry]:
tmp = data_it
for t in self.transformations:
tmp = t(tmp, is_train)
return tmp
class TransformedDataset(Dataset):
"""
A dataset that corresponds to applying a list of transformations to each
element in the base_dataset.
This only supports SimpleTransformations, which do the same thing at
prediction and training time.
Parameters
----------
base_dataset
Dataset to transform
transformations
List of transformations to apply
"""
def __init__(
self,
base_dataset: Dataset,
transformation: Transformation,
is_train=True,
) -> None:
self.base_dataset = base_dataset
self.transformation = transformation
self.is_train = is_train
def __len__(self):
# NOTE this is unsafe when transformations are run with is_train = True
# since some transformations may not be deterministic (instance splitter)
return sum(1 for _ in self)
def __iter__(self) -> Iterator[DataEntry]:
yield from self.transformation(
self.base_dataset, is_train=self.is_train
)
class Identity(Transformation):
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterable[DataEntry]:
return data_it
class MapTransformation(Transformation):
"""
Base class for Transformations that returns exactly one result per input in the stream.
"""
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterator:
for data_entry in data_it:
try:
yield self.map_transform(data_entry.copy(), is_train)
except Exception as e:
raise e
@abc.abstractmethod
def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
pass
class SimpleTransformation(MapTransformation):
"""
Element wise transformations that are the same in train and test mode
"""
def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
return self.transform(data)
@abc.abstractmethod
def transform(self, data: DataEntry) -> DataEntry:
pass
class AdhocTransform(SimpleTransformation):
"""
Applies a function as a transformation
This is called ad-hoc, because it is not serializable.
It is OK to use this for experiments and outside of a model pipeline that
needs to be serialized.
"""
def __init__(self, func: Callable[[DataEntry], DataEntry]) -> None:
self.func = func
def transform(self, data: DataEntry) -> DataEntry:
return self.func(data.copy())
class FlatMapTransformation(Transformation):
"""
Transformations that yield zero or more results per input, but do not
combine elements from the input stream.
"""
@validated()
def __init__(self):
self.max_idle_transforms = max(env.max_idle_transforms, 100)
def __call__(
self, data_it: Iterable[DataEntry], is_train: bool
) -> Iterator:
num_idle_transforms = 0
for data_entry in data_it:
num_idle_transforms += 1
for result in self.flatmap_transform(data_entry.copy(), is_train):
num_idle_transforms = 0
yield result
if num_idle_transforms > self.max_idle_transforms:
raise Exception(
f"Reached maximum number of idle transformation calls.\n"
f"This means the transformation looped over "
f"{self.max_idle_transforms} inputs without returning any "
f"output.\nThis occurred in the following transformation:\n"
f"{self}"
)
@abc.abstractmethod
def flatmap_transform(
self, data: DataEntry, is_train: bool
) -> Iterator[DataEntry]:
pass
class FilterTransformation(FlatMapTransformation):
def __init__(self, condition: Callable[[DataEntry], bool]) -> None:
super().__init__()
self.condition = condition
def flatmap_transform(
self, data: DataEntry, is_train: bool
) -> Iterator[DataEntry]:
if self.condition(data):
yield data
| [
"noreply@github.com"
] | mbohlkeschneider.noreply@github.com |
300a1a301ecff5601a2b72f089d95804f11bc764 | aca2258cf58e0d2c7e4939e73bcb82b6c135282c | /libs/telepot/telepot/aio/helper.py | e561c78ed7141f16499f90b24d476a44d0fc594c | [] | no_license | masomel/py-import-analysis | cfe6749a1d7430b179559b9e0911b8c8df507be7 | 7edf8148e34b9f73ca6433ceb43a1770f4fa32c1 | refs/heads/master | 2021-03-16T10:00:24.205301 | 2019-08-01T20:32:34 | 2019-08-01T20:32:34 | 112,668,748 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,715 | py | import asyncio
import traceback
from .. import filtering, helper, exception
from .. import (
flavor, chat_flavors, inline_flavors, is_event,
message_identifier, origin_identifier)
# Mirror traditional version
from ..helper import (
Sender, Administrator, Editor, openable,
StandardEventScheduler, StandardEventMixin)
async def _yell(fn, *args, **kwargs):
if asyncio.iscoroutinefunction(fn):
return await fn(*args, **kwargs)
else:
return fn(*args, **kwargs)
def _delay_yell(obj, method_name):
    """Return a coroutine function that, when called, looks up ``method_name``
    on ``obj`` at call time and dispatches to it via :func:`_yell`.

    The late ``getattr`` means subclass overrides are honored even if they
    are installed after this wrapper is created.
    """
    async def deferred(*args, **kwargs):
        return await _yell(getattr(obj, method_name), *args, **kwargs)
    return deferred
class Microphone(object):
    """Fan-out device: broadcasts each message to every registered queue."""

    def __init__(self):
        self._queues = set()

    def add(self, q):
        """Register a queue to receive future messages."""
        self._queues.add(q)

    def remove(self, q):
        """Unregister a previously added queue."""
        self._queues.remove(q)

    def send(self, msg):
        """Deliver ``msg`` to every registered queue.

        A full queue simply loses this message: the error is reported via
        :func:`traceback.print_exc` and broadcasting continues.
        """
        for listener in self._queues:
            try:
                listener.put_nowait(msg)
            except asyncio.QueueFull:
                # A slow consumer drops this message; report but keep going.
                traceback.print_exc()
class Listener(helper.Listener):
    async def wait(self):
        """
        Suspend until a message matching one of the registered patterns
        arrives on the queue, then return that message.

        :raises RuntimeError: if no capture patterns have been registered.
        """
        if not self._patterns:
            raise RuntimeError('Listener has nothing to capture')

        while True:
            candidate = await self._queue.get()
            for pattern in self._patterns:
                if filtering.match_all(candidate, pattern):
                    return candidate
from concurrent.futures._base import CancelledError
class Answerer(object):
    """
    When processing inline queries, ensures **at most one active task** per user id.
    """

    def __init__(self, bot, loop=None):
        """
        :param bot: bot object used to call ``answerInlineQuery``
        :param loop: asyncio event loop; defaults to the current event loop
        """
        self._bot = bot
        self._loop = loop if loop is not None else asyncio.get_event_loop()
        # user id -> asyncio.Task currently computing an answer for that user
        self._working_tasks = {}

    def answer(self, inline_query, compute_fn, *compute_args, **compute_kwargs):
        """
        Create a task that calls ``compute fn`` (along with additional arguments
        ``*compute_args`` and ``**compute_kwargs``), then applies the returned value to
        :meth:`.Bot.answerInlineQuery` to answer the inline query.
        If a preceding task is already working for a user, that task is cancelled,
        thus ensuring at most one active task per user id.

        :param inline_query:
            The inline query to be processed. The originating user is inferred from ``msg['from']['id']``.

        :param compute_fn:
            A function whose returned value is given to :meth:`.Bot.answerInlineQuery` to send.
            May return:

            - a *list* of `InlineQueryResult <https://core.telegram.org/bots/api#inlinequeryresult>`_
            - a *tuple* whose first element is a list of `InlineQueryResult <https://core.telegram.org/bots/api#inlinequeryresult>`_,
              followed by positional arguments to be supplied to :meth:`.Bot.answerInlineQuery`
            - a *dictionary* representing keyword arguments to be supplied to :meth:`.Bot.answerInlineQuery`

        :param \*compute_args: positional arguments to ``compute_fn``
        :param \*\*compute_kwargs: keyword arguments to ``compute_fn``
        """
        from_id = inline_query['from']['id']

        async def compute_and_answer():
            try:
                query_id = inline_query['id']

                ans = await _yell(compute_fn, *compute_args, **compute_kwargs)

                if isinstance(ans, list):
                    await self._bot.answerInlineQuery(query_id, ans)
                elif isinstance(ans, tuple):
                    await self._bot.answerInlineQuery(query_id, *ans)
                elif isinstance(ans, dict):
                    await self._bot.answerInlineQuery(query_id, **ans)
                else:
                    raise ValueError('Invalid answer format')
            except asyncio.CancelledError:
                # Since Python 3.8, asyncio.CancelledError derives from
                # BaseException and is no longer the same class as
                # concurrent.futures.CancelledError, so it must be caught
                # by its asyncio name here (on <=3.7 the two are the same
                # class, so this remains backward-compatible).
                # Cancelled: the record has been occupied by a newer task.
                # Don't touch it.
                raise
            except BaseException:
                # Died accidentally. Remove myself from the record so a
                # subsequent query can start a fresh task.
                del self._working_tasks[from_id]
                raise
            else:
                # Died naturally. Remove myself from the record.
                del self._working_tasks[from_id]

        if from_id in self._working_tasks:
            self._working_tasks[from_id].cancel()

        t = self._loop.create_task(compute_and_answer())
        self._working_tasks[from_id] = t
class AnswererMixin(helper.AnswererMixin):
    # Override the class attribute so mixin users get the asyncio-based
    # Answerer defined above instead of the traditional (threaded) one.
    Answerer = Answerer  # use async Answerer class
class CallbackQueryCoordinator(helper.CallbackQueryCoordinator):
    def augment_send(self, send_func):
        """Wrap a send method: capture the sent message's origin when chat
        capturing is enabled and the call carried callback data."""
        async def wrapped(*args, **kwargs):
            sent = await send_func(*args, **kwargs)

            if self._enable_chat and self._contains_callback_data(kwargs):
                self.capture_origin(message_identifier(sent))

            return sent
        return wrapped

    def augment_edit(self, edit_func):
        """Wrap an edit method: after a successful edit, capture or uncapture
        the origin depending on whether callback data is still present."""
        async def wrapped(msg_identifier, *args, **kwargs):
            edited = await edit_func(msg_identifier, *args, **kwargs)

            inline_edited = edited is True and self._enable_inline
            chat_edited = isinstance(edited, dict) and self._enable_chat
            if inline_edited or chat_edited:
                if self._contains_callback_data(kwargs):
                    self.capture_origin(msg_identifier)
                else:
                    self.uncapture_origin(msg_identifier)

            return edited
        return wrapped

    def augment_delete(self, delete_func):
        """Wrap a delete method: a successful delete releases the origin."""
        async def wrapped(msg_identifier, *args, **kwargs):
            deleted = await delete_func(msg_identifier, *args, **kwargs)

            if deleted is True:
                self.uncapture_origin(msg_identifier)

            return deleted
        return wrapped

    def augment_on_message(self, handler):
        """Wrap a message handler: capture inline message origins from
        ``chosen_inline_result`` updates before delegating to the handler."""
        async def wrapped(msg):
            if (self._enable_inline
                    and flavor(msg) == 'chosen_inline_result'
                    and 'inline_message_id' in msg):
                self.capture_origin(msg['inline_message_id'])

            return await _yell(handler, msg)
        return wrapped
class InterceptCallbackQueryMixin(helper.InterceptCallbackQueryMixin):
    # Override the class attribute so the mixin installs the asyncio-aware
    # coordinator (its augmented bot methods are coroutines).
    CallbackQueryCoordinator = CallbackQueryCoordinator
class IdleEventCoordinator(helper.IdleEventCoordinator):
    def augment_on_message(self, handler):
        """Wrap a message handler so any external (non-event) message resets
        the idle timer before the handler runs."""
        async def wrapped(msg):
            if not is_event(msg):
                self.refresh()
            return await _yell(handler, msg)
        return wrapped

    def augment_on_close(self, handler):
        """Wrap a close handler so the pending timeout event is cancelled
        before delegating to the handler."""
        async def wrapped(ex):
            try:
                if self._timeout_event:
                    self._scheduler.cancel(self._timeout_event)
                    self._timeout_event = None
            except exception.EventNotFound:
                # Closing may have been caused by the timeout itself, in
                # which case the event is already gone from the scheduler.
                self._timeout_event = None
            return await _yell(handler, ex)
        return wrapped
class IdleTerminateMixin(helper.IdleTerminateMixin):
    # Override the class attribute so the mixin uses the asyncio-aware
    # coordinator defined above (its augmented handlers are coroutines).
    IdleEventCoordinator = IdleEventCoordinator
class Router(helper.Router):
    async def route(self, msg, *aa, **kw):
        """
        Apply the key function to ``msg`` to obtain a key, look up the routing
        table for a handler function, then invoke that handler with any
        positional and keyword arguments the key function supplied.

        ``*aa`` and ``**kw`` are dummy placeholders for easy nesting.
        Regardless of any number of arguments returned by the key function,
        multi-level routing may be achieved like this::

            top_router.routing_table['key1'] = sub_router1.route
            top_router.routing_table['key2'] = sub_router2.route
        """
        produced = self.key_function(msg)

        if isinstance(produced, (tuple, list)):
            parts = tuple(produced)
            # Pad the key-function result out to (key, args, kwargs).
            forms = {1: parts + ((), {}),
                     2: parts + ({},),
                     3: parts}
            key, args, kwargs = forms[len(parts)]
        else:
            key, args, kwargs = produced, (), {}

        if key in self.routing_table:
            fn = self.routing_table[key]
        elif None in self.routing_table:
            # Fall back to the default handler, registered under key=None.
            fn = self.routing_table[None]
        else:
            raise RuntimeError('No handler for key: %s, and default handler not defined' % str((key,)))

        return await _yell(fn, msg, *args, **kwargs)
class DefaultRouterMixin(object):
    """Install a :class:`Router` keyed on message flavor, dispatching each
    flavor to the correspondingly named ``on_*`` method of ``self``."""
    def __init__(self, *args, **kwargs):
        self._router = Router(flavor, {'chat': _delay_yell(self, 'on_chat_message'),
                                       'callback_query': _delay_yell(self, 'on_callback_query'),
                                       'inline_query': _delay_yell(self, 'on_inline_query'),
                                       'chosen_inline_result': _delay_yell(self, 'on_chosen_inline_result'),
                                       'shipping_query': _delay_yell(self, 'on_shipping_query'),
                                       'pre_checkout_query': _delay_yell(self, 'on_pre_checkout_query'),
                                       '_idle': _delay_yell(self, 'on__idle')})
        super(DefaultRouterMixin, self).__init__(*args, **kwargs)
    @property
    def router(self):
        """ See :class:`.helper.Router` """
        return self._router
    async def on_message(self, msg):
        """
        Called when a message is received.
        By default, call :meth:`Router.route` to handle the message.
        """
        await self._router.route(msg)
@openable
class Monitor(helper.ListenerContext, DefaultRouterMixin):
    def __init__(self, seed_tuple, capture, **kwargs):
        """
        A delegate that never times-out, probably doing some kind of background monitoring
        in the application. Most naturally paired with :func:`telepot.aio.delegate.per_application`.
        :param capture: a list of patterns for ``listener`` to capture
        """
        bot, initial_msg, seed = seed_tuple
        super(Monitor, self).__init__(bot, seed, **kwargs)
        # Register every caller-supplied pattern with the listener.
        for pattern in capture:
            self.listener.capture(pattern)
@openable
class ChatHandler(helper.ChatContext,
                  DefaultRouterMixin,
                  StandardEventMixin,
                  IdleTerminateMixin):
    def __init__(self, seed_tuple,
                 include_callback_query=False, **kwargs):
        """
        A delegate to handle a chat.
        """
        bot, initial_msg, seed = seed_tuple
        super(ChatHandler, self).__init__(bot, seed, **kwargs)
        # Capture every update addressed to this chat id.
        self.listener.capture([{'chat': {'id': self.chat_id}}])
        if include_callback_query:
            # Also capture callback queries whose originating message
            # belongs to this chat.
            self.listener.capture([{'message': {'chat': {'id': self.chat_id}}}])
@openable
class UserHandler(helper.UserContext,
                  DefaultRouterMixin,
                  StandardEventMixin,
                  IdleTerminateMixin):
    def __init__(self, seed_tuple,
                 include_callback_query=False,
                 flavors=chat_flavors+inline_flavors, **kwargs):
        """
        A delegate to handle a user's actions.
        :param flavors:
            A list of flavors to capture. ``all`` covers all flavors.
        """
        bot, initial_msg, seed = seed_tuple
        super(UserHandler, self).__init__(bot, seed, **kwargs)
        if flavors == 'all':
            # Any update coming from this user id, regardless of flavor.
            self.listener.capture([{'from': {'id': self.user_id}}])
        else:
            # Restrict to the requested flavors for this user id.
            self.listener.capture([lambda msg: flavor(msg) in flavors, {'from': {'id': self.user_id}}])
        if include_callback_query:
            # Also capture callback queries whose originating message is in
            # the private chat with this user (chat id == user id here).
            self.listener.capture([{'message': {'chat': {'id': self.user_id}}}])
class InlineUserHandler(UserHandler):
    def __init__(self, seed_tuple, **kwargs):
        """
        A delegate to handle a user's inline-related actions.
        """
        # Same as UserHandler but restricted to inline flavors only.
        super(InlineUserHandler, self).__init__(seed_tuple, flavors=inline_flavors, **kwargs)
@openable
class CallbackQueryOriginHandler(helper.CallbackQueryOriginContext,
                                 DefaultRouterMixin,
                                 StandardEventMixin,
                                 IdleTerminateMixin):
    def __init__(self, seed_tuple, **kwargs):
        """
        A delegate to handle callback query from one origin.
        """
        bot, initial_msg, seed = seed_tuple
        super(CallbackQueryOriginHandler, self).__init__(bot, seed, **kwargs)
        # Capture only callback queries whose origin matches this handler's.
        self.listener.capture([
            lambda msg:
                flavor(msg) == 'callback_query' and origin_identifier(msg) == self.origin
        ])
@openable
class InvoiceHandler(helper.InvoiceContext,
                     DefaultRouterMixin,
                     StandardEventMixin,
                     IdleTerminateMixin):
    def __init__(self, seed_tuple, **kwargs):
        """
        A delegate to handle messages related to an invoice.
        """
        bot, initial_msg, seed = seed_tuple
        super(InvoiceHandler, self).__init__(bot, seed, **kwargs)
        # Match by invoice payload, both at query time and on the final
        # successful-payment message.
        self.listener.capture([{'invoice_payload': self.payload}])
        self.listener.capture([{'successful_payment': {'invoice_payload': self.payload}}])
| [
"msmelara@gmail.com"
] | msmelara@gmail.com |
eddc61c0bbe5792595dcde6f06089a235541632d | 7bb556d5789d5100ca67b78e4adb4fe9fa58bc88 | /AnalysisFW/python/submit.py | 47df8cedcffec93b599bfdf546cc4ac476b1d369 | [] | no_license | MLBasedJEC/2011-jet-inclusivecrosssection-ntupleproduction-optimized | dd20989c3f42f54a9348a3bc7d66fbafc310e7b3 | b41a63698fb285d0676924d78b8a1fb5ecd3c979 | refs/heads/master | 2021-09-08T19:40:21.984668 | 2018-03-12T02:48:45 | 2018-03-12T02:48:45 | 117,272,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | import os
# File-index names of the 2011 CMS Open Data Monte Carlo samples to process
# (QCD pT bins plus a ttbar sample).
samples = ['CMS_MonteCarlo2011_Summer11LegDR_QCD_Pt-120to170_TuneZ2_7TeV_pythia6_AODSIM_PU_S13_START53_LV6-v1_00000_file_index',
           'CMS_MonteCarlo2011_Summer11LegDR_QCD_Pt-170to300_TuneZ2_7TeV_pythia6_AODSIM_PU_S13_START53_LV6-v1_00000_file_index',
           'CMS_MonteCarlo2011_Summer11LegDR_QCD_Pt-300to470_TuneZ2_7TeV_pythia6_AODSIM_PU_S13_START53_LV6-v1_00000_file_index',
           'CMS_MonteCarlo2011_Summer11LegDR_QCD_Pt-470to600_TuneZ2_7TeV_pythia6_AODSIM_PU_S13_START53_LV6-v1_00000_file_index',
           'CMS_MonteCarlo2011_Summer11LegDR_QCD_Pt-80to120_TuneZ2_7TeV_pythia6_AODSIM_PU_S13_START53_LV6-v1_00000_file_index',
           'CMS_MonteCarlo2011_Summer11LegDR_TT_weights_CT10_TuneZ2_7TeV-powheg-pythia-tauola_AODSIM_PU_S13_START53_LV6-v2_00000_file_index'
          ]
#samples = ['CMS_Run2011A_Jet_AOD_12Oct2013-v1_10000_file_index']
if __name__ == '__main__':
    for sample in samples:
        # Submit the batch jobs for this sample, then run a resubmission pass
        # for failed jobs (--monitor resub). The commented variants below are
        # the equivalent commands for the collision-data configuration.
        os.system('python baconBatch.py cmsRun params0.npy -a 1:OpenDataTreeProducerOptimized_mcPAT_2011_cfg.py --list 2:%s.txt --outdir %s --eosoutdir root://cmseos.fnal.gov//eos/uscms/store/user/woodson/DSHEP2017v2/%s -q 1nh -n 8000'%(sample,sample,sample))
        os.system('python baconBatch.py cmsRun params0.npy -a 1:OpenDataTreeProducerOptimized_mcPAT_2011_cfg.py --list 2:%s.txt --outdir %s --eosoutdir root://cmseos.fnal.gov//eos/uscms/store/user/woodson/DSHEP2017v2/%s -q 1nh -n 8000 --monitor resub'%(sample,sample,sample))
        #os.system('python baconBatch.py cmsRun params0.npy -a 1:OpenDataTreeProducerOptimized_dataPAT_2011_cfg.py --list 2:%s.txt --outdir %s --eosoutdir root://cmseos.fnal.gov//eos/uscms/store/user/woodson/DSHEP2017v2/%s -q 1nh -n 8000'%(sample,sample,sample))
        #os.system('python baconBatch.py cmsRun params0.npy -a 1:OpenDataTreeProducerOptimized_dataPAT_2011_cfg.py --list 2:%s.txt --outdir %s --eosoutdir root://cmseos.fnal.gov//eos/uscms/store/user/woodson/DSHEP2017v2/%s -q 1nh -n 8000 --monitor resub'%(sample,sample,sample))
| [
"jduarte@caltech.edu"
] | jduarte@caltech.edu |
779c2d4dfe0865a97f1f214fdff1975bed61c2ca | 759d6ff0c4688abf0a3cc6f37f9a8e361fbdeaa0 | /jam/third_party/sqlalchemy/util/langhelpers.py | 09aa94bf2bcfd22d137861ebe44ea9bfe6853f88 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | jam-py/jam-py | a2ee9489aa2fe52578c39f1aa9745e362090b1f6 | c3617b7757985287ebec2d8504b23b0c503d0aed | refs/heads/master | 2023-08-31T12:59:36.985233 | 2023-08-02T18:16:41 | 2023-08-02T18:16:41 | 27,813,428 | 437 | 99 | BSD-3-Clause | 2023-08-28T19:15:56 | 2014-12-10T09:57:38 | JavaScript | UTF-8 | Python | false | false | 50,850 | py | # util/langhelpers.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
from functools import update_wrapper
import hashlib
import inspect
import itertools
import operator
import re
import sys
import textwrap
import types
import warnings
from . import _collections
from . import compat
from .. import exc
def md5_hex(x):
    """Return the hexadecimal MD5 digest of string *x*."""
    if compat.py3k:
        # hashlib requires bytes under Python 3
        x = x.encode("utf-8")
    return hashlib.md5(x).hexdigest()
class safe_reraise(object):
    """Reraise an exception after invoking some
    handler code.
    Stores the existing exception info before
    invoking so that it is maintained across a potential
    coroutine context switch.
    e.g.::
        try:
            sess.commit()
        except:
            with safe_reraise():
                sess.rollback()
    """
    __slots__ = ("warn_only", "_exc_info")
    def __init__(self, warn_only=False):
        # When True, a pre-existing exception is warned about rather than
        # re-raised after the block runs.
        self.warn_only = warn_only
    def __enter__(self):
        # Snapshot the in-flight exception now, before the handler body runs.
        self._exc_info = sys.exc_info()
    def __exit__(self, type_, value, traceback):
        # see #2703 for notes
        if type_ is None:
            # Handler body succeeded: re-raise the original exception.
            exc_type, exc_value, exc_tb = self._exc_info
            self._exc_info = None # remove potential circular references
            if not self.warn_only:
                compat.raise_(
                    exc_value, with_traceback=exc_tb,
                )
        else:
            # Handler body itself raised: propagate the new exception.
            if not compat.py3k and self._exc_info and self._exc_info[1]:
                # emulate Py3K's behavior of telling us when an exception
                # occurs in an exception handler.
                warn(
                    "An exception has occurred during handling of a "
                    "previous exception. The previous exception "
                    "is:\n %s %s\n" % (self._exc_info[0], self._exc_info[1])
                )
            self._exc_info = None # remove potential circular references
            compat.raise_(value, with_traceback=traceback)
def string_or_unprintable(element):
    """Return *element* if it is a string, otherwise its ``str()`` form,
    falling back to a placeholder when ``str()`` itself fails.
    """
    if isinstance(element, compat.string_types):
        return element
    try:
        return str(element)
    except Exception:
        return "unprintable element %r" % element
def clsname_as_plain_name(cls):
    """Turn a CamelCase class name into lowercase, space-separated words."""
    words = re.findall(r"([A-Z][a-z]+)", cls.__name__)
    return " ".join(word.lower() for word in words)
def decode_slice(slc):
    """Decode a slice object as passed to ``__getitem__``.

    Honors the ``__index__()`` protocol on each of start/stop/step.
    """
    def _as_index(value):
        # apply __index__() where available (e.g. ints, bools, numpy ints);
        # pass through anything else (notably None) unchanged
        if hasattr(value, "__index__"):
            return value.__index__()
        return value

    return tuple(_as_index(part) for part in (slc.start, slc.stop, slc.step))
def _unique_symbols(used, *bases):
    # Yield, for each base name, a symbol not present in *used*: the base
    # itself, or base0..base999. Each chosen symbol is added to the used set
    # so later bases cannot collide with earlier picks.
    used = set(used)
    for base in bases:
        pool = itertools.chain(
            (base,),
            compat.itertools_imap(lambda i: base + str(i), range(1000)),
        )
        for sym in pool:
            if sym not in used:
                used.add(sym)
                yield sym
                break
        else:
            # all 1001 candidates taken (for/else: loop exhausted w/o break)
            raise NameError("exhausted namespace for symbol base %s" % base)
def map_bits(fn, n):
    """Call the given function given each nonzero bit from n."""
    while n:
        # two's complement trick: n & -n isolates the lowest set bit
        lowest = n & -n
        yield fn(lowest)
        n -= lowest
def decorator(target):
    """A signature-matching decorator factory."""
    def decorate(fn):
        if not inspect.isfunction(fn) and not inspect.ismethod(fn):
            raise Exception("not a decoratable function")
        spec = compat.inspect_getfullargspec(fn)
        # pick symbol names for 'target'/'fn' that cannot collide with any
        # of the wrapped function's own argument names
        names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
        targ_name, fn_name = _unique_symbols(names, "target", "fn")
        metadata = dict(target=targ_name, fn=fn_name)
        metadata.update(format_argspec_plus(spec, grouped=False))
        metadata["name"] = fn.__name__
        # generate a wrapper with the exact same signature as fn, which
        # forwards to target(fn, <original args>)
        code = (
            """\
def %(name)s(%(args)s):
    return %(target)s(%(fn)s, %(apply_kw)s)
"""
            % metadata
        )
        decorated = _exec_code_in_env(
            code, {targ_name: target, fn_name: fn}, fn.__name__
        )
        decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
        decorated.__wrapped__ = fn
        return update_wrapper(decorated, fn)
    return update_wrapper(decorate, target)
def _exec_code_in_env(code, env, fn_name):
    # Execute generated source in the given namespace and return the
    # function it defines under *fn_name*.
    exec(code, env)
    return env[fn_name]
def public_factory(target, location, class_location=None):
    """Produce a wrapping function for the given cls or classmethod.
    Rationale here is so that the __init__ method of the
    class can serve as documentation for the function.
    """
    if isinstance(target, type):
        fn = target.__init__
        callable_ = target
        doc = (
            "Construct a new :class:`.%s` object. \n\n"
            "This constructor is mirrored as a public API function; "
            "see :func:`sqlalchemy%s` "
            "for a full usage and argument description."
            % (target.__name__, location)
        )
    else:
        fn = callable_ = target
        doc = (
            "This function is mirrored; see :func:`sqlalchemy%s` "
            "for a description of arguments." % location
        )
    location_name = location.split(".")[-1]
    spec = compat.inspect_getfullargspec(fn)
    # drop 'self' from the mirrored signature
    del spec[0][0]
    metadata = format_argspec_plus(spec, grouped=False)
    metadata["name"] = location_name
    # generate a module-level function with the constructor's signature
    # that simply forwards to the class / callable
    code = (
        """\
def %(name)s(%(args)s):
    return cls(%(apply_kw)s)
"""
        % metadata
    )
    env = {"cls": callable_, "symbol": symbol}
    exec(code, env)
    decorated = env[location_name]
    if hasattr(fn, "_linked_to"):
        # fn already mirrors another factory; inherit and annotate its docs
        linked_to, linked_to_location = fn._linked_to
        linked_to_doc = linked_to.__doc__
        if class_location is None:
            class_location = "%s.%s" % (target.__module__, target.__name__)
        linked_to_doc = inject_docstring_text(
            linked_to_doc,
            ".. container:: inherited_member\n\n    "
            "Inherited from :func:`sqlalchemy%s`; this constructor "
            "creates a :class:`%s` object"
            % (linked_to_location, class_location),
            0,
        )
        decorated.__doc__ = linked_to_doc
    else:
        decorated.__doc__ = fn.__doc__
    decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0]
    if decorated.__module__ not in sys.modules:
        raise ImportError(
            "public_factory location %s is not in sys.modules"
            % (decorated.__module__,)
        )
    if compat.py2k or hasattr(fn, "__func__"):
        fn.__func__.__doc__ = doc
        if not hasattr(fn.__func__, "_linked_to"):
            fn.__func__._linked_to = (decorated, location)
    else:
        fn.__doc__ = doc
        if not hasattr(fn, "_linked_to"):
            fn._linked_to = (decorated, location)
    return decorated
class PluginLoader(object):
    """Registry/loader of named plugins for a setuptools entry-point group,
    with optional fallback resolution and in-process registration."""
    def __init__(self, group, auto_fn=None):
        # group: entry-point group name; auto_fn: optional callable that,
        # given a plugin name, returns a loader (tried before pkg_resources)
        self.group = group
        self.impls = {}  # name -> zero-arg loader returning the plugin
        self.auto_fn = auto_fn
    def clear(self):
        self.impls.clear()
    def load(self, name):
        # cached loader first
        if name in self.impls:
            return self.impls[name]()
        if self.auto_fn:
            loader = self.auto_fn(name)
            if loader:
                self.impls[name] = loader
                return loader()
        try:
            import pkg_resources
        except ImportError:
            pass
        else:
            for impl in pkg_resources.iter_entry_points(self.group, name):
                self.impls[name] = impl.load
                return impl.load()
        raise exc.NoSuchModuleError(
            "Can't load plugin: %s:%s" % (self.group, name)
        )
    def register(self, name, modulepath, objname):
        # register a plugin by dotted module path + attribute name; the
        # import is deferred until first load()
        def load():
            mod = compat.import_(modulepath)
            for token in modulepath.split(".")[1:]:
                mod = getattr(mod, token)
            return getattr(mod, objname)
        self.impls[name] = load
def _inspect_func_args(fn):
    # Return (positional arg names, has **kwargs) for *fn*, preferring a
    # cheap read of the code object over a full getfullargspec() call.
    try:
        co_varkeywords = inspect.CO_VARKEYWORDS
    except AttributeError:
        # https://docs.python.org/3/library/inspect.html
        # The flags are specific to CPython, and may not be defined in other
        # Python implementations. Furthermore, the flags are an implementation
        # detail, and can be removed or deprecated in future Python releases.
        spec = compat.inspect_getfullargspec(fn)
        return spec[0], bool(spec[2])
    else:
        # use fn.__code__ plus flags to reduce method call overhead
        co = fn.__code__
        nargs = co.co_argcount
        return (
            list(co.co_varnames[:nargs]),
            bool(co.co_flags & co_varkeywords),
        )
def get_cls_kwargs(cls, _set=None):
    r"""Return the full set of inherited kwargs for the given `cls`.
    Probes a class's __init__ method, collecting all named arguments. If the
    __init__ defines a \**kwargs catch-all, then the constructor is presumed
    to pass along unrecognized keywords to its base classes, and the
    collection process is repeated recursively on each of the bases.
    Uses a subset of inspect.getfullargspec() to cut down on method overhead,
    as this is used within the Core typing system to create copies of type
    objects which is a performance-sensitive operation.
    No anonymous tuple arguments please !
    """
    toplevel = _set is None
    if toplevel:
        _set = set()
    # only consider an __init__ defined directly on this class
    ctr = cls.__dict__.get("__init__", False)
    has_init = (
        ctr
        and isinstance(ctr, types.FunctionType)
        and isinstance(ctr.__code__, types.CodeType)
    )
    if has_init:
        names, has_kw = _inspect_func_args(ctr)
        _set.update(names)
        if not has_kw and not toplevel:
            # no **kwargs: this __init__ does not forward to bases, so the
            # recursion stops here (signalled to the caller via None)
            return None
    if not has_init or has_kw:
        for c in cls.__bases__:
            if get_cls_kwargs(c, _set) is None:
                break
    _set.discard("self")
    return _set
def get_func_kwargs(func):
    """Return the set of legal kwargs for the given `func`.
    Uses getargspec so is safe to call for methods, functions,
    etc.
    """
    # element 0 of the argspec is the list of named positional arguments
    return compat.inspect_getfullargspec(func)[0]
def get_callable_argspec(fn, no_self=False, _is_init=False):
    """Return the argument signature for any callable.
    All pure-Python callables are accepted, including
    functions, methods, classes, objects with __call__;
    builtins and other edge cases like functools.partial() objects
    raise a TypeError.
    """
    if inspect.isbuiltin(fn):
        raise TypeError("Can't inspect builtin: %s" % fn)
    elif inspect.isfunction(fn):
        if _is_init and no_self:
            # strip the leading 'self' from an unbound __init__
            spec = compat.inspect_getfullargspec(fn)
            return compat.FullArgSpec(
                spec.args[1:],
                spec.varargs,
                spec.varkw,
                spec.defaults,
                spec.kwonlyargs,
                spec.kwonlydefaults,
                spec.annotations,
            )
        else:
            return compat.inspect_getfullargspec(fn)
    elif inspect.ismethod(fn):
        if no_self and (_is_init or fn.__self__):
            # bound method (or __init__): drop the self/cls argument
            spec = compat.inspect_getfullargspec(fn.__func__)
            return compat.FullArgSpec(
                spec.args[1:],
                spec.varargs,
                spec.varkw,
                spec.defaults,
                spec.kwonlyargs,
                spec.kwonlydefaults,
                spec.annotations,
            )
        else:
            return compat.inspect_getfullargspec(fn.__func__)
    elif inspect.isclass(fn):
        # a class: inspect its constructor
        return get_callable_argspec(
            fn.__init__, no_self=no_self, _is_init=True
        )
    elif hasattr(fn, "__func__"):
        return compat.inspect_getfullargspec(fn.__func__)
    elif hasattr(fn, "__call__"):
        # arbitrary object implementing __call__
        if inspect.ismethod(fn.__call__):
            return get_callable_argspec(fn.__call__, no_self=no_self)
        else:
            raise TypeError("Can't inspect callable: %s" % fn)
    else:
        raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(fn, grouped=True):
    """Returns a dictionary of formatted, introspected function arguments.
    A enhanced variant of inspect.formatargspec to support code generation.
    fn
       An inspectable callable or tuple of inspect getargspec() results.
    grouped
      Defaults to True; include (parens, around, argument) lists
    Returns:
    args
      Full inspect.formatargspec for fn
    self_arg
      The name of the first positional argument, varargs[0], or None
      if the function defines no positional arguments.
    apply_pos
      args, re-written in calling rather than receiving syntax.  Arguments are
      passed positionally.
    apply_kw
      Like apply_pos, except keyword-ish args are passed as keywords.
    Example::
      >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
      {'args': '(self, a, b, c=3, **d)',
       'self_arg': 'self',
       'apply_kw': '(self, a, b, c=c, **d)',
       'apply_pos': '(self, a, b, c, **d)'}
    """
    # accept either a callable or an already-obtained argspec tuple
    if compat.callable(fn):
        spec = compat.inspect_getfullargspec(fn)
    else:
        spec = fn
    args = compat.inspect_formatargspec(*spec)
    if spec[0]:
        self_arg = spec[0][0]
    elif spec[1]:
        # no named positionals, but *varargs: treat varargs[0] as self
        self_arg = "%s[0]" % spec[1]
    else:
        self_arg = None
    apply_pos = compat.inspect_formatargspec(
        spec[0], spec[1], spec[2], None, spec[4]
    )
    # count defaulted positionals plus keyword-only args so that apply_kw
    # can emit them all as name=name keyword references
    num_defaults = 0
    if spec[3]:
        num_defaults += len(spec[3])
    if spec[4]:
        num_defaults += len(spec[4])
    name_args = spec[0] + spec[4]
    if num_defaults:
        defaulted_vals = name_args[0 - num_defaults :]
    else:
        defaulted_vals = ()
    apply_kw = compat.inspect_formatargspec(
        name_args,
        spec[1],
        spec[2],
        defaulted_vals,
        formatvalue=lambda x: "=" + x,
    )
    if grouped:
        return dict(
            args=args,
            self_arg=self_arg,
            apply_pos=apply_pos,
            apply_kw=apply_kw,
        )
    else:
        # strip the surrounding parentheses
        return dict(
            args=args[1:-1],
            self_arg=self_arg,
            apply_pos=apply_pos[1:-1],
            apply_kw=apply_kw[1:-1],
        )
def format_argspec_init(method, grouped=True):
    """format_argspec_plus with considerations for typical __init__ methods
    Wraps format_argspec_plus with error handling strategies for typical
    __init__ cases::
      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)
    """
    if method is object.__init__:
        args = grouped and "(self)" or "self"
    else:
        try:
            return format_argspec_plus(method, grouped=grouped)
        except TypeError:
            # un-introspectable (usually C-implemented): fall back to a
            # catch-all signature
            args = (
                grouped
                and "(self, *args, **kwargs)"
                or "self, *args, **kwargs"
            )
    return dict(self_arg="self", args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
    """inspect.getargspec with considerations for typical __init__ methods
    Wraps inspect.getargspec with error handling for typical __init__ cases::
      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)
    """
    try:
        return compat.inspect_getfullargspec(method)
    except TypeError:
        # un-introspectable: synthesize an argspec-like tuple
        if method is object.__init__:
            return (["self"], None, None, None)
        else:
            return (["self"], "args", "kwargs", None)
def unbound_method_to_callable(func_or_cls):
    """Adjust the incoming callable such that a 'self' argument is not
    required.
    """
    # a py2k unbound method has __self__ of None; unwrap to the plain function
    if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
        return func_or_cls.__func__
    else:
        return func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()):
    """Produce a __repr__() based on direct association of the __init__()
    specification vs. same-named attributes present.
    """
    if to_inspect is None:
        to_inspect = [obj]
    else:
        to_inspect = _collections.to_list(to_inspect)
    missing = object()  # sentinel: attribute not present on obj
    pos_args = []
    kw_args = _collections.OrderedDict()
    vargs = None
    for i, insp in enumerate(to_inspect):
        try:
            spec = compat.inspect_getfullargspec(insp.__init__)
        except TypeError:
            continue
        else:
            # the first inspected object contributes positional args;
            # subsequent ones (e.g. base classes) contribute keywords only
            default_len = spec.defaults and len(spec.defaults) or 0
            if i == 0:
                if spec.varargs:
                    vargs = spec.varargs
                if default_len:
                    pos_args.extend(spec.args[1:-default_len])
                else:
                    pos_args.extend(spec.args[1:])
            else:
                kw_args.update(
                    [(arg, missing) for arg in spec.args[1:-default_len]]
                )
            if default_len:
                kw_args.update(
                    [
                        (arg, default)
                        for arg, default in zip(
                            spec.args[-default_len:], spec.defaults
                        )
                    ]
                )
    output = []
    output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
    if vargs is not None and hasattr(obj, vargs):
        output.extend([repr(val) for val in getattr(obj, vargs)])
    # keyword args are rendered only when they differ from their default
    for arg, defval in kw_args.items():
        if arg in omit_kwarg:
            continue
        try:
            val = getattr(obj, arg, missing)
            if val is not missing and val != defval:
                output.append("%s=%r" % (arg, val))
        except Exception:
            pass
    if additional_kw:
        for arg, defval in additional_kw:
            try:
                val = getattr(obj, arg, missing)
                if val is not missing and val != defval:
                    output.append("%s=%r" % (arg, val))
            except Exception:
                pass
    return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
    """Turn an instancemethod into a (parent, name) pair
    to produce a serializable callable.
    """
    __slots__ = "target", "name", "kwargs", "__weakref__"
    def __getstate__(self):
        # __slots__ classes have no __dict__; pickle state explicitly
        return {
            "target": self.target,
            "name": self.name,
            "kwargs": self.kwargs,
        }
    def __setstate__(self, state):
        self.target = state["target"]
        self.name = state["name"]
        # .get() for compatibility with pickles made before 'kwargs' existed
        self.kwargs = state.get("kwargs", ())
    def __init__(self, meth, kwargs=()):
        self.target = meth.__self__
        self.name = meth.__name__
        self.kwargs = kwargs
    def __call__(self, *arg, **kw):
        # late-bind the method by name so only (target, name) need pickling
        kw.update(self.kwargs)
        return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
    """Return an unordered sequence of all classes related to cls.
    Traverses diamond hierarchies.
    Fibs slightly: subclasses of builtin types are not returned.  Thus
    class_hierarchy(class A(object)) returns (A, object), not A plus every
    class systemwide that derives from object.
    Old-style classes are discarded and hierarchies rooted on them
    will not be descended.
    """
    if compat.py2k:
        if isinstance(cls, types.ClassType):
            return list()
    hier = {cls}
    process = list(cls.__mro__)
    while process:
        c = process.pop()
        # walk upwards through bases not yet seen
        if compat.py2k:
            if isinstance(c, types.ClassType):
                continue
            bases = (
                _
                for _ in c.__bases__
                if _ not in hier and not isinstance(_, types.ClassType)
            )
        else:
            bases = (_ for _ in c.__bases__ if _ not in hier)
        for b in bases:
            process.append(b)
            hier.add(b)
        # do not descend into subclasses of builtins (see docstring)
        if compat.py3k:
            if c.__module__ == "builtins" or not hasattr(c, "__subclasses__"):
                continue
        else:
            if c.__module__ == "__builtin__" or not hasattr(
                c, "__subclasses__"
            ):
                continue
        # walk downwards through subclasses not yet seen
        for s in [_ for _ in c.__subclasses__() if _ not in hier]:
            process.append(s)
            hier.add(s)
    return list(hier)
def iterate_attributes(cls):
    """iterate all the keys and attributes associated
    with a class, without using getattr().
    Does not use getattr() so that class-sensitive
    descriptors (i.e. property.__get__()) are not called.
    """
    for key in dir(cls):
        # walk the MRO and yield from the first class dict that defines
        # the key, leaving descriptors un-invoked
        for klass in cls.__mro__:
            if key in klass.__dict__:
                yield (key, klass.__dict__[key])
                break
def monkeypatch_proxied_specials(
    into_cls,
    from_cls,
    skip=None,
    only=None,
    name="self.proxy",
    from_instance=None,
):
    """Automates delegation of __specials__ for a proxying type."""
    if only:
        dunders = only
    else:
        if skip is None:
            skip = (
                "__slots__",
                "__del__",
                "__getattribute__",
                "__metaclass__",
                "__getstate__",
                "__setstate__",
            )
        # every dunder present on from_cls but absent on into_cls
        dunders = [
            m
            for m in dir(from_cls)
            if (
                m.startswith("__")
                and m.endswith("__")
                and not hasattr(into_cls, m)
                and m not in skip
            )
        ]
    for method in dunders:
        try:
            fn = getattr(from_cls, method)
            if not hasattr(fn, "__call__"):
                continue
            fn = getattr(fn, "im_func", fn)
        except AttributeError:
            continue
        try:
            spec = compat.inspect_getfullargspec(fn)
            fn_args = compat.inspect_formatargspec(spec[0])
            d_args = compat.inspect_formatargspec(spec[0][1:])
        except TypeError:
            fn_args = "(self, *args, **kw)"
            d_args = "(*args, **kw)"
        # generate e.g.: def __len__(self): return self.proxy.__len__()
        py = (
            "def %(method)s%(fn_args)s: "
            "return %(name)s.%(method)s%(d_args)s" % locals()
        )
        env = from_instance is not None and {name: from_instance} or {}
        compat.exec_(py, env)
        try:
            env[method].__defaults__ = fn.__defaults__
        except AttributeError:
            pass
        setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""
    # compare the underlying functions, unwrapping bound methods
    impl1 = getattr(meth1, "__func__", meth1)
    impl2 = getattr(meth2, "__func__", meth2)
    return impl1 is impl2
def as_interface(obj, cls=None, methods=None, required=None):
    """Ensure basic interface compliance for an instance or dict of callables.
    Checks that ``obj`` implements public methods of ``cls`` or has members
    listed in ``methods``. If ``required`` is not supplied, implementing at
    least one interface method is sufficient. Methods present on ``obj`` that
    are not in the interface are ignored.
    If ``obj`` is a dict and ``dict`` does not meet the interface
    requirements, the keys of the dictionary are inspected. Keys present in
    ``obj`` that are not in the interface will raise TypeErrors.
    Raises TypeError if ``obj`` does not meet the interface criteria.
    In all passing cases, an object with callable members is returned.  In the
    simple case, ``obj`` is returned as-is; if dict processing kicks in then
    an anonymous class is returned.
    obj
      A type, instance, or dictionary of callables.
    cls
      Optional, a type.  All public methods of cls are considered the
      interface.  An ``obj`` instance of cls will always pass, ignoring
      ``required``..
    methods
      Optional, a sequence of method names to consider as the interface.
    required
      Optional, a sequence of mandatory implementations. If omitted, an
      ``obj`` that provides at least one interface method is considered
      sufficient.  As a convenience, required may be a type, in which case
      all public methods of the type are required.
    """
    if not cls and not methods:
        raise TypeError("a class or collection of method names are required")
    if isinstance(cls, type) and isinstance(obj, cls):
        return obj
    interface = set(methods or [m for m in dir(cls) if not m.startswith("_")])
    implemented = set(dir(obj))
    # ge: implemented must cover 'required'; gt (no 'required' given):
    # implemented must strictly exceed the empty set, i.e. provide >= 1
    complies = operator.ge
    if isinstance(required, type):
        required = interface
    elif not required:
        required = set()
        complies = operator.gt
    else:
        required = set(required)
    if complies(implemented.intersection(interface), required):
        return obj
    # No dict duck typing here.
    if not isinstance(obj, dict):
        qualifier = complies is operator.gt and "any of" or "all of"
        raise TypeError(
            "%r does not implement %s: %s"
            % (obj, qualifier, ", ".join(interface))
        )
    class AnonymousInterface(object):
        """A callable-holding shell."""
    if cls:
        AnonymousInterface.__name__ = "Anonymous" + cls.__name__
    found = set()
    # build a shell class whose staticmethods are the dict's callables
    for method, impl in dictlike_iteritems(obj):
        if method not in interface:
            raise TypeError("%r: unknown in this interface" % method)
        if not compat.callable(impl):
            raise TypeError("%r=%r is not callable" % (method, impl))
        setattr(AnonymousInterface, method, staticmethod(impl))
        found.add(method)
    if complies(found, required):
        return AnonymousInterface
    raise TypeError(
        "dictionary does not contain required keys %s"
        % ", ".join(required - found)
    )
class memoized_property(object):
    """A read-only @property that is only evaluated once."""
    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__
    def __get__(self, obj, cls):
        if obj is None:
            return self
        # storing the computed value in the instance __dict__ shadows this
        # non-data descriptor, so __get__ is never invoked again for obj
        obj.__dict__[self.__name__] = result = self.fget(obj)
        return result
    def _reset(self, obj):
        memoized_property.reset(obj, self.__name__)
    @classmethod
    def reset(cls, obj, name):
        # discard the memoized value so it is recomputed on next access
        obj.__dict__.pop(name, None)
def memoized_instancemethod(fn):
    """Decorate a method memoize its return value.
    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.
    """
    def oneshot(self, *args, **kw):
        result = fn(self, *args, **kw)
        def memo(*a, **kw):
            return result
        memo.__name__ = fn.__name__
        memo.__doc__ = fn.__doc__
        # replace the method on this instance with a constant-returning
        # stand-in; subsequent calls never reach the original fn
        self.__dict__[fn.__name__] = memo
        return result
    return update_wrapper(oneshot, fn)
class group_expirable_memoized_property(object):
    """A family of @memoized_properties that can be expired in tandem."""
    def __init__(self, attributes=()):
        # names of all memoized attributes registered with this group
        self.attributes = []
        if attributes:
            self.attributes.extend(attributes)
    def expire_instance(self, instance):
        """Expire all memoized properties for *instance*."""
        stash = instance.__dict__
        for attribute in self.attributes:
            stash.pop(attribute, None)
    def __call__(self, fn):
        # use as decorator for a memoized property belonging to this group
        self.attributes.append(fn.__name__)
        return memoized_property(fn)
    def method(self, fn):
        # use as decorator for a memoized instance method in this group
        self.attributes.append(fn.__name__)
        return memoized_instancemethod(fn)
class MemoizedSlots(object):
    """Apply memoized items to an object using a __getattr__ scheme.
    This allows the functionality of memoized_property and
    memoized_instancemethod to be available to a class using __slots__.
    """
    __slots__ = ()
    def _fallback_getattr(self, key):
        # hook for subclasses to resolve non-memoized missing attributes
        raise AttributeError(key)
    def __getattr__(self, key):
        if key.startswith("_memoized"):
            # guard against infinite recursion while probing below
            raise AttributeError(key)
        elif hasattr(self, "_memoized_attr_%s" % key):
            # attribute-style memoization: compute once, then store
            value = getattr(self, "_memoized_attr_%s" % key)()
            setattr(self, key, value)
            return value
        elif hasattr(self, "_memoized_method_%s" % key):
            # method-style memoization: first call computes and installs a
            # constant-returning stand-in
            fn = getattr(self, "_memoized_method_%s" % key)
            def oneshot(*args, **kw):
                result = fn(*args, **kw)
                def memo(*a, **kw):
                    return result
                memo.__name__ = fn.__name__
                memo.__doc__ = fn.__doc__
                setattr(self, key, memo)
                return result
            oneshot.__doc__ = fn.__doc__
            return oneshot
        else:
            return self._fallback_getattr(key)
def dependency_for(modulename, add_to_all=False):
    # Decorator: install the decorated object as an attribute of the named
    # module (optionally appending it to that module's __all__).
    def decorate(obj):
        tokens = modulename.split(".")
        mod = compat.import_(
            ".".join(tokens[0:-1]), globals(), locals(), [tokens[-1]]
        )
        mod = getattr(mod, tokens[-1])
        setattr(mod, obj.__name__, obj)
        if add_to_all and hasattr(mod, "__all__"):
            mod.__all__.append(obj.__name__)
        return obj
    return decorate
class dependencies(object):
    """Apply imported dependencies as arguments to a function.
    E.g.::
        @util.dependencies(
            "sqlalchemy.sql.widget",
            "sqlalchemy.engine.default"
        );
        def some_func(self, widget, default, arg1, arg2, **kw):
            # ...
    Rationale is so that the impact of a dependency cycle can be
    associated directly with the few functions that cause the cycle,
    and not pollute the module-level namespace.
    """
    def __init__(self, *deps):
        self.import_deps = []
        for dep in deps:
            tokens = dep.split(".")
            self.import_deps.append(
                dependencies._importlater(".".join(tokens[0:-1]), tokens[-1])
            )
    def __call__(self, fn):
        import_deps = self.import_deps
        spec = compat.inspect_getfullargspec(fn)
        spec_zero = list(spec[0])
        hasself = spec_zero[0] in ("self", "cls")
        # inner call: the leading dependency parameters are filled from
        # import_deps; outer signature: those parameters are removed
        for i in range(len(import_deps)):
            spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i
        inner_spec = format_argspec_plus(spec, grouped=False)
        for impname in import_deps:
            del spec_zero[1 if hasself else 0]
        spec[0][:] = spec_zero
        outer_spec = format_argspec_plus(spec, grouped=False)
        code = "lambda %(args)s: fn(%(apply_kw)s)" % {
            "args": outer_spec["args"],
            "apply_kw": inner_spec["apply_kw"],
        }
        decorated = eval(code, locals())
        decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
        return update_wrapper(decorated, fn)
    @classmethod
    def resolve_all(cls, path):
        # import every pending dependency under the given dotted prefix
        for m in list(dependencies._unresolved):
            if m._full_path.startswith(path):
                m._resolve()
    _unresolved = set()
    _by_key = {}
    class _importlater(object):
        _unresolved = set()
        _by_key = {}
        def __new__(cls, path, addtl):
            # interned: one instance per (path, addtl) pair
            key = path + "." + addtl
            if key in dependencies._by_key:
                return dependencies._by_key[key]
            else:
                dependencies._by_key[key] = imp = object.__new__(cls)
                return imp
        def __init__(self, path, addtl):
            self._il_path = path
            self._il_addtl = addtl
            dependencies._unresolved.add(self)
        @property
        def _full_path(self):
            return self._il_path + "." + self._il_addtl
        @memoized_property
        def module(self):
            if self in dependencies._unresolved:
                raise ImportError(
                    "importlater.resolve_all() hasn't "
                    "been called (this is %s %s)"
                    % (self._il_path, self._il_addtl)
                )
            return getattr(self._initial_import, self._il_addtl)
        def _resolve(self):
            dependencies._unresolved.discard(self)
            self._initial_import = compat.import_(
                self._il_path, globals(), locals(), [self._il_addtl]
            )
        def __getattr__(self, key):
            if key == "module":
                # memoized 'module' missing means resolution failed
                raise ImportError(
                    "Could not resolve module %s" % self._full_path
                )
            try:
                attr = getattr(self.module, key)
            except AttributeError:
                raise AttributeError(
                    "Module %s has no attribute '%s'" % (self._full_path, key)
                )
            # cache the attribute so __getattr__ is bypassed next time
            self.__dict__[key] = attr
            return attr
# from paste.deploy.converters
def asbool(obj):
    """Coerce *obj* to a boolean, recognizing common true/false strings
    (case-insensitive, surrounding whitespace ignored)."""
    if not isinstance(obj, compat.string_types):
        return bool(obj)
    text = obj.strip().lower()
    if text in ["true", "yes", "on", "y", "t", "1"]:
        return True
    if text in ["false", "no", "off", "n", "f", "0"]:
        return False
    raise ValueError("String is not true/false: %r" % text)
def bool_or_str(*text):
    """Return a callable that will evaluate a string as
    boolean, or one of a set of "alternate" string values.
    """

    def bool_or_value(obj):
        return obj if obj in text else asbool(obj)

    return bool_or_value
def asint(value):
    """Coerce to integer."""
    return None if value is None else int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True, dest=None):
    r"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
    necessary.  If 'flexi_bool' is True, the string '0' is considered false
    when coercing to boolean.

    The coerced value is written into *dest*, which defaults to *kw*
    itself; ``None`` values and values already of the target type are
    left untouched.
    """
    if dest is None:
        dest = kw
    if key not in kw or kw[key] is None:
        return
    value = kw[key]
    # A non-class type_ (e.g. a converter function) always runs; a real
    # class is skipped when the value is already an instance of it.
    if isinstance(type_, type) and isinstance(value, type_):
        return
    if type_ is bool and flexi_bool:
        dest[key] = asbool(value)
    else:
        dest[key] = type_(value)
def constructor_key(obj, cls):
    """Produce a tuple structure that is cacheable using the __dict__ of
    obj to retrieve values

    The key consists of *cls* followed by (name, value) pairs for each
    constructor argument of *cls* found in ``obj.__dict__``.
    """
    accepted = get_cls_kwargs(cls)
    present = [(k, obj.__dict__[k]) for k in accepted if k in obj.__dict__]
    return (cls,) + tuple(present)
def constructor_copy(obj, cls, *args, **kw):
    """Instantiate cls using the __dict__ of obj as constructor arguments.

    Uses inspect to match the named arguments of ``cls``.  Explicitly
    passed keyword arguments take precedence over values from *obj*.
    """
    accepted = get_cls_kwargs(cls)
    for k in accepted.difference(kw):
        if k in obj.__dict__:
            kw[k] = obj.__dict__[k]
    return cls(*args, **kw)
def counter():
    """Return a threadsafe counter function."""
    guard = compat.threading.Lock()
    sequence = itertools.count(1)

    # avoid the 2to3 "next" transformation...
    def _next():
        with guard:
            return next(sequence)

    return _next
def duck_type_collection(specimen, default=None):
    """Given an instance or class, guess if it is or is acting as one of
    the basic collection types: list, set and dict.  If the __emulates__
    property is present, return that preferentially.
    """
    if hasattr(specimen, "__emulates__"):
        # canonicalize set vs sets.Set to a standard: the builtin set
        emulated = specimen.__emulates__
        if emulated is not None and issubclass(emulated, set):
            return set
        return emulated

    # classes are tested with issubclass, instances with isinstance
    check = issubclass if isinstance(specimen, type) else isinstance
    for collection_type in (list, set, dict):
        if check(specimen, collection_type):
            return collection_type

    # fall back to duck-typing on the mutation method each type exposes
    if hasattr(specimen, "append"):
        return list
    if hasattr(specimen, "add"):
        return set
    if hasattr(specimen, "set"):
        return dict
    return default
def assert_arg_type(arg, argtype, name):
    """Return *arg* unchanged if it is an instance of *argtype* (a type or
    tuple of types); otherwise raise :class:`.exc.ArgumentError` naming the
    argument *name*."""
    if not isinstance(arg, argtype):
        if isinstance(argtype, tuple):
            raise exc.ArgumentError(
                "Argument '%s' is expected to be one of type %s, got '%s'"
                % (name, " or ".join("'%s'" % a for a in argtype), type(arg))
            )
        raise exc.ArgumentError(
            "Argument '%s' is expected to be of type '%s', got '%s'"
            % (name, argtype, type(arg))
        )
    return arg
def dictlike_iteritems(dictlike):
    """Return a (key, value) iterator for almost any dict-like object."""
    # Python 3: anything exposing .items() is handled directly.
    if compat.py3k:
        if hasattr(dictlike, "items"):
            return list(dictlike.items())
    else:
        # Python 2: prefer the lazy iterator when available.
        if hasattr(dictlike, "iteritems"):
            return dictlike.iteritems()
        elif hasattr(dictlike, "items"):
            return iter(dictlike.items())
    # Fall back to objects offering item lookup plus key iteration.
    getter = getattr(dictlike, "__getitem__", getattr(dictlike, "get", None))
    if getter is None:
        raise TypeError("Object '%r' is not dict-like" % dictlike)
    if hasattr(dictlike, "iterkeys"):

        def iterator():
            for key in dictlike.iterkeys():
                yield key, getter(key)

        return iterator()
    elif hasattr(dictlike, "keys"):
        return iter((key, getter(key)) for key in dictlike.keys())
    else:
        raise TypeError("Object '%r' is not dict-like" % dictlike)
class classproperty(property):
    """A decorator that behaves like @property except that operates
    on classes rather than instances.

    The decorator is currently special when using the declarative
    module, but note that the
    :class:`~.sqlalchemy.ext.declarative.declared_attr`
    decorator should be used for this purpose with declarative.

    """

    def __init__(self, fget, *arg, **kw):
        super(classproperty, self).__init__(fget, *arg, **kw)
        self.__doc__ = fget.__doc__

    def __get__(self, obj, owner):
        # Always invoke the getter with the owning class, never the instance.
        return self.fget(owner)
class hybridproperty(object):
    """Descriptor whose function receives either the owning class (on
    class-level access) or the instance (on instance-level access)."""

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        if instance is not None:
            return self.func(instance)
        result = self.func(owner)
        # propagate the wrapped function's docstring onto the class-level value
        result.__doc__ = self.func.__doc__
        return result
class hybridmethod(object):
    """Decorate a function as cls- or instance- level."""

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        if instance is None:
            # class-level access: bind the function to the class itself
            return self.func.__get__(owner, owner.__class__)
        # instance-level access: ordinary bound method
        return self.func.__get__(instance, owner)
class _symbol(int):
    """Internal int subclass backing :class:`.symbol`; carries a ``name``
    and optional docstring on top of its integer (hash) value."""

    def __new__(self, name, doc=None, canonical=None):
        """Construct a new named symbol."""
        assert isinstance(name, compat.string_types)
        # the integer value defaults to the hash of the name unless an
        # explicit canonical value is supplied
        if canonical is None:
            canonical = hash(name)
        v = int.__new__(_symbol, canonical)
        v.name = name
        if doc:
            v.__doc__ = doc
        return v

    def __reduce__(self):
        # Pickle via the public symbol() constructor so unpickling returns
        # the interned singleton; the "x" placeholder doc is discarded when
        # the name is already registered.
        return symbol, (self.name, "x", int(self))

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "symbol(%r)" % self.name


# present the internal class under the public name in reprs/tracebacks
_symbol.__name__ = "symbol"
class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    symbol('foo')

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  They are also singletons.

    Repeated calls of symbol('name') will all return the same instance.

    The optional ``doc`` argument assigns to ``__doc__``.  This
    is strictly so that Sphinx autoattr picks up the docstring we want
    (it doesn't appear to pick up the in-module docstring if the datamember
    is in a different module - autoattribute also blows up completely).
    If Sphinx fixes/improves this then we would no longer need
    ``doc`` here.

    """

    # registry of interned _symbol instances keyed by name, guarded by _lock
    symbols = {}
    _lock = compat.threading.Lock()

    def __new__(cls, name, doc=None, canonical=None):
        # lock-guarded get-or-create so each name maps to one instance
        with cls._lock:
            sym = cls.symbols.get(name)
            if sym is None:
                cls.symbols[name] = sym = _symbol(name, doc, canonical)
            return sym

    @classmethod
    def parse_user_argument(
        cls, arg, choices, name, resolve_symbol_names=False
    ):
        """Given a user parameter, parse the parameter into a chosen symbol.

        The user argument can be a string name that matches the name of a
        symbol, or the symbol object itself, or any number of alternate choices
        such as True/False/ None etc.

        :param arg: the user argument.
        :param choices: dictionary of symbol object to list of possible
         entries.
        :param name: name of the argument.   Used in an :class:`.ArgumentError`
         that is raised if the parameter doesn't match any available argument.
        :param resolve_symbol_names: include the name of each symbol as a valid
         entry.

        """
        # note using hash lookup is tricky here because symbol's `__hash__`
        # is its int value which we don't want included in the lookup
        # explicitly, so we iterate and compare each.
        for sym, choice in choices.items():
            if arg is sym:
                return sym
            elif resolve_symbol_names and arg == sym.name:
                return sym
            elif arg in choice:
                return sym
        # None is always accepted as "no choice made"
        if arg is None:
            return None
        raise exc.ArgumentError("Invalid value for '%s': %r" % (name, arg))
# module-level monotonically increasing sequence backing set_creation_order()
_creation_order = 1


def set_creation_order(instance):
    """Assign a '_creation_order' sequence to the given instance.

    This allows multiple instances to be sorted in order of creation
    (typically within a single thread; the counter is not particularly
    threadsafe).

    """
    global _creation_order
    instance._creation_order = _creation_order
    _creation_order += 1
def warn_exception(func, *args, **kwargs):
    """Execute *func*, converting any exception raised into a warning.

    Returns the function's result on success; on failure emits a warning
    via :func:`.warn` and returns None.
    """
    try:
        result = func(*args, **kwargs)
    except Exception:
        warn("%s('%s') ignored" % sys.exc_info()[0:2])
        return None
    return result
def ellipses_string(value, len_=25):
    """Truncate *value* to *len_* characters, appending '...' when it is
    longer; values without a length (TypeError) pass through unchanged."""
    try:
        if len(value) <= len_:
            return value
        return "%s..." % value[0:len_]
    except TypeError:
        return value
class _hash_limit_string(compat.text_type):
    """A string subclass that can only be hashed on a maximum amount
    of unique values.

    This is used for warnings so that we can send out parameterized warnings
    without the __warningregistry__ of the module,  or the non-overridable
    "once" registry within warnings.py, overloading memory,

    """

    def __new__(cls, value, num, args):
        interpolated = (value % args) + (
            " (this warning may be suppressed after %d occurrences)" % num
        )
        self = super(_hash_limit_string, cls).__new__(cls, interpolated)
        # Fold the interpolated text into one of *num* hash buckets so at
        # most *num* variants of the same template register as distinct
        # entries in the warnings registry.
        self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
        return self

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        # Equality follows the bucketed hash, not the string content.
        return hash(self) == hash(other)
def warn(msg):
    """Issue a warning.

    If msg is a string, :class:`.exc.SAWarning` is used as
    the category.

    """
    # stacklevel=2 attributes the warning to warn()'s caller
    warnings.warn(msg, exc.SAWarning, stacklevel=2)
def warn_limited(msg, args):
    """Issue a warning with a parameterized string, limiting the number
    of registrations.

    """
    if args:
        # wrap in _hash_limit_string so at most 10 distinct variants of this
        # template occupy the warnings registry
        msg = _hash_limit_string(msg, 10, args)
    warnings.warn(msg, exc.SAWarning, stacklevel=2)
def only_once(fn, retry_on_exception):
    """Decorate the given function to be a no-op after it is called exactly
    once.

    If *retry_on_exception* is true and the first call raises, the function
    remains callable and will be attempted again on the next invocation.
    """
    pending = [fn]

    def go(*arg, **kw):
        # hold a strong reference to fn so it isn't garbage collected,
        # which would interfere with the event system's expectations
        strong_fn = fn  # noqa
        if not pending:
            return None
        target = pending.pop()
        try:
            return target(*arg, **kw)
        except:
            if retry_on_exception:
                pending.insert(0, target)
            raise

    return go
_SQLA_RE = re.compile(r"sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py")
_UNITTEST_RE = re.compile(r"unit(?:2|test2?/)")


def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
    """Chop extraneous lines off beginning and end of a traceback.

    :param tb:
      a list of traceback lines as returned by ``traceback.format_stack()``

    :param exclude_prefix:
      a regular expression object matching lines to skip at beginning of
      ``tb``

    :param exclude_suffix:
      a regular expression object matching lines to skip at end of ``tb``
    """
    first = 0
    last = len(tb) - 1
    while first <= last and exclude_prefix.search(tb[first]):
        first += 1
    while first <= last and exclude_suffix.search(tb[last]):
        last -= 1
    return tb[first : last + 1]
NoneType = type(None)


def attrsetter(attrname):
    """Return a function ``set(obj, value)`` that assigns *value* to the
    (possibly dotted) attribute path *attrname* on *obj*."""
    namespace = {}
    exec("def set(obj, value): obj.%s = value" % attrname, namespace)
    return namespace["set"]
class EnsureKWArgType(type):
    r"""Apply translation of functions to accept \**kw arguments if they
    don't already.

    The using class declares ``ensure_kwarg`` as a regular expression;
    any method whose name matches it and lacks a \**kw collector is
    wrapped so extra keyword arguments are silently accepted and dropped.
    """

    def __init__(cls, clsname, bases, clsdict):
        fn_reg = cls.ensure_kwarg
        if fn_reg:
            for key in clsdict:
                m = re.match(fn_reg, key)
                if m:
                    fn = clsdict[key]
                    spec = compat.inspect_getfullargspec(fn)
                    # only wrap functions that do not already take **kw
                    if not spec.varkw:
                        clsdict[key] = wrapped = cls._wrap_w_kw(fn)
                        setattr(cls, key, wrapped)
        super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict)

    def _wrap_w_kw(self, fn):
        # accept **kw but discard it; positional args pass through
        def wrap(*arg, **kw):
            return fn(*arg)

        return update_wrapper(wrap, fn)
def wrap_callable(wrapper, fn):
    """Augment functools.update_wrapper() to work with objects with
    a ``__call__()`` method.

    :param fn:
      object with __call__ method

    """
    if hasattr(fn, "__name__"):
        return update_wrapper(wrapper, fn)
    # callable object: copy identity metadata over by hand
    wrapper.__name__ = fn.__class__.__name__
    if hasattr(fn, "__module__"):
        wrapper.__module__ = fn.__module__
    call_doc = getattr(fn.__call__, "__doc__", None)
    if call_doc:
        wrapper.__doc__ = call_doc
    elif fn.__doc__:
        wrapper.__doc__ = fn.__doc__
    return wrapper
def quoted_token_parser(value):
    """Parse a dotted identifier with accommodation for quoted names.

    Includes support for SQL-style double quotes as a literal character;
    a doubled ``""`` inside a quoted section unescapes to a single ``"``.

    E.g.::

        >>> quoted_token_parser("name")
        ['name']
        >>> quoted_token_parser("schema.name")
        ['schema', 'name']
        >>> quoted_token_parser('"Schema"."Name"')
        ['Schema', 'Name']

    """
    if '"' not in value:
        return value.split(".")

    tokens = [[]]
    in_quotes = False
    skip_next = False
    for pos, ch in enumerate(value):
        if skip_next:
            skip_next = False
            continue
        if ch == '"':
            if in_quotes and value[pos + 1 : pos + 2] == '"':
                # escaped quote inside a quoted section
                tokens[-1].append('"')
                skip_next = True
            else:
                in_quotes = not in_quotes
        elif ch == "." and not in_quotes:
            tokens.append([])
        else:
            tokens[-1].append(ch)
    return ["".join(token) for token in tokens]
def add_parameter_text(params, text):
    """Decorator that injects *text* as documentation for each of the
    named ``:param:`` entries in the decorated function's docstring."""
    params = _collections.to_list(params)

    def decorate(fn):
        doc = fn.__doc__ or ""
        if doc:
            doc = inject_param_text(doc, {p: text for p in params})
        fn.__doc__ = doc
        return fn

    return decorate
def _dedent_docstring(text):
split_text = text.split("\n", 1)
if len(split_text) == 1:
return text
else:
firstline, remaining = split_text
if not firstline.startswith(" "):
return firstline + "\n" + textwrap.dedent(remaining)
else:
return textwrap.dedent(text)
def inject_docstring_text(doctext, injecttext, pos):
    """Insert the dedented *injecttext* into *doctext* at the *pos*-th
    blank line (position 0 being the very start of the docstring)."""
    base_lines = _dedent_docstring(doctext or "").split("\n")
    extra = textwrap.dedent(injecttext).split("\n")
    if extra[0]:
        # ensure the injected block is separated by a leading blank line
        extra.insert(0, "")
    blank_positions = [0]
    for num, line in enumerate(base_lines):
        if not line.strip():
            blank_positions.append(num)
    at = blank_positions[min(pos, len(blank_positions) - 1)]
    return "\n".join(base_lines[:at] + extra + base_lines[at:])
def inject_param_text(doctext, inject_params):
    """Rewrite *doctext*, appending the text from *inject_params* (a dict of
    parameter name -> extra text) to the body of each matching ``:param:``
    entry.  The injected text is emitted when the next ``:param:`` entry or a
    blank line terminates the current entry."""
    doclines = doctext.splitlines()
    lines = []
    to_inject = None
    while doclines:
        line = doclines.pop(0)
        if to_inject is None:
            # scan for a ":param name:" line (optionally "\*"/"\**" prefixed)
            m = re.match(r"(\s+):param (?:\\\*\*?)?(.+?):", line)
            if m:
                param = m.group(2)
                if param in inject_params:
                    # default indent to that of :param: plus one
                    indent = " " * len(m.group(1)) + " "
                    # but if the next line has text, use that line's
                    # indentation
                    if doclines:
                        m2 = re.match(r"(\s+)\S", doclines[0])
                        if m2:
                            indent = " " * len(m2.group(1))
                    to_inject = indent + inject_params[param]
        elif line.lstrip().startswith(":param "):
            # next parameter begins: flush pending injection before it
            lines.append("\n")
            lines.append(to_inject)
            lines.append("\n")
            to_inject = None
        elif not line.rstrip():
            # blank line ends the current :param: body: flush injection
            lines.append(line)
            lines.append(to_inject)
            lines.append("\n")
            to_inject = None
        elif line.endswith("::"):
            # TODO: this still wont cover if the code example itself has blank
            # lines in it, need to detect those via indentation.
            lines.append(line)
            lines.append(
                doclines.pop(0)
            )  # the blank line following a code example
            continue
        lines.append(line)
    return "\n".join(lines)
| [
"yushevaa@gmail.com"
] | yushevaa@gmail.com |
34e34b57a15f76966c9e27ff6ef2ed51cbc7481b | 441f0b4b4f2016ace7bed37431779b3352b9c2e4 | /Book Introdução Programação com Python/6 - Listas/06.06 - Calculo da media com notas digitadas.py | bb7d337bfbc147548494e910e77d646532c873ca | [] | no_license | AlexGalhardo/Learning-Python | 936b2eae814d148b0b3b77cc76cf81b45fbb4a02 | b710952101a0409f585ba975e2854bf0e0286ac7 | refs/heads/master | 2020-05-19T23:32:49.285710 | 2019-09-04T17:37:27 | 2019-09-04T17:37:27 | 134,312,273 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2014
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/1012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Site: http://python.nilo.pro.br/
#
# Arquivo: capitulo 06\06.06 - Calculo da media com notas digitadas.py
##############################################################################
notas = [0,0,0,0,0]  # the five grades, filled in from user input below
soma = 0  # running total of the grades
x = 0
# Read the five grades, accumulating their sum.
while x < 5:
    notas[x] = float(input("Nota %d:" % x))
    soma += notas[x]
    x += 1
x = 0
# Echo each grade back, formatted with two decimal places.
while x < 5:
    print("Nota %d: %6.2f" % (x, notas[x]))
    x += 1
# After the loop x == 5, so this prints the average of the five grades.
print("Média: %5.2f" % (soma/x))
| [
"aleexgvieira@gmail.com"
] | aleexgvieira@gmail.com |
a44a3e6650365ff9d37092a330edb5c10091ad47 | a5688a923c488414ecffcb92e3405d3876f1889d | /examples/computer_vision/mmdetection_pytorch/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py | ab39c49a7335453756504a84235c99c4463cfda5 | [
"Apache-2.0"
] | permissive | armandmcqueen/determined | ae6e7a4d5d8c3fb6a404ed35519643cf33bd08e4 | 251e7093b60a92633b684586ac7a566379442f15 | refs/heads/master | 2023-05-28T17:52:18.915710 | 2021-06-09T23:55:59 | 2021-06-09T23:55:59 | 259,449,481 | 0 | 0 | Apache-2.0 | 2021-04-09T12:13:11 | 2020-04-27T20:47:23 | Go | UTF-8 | Python | false | false | 3,686 | py | _base_ = [
"../_base_/models/cascade_rcnn_r50_fpn.py",
"../_base_/datasets/coco_detection.py",
"../_base_/schedules/schedule_1x.py",
"../_base_/default_runtime.py",
]
# model settings
# SABL cascade R-CNN with a ResNet-101 backbone: overrides the r50 base
# config's backbone and swaps each cascade stage's bbox head for SABLHead.
model = dict(
    pretrained="torchvision://resnet101",
    backbone=dict(depth=101),
    roi_head=dict(
        bbox_head=[
            # Stage 1: coarsest localization (bucket scale_factor=1.7)
            dict(
                type="SABLHead",
                num_classes=80,
                cls_in_channels=256,
                reg_in_channels=256,
                roi_feat_size=7,
                reg_feat_up_ratio=2,
                reg_pre_kernel=3,
                reg_post_kernel=3,
                reg_pre_num=2,
                reg_post_num=1,
                cls_out_channels=1024,
                reg_offset_out_channels=256,
                reg_cls_out_channels=256,
                num_cls_fcs=1,
                num_reg_fcs=0,
                reg_class_agnostic=True,
                norm_cfg=None,
                bbox_coder=dict(
                    type="BucketingBBoxCoder", num_buckets=14, scale_factor=1.7
                ),
                loss_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
                ),
                loss_bbox_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
                ),
                loss_bbox_reg=dict(type="SmoothL1Loss", beta=0.1, loss_weight=1.0),
            ),
            # Stage 2: identical head with a tighter bucket (scale_factor=1.5)
            dict(
                type="SABLHead",
                num_classes=80,
                cls_in_channels=256,
                reg_in_channels=256,
                roi_feat_size=7,
                reg_feat_up_ratio=2,
                reg_pre_kernel=3,
                reg_post_kernel=3,
                reg_pre_num=2,
                reg_post_num=1,
                cls_out_channels=1024,
                reg_offset_out_channels=256,
                reg_cls_out_channels=256,
                num_cls_fcs=1,
                num_reg_fcs=0,
                reg_class_agnostic=True,
                norm_cfg=None,
                bbox_coder=dict(
                    type="BucketingBBoxCoder", num_buckets=14, scale_factor=1.5
                ),
                loss_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
                ),
                loss_bbox_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
                ),
                loss_bbox_reg=dict(type="SmoothL1Loss", beta=0.1, loss_weight=1.0),
            ),
            # Stage 3: finest refinement (scale_factor=1.3)
            dict(
                type="SABLHead",
                num_classes=80,
                cls_in_channels=256,
                reg_in_channels=256,
                roi_feat_size=7,
                reg_feat_up_ratio=2,
                reg_pre_kernel=3,
                reg_post_kernel=3,
                reg_pre_num=2,
                reg_post_num=1,
                cls_out_channels=1024,
                reg_offset_out_channels=256,
                reg_cls_out_channels=256,
                num_cls_fcs=1,
                num_reg_fcs=0,
                reg_class_agnostic=True,
                norm_cfg=None,
                bbox_coder=dict(
                    type="BucketingBBoxCoder", num_buckets=14, scale_factor=1.3
                ),
                loss_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
                ),
                loss_bbox_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
                ),
                loss_bbox_reg=dict(type="SmoothL1Loss", beta=0.1, loss_weight=1.0),
            ),
        ]
    ),
)
| [
"noreply@github.com"
] | armandmcqueen.noreply@github.com |
bfb7ae8370d6c159723df606e7ca1d00215c9bd5 | d1c427249d1161c1f4f848e1de23d95c03ae40a3 | /501_practitioner_rate_input_landing.py | ae9c1309902dd59363e1fbc687283a03b1e721c4 | [] | no_license | Sangee2610/pythonscripts_march1 | 94b80ab3b037793022d114d7cd3604d69ba82147 | 2fb224fc0753beb3d65d873f658cdae247425cf1 | refs/heads/master | 2020-04-26T05:03:00.998024 | 2019-03-01T15:07:46 | 2019-03-01T15:07:46 | 173,321,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | import psycopg2
import config as cfg
conn = cfg.DATABASE_CONNECT
cur = conn.cursor()
import csv
cur.execute("""
DROP TABLE IF EXISTS Landing_Partitioner_Rate;
CREATE TABLE Landing_Partitioner_Rate(
Name text,
ContactKey text,
Contact text,
PayBand text,
Boundary text,
StartDate text,
EndDate text,
Cost text,
Datasource text,
Owner_ text
)
""")
input_file = '/home/baadmin/NCT_ETL/input_files/practitioner_rate.csv'
def data_cleaning_loading(filename):
    """Strip commas from every field of the CSV *filename*, write the
    cleaned rows (header dropped) to ``<name>_corrected.csv``, and bulk-load
    them into the ``Landing_Partitioner_Rate`` table via COPY.

    NOTE(review): relies on the module-level ``cur``/``conn`` database
    handles, and files are not closed on error (no try/finally) -- confirm
    this is acceptable for the ETL job's failure modes.
    """
    new_filename = filename.replace(".csv", "_corrected.csv")
    f = open(filename, encoding="ISO-8859-1")
    g = open(new_filename, "w+", encoding="utf-8")
    new_rows = []
    changes = { ',' : ''}
    for row in csv.reader(f, quotechar='"', delimiter=',',quoting=csv.QUOTE_ALL, skipinitialspace=True): # iterate over the rows in the file
        new_row = row # at first, just copy the row
        for key, value in changes.items(): # iterate over 'changes' dictionary
            new_row = [ x.replace(key, value) for x in new_row ] # make the substitutions
        new_rows.append(new_row) # add the modified rows
    new_rows = new_rows[1:] #Remove header
    for new_row in new_rows:
        g.write(str(",".join(new_row)) + "\n")
    g.close()
    # reopen the cleaned file for reading so COPY streams it into Postgres
    g = open(new_filename)
    cur.copy_from(g, 'Landing_Partitioner_Rate', sep=",")
    conn.commit()
    g.close()
    f.close()


data_cleaning_loading(input_file)
| [
"noreply@github.com"
] | Sangee2610.noreply@github.com |
8c46c93984d57edb4696caf91058e8aa7b2ff09e | ba4f99f24a0a13ff20a07d12adfcc4eba73b874a | /setup.py | 6d3e3b0b140952ba5b5052ee8976dfad14dea044 | [
"MIT"
] | permissive | chenwang12/whitebox-python | d65f7cb774218fe1776f939742d5f90d0cc23b39 | 23d88860332bfe498ac2830fb03b8fd5e4755906 | refs/heads/master | 2023-04-10T10:18:52.885997 | 2021-04-21T01:26:44 | 2021-04-21T01:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
# Long-description pieces assembled from the repository's docs.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
requirements = ['Click>=6.0', ]  # runtime dependencies
setup_requirements = [ ]  # build-time dependencies (none)
test_requirements = [ ]  # test-only dependencies (none)
# Package metadata and entry points for the whitebox distribution.
setup(
    author="Qiusheng Wu",
    author_email='giswqs@gmail.com',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    description="An advanced geospatial data analysis platform ",
    entry_points={
        'console_scripts': [
            'whitebox=whitebox.cli:main',
        ],
    },
    install_requires=requirements,
    license="MIT license",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='whitebox',
    name='whitebox',
    packages=find_packages(include=['whitebox']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/giswqs/whitebox',
    version='1.4.1',
    zip_safe=False,
)
| [
"giswqs@gmail.com"
] | giswqs@gmail.com |
1224fa8663e7f8ddcd037f1d3789e5caf813d63e | c4b94158b0ac8f1c4f3d535b6cdee5d1639743ce | /Python/377__Combination_Sum_IV.py | 4435fd17e5f9fb491a78890b190d9d7d2eafecaa | [] | no_license | FIRESTROM/Leetcode | fc61ae5f11f9cb7a118ae7eac292e8b3e5d10e41 | 801beb43235872b2419a92b11c4eb05f7ea2adab | refs/heads/master | 2020-04-04T17:40:59.782318 | 2019-08-26T18:58:21 | 2019-08-26T18:58:21 | 156,130,665 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | class Solution(object):
def combinationSum4(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
self.dic = {}
def helper(s):
if s in self.dic:
return self.dic[s]
if s == 0:
return 1
result = 0
for val in nums:
if s - val >= 0:
result += helper(s - val)
self.dic[s] = result
return result
return helper(target)
| [
"junou_cui@berkeley.edu"
] | junou_cui@berkeley.edu |
5359c90102abe3c637b3160593118e340ddb6395 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /outposts_write_1/outpost_create.py | 352d0b4588fd7900394d3db6690e24f3bc11c552 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/outposts/create-outpost.html
if __name__ == '__main__':
    """
    delete-outpost : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/outposts/delete-outpost.html
    get-outpost : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/outposts/get-outpost.html
    list-outposts : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/outposts/list-outposts.html
    """
    # Help text shown to the user describing the single required parameter.
    parameter_display_string = """
    # site-id : The ID of the site.
    """
    # Extra options forwarded to the shared single-parameter command runner.
    add_option_dict = {}
    #######################################################################
    # parameter display string
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegates to the shared wrapper: `aws outposts create-outpost --site-id <value>`
    write_one_parameter("outposts", "create-outpost", "site-id", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
8289c203d520271f1af4b250c4cd28f9e98a894d | d22f8cd1a834f706d2c0cd77a814414cb4650265 | /data/data/models/structures/character/limits.py | f053282a268cf6c670541a5371fa985bd90c5b0e | [
"MIT"
] | permissive | teris1994/L2py | 9e7535935f58d729453f39bee998f21240b85e8b | 07cc5d7c5d52ac4179378b29ef4873b11f6daa0c | refs/heads/master | 2023-09-01T06:21:10.625029 | 2021-10-24T12:48:18 | 2021-10-24T13:21:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from dataclasses import dataclass, field
from common.dataclass import BaseDataclass
@dataclass
class Limits(BaseDataclass):
    """Per-character capacity limits sent to the game client.

    NOTE(review): the ``Int32`` annotations are not imported in this
    module -- presumably supplied by the ``common.dataclass`` machinery;
    confirm before relying on them being resolvable at runtime.
    """

    inventory: Int32  # max inventory slots
    warehouse: Int32  # max private warehouse slots
    freight: Int32  # max freight slots
    sell: Int32  # max private-store sell slots
    buy: Int32  # max private-store buy slots
    dwarf_recipe: Int32  # max dwarven recipe book entries
    common_recipe: Int32  # max common recipe book entries
| [
"yurzs@icloud.com"
] | yurzs@icloud.com |
ae0fde02f81e4d5fc05921176c8b982fddf3e2d4 | 7e69ef0295a00d413b79b6c7646ca837e8dcb4fa | /conductor/feeds.py | 27b3f2c14076785163645c18ee493a2b71aefb7d | [
"MIT"
] | permissive | random-labs/conductor | 1d2ac1e2bb4a7d4833636f7a7a8bea432126191c | 547dfb2c2b36c16d828fcaaf11db49d3bdece527 | refs/heads/master | 2021-06-24T21:43:31.372914 | 2017-09-08T20:43:11 | 2017-09-08T20:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | import time
import traceback
from steem import Steem
from .config import witness
from .markets import Markets
from .utils import unlock_steempy_wallet
steem = Steem()
settings = {
"sleep_time_seconds": 10 * 60,
"minimum_spread_pct": 2.0,
}
def get_last_published_price(witness_name):
    """Return the witness's last published feed price (base / quote),
    or 0 when the quote side of the published rate is zero/unpublished."""
    my_info = steem.get_witness_by_account(witness_name)
    price = 0
    # guard against division by zero when no feed has been published
    if float(my_info["sbd_exchange_rate"]["quote"].split()[0]) != 0:
        price = float(my_info["sbd_exchange_rate"]["base"].split()[0]) / float(
            my_info["sbd_exchange_rate"]["quote"].split()[0])
    return price
def refresh_price_feeds(witness_name, support_peg=False):
    """Compare the witness's published price against current market prices
    and publish an updated feed when the spread exceeds the configured
    minimum (but ignore implausible spreads above 25%).

    :param witness_name: account name of the witness to update.
    :param support_peg: when True, publish a quote of 1/SBD-price to
        support the SBD peg; otherwise the quote is fixed at "1.000".
    """
    print(time.ctime())
    markets = Markets(cache_timeout=30)
    # old prices
    old_adj_price = get_last_published_price(witness_name)
    print("Old Price: " + format(old_adj_price, ".3f"))
    # new prices
    steem_usd = markets.steem_usd_implied()
    sbd_usd = markets.sbd_usd_implied()
    quote = round(1 / sbd_usd, 3) if support_peg else "1.000"
    quote_adj_current_price = round(steem_usd / float(quote), 3)
    print('New Price: %s' % quote_adj_current_price)
    print('\nCurrent STEEM price: %.3f USD' % steem_usd)
    print('Current SBD price: %.3f USD' % sbd_usd)
    print('Quote: %s STEEM' % quote)
    # publish a new price only if the spread has widened enough
    spread = abs(markets.calc_spread(old_adj_price, quote_adj_current_price))
    print("\nSpread between prices: %.3f%%" % spread)
    if spread > 25:
        print("Possibly invalid spread (%.2f%%), ignoring..." % spread)
    elif spread > settings['minimum_spread_pct']:
        steem.commit.witness_feed_publish(steem_usd, quote=quote, account=witness_name)
        print("Updated the witness price feed.")
    print('\n\n')
def run_price_feeds(**kwargs):
    """Unlock the wallet, then refresh the witness price feed forever.

    Ctrl-C exits cleanly; any other error is printed and retried after a
    short back-off.  Extra keyword arguments are forwarded to
    :func:`refresh_price_feeds` (e.g. ``support_peg``).

    Fix: the original used a bare ``except:``, which also swallows
    ``SystemExit``/``GeneratorExit`` and can block interpreter shutdown;
    narrowed to ``except Exception`` (KeyboardInterrupt is handled above).
    """
    unlock_steempy_wallet()
    while True:
        try:
            refresh_price_feeds(witness('name'), **kwargs)
            time.sleep(settings['sleep_time_seconds'])
        except KeyboardInterrupt:
            print('Quitting...')
            return
        except Exception:
            # log the failure and retry after a short back-off
            print(traceback.format_exc())
            time.sleep(10)
if __name__ == '__main__':
pass
| [
"_@furion.me"
] | _@furion.me |
713c2b2c5286ea101228bc49bbc219e3083f413c | e2d22f12f8e540a80d31de9debe775d35c3c5c22 | /blousebrothers/confs/migrations/0027_auto_20161209_1619.py | b224d425b525fa31c181a087f8fe4458290b4a7d | [
"MIT"
] | permissive | sladinji/blousebrothers | 360c3b78ec43379977dbf470e5721e6a695b2354 | 461de3ba011c0aaed3f0014136c4497b6890d086 | refs/heads/master | 2022-12-20T10:24:07.631454 | 2019-06-13T13:17:35 | 2019-06-13T13:17:35 | 66,867,705 | 1 | 0 | NOASSERTION | 2022-12-19T18:15:44 | 2016-08-29T18:04:33 | Python | UTF-8 | Python | false | false | 2,510 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-12-09 16:19
from __future__ import unicode_literals
import blousebrothers.confs.models
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
import image_cropping.fields
class Migration(migrations.Migration):
    """Auto-generated migration: adds the AnswerImage model, a global
    correction note on Question, and tightens several existing fields."""

    dependencies = [
        ('confs', '0026_create_test_for_conferenciers'),
    ]

    operations = [
        # New model holding one or more croppable images per Answer.
        migrations.CreateModel(
            name='AnswerImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', image_cropping.fields.ImageCropField(max_length=255, upload_to=blousebrothers.confs.models.answer_image_directory_path, verbose_name='Image')),
                ('cropping', image_cropping.fields.ImageRatioField('image', '430x360', adapt_rotation=False, allow_fullsize=False, free_crop=True, help_text=None, hide_image_field=False, size_warning=False, verbose_name='cropping')),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
                ('caption', models.CharField(blank=True, max_length=200, verbose_name='Libellé')),
                ('index', models.PositiveIntegerField(default=0, verbose_name='Ordre')),
            ],
        ),
        # Optional global remark shown with the question's correction.
        migrations.AddField(
            model_name='question',
            name='explaination',
            field=models.TextField(blank=True, null=True, verbose_name='Remarque globale pour la correction'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='explaination_image',
            field=image_cropping.fields.ImageCropField(blank=True, max_length=255, null=True, upload_to=blousebrothers.confs.models.answer_image_directory_path, verbose_name='Image'),
        ),
        # Conference sale price now defaults to 0.50.
        migrations.AlterField(
            model_name='conference',
            name='price',
            field=models.DecimalField(decimal_places=2, default=Decimal('0.5'), help_text='', max_digits=6, verbose_name='Prix de vente'),
        ),
        migrations.AlterField(
            model_name='question',
            name='question',
            field=models.TextField(verbose_name='Enoncé'),
        ),
        # Wire AnswerImage rows back to their Answer.
        migrations.AddField(
            model_name='answerimage',
            name='answer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='confs.Answer'),
        ),
    ]
| [
"julien.almarcha@gmail.com"
] | julien.almarcha@gmail.com |
6c2f8aa083e3f008c65511b61390b8d865a33b09 | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/openpyxl/openpyxl/utils/units.pyi | 6264bc8263e8765abc985e5b35777a99076350cf | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 692 | pyi | from typing_extensions import Final
# Worksheet layout defaults.
DEFAULT_ROW_HEIGHT: Final[float]
BASE_COL_WIDTH: Final = 8
DEFAULT_COLUMN_WIDTH: Final = 13
DEFAULT_LEFT_MARGIN: Final[float]
DEFAULT_TOP_MARGIN: Final[float]
DEFAULT_HEADER: Final[float]

# Length conversions between inches/cm and dxa (twentieths of a point).
def inch_to_dxa(value): ...
def dxa_to_inch(value): ...
def dxa_to_cm(value): ...
def cm_to_dxa(value): ...

# Conversions to/from EMU (English Metric Units) used by drawings.
def pixels_to_EMU(value): ...
def EMU_to_pixels(value): ...
def cm_to_EMU(value): ...
def EMU_to_cm(value): ...
def inch_to_EMU(value): ...
def EMU_to_inch(value): ...

# Pixel/point conversions, parameterized by screen DPI.
def pixels_to_points(value, dpi: int = 96): ...
def points_to_pixels(value, dpi: int = 96): ...

# Rotation conversions between degrees and 60000ths-of-a-degree angles.
def degrees_to_angle(value): ...
def angle_to_degrees(value): ...

# Normalize a color string to its short form.
def short_color(color): ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
4d59d2813350d10612df8936d180ef40f296eed9 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/f56387ebf1894a488924586759d551df.py | 76000a8d0392cc004c9488d8f5416a627db04e28 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 724 | py | from string import punctuation
whiteSpace = " \t\n"
numbers = "1234567890"
def isCapsLock(stuff):
    """Return True when *stuff* contains at least one letter and every
    letter is upper-case (i.e. the message is "shouted")."""
    # Collect the letters once instead of building a string with repeated
    # ``+=`` concatenation (which is quadratic in CPython-independent terms).
    letters = "".join(c for c in stuff if c.isalpha())
    if not letters:
        # No letters at all -> cannot be shouting.
        return False
    # Same test as ``letters.upper() == letters``: uncased alphabetic
    # characters compare equal to their upper() form, so they don't veto.
    return letters.upper() == letters
def isNotEnglish(stuff):
    """Return False if *stuff* contains any character that is not a letter,
    whitespace, punctuation or one of the digit characters in ``numbers``;
    otherwise return True.

    Bug fix: the original test used chained comparisons of the form
    ``i in punctuation == False``, which Python parses as
    ``(i in punctuation) and (punctuation == False)`` -- always False --
    so the whole condition could never trigger and the function always
    returned True.
    """
    for i in stuff:
        if not (i.isalpha() or i.isspace() or i in punctuation or i in numbers):
            return False
    return True
def hey(stuff):
    """Bob's reply: silence, shouting, a question, or anything else."""
    # Guard clauses instead of an if/elif ladder; behavior is unchanged.
    if stuff == "" or stuff.isspace():
        return "Fine. Be that way!"
    if isCapsLock(stuff):
        return "Whoa, chill out!"
    if stuff[-1] == "?":
        return "Sure."
    return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
7ff052bddfeed588e5781b786468c13fa0e360ff | 41e2cf24f0ff3a11a98bb00e03c598dde35452c4 | /reportview/migrations/0020_auto_20180802_1306.py | dedffeb085b6e16e63c3bef27d3101cbb3aed9dd | [] | no_license | anushamokashi/mob | f5dbedc729073092f94323feca6d95dee24087a2 | 37bc0eb033bc23d37e9d4fb9bb8b2b456553ff7f | refs/heads/master | 2020-04-24T08:36:56.008212 | 2019-02-21T09:09:04 | 2019-02-21T09:09:04 | 171,810,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-02 07:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``reportview`` app.

    Drops the presentation-related columns ``action_type``, ``htmlfile``
    and ``iconcls`` from the ``ReportPrintFormatAction`` model.
    """

    # Must be applied after the previous reportview migration.
    dependencies = [
        ('reportview', '0019_auto_20180802_1246'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='reportprintformataction',
            name='action_type',
        ),
        migrations.RemoveField(
            model_name='reportprintformataction',
            name='htmlfile',
        ),
        migrations.RemoveField(
            model_name='reportprintformataction',
            name='iconcls',
        ),
    ]
| [
"anusha.mokashi@gmail.com"
] | anusha.mokashi@gmail.com |
79b9eedd6f17c01c7de1fa837f1e31fcb1e6ac50 | 03c9cd5bd96874d6117fb17c37ac4d7450c15933 | /django-tutorial/chapter04/orm_field_demo/article/migrations/0005_person_signature.py | 872309e8204b1b99c5fd56c8430551defb6db66c | [] | no_license | atiger808/opencv-tutorial | 603de35e97679d6beae104298ae355edfdd9036a | 2ea9bb3818284fb75f85697e36fde37b6479d1c6 | refs/heads/master | 2020-05-29T23:16:30.462022 | 2019-11-05T10:08:20 | 2019-11-05T10:08:20 | 189,425,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # Generated by Django 2.0 on 2019-05-14 09:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``article`` app.

    Adds a ``signature`` TextField to ``Person``. The empty-string default
    exists only to back-fill existing rows; ``preserve_default=False``
    removes it from the field definition after the migration runs.
    """

    dependencies = [
        ('article', '0004_auto_20190514_1622'),
    ]

    operations = [
        migrations.AddField(
            model_name='person',
            name='signature',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
| [
"atiger0614@163.com"
] | atiger0614@163.com |
a3a7782413ebcec1c92fcaa6dae3cb78c21a3113 | 135f967e9dbbc681e031b9b0adbd85a5dbe43649 | /reveal_graph_embedding/embedding/laplacian.py | 8f6b4e431bdb3bf0c320838624481b8e2ee028ab | [
"Apache-2.0"
] | permissive | gm0907/reveal-graph-embedding | 9d1c9501c542b2f473c73b22c3cc6373910ec8ef | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | refs/heads/master | 2021-02-26T23:42:09.514887 | 2019-07-08T11:12:43 | 2019-07-08T11:12:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | __author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as spsp
from reveal_graph_embedding.embedding.implicit import get_implicit_combinatorial_adjacency_matrix,\
get_implicit_directed_adjacency_matrix
def get_unnormalized_laplacian(adjacency_matrix):
    """Return the combinatorial graph Laplacian L = D - A as CSR float64.

    The node degrees are taken as the column sums of *adjacency_matrix*
    (equal to row sums for the symmetric/undirected case).
    """
    # Diagonal degree matrix D, built from the column sums of A.
    node_degrees = adjacency_matrix.sum(axis=0)
    degree_matrix = spsp.dia_matrix((node_degrees, np.array([0])),
                                    shape=adjacency_matrix.shape).tocsr()

    # L = D - A, forced to a float64 CSR matrix.
    return spsp.csr_matrix(degree_matrix - adjacency_matrix, dtype=np.float64)
def get_normalized_laplacian(adjacency_matrix):
    """Return the symmetric normalized Laplacian D^{-1/2} (D - A) D^{-1/2}.

    Robustness: zero-degree (isolated) nodes are handled explicitly. If an
    explicit zero survives on the degree diagonal, ``1/sqrt(0)`` would
    inject inf/NaN into the result; such entries are mapped to 0 instead,
    which zeroes the corresponding rows/columns (pseudo-inverse convention,
    as used by e.g. scipy.sparse.csgraph.laplacian).
    """
    # Calculate diagonal matrix of node degrees (column sums of A).
    degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])),
                             shape=adjacency_matrix.shape)
    degree = degree.tocsr()

    # Unnormalized Laplacian L = D - A.
    laplacian = spsp.csr_matrix(-adjacency_matrix + degree, dtype=np.float64)

    # Inverse square root of the degrees; zero degrees map to 0, not inf.
    with np.errstate(divide="ignore"):
        inv_sqrt_degree = np.real(1 / np.sqrt(degree.data))
    inv_sqrt_degree[~np.isfinite(inv_sqrt_degree)] = 0.0
    degree.data = inv_sqrt_degree

    # D^{-1/2} * L * D^{-1/2}
    normalized_laplacian = degree * laplacian * degree

    return normalized_laplacian
def get_random_walk_laplacian(adjacency_matrix):
    """Return the random-walk Laplacian L_rw = D^{-1} (D - A) as CSR.

    Robustness: zero-degree (isolated) nodes are handled explicitly. If an
    explicit zero survives on the degree diagonal, ``1/0`` would inject
    inf/NaN into the result; such entries are mapped to 0 instead, which
    zeroes the corresponding rows (pseudo-inverse convention).
    """
    # Calculate diagonal matrix of node degrees (column sums of A).
    degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])),
                             shape=adjacency_matrix.shape)
    degree = degree.tocsr()

    # Unnormalized Laplacian L = D - A.
    laplacian = spsp.csr_matrix(-adjacency_matrix + degree, dtype=np.float64)

    # Inverse of the degrees; zero degrees map to 0, not inf.
    with np.errstate(divide="ignore"):
        inv_degree = np.real(1 / degree.data)
    inv_degree[~np.isfinite(inv_degree)] = 0.0
    degree.data = inv_degree

    # D^{-1} * L
    random_walk_laplacian = degree * laplacian

    return random_walk_laplacian
"""
def get_directed_laplacian(adjacency_matrix, rho=0.2):
number_of_nodes = adjacency_matrix.shape[0]
effective_adjacency_matrix, rw_distribution = get_implicit_directed_adjacency_matrix(adjacency_matrix, rho)
I = spsp.spdiags(rw_distribution, [0], number_of_nodes, number_of_nodes)
theta_matrix = I - effective_adjacency_matrix
return theta_matrix
def get_combinatorial_laplacian(adjacency_matrix, rho=0.2):
number_of_nodes = adjacency_matrix.shape[0]
effective_adjacency_matrix, rw_distribution = get_implicit_combinatorial_adjacency_matrix(adjacency_matrix, rho)
I = spsp.spdiags(rw_distribution, [0], number_of_nodes, number_of_nodes)
theta_matrix = I - effective_adjacency_matrix
return theta_matrix
""" | [
"georgevrizos@gmail.com"
] | georgevrizos@gmail.com |
c76711de951568a3ad9478239d5c98cbf869d606 | 16d32837fe02613774e64c4b19a3fba20de60d3d | /pebble_tool/version.py | 54b0d2159865cd281768546eeee92d416b9eeda7 | [
"MIT"
] | permissive | bboehmke/pebble-tool | c42558d696c9bceed7f283ef1a1f98b2f5e3d1bd | 64caa870714042df601288463272e17e4f4165b4 | refs/heads/master | 2021-01-17T21:22:11.337174 | 2015-12-15T06:43:24 | 2015-12-15T06:43:24 | 48,221,683 | 0 | 0 | null | 2015-12-18T07:45:53 | 2015-12-18T07:45:53 | null | UTF-8 | Python | false | false | 372 | py | version_base = (4, 0, 0)
version_suffix = None
if version_suffix is None:
__version_info__ = version_base
else:
__version_info__ = version_base + (version_suffix,)
__version__ = '{}.{}'.format(*version_base)
if version_base[2] != 0:
__version__ += '.{}'.format(version_base[3])
if version_suffix is not None:
__version__ += '-{}'.format(version_suffix)
| [
"katharine@getpebble.com"
] | katharine@getpebble.com |
7cef6d955fd4d1a75af3860ab27581432f225c11 | 7d096568677660790479d87c22b47aae838ef96b | /stubs-legacy/System/ComponentModel/__init___parts/DisplayNameAttribute.py | 6564ffaab714f51b11aa1f4215aabc8dcb0b85d1 | [
"MIT"
] | permissive | NISystemsEngineering/rfmx-pythonnet | 30adbdd5660b0d755957f35b68a4c2f60065800c | cd4f90a88a37ed043df880972cb55dfe18883bb7 | refs/heads/master | 2023-02-04T00:39:41.107043 | 2023-02-01T21:58:50 | 2023-02-01T21:58:50 | 191,603,578 | 7 | 5 | MIT | 2023-02-01T21:58:52 | 2019-06-12T16:02:32 | Python | UTF-8 | Python | false | false | 2,076 | py | class DisplayNameAttribute(Attribute,_Attribute):
"""
Specifies the display name for a property,event,or public void method which takes no arguments.
DisplayNameAttribute()
DisplayNameAttribute(displayName: str)
"""
def Equals(self,obj):
"""
Equals(self: DisplayNameAttribute,obj: object) -> bool
Determines whether two System.ComponentModel.DisplayNameAttribute instances are equal.
obj: The System.ComponentModel.DisplayNameAttribute to test the value equality of.
Returns: true if the value of the given object is equal to that of the current object; otherwise,false.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: DisplayNameAttribute) -> int
Returns the hash code for this instance.
Returns: A hash code for the current System.ComponentModel.DisplayNameAttribute.
"""
pass
def IsDefaultAttribute(self):
"""
IsDefaultAttribute(self: DisplayNameAttribute) -> bool
Determines if this attribute is the default.
Returns: true if the attribute is the default value for this attribute class; otherwise,false.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,displayName=None):
"""
__new__(cls: type)
__new__(cls: type,displayName: str)
"""
pass
def __ne__(self,*args):
pass
DisplayName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the display name for a property,event,or public void method that takes no arguments stored in this attribute.
Get: DisplayName(self: DisplayNameAttribute) -> str
"""
DisplayNameValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the display name.
"""
Default=None
| [
"sean.moore@ni.com"
] | sean.moore@ni.com |
5a10e7dcbb01bcb35ecece0ac1088042ad6fe0ec | ac66a6aa244878a1e0a40b1aa95dd7734d3b9e50 | /scripts/injectr.py | b99799561b0cda07ff24ef3696bcd224b66f4f43 | [
"MIT"
] | permissive | MontyTRC89/TOMB5 | 5fdcb11d1e0909a04ae703f4fe2a74fd56904242 | 8c0c5bc3f6fc04ab13b76126818e95d823b4db03 | refs/heads/master | 2020-08-30T10:52:14.568009 | 2019-10-26T23:53:30 | 2019-10-26T23:53:30 | 218,357,222 | 1 | 0 | MIT | 2019-10-29T18:35:46 | 2019-10-29T18:35:46 | null | UTF-8 | Python | false | false | 28,271 | py | #!/usr/bin/env python
# _ _ _ _
# ______| (_)_ __ ___ ___ _ __ ___(_) ___ _ __( )__
# |_ / _` | | '_ ` _ \ / _ \ '_ \/ __| |/ _ \| '_ \/ __|
# / / (_| | | | | | | | __/ | | \__ \ | (_) | | | \__ \
# /___\__,_|_|_| |_| |_|\___|_| |_|___/_|\___/|_| |_|___/
#
# _ _ _______ _____
# (_) (_) |__ __| __ \
# _ _ __ _ ___ ___| | | |__) |
# | | '_ \| |/ _ \/ __| | | _ /
# | | | | | | __/ (__| | | | \ \
# |_|_| |_| |\___|\___|_| |_| \_\
# _/ |
# |__/
#
# Basically, this takes the original game executable and replaces original
# functions' implementations by ours. For that, it creates a new section in
# the PE file, put all the functions' code in it, and replaces the first 8
# bytes at the beginning of each function by a "jmp ADDR" where ADDR is the
# address of our implementation of the function. This allows for easier
# testing of the validity of the functions.
# Btw it really whips the llama's ass.
# increment this plz
# rev 1
# 2017-10-22
import json
import os.path
import sys
import pefile
from shutil import copyfile
try:
import idc
except ImportError:
print("IDA not detected, restarting with IDA! Make sure that idaq.exe is in PATH!")
pth = "../build/SPEC_PC/Debug/PCTomb5.exe"
if not os.path.isfile(pth):
pth = "../build/SPEC_PC/Debug/PCTOMB5.EXE"
copyfile(pth, os.path.join(os.getcwd(), "PCTOMB5.EXE"))
os.system('idaq.exe -A -S"' + "".join(sys.argv) + '" PCTOMB5.EXE')
with open("output.txt", "r") as f:
print(f.read())
exit()
from urllib2 import urlopen
print(os.getcwd())
orig_stdout = sys.stdout
sys.stdout = file("output.txt", "w")
# STEP
# _
# / |
# | |
# | |
# |_|
#
# Global settings
def isarg(arg, default):
    """Read a +flag / -flag switch from the IDA command line (idc.ARGV).

    Any '+<arg>' token wins over any '-<arg>' token regardless of their
    order; when neither form is present, *default* is returned.
    """
    tokens = [token.strip().lower() for token in idc.ARGV]
    if "+%s" % arg in tokens:
        return True
    if "-%s" % arg in tokens:
        return False
    return default
ISSUE_ID = 46 # ID of the issue on GitHub (for the API request)
SHOW_WARN = isarg("warn", True) # Show warnings (error messages)
SHOW_HINT = isarg("hint", True) # Show hints (e.g. function checked in list but not (F) in file)
SHOW_STATUS = isarg("status", False) # Show status messages (function added successfully yay)
USE_REPR = isarg("userepr", False) # Debugging purposes. When outputting a list (e.g. SHOW_UNIMPL), use repr()
EXCLUDED = [] # List of functions that *will* be considered UNIMPLEMENTED, even if they are checked in the list.
# STEP
# ____
# |___ \
# __) |
# / __/
# |_____|
#
# Download the current list
#body = urlopen("https://api.github.com/repos/TOMB5/TOMB5/issues/46").read().decode("utf8")
#issue = json.loads(body)["body"] # loads the Markdown source of the issue
issue = """### GAME
#### BOX.C
- [ ] `AIGuard`
- [ ] `AlertAllGuards`
- [x] `AlertNearbyGuards`
- [ ] `BadFloor`
- [ ] `CalculateTarget`
- [x] `CreatureActive`
- [ ] `CreatureAIInfo`
- [ ] `CreatureAnimation`
- [ ] `CreatureCreature`
- [x] `CreatureDie`
- [ ] `CreatureEffect`
- [ ] `CreatureEffectT`
- [ ] `CreatureFloat`
- [ ] `CreatureJoint`
- [ ] `CreatureKill`
- [ ] `CreatureMood`
- [x] `CreatureTilt`
- [ ] `CreatureTurn`
- [ ] `CreatureUnderwater`
- [ ] `CreatureVault`
- [x] `CreatureYRot`
- [x] `DropBaddyPickups`
- [ ] `EscapeBox`
- [ ] `FindAITargetObject`
- [ ] `GetAITarget`
- [ ] `GetCreatureMood`
- [x] `InitialiseCreature`
- [ ] `MoveCreature3DPos`
- [x] `SameZone`
- [ ] `SearchLOT`
- [ ] `StalkBox`
- [x] `TargetBox`
- [x] `UpdateLOT`
- [x] `ValidBox`
#### CAMERA.C
- [x] `AlterFOV`
- [ ] `BinocularCamera`
- [ ] `CalculateCamera`
- [ ] `CameraCollisionBounds`
- [ ] `ChaseCamera`
- [ ] `CombatCamera`
- [ ] `ConfirmCameraTargetPos`
- [ ] `FixedCamera`
- [x] `InitialiseCamera`
- [ ] `LaraTorch`
- [ ] `LookCamera`
- [ ] `mgLOS`
- [ ] `MoveCamera`
- [ ] `ScreenShake`
- [ ] `UpdateCameraElevation`
#### COLLIDE.C
- [ ] `AIPickupCollision`
- [ ] `CreatureCollision`
- [ ] `GenericSphereBoxCollision`
- [ ] `ItemPushLaraStatic`
- [ ] `LaraBaddieCollision`
- [x] `ShiftItem`
- [ ] `TestBoundsCollideStatic`
- [ ] `TestForObjectOnLedge`
- [ ] `TrapCollision`
- [x] `TriggerLaraBlood`
- [x] `UpdateLaraRoom`
#### CONTROL.C
- [ ] `_TestTriggers`
- [x] `AddFire`
- [x] `AddRoomFlipItems`
- [ ] `AlterFloorHeight`
- [ ] `check_xray_machine_trigger`
- [x] `CheckCutPlayed`
- [ ] `CheckGuardOnTrigger`
- [x] `ClearFires`
- [ ] `ControlPhase`
- [x] `ExplodeItemNode`
- [ ] `FireCrossBowFromLaserSight`
- [x] `FlipMap`
- [x] `GetDoor`
- [x] `GetFloor`
- [x] `GetRandomControl`
- [x] `GetRandomDraw`
- [ ] `GetTargetOnLOS`
- [ ] `GetWaterHeight`
- [x] `InitCutPlayed`
- [ ] `InterpolateAngle`
- [ ] `is_object_in_room`
- [x] `KillMoveEffects`
- [x] `KillMoveItems`
- [x] `LOS`
- [ ] `NeatAndTidyTriggerCutscene`
- [ ] `RefreshCamera`
- [x] `RemoveRoomFlipItems`
- [ ] `ResetGuards`
- [x] `SeedRandomControl`
- [x] `SeedRandomDraw`
- [x] `SetCutNotPlayed`
- [x] `SetCutPlayed`
- [x] `TestTriggers`
- [ ] `TriggerCDTrack`
- [ ] `TriggerNormalCDTrack`
#### DEBRIS.C
- [ ] `GetFreeDebris`
- [ ] `ShatterObject`
- [ ] `TriggerDebris`
#### DELSTUFF.C
- [ ] `CalcLaraMatrices` @Gh0stBlade - WIP
#### DELTAPAK.C
- [x] `andrea1_control`
- [x] `andrea1_end`
- [x] `andrea1_init`
- [ ] `andrea2_control`
- [x] `andrea2_end`
- [x] `andrea2_init`
- [x] `andrea3_control`
- [x] `andrea3_end`
- [x] `andrea3_init`
- [x] `andrea3b_control`
- [x] `andrea3b_end`
- [x] `andrea3b_init`
- [x] `andrea4_control`
- [x] `andrea4_end`
- [x] `andrea4_init`
- [x] `andy10_control`
- [x] `andy10_end`
- [x] `andy10_init`
- [x] `andy11_control`
- [x] `andy11_end`
- [x] `andy11_init`
- [x] `andy1_control`
- [x] `andy1_end`
- [x] `andy1_init`
- [x] `andy2_control`
- [x] `andy2_end`
- [x] `andy2_init`
- [x] `andy3_control`
- [x] `andy3_end`
- [x] `andy3_init`
- [x] `andy4_control`
- [x] `andy4_end`
- [x] `andy4_init`
- [x] `andy4b_control`
- [x] `andy4b_end`
- [x] `andy4b_init`
- [x] `andy5_control`
- [x] `andy5_end`
- [x] `andy5_init`
- [x] `andy6_control`
- [x] `andy6_end`
- [x] `andy6_init`
- [x] `andy7_control`
- [x] `andy7_end`
- [x] `andy7_init`
- [x] `andy8_control`
- [x] `andy8_end`
- [x] `andy8_init`
- [x] `andy9_control`
- [x] `andy9_end`
- [x] `andy9_init`
- [x] `andypew_control`
- [x] `andypew_end`
- [x] `andypew_init`
- [ ] `CalculateObjectLightingLaraCutSeq`
- [x] `cossack_control`
- [x] `cossack_end`
- [x] `cossack_init`
- [x] `cranecut_control`
- [x] `cranecut_end`
- [x] `cranecut_init`
- [x] `Cutanimate`
- [x] `CutLaraBubbles`
- [x] `cutseq_givelara_hk`
- [x] `cutseq_givelara_pistols`
- [x] `cutseq_kill_item`
- [x] `cutseq_malloc`
- [x] `cutseq_removelara_hk`
- [x] `cutseq_removelara_pistols`
- [x] `cutseq_restore_item`
- [ ] `cutseq_shoot_pistols`
- [x] `deal_with_actor_shooting`
- [x] `deal_with_pistols`
- [x] `DelsHandyTeleportLara`
- [x] `DelTorchFlames`
- [x] `do_catapult_meshswap`
- [x] `do_chalk_meshswap`
- [x] `do_clanger_meshswap`
- [x] `do_hammer_meshswap`
- [ ] `do_new_cutscene_camera`
- [x] `do_pierre_gun_meshswap`
- [x] `find_a_fucking_item`
- [x] `finish_cutseq`
- [ ] `frigup_lara`
- [ ] `GetActorJointAbsPosition`
- [ ] `GrabActorMatrix`
- [x] `hamgate_control`
- [x] `hamgate_end`
- [x] `hamgate_init`
- [x] `handle_actor_chatting`
- [ ] `handle_cutseq_triggering`
- [x] `handle_lara_chatting`
- [ ] `init_cutseq_actors`
- [x] `init_cutseq_malloc`
- [x] `init_resident_cutseq`
- [ ] `InitPackNodes`
- [x] `joby10_control`
- [x] `joby10_end`
- [x] `joby10_init`
- [x] `joby2_control`
- [x] `joby2_end`
- [x] `joby2_init`
- [x] `joby3_control`
- [x] `joby3_end`
- [x] `joby3_init`
- [ ] `joby4_control`
- [x] `joby4_end`
- [x] `joby4_init`
- [x] `joby5_control`
- [x] `joby5_end`
- [x] `joby5_init`
- [x] `joby6_control`
- [x] `joby6_end`
- [x] `joby6_init`
- [x] `joby7_control`
- [x] `joby7_end`
- [x] `joby7_init`
- [ ] `joby8_control`
- [x] `joby8_end`
- [x] `joby8_init`
- [x] `joby9_control`
- [x] `joby9_end`
- [x] `joby9_init`
- [ ] `Load_and_Init_Cutseq`
- [x] `monk2_control`
- [x] `monk2_end`
- [x] `monk2_init`
- [x] `ResetCutanimate`
- [x] `richcut1_control`
- [x] `richcut1_end`
- [x] `richcut1_init`
- [ ] `richcut2_control`
- [x] `richcut2_end`
- [x] `richcut2_init`
- [x] `richcut3_control`
- [x] `richcut3_end`
- [x] `richcut3_init`
- [x] `richcut4_control`
- [x] `richcut4_end`
- [x] `richcut4_init`
- [x] `setup_preist_meshswap`
- [ ] `special1_control`
- [ ] `special1_end`
- [ ] `special1_init`
- [ ] `special2_control`
- [ ] `special2_end`
- [ ] `special2_init`
- [ ] `special3_control`
- [ ] `special3_end`
- [ ] `special3_init`
- [ ] `special4_control`
- [ ] `special4_end`
- [ ] `special4_init`
- [ ] `stealth3_end`
- [ ] `stealth3_start`
- [x] `swampy_control`
- [x] `swampy_end`
- [x] `swampy_init`
- [ ] `trigger_title_spotcam`
- [x] `trigger_weapon_dynamics`
- [x] `TriggerActorBlood`
- [x] `TriggerDelBrownSmoke`
- [x] `TriggerDelSmoke`
- [x] `TriggerUnderwaterBlood`
#### DOOR.C
- [ ] `DoorCollision`
- [ ] `DoorControl`
- [ ] `DoubleDoorCollision`
- [ ] `OpenThatDoor`
- [ ] `ProcessClosedDoors`
- [ ] `PushPullKickDoorCollision`
- [ ] `PushPullKickDoorControl`
- [ ] `SequenceDoorControl`
- [ ] `ShutThatDoor`
- [ ] `UnderwaterDoorCollision`
#### DRAW.C
- [ ] `CalculateObjectLightingLara`
- [ ] `UpdateSkyLightning`
#### EFFECT2.C
- [ ] `ControlEnemyMissile`
- [ ] `ControlSmokeEmitter`
- [ ] `DetatchSpark`
- [ ] `KillAllCurrentItems`
- [x] `KillEverything`
- [ ] `TriggerDartSmoke`
- [x] `TriggerGunSmoke`
- [ ] `TriggerSuperJetFlame`
- [ ] `TriggerWaterfallMist`
#### EFFECTS.C
- [x] `ActivateCamera`
- [x] `ActivateKey`
- [ ] `ClearSpidersPatch`
- [x] `ExplosionFX`
- [x] `finish_level_effect`
- [ ] `floor_shake_effect`
- [x] `invisibility_off`
- [x] `invisibility_on`
- [x] `KillActiveBaddies`
- [x] `lara_hands_free`
- [x] `LaraLocation`
- [x] `LaraLocationPad`
- [x] `PoseidonSFX`
- [x] `reset_hair`
- [ ] `ResetTest`
- [x] `RubbleFX`
- [x] `SetFog`
- [x] `shoot_left_gun`
- [x] `shoot_right_gun`
- [ ] `SoundEffects`
- [x] `SoundFlipEffect`
- [x] `SwapCrowbar`
- [x] `TL_1`
- [x] `TL_10`
- [x] `TL_11`
- [x] `TL_12`
- [x] `TL_2`
- [x] `TL_3`
- [x] `TL_4`
- [x] `TL_5`
- [x] `TL_6`
- [x] `TL_7`
- [x] `TL_8`
- [x] `TL_9`
- [x] `turn180_effect`
- [x] `void_effect`
- [ ] `WaterFall`
#### FLMTORCH.C
- [ ] `DoFlameTorch`
- [ ] `FireCollision`
- [ ] `FlameTorchControl`
- [ ] `GetFlameTorch`
- [ ] `TriggerTorchFlame`
#### GAMEFLOW.C
- [ ] `DoGameflow`
- [ ] `DoLevel`
- [ ] `DoTitle`
- [ ] `LoadGameflow`
- [ ] `QuickControlPhase`
#### HAIR.C
- [x] `InitialiseHair`
#### HEALTH.C
- [ ] `AddDisplayPickup`
- [ ] `DrawAirBar`
- [ ] `DrawGameInfo`
- [ ] `DrawHealthBar`
- [ ] `DrawPickups`
- [ ] `FlashIt`
- [x] `InitialisePickUpDisplay`
#### ITEMS.C
- [x] `AddActiveItem`
- [x] `CreateEffect`
- [x] `CreateItem`
- [ ] `EffectNewRoom`
- [x] `InitialiseFXArray`
- [ ] `InitialiseItem`
- [x] `InitialiseItemArray`
- [x] `ItemNewRoom`
- [ ] `KillEffect`
- [ ] `KillItem`
- [ ] `RemoveActiveItem`
- [ ] `RemoveDrawnItem`
#### LARA.C
- [ ] `ApplyVelocityToRope`
- [ ] `CanLaraHangSideways`
- [ ] `FallFromRope`
- [x] `GetDirOctant`
- [x] `GetLaraCollisionInfo`
- [x] `GetTighRopeFallOff`
- [ ] `IsValidHangPos`
- [ ] `JumpOffRope`
- [ ] `lara_as_all4s`
- [x] `lara_as_all4turnl`
- [x] `lara_as_all4turnr`
- [x] `lara_as_back`
- [x] `lara_as_backjump`
- [x] `lara_as_climbrope`
- [ ] `lara_as_climbroped`
- [ ] `lara_as_compress`
- [x] `lara_as_controlled`
- [x] `lara_as_controlledl`
- [x] `lara_as_crawl`
- [ ] `lara_as_crawlb`
- [ ] `lara_as_dash`
- [x] `lara_as_dashdive`
- [x] `lara_as_death`
- [x] `lara_as_deathslide`
- [x] `lara_as_duck`
- [x] `lara_as_duckl`
- [x] `lara_as_duckr`
- [x] `lara_as_extcornerl`
- [x] `lara_as_extcornerr`
- [x] `lara_as_fallback`
- [x] `lara_as_fastback`
- [x] `lara_as_fastdive`
- [x] `lara_as_fastfall`
- [x] `lara_as_fastturn`
- [x] `lara_as_forwardjump`
- [x] `lara_as_gymnast`
- [x] `lara_as_hang`
- [x] `lara_as_hang2`
- [x] `lara_as_hangleft`
- [x] `lara_as_hangright`
- [x] `lara_as_hangturnl`
- [x] `lara_as_hangturnr`
- [x] `lara_as_intcornerl`
- [x] `lara_as_intcornerr`
- [x] `lara_as_leftjump`
- [x] `lara_as_monkey180`
- [x] `lara_as_monkeyl`
- [x] `lara_as_monkeyr`
- [x] `lara_as_monkeyswing`
- [x] `lara_as_null`
- [x] `lara_as_parallelbars`
- [ ] `lara_as_pbleapoff`
- [x] `lara_as_pickup`
- [x] `lara_as_pickupflare`
- [x] `lara_as_poleleft`
- [x] `lara_as_poleright`
- [x] `lara_as_ppready`
- [x] `lara_as_pullblock`
- [x] `lara_as_pulley`
- [x] `lara_as_pushblock`
- [x] `lara_as_reach`
- [x] `lara_as_rightjump`
- [x] `lara_as_rope`
- [x] `lara_as_ropel`
- [x] `lara_as_roper`
- [ ] `lara_as_run`
- [x] `lara_as_slide`
- [x] `lara_as_slideback`
- [x] `lara_as_special`
- [x] `lara_as_splat`
- [x] `lara_as_stepleft`
- [x] `lara_as_stepright`
- [ ] `lara_as_stop`
- [x] `lara_as_swandive`
- [x] `lara_as_switchoff`
- [x] `lara_as_switchon`
- [ ] `lara_as_trfall`
- [x] `lara_as_trpose`
- [x] `lara_as_trwalk`
- [x] `lara_as_turn_l`
- [x] `lara_as_turn_r`
- [x] `lara_as_upjump`
- [x] `lara_as_usekey`
- [x] `lara_as_usepuzzle`
- [x] `lara_as_wade`
- [x] `lara_as_walk`
- [x] `lara_as_waterout`
- [ ] `lara_col_all4s`
- [x] `lara_col_all4turnlr`
- [ ] `lara_col_back`
- [x] `lara_col_backjump`
- [x] `lara_col_compress`
- [ ] `lara_col_crawl`
- [ ] `lara_col_crawl2hang`
- [ ] `lara_col_crawlb`
- [ ] `lara_col_dash`
- [ ] `lara_col_dashdive`
- [x] `lara_col_death`
- [ ] `lara_col_duck`
- [x] `lara_col_ducklr`
- [x] `lara_col_fallback`
- [x] `lara_col_fastback`
- [x] `lara_col_fastdive`
- [x] `lara_col_fastfall`
- [x] `lara_col_fastturn`
- [x] `lara_col_forwardjump`
- [ ] `lara_col_hang`
- [ ] `lara_col_hang2`
- [x] `lara_col_hangleft`
- [x] `lara_col_hangright`
- [x] `lara_col_hangturnlr`
- [x] `lara_col_jumper`
- [x] `lara_col_land`
- [x] `lara_col_leftjump`
- [x] `lara_col_monkey180`
- [x] `lara_col_monkeyl`
- [x] `lara_col_monkeyr`
- [ ] `lara_col_monkeyswing`
- [ ] `lara_col_poledown`
- [ ] `lara_col_polestat`
- [x] `lara_col_poleup`
- [x] `lara_col_pose`
- [ ] `lara_col_reach`
- [x] `lara_col_rightjump`
- [x] `lara_col_roll`
- [x] `lara_col_roll2`
- [x] `lara_col_rope`
- [ ] `lara_col_ropefwd`
- [ ] `lara_col_run`
- [x] `lara_col_slide`
- [x] `lara_col_slideback`
- [x] `lara_col_splat`
- [x] `lara_col_stepleft`
- [x] `lara_col_stepright`
- [x] `lara_col_stop`
- [x] `lara_col_swandive`
- [x] `lara_col_turn_l`
- [x] `lara_col_turn_r`
- [x] `lara_col_turnswitch`
- [ ] `lara_col_upjump`
- [ ] `lara_col_wade`
- [ ] `lara_col_walk`
- [x] `lara_default_col`
- [ ] `lara_slide_slope`
- [x] `lara_void_func`
- [ ] `LaraAboveWater`
- [x] `LaraCeilingFront`
- [x] `LaraCollideStop`
- [x] `LaraDeflectEdge`
- [ ] `LaraDeflectEdgeDuck`
- [ ] `LaraDeflectEdgeJump`
- [x] `LaraFallen`
- [x] `LaraFloorFront`
- [ ] `LaraHangLeftCornerTest`
- [ ] `LaraHangRightCornerTest`
- [ ] `LaraHangTest`
- [x] `LaraHitCeiling`
- [x] `LaraLandedBad`
- [ ] `LaraSlideEdgeJump`
- [ ] `LaraTestClimbStance`
- [ ] `LaraTestEdgeCatch`
- [ ] `LaraTestHangOnClimbWall`
- [x] `LookLeftRight`
- [x] `LookUpDown`
- [x] `MonkeySwingFall`
- [x] `MonkeySwingSnap`
- [x] `ResetLook`
- [x] `SetCornerAnim`
- [ ] `SnapLaraToEdgeOfBlock`
- [ ] `TestHangSwingIn`
- [ ] `TestLaraSlide`
- [ ] `TestLaraVault`
- [x] `TestMonkeyLeft`
- [x] `TestMonkeyRight`
- [ ] `TestWall`
- [ ] `UpdateRopeSwing`
#### LARA1GUN.C
- [ ] `AnimateShotgun`
- [ ] `ControlCrossbow`
- [ ] `CrossbowHitSwitchType78`
- [ ] `DoGrenadeDamageOnBaddie`
- [ ] `draw_shotgun`
- [x] `draw_shotgun_meshes`
- [ ] `FireCrossbow`
- [ ] `FireHK`
- [ ] `FireShotgun`
- [x] `ready_shotgun`
- [ ] `RifleHandler`
- [ ] `TriggerGrapplingEffect`
- [ ] `undraw_shotgun`
- [x] `undraw_shotgun_meshes`
#### LARA2GUN.C
- [ ] `AnimatePistols`
- [x] `draw_pistol_meshes`
- [ ] `draw_pistols`
- [ ] `PistolHandler`
- [ ] `ready_pistols`
- [ ] `set_arm_info`
- [x] `undraw_pistol_mesh_left`
- [x] `undraw_pistol_mesh_right`
- [ ] `undraw_pistols`
#### LARACLMB.C
- [ ] `GetClimbTrigger`
- [x] `lara_as_climbdown`
- [x] `lara_as_climbend`
- [x] `lara_as_climbing`
- [x] `lara_as_climbleft`
- [x] `lara_as_climbright`
- [x] `lara_as_climbstnc`
- [ ] `lara_col_climbdown`
- [x] `lara_col_climbend`
- [ ] `lara_col_climbing`
- [x] `lara_col_climbleft`
- [x] `lara_col_climbright`
- [ ] `lara_col_climbstnc`
- [x] `LaraCheckForLetGo`
- [ ] `LaraClimbLeftCornerTest`
- [ ] `LaraClimbRightCornerTest`
- [ ] `LaraDoClimbLeftRight`
- [ ] `LaraTestClimb`
- [ ] `LaraTestClimbPos`
- [ ] `LaraTestClimbUpPos`
#### LARAFIRE.C
- [ ] `AimWeapon`
- [ ] `CheckForHoldingState`
- [ ] `DoProperDetection`
- [ ] `find_target_point`
- [ ] `FireWeapon`
- [ ] `get_current_ammo_pointer`
- [ ] `HitTarget`
- [ ] `InitialiseNewWeapon`
- [ ] `LaraGetNewTarget`
- [ ] `LaraGun`
- [ ] `LaraTargetInfo`
- [x] `WeaponObject`
- [x] `WeaponObjectMesh`
#### LARAFLAR.C
- [ ] `CreateFlare`
- [ ] `DoFlareInHand`
- [ ] `DoFlareLight`
- [ ] `draw_flare`
- [x] `draw_flare_meshes`
- [ ] `DrawFlareInAir`
- [ ] `FlareControl`
- [x] `ready_flare`
- [ ] `set_flare_arm`
- [ ] `undraw_flare`
- [x] `undraw_flare_meshes`
#### LARAMISC.C
- [ ] `DelAlignLaraToRope`
- [ ] `GetLaraDeadlyBounds`
- [ ] `InitialiseLaraAnims`
- [x] `InitialiseLaraLoad`
- [x] `LaraCheat`
- [ ] `LaraControl`
- [x] `LaraInitialiseMeshes`
#### LARASURF.C
- [x] `lara_as_surfback`
- [x] `lara_as_surfleft`
- [x] `lara_as_surfright`
- [x] `lara_as_surfswim`
- [ ] `lara_as_surftread`
- [x] `lara_col_surfback`
- [x] `lara_col_surfleft`
- [x] `lara_col_surfright`
- [x] `lara_col_surfswim`
- [x] `lara_col_surftread`
- [ ] `LaraSurface`
- [x] `LaraSurfaceCollision`
- [ ] `LaraTestWaterClimbOut`
- [x] `LaraTestWaterStepOut`
#### LARASWIM.C
- [ ] `GetWaterDepth`
- [x] `lara_as_dive`
- [x] `lara_as_glide`
- [x] `lara_as_swim`
- [ ] `lara_as_swimcheat`
- [x] `lara_as_tread`
- [x] `lara_as_uwdeath`
- [x] `lara_as_waterroll`
- [x] `lara_col_dive`
- [x] `lara_col_glide`
- [x] `lara_col_swim`
- [x] `lara_col_tread`
- [x] `lara_col_uwdeath`
- [x] `lara_col_waterroll`
- [ ] `LaraSwimCollision`
- [ ] `LaraTestWaterDepth`
- [ ] `LaraUnderWater`
- [ ] `LaraWaterCurrent`
- [x] `SwimTurn`
- [ ] `SwimTurnSubsuit`
- [ ] `UpdateSubsuitAngles`
#### LOT.C
- [ ] `CreateZone`
- [ ] `DisableBaddieAI`
- [ ] `EnableBaddieAI`
- [x] `InitialiseLOTarray`
- [ ] `InitialiseSlot`
#### MISSILE.C
- [x] `ControlBodyPart`
- [x] `ExplodeFX`
#### NEWINV2.C
- [x] `combine_clothbottle`
- [x] `combine_crossbow_lasersight`
- [x] `combine_HK_SILENCER`
- [x] `combine_KeyItem1`
- [x] `combine_KeyItem2`
- [x] `combine_KeyItem3`
- [x] `combine_KeyItem4`
- [x] `combine_KeyItem5`
- [x] `combine_KeyItem6`
- [x] `combine_KeyItem7`
- [x] `combine_KeyItem8`
- [x] `combine_PickupItem1`
- [x] `combine_PickupItem2`
- [x] `combine_PickupItem3`
- [x] `combine_PickupItem4`
- [x] `combine_PuzzleItem1`
- [x] `combine_PuzzleItem2`
- [x] `combine_PuzzleItem3`
- [x] `combine_PuzzleItem4`
- [x] `combine_PuzzleItem5`
- [x] `combine_PuzzleItem6`
- [x] `combine_PuzzleItem7`
- [x] `combine_PuzzleItem8`
- [x] `combine_revolver_lasersight`
- [x] `combine_these_two_objects`
- [x] `construct_combine_object_list`
- [x] `construct_object_list`
- [x] `convert_invobj_to_obj`
- [x] `convert_obj_to_invobj`
- [x] `DEL_picked_up_object`
- [x] `dels_give_lara_guns_cheat`
- [x] `dels_give_lara_items_cheat`
- [x] `do_debounced_joystick_poo`
- [ ] `do_examine_mode`
- [ ] `do_keypad_mode`
- [ ] `do_playstation_button_prompts_v1`
- [x] `do_stats_mode`
- [x] `do_these_objects_combine`
- [ ] `draw_ammo_selector`
- [ ] `draw_current_object_list`
- [ ] `DrawInventoryItemMe`
- [ ] `DrawThreeDeeObject2D`
- [x] `fade_ammo_selector`
- [x] `go_and_load_game`
- [x] `go_and_save_game`
- [ ] `handle_inventry_menu`
- [x] `handle_object_changeover`
- [x] `have_i_got_item`
- [x] `have_i_got_object`
- [x] `init_keypad_mode`
- [x] `init_new_inventry`
- [x] `insert_object_into_list`
- [x] `insert_object_into_list_v2`
- [x] `is_item_currently_combinable`
- [x] `NailInvItem`
- [x] `remove_inventory_item`
- [ ] `S_CallInventory2`
- [ ] `S_DrawPickup`
- [x] `seperate_object`
- [x] `setup_ammo_selector`
- [x] `setup_objectlist_startposition`
- [x] `setup_objectlist_startposition2`
- [x] `spinback`
- [x] `update_laras_weapons_status`
- [ ] `use_current_item`
#### OBJECTS.C
- [ ] `AnimateWaterfalls`
- [x] `BridgeFlatCeiling`
- [x] `BridgeFlatFloor`
- [x] `BridgeTilt1Ceiling`
- [x] `BridgeTilt1Floor`
- [x] `BridgeTilt2Ceiling`
- [x] `BridgeTilt2Floor`
- [ ] `ControlAnimatingSlots`
- [ ] `ControlTriggerTriggerer`
- [x] `ControlWaterfall`
- [ ] `ControlXRayMachine`
- [ ] `CutsceneRopeControl`
- [ ] `DrawBaddieGunFlash`
- [ ] `EarthQuake`
- [ ] `GetOffset`
- [ ] `HybridCollision`
- [ ] `ParallelBarsCollision`
- [ ] `PoleCollision`
- [x] `SmashObject`
- [x] `SmashObjectControl`
- [ ] `TightRopeCollision`
#### OBJLIGHT.C
- [ ] `ControlBlinker`
- [ ] `ControlColouredLight`
- [ ] `ControlElectricalLight`
- [ ] `ControlPulseLight`
- [ ] `ControlStrobeLight`
- [ ] `TriggerAlertLight`
#### PEOPLE.C
- [ ] `GunHit`
- [ ] `GunMiss`
- [x] `GunShot`
- [ ] `ShotLara`
- [ ] `Targetable`
- [ ] `TargetVisible`
#### PICKUP.C
- [ ] `AnimatingPickUp`
- [ ] `CollectCarriedItems`
- [ ] `FindPlinth`
- [ ] `KeyHoleCollision`
- [ ] `KeyTrigger`
- [x] `MonitorScreenCollision`
- [ ] `PickUpCollision`
- [ ] `PickupTrigger`
- [x] `PuzzleDone`
- [ ] `PuzzleDoneCollision`
- [ ] `PuzzleHoleCollision`
- [ ] `RegeneratePickups`
- [ ] `SearchObjectCollision`
- [ ] `SearchObjectControl`
#### SAVEGAME.C
- [x] `CheckSumValid`
- [x] `ReadSG`
- [x] `RestoreLaraData`
- [ ] `RestoreLevelData`
- [x] `SaveLaraData`
- [ ] `SaveLevelData`
- [x] `sgRestoreGame`
- [ ] `sgSaveGame`
- [x] `WriteSG`
#### SOUND.C
- [x] `SayNo`
- [x] `SOUND_Init`
- [x] `SOUND_Stop`
#### SPHERE.C
- [ ] `TestCollision`
#### SPOTCAM.C
- [ ] `CalculateSpotCams`
- [x] `InitialiseSpotCam`
- [x] `InitSpotCamSequences`
- [x] `Spline`
#### SWITCH.C
- [ ] `CogSwitchCollision`
- [ ] `CogSwitchControl`
- [ ] `CrowbarSwitchCollision`
- [ ] `CrowDoveSwitchCollision`
- [ ] `CrowDoveSwitchControl`
- [ ] `FullBlockSwitchCollision`
- [ ] `FullBlockSwitchControl`
- [ ] `GetKeyTrigger`
- [ ] `GetSwitchTrigger`
- [ ] `JumpSwitchCollision`
- [x] `ProcessExplodingSwitchType8`
- [ ] `PulleyCollision`
- [ ] `RailSwitchCollision`
- [ ] `SwitchCollision`
- [ ] `SwitchCollision2`
- [ ] `SwitchControl`
- [ ] `SwitchTrigger`
- [x] `TestTriggersAtXYZ`
- [x] `TurnSwitchCollision`
- [ ] `TurnSwitchControl`
- [ ] `UnderwaterSwitchCollision`
#### TEXT.C
- [x] `InitFont`
#### TOMB4FX.C
- [ ] `ControlElectricFence`
- [ ] `ControlTeleporter`
- [x] `CreateBubble`
- [x] `DoBloodSplat`
- [x] `DrawLensFlares`
- [ ] `DrawWeaponMissile`
- [ ] `ExplodingDeath2`
- [ ] `Fade`
- [x] `GetFreeBubble`
- [x] `GetFreeShockwave`
- [x] `SetFadeClip`
- [x] `SetScreenFadeIn`
- [x] `SetScreenFadeOut`
- [ ] `SetUpLensFlare`
- [ ] `trig_actor_gunflash`
- [x] `TriggerBlood`
- [x] `TriggerExplosionBubble`
- [x] `TriggerExplosionSmokeEnd`
- [ ] `TriggerFenceSparks`
- [x] `TriggerLaraDrips`
- [x] `TriggerLightningGlow`
- [x] `TriggerShatterSmoke`
- [x] `TriggerShockwave`
- [x] `UpdateFadeClip`
#### TRAPS.C
- [ ] `CeilingTrapDoorCollision`
- [ ] `CloseTrapDoor`
- [ ] `ControlExplosion`
- [ ] `ControlRaisingBlock`
- [ ] `ControlRollingBall`
- [ ] `ControlScaledSpike`
- [ ] `ControlTwoBlockPlatform`
- [ ] `DartEmitterControl`
- [ ] `DartsControl`
- [ ] `DrawScaledSpike`
- [x] `FallingBlock`
- [ ] `FallingBlockCeiling`
- [ ] `FallingBlockCollision`
- [ ] `FallingBlockFloor`
- [ ] `FallingCeiling`
- [ ] `FlameControl`
- [x] `FlameEmitter2Control`
- [ ] `FlameEmitter3Control`
- [ ] `FlameEmitterControl`
- [ ] `FloorTrapDoorCollision`
- [x] `LaraBurn`
- [x] `LavaBurn`
- [ ] `OnTwoBlockPlatform`
- [ ] `OpenTrapDoor`
- [ ] `RollingBallCollision`
- [ ] `TestBoundsCollideTeethSpikes`
- [ ] `TrapDoorCollision`
- [ ] `TrapDoorControl`
- [ ] `TwoBlockPlatformCeiling`
- [ ] `TwoBlockPlatformFloor`
### SPEC_PSX
#### 3D_GEN.C
- [x] `mGetAngle`
- [ ] `phd_InitWindow`
#### 3D_OBJ.C
- [ ] `CreateMonoScreen`
#### CD.C
- [x] `cbvsync`
- [x] `CD_InitialiseReaderPosition`
- [x] `CD_Read`
- [x] `CD_ReaderPositionToCurrent`
- [x] `CD_Seek`
- [x] `CDDA_SetMasterVolume`
- [x] `CDDA_SetVolume`
- [x] `DEL_ChangeCDMode`
- [x] `InitNewCDSystem`
- [x] `S_CDPause`
- [x] `S_CDPlay`
- [ ] `S_CDRestart`
- [ ] `S_CDStop`
- [ ] `S_StartSyncedAudio`
- [ ] `XAReplay`
#### DRAWPHAS.C
- [ ] `DrawPhaseGame`
- [x] `DrawRooms`
- [ ] `MGDrawSprite`
- [ ] `SortOutWreckingBallDraw`
- [x] `UpdateSky`
#### DRAWSPKS.C
- [ ] `S_DrawDarts`
#### FILE.C
- [x] `FILE_Length`
- [x] `FILE_Load`
- [x] `FILE_Read`
- [ ] `RelocateModule`
#### GPU.C
- [x] `clear_a_rect`
- [ ] `do_gfx_debug_mode`
- [x] `GPU_ClearVRAM`
- [ ] `GPU_EndScene`
- [ ] `GPU_FlipNoIdle`
- [ ] `GPU_FlipStory`
- [ ] `GPU_UseOrderingTables`
- [ ] `GPU_UsePolygonBuffers`
#### LOADSAVE.C
- [ ] `DisplayFiles`
- [ ] `LoadGame`
- [ ] `SaveGame`
#### LOAD_LEV.C
- [x] `LOAD_Start`
- [x] `LOAD_Stop`
#### MALLOC.C
- [x] `game_free`
- [x] `game_malloc`
- [x] `init_game_malloc`
- [x] `show_game_malloc_totals`
#### MEMCARD.C
- [ ] `mcClose`
- [ ] `mcDir`
- [x] `mcFormat`
- [x] `mcGetStatus`
- [x] `mcOpen`
#### PROFILE.C
- [ ] `ProfileAddDrawOT`
- [ ] `ProfileAddOT`
- [x] `ProfileCallBack`
- [x] `ProfileInit`
- [ ] `ProfileReadCount`
- [x] `ProfileRGB`
- [ ] `ProfileStartCount`
#### PSOUTPUT.C
- [x] `SetupPadVibration`
- [ ] `VibratePad`
#### PSXINPUT.C
- [ ] `S_UpdateInput`
#### PSXMAIN.C
- [x] `main`
- [x] `VSyncFunc`
#### REQUEST.C
- [ ] `Requester`
#### ROOMLOAD.C
- [ ] `ReloadAnims`
- [x] `S_LoadLevelFile`
#### SPECIFIC.C
- [ ] `DisplayConfig`
- [x] `DisplayStatsUCunt`
- [ ] `DoPauseMenu`
- [x] `gInit`
- [ ] `S_control_screen_position`
- [ ] `S_Death`
- [x] `S_DumpScreen`
- [ ] `S_ExitSystem`
#### SPUSOUND.C
- [x] `SPU_FreeChannel`
- [ ] `SPU_FreeSamples`
- [x] `SPU_Init`"""
# STEP
# _____
# |___ /
# |_ \
# ___) |
# |____/
#
# Parsing the issue source (Markdown)
# Parse the Markdown issue body: keep only ticked checkbox lines
# ("- [x] `name`") and collect the backtick-quoted function names.
issue = [x for x in issue.replace("\r\n", "\n").split("\n") if x]  # get a clean line array without empty ones
functions = []
for x in issue:
    # skip section headers and unchecked entries
    if "###" in x or "[x]" not in x:
        continue
    # line shape is "- [x] `name`": the name starts at column 7 and
    # runs to the closing backtick
    fname = x[7:x.rfind("`")]
    if SHOW_WARN and fname in functions:
        print("[WARN--------] duplicate function '%s'" % fname)
    functions.append(fname)
# STEP
# _ _
# | || |
# | || |_
# |__ _|
# |_|
#
# Let the actual shit be done
if SHOW_STATUS:
    print("[--------STAT] Brix are expected to be shat")
# Locate the game executable in the working directory; fall back to
# the all-caps (DOS-style) filename before giving up.
exe_name = "PCTomb5.exe"
if not os.path.isfile(os.path.join(os.getcwd(), exe_name)):
    exe_name = "PCTOMB5.EXE"
    if not os.path.isfile(os.path.join(os.getcwd(), exe_name)):
        print("[FATAL ERROR] %s not found -- exiting" % exe_name)
        #exit()
import idautils
# Enumerate every function in the first segment as (address, name) pairs.
funcs = lambda: [(fea, GetFunctionName(fea)) for fea in Functions(SegStart(BeginEA()), SegEnd(BeginEA()))]
from collections import *
# fbytes maps function name -> (byte list, running offset in the blob)
fbytes = OrderedDict()
running = 0
# BUG FIX: the enumerator is a lambda and must be *called*; iterating
# the function object itself raised "TypeError: not iterable".
for ea, fn in funcs():
    # strip the leading-underscore decoration some symbols carry
    if fn[0] == "_":
        fn = fn[1:]
    # only keep functions that were ticked in the issue checklist
    if fn not in functions:
        continue
    if SHOW_WARN and fn in fbytes:
        # BUG FIX: report the current name `fn`; the previous code
        # printed `fname`, a stale leftover from the parsing loop.
        print("[WARN--------] duplicate function '%s' -- ignoring" % fn)
        continue
    # copy the function body bytes out of the database
    bs = [idc.Byte(i) for i in range(ea, FindFuncEnd(ea))]
    fbytes[fn] = (bs, running)
    running += len(bs)
# Open the executable with pefile and cache the header fields needed
# to append a new section.
pe = pefile.PE(exe_name)
number_of_section = pe.FILE_HEADER.NumberOfSections
last_section = number_of_section - 1
file_alignment = pe.OPTIONAL_HEADER.FileAlignment
section_alignment = pe.OPTIONAL_HEADER.SectionAlignment
# Quick function to align our values
def align(val_to_align, alignment):
    """Round ``val_to_align`` up to the next multiple of ``alignment``.

    Uses floor division (``//``): under Python 3 the original ``/``
    produced a float, which corrupts PE offset/size arithmetic.  ``//``
    behaves identically to Python 2's integer ``/`` for these inputs.
    """
    return ((val_to_align + alignment - 1) // alignment) * alignment
# Compute the file/memory size and placement of a prospective new
# section, aligned as the PE optional header requires: raw values use
# FileAlignment, virtual values use SectionAlignment, and the new
# section starts right after the current last section.
raw_size = align(0x1000, pe.OPTIONAL_HEADER.FileAlignment)
virtual_size = align(0x1000, pe.OPTIONAL_HEADER.SectionAlignment)
raw_offset = align((pe.sections[last_section].PointerToRawData +
                    pe.sections[last_section].SizeOfRawData),
                   pe.OPTIONAL_HEADER.FileAlignment)
virtual_offset = align((pe.sections[last_section].VirtualAddress +
                        pe.sections[last_section].Misc_VirtualSize),
                       pe.OPTIONAL_HEADER.SectionAlignment)
sys.stdout.close()
sys.stdout = orig_stdout
with open("output.txt", "r") as f:
print(f.read()) | [
"zippedfire@free.fr"
] | zippedfire@free.fr |
c79f144acca2207d3e4d4d90e89aa916295f9227 | b53a84f6b0463cd8459e282b77cf3edc61735f61 | /jaraco/mongodb/service.py | 8c14e6c5be8de1b98928e8c672e98fd23982105d | [
"MIT"
] | permissive | jaraco/jaraco.mongodb | 6619f8019d474c7d419346b3a90faa66b6f43e81 | a852399c506c5d2ed71950ecd9b5f469ff4a4040 | refs/heads/main | 2023-08-31T16:42:20.542742 | 2023-08-31T13:26:45 | 2023-08-31T13:26:45 | 45,183,461 | 5 | 1 | MIT | 2023-09-01T20:06:43 | 2015-10-29T13:04:03 | Python | UTF-8 | Python | false | false | 7,994 | py | import os
import sys
import tempfile
import subprocess
import glob
import collections
import importlib
import shutil
import functools
import logging
import datetime
import pathlib
import contextlib
from typing import Dict, Any
import portend
from jaraco.services import paths
from jaraco import services
from tempora import timing
from . import manage
from . import cli
from . import install
log = logging.getLogger(__name__)
class MongoDBFinder(paths.PathFinder):
    """Locate a MongoDB installation across common install layouts."""

    # versioned Windows install dirs; searched newest-first (reversed below)
    windows_installed = glob.glob('/Program Files/MongoDB/Server/???/bin')
    windows_paths = [
        # symlink Server/current to Server/X.X
        '/Program Files/MongoDB/Server/current/bin',
        # symlink MongoDB to mongodb-win32-x86_64-2008plus-X.X.X-rcX
        '/Program Files/MongoDB/bin',
    ] + list(reversed(windows_installed))
    heuristic_paths = [
        # on the path
        '',
        # 10gen Debian package
        '/usr/bin',
        # custom install in /opt
        '/opt/mongodb/bin',
    ] + windows_paths

    # allow the environment to stipulate where mongodb must
    # be found.
    env_paths = [
        os.path.join(os.environ[key], 'bin')
        for key in ['MONGODB_HOME']
        if key in os.environ
    ]
    # an explicit MONGODB_HOME overrides all heuristics
    candidate_paths = env_paths or heuristic_paths
    exe = 'mongod'
    args = ['--version']

    @classmethod
    def find_binary(cls):
        """Return the full path to the ``mongod`` executable."""
        return os.path.join(cls.find_root(), cls.exe)

    @classmethod
    @contextlib.contextmanager
    def ensure(cls):
        """Yield a root containing MongoDB binaries, installing a
        temporary copy when no existing installation can be found.
        """
        try:
            yield cls.find_root()
        except RuntimeError:
            # nothing installed: download into a temp dir and expose it
            # through candidate_paths for the duration of the context
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp = pathlib.Path(tmp_dir)
                root = install.install(target=tmp).joinpath('bin')
                cls.candidate_paths.append(root)
                yield root
                cls.candidate_paths.remove(root)
class MongoDBService(MongoDBFinder, services.Subprocess, services.Service):
    """A system-wide ``mongod`` daemon on the conventional default port."""

    port = 27017
    process_kwargs: Dict[str, Any] = {}
    """
    keyword arguments to Popen to control the process creation
    """

    @services.Subprocess.PortFree()
    def start(self):
        """Launch mongod and block until it reports it is listening."""
        super(MongoDBService, self).start()
        # start the daemon
        mongodb_data = os.path.join(sys.prefix, 'var', 'lib', 'mongodb')
        cmd = [
            self.find_binary(),
            '--dbpath=' + mongodb_data,
        ]
        self.process = subprocess.Popen(cmd, **self.process_kwargs)
        # scrape the effective port from the daemon's startup banner
        self.wait_for_pattern(r'waiting for connections on port (?P<port>\d+)')
        log.info('%s listening on %s', self, self.port)
class MongoDBInstance(MongoDBFinder, services.Subprocess, services.Service):
    """A disposable mongod on an ephemeral port with a temporary data
    directory, removed again by :meth:`stop`.
    """

    process_kwargs: Dict[str, Any] = {}
    """
    keyword arguments to Popen to control the process creation
    """

    def merge_mongod_args(self, add_args):
        # presumably extracts a '--port' value from add_args in place,
        # leaving the rest for mongod -- see jaraco.mongodb.cli; TODO confirm
        self.port, add_args[:] = cli.extract_param('port', add_args, type=int)
        self.mongod_args = add_args

    def start(self):
        """Launch mongod in a fresh temp dir and wait for its port."""
        super(MongoDBInstance, self).start()
        if not hasattr(self, 'port') or not self.port:
            # no port requested; grab any free local port
            self.port = portend.find_available_local_port()
        self.data_dir = tempfile.mkdtemp()
        cmd = [
            self.find_binary(),
            '--dbpath',
            self.data_dir,
            '--port',
            str(self.port),
        ] + list(self.mongod_args)
        # honor an optional bind_ip attribute unless already supplied
        if hasattr(self, 'bind_ip') and '--bind_ip' not in cmd:
            cmd.extend(['--bind_ip', self.bind_ip])
        self.process = subprocess.Popen(cmd, **self.process_kwargs)
        # block until the server is accepting connections
        portend.occupied('localhost', self.port, timeout=10)
        log.info(f'{self} listening on {self.port}')

    def get_connection(self):
        """Return a pymongo client connected to this instance."""
        pymongo = importlib.import_module('pymongo')
        return pymongo.MongoClient('localhost', self.port)

    def purge_all_databases(self):
        # drop all databases via jaraco.mongodb.manage
        manage.purge_all_databases(self.get_connection())

    def get_connect_hosts(self):
        """Return the host:port list used to build the URI."""
        return [f'localhost:{self.port}']

    def get_uri(self):
        """Return a mongodb:// URI for this instance."""
        return 'mongodb://' + ','.join(self.get_connect_hosts())

    def stop(self):
        """Terminate the process and delete its data directory."""
        super(MongoDBInstance, self).stop()
        shutil.rmtree(self.data_dir)
        del self.data_dir
class ExtantInstance:
    """Adapter exposing an already-running MongoDB server through the
    same accessor interface as the managed instance classes above.
    """

    def __init__(self, uri):
        # connection string of the externally-managed server
        self.uri = uri

    def get_connection(self):
        """Return a pymongo client bound to the stored URI."""
        client_mod = importlib.import_module('pymongo')
        return client_mod.MongoClient(self.uri)

    def get_uri(self):
        """Return the connection URI verbatim."""
        return self.uri
class MongoDBReplicaSet(MongoDBFinder, services.Service):
    """A three-member mongod replica set rooted in a temp directory."""

    replica_set_name = 'test'
    # extra arguments passed to every member's mongod
    mongod_parameters = (
        '--oplogSize',
        '10',
    )

    def start(self):
        """Launch three members, initiate the set, and wait for a primary."""
        super(MongoDBReplicaSet, self).start()
        self.data_root = tempfile.mkdtemp()
        self.instances = list(map(self.start_instance, range(3)))
        # initialize the replica set
        self.instances[0].connect().admin.command(
            'replSetInitiate', self.build_config()
        )
        # wait until the replica set is initialized
        get_repl_set_status = functools.partial(
            self.instances[0].connect().admin.command, 'replSetGetStatus', 1
        )
        errors = importlib.import_module('pymongo.errors')
        log.info('Waiting for replica set to initialize')
        watch = timing.Stopwatch()
        while watch.elapsed < datetime.timedelta(minutes=5):
            try:
                res = get_repl_set_status()
                # myState == 1 indicates the queried member is PRIMARY
                if res.get('myState') != 1:
                    continue
            except errors.OperationFailure:
                # status may be unavailable while the set is electing
                continue
            break
        else:
            # the while condition expired: 5-minute budget exhausted
            raise RuntimeError("timeout waiting for replica set to start")

    def start_instance(self, number):
        """Launch member ``number`` on a free port; return its InstanceInfo."""
        port = portend.find_available_local_port()
        data_dir = os.path.join(self.data_root, repr(number))
        os.mkdir(data_dir)
        cmd = [
            self.find_binary(),
            '--dbpath',
            data_dir,
            '--port',
            str(port),
            '--replSet',
            self.replica_set_name,
        ] + list(self.mongod_parameters)
        log_file = self.get_log(number)
        process = subprocess.Popen(cmd, stdout=log_file)
        # block until the member is accepting connections
        portend.occupied('localhost', port, timeout=50)
        log.info(f'{self}:{number} listening on {port}')
        return InstanceInfo(data_dir, port, process, log_file)

    def get_log(self, number):
        """Open (append mode) the log file for member ``number``."""
        log_filename = os.path.join(self.data_root, f'r{number}.log')
        log_file = open(log_filename, 'a')
        return log_file

    def is_running(self):
        """True when start() has run and every member process is alive."""
        return hasattr(self, 'instances') and all(
            instance.process.returncode is None for instance in self.instances
        )

    def stop(self):
        """Terminate all members, close their logs, remove the data root."""
        super(MongoDBReplicaSet, self).stop()
        for instance in self.instances:
            if instance.process.returncode is None:
                instance.process.terminate()
                instance.process.wait()
            instance.log_file.close()
        del self.instances
        shutil.rmtree(self.data_root)

    def build_config(self):
        """Return the configuration document for replSetInitiate."""
        return dict(
            _id=self.replica_set_name,
            members=[
                dict(
                    _id=number,
                    host=f'localhost:{instance.port}',
                )
                for number, instance in enumerate(self.instances)
            ],
        )

    def get_connect_hosts(self):
        """Return host:port strings for every member."""
        return [f'localhost:{instance.port}' for instance in self.instances]

    def get_uri(self):
        """Return a mongodb:// URI listing all members."""
        return 'mongodb://' + ','.join(self.get_connect_hosts())

    def get_connection(self):
        """Return a pymongo client connected to the whole set."""
        pymongo = importlib.import_module('pymongo')
        return pymongo.MongoClient(self.get_uri())
# Lightweight record describing one mongod process in the replica set.
InstanceInfoBase = collections.namedtuple(
    'InstanceInfoBase',
    ['path', 'port', 'process', 'log_file'],
)
class InstanceInfo(InstanceInfoBase):
    """InstanceInfoBase augmented with a pymongo connection helper."""

    def connect(self):
        """Open a client to this member, preferring the primary but
        tolerating a secondary (e.g. during elections).
        """
        pymongo = __import__('pymongo')
        preference = pymongo.ReadPreference.PRIMARY_PREFERRED
        address = f'localhost:{self.port}'
        return pymongo.MongoClient(address, read_preference=preference)
| [
"jaraco@jaraco.com"
] | jaraco@jaraco.com |
b05c7d7aed407916a5a7b0fd8464bc22975e7d0c | 2ba4fa5cb252c7c3d303b456fe8d9afb212f8b1b | /meuprojeto/meuprojeto/urls.py | 05900c38c826b0df661fd7b1fc402a2d70201576 | [] | no_license | marcoribeirojr/aprendendo-django | eb7c72d5cf90e3a9ef3678b2b13bde88f7c4146b | 71104acc68c7b9f0ae4f42234467a53f339522ba | refs/heads/master | 2020-05-05T13:08:49.347476 | 2019-04-11T20:09:56 | 2019-04-11T20:09:56 | 180,062,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | """meuprojeto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    # delegate /polls/... URLs to the polls application's URLconf
    path('polls/', include('polls.urls')),
    # built-in Django admin site at /admin/
    path('admin/', admin.site.urls),
]
| [
"="
] | = |
f66ddbefcac3491e18bd0349b504dd7153773ab6 | 286b6dc56323f982092ffafbfac8a32dbbaeb7ef | /training_assignments/Day01_Assignments/sample_script.py | 65a667fdae2ff91bbab32ae286a3098f284439d4 | [] | no_license | learndevops19/pythonTraining-CalsoftInc | ccee0d90aadc00bfdb17f9578620f6bf92f80a4c | c5f61516b835339b394876edd1c6f62e7cc6f0c3 | refs/heads/master | 2021-02-05T04:27:17.590913 | 2019-11-20T17:27:06 | 2019-11-20T17:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | #!usr/bin/env python
"""
This is module level docstring
"""
# import statement to import external lib in the current module
import sample_lib
# Function definition with argument `limit`
def get_all_prime(limit):
    """Collect the first ``limit`` prime numbers.

    Primality is delegated to ``sample_lib.is_prime``; candidates are
    tested in ascending order starting from 2.

    Args:
        limit: how many primes to produce (0 or negative yields []).

    Returns:
        A list of the first ``limit`` primes in ascending order.
        (The original version computed but discarded them, returning
        None; returning the list is backward compatible for callers
        that ignored the result.)
    """
    primes = []
    candidate = 2
    while len(primes) < limit:
        if sample_lib.is_prime(candidate):
            primes.append(candidate)
        candidate += 1
    return primes
# Entry point for the execution of the script
if __name__ == "__main__":  # run only when executed directly, not on import
    # Read how many primes to generate from stdin
    count = int(input("Enter count of prime numbers:"))
    # function call
    get_all_prime(count)
| [
"rajpratik71@gmail.com"
] | rajpratik71@gmail.com |
497387657e44a2b62a1cecc699a680722893a502 | 03e5f1fc05a65d6591964341bb90ca20ee9ae687 | /path_finder.py | 81680d22f3823108eb3319b80d4568c78f57d269 | [] | no_license | orenovadia/word_chains | 391ba7c603d20880830ef01fc0fc42b87dee9e1e | 2253c7f808b2d0978581e437232853f8d0eb58bb | refs/heads/master | 2020-06-22T09:38:25.460906 | 2019-07-19T03:34:18 | 2019-07-19T03:34:18 | 197,692,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | from typing import Optional, Dict, List
from oneedgraph import OneEdGraph
class PathFinder(object):
    """Breadth-first search for shortest word chains over a graph whose
    edges connect words differing by one letter.
    """

    def __init__(self, graph: OneEdGraph) -> None:
        super().__init__()
        # only ``.adjacent(word)`` is required of the graph
        self._graph = graph

    def shortest_path(self, source, destination) -> Optional[List[str]]:
        """Return a shortest chain from ``source`` to ``destination``
        (inclusive), or ``None`` when none exists.  Words of differing
        lengths can never be connected, so that case short-circuits.
        """
        if len(source) != len(destination):
            return None

        # parent[word] -> predecessor on a shortest path (None for source)
        parent: Dict[str, Optional[str]] = {source: None}
        frontier = {source}
        visited = set()

        # expand one BFS level per iteration until the destination is
        # reached or the reachable component is exhausted
        while frontier and destination not in frontier:
            visited.update(frontier)
            next_frontier = set()
            for word in frontier:
                for neighbor in self._graph.adjacent(word):
                    if neighbor not in visited:
                        parent[neighbor] = word
                        next_frontier.add(neighbor)
            frontier = next_frontier

        if destination not in parent:
            return None

        # walk the parent chain back to the source, then flip it
        chain = []
        node = destination
        while node:
            chain.append(node)
            node = parent[node]
        chain.reverse()
        return chain
| [
"orenovad@gmail.com"
] | orenovad@gmail.com |
f3e0686647a601213f779653cb8e50d2cd76685a | 7a2ad6c274add0f88cab8f6f89de551ff86597c9 | /AOC2018/days/day4/tests/test_Day4Puzzle2.py | 15151e77987d77062fa679b325f6d1a71dc2718b | [] | no_license | Akivashi/AOC | 8b66cecc9c6cf32c318d5b605f7f9ee952aad373 | cbd3a766db1a4b6560c4adcf978ec79b30707032 | refs/heads/master | 2023-01-29T17:57:39.704089 | 2020-12-08T18:41:15 | 2020-12-09T18:55:10 | 318,249,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from unittest import TestCase
from Day4Puzzle2 import Day4Puzzle2
class Day4Puzzle2Test(TestCase):
    """Regression test for the Day 4 part-2 puzzle solver."""

    def setUp(self):
        # fresh solver instance per test
        self.puzzle = Day4Puzzle2()

    def test_puzzle(self):
        # 4455 is the expected answer for the sample input from the
        # puzzle statement
        self.assertEqual(self.puzzle.solution("tests/Day4_test_input1.txt"), 4455)
| [
"rene@infi.nl"
] | rene@infi.nl |
47f43ed12c629953828429ec2c172a3ec9873cfc | a00fcfa8158316fceff0dc9b5d936bba96cca4c3 | /smodels-database/8TeV/CMS/CMS-SUS-14-021/validation/T2bbWW_2EqMassAx_EqMassBy.py | 3de0776e24c485a42cb569b05e37f110a82cc5fd | [] | no_license | andlessa/stopsEFT | 414fac7493c385c5b6d9fda2f17f9ef4658d7884 | 9fae9ef9c96432fecadd7abbb3578b154edc9282 | refs/heads/master | 2020-06-15T06:59:20.544277 | 2019-11-12T19:48:55 | 2019-11-12T19:48:55 | 195,229,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,932 | py | validationData = [{'slhafile': 'T2bbWW_156_77_156_77.slha', 'axes': {'x': 156.596211235, 'y': 77.6349199208}, 't': 0.023210111065445658, 'signal': 53521.6659, 'UL': 74832.696, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_123_44_123_44.slha', 'axes': {'x': 123.861152477, 'y': 44.0152891989}, 't': 0.023210111065445658, 'signal': 164974.07499999998, 'UL': 167810.00000000015, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_174_97_174_97.slha', 'axes': {'x': 174.29962168, 'y': 97.2925085187}, 't': 0.023210111065445658, 'signal': 31486.713900000002, 'UL': 46928.331999999995, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_329_251_329_251.slha', 'axes': {'x': 329.009318097, 'y': 251.565033453}, 't': 0.023210111065445658, 'signal': 1063.86168, 'UL': 4907.424000000002, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_273_197_273_197.slha', 'axes': {'x': 273.764691025, 'y': 197.297526354}, 't': 0.023210111065445658, 'signal': 2994.89998, 'UL': 8077.424000000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_111_34_111_34.slha', 'axes': {'x': 111.203569139, 'y': 34.508706107}, 't': 0.023210111065445658, 'signal': 272473.254, 'UL': 265123.2000000003, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_110_32_110_32.slha', 'axes': {'x': 110.499881144, 'y': 32.0908978873}, 't': 0.023210111065445658, 'signal': 
280942.487, 'UL': 245624.19999999987, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_150_70_150_70.slha', 'axes': {'x': 150.313053529, 'y': 70.5099590768}, 't': 0.023210111065445658, 'signal': 65313.0615, 'UL': 84827.10799999995, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_330_250_330_250.slha', 'axes': {'x': 330.243853765, 'y': 250.362979868}, 't': 0.023210111065445658, 'signal': 1040.0307, 'UL': 5199.3647999999985, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_357_281_357_281.slha', 'axes': {'x': 357.790604953, 'y': 281.461596395}, 't': 0.023210111065445658, 'signal': 654.9709, 'UL': 3481.304000000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_111_31_111_31.slha', 'axes': {'x': 111.036702504, 'y': 31.9703563381}, 't': 0.023210111065445658, 'signal': 274533.687, 'UL': 235483.9999999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_187_111_187_111.slha', 'axes': {'x': 187.657154579, 'y': 111.333127436}, 't': 0.023210111065445658, 'signal': 21707.7837, 'UL': 36792.26, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_250_173_250_173.slha', 'axes': {'x': 250.617508995, 'y': 173.524857489}, 't': 0.023210111065445658, 'signal': 4797.427159999999, 'UL': 11173.6072, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_336_259_336_259.slha', 'axes': {'x': 336.725045441, 'y': 259.489256408}, 't': 0.023210111065445658, 'signal': 933.812564, 'UL': 4500.612799999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_396_316_396_316.slha', 'axes': {'x': 396.200280361, 'y': 316.628966232}, 't': 0.023210111065445658, 'signal': 355.62050899999997, 'UL': 2984.7983999999965, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_313_235_313_235.slha', 'axes': {'x': 313.57786341, 'y': 235.716587543}, 't': 0.023210111065445658, 'signal': 1401.15546, 'UL': 
5647.4752000000035, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_258_181_258_181.slha', 'axes': {'x': 258.333236338, 'y': 181.449080444}, 't': 0.023210111065445658, 'signal': 4103.7322300000005, 'UL': 10095.159599999997, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_219_141_219_141.slha', 'axes': {'x': 219.754599621, 'y': 141.82796567}, 't': 0.023210111065445658, 'signal': 9653.84572, 'UL': 19849.679999999997, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_265_189_265_189.slha', 'axes': {'x': 265.713073762, 'y': 189.700355661}, 't': 0.023210111065445658, 'signal': 3514.6780400000002, 'UL': 8933.472400000002, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_222_144_222_144.slha', 'axes': {'x': 222.066328751, 'y': 144.874181365}, 't': 0.023210111065445658, 'signal': 9131.82583, 'UL': 18132.543999999994, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_396_319_396_319.slha', 'axes': {'x': 396.608242267, 'y': 319.196404096}, 't': 0.023210111065445658, 'signal': 353.433058, 'UL': 2859.3696000000014, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_138_60_138_60.slha', 'axes': {'x': 138.029294714, 'y': 60.042079009}, 't': 0.023210111065445658, 'signal': 98129.6229, 'UL': 122023.95999999998, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_101_22_101_22.slha', 'axes': {'x': 101.758967748, 'y': 22.7916494993}, 't': 0.023210111065445658, 'signal': 410827.657, 'UL': 275209.60000000003, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_298_219_298_219.slha', 'axes': {'x': 298.146408723, 'y': 219.868141634}, 't': 0.023210111065445658, 'signal': 1866.6654, 'UL': 6353.5256, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_122_45_122_45.slha', 'axes': {'x': 122.115255392, 'y': 45.715249681}, 't': 0.023210111065445658, 'signal': 175360.027, 'UL': 
186401.88000000018, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_282_204_282_204.slha', 'axes': {'x': 282.714954036, 'y': 204.019695724}, 't': 0.023210111065445658, 'signal': 2512.62716, 'UL': 7607.7083999999995, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_332_255_332_255.slha', 'axes': {'x': 332.929088363, 'y': 255.239656622}, 't': 0.023210111065445658, 'signal': 992.1563639999999, 'UL': 4757.331599999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_267_188_267_188.slha', 'axes': {'x': 267.458970847, 'y': 188.000395178}, 't': 0.023210111065445658, 'signal': 3412.2229599999996, 'UL': 9185.91, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_173_94_173_94.slha', 'axes': {'x': 173.46023556, 'y': 94.2826279412}, 't': 0.023210111065445658, 'signal': 32278.179199999995, 'UL': 47551.060000000005, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_278_199_278_199.slha', 'axes': {'x': 278.370657099, 'y': 199.206938752}, 't': 0.023210111065445658, 'signal': 2727.10803, 'UL': 7942.964800000002, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_285_206_285_206.slha', 'axes': {'x': 285.249951021, 'y': 206.960737375}, 't': 0.023210111065445658, 'signal': 2388.19376, 'UL': 7402.318399999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_259_180_259_180.slha', 'axes': {'x': 259.567772006, 'y': 180.24702686}, 't': 0.023210111065445658, 'signal': 3995.54594, 'UL': 10251.595200000002, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_211_133_211_133.slha', 'axes': {'x': 211.154642498, 'y': 133.667637791}, 't': 0.023210111065445658, 'signal': 11856.840699999999, 'UL': 23044.043999999994, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_256_176_256_176.slha', 'axes': {'x': 256.547284594, 'y': 176.793851604}, 't': 0.023210111065445658, 'signal': 
4259.797780000001, 'UL': 10715.098, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_230_151_230_151.slha', 'axes': {'x': 230.844460571, 'y': 151.085093111}, 't': 0.023210111065445658, 'signal': 7407.88809, 'UL': 16485.023999999998, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_303_225_303_225.slha', 'axes': {'x': 303.385114504, 'y': 225.58595213}, 't': 0.023210111065445658, 'signal': 1688.58736, 'UL': 6017.124799999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_319_240_319_240.slha', 'axes': {'x': 319.551471956, 'y': 240.057393484}, 't': 0.023210111065445658, 'signal': 1254.74815, 'UL': 5626.195200000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_102_24_102_24.slha', 'axes': {'x': 102.784153801, 'y': 24.1666749325}, 't': 0.023210111065445658, 'signal': 392343.169, 'UL': 276212.3999999996, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_192_115_192_115.slha', 'axes': {'x': 192.434785163, 'y': 115.917723274}, 't': 0.023210111065445658, 'signal': 19198.751099999998, 'UL': 32357.36, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_189_111_189_111.slha', 'axes': {'x': 189.331269993, 'y': 111.254550643}, 't': 0.023210111065445658, 'signal': 20729.5622, 'UL': 35644.11199999998, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_210_134_210_134.slha', 'axes': {'x': 210.569948646, 'y': 134.542938028}, 't': 0.023210111065445658, 'signal': 12034.5309, 'UL': 23212.18, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_195_119_195_119.slha', 'axes': {'x': 195.372881923, 'y': 119.25735039}, 't': 0.023210111065445658, 'signal': 17760.3963, 'UL': 28122.104, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_344_267_344_267.slha', 'axes': {'x': 344.440772784, 'y': 267.413479363}, 't': 0.023210111065445658, 'signal': 817.330569, 'UL': 4180.290400000001, 
'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_254_178_254_178.slha', 'axes': {'x': 254.801387509, 'y': 178.493812087}, 't': 0.023210111065445658, 'signal': 4382.02404, 'UL': 10493.057599999998, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_156_78_156_78.slha', 'axes': {'x': 156.164458197, 'y': 78.6672937639}, 't': 0.023210111065445658, 'signal': 54246.4667, 'UL': 75467.57199999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_133_55_133_55.slha', 'axes': {'x': 133.647063175, 'y': 55.8635667518}, 't': 0.023210111065445658, 'signal': 114965.87999999999, 'UL': 135480.51200000002, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_165_86_165_86.slha', 'axes': {'x': 165.744508216, 'y': 86.3584049864}, 't': 0.023210111065445658, 'signal': 40424.5559, 'UL': 60396.032, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_156_79_156_79.slha', 'axes': {'x': 156.794245205, 'y': 79.6362356163}, 't': 0.023210111065445658, 'signal': 53190.6688, 'UL': 74515.76799999995, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_101_25_101_25.slha', 'axes': {'x': 101.549618133, 'y': 25.3687285172}, 't': 0.023210111065445658, 'signal': 414722.119, 'UL': 315080.80000000005, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_181_102_181_102.slha', 'axes': {'x': 181.175962903, 'y': 102.206850896}, 't': 0.023210111065445658, 'signal': 25983.7575, 'UL': 41361.40800000002, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_343_266_343_266.slha', 'axes': {'x': 343.840774616, 'y': 266.446200196}, 't': 0.023210111065445658, 'signal': 825.622176, 'UL': 4229.096800000002, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_162_84_162_84.slha', 'axes': {'x': 162.407882711, 'y': 84.7296967464}, 't': 0.023210111065445658, 'signal': 44485.3911, 'UL': 65631.60399999998, 'condition': 0.0, 'dataset': 
None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_235_157_235_157.slha', 'axes': {'x': 235.186054308, 'y': 157.67641158}, 't': 0.023210111065445658, 'signal': 6746.54479, 'UL': 14978.816, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_311_232_311_232.slha', 'axes': {'x': 311.105715857, 'y': 232.826569474}, 't': 0.023210111065445658, 'signal': 1466.28375, 'UL': 5792.1072, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_158_78_158_78.slha', 'axes': {'x': 158.028780873, 'y': 78.4341820316}, 't': 0.023210111065445658, 'signal': 51079.3009, 'UL': 72611.55999999997, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_134_55_134_55.slha', 'axes': {'x': 134.772838729, 'y': 55.2218327728}, 't': 0.023210111065445658, 'signal': 110304.748, 'UL': 131777.8159999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_236_156_236_156.slha', 'axes': {'x': 236.420589976, 'y': 156.474357995}, 't': 0.023210111065445658, 'signal': 6559.81834, 'UL': 15092.431999999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_345_266_345_266.slha', 'axes': {'x': 345.23706206, 'y': 266.437063688}, 't': 0.023210111065445658, 'signal': 806.57729, 'UL': 4316.128800000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_321_243_321_243.slha', 'axes': {'x': 321.293590754, 'y': 243.640810498}, 't': 0.023210111065445658, 'signal': 1215.66721, 'UL': 5382.841200000002, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_274_196_274_196.slha', 'axes': {'x': 274.999226693, 'y': 196.095472769}, 't': 0.023210111065445658, 'signal': 2921.4057399999997, 'UL': 8173.256, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_322_244_322_244.slha', 'axes': {'x': 322.01740211, 'y': 244.033113048}, 't': 0.023210111065445658, 'signal': 1199.73892, 'UL': 5391.427999999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 
'T2bbWW_178_100_178_100.slha', 'axes': {'x': 178.41958374, 'y': 100.048007069}, 't': 0.023210111065445658, 'signal': 27936.4312, 'UL': 43723.216, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_232_156_232_156.slha', 'axes': {'x': 232.978015004, 'y': 156.080724939}, 't': 0.023210111065445658, 'signal': 7099.72955, 'UL': 15434.759999999998, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_248_169_248_169.slha', 'axes': {'x': 248.979624054, 'y': 169.710307866}, 't': 0.023210111065445658, 'signal': 4946.023829999999, 'UL': 11742.920000000004, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_339_262_339_262.slha', 'axes': {'x': 339.65544147, 'y': 262.83638164}, 't': 0.023210111065445658, 'signal': 886.90669, 'UL': 4343.478799999998, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_321_244_321_244.slha', 'axes': {'x': 321.520277987, 'y': 244.211166885}, 't': 0.023210111065445658, 'signal': 1209.91237, 'UL': 5345.058000000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_244_164_244_164.slha', 'axes': {'x': 244.136317319, 'y': 164.39858095}, 't': 0.023210111065445658, 'signal': 5503.18852, 'UL': 13094.588, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_212_133_212_133.slha', 'axes': {'x': 212.038872277, 'y': 133.903742715}, 't': 0.023210111065445658, 'signal': 11592.8169, 'UL': 22713.699999999997, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_281_205_281_205.slha', 'axes': {'x': 281.480418369, 'y': 205.221749309}, 't': 0.023210111065445658, 'signal': 2558.34534, 'UL': 7574.190000000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_136_58_136_58.slha', 'axes': {'x': 136.722292607, 'y': 58.3500265423}, 't': 0.023210111065445658, 'signal': 102880.581, 'UL': 125992.76399999997, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_289_213_289_213.slha', 
'axes': {'x': 289.196145712, 'y': 213.145972264}, 't': 0.023210111065445658, 'signal': 2207.00539, 'UL': 6924.1132, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_119_41_119_41.slha', 'axes': {'x': 119.894131231, 'y': 41.4168642542}, 't': 0.023210111065445658, 'signal': 191490.508, 'UL': 203623.1999999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_179_103_179_103.slha', 'axes': {'x': 179.941427236, 'y': 103.408904481}, 't': 0.023210111065445658, 'signal': 26749.976799999997, 'UL': 43363.416, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_145_66_145_66.slha', 'axes': {'x': 145.684524982, 'y': 66.4283763468}, 't': 0.023210111065445658, 'signal': 75671.9094, 'UL': 98461.04399999998, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_200_122_200_122.slha', 'axes': {'x': 200.242956246, 'y': 122.461094217}, 't': 0.023210111065445658, 'signal': 15601.7024, 'UL': 26864.644, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_118_40_118_40.slha', 'axes': {'x': 118.215608488, 'y': 40.0151208422}, 't': 0.023210111065445658, 'signal': 205129.683, 'UL': 214961.20000000016, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_133_56_133_56.slha', 'axes': {'x': 133.026941645, 'y': 56.921793255}, 't': 0.023210111065445658, 'signal': 117607.995, 'UL': 137228.56000000003, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_289_210_289_210.slha', 'axes': {'x': 289.282343352, 'y': 210.413482326}, 't': 0.023210111065445658, 'signal': 2203.68797, 'UL': 7141.5396, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_164_87_164_87.slha', 'axes': {'x': 164.509972549, 'y': 87.5604585711}, 't': 0.023210111065445658, 'signal': 41950.8848, 'UL': 61659.03999999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_141_63_141_63.slha', 'axes': {'x': 141.362790518, 'y': 63.7877897066}, 't': 
0.023210111065445658, 'signal': 87809.6867, 'UL': 111480.68799999995, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_370_292_370_292.slha', 'axes': {'x': 370.922652163, 'y': 292.816733892}, 't': 0.023210111065445658, 'signal': 529.4146239999999, 'UL': 3378.201999999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_227_149_227_149.slha', 'axes': {'x': 227.470326964, 'y': 149.752188625}, 't': 0.023210111065445658, 'signal': 8015.42305, 'UL': 16976.659999999996, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_188_111_188_111.slha', 'axes': {'x': 188.093472814, 'y': 111.109366951}, 't': 0.023210111065445658, 'signal': 21449.5575, 'UL': 36570.86000000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_149_71_149_71.slha', 'axes': {'x': 149.078517862, 'y': 71.7120126615}, 't': 0.023210111065445658, 'signal': 67969.9531, 'UL': 87166.232, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_242_165_242_165.slha', 'axes': {'x': 242.901781651, 'y': 165.600634535}, 't': 0.023210111065445658, 'signal': 5655.51737, 'UL': 12946.156, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_337_258_337_258.slha', 'axes': {'x': 337.959581109, 'y': 258.287202823}, 't': 0.023210111065445658, 'signal': 913.0848980000001, 'UL': 4796.615999999998, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_300_221_300_221.slha', 'axes': {'x': 300.194029605, 'y': 221.6200259}, 't': 0.023210111065445658, 'signal': 1795.67589, 'UL': 6238.9104, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_188_110_188_110.slha', 'axes': {'x': 188.891690247, 'y': 110.131073851}, 't': 0.023210111065445658, 'signal': 20980.0447, 'UL': 35620.495999999985, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_266_189_266_189.slha', 'axes': {'x': 266.048963682, 'y': 189.373303399}, 't': 0.023210111065445658, 
'signal': 3512.3724099999995, 'UL': 8985.112000000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_290_211_290_211.slha', 'axes': {'x': 290.43068138, 'y': 211.943918679}, 't': 0.023210111065445658, 'signal': 2151.47026, 'UL': 7042.160800000001, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_243_167_243_167.slha', 'axes': {'x': 243.889701257, 'y': 167.287268513}, 't': 0.023210111065445658, 'signal': 5535.61351, 'UL': 12502.155999999997, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_204_125_204_125.slha', 'axes': {'x': 204.323144934, 'y': 125.979519761}, 't': 0.023210111065445658, 'signal': 14136.3313, 'UL': 25382.995999999992, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_213_137_213_137.slha', 'axes': {'x': 213.779062918, 'y': 137.489037155}, 't': 0.023210111065445658, 'signal': 11098.9463, 'UL': 22009.33999999999, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_167_88_167_88.slha', 'axes': {'x': 167.507897488, 'y': 88.8414634948}, 't': 0.023210111065445658, 'signal': 38345.7202, 'UL': 57540.55999999997, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_305_227_305_227.slha', 'axes': {'x': 305.862136067, 'y': 227.792364589}, 't': 0.023210111065445658, 'signal': 1610.7490799999998, 'UL': 5961.588799999997, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_100_23_100_23.slha', 'axes': {'x': 100.291882887, 'y': 23.3021625331}, 't': 0.023210111065445658, 'signal': 439556.56299999997, 'UL': 308290.6, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_172_95_172_95.slha', 'axes': {'x': 172.225699892, 'y': 95.4846815259}, 't': 0.023210111065445658, 'signal': 33478.2198, 'UL': 47241.208000000006, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_196_118_196_118.slha', 'axes': {'x': 196.60741759, 'y': 118.055296806}, 't': 0.023210111065445658, 'signal': 
17200.7862, 'UL': 29311.240000000005, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_251_172_251_172.slha', 'axes': {'x': 251.852044663, 'y': 172.322803905}, 't': 0.023210111065445658, 'signal': 4670.150079999999, 'UL': 11330.0428, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'T2bbWW_125_47_125_47.slha', 'axes': {'x': 125.931335831, 'y': 47.939343797}, 't': 0.023210111065445658, 'signal': 152315.80299999999, 'UL': 159014.628, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}] | [
"lessa.a.p@gmail.com"
] | lessa.a.p@gmail.com |
46a9ffa4ccd9fe2bae9d06ccad77a6be1c64fab1 | c867b7d1b26547f76605676725367eee89a9c6bb | /appimagebuilder/context.py | ad0994d1daa39762a43b8aead53128cb2ca43e72 | [
"MIT"
] | permissive | jclab-joseph/appimage-builder | a6ceae075eb0e6d2e61df9e1fe38371606a0c4e7 | e757003a6c60ea72721c866758befa2dc6a50058 | refs/heads/master | 2023-08-24T06:20:51.154749 | 2021-09-21T02:15:09 | 2021-09-21T02:15:09 | 412,281,434 | 0 | 0 | MIT | 2021-10-01T08:48:38 | 2021-10-01T01:10:51 | null | UTF-8 | Python | false | false | 2,438 | py | # Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import pathlib
class AppInfo:
    """Plain value holder for the bundled application's metadata:
    identifier, display name, icon, version, and the entry-point
    executable with its arguments."""

    id: str
    name: str
    icon: str
    version: str
    exec: str
    exec_args: str

    def __init__(
        self,
        id: str = None,
        name: str = None,
        icon: str = None,
        version: str = None,
        exec: str = None,
        exec_args: str = None,
    ):
        # Every field is stored exactly as received; all default to None.
        self.id, self.name, self.icon = id, name, icon
        self.version, self.exec, self.exec_args = version, exec, exec_args
class BundleInfo:
    """Describes the AppImage bundle being produced: the AppDir, the
    application metadata, and packaging options."""

    # root of the AppDir to pack
    app_dir: pathlib.Path
    app_info: AppInfo

    # update string to be attached into
    update_string: str

    # appimage runtime arch
    runtime_arch: str

    # sign key to be used
    sign_key: str

    # resulting file name
    file_name: str

    def __init__(
        self,
        app_dir: pathlib.Path = None,
        app_info: AppInfo = None,
        update_string: str = None,
        runtime_arch: str = None,
        sign_key: str = None,
        file_name: str = None,
    ):
        self.app_dir = app_dir
        # fall back to an empty AppInfo when none is supplied
        self.app_info = app_info or AppInfo()
        self.update_string = update_string
        self.runtime_arch = runtime_arch
        self.sign_key = sign_key
        self.file_name = file_name
class Context:
    """Shared state handed to build commands: application and bundle
    metadata, absolute AppDir/cache locations, and a scratch ``record``
    dict that commands write their actions into."""

    app_info: AppInfo
    bundle_info: BundleInfo
    app_dir: pathlib.Path
    cache_dir: pathlib.Path

    # Used by command to register their actions
    record: dict

    def __init__(
        self, app_info, bundle_info, app_dir: pathlib.Path, cache_dir: pathlib.Path
    ):
        self.app_info, self.bundle_info = app_info, bundle_info
        # Store absolute paths; the arguments may be relative.
        self.app_dir = app_dir.absolute()
        self.cache_dir = cache_dir.absolute()
        # Starts empty; populated by commands as they run.
        self.record = {}
| [
"contact@azubieta.net"
] | contact@azubieta.net |
9ed19f3d4dcddbbdb88440d766807fbdb9b7ba36 | aab4acf5f144985ef0ba69fa122ecdb0973a61e3 | /python_experiments/algorithm_vis/pscan_algo_vis.py | 2a16fd7c491dbf95d0b2faacd32a4a9f8c8df1ff | [
"MIT"
] | permissive | zzerain/ppSCAN | 1f2e07fed7b7bb6ae40a7f5f6b7721d92f74eb0c | 691b39309da1c6b5df46b264b5a300a35d644f70 | refs/heads/master | 2023-03-19T06:41:15.712172 | 2020-06-04T03:24:55 | 2020-06-04T03:24:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,070 | py | import networkx as nx
import matplotlib.pyplot as plt
from pscan_algo_naive import to_csr_graph
from itertools import chain
def vis_input(graph, min_cn, similar_deg_lst, min_pts, graph_name):
    """Visualize one snapshot of the pSCAN algorithm state on ``graph``
    and save it to ./<graph_name>.png.

    :type graph: nx.Graph
    :param min_cn: per-edge state aligned with the CSR edge order; edges
        with value -2 are overdrawn blue, value -1 red (see below)
    :param similar_deg_lst: per-vertex counts; a vertex is drawn as an
        opaque red node once its count reaches ``min_pts``
    :param min_pts: threshold applied to ``similar_deg_lst``
    :param graph_name: basename of the PNG written to the current dir

    NOTE(review): Python 2 only (print statements, xrange, list-valued
    map/filter).  Also reads the module globals ``deg_lst`` and
    ``dst_v_lst`` assigned in the __main__ block rather than taking them
    as parameters -- confirm before reusing outside this script.
    """
    # draw background graph: black discs under slightly smaller white
    # labelled nodes gives an outlined look; edges start out grey
    pos = nx.circular_layout(graph)
    nx.draw_networkx_nodes(graph, with_labels=True, pos=pos, font_size=12, node_size=380, alpha=1.0, width=4,
                           node_color='black')
    nx.draw_networkx(graph, with_labels=True, pos=pos, font_size=12, node_size=350, alpha=1.0, width=4,
                     edge_color='grey', node_color='white')
    # parse edge list and its property: expand deg_lst into one source
    # vertex per CSR slot, pair with dst_v_lst/min_cn, keep u < v only
    src_u_lst = list(chain.from_iterable(
        map(lambda pair: [pair[0] for _ in xrange(pair[1])], zip(range(len(deg_lst)), deg_lst))))
    edge_with_property_lst = filter(lambda pair: pair[0][0] < pair[0][1], zip(zip(src_u_lst, dst_v_lst), min_cn))
    print edge_with_property_lst
    # nx.draw_networkx_edge_labels(graph, pos=pos, edge_labels=dict(edge_with_property_lst))
    # overdraw edges according to their state: -2 in blue, -1 in red
    blue_edges = map(lambda pair: pair[0], filter(lambda pair: pair[1] == -2, edge_with_property_lst))
    nx.draw_networkx_edges(graph, pos=pos, edgelist=blue_edges, edge_color='b', width=4)
    red_edges = map(lambda pair: pair[0], filter(lambda pair: pair[1] == -1, edge_with_property_lst))
    nx.draw_networkx_edges(graph, pos=pos, edgelist=red_edges, edge_color='r', width=4)
    # alpha_lst = map(lambda similar_deg: min(float(similar_deg) / min_pts, 1), similar_deg_lst)
    # binary alpha: a vertex is fully visible iff similar_deg >= min_pts
    alpha_lst = map(lambda similar_deg: 0 if similar_deg< min_pts else 1, similar_deg_lst)
    print alpha_lst
    for idx, alpha in enumerate(alpha_lst):
        nx.draw_networkx_nodes(graph, with_labels=True, pos=pos, font_size=12, node_size=350, alpha=alpha,
                               nodelist=[idx], node_color='r')
    plt.axis('off')
    plt.savefig('./' + graph_name + '.png', bbox_inches='tight', pad_inches=0, transparent=True)
    plt.show()
def vis_input_only(graph):
    """Draw the bare input graph (no algorithm state) on a circular
    layout and save it to ./pure_demo_input_graph.png before showing it.

    :type graph: nx.Graph
    """
    # draw background graph: black discs (size 380) first, then slightly
    # smaller white labelled nodes on top, yielding outlined nodes with
    # grey edges
    pos = nx.circular_layout(graph)
    nx.draw_networkx_nodes(graph, with_labels=True, pos=pos, font_size=12, node_size=380, alpha=1.0, width=4,
                           node_color='black')
    nx.draw_networkx(graph, with_labels=True, pos=pos, font_size=12, node_size=350, alpha=1.0, width=4,
                     edge_color='grey', node_color='white')
    plt.axis('off')
    plt.savefig('./' + 'pure_demo_input_graph' + '.png', bbox_inches='tight', pad_inches=0, transparent=True)
    plt.show()
if __name__ == '__main__':
    # Load the demo graph and build its CSR (compressed sparse row)
    # representation.  NOTE(review): dst_v_lst and deg_lst are also read
    # as module globals inside vis_input, so these names must not change.
    graph = nx.read_edgelist('demo_input_graph.txt', nodetype=int)
    offset_lst, dst_v_lst, deg_lst = to_csr_graph(graph)
    print 'csr representation:\noffset_lst=', offset_lst, '\ndst_v_lst=', dst_v_lst, '\ndeg_lst=', deg_lst, '\n'
    # Plain drawing of the input graph with no algorithm state.
    vis_input_only(graph)
    # demo input graph: every edge state and similar-degree starts at 0
    min_cn = [0 for _ in xrange(len(dst_v_lst))]
    similar_degree_lst = [0 for _ in xrange(len(deg_lst))]
    vis_input(graph, min_cn=min_cn, similar_deg_lst=similar_degree_lst, min_pts=3, graph_name='demo_input_graph')
    # after 1. pruning: hard-coded snapshot of per-edge states
    # (-2 edges are drawn blue, -1 red by vis_input)
    min_cn = [-1, 0, 3, 3, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 3, 0, 3, 0, 3, 3, 4, 0, 3, 3, 0, 0, 3, 0, 0, 0, -2, -2, 0, 0]
    vis_input(graph, min_cn=min_cn, similar_deg_lst=similar_degree_lst, min_pts=3, graph_name='after_pruning_graph')
    # after 2.1 check core ("bsp" presumably bulk-synchronous step 1 --
    # confirm against the algorithm implementation)
    min_cn = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1,
              -1, -1, -1, -1, -2, -2, -2, -2]
    vis_input(graph, min_cn=min_cn, similar_deg_lst=similar_degree_lst, min_pts=3,
              graph_name='after_check_core_1st_bsp_graph')
    # after 2.2 check core: per-vertex similar-degrees now filled in, so
    # vertices reaching min_pts=3 are rendered as opaque red cores
    similar_degree_lst = [1, 4, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0]
    min_cn = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1,
              -1, -1, -1, -1, -2, -2, -2, -2]
    vis_input(graph, min_cn=min_cn, similar_deg_lst=similar_degree_lst, min_pts=3,
              graph_name='after_check_core_2nd_bsp_graph')
| [
"yche@cse.ust.hk"
] | yche@cse.ust.hk |
b66862f3d3d6cc0b77fbbf720f0a11fe8c797d1e | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/markdown/markdown/html4.py | 08f241d57aaec2c04ed55d69f14938f1aba3e631 | [
"Apache-2.0"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 9,672 | py | # markdown/html4.py
#
# Add html4 serialization to older versions of Elementree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import markdown
ElementTree = markdown.etree.ElementTree
QName = markdown.etree.QName
Comment = markdown.etree.Comment
PI = markdown.etree.PI
ProcessingInstruction = markdown.etree.ProcessingInstruction
# HTML4 "void" elements: serialized without a closing tag.
# BUG FIX: upstream had a missing comma ("meta" "param"), which string-
# concatenation turned into the bogus tag "metaparam" -- so both <meta>
# and <param> incorrectly received closing tags.
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")
try:
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:
    # Very old Pythons without the built-in set(): fall back to the tuple.
    pass
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublic core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _serialize_html(write, elem, encoding, qnames, namespaces):
    """Recursively serialize *elem* as HTML4, emitting chunks via *write*.

    ``qnames`` maps tag/attribute names to their encoded, qualified form
    (a value of None marks a "transparent" element whose children are
    emitted without a surrounding tag).  ``namespaces`` (uri -> prefix)
    is only passed at the tree root, where xmlns declarations are written;
    recursive calls pass None.  Tags in HTML_EMPTY get no closing tag,
    and <script>/<style> text is emitted unescaped.

    NOTE(review): ``items.sort()`` on the result of ``elem.items()`` is
    Python 2 style -- on Python 3 ``items()`` is a view and has no sort().
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # Transparent element: serialize text and children only.
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                items.sort() # lexical order
                for k, v in items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
                if namespaces:
                    # Root element only: declare every collected namespace.
                    items = namespaces.items()
                    items.sort(key=lambda x: x[1]) # sort on prefix
                    for v, k in items:
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                write(">")
            tag = tag.lower()
            if text:
                if tag == "script" or tag == "style":
                    # Script/style content must not be entity-escaped.
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            if tag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
def write_html(root, f,
               # keyword arguments
               encoding="us-ascii",
               default_namespace=None):
    """Serialize the element tree rooted at *root* as HTML4 to *f*.

    *f* is either a file-like object with a ``write`` method or a
    filename (opened here in binary mode).  NOTE(review): a file opened
    from a filename is never explicitly closed -- prefer passing an open
    file object.
    """
    assert root is not None
    if not hasattr(f, "write"):
        f = open(f, "wb")
    write = f.write
    if not encoding:
        encoding = "us-ascii"
    # Pre-compute qualified-name encodings and xmlns declarations once
    # for the whole tree, then serialize.
    qnames, namespaces = _namespaces(
        root, encoding, default_namespace
        )
    _serialize_html(
        write, root, encoding, qnames, namespaces
        )
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
    """Collect namespace/qname info for one serialization pass.

    Walks the whole tree under *elem* and returns ``(qnames, namespaces)``:
    ``qnames`` maps every tag/attribute name seen to its *encoded*
    ``prefix:local`` form (None -> None for transparent elements), and
    ``namespaces`` maps namespace URIs to the prefixes chosen for them
    (well-known URIs use _namespace_map, others get ns0, ns1, ...).

    NOTE(review): uses ``basestring`` and so is Python 2 only as written.
    """
    # identify namespaces used in this tree
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}
    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""
    def encode(text):
        # All emitted names are byte-encoded with the output encoding.
        return text.encode(encoding)
    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                # "{uri}local" form: split and assign/reuse a prefix.
                uri, tag = qname[1:].split("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = encode("%s:%s" % (prefix, tag))
                else:
                    qnames[qname] = encode(tag) # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = encode(qname)
        except TypeError:
            _raise_serialization_error(qname)
    # populate qname and namespaces table
    try:
        iterate = elem.iter
    except AttributeError:
        iterate = elem.getiterator # cET compatibility
    for elem in iterate():
        tag = elem.tag
        if isinstance(tag, QName) and tag.text not in qnames:
            add_qname(tag.text)
        elif isinstance(tag, basestring):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def to_html_string(element, encoding=None):
    """Serialize *element* (an Element tree root) to an HTML4 string."""
    class _Sink:
        # Minimal write()-only object; write_html only needs .write.
        pass
    chunks = []
    sink = _Sink()
    sink.write = chunks.append
    write_html(ElementTree(element).getroot(), sink, encoding)
    return "".join(chunks)
| [
"bcwalrus@cloudera.com"
] | bcwalrus@cloudera.com |
7b154a6aa46aa7e5bed42941b1f2b03e772e3274 | d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3 | /chromium/buildtools/checkdeps/checkdeps_test.py | 6442d5bd667d685cf208f853881fe353d10e64e3 | [
"BSD-3-Clause"
] | permissive | Csineneo/Vivaldi | 4eaad20fc0ff306ca60b400cd5fad930a9082087 | d92465f71fb8e4345e27bd889532339204b26f1e | refs/heads/master | 2022-11-23T17:11:50.714160 | 2019-05-25T11:45:11 | 2019-05-25T11:45:11 | 144,489,531 | 5 | 4 | BSD-3-Clause | 2022-11-04T05:55:33 | 2018-08-12T18:04:37 | null | UTF-8 | Python | false | false | 9,781 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for checkdeps.
"""
import os
import unittest
import builddeps
import checkdeps
import results
class CheckDepsTest(unittest.TestCase):
  """End-to-end tests for checkdeps.DepsChecker against the checked-in
  checkdeps/testdata tree.

  The checker is constructed with being_tested=True and rooted one level
  above this file, so all expectations below refer to paths under
  checkdeps/testdata.
  """

  def setUp(self):
    self.deps_checker = checkdeps.DepsChecker(
        being_tested=True,
        base_directory=os.path.join(os.path.dirname(__file__), os.path.pardir))

  def ImplTestRegularCheckDepsRun(self, ignore_temp_rules, skip_tests):
    """Shared driver for the four testRegularCheckDepsRun* variants."""
    self.deps_checker._ignore_temp_rules = ignore_temp_rules
    self.deps_checker._skip_tests = skip_tests
    self.deps_checker.CheckDirectory(
        os.path.join(self.deps_checker.base_directory,
                     'checkdeps/testdata'))
    problems = self.deps_checker.results_formatter.GetResults()
    if skip_tests:
      self.failUnlessEqual(4, len(problems))
    else:
      self.failUnlessEqual(5, len(problems))

    def VerifySubstringsInProblems(key_path, substrings_in_sequence):
      """Finds the problem in |problems| that contains |key_path|,
      then verifies that each of |substrings_in_sequence| occurs in
      that problem, in the order they appear in
      |substrings_in_sequence|.
      """
      found = False
      key_path = os.path.normpath(key_path)
      for problem in problems:
        index = problem.find(key_path)
        if index != -1:
          for substring in substrings_in_sequence:
            index = problem.find(substring, index + 1)
            self.failUnless(index != -1, '%s in %s' % (substring, problem))
          found = True
          break
      if not found:
        self.fail('Found no problem for file %s' % key_path)

    if ignore_temp_rules:
      VerifySubstringsInProblems('testdata/allowed/test.h',
                                 ['-checkdeps/testdata/disallowed',
                                  'temporarily_allowed.h',
                                  '-third_party/explicitly_disallowed',
                                  'Because of no rule applying'])
    else:
      VerifySubstringsInProblems('testdata/allowed/test.h',
                                 ['-checkdeps/testdata/disallowed',
                                  '-third_party/explicitly_disallowed',
                                  'Because of no rule applying'])
    VerifySubstringsInProblems('testdata/disallowed/test.h',
                               ['-third_party/explicitly_disallowed',
                                'Because of no rule applying',
                                'Because of no rule applying'])
    VerifySubstringsInProblems('disallowed/allowed/test.h',
                               ['-third_party/explicitly_disallowed',
                                'Because of no rule applying',
                                'Because of no rule applying'])
    VerifySubstringsInProblems('testdata/noparent/test.h',
                               ['allowed/bad.h',
                                'Because of no rule applying'])
    if not skip_tests:
      VerifySubstringsInProblems('allowed/not_a_test.cc',
                                 ['-checkdeps/testdata/disallowed'])

  def testRegularCheckDepsRun(self):
    self.ImplTestRegularCheckDepsRun(False, False)

  def testRegularCheckDepsRunIgnoringTempRules(self):
    self.ImplTestRegularCheckDepsRun(True, False)

  def testRegularCheckDepsRunSkipTests(self):
    self.ImplTestRegularCheckDepsRun(False, True)

  def testRegularCheckDepsRunIgnoringTempRulesSkipTests(self):
    self.ImplTestRegularCheckDepsRun(True, True)

  def CountViolations(self, ignore_temp_rules):
    """Runs the checker over testdata and returns the violation count
    reported by CountViolationsFormatter (as a string)."""
    self.deps_checker._ignore_temp_rules = ignore_temp_rules
    self.deps_checker.results_formatter = results.CountViolationsFormatter()
    self.deps_checker.CheckDirectory(
        os.path.join(self.deps_checker.base_directory,
                     'checkdeps/testdata'))
    return self.deps_checker.results_formatter.GetResults()

  def testCountViolations(self):
    self.failUnlessEqual('11', self.CountViolations(False))

  def testCountViolationsIgnoringTempRules(self):
    self.failUnlessEqual('12', self.CountViolations(True))

  def testCountViolationsWithRelativePath(self):
    self.deps_checker.results_formatter = results.CountViolationsFormatter()
    self.deps_checker.CheckDirectory(
        os.path.join('checkdeps', 'testdata', 'allowed'))
    self.failUnlessEqual('4', self.deps_checker.results_formatter.GetResults())

  def testTempRulesGenerator(self):
    self.deps_checker.results_formatter = results.TemporaryRulesFormatter()
    self.deps_checker.CheckDirectory(
        os.path.join(self.deps_checker.base_directory,
                     'checkdeps/testdata/allowed'))
    temp_rules = self.deps_checker.results_formatter.GetResults()
    expected = [u'    "!checkdeps/testdata/disallowed/bad.h",',
                u'    "!checkdeps/testdata/disallowed/teststuff/bad.h",',
                u'    "!third_party/explicitly_disallowed/bad.h",',
                u'    "!third_party/no_rule/bad.h",']
    self.failUnlessEqual(expected, temp_rules)

  def testBadBaseDirectoryNotCheckoutRoot(self):
    # This assumes git. It's not a valid test if buildtools is fetched via svn.
    with self.assertRaises(builddeps.DepsBuilderError):
      checkdeps.DepsChecker(being_tested=True,
                            base_directory=os.path.dirname(__file__))

  def testCheckAddedIncludesAllGood(self):
    problems = self.deps_checker.CheckAddedCppIncludes(
      [['checkdeps/testdata/allowed/test.cc',
        ['#include "checkdeps/testdata/allowed/good.h"',
         '#include "checkdeps/testdata/disallowed/allowed/good.h"']
      ]])
    self.failIf(problems)

  def testCheckAddedIncludesManyGarbageLines(self):
    garbage_lines = ["My name is Sam%d\n" % num for num in range(50)]
    problems = self.deps_checker.CheckAddedCppIncludes(
      [['checkdeps/testdata/allowed/test.cc', garbage_lines]])
    self.failIf(problems)

  def testCheckAddedIncludesNoRule(self):
    problems = self.deps_checker.CheckAddedCppIncludes(
      [['checkdeps/testdata/allowed/test.cc',
        ['#include "no_rule_for_this/nogood.h"']
      ]])
    self.failUnless(problems)

  def testCheckAddedIncludesSkippedDirectory(self):
    problems = self.deps_checker.CheckAddedCppIncludes(
      [['checkdeps/testdata/disallowed/allowed/skipped/test.cc',
        ['#include "whatever/whocares.h"']
      ]])
    self.failIf(problems)

  def testCheckAddedIncludesTempAllowed(self):
    problems = self.deps_checker.CheckAddedCppIncludes(
      [['checkdeps/testdata/allowed/test.cc',
        ['#include "checkdeps/testdata/disallowed/temporarily_allowed.h"']
      ]])
    self.failUnless(problems)

  def testCopyIsDeep(self):
    # Regression test for a bug where we were making shallow copies of
    # Rules objects and therefore all Rules objects shared the same
    # dictionary for specific rules.
    #
    # The first pair should bring in a rule from testdata/allowed/DEPS
    # into that global dictionary that allows the
    # temp_allowed_for_tests.h file to be included in files ending
    # with _unittest.cc, and the second pair should completely fail
    # once the bug is fixed, but succeed (with a temporary allowance)
    # if the bug is in place.
    problems = self.deps_checker.CheckAddedCppIncludes(
      [['checkdeps/testdata/allowed/test.cc',
        ['#include "/checkdeps/testdata/disallowed/temporarily_allowed.h"']
      ],
       ['checkdeps/testdata/disallowed/foo_unittest.cc',
        ['#include "checkdeps/testdata/bongo/temp_allowed_for_tests.h"']
      ]])
    # With the bug in place, there would be two problems reported, and
    # the second would be for foo_unittest.cc.
    self.failUnless(len(problems) == 1)
    self.failUnless(problems[0][0].endswith('/test.cc'))

  def testTraversalIsOrdered(self):
    dirs_traversed = []
    for rules, filenames in self.deps_checker.GetAllRulesAndFiles():
      self.failUnlessEqual(type(filenames), list)
      self.failUnlessEqual(filenames, sorted(filenames))
      if filenames:
        dir_names = set(os.path.dirname(file) for file in filenames)
        self.failUnlessEqual(1, len(dir_names))
        dirs_traversed.append(dir_names.pop())
    self.failUnlessEqual(dirs_traversed, sorted(dirs_traversed))

  def testCheckPartialImportsAreAllowed(self):
    problems = self.deps_checker.CheckAddedProtoImports(
      [['checkdeps/testdata/test.proto',
        ['import "no_rule_for_this/nogood.proto"']
      ]])
    self.failIf(problems)

  def testCheckAddedFullPathImportsAllowed(self):
    # NOTE: Base directory is buildtools.
    problems = self.deps_checker.CheckAddedProtoImports(
      [['checkdeps/testdata/test.proto',
        ['import "checkdeps/testdata/allowed/good.proto"',
         'import "checkdeps/testdata/disallowed/sub_folder/good.proto"']
      ]])
    self.failIf(problems)

  def testCheckAddedFullPathImportsDisallowed(self):
    problems = self.deps_checker.CheckAddedProtoImports(
      [['checkdeps/testdata/test.proto',
        ['import "checkdeps/testdata/disallowed/bad.proto"']
      ]])
    self.failUnless(problems)

  def testCheckAddedFullPathImportsManyGarbageLines(self):
    garbage_lines = ["My name is Sam%d\n" % num for num in range(50)]
    problems = self.deps_checker.CheckAddedProtoImports(
      [['checkdeps/testdata/test.proto',
        garbage_lines]])
    self.failIf(problems)

  def testCheckAddedIncludesNoRuleFullPath(self):
    problems = self.deps_checker.CheckAddedProtoImports(
      [['checkdeps/testdata/test.proto',
        ['import "../tools/some.proto"']
      ]])
    self.failUnless(problems)
if __name__ == '__main__':
unittest.main()
| [
"csineneo@gmail.com"
] | csineneo@gmail.com |
bea85b6452243cf0efd563ed4b68fecc1a744fba | 0cdf69c4dbb89d1d57e74b160375764119e9edba | /tlouesports/tlou/views.py | 558b22b6d56534916a19f9db1a6e1c23ce50dc8f | [] | no_license | rabbanibcs/Allauth | 6b250fcfca810ac9fdd2e433dea21bd9d705be13 | dafe7e307f4bd649eb9111405f0217b9444ac2b0 | refs/heads/main | 2023-06-18T03:40:34.078037 | 2021-07-12T09:24:33 | 2021-07-12T09:24:33 | 369,312,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,000 | py | from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.views.decorators.csrf import csrf_exempt
from .forms import UserUpdateForm,UserdtlUpdateForm
from .models import News, Games, Ladders, userdtl, Banner, sliders, Social, image, team, \
Contact, Setting
# from .forms import ContactForm
import random
from django.conf import settings
import threading
from django.core.mail import EmailMessage
# Create your views here.
def detail(request, id):
    """Render the team-details page for the team with primary key *id*."""
    selected_team = team.objects.get(pk=id)
    return render(request, 'team-details.html', {'team': selected_team})
def ranking(request):
    """Record a match result: the team posted as 'team1' beat 'team2'.

    NOTE(review): team.elo_rating is invoked on the model *class* with both
    team instances and a=1 (team1 = winner) -- presumably a custom model
    method; confirm its signature matches this call.
    """
    team1=team.objects.get(pk=request.POST.get('team1'))
    team2=team.objects.get(pk=request.POST.get('team2'))
    team.elo_rating(team1,team2,a=1)
    return redirect('home')
class datathread(threading.Thread):
    """Worker thread that delivers *data* by calling its send() method.

    Used to fire off e-mails without blocking the request cycle.
    """

    def __init__(self, data):
        threading.Thread.__init__(self)
        self.data = data

    def run(self):
        # Runs on the worker thread once start() is called.
        self.data.send()
def defhome(request):
    """Render the landing page: shuffled news/banners/ladders, one slider,
    social links, a five-image strip and the team leaderboard by points."""
    indexnews = News.objects.filter(active=True).order_by('?')
    fBanner = Banner.objects.filter(active=True).order_by('?')[:2]
    fsliders = sliders.objects.filter(active=True).order_by('?')
    fladder = Ladders.objects.filter(active=True).order_by('?')[:4]
    fsocial = Social.objects.all()
    fimage = image.objects.filter(active=True).order_by('?')[:5]
    teams = team.objects.all().order_by('-points')
    # NOTE(review): fsliders[0] raises IndexError if no slider is active.
    return render(request,'index.html', {'news':indexnews,'banner':fBanner,'slider':fsliders[0],'social':fsocial,'ladder':fladder,'image':fimage ,'teams':teams})
def defnews(request):
    """Render the news listing page with up to 20 active news items."""
    indexnews = News.objects.filter(active=True)[:20]
    fsocial = Social.objects.all()
    # BUG FIX: the original called .order_by('?'[:5]) -- '?'[:5] is just the
    # string '?', so the slice applied to the string, not the queryset, and
    # *all* images were fetched. Shuffle and keep five, as defhome does.
    fimage = image.objects.filter(active=True).order_by('?')[:5]
    return render(request, 'news.html', {'news': indexnews, 'social': fsocial, 'image': fimage})
def defsnews(request, title):
    """Render a single news article identified by its *title*."""
    indexnews = News.objects.get(title=title)
    fsocial = Social.objects.all()
    # BUG FIX: '?'[:5] sliced the *string* '?' (a no-op), never limiting the
    # queryset; shuffle and keep at most five images.
    fimage = image.objects.filter(active=True).order_by('?')[:5]
    return render(request, 'single-blog.html', {'news': indexnews, 'social': fsocial, 'image': fimage})
def defgames(request):
    """Render the games listing page with up to 12 active games."""
    games = Games.objects.filter(active=True)[:12]
    fsocial = Social.objects.all()
    # BUG FIX: '?'[:5] sliced the *string* '?' (a no-op), never limiting the
    # queryset; shuffle and keep at most five images.
    fimage = image.objects.filter(active=True).order_by('?')[:5]
    return render(request, 'games.html', {'games': games, 'social': fsocial, 'image': fimage})
def defladders(request, lname):
    """Render the ladder list (up to 12 active) for the game named *lname*."""
    sgame = Games.objects.get(Name=lname)
    fsocial = Social.objects.all()
    # BUG FIX: '?'[:5] sliced the *string* '?' (a no-op), never limiting the
    # queryset; shuffle and keep at most five images.
    fimage = image.objects.filter(active=True).order_by('?')[:5]
    ladders = Ladders.objects.filter(game=sgame, active=True)[:12]
    return render(request, 'ladders.html', {'gname': lname, 'ladders': ladders, 'social': fsocial, 'image': fimage})
def defladder(request, lname):
    """Render a single ladder page and handle team creation for it.

    GET  : show the ladder, its teams, all non-staff users and a
           team-creation form.
    POST : create a new ``team`` from the submitted roster, then redirect
           back to this ladder's page with a success message.
    """
    fsocial = Social.objects.all()
    allusr = User.objects.filter(is_staff=False)
    # BUG FIX: '?'[:5] sliced the *string* '?' (a no-op), never limiting the
    # queryset; shuffle and keep at most five images.
    fimage = image.objects.filter(active=True).order_by('?')[:5]
    ladders = Ladders.objects.get(Name=lname, active=True)
    fteam = team.objects.filter(ladder=ladders)
    if request.method == 'POST':
        teamname = request.POST.get('teamname', '')
        tadmin = request.POST.get('tadmin', '')
        # BUG FIX: when the admin username did not resolve, the original
        # assigned the undefined name ``null`` (NameError -> HTTP 500);
        # fall back to None instead.
        uadmin = _find_user(tadmin)
        # Look up roster slots 2..15. The form also posts "member1", but the
        # original code never used it (the admin occupies slot 1); that
        # behaviour is preserved here.
        members = {n: _find_user(request.POST.get('member%d' % n, ''))
                   for n in range(2, 16)}
        pteam = team(Name=teamname, ladder=ladders, Member1=uadmin,
                     Member2=members[2], Member3=members[3],
                     Member4=members[4], Member5=members[5],
                     Member6=members[6], Member7=members[7],
                     Member8=members[8], Member9=members[9],
                     Member10=members[10], Member11=members[11],
                     Member12=members[12], Member13=members[13],
                     Member14=members[14], Member15=members[15])
        pteam.save()
        messages.success(request, "Your team has been successfully created")
        return redirect("/ladder/" + str(ladders.Name))
    return render(request, 'ladder.html',
                  {'ladder': ladders, 'social': fsocial, 'image': fimage,
                   'alluser': allusr, 'team': fteam})


def _find_user(username):
    """Return the User with *username*, or None when no such user exists."""
    try:
        return User.objects.get(username=username)
    except Exception:
        return None
def handleSignup(request):
    """Create a new (inactive) account from the signup form.

    Validates uniqueness/length/charset of the username and that the two
    passwords match, then creates an inactive User plus a userdtl row
    holding a 4-digit one-time code, and redirects to /validate/<username>
    where the code is confirmed.
    """
    if request.method == 'POST':
        # Get the post parameters
        username = request.POST['username']
        fname = request.POST['fname']
        lname = request.POST['lname']
        email = request.POST['email']
        pass1 = request.POST['pass1']
        pass2 = request.POST['pass2']
        psn = request.POST['psn']
        xbl = request.POST['xbl']
        # Check for errorneous inputs
        if User.objects.filter(username=username):
            messages.error(request, "Username you choose is already used")
            return redirect('/')
        # username should be under 10 characters
        # NOTE(review): the check actually allows up to 20 characters,
        # disagreeing with both the comment above and the message below.
        if len(username) > 20:
            messages.error(request, "Username must be under 20 characters")
            return redirect('/')
        # username should be alphanumeric
        if not username.isalnum():
            messages.error(request, "Username should only contain letters and numbers")
            return redirect('/')
        # passwords should match
        if pass1 != pass2:
            messages.error(request, "Passwords do not match")
            return redirect('/')
        # Create the user
        myuser = User.objects.create_user(username, email, pass1)
        myuser.first_name = fname
        myuser.last_name = lname
        myuser.is_active = False  # inactive until the one-time code is confirmed
        myuser.save()
        # SECURITY(review): bpass stores the password in *plaintext* in the
        # database; tpass is the 4-digit code mailed by the validate() view.
        dtluser = userdtl(user_id = myuser.id, tpass = random.randint(1000,9999), bpass = pass1, psn=psn, xbl=xbl)
        dtluser.save()
        messages.success(request, "Your Tlouesports account has been successfully created")
        return redirect("/validate/"+ myuser.username +"")
    else:
        return HttpResponse('404 - Not Found')
def handleLogin(request):
    """Authenticate a user from the login form.

    An empty password redirects to the one-time-code validation page
    (doubles as the "forgot password" flow); otherwise standard
    username/password authentication is attempted.
    """
    if request.method == 'POST':
        # Get the post parameters
        loginusername = request.POST['loginusername']
        loginpassword = request.POST['loginpassword']
        if loginpassword == "":
            # Blank password: fall back to the e-mailed one-time code.
            return redirect("/validate/"+ loginusername +"")
        user = authenticate(username=loginusername, password=loginpassword)
        if user is not None:
            login(request, user)
            messages.success(request, "Successfully Logged In")
            return redirect('/')
        else:
            messages.error(request, "Invalid Credentials, Please try again")
            return redirect('/')
    return HttpResponse('404 - Not Found')
def handleLogout(request):
    """Log the current user out and redirect to the home page."""
    logout(request)
    messages.success(request, "Successfully Logged Out")
    return redirect('/')
def validate(request, user):
    """One-time-code page: activate a new account or reset a password.

    A fresh code e-mail is (re)sent on every request for *user* (including
    each failed POST -- NOTE(review): consider rate limiting).  On POST, a
    matching code either activates an inactive account or, for an active
    account, sets the new password from the form; either way the user is
    logged in and sent home.
    """
    fsocial = Social.objects.all()
    # BUG FIX: '?'[:5] sliced the *string* '?' (a no-op), never limiting the
    # queryset; shuffle and keep at most five images.
    fimage = image.objects.filter(active=True).order_by('?')[:5]
    try:
        vuser = User.objects.get(username=user)
        email_from = settings.EMAIL_HOST_USER
        msg = EmailMessage(
            "Your code for tlouesports.com is Generated",
            "Hi " + vuser.first_name + ", <br><br> Your code for tlouesports.com is "+ vuser.userdtl.tpass + "<footer> <br><br><hr> Do not share this code with anyone <br><hr> Thank you for using tlouesports.com! </footer>",
            email_from,
            [vuser.email],
        )
        msg.content_subtype = "html"
        # Send the mail off-thread so the response is not blocked on SMTP.
        datathread(msg).start()
        if request.method == "POST":
            passw = request.POST['onepass']
            if passw == vuser.userdtl.tpass:
                musr = User.objects.get(username=user)
                if musr.is_active == False:
                    # First-time validation: activate the account.
                    musr.is_active = True
                    messages.success(request, "Account Validation Successful !")
                else:
                    # Already active: this is a password reset.
                    passn = request.POST['npass']
                    musr.set_password(passn)
                    usr = userdtl.objects.get(user=musr)
                    # SECURITY(review): bpass keeps the plaintext password;
                    # preserved for compatibility but should be removed.
                    usr.bpass = passn
                    usr.save()
                    messages.success(request, "Password changed successfully !!")
                musr.save()
                login(request, musr)
                return redirect("/")
            else:
                # BUG FIX: a wrong code was reported via messages.success();
                # it is an error condition.
                messages.error(request, "Invalid OTP")
    except:
        messages.error(request, "Account not Exists")
        return redirect('/')
    return render(request, 'validate.html', {'vuser': vuser, 'social': fsocial, 'image': fimage})
def defprofile(request, user):
    """Render the profile page for *user*; users may only view their own."""
    fsocial = Social.objects.all()
    # BUG FIX: '?'[:5] sliced the *string* '?' (a no-op), never limiting the
    # queryset; shuffle and keep at most five images.
    fimage = image.objects.filter(active=True).order_by('?')[:5]
    try:
        vuser = User.objects.get(username=user)
        if vuser != request.user:
            messages.error(request, "You are not allow to access profile of other user")
            return redirect("/")
    except:
        messages.error(request, "You are trying to access profile of Invalid user")
        return redirect("/")
    return render(request, 'profile.html', {'user': vuser, 'social': fsocial, 'image': fimage})
@csrf_exempt
def defcontact(request):
    """Render the contact page and store submitted messages.

    SECURITY(review): @csrf_exempt disables CSRF protection on this form.
    """
    setting = Setting.objects.get(pk=2)
    if request.method=="POST":
        name=request.POST.get('name')
        phone=request.POST.get('phone')
        email=request.POST.get('email')
        desc=request.POST.get('desc')
        ip = request.META.get('REMOTE_ADDR')  # best-effort client address
        contact= Contact(name=name, phone=phone, email=email, desc=desc, ip=ip, date=datetime.today())
        contact.save()
        messages.success(request, "Your message has been sent!")
    context = {'setting': setting}
    return render(request,'contact.html', context)
@login_required(login_url='/login') # Check login
@csrf_exempt
def user_update(request):
    """Show and process the profile-edit form (User + userdtl together).

    SECURITY(review): @csrf_exempt disables CSRF protection here as well.
    On an invalid POST the bound forms fall through and are re-rendered
    with their validation errors.
    """
    if request.method == 'POST':
        user_form = UserUpdateForm(request.POST, instance=request.user) # request.user is user data
        userdtl_form = UserdtlUpdateForm(request.POST, instance=request.user.userdtl)
        if user_form.is_valid() and userdtl_form.is_valid():
            user_form.save()
            userdtl_form.save()
            messages.success(request, 'Your account has been updated!')
            return redirect('/')
    else:
        user_form = UserUpdateForm(instance=request.user)
        userdtl_form = UserdtlUpdateForm(instance=request.user.userdtl)
    context = {
        'user_form': user_form,
        'userdtl_form': userdtl_form
    }
    return render(request, 'profile.html', context)
def aboutus(request):
    """Render the static about page with the site-wide Setting record."""
    site_settings = Setting.objects.get(pk=2)
    return render(request, 'about.html', {'setting': site_settings})
import math
def EloRating(Ra, Rb, a, t1c=False, t2c=False):
    """Update the Elo ratings of two teams after a match.

    Ra, Rb : current ratings of team 1 and team 2.
    a      : 1 when team 1 won, anything else when team 2 won.
    t1c, t2c : "champion" flags -- while a team holds the flag its rating
               gain on a win is doubled, and on a loss the flag passes to
               the other team.

    Returns (new_Ra, new_Rb, t1c, t2c).
    """
    # The K-factor depends on the *winner's* current rating band.
    winner_rating = Ra if a == 1 else Rb
    if winner_rating < 1500:
        K = 50
    elif winner_rating < 1800:
        K = 40
    else:
        K = 25
    # Expected scores from the logistic Elo curve.
    Pa = 1.0 / (1.0 + math.pow(10, (Rb - Ra) / 400.0))
    Pb = 1.0 / (1.0 + math.pow(10, (Ra - Rb) / 400.0))
    if a == 1:
        New_t1 = Ra + K * (1 - Pa)
        New_t2 = Rb + K * (0 - Pb)
    else:
        New_t1 = Ra + K * (0 - Pa)
        New_t2 = Rb + K * (1 - Pb)
    if t1c:
        if a == 1:
            # Champion team 1 defended the title: double its gain.
            New_t1 += New_t1 - Ra
        else:
            # Champion lost: the flag passes to team 2.
            t1c, t2c = False, True
    elif t2c:
        if a != 1:
            # Champion team 2 defended the title: double its gain.
            New_t2 += New_t2 - Rb
        else:
            t1c, t2c = True, False
    return New_t1, New_t2, t1c, t2c
"rabbanibcs@gmail.com"
] | rabbanibcs@gmail.com |
d80e4d99aa7149abf90c4aaf998e1594efcb78f9 | e93690e8ac06fd6aa2f7fe7d3ea56978e787e496 | /optimizeDLM/perSentence/optimizePerVerb/optimizeDependencyLength_POS_NoSplit_ByOcc_Amortized_Optim4.py | 6cd6b2feaeee1eae42159d0e54c62f388a63ec0a | [] | no_license | m-hahn/optimization-landscapes | 8446fbb0ae783f7aa76278e8a5f4cf5e6f4b2cd8 | b16f640dd855a912f52844882b3de701e5b9eca6 | refs/heads/master | 2023-08-12T01:44:18.434912 | 2021-10-03T14:37:11 | 2021-10-03T14:37:11 | 273,661,277 | 0 | 0 | null | 2021-04-15T04:39:52 | 2020-06-20T07:36:12 | TeX | UTF-8 | Python | false | false | 17,928 | py | # Optimizing a grammar for dependency length minimization
import random
import sys
objectiveName = "DepL"
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--language', type=str)
parser.add_argument('--entropy_weight', type=float, default=0.001)
parser.add_argument('--lr_grammar', type=float, default=0.01) #random.choice([0.000001, 0.00001, 0.00002, 0.0001, 0.001, 0.01]))
parser.add_argument('--momentum_grammar', type=float, default=0.8) #random.choice([0.0, 0.2, 0.8, 0.9]))
parser.add_argument('--lr_amortized', type=float, default=random.choice([0.000001, 0.00001, 0.00002, 0.0001, 0.001, 0.01]))
parser.add_argument('--momentum_amortized', type=float, default=random.choice([0.0, 0.2, 0.8, 0.9]))
args = parser.parse_args()
myID = random.randint(0,10000000)
posUni = set()
posFine = set()
deps = ["acl", "acl:relcl", "advcl", "advmod", "amod", "appos", "aux", "auxpass", "case", "cc", "ccomp", "compound", "compound:prt", "conj", "conj:preconj", "cop", "csubj", "csubjpass", "dep", "det", "det:predet", "discourse", "dobj", "expl", "foreign", "goeswith", "iobj", "list", "mark", "mwe", "neg", "nmod", "nmod:npmod", "nmod:poss", "nmod:tmod", "nsubj", "nsubjpass", "nummod", "parataxis", "punct", "remnant", "reparandum", "root", "vocative", "xcomp"]
from math import log, exp
from random import random, shuffle
from corpusIterator_V import CorpusIterator_V as CorpusIterator
originalDistanceWeights = {}
def makeCoarse(x):
    """Strip a UD relation subtype: 'nmod:poss' -> 'nmod'; plain labels
    are returned unchanged."""
    head, sep, _ = x.partition(":")
    return head if sep else x
import hashlib
def hash_(x):
    """Return the hex SHA-224 digest of *x*.

    BUG FIX: the original passed str straight to hashlib, which raises
    TypeError on Python 3 (hashing requires bytes). str input is UTF-8
    encoded first; bytes are hashed as-is, keeping Python 2 behaviour.
    """
    data = x if isinstance(x, bytes) else x.encode("utf-8")
    return hashlib.sha224(data).hexdigest()
def initializeOrderTable():
   """Scan the corpus once and collect head-dependent ordering statistics.

   Returns (dhLogits, vocab, keys, depsVocab):
     dhLogits  : maps (headPOS, dep, dependentPOS) keys to the add-one
                 smoothed log-odds of the dependent preceding its head,
     vocab     : word -> occurrence count,
     keys      : the set of (headPOS, dep, dependentPOS) keys seen,
     depsVocab : the set of dependency labels seen (incl. "root").
   Also populates the module-level posUni / posFine sets as a side effect.
   """
   orderTable = {}
   keys = set()
   vocab = {}
   distanceSum = {}
   distanceCounts = {}
   depsVocab = set()
   depsVocab.add("root")
   for partition in ["together"]:
     for sentence in CorpusIterator(args.language,partition).iterator():
       sentenceHash = hash_(" ".join([x["word"] for x in sentence]))
       for line in sentence:
          vocab[line["word"]] = vocab.get(line["word"], 0) + 1
          posFine.add(line["posFine"])
          posUni.add(line["posUni"])
          if line["dep"] == "root":
             continue
          posHere = line["posUni"]
          posHead = sentence[line["head"]-1]["posUni"]
#          if line["dep"] == "nsubj" and posHere == "NOUN" and posHead == "VERB":
#              line["dep"] = "nsubj_"+str(sentenceHash)+"_"+str(line["index"])
          line["fine_dep"] = line["dep"]
          depsVocab.add(line["fine_dep"])
          dep = line["fine_dep"]
          # "DH" = dependent precedes its head, "HD" = dependent follows.
          direction = "HD" if line["head"] < line["index"] else "DH"
          key = (posHead, dep, posHere)
          keyWithDir = (dep, direction)
          orderTable[keyWithDir] = orderTable.get(keyWithDir, 0) + 1
          keys.add(key)
          distanceCounts[key] = distanceCounts.get(key,0.0) + 1.0
          distanceSum[key] = distanceSum.get(key,0.0) + abs(line["index"] - line["head"])
   # Add-one smoothed log-odds of dependent-first order per relation.
   dhLogits = {}
   for key in keys:
      hd = orderTable.get((key, "HD"), 0) + 1.0
      dh = orderTable.get((key, "DH"), 0) + 1.0
      dhLogit = log(dh) - log(hd)
      dhLogits[key] = dhLogit
   return dhLogits, vocab, keys, depsVocab
import torch.nn as nn
import torch
from torch.autograd import Variable
# "linearization_logprobability"
def recursivelyLinearize(sentence, position, result, batch):
   """Flatten the ordered dependency tree for one batch element.

   *position* is 1-based; dependents stored under "children_DH" are
   emitted before the head word and those under "children_HD" after it.
   Words (the dicts themselves) are appended to *result* in order.
   """
   node = sentence[position - 1]
   before = node["children_DH"][batch] if "children_DH" in node else ()
   after = node["children_HD"][batch] if "children_HD" in node else ()
   for dependent in before:
      recursivelyLinearize(sentence, dependent, result, batch)
   result.append(node)
   for dependent in after:
      recursivelyLinearize(sentence, dependent, result, batch)
import numpy.random
softmax_layer = torch.nn.Softmax(dim=1)
logsoftmax = torch.nn.LogSoftmax(dim=1)
def orderChildrenRelative(sentence, remainingChildren, reverseSoftmax, wordToDistanceLogits):
   """Sample an ordering of each batch element's children of one head.

   remainingChildren : per-batch lists of 1-based child positions.
   reverseSoftmax    : reverse the sampled order (used for the
                       head-precedes side so distance logits are shared).
   wordToDistanceLogits : optional per-word overrides for the
                       dependency-type distance weights.

   Returns (childrenLinearized, log_probabilities) where the second item
   holds one log-prob tensor per sampling step (masked to zero for batch
   elements that had no child left to place).
   """
   if max([len(x) for x in remainingChildren]) <= 1:
      return remainingChildren, []
   # All distinct child positions across the batch, in a fixed order.
   children = sorted(list(set(flatten(remainingChildren))))
   stoi_children = dict(list(zip(children, range(len(children)))))
   childrenLinearized = [[] for _ in range(BATCH_SIZE)]
   if len(children) == 0:
     return childrenLinearized, []
   # -1e11 masks children not present (or already placed) in a batch row.
   mask = torch.FloatTensor([[0 if child in remainingChildren[i] else -100000000000 for child in children] for i in range(BATCH_SIZE)])
   # Per-child logits: the per-word override wins over the relation weight.
   logits = torch.cat([distanceWeights[stoi_deps[sentence[x-1]["dependency_key"]]].view(1) if x not in wordToDistanceLogits else wordToDistanceLogits[x].view(1) for x in children])
   log_probabilities = []
   for _ in range(len(children)):
      masked_logits = logits.unsqueeze(0) + mask
      softmax = softmax_layer(masked_logits)
      distrib = torch.distributions.categorical.Categorical(probs=softmax)
      selected = distrib.sample()
      log_probability = distrib.log_prob(selected)
      # Rows with no open slot left contribute zero log-probability.
      stillHasOpen = (mask.max(dim=1)[0] > -1)
      log_probability = log_probability * stillHasOpen.float()
      log_probabilities.append(log_probability)
      selected_ = selected.cpu()
      mask_ = mask.cpu()
      for i in range(BATCH_SIZE):
         if mask_[i][selected_[i].item()] > -1:
            # Place the sampled child and close its slot for this row.
            childrenLinearized[i].append(children[selected_[i].item()])
            mask_[i][selected_[i].item()] = -100000000000
      mask = mask_
   if reverseSoftmax:
      for i in range(BATCH_SIZE):
         childrenLinearized[i] = childrenLinearized[i][::-1]
   return childrenLinearized, log_probabilities
def annotateLength(x):
    """Return the number of nodes in the subtree rooted at `x`.

    The size is memoized in x["length"] (the node itself counts as 1),
    so repeated calls are O(1) per node.
    """
    if "length" in x:
        return x["length"]
    x["length"] = 1 + sum(annotateLength(child) for child in x.get("children", []))
    return x["length"]
def flatten(y):
    """Concatenate one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sub in y for item in sub]
# Module-level registry mapping each encoding symbol to a stable integer id.
itos_encodings_ = {}
def itos_encodings(x):
    """Return a stable integer id for `x`, assigning ids 0, 1, 2, ... on first sight."""
    return itos_encodings_.setdefault(x, len(itos_encodings_))
BATCH_SIZE=12
def orderSentence(sentence, dhLogits, printThings):
   """Sample BATCH_SIZE linearizations of a dependency tree and score them.

   Builds the head->children tree from the CoNLL-style `sentence` (punctuation
   skipped), predicts direction/distance logits for "nsubj" tokens with the
   amortized conv net, samples a head-direction (DH/HD) per dependency and a
   relative order among siblings, then linearizes each batch element.

   Returns (dependencyLengths, summed log-probabilities per batch element),
   or (None, None) for degenerate single-word linearizations.
   NOTE: mutates `sentence` in place (adds "children", "length",
   "dependency_key", "children_DH"/"children_HD", "reordered_head").
   Python 2 code (print statements); sampling makes output stochastic.
   """
   root = None
   logits = [None]*len(sentence)
   logProbabilityGradient = 0
   sentenceHash = hash_(" ".join([x["word"] for x in sentence]))
   assert "children_decisions_logprobs" not in sentence[0]
   if "children" not in sentence[0]:
      sentence[0]["children"] = []
   # Attach each non-root, non-punctuation word to its head's children list.
   for line in sentence:
      if line["dep"] == "root":
         root = line["index"]
         continue
      if line["dep"].startswith("punct"):
         continue
      headIndex = line["head"]-1
      sentence[headIndex]["children"] = (sentence[headIndex].get("children", []) + [line])
   for line in sentence:
      annotateLength(line)
   # Amortized predictor runs only for subjects; other words fall back to
   # the per-dependency grammar weights below.
   subjects_or_objects = [x for x in sentence if x["dep"] in ["nsubj"]]
   if len(subjects_or_objects) > 0:
     # Encode each subject plus its head's (sorted) sibling context, pad to a
     # common length, embed, 1-D convolve, and max-pool into two logits.
     encodings = [[x["dep"], x["posUni"], x["length"]] + ["@"+str(z) for z in flatten(sorted([(y["dep"], y["posUni"], y["length"]) for y in sentence[x["head"]-1]["children"]]))] for x in subjects_or_objects]
     maxLength = max([len(x) for x in encodings])
     encodings = [x + ["PAD" for _ in range(maxLength-len(x))] for x in encodings]
     numerified = [[itos_encodings(x) for x in y] for y in encodings]
     embedded = amortized_embeddings(torch.LongTensor(numerified))
   #  print(embedded)
   #  print(embedded.size())
     convolved = amortized_conv(embedded.transpose(1,2))
   #  print(convolved.size())
     pooled = convolved.max(dim=2)[0]
     decision_logits = amortized_out(pooled)
     if random() < 0.05:
        print("LOGITS FROM MODEL", decision_logits)
     # Column 0: head-direction logit; column 1: distance logit.
     wordToDecisionLogits = {subjects_or_objects[i]["index"] : decision_logits[i,0] for i in range(len(subjects_or_objects))}
     wordToDistanceLogits = {subjects_or_objects[i]["index"] : decision_logits[i,1] for i in range(len(subjects_or_objects))}
   else:
     wordToDecisionLogits = {}
     wordToDistanceLogits = {}
   log_probabilities = []
   for line in sentence:
      for direction in ["DH", "HD"]:
         line["children_"+direction] = [[] for _ in range(BATCH_SIZE)]
   # Sample, per dependency and per batch element, whether the dependent
   # precedes (DH) or follows (HD) its head.
   for line in sentence:
      line["fine_dep"] = line["dep"]
      if line["fine_dep"] == "root":
         root = line["index"]
         continue
      if line["fine_dep"].startswith("punct"):
         continue
      posHead = sentence[line["head"]-1]["posUni"]
      posHere = line["posUni"]
#      if line["dep"] == "nsubj" and posHead == "VERB" and posHere == "NOUN":
#          line["dep"] = "nsubj_"+str(sentenceHash)+"_"+str(line["index"])
      line["fine_dep"] = line["dep"]
      key = (posHead, line["fine_dep"], posHere) if line["fine_dep"] != "root" else stoi_deps["root"]
      line["dependency_key"] = key
      if line["index"] in wordToDecisionLogits:
         dhLogit = wordToDecisionLogits[line["index"]]
      else:
         dhLogit = dhWeights[stoi_deps[key]]
      # Bernoulli(sigmoid(dhLogit)) sample; log-prob written in a form valid
      # for both outcomes via the (2*s - 1) sign trick.
      probability = 1/(1 + torch.exp(-dhLogit))
      dhSampled = torch.FloatTensor([1 if (random() < probability.data.numpy()) else 0 for _ in range(BATCH_SIZE)])
      log_probabilities.append(torch.log(1/(1 + torch.exp(- (2*dhSampled-1.0) * dhLogit))))
      #print(dhSampled)
      #print(log_probabilities[-1])
      if printThings:
         print "\t".join(map(str,["ORD", line["index"], (line["word"]+" ")[:10], (".".join(list(key)) + " ")[:22], line["head"], str(round(float(dhSampled[0]),4)), (str(float(probability[0]))+" ")[:8], (str(distanceWeights[stoi_deps[key]].data.numpy())+" ")[:8] ] ))
      headIndex = line["head"]-1
      for i in range(BATCH_SIZE):
         direction = "DH" if float(dhSampled[i]) > 0.5 else "HD"
         sentence[headIndex]["children_"+direction][i].append(line["index"])
   # Sample a relative order among each head's DH and HD dependents; the HD
   # side is sampled reversed (reverseSoftmax=True).
   for line in sentence:
      lengths = [len(line["children_DH"][i])+len(line["children_HD"][i]) for i in range(BATCH_SIZE)]
      assert min(lengths) == max(lengths)
      lengthsBefore = min(lengths)
      if len(line["children_DH"]) > 0:
         childrenLinearized, relativeOrderLogprobs = orderChildrenRelative(sentence, line["children_DH"], False, wordToDistanceLogits)
         log_probabilities += relativeOrderLogprobs
         line["children_DH"] = childrenLinearized
      if len(line["children_HD"]) > 0:
         childrenLinearized, relativeOrderLogprobs = orderChildrenRelative(sentence, line["children_HD"], True, wordToDistanceLogits)
         log_probabilities += relativeOrderLogprobs
         line["children_HD"] = childrenLinearized
      # Ordering must neither lose nor duplicate any dependent.
      lengths = [len(line["children_DH"][i])+len(line["children_HD"][i]) for i in range(BATCH_SIZE)]
      assert lengthsBefore >= min(lengths)
      assert min(lengths) == max(lengths)
      assert lengthsBefore == min(lengths)
   linearized = [[] for _ in range(BATCH_SIZE)]
   for i in range(BATCH_SIZE):
      recursivelyLinearize(sentence, root, linearized[i], i)
   if printThings or len(linearized[0]) == 0:
     print " ".join(map(lambda x:x["word"], sentence))
     print " ".join(map(lambda x:x["word"], linearized[0]))
     print " ".join(map(lambda x:x["word"], linearized[1]))
     print " ".join(map(lambda x:x["word"], linearized[2]))
   assert min([len(x) for x in linearized]) == max([len(x) for x in linearized])
   # store new dependency links
   # Dependency length = sum over arcs of |position(head) - position(dependent)|
   # in the sampled order; `moved` maps original 1-based index -> new position.
   dependencyLengths = [0 for _ in range(BATCH_SIZE)]
   for batch in range(BATCH_SIZE):
      moved = [None] * len(sentence)
      for i, x in enumerate(linearized[batch]):
         moved[x["index"]-1] = i
      for i,x in enumerate(linearized[batch]):
        if x["head"] == 0: # root
           x["reordered_head"] = 0
        else:
           dependencyLengths[batch] += abs(moved[x["head"]-1] - i)
           assert moved[x["head"]-1] != i
   if printThings:
     print(dependencyLengths)
#        x["reordered_head"] = 1+moved[x["head"]-1]
#   if True:
#     print " ".join(map(lambda x:x["word"], sentence))
#     print " ".join(map(lambda x:x["word"], linearized[0]))
#     print " ".join(map(lambda x:x["word"], linearized[1]))
   if len(linearized[0]) == 1:
      return None, None
 #  print(log_probabilities)
   return dependencyLengths, torch.stack(log_probabilities, dim=1).sum(dim=1)
dhLogits, vocab, vocab_deps, depsVocab = initializeOrderTable()
posUni = list(posUni)
itos_pos_uni = posUni
stoi_pos_uni = dict(zip(posUni, range(len(posUni))))
posFine = list(posFine)
itos_pos_ptb = posFine
stoi_pos_ptb = dict(zip(posFine, range(len(posFine))))
itos_pure_deps = sorted(list(depsVocab))
stoi_pure_deps = dict(zip(itos_pure_deps, range(len(itos_pure_deps))))
itos_deps = sorted(vocab_deps, key=lambda x:x[1])
stoi_deps = dict(zip(itos_deps, range(len(itos_deps))))
print itos_deps
relevantPath = "/u/scr/mhahn/deps/DLM_MEMORY_OPTIMIZED/locality_optimized_dlm/manual_output_funchead_fine_depl_funchead_perSent_perOcc/"
import os
dhWeights = Variable(torch.FloatTensor([0.0] * len(itos_deps)), requires_grad=True)
distanceWeights = Variable(torch.FloatTensor([0.0] * len(itos_deps)), requires_grad=True)
hasFoundKey = False
for i, key in enumerate(itos_deps):
dhLogits[key] = 0.0
if key == ("VERB", "obj", "NOUN"):
dhLogits[key] = (10.0 if random() < 0.5 else -10.0)
hasFoundKey = True
dhWeights.data[i] = dhLogits[key]
originalDistanceWeights[key] = 0.0 #random()
distanceWeights.data[i] = originalDistanceWeights[key]
assert hasFoundKey, itos_deps
assert abs(float(dhWeights.data.sum())) == 10, dhWeights.data.sum()
words = list(vocab.iteritems())
words = sorted(words, key = lambda x:x[1], reverse=True)
itos = map(lambda x:x[0], words)
stoi = dict(zip(itos, range(len(itos))))
if len(itos) > 6:
assert stoi[itos[5]] == 5
vocab_size = 50000
word_embeddings = torch.nn.Embedding(num_embeddings = vocab_size+3, embedding_dim = 1) #.cuda()
pos_u_embeddings = torch.nn.Embedding(num_embeddings = len(posUni)+3, embedding_dim = 1) #.cuda()
pos_p_embeddings = torch.nn.Embedding(num_embeddings = len(posFine)+3, embedding_dim=1) #.cuda()
baseline = nn.Linear(3, 1) #.cuda()
dropout = nn.Dropout(0.5) #.cuda()
amortized_embeddings = torch.nn.Embedding(300, 200)
amortized_conv = torch.nn.Conv1d(in_channels=200, out_channels=300, kernel_size=3)
amortized_conv.weight.data.zero_()
amortized_out = torch.nn.Linear(300, 2, bias=False)
relu = torch.nn.ReLU()
amortized_out.weight.data.zero_()
components_baseline = [word_embeddings, pos_u_embeddings, pos_p_embeddings, baseline] # rnn
components_amortized = [amortized_embeddings, amortized_conv, amortized_out]
def parameters():
  # NOTE(review): `components` does not appear to be defined anywhere in the
  # visible portion of this module, so calling this generator would raise
  # NameError. It is shadowed by the later redefinition of parameters()
  # (grammar + amortized parameters) below, making this version dead code —
  # confirm and consider deleting it.
  for c in components:
    for param in c.parameters():
       yield param
  yield dhWeights
  yield distanceWeights
def parameters_grammar():
    """Yield the two grammar parameter tensors: direction and distance weights."""
    for weights in (dhWeights, distanceWeights):
        yield weights
def parameters_baseline():
    """Yield every trainable parameter of the baseline modules."""
    for module in components_baseline:
        for weight in module.parameters():
            yield weight
def parameters_amortized():
    """Yield every trainable parameter of the amortized-predictor modules."""
    for module in components_amortized:
        for weight in module.parameters():
            yield weight
def parameters():
    """Yield all trainable parameters: grammar weights, then amortized weights.

    Shadows the earlier parameters() definition above.
    """
    for generator in (parameters_grammar(), parameters_amortized()):
        for param in generator:
            yield param
initrange = 0.1
word_embeddings.weight.data.uniform_(-initrange, initrange)
pos_u_embeddings.weight.data.uniform_(-initrange, initrange)
pos_p_embeddings.weight.data.uniform_(-initrange, initrange)
baseline.bias.data.fill_(0)
baseline.weight.data.uniform_(-initrange, initrange)
batchSize = 1
lr_lm = 0.1
crossEntropy = 10.0
def encodeWord(w):
    """Map a word to its embedding index: frequency rank + 3 when inside the
    vocabulary cap, otherwise the shared out-of-vocabulary index 1."""
    rank = stoi[w]
    return rank + 3 if rank < vocab_size else 1
optim_grammar = torch.optim.SGD(parameters_grammar(), lr=args.lr_grammar, momentum=args.momentum_grammar)
optim_amortized = torch.optim.SGD(parameters_amortized(), lr=args.lr_amortized, momentum=args.momentum_amortized)
import torch.nn.functional
counter = 0
dependencyLengthsLast = 1000
dependencyLengths = [1000]
dependencyLengthsPerEpoch = []
for epoch in range(5):
corpus = list(CorpusIterator(args.language, partition="together").iterator(rejectShortSentences = True))
shuffle(corpus)
dependencyLengthsPerEpoch.append(sum(dependencyLengths)/(0.0+len(dependencyLengths)))
dependencyLengths = []
for sentence in corpus:
if counter > 200000:
print "Quitting at counter "+str(counter)
quit()
counter += 1
printHere = (counter % 200 == 0)
current = [sentence]
assert len(current)==1
depLength, overallLogprobSum = orderSentence(current[0], dhLogits, printHere)
if depLength is None:
continue
# print(depLength, overallLogprobSum)
#
# if len(sentence) > 3 and len(sentence) < 5 and random() > 0.9:
# quit()
loss = (torch.FloatTensor(depLength) * overallLogprobSum).mean()
if printHere:
print ["AVERAGE DEPENDENCY LENGTH", crossEntropy, dependencyLengthsPerEpoch[-10:]]
dependencyLengths.append(sum(depLength)/(1.0*len(depLength)*len(sentence)))
crossEntropy = 0.99 * crossEntropy + 0.01 * dependencyLengths[-1]
optim_grammar.zero_grad()
optim_amortized.zero_grad()
loss.backward()
if printHere:
print "BACKWARD 3 "+__file__+" "+args.language+" "+str(myID)+" "+str(counter)
optim_grammar.step()
optim_amortized.step()
with open("output/"+__file__+"_"+args.language+"_"+str(myID), "w") as outFile:
print >> outFile, dependencyLengthsPerEpoch
print >> outFile, args
| [
"mhahn29@gmail.com"
] | mhahn29@gmail.com |
01e0c980fc1f652ee966283c3a6a783c0446f6fa | 2b85d16098aaae7c9aa0cc0e670b0a67df658d78 | /app/user/admin.py | 2d5d9594b3a908835a95ad51aee272ab59f8fe70 | [] | no_license | fuadaghazada/fampact-backend | 2c9ac7ba4b3e7efd5278f75ee32f7484a7409bcb | 6f0f9e3b7e2e544a4fe3a9bfa2451712e1dd1307 | refs/heads/master | 2023-03-08T09:21:33.728770 | 2021-02-21T13:09:14 | 2021-02-21T13:09:14 | 340,579,319 | 1 | 0 | null | 2021-02-20T06:43:59 | 2021-02-20T06:23:59 | null | UTF-8 | Python | false | false | 1,575 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext_lazy as _
from scoring.models import Score
from .models import User, Family
class UserAdminInline(admin.StackedInline):
    # Stacked inline so a family's member users can be edited directly on the
    # Family admin page; `extra = 1` shows one blank member form by default.
    model = User
    extra = 1
class UserAdmin(BaseUserAdmin):
    # Admin for the custom User model. Extends Django's base user admin with
    # family/role columns and a computed "score" column.
    list_display = (
        'first_name',
        'last_name',
        'email',
        'username',
        'verified_at',
        'family',
        'role',
        'score',
        'd_o_b'
    )
    list_filter = ('is_superuser', 'role')
    # Edit form layout (existing users).
    fieldsets = (
        (None, {'fields': ('email', 'username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'd_o_b')}),
        (_('Permissions'), {'fields': ('is_superuser', 'verified_at')}),
    )
    # Creation form layout (new users) — password entered twice.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2'),
        }),
    )
    search_fields = ('first_name', 'last_name', 'email')
    ordering = ('first_name', 'last_name', 'email',)
    filter_horizontal = ()
    def score(self, instance):
        # Computed list_display column: delegates to the Score manager.
        # NOTE(review): runs one query per row on the changelist — consider
        # annotating the queryset instead if this becomes slow.
        return Score.objects.calculate_user_score(instance)
class FamilyAdmin(admin.ModelAdmin):
    # Admin for families, with members editable inline and a "score" column
    # read off the (presumably annotated) leaderboard queryset.
    list_display = ('name', 'created_at', 'updated_at', 'score')
    inlines = [
        UserAdminInline
    ]
    def get_queryset(self, request):
        # NOTE(review): ignores `request` and replaces the default queryset
        # with Score.objects.public_leader_board_qs(); this presumably returns
        # Family rows annotated with `score` — verify, since a queryset over a
        # different model would break the changelist.
        return Score.objects.public_leader_board_qs()
    def score(self, instance):
        # Reads the annotation supplied by get_queryset above.
        return instance.score
admin.site.register(User, UserAdmin)
admin.site.register(Family, FamilyAdmin)
| [
"fuad.aghazada98@gmail.com"
] | fuad.aghazada98@gmail.com |
be585520b660cff0acce377f5f031333de1360bc | 148ac8d601369aaae6918cf0a55a4d4f5afb5e75 | /decision_tree.py | 9a755464423d1acfd379b30272aafd32ac13f513 | [] | no_license | MrVersatile007/ML-with-Rishi | a7800e27f5cbac9b68d526469beb380ed59bb029 | db76aa26ef5d349237d0fa1f0bdd677352dfb392 | refs/heads/main | 2023-06-02T15:53:00.902907 | 2021-06-27T15:15:01 | 2021-06-27T15:15:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 26 20:44:13 2021
@author: RISHBANS
"""
# Train a decision-tree classifier on a 2-feature dataset (experience, salary)
# and plot its decision boundary over the training points.
import pandas as pd
dataset = pd.read_csv("Apply_Job.csv")
# Columns 0-1 are the features; column 2 is the binary target.
X = dataset.iloc[:, 0:2].values
y = dataset.iloc[:, 2].values
from sklearn.model_selection import train_test_split
# Fixed random_state makes the 75/25 split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
from sklearn.tree import DecisionTreeClassifier
dt_c = DecisionTreeClassifier()
dt_c.fit(X_train, y_train)
pred_test = dt_c.predict(X_test)
from sklearn.metrics import accuracy_score
test_accuracy = accuracy_score(y_test, pred_test)
print(test_accuracy)
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
#Define Variables
clf = dt_c
# Mesh step size; smaller h gives a finer (and slower) boundary plot.
h = 0.1
X_plot, z_plot = X_train, y_train
#Standard Template to draw graph
x_min, x_max = X_plot[:, 0].min() - 1, X_plot[:, 0].max() + 1
y_min, y_max = X_plot[:, 1].min() - 1, X_plot[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh
Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z,
             alpha = 0.7, cmap = ListedColormap(('blue', 'red')))
# Scatter the training points, colored by class label.
for i, j in enumerate(np.unique(z_plot)):
    plt.scatter(X_plot[z_plot == j, 0], X_plot[z_plot == j, 1],
                c = ['blue', 'red'][i], cmap = ListedColormap(('blue', 'red')), label = j)
#X[:, 0], X[:, 1]
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('Decision Tree')
plt.xlabel('Exp in Year')
plt.ylabel('Salary in Lakh')
plt.legend()
"rishibansal02@gmail.com"
] | rishibansal02@gmail.com |
43cb66f4ba8525d2f2ac814c63860c31289b6fec | 8d161515037cd42dcd6404a068620c11b2597ae8 | /LeetCode/__Contest__/Day6/n_unique_with_zero_sum.py | 8c46dd891297268ec0052581421805d84ade21e0 | [] | no_license | YiseBoge/CompetitiveProgramming | 433526d18a7bfe754f9e1c8d20b2b234d7b7568c | 26b720623bcc1afe054101a13ca37b65ff518ce1 | refs/heads/master | 2021-12-03T17:52:00.048476 | 2021-11-23T17:40:14 | 2021-11-23T17:40:14 | 225,392,780 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | class Solution:
def sumZero(self, n: int) -> list:
half = n // 2
result = []
for i in range(1, half + 1):
result.append(i)
result.append(-i)
if n % 2 != 0:
result.append(0)
return result
def solution(l1):
    """Thin wrapper: delegate to Solution.sumZero for input size `l1`."""
    return Solution().sumZero(l1)
def main():
    """Demo driver: print the 10-element zero-sum list."""
    print(solution(10))
if __name__ == '__main__':
    main()
| [
"Ethiopia1!"
] | Ethiopia1! |
ab5a9fce2084bd6d7cef5bc3ab7c1ca5a3c03263 | d14193a5d565c4f8ad9d69a975ae24e62c26943a | /easystack_dashboard/dashboards/admin/volumes/panel.py | dce68793b44c8fbd1409a0af964fe708730b8fea | [
"Apache-2.0"
] | permissive | oksbsb/horizon-acc | b84783c5a81a2678195c45d31a24ca214b69562f | 9524f1952461c83db485d5d1702c350b158d7ce0 | refs/heads/master | 2020-03-19T03:20:52.007840 | 2017-11-14T09:17:04 | 2017-11-14T09:17:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
from easystack_dashboard.dashboards.admin import dashboard
class Volumes(horizon.Panel):
    # Admin-dashboard panel for block-storage volumes; visible only when the
    # 'openstack.services.volume' (Cinder) service permission is available.
    name = _("Volumes")
    slug = 'volumes'
    permissions = ('openstack.services.volume',)
dashboard.EasyStack_Admin.register(Volumes)
| [
"zhoub1986@aliyun.com"
] | zhoub1986@aliyun.com |
daf6eb50c0ad9b90364b4f70c429ab710dba025b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1661.py | bf7150e7305f8acd5fc0af408e39ef0d48d99b88 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | import numpy as np
with open('in.txt') as f:
lines = f.readlines()
lines = [l.split('\n')[0] for l in lines]
t = int(lines[0])
def count_speed(d, horses):
    """Average speed needed to reach distance `d` exactly when the lead horse does.

    `horses` is a list of (start_position, speed) tuples. Python 2 code: the
    `print` statements are statements, and diagnostics go to stdout.
    """
    d = float(d)
    if len(horses) == 1:
        horse = horses[0]
        t = (d - horse[0]) / horse[1]
        return d / t
    else:
        # Process horses front-to-back by start position, repeatedly merging
        # the two rearmost into one that moves at the slower speed from their
        # meeting point.
        horses = sorted(horses, key=lambda x: x[0])
        print horses
        t = 0
        while len(horses) > 1:
            h1 = horses[0]
            h2 = horses[1]
            if h2[1] == h1[1]:
                x = -1000000000000
            else:
                # Meeting point of the two horses' position lines.
                # NOTE(review): under Python 2, if positions/speeds are ints
                # this `/` is integer division and truncates — verify inputs
                # are floats or this loses precision.
                x = (h1[0] * h2[1] - h2[0] * h1[1]) / (h2[1] - h1[1])
            print x
            print h1[0]
            print h2[0]
            if x < min(h1[0], h2[0]) or x > d:
                print 'horses do not meet'
            else:
                horses[0] = (x, min(h1[1], h2[1]))
                t += (x - h1[0]) / h1[1]
            del horses[1]
            print 'time', t
            print 'horses left', horses
        horse = horses[0]
        t_last = (d - horse[0]) / horse[1]
        print 'last horse', horse
        print 'time', t_last
        return d / (t + t_last)
f = open('out.txt', 'w')
line_count = 1
i = 1
while True:
try:
d, n = lines[line_count].split(' ')
line_count += 1
d = int(d)
n = int(n)
print d, n
horses = []
for j in xrange(line_count, line_count + n):
ki, si = lines[j].split(' ')
line_count += 1
ki = int(ki)
si = int(si)
print ki, si
horses.append((ki, si))
speed = count_speed(d, horses)
print('Case #%s: %f \n' % (i, speed))
f.write('Case #%s: %f \n' % (i, speed))
i += 1
print '-----------------'
except:
break
f.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
85e55310a98606e59016c33c42ad15b467158b08 | 5c0a253bf2fb83db01abc99097871c965f4cf565 | /spark/crm/PROC_O_LNA_XDXT_IND_EDUCATION.py | 1bea83ed976210cbffd85a9d0c063cf7a6807107 | [] | no_license | airuibel/python-1 | 3b16553ede9d069ec56efbb12a89a4de6917a447 | 94f387e2d406fab2128bcfffce6146da720b2ccc | refs/heads/master | 2020-07-05T15:43:00.957221 | 2017-09-17T14:05:48 | 2017-09-17T14:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,241 | py | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_LNA_XDXT_IND_EDUCATION').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#处理需要使用的日期
etl_date = sys.argv[1]
#etl日期
V_DT = etl_date
#上一日日期
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#月初日期
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#上月末日期
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10位日期
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
O_CI_XDXT_IND_EDUCATION = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_IND_EDUCATION/*')
O_CI_XDXT_IND_EDUCATION.registerTempTable("O_CI_XDXT_IND_EDUCATION")
#任务[12] 001-01::
V_STEP = V_STEP + 1
#先删除原表所有数据
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_IND_EDUCATION/*.parquet")
#从昨天备表复制一份全量过来
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_IND_EDUCATION_BK/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_IND_EDUCATION/"+V_DT+".parquet")
F_CI_XDXT_IND_EDUCATION = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_IND_EDUCATION/*')
F_CI_XDXT_IND_EDUCATION.registerTempTable("F_CI_XDXT_IND_EDUCATION")
sql = """
SELECT A.CUSTOMERID AS CUSTOMERID
,A.SERIALNO AS SERIALNO
,A.BEGINDATE AS BEGINDATE
,A.ENDDATE AS ENDDATE
,A.SCHOOL AS SCHOOL
,A.DEPARTMENT AS DEPARTMENT
,A.SPECIALTY AS SPECIALTY
,A.DEGREE AS DEGREE
,A.EDUEXPERIENCE AS EDUEXPERIENCE
,A.SCHOOLLENGTH AS SCHOOLLENGTH
,A.DIPLOMANO AS DIPLOMANO
,A.DEGREENO AS DEGREENO
,A.INPUTORGID AS INPUTORGID
,A.INPUTUSERID AS INPUTUSERID
,A.INPUTDATE AS INPUTDATE
,A.REMARK AS REMARK
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'LNA' AS ODS_SYS_ID
FROM O_CI_XDXT_IND_EDUCATION A --个人学业履历
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_IND_EDUCATION_INNTMP1 = sqlContext.sql(sql)
F_CI_XDXT_IND_EDUCATION_INNTMP1.registerTempTable("F_CI_XDXT_IND_EDUCATION_INNTMP1")
#F_CI_XDXT_IND_EDUCATION = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_IND_EDUCATION/*')
#F_CI_XDXT_IND_EDUCATION.registerTempTable("F_CI_XDXT_IND_EDUCATION")
sql = """
SELECT DST.CUSTOMERID --客户编号:src.CUSTOMERID
,DST.SERIALNO --流水号:src.SERIALNO
,DST.BEGINDATE --开始日期:src.BEGINDATE
,DST.ENDDATE --结束日期:src.ENDDATE
,DST.SCHOOL --所在学校:src.SCHOOL
,DST.DEPARTMENT --所在院系:src.DEPARTMENT
,DST.SPECIALTY --专业:src.SPECIALTY
,DST.DEGREE --最高学位:src.DEGREE
,DST.EDUEXPERIENCE --最高学历:src.EDUEXPERIENCE
,DST.SCHOOLLENGTH --学制:src.SCHOOLLENGTH
,DST.DIPLOMANO --学历证书号:src.DIPLOMANO
,DST.DEGREENO --学位证书号:src.DEGREENO
,DST.INPUTORGID --登记单位:src.INPUTORGID
,DST.INPUTUSERID --登记人:src.INPUTUSERID
,DST.INPUTDATE --登记日期:src.INPUTDATE
,DST.REMARK --备注:src.REMARK
,DST.FR_ID --法人号:src.FR_ID
,DST.ODS_ST_DATE --系统平台日期:src.ODS_ST_DATE
,DST.ODS_SYS_ID --系统代码:src.ODS_SYS_ID
FROM F_CI_XDXT_IND_EDUCATION DST
LEFT JOIN F_CI_XDXT_IND_EDUCATION_INNTMP1 SRC
ON SRC.CUSTOMERID = DST.CUSTOMERID
AND SRC.SERIALNO = DST.SERIALNO
WHERE SRC.CUSTOMERID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_IND_EDUCATION_INNTMP2 = sqlContext.sql(sql)
dfn="F_CI_XDXT_IND_EDUCATION/"+V_DT+".parquet"
F_CI_XDXT_IND_EDUCATION_INNTMP2=F_CI_XDXT_IND_EDUCATION_INNTMP2.unionAll(F_CI_XDXT_IND_EDUCATION_INNTMP1)
F_CI_XDXT_IND_EDUCATION_INNTMP1.cache()
F_CI_XDXT_IND_EDUCATION_INNTMP2.cache()
nrowsi = F_CI_XDXT_IND_EDUCATION_INNTMP1.count()
nrowsa = F_CI_XDXT_IND_EDUCATION_INNTMP2.count()
F_CI_XDXT_IND_EDUCATION_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
F_CI_XDXT_IND_EDUCATION_INNTMP1.unpersist()
F_CI_XDXT_IND_EDUCATION_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_IND_EDUCATION lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
ret = os.system("hdfs dfs -mv /"+dbname+"/F_CI_XDXT_IND_EDUCATION/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_IND_EDUCATION_BK/")
#先删除备表当天数据
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_IND_EDUCATION_BK/"+V_DT+".parquet")
#从当天原表复制一份全量到备表
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_IND_EDUCATION/"+V_DT+".parquet /"+dbname+"/F_CI_XDXT_IND_EDUCATION_BK/"+V_DT+".parquet")
| [
"cysuncn@126.com"
] | cysuncn@126.com |
b8e1f6454943395c9f5d4751831e30f28927d1b3 | 43e5657beca9836215e43f16da8f274e613ccb18 | /experiment_impact_tracker/emissions/common.py | d833313e4c68463176009a6397e92725ec32b41b | [
"MIT"
] | permissive | ml-lab/experiment-impact-tracker | eaa99b8ef7efec8c624ccc84e19602b7df8e8241 | 7017ed2c88526c2323603b254f8b81710db23ffa | refs/heads/master | 2022-04-01T20:03:19.736764 | 2020-02-14T02:16:16 | 2020-02-14T02:16:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | import experiment_impact_tracker.emissions.us_ca_parser as us_ca_parser
import numpy
REALTIME_REGIONS = {
"US-CA" : us_ca_parser
}
def is_capable_realtime_carbon_intensity(*args, region=None, **kwargs):
    """Return True when a realtime carbon-intensity parser is registered for `region`.

    Extra positional/keyword arguments are accepted and ignored so the function
    can share a call signature with the other tracker callbacks in this module.
    """
    # Membership test directly on the dict; the original `in list(d.keys())`
    # built a throwaway list for no benefit.
    return region in REALTIME_REGIONS
def get_realtime_carbon_source(region):
    """Return the data-source description from `region`'s registered parser."""
    parser = REALTIME_REGIONS[region]
    return parser.get_realtime_carbon_source()
def get_realtime_carbon(*args, **kwargs):
    """Fetch the current grid carbon intensity for kwargs['region'].

    Returns {"realtime_carbon_intensity": <value>} on success, or
    {"realtime_carbon_intensity": "n/a"} when the region has no registered
    parser, the fetch fails, or the reported value is NaN.

    Raises:
        ValueError: if 'region' was not supplied as a keyword argument.
    """
    if 'region' not in kwargs:
        raise ValueError("region was not passed to function")
    unavailable = {
        "realtime_carbon_intensity" : "n/a"
    }
    try:
        carbon_intensity = REALTIME_REGIONS[kwargs['region']].fetch_supply()[0]['carbon_intensity']
        if numpy.isnan(carbon_intensity):
            return unavailable
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; any lookup/fetch/parse failure means "no realtime data".
        return unavailable
    return {
        "realtime_carbon_intensity" : carbon_intensity
    }
| [
"peter.henderson@mail.mcgill.ca"
] | peter.henderson@mail.mcgill.ca |
9d3754884cbbf3bb4fc1923201fceb1d8b22683c | adaa06e70db86a395c76fe9945c04381321ac127 | /neurokernel/routing_table.py | 6bf5a28e9cd5656d5632a3ad5576e1b51544e456 | [
"BSD-3-Clause"
] | permissive | hihihippp/neurokernel | d9a45dbc26ec1b307c8be696469a0e20db809fe2 | f25252a2a2a4443f16ad25b7c5b5eff943f36440 | refs/heads/master | 2020-12-25T23:47:10.899818 | 2014-03-30T18:07:58 | 2014-03-30T18:07:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,485 | py | #!/usr/bin/env python
"""
Routing table class.
"""
import numpy as np
import la
class RoutingTable(object):
    """
    Routing table.

    A square boolean connectivity matrix stored as a labeled array
    (la.larry); rows and columns share one label list, and entry
    [row, col] != 0 means "row is connected to col". Labels are added
    lazily on first assignment.

    Parameters
    ----------
    t : la.larry
        Labeled array to use when initializing table.

    Notes
    -----
    Inserting rows or columns of values is not currently supported.
    The initial array must be 2D and possess the same list labels for
    both of its dimensions.
    """
    def __init__(self, t=None):
        if t is None:
            self._data = None
        else:
            try:
                # NOTE(review): these two comparisons are bare expressions
                # whose results are discarded — they only catch exceptions
                # raised by the attribute accesses. `assert` statements were
                # probably intended here.
                type(t) == la.larry
                t.label[0] == t.label
            except:
                raise ValueError('invalid initial array')
            else:
                self._data = t.copy()
    def __setitem__(self, key, value):
        # key must be a (row_id, col_id) pair; value is coerced to int.
        if type(key) == slice:
            raise ValueError('assignment by slice not supported')
        if len(key) != 2:
            raise KeyError('invalid key')
        row, col = key
        if not self._data:
            # First assignment: create a square zero matrix labeled with the
            # distinct ids in the key.
            label = list(set(key))
            self._data = la.larry(np.zeros(2*(len(label),), type(value)),
                                  [label, label])
        else:
            # If either the row or column identifier isn't in the
            # current list of labels, add it:
            for k in key:
                # Create new row:
                if k not in self._data.label[0]:
                    self._data = self._data.merge(la.larry([[0]*len(self._data.label[1])],
                                                           [[k], self._data.label[1]]))
                # Create new column:
                if k not in self._data.label[1]:
                    self._data = self._data.merge(la.larry([[0]]*len(self._data.label[0]),
                                                           [self._data.label[0], [k]]))
        self._data.set([row, col], int(value))
    def __getitem__(self, key):
        # Index values referring to labels must be wrapped with lists:
        reformat_slice = lambda s: slice(s.start if s.start is None else [s.start],
                                         s.stop if s.stop is None else [s.stop],
                                         s.step)
        if type(key) == tuple:
            key = tuple([reformat_slice(k) if type(k) == slice else [k] for k in key])
        elif type(key) == slice:
            key = reformat_slice(key)
        else:
            key = [key]
        return self._data.lix[key]
    def __copy__(self):
        # RoutingTable.__init__ copies the larry, so this is a deep-enough copy.
        return RoutingTable(self._data)
    copy = __copy__
    @property
    def shape(self):
        """
        Shape of table.
        """
        return self._data.shape
    @property
    def ids(self):
        """
        IDs currently in routing table.
        """
        if self._data is None:
            return []
        else:
            return self._data.label[0]
    @property
    def coords(self):
        """
        List of coordinate tuples of all nonzero table entries.
        """
        if self._data is None:
            return []
        else:
            return [tuple(x[0:2]) for x in self._data.totuples() if x[2]]
    def row_ids(self, col_id):
        """
        Row IDs connected to a column ID.
        """
        return [self[:, col_id].label[0][i] for i, e in \
                enumerate(self[:, col_id]) if e != 0]
    def all_row_ids(self):
        """
        All row IDs connected to column IDs.
        """
        # NOTE(review): np.bool was removed in NumPy 1.24 — use bool or
        # np.bool_ if this is ever run against a modern NumPy.
        return [self._data.label[0][i] for i, e in \
                enumerate(np.sum(self._data.x, 1, np.bool)) if e]
    def col_ids(self, row_id):
        """
        Column IDs connected to a row ID.
        """
        return [self[row_id, :].label[0][i] for i, e in \
                enumerate(self[row_id, :]) if e != 0]
    def all_col_ids(self):
        """
        All column IDs connected to row IDs.
        """
        return [self._data.label[0][i] for i, e in \
                enumerate(np.sum(self._data.x, 0, np.bool)) if e]
    def __repr__(self):
        if self._data is None:
            return 'empty'
        else:
            t = 'ids: ' + repr(self.ids) + '\n' + \
                self._data.getx().__repr__()
            return t
if __name__ == '__main__':
from unittest import main, TestCase
class test_routingtable(TestCase):
def setUp(self):
self.coords_orig = [('a', 'b'), ('b', 'c')]
self.ids_orig = set([i[0] for i in self.coords_orig]+\
[i[1] for i in self.coords_orig])
self.t = RoutingTable()
for c in self.coords_orig:
self.t[c[0], c[1]] = 1
def test_shape(self):
n = len(self.ids_orig)
assert self.t.shape == (n, n)
def test_ids(self):
assert set(self.t.ids) == self.ids_orig
def test_coords(self):
assert set(self.t.coords) == set(self.coords_orig)
def test_all_row_ids(self):
assert set(self.t.all_row_ids()) == \
set([i[0] for i in self.coords_orig])
def test_all_col_ids(self):
assert set(self.t.all_col_ids()) == \
set([i[1] for i in self.coords_orig])
def test_row_ids(self):
for i in self.coords_orig:
assert i[0] in self.t.row_ids(i[1])
def test_col_ids(self):
for i in self.coords_orig:
assert i[1] in self.t.col_ids(i[0])
main()
| [
"lev@columbia.edu"
] | lev@columbia.edu |
c4487150dcd4a86c20e4becad5574f8dd4551904 | a7587f813492163433202e244df2237c9993a1a1 | /Cart/context_processors.py | 06195e7f77f9cfb23960d2a52fa0e5c627c1280f | [] | no_license | kamran1231/E-COM-WEBSITE-2021 | 3a10bc0059f4d29fc52ee029e4919d4f965174c6 | 32214468cf716cc312a63f6346b8c844f720abda | refs/heads/master | 2023-06-01T03:18:03.137405 | 2021-07-04T14:20:16 | 2021-07-04T14:20:16 | 381,634,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from .models import CartItem,Carts
from .views import _cart_id
def cart_quantity(request):
    """Template context processor exposing `cart_count`, the total quantity of
    items in the current session's cart (skipped for admin URLs)."""
    cart_count = 0
    if 'admin' in request.path:
        return {}
    else:
        try:
            # Session-keyed cart lookup. NOTE(review): filter() returns a
            # queryset, and `cart=cart[:1]` filters against a sliced queryset
            # (subquery) — presumably .get() / a direct FK was intended here;
            # verify against how _cart_id and Carts are used elsewhere.
            cart = Carts.objects.filter(cart_id=_cart_id(request))
            cart_items = CartItem.objects.all().filter(cart=cart[:1])
            for cart_item in cart_items:
                cart_count += cart_item.quantity
        # NOTE(review): filter() never raises DoesNotExist, so this branch
        # appears unreachable — confirm and adjust if .get() is adopted.
        except Carts.DoesNotExist:
            cart_count = 0
        return dict(cart_count=cart_count)
"khanbrother805@gmail.com"
] | khanbrother805@gmail.com |
42e83e33a8338c5cb56b9fb7dea82cb9285197d7 | b54b6168ba35ce6ad34f5a26b5a4a3ab8afa124a | /kratos_3_0_1/applications/incompressible_fluid_application/python_scripts/monolithic_solver_lagrangian_compressible_two_fluids_3d_tube.py | 3701ce5bc582106c68ccd276d5568c95ced27d56 | [] | no_license | svn2github/kratos | e2f3673db1d176896929b6e841c611932d6b9b63 | 96aa8004f145fff5ca6c521595cddf6585f9eccb | refs/heads/master | 2020-04-04T03:56:50.018938 | 2017-02-12T20:34:24 | 2017-02-12T20:34:24 | 54,662,269 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23,624 | py | #importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.PFEMApplication import *
from KratosMultiphysics.MeshingApplication import *
CheckForPreviousImport()
def AddVariables(model_part):
model_part.AddNodalSolutionStepVariable(VELOCITY);
model_part.AddNodalSolutionStepVariable(ACCELERATION);
model_part.AddNodalSolutionStepVariable(MESH_VELOCITY);
model_part.AddNodalSolutionStepVariable(PRESSURE);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_DT);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_DT);
model_part.AddNodalSolutionStepVariable(IS_FLUID);
model_part.AddNodalSolutionStepVariable(IS_WATER);
model_part.AddNodalSolutionStepVariable(IS_VISITED);
model_part.AddNodalSolutionStepVariable(IS_POROUS);
model_part.AddNodalSolutionStepVariable(IS_STRUCTURE);
model_part.AddNodalSolutionStepVariable(IS_FREE_SURFACE);
model_part.AddNodalSolutionStepVariable(IS_INTERFACE);
model_part.AddNodalSolutionStepVariable(IS_BOUNDARY);
model_part.AddNodalSolutionStepVariable(ERASE_FLAG);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
model_part.AddNodalSolutionStepVariable(VISCOSITY);
model_part.AddNodalSolutionStepVariable(VISCOSITY_AIR);
model_part.AddNodalSolutionStepVariable(VISCOSITY_WATER);
model_part.AddNodalSolutionStepVariable(DENSITY);
model_part.AddNodalSolutionStepVariable(DENSITY_AIR);
model_part.AddNodalSolutionStepVariable(DENSITY_WATER);
model_part.AddNodalSolutionStepVariable(AIR_SOUND_VELOCITY);
model_part.AddNodalSolutionStepVariable(WATER_SOUND_VELOCITY);
model_part.AddNodalSolutionStepVariable(SOUND_VELOCITY);
model_part.AddNodalSolutionStepVariable(BODY_FORCE);
model_part.AddNodalSolutionStepVariable(NODAL_AREA);
model_part.AddNodalSolutionStepVariable(NODAL_H);
model_part.AddNodalSolutionStepVariable(ADVPROJ);
model_part.AddNodalSolutionStepVariable(DIVPROJ);
model_part.AddNodalSolutionStepVariable(THAWONE);
model_part.AddNodalSolutionStepVariable(THAWTWO);
model_part.AddNodalSolutionStepVariable(REACTION);
model_part.AddNodalSolutionStepVariable(REACTION_WATER_PRESSURE);
model_part.AddNodalSolutionStepVariable(EXTERNAL_PRESSURE);
model_part.AddNodalSolutionStepVariable(ARRHENIUS);
model_part.AddNodalSolutionStepVariable(DISTANCE);
model_part.AddNodalSolutionStepVariable(AUX_INDEX);
model_part.AddNodalSolutionStepVariable(FLAG_VARIABLE);
model_part.AddNodalSolutionStepVariable(NORMAL);
print "variables for monolithic solver lagrangian compressible 3D solution added correctly"
def AddDofs(model_part):
for node in model_part.Nodes:
#adding dofs
node.AddDof(VELOCITY_X,REACTION_X);
node.AddDof(VELOCITY_Y,REACTION_Y);
node.AddDof(VELOCITY_Z,REACTION_Z);
node.AddDof(WATER_PRESSURE,REACTION_WATER_PRESSURE);
node.AddDof(AIR_PRESSURE,REACTION_AIR_PRESSURE);
print "variables for monolithic solver lagrangian compressible 3D solution added correctly"
class MonolithicSolver:
    """Monolithic Lagrangian solver for a compressible two-fluid (air/water)
    3D PFEM problem with an embedded structure model part.

    Drives a Newton-Raphson strategy with a Bossak predictor-corrector time
    scheme, remeshes with TetGen every step, and colours elements as
    air/water before each solve. Written for Python 2 / classic Kratos API.
    """
    #######################################################################
    def __init__(self,model_part,structure_model_part,domain_size,box_corner1,box_corner2):
        """Set up schemes, solvers, meshing utilities and flag free-surface nodes.

        box_corner1/box_corner2 define the runtime bounding box used to
        erase nodes that leave the domain.
        """
        self.model_part = model_part
        self.structure_model_part = structure_model_part
        self.alpha = -0.1
        self.move_mesh_strategy = 2
        self.time_scheme = ResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible( self.alpha,self.move_mesh_strategy )
        #definition of the solvers
##        self.linear_solver =  SkylineLUFactorizationSolver()
##        self.linear_solver =SuperLUSolver()
        pPrecond = DiagonalPreconditioner()
##        pPrecond = ILU0Preconditioner()  # has problems (original note: "TIENE PROBLEMAS")
        self.linear_solver = BICGSTABSolver(1e-6, 5000,pPrecond)
##        self.linear_solver = CGSolver(1e-6, 5000,pPrecond)
        #definition of the convergence criteria (velocity/pressure tolerances)
        self.conv_criteria = UPCriteria(1e-7,1e-9,1e-7,1e-9)
#        self.conv_criteria = UPCriteria(1e-12,1e-14,1e-15,1e-17)
        self.max_iter = 15
        self.SetDivided = ElemBasedBCUtilities(self.model_part)
        # Chooses per element between the predictor-corrector and the plain
        # compressible ASGS 3D formulation.
        self.ChooseElement = ChooseElementProcess(self.model_part, 3, "ASGSCOMPPRDC3D", "ASGSCompressible3D")
        #default settings
        self.echo_level = 1
        self.CalculateReactionFlag = False
        self.ReformDofSetAtEachStep = True
        self.CalculateNormDxFlag = True
        self.MoveMeshFlag = True
        self.remeshing_flag = True
        self.domain_size = domain_size
        ####MESH CHANGES
#        self.mark_close_nodes_process = MarkCloseNodesProcess(model_part);
        self.PfemUtils = PfemUtils()
        self.MeshMover= MoveMeshProcess(self.model_part);
        self.node_erase_process = NodeEraseProcess(self.model_part);
#        self.Mesher = TetGenPfemModeler()
        self.Mesher = TetGenPfemRefineFace()
        self.neigh_finder = FindNodalNeighboursProcess(self.model_part,9,18)
        self.elem_neighbor_finder = FindElementalNeighboursProcess(self.model_part, 3, 20)
        ###Two model part accessories
        self.save_structure_model_part_process = SaveStructureModelPartProcess();
        self.save_structure_conditions_process = SaveStructureConditionsProcess();
        self.merge_in_one_model_parts_process = MergeInOneModelPartsProcess();
        # Very large alpha effectively disables alpha-shape node removal.
        self.alpha_shape = 1000000.0
        self.h_factor = 0.5
        #assign IS_FLUID to all nodes
##        for node in self.model_part.Nodes:
##            node.SetSolutionStepValue(IS_FLUID,0,1.0)
        # Detect free-surface nodes: boundary nodes that are not structure.
        for node in self.model_part.Nodes:
          #  print node.GetSolutionStepValue(IS_FREE_SURFACE)
            if (node.GetSolutionStepValue(IS_BOUNDARY)==1 and node.GetSolutionStepValue(IS_STRUCTURE)!=1):
                node.SetSolutionStepValue(IS_FREE_SURFACE,0,1.0)
        # Neighbour information is needed for the alpha-shape pass.
        print self.model_part
        (self.neigh_finder).Execute();
        print "MMMMMMMMMMMMMMMMMMM NEIGHBOR ARE FOUND NNNNNNNNNNNNNNNN"
        Hfinder = FindNodalHProcess(self.model_part);
        Hfinder.Execute();
        print "OOOOOOOOOOOOOOOOOOOOOOOO Hs ARE calculated PPPPPPPPPPPPPPPPPPPPPPPPPP"
        #runtime bounding box: nodes leaving it are erased
        self.box_corner1 = box_corner1
        self.box_corner2 = box_corner2
    #######################################################################
    def Initialize(self,output_time_increment):
        """Extract the structure sub-model-part, mark interface nodes,
        build the Newton-Raphson strategy and set the output cadence."""
        #creating the solution strategy
        #take structure part
        (self.save_structure_model_part_process).SaveStructureModelPart(self.model_part, self.structure_model_part, self.domain_size);
        (self.save_structure_conditions_process).SaveStructureConditions(self.model_part, self.structure_model_part, self.domain_size);
        # All nodes of structure (triangle) elements become interface nodes.
        for elem in (self.structure_model_part).Elements:
            elem.GetNode(0).SetSolutionStepValue(IS_INTERFACE,0,1.0)
            elem.GetNode(1).SetSolutionStepValue(IS_INTERFACE,0,1.0)
            elem.GetNode(2).SetSolutionStepValue(IS_INTERFACE,0,1.0)
        self.solver = ResidualBasedNewtonRaphsonStrategy(self.model_part,self.time_scheme,self.linear_solver,self.conv_criteria,self.max_iter,self.CalculateReactionFlag, self.ReformDofSetAtEachStep,self.MoveMeshFlag)
        (self.solver).SetEchoLevel(self.echo_level)
        #time increment for output
        self.output_time_increment = output_time_increment
        self.next_output_time = self.output_time_increment
#        (self.neigh_finder).Execute();
        (self.neigh_finder).ClearNeighbours();
        (self.neigh_finder).Execute();
        # Calculate neighbours of the shell (structure) elements.
        (FindElementalNeighboursProcess(self.structure_model_part, 2, 20)).ClearNeighbours()
        (FindElementalNeighboursProcess(self.structure_model_part, 2, 20)).Execute()
    #######################################################################
    def Solve(self,time,gid_io):
        """One time step: remesh, solve the nonlinear system, move lonely
        nodes, clear the solver and write output if due.

        The bare numeric prints below are leftover debug trace markers.
        """
##        (self.neigh_finder).Execute();
##        (self.solver).Solve()
##        (self.solver).Clear()
##        (self.PfemUtils).MarkOuterNodes(self.box_corner1,self.box_corner2,(self.model_part).Nodes );
##        #(self.PfemUtils).MarkExcessivelyCloseNodes((self.model_part).Nodes, .05)
##        (self.node_erase_process).Execute();
##        self.Remesh()
##        self.OutputStep(time,gid_io)
      #  self.CalculateDistanceAndDiviedSet(3);
        print "143"
       # self.DistToH()
        self.Remesh()
        #(FindElementalNeighboursProcess(self.structure_model_part, 2, 20)).ClearNeighbours()
        #(FindElementalNeighboursProcess(self.structure_model_part, 2, 20)).Execute()
        print "145"
        (self.solver).Solve()
        print "a47"
        (self.PfemUtils).MoveLonelyNodes(self.model_part)
        (self.solver).Clear()
        print "149"
        self.OutputStep(time,gid_io)
    #######################################################################
    def EstimateDeltaTime(self,min_dt,max_dt):
        """Return a time-step estimate clamped to [min_dt, max_dt]."""
        print "Estimating delta time"
        calc_dt=(self.PfemUtils).EstimateDeltaTime(min_dt,max_dt,self.model_part)
        #cfl_dt=(self.PfemUtils).CFLdeltaT(1.0,max_dt,self.model_part)
        #max_dt = cfl_dt
        #print"CFL_CHOICE",cfl_dt
        #calc_dt=(self.PfemUtils).ExactDtEstimate(max_dt,self.model_part)
       # print "calculated dt"
        return calc_dt
#    def EstimateDeltaTime(self,min_dt,max_dt):
#        print "Estimating delta time"
#        return (self.UlfUtils).EstimateDeltaTime(max_dt,domain_size)
    #######################################################################
    def SetEchoLevel(self,level):
        """Forward the verbosity level to the underlying strategy."""
        (self.solver).SetEchoLevel(level)
##    ########################################################################
##    def Remesh(self):
##
##        if (self.remeshing_flag==True):
##            (self.Mesher).ReGenerateMesh("ASGS2D", "Condition2D",self.model_part,self.node_erase_process,True, True, self.alpha_shape, self.h_factor)
####            (self.Mesher).ReGenerateMesh("ASGS2D", "Condition2D",self.model_part,self.node_erase_process,True, False, self.alpha_shape, self.h_factor)
##
##        #calculating fluid neighbours before applying boundary conditions
##        (self.neigh_finder).Execute();
    ########################################################################
    def Remesh(self):
        """Mark nodes for erasure, regenerate the tetrahedral mesh, recolour
        air/water elements, pick element formulations and merge the
        structure part back into the fluid model part."""
        if (self.remeshing_flag==True):
           # (self.PfemUtils).MoveLonelyNodes(self.model_part)
            #(self.MeshMover).Execute();
            print self.box_corner1
            (self.PfemUtils).MarkOuterNodes(self.box_corner1,self.box_corner2,(self.model_part).Nodes )
            #(self.PfemUtils).MarkNodesTouchingWall(self.model_part,3, .05)
            print "after nodes touching wall"
            (self.PfemUtils).MarkExcessivelyCloseNodes((self.model_part).Nodes, 0.5)
            print "after excessively close nodes"
   #         (self.PfemUtils).MarkNodesTouchingInterface(self.model_part,3, .1)
            print "after MarkNodesTouchingInterface"
            ###### FIND NEIGHBOUR ELEMENTS AND COLORing
            self.CalculateFluidNeighborsMixedModelPartAndColor()
           # (self.elem_neighbor_finder).ClearNeighbours()
           # print "after ClearNeighbours()"
           # (self.elem_neighbor_finder).Execute()
           # print "after Execute() neighbors"
           # (self.PfemUtils).ColourAirWaterElement(self.model_part,3)
           # print "after Coloring"
            (self.neigh_finder).ClearNeighbours();
            (self.neigh_finder).Execute();
            # Calculate neighbours of the shell (structure) elements.
            #(FindElementalNeighboursProcess(self.structure_model_part, 2, 20)).ClearNeighbours()
            #(FindElementalNeighboursProcess(self.structure_model_part, 2, 20)).Execute()
            print"Before remesh"
            print self.structure_model_part
            (self.Mesher).ReGenerateMesh("ASGSCompressible3D", "Condition3D",self.model_part,(self.structure_model_part).Elements,self.node_erase_process,True, True, self.alpha_shape, self.h_factor)
            #(self.Mesher).ReGenerateMesh("ASGSCompressible3D", "Condition3D",self.model_part,self.node_erase_process,False, True, self.alpha_shape, self.h_factor)
##            (self.Mesher).ReGenerateMesh("ASGS2D", "Condition2D",self.model_part,self.node_erase_process,True, False, self.alpha_shape, self.h_factor)
            print"after remesh"
            print self.model_part
            #calculating fluid neighbours before applying boundary conditions
            (FindElementalNeighboursProcess(self.model_part, 3, 20)).ClearNeighbours()
            (FindElementalNeighboursProcess(self.model_part, 3, 20)).Execute()
            #(self.neigh_finder).Execute();
           # (self.PfemUtils).ColourAirWaterElement(self.model_part,3)
            self.CalculateFluidNeighborsMixedModelPartAndColor()
           # print "<<<<<<<<<<<<<<<<Colouring is done automatically>>>>>>>>>>>>>>>>>>>>>>>>><<"
           # (self.PfemUtils).InterfaceDetecting(self.model_part,3, .9)
            (self.ChooseElement).Execute();
            print "after choose"
            #calculating fluid neighbours before applying boundary conditions
            #(FindElementalNeighboursProcess(self.structure_model_part, 2, 20)).ClearNeighbours()
            #(FindElementalNeighboursProcess(self.structure_model_part, 2, 20)).Execute()
           # (self.PfemUtils).ApplyBoundaryConditions(self.model_part,3);  # needs neighbours!
            (self.PfemUtils).IdentifyFluidNodes(self.model_part);
            ## HERE WE ARE ADDING STRUCTURE_MODEL_PART TO MODEL_PART
            print ">>>>>>>>>>>>>>>>>>><<<<<<Before Merge<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
            print self.model_part
            print self.structure_model_part
            (self.merge_in_one_model_parts_process).MergeParts(self.model_part, self.structure_model_part)
            print ">>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<merge is done>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<"
            print self.model_part
            (FindNodalNeighboursProcess(self.model_part,9,18)).ClearNeighbours()
            (FindNodalNeighboursProcess(self.model_part,9,18)).Execute()
            print self.structure_model_part
            print "after neighbors"
##        for node in self.model_part.Nodes:
##            node.SetSolutionStepValue(IS_FREE_SURFACE,0,0.0)
##
##        for node in self.model_part.Nodes:
##            if (node.GetSolutionStepValue(IS_BOUNDARY)==1 and node.GetSolutionStepValue(IS_STRUCTURE)!=1):
##                node.SetSolutionStepValue(IS_FREE_SURFACE,0,1.0)
    ##################################################################
    def FindNeighbours(self):
        """Recompute nodal neighbours on the fluid model part."""
        (self.neigh_finder).Execute();
    ######################################################################
    def OutputStep(self,time,gid_io):
        """Write mesh and nodal results to GiD when `time` reaches the next
        scheduled output instant."""
        if(time >= self.next_output_time):
            self.next_output_time = self.next_output_time + self.output_time_increment;
            #writing mesh
            gid_io.InitializeMesh( time );
            gid_io.WriteNodeMesh((self.model_part).GetMesh());
            gid_io.WriteMesh((self.model_part).GetMesh());
            gid_io.FinalizeMesh();
            gid_io.InitializeResults(time, (self.model_part).GetMesh());
            gid_io.WriteNodalResults(PRESSURE, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(EXTERNAL_PRESSURE, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(IS_FREE_SURFACE, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(IS_BOUNDARY, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(IS_STRUCTURE, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(IS_INTERFACE, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(VELOCITY, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(MESH_VELOCITY, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(DENSITY, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(AIR_PRESSURE,(self.model_part).Nodes,time,0);
            gid_io.WriteNodalResults(WATER_PRESSURE,(self.model_part).Nodes,time,0);
            gid_io.WriteNodalResults(DENSITY_AIR,(self.model_part).Nodes,time,0)
            gid_io.WriteNodalResults(DENSITY_WATER,(self.model_part).Nodes,time,0)
            gid_io.WriteNodalResults(AIR_SOUND_VELOCITY,(self.model_part).Nodes,time,0)
            gid_io.WriteNodalResults(WATER_SOUND_VELOCITY,(self.model_part).Nodes,time,0)
            gid_io.WriteNodalResults(IS_FLUID, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(IS_WATER, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(NODAL_H, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(DISTANCE, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(DISPLACEMENT, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(IS_VISITED, (self.model_part).Nodes, time, 0);
            gid_io.WriteNodalResults(AUX_INDEX, (self.model_part).Nodes, time, 0);
            gid_io.PrintOnGaussPoints(IS_WATER_ELEMENT, self.model_part, time);
            gid_io.Flush()
            gid_io.FinalizeResults()
    ######################################################################
    ######################################################################
    def CalculateDistanceAndDiviedSet(self,domain_size):
        """Compute signed distances from the air/water interface in both
        directions (interface nodes get distance 0)."""
        (self.neigh_finder).Execute();
        distance_tools = ElemBasedDistanceUtilities(self.model_part)
        distance_calculator = BodyDistanceCalculationUtils()
        # Assign IS_VISITED to elements with DISTANCE>=0 and make DISTANCE
        # positive for external nodes.
        ##Assign Zero distance to interface nodes
        for node in (self.model_part).Nodes:
            if(node.GetSolutionStepValue(IS_INTERFACE)== 1.0):
                node.SetSolutionStepValue(DISTANCE,0,0.0)
        distance_tools.MarkExternalAndMixedNodes()
        distance_tools.ChangeSignToDistance()
        #calculate distances towards the interior of the domain
        if(domain_size == 2):
            distance_calculator.CalculateDistances2D((self.model_part).Elements,DISTANCE, True);
        else:
            distance_calculator.CalculateDistances3D((self.model_part).Elements,DISTANCE, True);
        #change sign
        distance_tools.ChangeSignToDistance()
        #mark as visited all of the nodes inside the fluid domain
        distance_tools.MarkInternalAndMixedNodes()
        print ((self.model_part).Elements).Size()
        #calculate distances towards the outside
        if(domain_size == 2):
            distance_calculator.CalculateDistances2D((self.model_part).Elements,DISTANCE, True);
        else:
            distance_calculator.CalculateDistances3D((self.model_part).Elements,DISTANCE, True);
        #Decide IS_WATER flag due to DISTANCE
##        for node in (self.model_part).Nodes:
##            if(node.GetSolutionStepValue(DISTANCE)<= 0.0):
##                node.SetSolutionStepValue(IS_WATER,0,0.0)
##            else:
##                node.SetSolutionStepValue(IS_WATER,0,1.0)
##            if(node.GetSolutionStepValue(DISTANCE)== 0.0):
##                print"This node has distance zero, is_interface is assigned"
##                node.SetSolutionStepValue(IS_INTERFACE,0,1.0)
##                node.SetSolutionStepValue(IS_VISITED,0,1.0)
        #save as distance of the old time step
        distance_tools.SaveScalarVariableToOldStep(DISTANCE)
        print "finished RecalculateDistanceFunction"
       # (self.SetDivided).SetDividedElem_2D()
        print ">>>>>ELEMENTS ARE DIVIDED<<<<<<<<<<<<"
    ######################################################################
    ######################################################################
    def DistToH(self):
        """Assign the nodal mesh size NODAL_H from the interface distance:
        nodes within `ref_dist` of the interface get the minimum size.
        The graded (slope-based) branches are currently disabled."""
        possible_h = self.CalculateRadius()
        print possible_h
        min_H = possible_h*3.14/60
        #min_H = .0007#0.001
        sec_min_H = 10 * min_H#.004
        max_H = .02
        ref_dist = 10*min_H
        sec_ref_dist = 50*min_H
        third_ref_dist = 200*min_H
        slope = (sec_min_H - min_H)/(sec_ref_dist-ref_dist)
        second_slope = (max_H - sec_min_H)/(third_ref_dist-sec_ref_dist)
        #search for min an max of H
##        for node in (self.model_part).Nodes:
##            node_H = node.GetSolutionStepValue(NODAL_H,0)
##            if(node_H<self.min_H):
##                self.min_H = node_H
##            else:
##                if(node_H > self.max_H):
##                    self.max_H = node_H
       # H = H + dist * dist
        #print ">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<"
        for node in (self.model_part).Nodes:
            current_dist = node.GetSolutionStepValue(DISTANCE,0)
            if(abs(current_dist) <= ref_dist):
                node_H = min_H #+ slope*abs(current_dist)
                node.SetSolutionStepValue(NODAL_H,0,node_H)
           # if(ref_dist<abs(current_dist) and abs(current_dist)<= sec_ref_dist):
               # node_H = min_H + slope*(abs(current_dist) - ref_dist)
               # node.SetSolutionStepValue(NODAL_H,0,node_H)
           # if(sec_ref_dist<abs(current_dist) and abs(current_dist)<=third_ref_dist):
               # node_H = sec_min_H + second_slope*(abs(current_dist)- sec_ref_dist)
               # node.SetSolutionStepValue(NODAL_H,0,node_H)
           # if(abs(current_dist)>third_ref_dist):
              #  node_H = max_H
              #  node.SetSolutionStepValue(NODAL_H,0,node_H)
    #############################################################################
    def CalculateRadius(self):
        """Return the largest distance between interface nodes.

        NOTE(review): the first loop leaves (X_ref, Y_ref, Z_ref) at the
        *last* interface node found, which is then used as the reference
        point; falls back to 0.076 when no interface nodes exist.
        """
        max_radi = 0.0
        for node in (self.model_part).Nodes:
            if node.GetSolutionStepValue(IS_INTERFACE) == 1.0:
                X_ref = node.X
                Y_ref = node.Y
                Z_ref = node.Z
        for node in (self.model_part).Nodes:
            if node.GetSolutionStepValue(IS_INTERFACE) == 1.0:
                radi = pow(node.X-X_ref , 2)+pow(node.Y-Y_ref , 2)+pow(node.Z-Z_ref , 2)
                if(radi>max_radi):
                    max_radi = radi
        max_radi = pow(max_radi,0.5)
        if (max_radi == 0.0):
            max_radi = 0.076
        return max_radi
    ######################################################################
    ######################################################################
    def CalculateFluidNeighborsMixedModelPartAndColor(self):
        """Temporarily restrict the model part to its fluid (4-node)
        elements, recompute elemental neighbours and colour elements as
        air/water, then restore the full element container."""
        all_elements = (self.model_part).Elements
        fluid_elements = ElementsArray()
        print "========= find neighbors================="
        (SaveElementBySizeProcess((self.model_part).Elements, fluid_elements, 4)).Execute()
        (self.model_part).Elements = fluid_elements
        (self.elem_neighbor_finder).ClearNeighbours()
        print "after ClearNeighbours()"
        (self.elem_neighbor_finder).Execute()
        print "after Execute() neighbors"
        (self.PfemUtils).ColourAirWaterElement(self.model_part,3)
        (self.model_part).Elements = all_elements
| [
"pooyan@4358b7d9-91ec-4505-bf62-c3060f61107a"
] | pooyan@4358b7d9-91ec-4505-bf62-c3060f61107a |
44020a2430f1ee4d677ebf2e72f517311b4c61fc | a01fb7bb8e8738a3170083d84bc3fcfd40e7e44f | /python3/test/skip_test.py | a678856ad3b4261a4ba7525d1720c9bb602fa589 | [] | no_license | jk983294/CommonScript | f07acf603611b4691b176aa4a02791ef7d4d9370 | 774bcbbae9c146f37312c771c9e867fb93a0c452 | refs/heads/master | 2023-08-21T17:50:19.036159 | 2023-08-16T00:22:03 | 2023-08-16T00:22:03 | 42,732,160 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | import unittest
import os
import platform
class Tests(unittest.TestCase):
    """Demonstrates unittest's skip decorators."""
    def test_0(self):
        # Baseline test that always runs.
        self.assertTrue(True)
    @unittest.skip('skipped test')
    def test_1(self):
        # Unconditionally skipped, so this failure never fires.
        self.fail('should have failed!')
    @unittest.skipIf(os.name == 'posix', 'Not supported on Unix')
    def test_2(self):
        # winreg only exists on Windows; skipped on POSIX systems.
        import winreg
    @unittest.skipUnless(platform.system() == 'Darwin', 'Mac specific test')
    def test_3(self):
        # Runs only on macOS.
        self.assertTrue(True)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"jk983294@gmail.com"
] | jk983294@gmail.com |
37d6d2d4799abb3252b0037faeff26b63b15947e | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project/.history/product_20211026233548.py | 4c3e77961db96c2cb4baab24e8753d3719e59621 | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 6,893 | py | import data as list_product
import random
# def __init__(self, Id, Product_code, Product_name, Brand, Year, Size):
# self.Id = Id
# self.Product_code = Product_code
# self.Product_name = Product_name
# self.Brand = Brand
# self.Year = Year
# self.Size = Size
# Thêm sản phẩm
def AddProduct():
    """Interactively read one product from stdin and append it to the
    catalogue (data.list_product); recurses when the user answers Y.

    NOTE(review): numeric prompts call int()/float() on raw input and will
    raise ValueError on bad input.
    """
    print("THÊM SẢN PHẨM")
    product = {
        "Id": "",
        "Product_code": "",
        "Product_name": "",
        "Brand": "",
        "Price": "",
        "Year": "",
        "Quantity": "",
        "Size": ""
    }
    print("Nhập ID sản phẩm:")
    Id = int(input())
    # Keep asking until the ID is unique. (`student` is a leftover name;
    # it holds the duplicate-lookup result.)
    while True:
        student = FindProductDuplicate(Id)
        if student != False:
            print("ID đã tồn tại, vui lòng nhập lại ID:")
            Id = int(input())
        else:
            break
    product['Id'] = Id
    # Random product code: "HKSP" + zero-padded number in 01..99.
    code_product = random.randint(1, 99)
    str_id = "HKSP"
    if code_product <= 9:
        str_id += "0" + str(code_product)
    else:
        str_id += str(code_product)
    product["Product_code"] = str_id
    print("Nhập tên sản phẩm: ")
    product['Product_name'] = input()
    print("Nhập thương hiệu sản phẩm: ")
    product['Brand'] = input()
    print("Nhập giá sản phẩm: ")
    product['Price'] = float(input())
    print("Nhập năm sản xuất: ")
    product['Year'] = int(input())
    print("Nhập số lượng: ")
    product['Quantity'] = int(input())
    print("Nhập size giày: ")
    product['Size'] = input()
    list_product.list_product.append(product)
    # Recurse to enter another product when the user confirms.
    answer = input("Bạn có muốn nhập tiếp không? Y/N ")
    if answer == "y" or answer == "Y":
        AddProduct()
# Tìm kiếm ID trùng lặp
def FindProductDuplicate(Id):
    """Look up a product by its 'Id' in data.list_product.

    Returns [index, product_dict] for the first match, or False when no
    product has that ID. (enumerate replaces the old range(len(...)) loop.)
    """
    for index, product in enumerate(list_product.list_product):
        if product['Id'] == Id:
            return [index, product]
    return False
# Hiển thị tất cả sản phẩm
def ShowAllProduct():
    """Print every product in the catalogue, one record per block."""
    print("*** HIỂN THỊ TẤT CẢ SẢN PHẨM ***")
    # Warn when the catalogue is empty; the loop below then does nothing.
    if not list_product.list_product:
        print("Chưa có sản phẩm nào để hiển thị! ".upper())
    for item in list_product.list_product:
        print("ID: ", item['Id'])
        print("Mã sản phẩm: ", item['Product_code'])
        print("Tên sản phẩm: ", item['Product_name'])
        print("Thương hiệu: ", item['Brand'])
        print("Giá: ", item['Price'])
        print("Năm xuất bản: ", item['Year'])
        print("Số lượng: ", item['Quantity'])
        print("Size giày: ", item['Size'])
        print("________________________________")
# Sửa thông tin sản phẩm
def UpdateProduct():
    """Interactively update fields of a product selected by ID.

    The menu loop below is unusual: the Update* helpers are (re)defined on
    every pass, and dispatch uses the number typed on the *previous* pass
    (the first pass, action == 0, only defines them). The helpers mutate
    product[1] in place, which is the same dict stored in the catalogue.
    """
    print("*** CẬP NHẬT THÔNG TIN SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần sửa")
    Id = int(input())
    product = FindProductDuplicate(Id)
    if product == False:
        print("Không tìm thấy sản phẩm ID = ", Id)
    else:
        print("""Bạn muốn cập nhật mục nào ? :
        0. Thoát.
        1. Tên sản phẩm.
        2. Thương hiệu sản phẩm.
        3. Giá sản phẩm
        4. Size giày.
        5. Số lượng.
        6. Năm xuất bản. """)
        action = 0
        while action >= 0:
            if action == 1:
                UpdateProductName()
            elif action == 2:
                UpdateProductBrand()
            elif action == 3:
                UpdateProductPrice()
            elif action == 4:
                UpdateProductSize()
            elif action == 5:
                UpdateProductQuatity()
            elif action == 6:
                UpdateProductYear()
            def UpdateProductName():
                print("Nhập tên sản phẩm")
                name_product = input()
                product[1]['Product_name'] = name_product
            def UpdateProductBrand():
                print("Nhập thương hiệu của sản phẩm")
                name_product = input()
                product[1]['Brand'] = name_product
            def UpdateProductPrice():
                print("Nhập giá mới của sản phẩm")
                name_product = float(input())
                product[1]['Price'] = name_product
            def UpdateProductSize():
                print("Nhập size của sản phẩm")
                name_product = input()
                product[1]['Size'] = name_product
            def UpdateProductYear():
                print("Nhập năm sản xuất của sản phẩm")
                name_product = int(input())
                product[1]['Year'] = name_product
                # Write-back is redundant: product[1] is already the stored dict.
                list_product.list_product[product[0]] = product[1]
            def UpdateProductQuatity():
                print("Nhập số lượng sản phẩm")
                name_product = int(input())
                product[1]['Quantity'] = name_product
                # Write-back is redundant: product[1] is already the stored dict.
                list_product.list_product[product[0]] = product[1]
            action = int(input("Bạn chọn mục cập nhật nào? "))
            if action == 0:
                print("Không cập nhật mục nào")
                break
# Xóa sản phẩm
def DeleteProduct():
    """Interactively delete the product whose ID the user enters."""
    print("*** XÓA SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần xóa:")
    target = int(input())
    found = FindProductDuplicate(target)
    if found == False:
        print("Không tìm thấy sản phẩm muốn xóa!")
    else:
        list_product.list_product.remove(found[1])
        print("Xóa sản phẩm thành công!")
# Tìm kiếm sản phẩm
def FindProductByName():
    """Search products whose name or brand contains the query
    (case-insensitive) and print each matching record.

    Fixes over the original:
    - matches were collected as list *indices* but printed with constant
      subscripts (new_list[1]['Id'] ...), raising TypeError; now the
      product dicts themselves are collected and iterated;
    - the for/else printed "not found" even when matches existed; the
      message is now printed only when nothing matched;
    - the substring test was reversed (product name inside the query);
      now the query must occur inside the name/brand.
    """
    print("*** TÌM KIẾM SẢN PHẨM ***")
    matches = []
    NameProduct = str(
        input("Nhập tên sản phẩm hoặc tên thương hiệu bạn muốn tìm kiếm: ")).upper()
    for product in list_product.list_product:
        name = str(product['Product_name']).upper()
        brand = str(product['Brand']).upper()
        if NameProduct in name or NameProduct in brand:
            matches.append(product)
    if not matches:
        print("Không tìm thấy sản phẩm này @@".upper())
        return
    for product in matches:
        print("ID: ", product['Id'])
        print("Mã sản phẩm: ", product['Product_code'])
        print("Tên sản phẩm: ", product['Product_name'])
        print("Thương hiệu: ", product['Brand'])
        print("Giá: ", product['Price'])
        print("Năm xuất bản: ", product['Year'])
        print("Số lượng: ", product['Quantity'])
        print("Size giày: ", product['Size'])
        print("________________________________")
| [
"phanthituyngoc1995@gmail.com"
] | phanthituyngoc1995@gmail.com |
b4ab245bcbf76e57839fcc1722d1ddd565a89c78 | fe82835f39ec48daa8e9d425f66ededac0347d2a | /python全栈开发/网络爬虫/myscrapy/movie/movie/spiders/douban.py | c52070006e5218aad783dc97f510a089c396ec4e | [] | no_license | Abel-Fan/UAIF1907 | 3a43d7c93b71f64d76b4b7ea2e668a46c8fa10fa | dc53af8cbf3d15f356d52c032c62251fd2536222 | refs/heads/master | 2023-01-10T16:32:07.760465 | 2019-10-29T02:55:01 | 2019-10-29T02:55:01 | 199,558,420 | 2 | 3 | null | 2023-01-04T22:47:49 | 2019-07-30T02:20:57 | Python | UTF-8 | Python | false | false | 738 | py | # -*- coding: utf-8 -*-
import scrapy
class DoubanSpider(scrapy.Spider):
    """Crawls the Douban Top-250 movie list and prints each film's title."""
    name = 'douban'
    allowed_domains = ['movie.douban.com']
    # Paging offset into the Top-250 list (25 entries per page).
    index = 0
    url = 'https://movie.douban.com/top250?start='
    start_urls = ['https://movie.douban.com/top250?start=0']
    def parse(self, response):
        """Follow every movie link on the page, then queue the next page."""
        for sel in response.xpath("//div[@class='hd']"):
            url = sel.xpath("a/@href").extract()[0]
            yield scrapy.Request(url,callback=self.parseCon)
        self.index+=25
        # 226 stops paging after offset 225, i.e. all 10 pages of the Top 250.
        if self.index<226:
            yield scrapy.Request(self.url+str(self.index),callback=self.parse)
    def parseCon(self,response):
        """Print the movie title scraped from a detail page."""
        # detail page
        title = response.xpath("//h1/span[1]/text()").extract()[0]
        print(title)
| [
"842615663@qq.com"
] | 842615663@qq.com |
25189a323700e225e6f96c440e680102061762f0 | 509823ea14f04d5791486b56a592d7e7499d7d51 | /parte05/ex5.09_suma_valores_diccionario.py | 618b2f2b3e475e6ab8a3018557ce2d1f48b3d8d1 | [] | no_license | Fhernd/Python-CursoV2 | 7613144cbed0410501b68bedd289a4d7fbefe291 | 1ce30162d4335945227f7cbb875f99bc5f682b98 | refs/heads/master | 2023-08-08T05:09:44.167755 | 2023-08-05T19:59:38 | 2023-08-05T19:59:38 | 239,033,656 | 64 | 38 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | # Ejercicio 5.9: Sumar todos los valores de un diccionario.
productos = {'Mouse': 29.9, 'Teclado': 119.9, 'Audífonos': 35.9, 'Monitor': 299}

# Add up the price of every product in the catalogue.
total = sum(precio for precio in productos.values())
print('El total de los productos es:', total)
| [
"johnortizo@outlook.com"
] | johnortizo@outlook.com |
b0fa16737a686df72fd98cff9f7fd566d0d5e80a | 2ed0ab730b62665b3a36841ab006eea961116f87 | /Hash/MaximumSizeSubArray.py | 4c44f5c8ae5645363b1ad573c522e8e3413e73b1 | [] | no_license | scarlettlite/hackathon | 0f0a345d867b9e52823f10fe67c6ec210a40945f | 179ba9038bbed4d48cb2f044fd8430cf2be2bab3 | refs/heads/master | 2021-07-04T00:55:17.665292 | 2019-03-04T09:10:59 | 2019-03-04T09:10:59 | 141,269,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | class Solution:
def maxSubArrayLen(self, nums, k):
index, l, sm = {}, 0, 0
"""
if some subarray adds up to zero,
then include it
"""
index[0] = -1
for i, num in enumerate(nums):
sm += num
"""
sm - (sm - k) = k
"""
if sm - k in index:
l = max(l, i - index[sm - k])
if sm not in index:
"""
for each sum note the earliest index at which the sum occurs
"""
index[sm] = i
return l
# Smoke test: longest subarray of [-2, -1, 2, 1] summing to 1 has length 2.
print(Solution().maxSubArrayLen([-2, -1, 2, 1], 1))
"shivanirathore496@gmail.com"
] | shivanirathore496@gmail.com |
c7bdf27bb365b6e35ca7896779eabc935dc9456e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02410/s129903146.py | f4bc923b5e62b74d7db52836dc67ef4179c3d122 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | n,m = map(int,input().split())
# Read the n x m matrix, one whitespace-separated row per line.
mat = [list(map(int, input().split())) for _ in range(n)]
# Read the m-component vector, one value per line.
vec = [int(input()) for _ in range(m)]
# Print each component of the matrix-vector product.
for row in mat:
    print(sum(row[j] * vec[j] for j in range(m)))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5c512f565ac92a4de97031a9ab515142b7a4c682 | 9a066d64eb81bd65fb51790579b7e9ea874beb3b | /seisma/conf/default.py | 05ffc3b4464d0bd1557433dc5584a97bcaacda7b | [] | no_license | crisscuola/seisma-server | 377e56ec6d636b056f95c6b425cbfebd62f8ec3e | 1ea66ac986e4e3bd6911d572e278daf7ff728c75 | refs/heads/master | 2021-06-12T08:26:16.442942 | 2017-03-17T20:14:16 | 2017-03-17T20:14:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | # -*- coding: utf-8 -*-
import sys
# Application settings (Flask debug/testing switches)
DEBUG = False
TESTING = False

# MySQL database / SQLAlchemy pool settings
DATABASE = {
    'HOST': '127.0.0.1',
    'PORT': 3306,
    'USER': 'root',
    'PASSWORD': '',
    'NAME': 'seisma',
    'POOL_SIZE': 10,
    'POOL_TIMEOUT': 10,
    'POOL_RECYCLE': 60 * 5,
    'MAX_OVERFLOW': -1,
    'SQL_LOG': False,
    'TRACK_MODIFICATIONS': False,
}

# Redis cache settings
REDIS_CACHE = {
    'HOST': '127.0.0.1',
    'PORT': 6379,
    'DB': 0,
    'IS_DISABLED': False,
    'MAX_CONNECTIONS': 15,
    'GET_CONNECTION_TIMEOUT': 10,
}

# Logging settings (dictConfig schema, version 1)
LOGGING_SETTINGS = {
    'version': 1,
    'formatters': {
        'basic': {
            'format': '%(asctime)-15s %(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'stream': sys.stderr,
            'formatter': 'basic'
        },
        'null': {
            'class': 'logging.NullHandler',
            'level': 'DEBUG'
        },
    },
    'loggers': {
        'seisma': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'flask': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'flask_sqlalchemy': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
        'flask_migrate': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
        'flask_script': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
        'sqlalchemy': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
    },
    'root': {
        'propagate': False,
        'handlers': ['console'],
        'level': 'INFO',
    },
}

# Purge stored results older than this many days
ROTATE_FOR_DAYS = 365

# Maximum allowed duration of a build, in minutes
MAX_BUILD_TIMEOUT = 60
| [
"mikhail.trifonov@corp.mail.ru"
] | mikhail.trifonov@corp.mail.ru |
c3cf8759732213d7cdc16a9c500a9158886bd928 | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/keras_applications/mobilenet_v2.py | 56709fb0afee73883a7f9ea263cddedbf7d12854 | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 20,430 | py | """MobileNet v2 models for Keras.
MobileNetV2 is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 22 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.35, 0.5, 0.75, 1.0, 1.3, and 1.4
For each of these `alpha` values, weights for 5 different input image sizes
are provided (224, 192, 160, 128, and 96).
The following table describes the performance of
MobileNet on various input sizes:
------------------------------------------------------------------------
MACs stands for Multiply Adds
Classification Checkpoint| MACs (M) | Parameters (M)| Top 1 Accuracy| Top 5 Accuracy
--------------------------|------------|---------------|---------|----|-------------
| [mobilenet_v2_1.4_224] | 582 | 6.06 | 75.0 | 92.5 |
| [mobilenet_v2_1.3_224] | 509 | 5.34 | 74.4 | 92.1 |
| [mobilenet_v2_1.0_224] | 300 | 3.47 | 71.8 | 91.0 |
| [mobilenet_v2_1.0_192] | 221 | 3.47 | 70.7 | 90.1 |
| [mobilenet_v2_1.0_160] | 154 | 3.47 | 68.8 | 89.0 |
| [mobilenet_v2_1.0_128] | 99 | 3.47 | 65.3 | 86.9 |
| [mobilenet_v2_1.0_96] | 56 | 3.47 | 60.3 | 83.2 |
| [mobilenet_v2_0.75_224] | 209 | 2.61 | 69.8 | 89.6 |
| [mobilenet_v2_0.75_192] | 153 | 2.61 | 68.7 | 88.9 |
| [mobilenet_v2_0.75_160] | 107 | 2.61 | 66.4 | 87.3 |
| [mobilenet_v2_0.75_128] | 69 | 2.61 | 63.2 | 85.3 |
| [mobilenet_v2_0.75_96] | 39 | 2.61 | 58.8 | 81.6 |
| [mobilenet_v2_0.5_224] | 97 | 1.95 | 65.4 | 86.4 |
| [mobilenet_v2_0.5_192] | 71 | 1.95 | 63.9 | 85.4 |
| [mobilenet_v2_0.5_160] | 50 | 1.95 | 61.0 | 83.2 |
| [mobilenet_v2_0.5_128] | 32 | 1.95 | 57.7 | 80.8 |
| [mobilenet_v2_0.5_96] | 18 | 1.95 | 51.2 | 75.8 |
| [mobilenet_v2_0.35_224] | 59 | 1.66 | 60.3 | 82.9 |
| [mobilenet_v2_0.35_192] | 43 | 1.66 | 58.2 | 81.2 |
| [mobilenet_v2_0.35_160] | 30 | 1.66 | 55.7 | 79.1 |
| [mobilenet_v2_0.35_128] | 20 | 1.66 | 50.8 | 75.0 |
| [mobilenet_v2_0.35_96] | 11 | 1.66 | 45.5 | 70.4 |
The weights for all 22 models are obtained and
translated from the TensorFlow checkpoints found [here]
(https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md).
# Reference
This file contains building code for MobileNetV2, based on
[MobileNetV2: Inverted Residuals and Linear Bottlenecks]
(https://arxiv.org/abs/1801.04381) (CVPR 2018)
Tests comparing this model to the existing Tensorflow model can be
found at [mobilenet_v2_keras]
(https://github.com/JonathanCMitchell/mobilenet_v2_keras)
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import warnings
import numpy as np
from . import correct_pad
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
# TODO Change path to v1.1
BASE_WEIGHT_PATH = ('https://github.com/JonathanCMitchell/mobilenet_v2_keras/'
'releases/download/v1.1/')
backend = None
layers = None
models = None
keras_utils = None
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def MobileNetV2(input_shape=None,
alpha=1.0,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the MobileNetV2 architecture.
# Arguments
input_shape: optional shape tuple, to be specified if you would
like to use a model with an input img resolution that is not
(224, 224, 3).
It should have exactly 3 inputs channels (224, 224, 3).
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you choose to include both input_tensor and input_shape then
input_shape will be used if they match, if the shapes
do not match then we will throw an error.
E.g. `(160, 160, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
width multiplier in the MobileNetV2 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape or invalid alpha, rows when
weights='imagenet'
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
# If both input_shape and input_tensor are used, they should match
if input_shape is not None and input_tensor is not None:
try:
is_input_t_tensor = backend.is_keras_tensor(input_tensor)
except ValueError:
try:
is_input_t_tensor = backend.is_keras_tensor(
keras_utils.get_source_inputs(input_tensor))
except ValueError:
raise ValueError('input_tensor: ', input_tensor,
'is not type input_tensor')
if is_input_t_tensor:
if backend.image_data_format == 'channels_first':
if backend.int_shape(input_tensor)[1] != input_shape[1]:
raise ValueError('input_shape: ', input_shape,
'and input_tensor: ', input_tensor,
'do not meet the same shape requirements')
else:
if backend.int_shape(input_tensor)[2] != input_shape[1]:
raise ValueError('input_shape: ', input_shape,
'and input_tensor: ', input_tensor,
'do not meet the same shape requirements')
else:
raise ValueError('input_tensor specified: ', input_tensor,
'is not a keras tensor')
# If input_shape is None, infer shape from input_tensor
if input_shape is None and input_tensor is not None:
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
raise ValueError('input_tensor: ', input_tensor,
'is type: ', type(input_tensor),
'which is not a valid type')
if input_shape is None and not backend.is_keras_tensor(input_tensor):
default_size = 224
elif input_shape is None and backend.is_keras_tensor(input_tensor):
if backend.image_data_format() == 'channels_first':
rows = backend.int_shape(input_tensor)[2]
cols = backend.int_shape(input_tensor)[3]
else:
rows = backend.int_shape(input_tensor)[1]
cols = backend.int_shape(input_tensor)[2]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
# If input_shape is None and no input_tensor
elif input_shape is None:
default_size = 224
# If input_shape is not None, assume default size
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of `0.35`, `0.50`, `0.75`, '
'`1.0`, `1.3` or `1.4` only.')
if rows != cols or rows not in [96, 128, 160, 192, 224]:
rows = 224
warnings.warn('`input_shape` is undefined or non-square, '
'or `rows` is not in [96, 128, 160, 192, 224].'
' Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
first_block_filters = _make_divisible(32 * alpha, 8)
x = layers.ZeroPadding2D(padding=correct_pad(backend, img_input, 3),
name='Conv1_pad')(img_input)
x = layers.Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=False,
name='Conv1')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='bn_Conv1')(x)
x = layers.ReLU(6., name='Conv1_relu')(x)
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2,
expansion=6, block_id=6)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=7)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=8)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=9)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=10)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=11)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=12)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2,
expansion=6, block_id=13)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=14)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=15)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1,
expansion=6, block_id=16)
# no alpha applied to last conv as stated in the paper:
# if the width multiplier is greater than 1 we
# increase the number of output channels
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = layers.Conv2D(last_block_filters,
kernel_size=1,
use_bias=False,
name='Conv_1')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='Conv_1_bn')(x)
x = layers.ReLU(6., name='out_relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(classes, activation='softmax',
use_bias=True, name='Logits')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x,
name='mobilenetv2_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if include_top:
model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
str(alpha) + '_' + str(rows) + '.h5')
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = keras_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
str(alpha) + '_' + str(rows) + '_no_top' + '.h5')
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = keras_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    """Build one MobileNetV2 inverted residual block.

    Expands the channels by `expansion` with a 1x1 conv, applies a 3x3
    depthwise conv (strided when `stride == 2`), projects back down to a
    width-multiplied, 8-divisible channel count, and adds a residual
    shortcut whenever the input and output tensors have the same shape.
    """
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

    in_channels = backend.int_shape(inputs)[channel_axis]
    # Output channels after applying the width multiplier, rounded to
    # a multiple of 8 as in the reference TensorFlow implementation.
    pointwise_filters = _make_divisible(int(filters * alpha), 8)
    prefix = 'block_{}_'.format(block_id)
    net = inputs

    if block_id:
        # Expand: widen the representation with a pointwise convolution.
        net = layers.Conv2D(expansion * in_channels,
                            kernel_size=1,
                            padding='same',
                            use_bias=False,
                            activation=None,
                            name=prefix + 'expand')(net)
        net = layers.BatchNormalization(axis=channel_axis,
                                        epsilon=1e-3,
                                        momentum=0.999,
                                        name=prefix + 'expand_BN')(net)
        net = layers.ReLU(6., name=prefix + 'expand_relu')(net)
    else:
        # The very first block has no expansion stage and a fixed prefix.
        prefix = 'expanded_conv_'

    # Depthwise: spatial filtering, one filter per channel.
    if stride == 2:
        net = layers.ZeroPadding2D(padding=correct_pad(backend, net, 3),
                                   name=prefix + 'pad')(net)
    net = layers.DepthwiseConv2D(kernel_size=3,
                                 strides=stride,
                                 activation=None,
                                 use_bias=False,
                                 padding='same' if stride == 1 else 'valid',
                                 name=prefix + 'depthwise')(net)
    net = layers.BatchNormalization(axis=channel_axis,
                                    epsilon=1e-3,
                                    momentum=0.999,
                                    name=prefix + 'depthwise_BN')(net)
    net = layers.ReLU(6., name=prefix + 'depthwise_relu')(net)

    # Project: linear bottleneck back to `pointwise_filters` channels
    # (no activation, per the paper).
    net = layers.Conv2D(pointwise_filters,
                        kernel_size=1,
                        padding='same',
                        use_bias=False,
                        activation=None,
                        name=prefix + 'project')(net)
    net = layers.BatchNormalization(axis=channel_axis,
                                    epsilon=1e-3,
                                    momentum=0.999,
                                    name=prefix + 'project_BN')(net)

    # Residual shortcut only when shapes are guaranteed to match.
    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=prefix + 'add')([inputs, net])
    return net
| [
"luis20dr@gmail.com"
] | luis20dr@gmail.com |
aa5bd5a0f7c81ee9514591e0bfa78361be291fc8 | e75a40843a8738b84bd529a549c45776d09e70d9 | /samples/openapi3/client/petstore/python-aiohttp/petstore_api/api/another_fake_api.py | 3ba5d212b863746d51bea35a12aa8f8ac687322d | [
"Apache-2.0"
] | permissive | OpenAPITools/openapi-generator | 3478dbf8e8319977269e2e84e0bf9960233146e3 | 8c2de11ac2f268836ac9bf0906b8bb6b4013c92d | refs/heads/master | 2023-09-02T11:26:28.189499 | 2023-09-02T02:21:04 | 2023-09-02T02:21:04 | 133,134,007 | 17,729 | 6,577 | Apache-2.0 | 2023-09-14T19:45:32 | 2018-05-12T09:57:56 | Java | UTF-8 | Python | false | false | 8,023 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\
The version of the OpenAPI document: 1.0.0
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
""" # noqa: E501
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from typing import overload, Optional, Union, Awaitable
from pydantic import Field
from petstore_api.models.client import Client
from petstore_api.api_client import ApiClient
from petstore_api.api_response import ApiResponse
from petstore_api.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class AnotherFakeApi:
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None) -> None:
        # Fall back to the process-wide default client so that
        # `AnotherFakeApi()` works without explicit configuration.
        if api_client is None:
            api_client = ApiClient.get_default()
        self.api_client = api_client

    # Overload 1: without async_req -> plain Client result (awaitable variant).
    @overload
    async def call_123_test_special_tags(self, client : Annotated[Client, Field(..., description="client model")], **kwargs) -> Client:  # noqa: E501
        ...

    # Overload 2: async_req=True -> request runs asynchronously.
    @overload
    def call_123_test_special_tags(self, client : Annotated[Client, Field(..., description="client model")], async_req: Optional[bool]=True, **kwargs) -> Client:  # noqa: E501
        ...

    @validate_arguments
    def call_123_test_special_tags(self, client : Annotated[Client, Field(..., description="client model")], async_req: Optional[bool]=None, **kwargs) -> Union[Client, Awaitable[Client]]:  # noqa: E501
        """To test special tags  # noqa: E501

        To test special tags and operation ID starting with number  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.call_123_test_special_tags(client, async_req=True)
        >>> result = thread.get()

        :param client: client model (required)
        :type client: Client
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request.
               If one number provided, it will be total request
               timeout. It can also be a pair (tuple) of
               (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: Client
        """
        # This convenience wrapper always returns the deserialized body only;
        # use the *_with_http_info variant for status code and headers.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            message = "Error! Please call the call_123_test_special_tags_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"  # noqa: E501
            raise ValueError(message)
        if async_req is not None:
            kwargs['async_req'] = async_req
        return self.call_123_test_special_tags_with_http_info(client, **kwargs)  # noqa: E501

    @validate_arguments
    def call_123_test_special_tags_with_http_info(self, client : Annotated[Client, Field(..., description="client model")], **kwargs) -> ApiResponse:  # noqa: E501
        """To test special tags  # noqa: E501

        To test special tags and operation ID starting with number  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.call_123_test_special_tags_with_http_info(client, async_req=True)
        >>> result = thread.get()

        :param client: client model (required)
        :type client: Client
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(Client, status_code(int), headers(HTTPHeaderDict))
        """

        # NOTE: locals() captures the named arguments plus the `kwargs` dict
        # so that the validation loop below can treat them uniformly.
        _params = locals()

        _all_params = [
            'client'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method call_123_test_special_tags" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}

        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['client'] is not None:
            _body_params = _params['client']

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list

        # authentication setting
        _auth_settings = []  # noqa: E501

        # Map of HTTP status code -> model class used for deserialization.
        _response_types_map = {
            '200': "Client",
        }

        return self.api_client.call_api(
            '/another-fake/dummy', 'PATCH',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| [
"noreply@github.com"
] | OpenAPITools.noreply@github.com |
4d5bb08ad0851016b6f091607bf9b92d5e42a7de | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/nose-1.3.7/functional_tests/support/issue130/test.py | 9778eefb68cb9f67413a8e0e9ed80549769d2243 | [
"Apache-2.0",
"LGPL-2.1-only"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 65 | py | def setup():
raise "KABOOM"
def test_foo():
assert(1==1)
| [
"ranade@cloudera.com"
] | ranade@cloudera.com |
d6cb7279f5266fe1ccfbfb7eab62fa9820400359 | 4af394289f00e654b5f1611d3acc11956ff40250 | /doc/samples/qdiipred.py | 212ca14acc9f14e692e177602bbf7c9ef6e67f1a | [
"MIT"
] | permissive | vensentzhou/xalpha | 159b6d4d325878f830abae1fcd8c8e27bbd41b4f | 03537dc009c4c15416bfe385a07c7068950d1152 | refs/heads/master | 2023-03-05T00:11:50.186495 | 2021-02-14T05:32:21 | 2021-02-14T05:32:21 | 319,993,901 | 0 | 0 | MIT | 2021-02-14T05:32:22 | 2020-12-09T15:11:13 | null | UTF-8 | Python | false | false | 3,672 | py | """
A simple example showing real-time NAV prediction for QDII funds, reusing caches as much as possible to minimize network requests
"""
import pandas as pd
import xalpha as xa
import logging
# Use on-disk CSV caching so repeated runs reuse data already fetched;
# entries dated before 2020-01-03 are treated as pre-cached.
xa.set_backend(backend="csv", path="../../../lof/data", precached="20200103")
# Surface xalpha's debug logging on stderr so cache hits/misses are visible.
logger = logging.getLogger("xalpha")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
@xa.universal.lru_cache_time(ttl=180)
def cached_get_rt(code, **kws):
    # Cache realtime quotes for 3 minutes. handler=False presumably bypasses
    # registered handlers so this function can itself be installed as the
    # "rt" handler below without recursing — verify against xalpha docs.
    return xa.get_rt(code, handler=False)
@xa.universal.lru_cache_time(ttl=1800)
def cached_get_bar(code, *args, **kws):
    # Only commodity bar data is cached here (30-minute TTL). Returning None
    # for other codes presumably tells xalpha to fall back to its default
    # fetching path — verify against xalpha's set_handler contract.
    if code.startswith("commodities/"):
        kws["handler"] = False
        return xa.get_bar(code, *args, **kws)
    return None
# Route xalpha's internal realtime/bar fetches through the cached wrappers above.
xa.set_handler(method="rt", f=cached_get_rt)
xa.set_handler(method="bar", f=cached_get_bar)
# QDII (cross-border) fund codes: both a T-1 and an intraday T-0 net-value
# estimate are computed for each of these in the first loop below.
qdiis = [
    "SH501018",
    "SZ160416",
    "SZ161129",
    "SZ160723",
    "SZ160216",
    "SZ162411",
    "SZ163208",
    "SZ162719",
    "SZ165513",
    "SZ161815",  # fr
    "SZ161116",  # lu
    "SZ164701",
    "SZ160719",
    "SZ164824",
    "SH513030",
    "SZ160140",
    "SZ165510",
    "SZ164906",
    "SH513050",
]
# Domestic funds/ETFs: only a T-0 estimate is computed; the realtime quote of
# the related "F"-prefixed code stands in for the T-1 column (see second loop).
nonqdiis = [
    "SH501021",
    "SH513880",
    "SH513520",
    "SH513000",
    "SH510510",
    "SZ159922",
    "SH510500",
    "SH512500",
    "SZ159920",
]
# Column-oriented accumulator for the result table; every successfully
# processed fund appends exactly one value per key.
data = {
    "code": [],
    "name": [],
    "t1": [],
    "t0": [],
    "now": [],
    "t1rate": [],
    "t0rate": [],
    "position": [],
}
# Collect predictions for QDII funds. Each row is built locally first and
# appended to `data` only when every field succeeded: the original appended
# column-by-column inside the try-block, so a NonAccurate raised midway left
# the columns with unequal lengths and broke pd.DataFrame(data) later.
for c in qdiis:
    p = xa.QDIIPredict(c, fetch=True, save=True, positions=True)
    try:
        row = {}
        row["t1"] = round(p.get_t1(return_date=False), 4)
        row["t1rate"] = round(p.get_t1_rate(return_date=False), 2)
        try:
            row["t0"] = round(p.get_t0(return_date=False), 4)
            row["t0rate"] = round(p.get_t0_rate(return_date=False), 2)
        except ValueError:
            # Intraday estimate unavailable; show placeholders instead.
            row["t0"] = "-"
            row["t0rate"] = "-"
        row["position"] = round(p.get_position(return_date=False), 3)
        row["now"] = xa.get_rt(c)["current"]
        row["code"] = c
        row["name"] = xa.get_rt(c)["name"]
    except xa.exceptions.NonAccurate as e:
        print("%s cannot be predicted exactly now" % c)
        print(e.reason)
    else:
        # Commit the complete row atomically.
        for k, v in row.items():
            data[k].append(v)
# Domestic funds: only a T-0 estimate; the realtime quote of the related
# "F"-prefixed code fills the T-1 column. Same atomic-append pattern.
for c in nonqdiis:
    try:
        p = xa.RTPredict(c)
        row = {
            "t0": round(p.get_t0(return_date=False), 4),
            "t0rate": round(p.get_t0_rate(return_date=False), 2),
            "t1": xa.get_rt("F" + c[2:])["current"],
            "t1rate": "-",
            "position": "-",
            "now": xa.get_rt(c)["current"],
            "code": c,
            "name": xa.get_rt(c)["name"],
        }
    except xa.exceptions.NonAccurate as e:
        print("%s cannot be predicted exactly now" % c)
        print(e.reason)
    else:
        for k, v in row.items():
            data[k].append(v)
df = pd.DataFrame(data)
# Wrap the table in a standalone HTML page: jQuery DataTables adds sorting
# and searching, the inline CSS centers cells and highlights the hovered row.
htmlstr = (
    """<html>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css">
<script src="https://ajax.aspnetcdn.com/ajax/jQuery/jquery-3.4.1.min.js"></script>
<script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js"></script>
<script>
$(document).ready( function () {
    $('#df').DataTable({"scrollY": "88%",
        "scrollCollapse": true,
        "paging": false,
        "fixedHeader": true
        });
} );
</script>
<style>
td, th {
  text-align: center;
}
#df tbody tr:hover {
  background-color: #ffff99;
}
</style>"""
    + df.to_html(table_id="df", index=False)
    + "</html>"
)
# Write the finished report next to the script.
with open("demo.html", "w") as f:
    f.writelines([htmlstr])
| [
"kcanamgal@foxmail.com"
] | kcanamgal@foxmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.