hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e87e61caeabea7a4911a24a31a3d383ae4ecb968 | 126 | py | Python | Server/myLibrary/admin.py | sepehrNorouzi/SemUniLib | dffeeb769ce5da4fb45a83d1792ec24adbb866e9 | [
"MIT"
] | null | null | null | Server/myLibrary/admin.py | sepehrNorouzi/SemUniLib | dffeeb769ce5da4fb45a83d1792ec24adbb866e9 | [
"MIT"
] | null | null | null | Server/myLibrary/admin.py | sepehrNorouzi/SemUniLib | dffeeb769ce5da4fb45a83d1792ec24adbb866e9 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Book, Favorite
admin.site.register(Book)
admin.site.register(Favorite) | 18 | 34 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e88065d21621edc95c47aa6a03e2fdbde75bbf30 | 329 | py | Python | kwickstart/templates/flask/app.py | TxConvergentAdmin/convergent-kwickstart | 42374a1705c019ceb34e6ece718df3c17695b852 | [
"MIT"
] | 1 | 2020-02-07T17:52:48.000Z | 2020-02-07T17:52:48.000Z | kwickstart/templates/flask/app.py | TxConvergentAdmin/convergent-kwickstart | 42374a1705c019ceb34e6ece718df3c17695b852 | [
"MIT"
] | null | null | null | kwickstart/templates/flask/app.py | TxConvergentAdmin/convergent-kwickstart | 42374a1705c019ceb34e6ece718df3c17695b852 | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request
PORT = 5000
app = Flask(__name__)
@app.route('/')
def index():
return 'Hello World'
@app.route('/data')
def data():
return jsonify({'error': False, 'data': 123})
if __name__ == "__main__":
print('Running on http://127.0.0.1:' + str(PORT))
app.run('0.0.0.0', PORT)
| 16.45 | 53 | 0.613982 | 0 | 0 | 0 | 0 | 134 | 0.407295 | 0 | 0 | 85 | 0.258359 |
e880fa35b7e36d07b02deb13e8b1c032e539faee | 7,407 | py | Python | backend/kesaseteli/applications/api/v1/views.py | jannetasa/yjdh | 5d86a56c722dfbcee03110f66c7e7ddbea966db9 | [
"MIT"
] | null | null | null | backend/kesaseteli/applications/api/v1/views.py | jannetasa/yjdh | 5d86a56c722dfbcee03110f66c7e7ddbea966db9 | [
"MIT"
] | null | null | null | backend/kesaseteli/applications/api/v1/views.py | jannetasa/yjdh | 5d86a56c722dfbcee03110f66c7e7ddbea966db9 | [
"MIT"
] | null | null | null | from django.core import exceptions
from django.http import FileResponse
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.parsers import MultiPartParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from shared.audit_log.viewsets import AuditLoggingModelViewSet
from shared.oidc.auth import EAuthRestAuthentication
from applications.api.v1.auth import StaffAuthentication
from applications.api.v1.permissions import (
ALLOWED_APPLICATION_UPDATE_STATUSES,
ALLOWED_APPLICATION_VIEW_STATUSES,
ApplicationPermission,
get_user_company,
StaffPermission,
SummerVoucherPermission,
)
from applications.api.v1.serializers import (
ApplicationSerializer,
AttachmentSerializer,
SummerVoucherSerializer,
)
from applications.enums import ApplicationStatus
from applications.models import Application, SummerVoucher
class ApplicationViewSet(AuditLoggingModelViewSet):
queryset = Application.objects.all()
serializer_class = ApplicationSerializer
permission_classes = [IsAuthenticated, ApplicationPermission]
def get_queryset(self):
"""
Fetch all DRAFT status applications of the user & company.
Should inlcude only 1 application since we don't allow creation of multiple
DRAFT applications per user & company.
"""
queryset = (
super()
.get_queryset()
.select_related("company")
.prefetch_related("summer_vouchers")
)
user = self.request.user
if user.is_anonymous:
return queryset.none()
user_company = get_user_company(self.request)
return queryset.filter(
company=user_company,
user=user,
status__in=ALLOWED_APPLICATION_VIEW_STATUSES,
)
def create(self, request, *args, **kwargs):
"""
Allow only 1 (DRAFT) application per user & company.
"""
if self.get_queryset().filter(status=ApplicationStatus.DRAFT).exists():
raise ValidationError("Company & user can have only one draft application")
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
"""
Allow to update only DRAFT status applications.
"""
instance = self.get_object()
if instance.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:
raise ValidationError("Only DRAFT applications can be updated")
return super().update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
class SummerVoucherViewSet(AuditLoggingModelViewSet):
queryset = SummerVoucher.objects.all()
serializer_class = SummerVoucherSerializer
authentication_classes = [EAuthRestAuthentication, StaffAuthentication]
permission_classes = [IsAuthenticated, SummerVoucherPermission | StaffPermission]
def get_queryset(self):
"""
Fetch summer vouchers of DRAFT status applications of the user & company.
"""
queryset = (
super()
.get_queryset()
.select_related("application")
.prefetch_related("attachments")
)
user = self.request.user
if user.is_staff:
return queryset
elif user.is_anonymous:
return queryset.none()
user_company = get_user_company(self.request)
return queryset.filter(
application__company=user_company,
application__user=user,
application__status__in=ALLOWED_APPLICATION_VIEW_STATUSES,
)
def create(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def update(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def retrieve(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def list(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def destroy(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
@action(
methods=("POST",),
detail=True,
url_path="attachments",
parser_classes=(MultiPartParser,),
)
def post_attachment(self, request, *args, **kwargs):
"""
Upload a single file as attachment
"""
obj = self.get_object()
if obj.application.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:
raise ValidationError(
"Attachments can be uploaded only for DRAFT applications"
)
# Validate request data
serializer = AttachmentSerializer(
data={
"summer_voucher": obj.id,
"attachment_file": request.data["attachment_file"],
"content_type": request.data["attachment_file"].content_type,
"attachment_type": request.data["attachment_type"],
}
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
@action(
methods=(
"GET",
"DELETE",
),
detail=True,
url_path="attachments/(?P<attachment_pk>[^/.]+)",
)
def handle_attachment(self, request, attachment_pk, *args, **kwargs):
obj = self.get_object()
if request.method == "GET":
"""
Read a single attachment as file
"""
attachment = obj.attachments.filter(pk=attachment_pk).first()
if not attachment or not attachment.attachment_file:
return Response(
{
"detail": format_lazy(
_("File not found."),
)
},
status=status.HTTP_404_NOT_FOUND,
)
return FileResponse(attachment.attachment_file)
elif request.method == "DELETE":
"""
Delete a single attachment as file
"""
if obj.application.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:
raise ValidationError(
"Attachments can be deleted only for DRAFT applications"
)
if (
obj.application.status
not in AttachmentSerializer.ATTACHMENT_MODIFICATION_ALLOWED_STATUSES
):
return Response(
{"detail": _("Operation not allowed for this application status.")},
status=status.HTTP_403_FORBIDDEN,
)
try:
instance = obj.attachments.get(id=attachment_pk)
except exceptions.ObjectDoesNotExist:
return Response(
{"detail": _("File not found.")}, status=status.HTTP_404_NOT_FOUND
)
instance.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| 35.104265 | 88 | 0.63413 | 6,309 | 0.851762 | 0 | 0 | 2,925 | 0.394897 | 0 | 0 | 1,234 | 0.166599 |
e8817a9d7c1201faed352ef258a2c751aff02a0a | 88 | py | Python | run.py | tildecross/tildex-txdb | bc32e1875c55cedfbf4ce7d00174b4b7bf68525f | [
"BSD-3-Clause"
] | null | null | null | run.py | tildecross/tildex-txdb | bc32e1875c55cedfbf4ce7d00174b4b7bf68525f | [
"BSD-3-Clause"
] | 1 | 2017-10-15T01:11:11.000Z | 2019-10-18T20:02:32.000Z | run.py | tildecross/tildex-txdb | bc32e1875c55cedfbf4ce7d00174b4b7bf68525f | [
"BSD-3-Clause"
] | null | null | null | #!env/bin/python3
from app import app
app.run(debug=True, host="localhost", port=8202)
| 17.6 | 48 | 0.738636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.318182 |
e88349bf365f46b6777bfd58c649dbe067a90780 | 8,647 | py | Python | yiff_image_scraper.py | viktor02/Yiff.party-Image-Scraper | 44018a23ff2ddb4d38a44200aab56e1c01457821 | [
"MIT"
] | null | null | null | yiff_image_scraper.py | viktor02/Yiff.party-Image-Scraper | 44018a23ff2ddb4d38a44200aab56e1c01457821 | [
"MIT"
] | null | null | null | yiff_image_scraper.py | viktor02/Yiff.party-Image-Scraper | 44018a23ff2ddb4d38a44200aab56e1c01457821 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup as bs
import requests
import sys
import os
import platform
amountOfLinks = len(sys.argv)-1
urlCounter = 0
urlList = []
missingFiles = []
userAgent = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36"
dirSep = ""
system = platform.system()
cLastPageFlag = False
if(system == 'Windows'):
dirSep = "\\"
else:
dirSep = "/"
print("\n======Starting Scraper========")
#Checks if there are links present and puts then in a list if they are
if amountOfLinks <= 0:
print("\nPlease enter at least 1 link as argument.\ne.g. https://yiff.party/patreon/1\n")
print("============0/0===============\n")
sys.exit()
for n in range(amountOfLinks):
urlList.append(sys.argv[n+1])
try:
startPage = int(sys.argv[1])-1
urlList.pop(0)
amountOfLinks -= 1
except:
startPage = 0
try:
cLastPage = int(sys.argv[2])
cLastPageFlag = True
urlList.pop(0)
amountOfLinks -= 1
if cLastPage < startPage:
sys.exit()
except SystemExit:
sys.exit("Please choose a lower starting page. Your current pagenumbers are: Starting Page: " + (startPage) + ", Last Page: " + str(cLastPage))
except:
pass
#Creates Image Directory
if not os.path.isdir("."+ dirSep +"Images"+ dirSep +""):
os.mkdir("."+ dirSep +"Images"+ dirSep +"")
def getFlag():
return cLastPageFlag
def setFlag(boolean):
cLastPageFlag = boolean
def accountForDuplicates(aDict):
counter = 0
bList = []
cList = []
newDict = {}
aDict = sorted(aDict.items(), key=lambda item: item[1])
#print(aDict)
for i1 in range(len(aDict)):
#print(aDict[i1][1])
bList.append(aDict[i1][1])
for i2 in range(len(aDict)):
cList.append(aDict[i2][0])
bList.append("buffer")
cList.append("buffer")
for h in range(len(bList)-1):
if bList[h] == bList[h+1]:
#print(bList[h])
#updatedItem = {cList[h]:}
newDict[cList[h]] = (str(counter) + " " + bList[h])
counter += 1
else:
newDict[cList[h]] = bList[h]
return newDict
def makeConformUrl(aList):
for k in range(len(aList)-1):
if(str(aList[k]).startswith("/")):
aList[k] = "https://yiff.party" + str(aList[k])
return aList
def downloader(myUrl, myImageName, myPatreonAuthor): #recursively tries to download the images - in the case of the site not accepting anymore requests
try:
r = requests.get(myUrl, headers = {'User-Agent': userAgent}, timeout=(2,5), stream=True)
if r.status_code == 200:
with open("."+ dirSep +"Images"+ dirSep +"" + myPatreonAuthor + ""+ dirSep +"" + myImageName, 'wb') as f:
for chunk in r:
f.write(chunk)
else:
print("beep -- file skipped: " + myUrl)
except:
print("Skipped " + myUrl)
missingFiles.append(myUrl)
return
def downloadImages(url, urlCounter):
imageNameDict = {}
linkList = []
imgContainerUrls = []
imageCounter = 0
#Gets the Patreon Author's number. Fails if link is shorter than https://yiff.party/patreon/1.
#Also Creates a directory for the images.
try:
patreonAuthor = url.split("/")[4]
except IndexError:
print("\nThe given url might not be valid.\nSkipping url: " + url + "\n")
print("============" + str(urlCounter) + "/" + str(amountOfLinks) + "===============\n")
return
else:
if not os.path.isdir("."+ dirSep +"Images"+ dirSep +"" + patreonAuthor + ""+ dirSep +""):
os.mkdir("."+ dirSep +"Images"+ dirSep +"" + patreonAuthor + ""+ dirSep +"")
#Gets the page and converts/reads it.
response = requests.get(url, headers = {'User-Agent': userAgent})
soup = bs(response.text, "html.parser")
newUrl = "https://yiff.party/render_posts?s=patreon&c=" + patreonAuthor + "&p="
#searches for the highest page number
lastPage = soup.find_all('a', {'class':'btn pag-btn'})
try:
lastPage = int(lastPage[1]["data-pag"])
#print(lastPage)
cLPFlag = getFlag()
if cLPFlag:
if cLastPage > lastPage:
sys.exit()
lastPage = cLastPage
startPage = startPage
setFlag(False)
else:
startPage = 0
for i in range(startPage, lastPage):
imgContainerUrls.append(newUrl + str(i+1)) #appends the page number to the url
except SystemExit:
sys.exit("Last Page Number is too high. Please choose a number lower or equal than: " + str(lastPage))
except:
lastPage = 1
imgContainerUrls.append(newUrl + str(1))
#print(imgContainerUrls)
for containerUrl in imgContainerUrls:
#print(containerUrl)
response = requests.get(containerUrl, headers = {'User-Agent': userAgent})
soup = bs(response.text, "html.parser")
containersPart1 = soup.find_all('div', {'class': 'card-action'})
containersPart2 = soup.find_all('div', {'class': 'post-body'})
containersPart3 = soup.find_all('div', {'class': 'card-attachments'})
containers = containersPart1 + containersPart2 + containersPart3
#Checks if there are any images and returns an error if not. Also skips the url.
try:
containers[0]
except IndexError:
page = containerUrl.split("p=")[1]
print("\nCould not find Images. The cause might be a invalid url or there just aren't any Images.")
missingFiles.append("Page " + page + " was skipped. You can retry scraping this page with: python " + sys.argv[0] + " " + page + " " + page + " urls")
#print("Skipping url: " + url + "\n")
#print("============" + str(urlCounter) + "/" + str(amountOfLinks) + "===============\n")
continue
containerCounter1 = len(containersPart1) #amount of containers with class 'card-action'
containerCounter2 = len(containersPart2) #amount of containers with class 'post-body'
i = 0
#Searches for Image-Boxes.
for container in containers:
i += 1
if i <= containerCounter1:
try:
shortLink = container.a['href']
except:
continue
elif i <= containerCounter2 and i > containerCounter1:
try:
shortLink = container.p.a['href']
except:
continue
else:
try:
subContainer = container.p
subContainer = subContainer.find_all('a')
for subCont in subContainer:
linkList.append(subCont['href'])
except:
continue
linkList.append(shortLink)
linkList = makeConformUrl(sorted(linkList))
linkList = list(dict.fromkeys(linkList))
for h in range(0, len(linkList)-1):
updatedValue = {str(h):str(linkList[h].split("/")[len(linkList[h].split("/"))-1])}
imageNameDict.update(updatedValue)
imageNameDict = accountForDuplicates(imageNameDict)
#print(len(linkList))
#print(imageNameDict)
#print(imageCounter)
#print('\n'.join(map(str, sorted(linkList))))
#Loops through the Image Urls amd downloads them.
for i in range(len(linkList)-1):
imageName = imageNameDict[str(i)]
urlI = linkList[i]
print("Downloading " + imageName) #Shows the name of the current downloading image
downloader(urlI, imageName, patreonAuthor)
imageCounter += 1
#Just a finishing message.
if imageCounter == 0:
print("No files downloaded. Maybe there are no files or you messed up the order of the arguments: python " + sys.argv[0] + " [start page] [last page] urls")
else:
print("\nSuccessfully downloaded " + str(imageCounter) + " Images/Files!\n")
print("============" + str(urlCounter) + "/" + str(amountOfLinks) + "===============\n")
f = open("SkippedLinks.txt", "w+")
for files in missingFiles:
f.write(files + "\n")
f.close()
#Loops through all Yiff.party-Urls and downloads the images.
for url in urlList:
urlCounter += 1
downloadImages(url, urlCounter)
| 35.293878 | 165 | 0.566902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,600 | 0.300682 |
e8842b920ecc15088becbb1b9e81fd1fd0244038 | 3,031 | py | Python | api.py | klapp101/GOAT | f948bf8d6566f27e6b55ed49a34dc470df854249 | [
"MIT"
] | 1 | 2020-06-13T04:24:23.000Z | 2020-06-13T04:24:23.000Z | api.py | klapp101/GOAT | f948bf8d6566f27e6b55ed49a34dc470df854249 | [
"MIT"
] | null | null | null | api.py | klapp101/GOAT | f948bf8d6566f27e6b55ed49a34dc470df854249 | [
"MIT"
] | null | null | null | import json
import requests
import time
from discord_webhook import DiscordWebhook, DiscordEmbed
webhook_url = 'https://discordapp.com/api/webhooks/672159508675690497/4UtaClAc7rKMJsEvbR4iYf-Razv4M3ZWtkYDOxBzLfiDzJhI7RSFpoLn6iijBiRcaNOR'
webhook = DiscordWebhook(webhook_url)
pid = '508214-660'
headers = {
'Connection': 'keep-alive',
'accept': 'application/json',
'Origin': 'https://www.goat.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'content-type': 'application/x-www-form-urlencoded',
'Sec-Fetch-Site': 'cross-site',
'Sec-Fetch-Mode': 'cors',
'Referer': 'https://www.goat.com/search?query='+ pid,
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
}
params = {
'x-algolia-agent': 'Algolia for vanilla JavaScript 3.25.1',
'x-algolia-application-id': '2FWOTDVM2O',
'x-algolia-api-key': 'ac96de6fef0e02bb95d433d8d5c7038a',
}
data = {
"distinct": 'true',
'facetFilters': 'product_category: shoes',
'facets': 'size',
'hitsPerPage': '48',
'numericFilters': '[]',
'page': '0',
'query': pid,
'clickAnalytics': "true"
}
response = requests.post('https://2fwotdvm2o-dsn.algolia.net/1/indexes/product_variants_v2/query', headers=headers, params=params,json=data)
response_json = response.json()
response_json_dict = response_json['hits'][0]
product_id = response_json_dict['product_template_id']
print(product_id)
def obtainBasicInfo():
webhook = DiscordWebhook(url=webhook_url)
r_api = requests.get('https://www.goat.com/web-api/v1/product_variants?productTemplateId='+ str(product_id),headers=headers)
data = r_api.json()
embed = DiscordEmbed(title=response_json_dict['name'], url=headers['Referer'], color=242424)
embed.set_thumbnail(url=response_json_dict['main_picture_url'])
sizes = []
shoe_conditions = []
box_conditions = []
prices = []
for i in data:
sizes.append(str(i['size']))
shoe_conditions.append(i['shoeCondition'])
box_conditions.append(i['boxCondition'])
prices.append(str(int(i['lowestPriceCents']['amountUsdCents'])/100))
print(' Size: ' + str(i['size']) + '\n' + ' Shoe condition: ' + i['shoeCondition'] + '\n' + ' Box condition: ' + i['boxCondition'] + '\n' + ' $' + str(int(i['lowestPriceCents']['amountUsdCents'])/100) + '\n' + '-----------------')
embed.add_embed_field(name='Size', value=(str(i['size'])))
embed.add_embed_field(name='Shoe Condition', value=str(i['shoeCondition']))
embed.add_embed_field(name='Box Condition', value=str(i['boxCondition']))
embed.add_embed_field(name='Price', value='$' + str(int(i['lowestPriceCents']['amountUsdCents'])/100))
webhook.add_embed(embed)
send_hook = webhook.execute()
time.sleep(2)
embed.fields = []
print(sizes)
print(shoe_conditions)
print(box_conditions)
print(prices)
obtainBasicInfo()
| 40.413333 | 238 | 0.670406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,395 | 0.460244 |
e8846d4d1a26fec464850cfbf7a7e60056a6debf | 3,898 | py | Python | pytai/tests/test_application.py | angea/pytai | 2db2518be9c4324aed182f36208c0e0ccf53a96c | [
"Apache-2.0"
] | 30 | 2021-08-12T12:50:18.000Z | 2022-02-18T03:49:20.000Z | pytai/tests/test_application.py | angea/pytai | 2db2518be9c4324aed182f36208c0e0ccf53a96c | [
"Apache-2.0"
] | 3 | 2021-10-31T17:28:39.000Z | 2021-11-12T13:19:14.000Z | pytai/tests/test_application.py | angea/pytai | 2db2518be9c4324aed182f36208c0e0ccf53a96c | [
"Apache-2.0"
] | 5 | 2021-10-31T15:55:19.000Z | 2022-02-24T06:31:18.000Z | """Unit tests for the pytai application.
License:
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
import xml.etree.ElementTree as ET
from typing import Union, Callable
from unittest.mock import patch, MagicMock
from pathlib import Path
try:
from .. import application
from .xml_utils import *
except ImportError:
if __name__ == "__main__":
import sys
sys.exit(f'This script needs to be run from the root folder:\n'
f'python -m pytai.tests.{Path(sys.argv[0]).stem}\n'
f'python -m unittest pytai.tests.{Path(sys.argv[0]).stem}')
else:
raise
class MockView(MagicMock):
"""Mock class to mock the application's View"""
def __init__(self, *args, **kwargs):
super().__init__()
def add_tree_item(self, parent_handle: Union[ET.Element, str], **kwargs) -> ET.ElementTree:
"""Build an XML tree using the provided input."""
if parent_handle == "":
self.root = ET.Element("root")
return self.root
d = {k: str(v) for k, v in kwargs.items()}
return ET.SubElement(parent_handle, "node", **d)
def schedule_function(self, time_ms: int, callback: Callable[[], None]) -> None:
callback()
def start_worker(self, callback: Callable[[], bool]) -> None:
reschedule = True
while reschedule:
reschedule = callback()
class TestOffsets(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tmp_path = Path(__file__).resolve().parent / "tmp"
cls.tmp_path.mkdir(parents=True, exist_ok=True)
@staticmethod
def get_resource_path(file_name: str):
return Path(__file__).resolve().parent / "resources" / file_name
def generic_test(self, file_type):
path = self.get_resource_path(f"{file_type}.{file_type}")
format = {"kaitai_format": file_type}
with patch(__name__ + '.application.v.View', MockView()):
app = application.Application(file = path, format = format)
with open(self.tmp_path / "actual_output.xml", "w") as o:
o.write(xml_to_str(app.view.root))
expected_xml = xml_from_file(self.get_resource_path(f"{file_type}.xml"))
try:
xml_compare(app.view.root, expected_xml)
except RuntimeError as e:
self.fail(str(e))
def test_png(self):
self.generic_test("png")
def test_bmp(self):
self.generic_test("bmp")
def test_zip(self):
self.generic_test("zip")
def test_elf(self):
self.generic_test("elf")
def test_wav(self):
self.generic_test("wav")
if __name__ == "__main__":
unittest.main() | 35.436364 | 96 | 0.641355 | 2,107 | 0.540534 | 0 | 0 | 290 | 0.074397 | 0 | 0 | 1,612 | 0.413545 |
e885d4d5a1bfe3061457ca2b736ac1a2e85b233e | 6,898 | py | Python | project-posenet/pose_opencv.py | vanduc103/coral_examples | a514d003a3948cb0888d2dabc0bdd93939f8ddd0 | [
"Apache-2.0"
] | null | null | null | project-posenet/pose_opencv.py | vanduc103/coral_examples | a514d003a3948cb0888d2dabc0bdd93939f8ddd0 | [
"Apache-2.0"
] | null | null | null | project-posenet/pose_opencv.py | vanduc103/coral_examples | a514d003a3948cb0888d2dabc0bdd93939f8ddd0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from PIL import Image
from pose_engine import PoseEngine
import cv2
import argparse
import common
from edgetpu.detection.engine import DetectionEngine
BODY_PARTS = {"nose": 0, "left eye": 1, "right eye": 2, "left ear": 3, "right ear": 4,
"left shoulder": 5, "right shoulder": 6, "left elbow": 7, "right elbow": 8, "left wrist": 9,
"right wrist": 10, "left hip": 11, "right hip": 12, "left knee": 13, "right knee": 14,
"left ankle": 15, "right ankle": 16}
EDGES = (
('nose', 'left eye'),
('nose', 'right eye'),
('nose', 'left ear'),
('nose', 'right ear'),
('left ear', 'left eye'),
('right ear', 'right eye'),
('left eye', 'right eye'),
('left shoulder', 'right shoulder'),
('left shoulder', 'left elbow'),
('left shoulder', 'left hip'),
('right shoulder', 'right elbow'),
('right shoulder', 'right hip'),
('left elbow', 'left wrist'),
('right elbow', 'right wrist'),
('left hip', 'right hip'),
('left hip', 'left knee'),
('right hip', 'right knee'),
('left knee', 'left ankle'),
('right knee', 'right ankle'),
)
import zmq
from datetime import datetime
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--camera_idx', type=str, help='Index of which video source to use. ', default = 1)
parser.add_argument('--model', type=str, help='Pose model to use. ', default = '')
parser.add_argument('--detect', action='store_true', help='Detect person', default = False)
parser.add_argument('--filtered_labels', type=str, help='Filtered labels. ', default = '0')
parser.add_argument('--zmq', action='store_true', help='Send via ZeroMQ', default = False)
args = parser.parse_args()
#engine = PoseEngine('models/posenet_mobilenet_v1_075_481_641_quant_decoder_edgetpu.tflite')
engine = PoseEngine(args.model)
_, image_height, image_width, _ = engine.get_input_tensor_shape()
if args.detect:
detect_engine = DetectionEngine('../examples-camera/all_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
print("Load all models done!")
if args.zmq:
# imagezmq sender
#import imagezmq
#sender_img = imagezmq.ImageSender(connect_to='tcp://*:5555', REQ_REP=False) # REQ_REP=False: use PUB/SUB (non-block)
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5555")
cap = cv2.VideoCapture(args.camera_idx)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
pil_image = Image.fromarray(cv2_im_rgb)
pil_image.resize((image_width, image_height), Image.NEAREST)
detect_objs = []
if args.detect:
detect_objs = detect_engine.detect_with_image(pil_image,
threshold=0.5,
keep_aspect_ratio=True,
relative_coord=True,
top_k=10)
if args.filtered_labels:
detect_objs = [obj for obj in detect_objs if str(obj.label_id) in args.filtered_labels]
poses, inference_time = engine.DetectPosesInImage(np.uint8(pil_image))
cv2_im, all_points = draw_skel_and_kp(cv2_im, poses, detect_objs)
#print(all_points.shape)
if args.zmq:
# imagezmq send image
#from datetime import datetime
#timestamp = datetime.timestamp(datetime.now())
#sender_img.send_image(timestamp, cv2_im_rgb)
# zmq send points
timestamp = datetime.timestamp(datetime.now())
send_array(socket, np.array(all_points).astype(np.float), timestamp)
cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
cv2.imshow('frame', cv2_im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def draw_skel_and_kp(
img, poses, detect_objs,
min_pose_score=0.3, min_part_score=0.2):
out_img = img
adjacent_keypoints = []
cv_keypoints = []
all_points = []
for pose in poses:
if pose.score < min_pose_score: continue
xys = {}
points = [(-1., -1.)] * 17
for label, keypoint in pose.keypoints.items():
if keypoint.score < min_part_score: continue
# Coord
kp_y = keypoint.yx[0]
kp_x = keypoint.yx[1]
xys[label] = (kp_x, kp_y)
cv_keypoints.append(cv2.KeyPoint(int(kp_x), int(kp_y), 10. * keypoint.score))
points[BODY_PARTS[label]] = (int(kp_x), int(kp_y))
all_points.append(np.array(np.stack([p for p in points], axis=0)))
results = []
for a, b in EDGES:
if a not in xys or b not in xys: continue
ax, ay = xys[a]
bx, by = xys[b]
results.append(np.array([[ax, ay], [bx, by]]).astype(np.int32),)
adjacent_keypoints.extend(results)
if len(all_points) > 0:
all_points = np.stack([points for points in all_points], axis=0)
height, width, channels = img.shape
for obj in detect_objs:
x0, y0, x1, y1 = obj.bounding_box.flatten().tolist()
x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
out_img = cv2.rectangle(out_img, (x0, y0), (x1, y1), (0, 255, 0), 1) # fill color
out_img = cv2.drawKeypoints(
out_img, cv_keypoints, outImage=np.array([]), color=(0, 0, 0),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(0, 255, 255), thickness=2)
return out_img, np.array(all_points)
def send_array(socket, A, msg='None', flags=0, copy=True, track=False):
    """send a numpy array with metadata"""
    # Send a small JSON header first (multipart) so the receiver knows
    # how to reconstruct the raw buffer that follows.
    header = {
        'msg': msg,
        'dtype': str(A.dtype),
        'shape': A.shape,
    }
    socket.send_json(header, flags | zmq.SNDMORE)
    return socket.send(A, flags, copy=copy, track=track)
# Script entry point: run the capture/detection loop defined above.
if __name__ == '__main__':
    main()
| 37.901099 | 127 | 0.615251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,035 | 0.295013 |
e8868197bd242c39ce7b91dd052bac2dfdb7f10c | 4,989 | py | Python | src/fparser/two/tests/fortran2003/test_include_statement.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 33 | 2017-08-18T16:31:27.000Z | 2022-03-28T09:43:50.000Z | src/fparser/two/tests/fortran2003/test_include_statement.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 319 | 2017-01-12T14:22:07.000Z | 2022-03-23T20:53:25.000Z | src/fparser/two/tests/fortran2003/test_include_statement.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 17 | 2017-10-13T07:12:28.000Z | 2022-02-11T14:42:18.000Z | # Copyright (c) 2019 Science and Technology Facilities Council
# All rights reserved.
# Modifications made as part of the fparser project are distributed
# under the following license:
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test Fortran Include Statement: This file tests the parsing of an
include statement. Whilst include is not part of the standard Fortran
rules (the include should include code as the code is being parsed)
there are cases where users might like to keep the include statement
in the Fortran parse tree and output it again.
'''
import pytest
from fparser.api import get_reader
from fparser.two.Fortran2003 import Include_Stmt, InternalError
from fparser.two.utils import NoMatchError
def test_include_stmt(f2003_create):
    '''Check that a basic include statement is parsed correctly from
    both a plain string and a reader object.

    '''
    line = "include 'my-non-existant-file.inc'"
    # Exercise the match twice: once with the raw string and once with
    # a reader wrapping the same text.
    for source in (line, get_reader(line)):
        ast = Include_Stmt(source)
        assert "INCLUDE 'my-non-existant-file.inc'" in str(ast)
        assert repr(ast).replace("u'", "'") == \
            ("Include_Stmt(Include_Filename("
             "'my-non-existant-file.inc'))")
def test_spaces(f2003_create):
    '''Check that surrounding spaces (before the keyword and after the
    file string) are tolerated.

    '''
    line = " include 'my-non-existant-file.inc' "
    stmt = Include_Stmt(line)
    assert "INCLUDE 'my-non-existant-file.inc'" in str(stmt)
def test_no_space(f2003_create):
    '''Check that the include keyword may butt directly up against the
    file string with no separating space.

    '''
    stmt = Include_Stmt("include'my-non-existant-file.inc'")
    assert "INCLUDE 'my-non-existant-file.inc'" in str(stmt)
def test_case(f2003_create):
    '''Check that the include keyword is matched case-insensitively.'''
    stmt = Include_Stmt("InClUdE 'my-non-existant-file.inc'")
    assert "INCLUDE 'my-non-existant-file.inc'" in str(stmt)
def test_double_quotes(f2003_create):
    '''Check that the file string may be delimited by double quotes.'''
    stmt = Include_Stmt('include "my-non-existant-file.inc"')
    assert "INCLUDE 'my-non-existant-file.inc'" in str(stmt)
def test_errors(f2003_create):
    '''Check that syntax errors produce a NoMatchError exception.'''
    bad_inputs = [None, "", " ", "includ", "includ 'x'", "include",
                  "include ''", "include \"x'", "include 'x\"", "include 'xxx",
                  "include \"xxx", "include xxx'", "include xxx\"",
                  "include x'x'", "include 'x'x", "x include 'x'"]
    for bad in bad_inputs:
        # Each malformed line must fail to match and name itself in the
        # resulting error message.
        with pytest.raises(NoMatchError) as excinfo:
            _ = Include_Stmt(bad)
        assert "Include_Stmt: '{0}'".format(bad) in str(excinfo.value)
def test_include_filename_error(f2003_create, monkeypatch):
    '''Check that an InternalError is raised when Include_Filename
    returns None or an empty string. Matching errors would normally
    raise first, so this can only be triggered by patching.

    '''
    # Force Include_Filename to misbehave by always returning None.
    monkeypatch.setattr("fparser.two.Fortran2003.Include_Filename",
                        lambda file_name: None)
    line = "include ' '"
    with pytest.raises(InternalError) as excinfo:
        _ = Include_Stmt(line)
    assert ("Include_Filename should never return None or an empty "
            "name") in str(excinfo.value)
| 38.976563 | 78 | 0.713971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,501 | 0.701744 |
e8876c2cd4fe9d62ba323bed4e9d4016649f22e9 | 4,992 | py | Python | src/common.py | OdatNurd/HyperHelpAuthor | 8082a8eb35d54b53299e421c6005ae5c1fb49b13 | [
"MIT"
] | 1 | 2020-08-18T16:12:06.000Z | 2020-08-18T16:12:06.000Z | src/common.py | OdatNurd/HyperHelpAuthor | 8082a8eb35d54b53299e421c6005ae5c1fb49b13 | [
"MIT"
] | 2 | 2020-02-21T22:48:56.000Z | 2020-02-21T23:15:51.000Z | src/common.py | OdatNurd/HyperHelpAuthor | 8082a8eb35d54b53299e421c6005ae5c1fb49b13 | [
"MIT"
] | null | null | null | import sublime
import os
import textwrap
import hyperhelpcore
from hyperhelpcore.common import log, hh_syntax
from hyperhelpcore.core import help_index_list
###----------------------------------------------------------------------------
def loaded():
"""
Do package setup at package load time.
"""
hha_setting.obj = sublime.load_settings("HyperHelpAuthor.sublime-settings")
hha_setting.default = {
"update_header_on_save": True,
"reload_index_on_save": True,
"lint_output_to_view": False,
"author_view_settings": {
"rulers": [80],
"match_selection": True,
"draw_indent_guides": True
}
}
hyperhelpcore.initialize()
def unloaded():
    """
    Do package cleanup at unload time.
    """
    # Nothing to tear down currently; kept for symmetry with loaded().
    pass
###----------------------------------------------------------------------------
def hha_setting(key):
    """
    Fetch a HyperHelpAuthor setting from the cached settings object,
    falling back to the packaged defaults stored on this function.
    """
    fallback = hha_setting.default.get(key, None)
    return hha_setting.obj.get(key, fallback)
def is_authoring_source(view):
    """
    Determine whether the given view shows an editable help source file.
    """
    # Help sources carry the hyperhelp help scope; read-only views
    # (e.g. packed files) do not count as authoring candidates.
    if not view.match_selector(0, "text.hyperhelp.help"):
        return False
    return not view.is_read_only()
def package_for_view(view):
    """
    Find the help index entry for the package whose document root
    contains the file shown in this view.

    Returns None when the view has no file, the file lies outside the
    Sublime Packages folder, or no loaded help package claims it. Only
    the document root is checked, not actual package membership.
    """
    if view.file_name() is None:
        return None

    spp = sublime.packages_path()
    if not view.file_name().startswith(spp):
        return None

    # Compare the Packages-relative name against each document root.
    rel_name = view.file_name()[len(spp) + 1:]
    for pkg_name, pkg_info in help_index_list().items():
        if rel_name.startswith(pkg_info.doc_root):
            return pkg_info

    return None
def local_help_filename(pkg_info, help_file):
    """
    Compute the absolute path a package's help file would have when
    stored loose on disk under the Packages folder.
    """
    root = os.path.join(sublime.packages_path(), pkg_info.doc_root)
    return os.path.normpath(os.path.join(root, help_file))
def local_help_index(pkg_info):
    """
    Compute the absolute on-disk path of the help index file for the
    given package.
    """
    # The index is recorded as a "Packages/..." resource path; drop the
    # prefix before anchoring it at the local Packages folder.
    rel_index = pkg_info.index_file[len("Packages/"):]
    return os.path.normpath(os.path.join(sublime.packages_path(), rel_index))
def format_template(template, *args):
    """
    Interpolate the arguments into the template, remove common indent,
    and strip surrounding whitespace.

    This is a modified version of code from Default/new_templates.py
    from the core Sublime code.
    """
    filled = template % args
    return textwrap.dedent(filled).strip()
def open_local_help(pkg_info, help_file, window=None):
    """
    Open a package's help file for editing, if it exists loose on disk.
    """
    if window is None:
        window = sublime.active_window()
    local_path = local_help_filename(pkg_info, help_file)
    if not os.path.exists(local_path):
        return log(format_template(
            """
            Specified help file does not exist; cannot open.
            Note: HyperHelpAuthor can not currently open help
            files from packed packages for editing.
            """), dialog=True)
    view = window.open_file(local_path)
    # Mark the view so listeners know it is an authoring view.
    view.settings().set("_hh_auth", True)
    # Apply settings now unless the file is still loading, in which case
    # a listener handles it once loading completes.
    if not view.is_loading():
        apply_authoring_settings(view)
def open_help_index(pkg_info, window=None):
    """
    Open a package's help index file for editing, if it exists loose on
    disk.
    """
    if window is None:
        window = sublime.active_window()
    # The index is stored as a resource file spec; local_help_index
    # resolves it to an absolute local path.
    local_path = local_help_index(pkg_info)
    if not os.path.exists(local_path):
        return log(format_template(
            """
            Specified help index does not exist; cannot open.
            Note: HyperHelpAuthor can not currently open help
            indexes from packed packages for editing.
            """), dialog=True)
    window.open_file(local_path)
def apply_authoring_settings(view):
    """
    Configure a view for help authoring: apply the help syntax and the
    user's configured authoring view settings.
    """
    # Help files with no header would otherwise not get the syntax set.
    view.assign_syntax(hh_syntax("HyperHelp-Help.sublime-syntax"))

    settings = view.settings()
    for option, value in hha_setting("author_view_settings").items():
        settings.set(option, value)
###----------------------------------------------------------------------------
| 29.892216 | 79 | 0.634215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,451 | 0.490986 |
e8887bd9a2e073f305e0565db697d9a93e02600d | 318 | py | Python | player/migrations/0002_remove_music_thumbnail.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 3 | 2015-06-16T11:12:29.000Z | 2019-05-03T09:09:21.000Z | player/migrations/0002_remove_music_thumbnail.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 16 | 2015-08-18T14:35:55.000Z | 2021-06-10T17:31:04.000Z | player/migrations/0002_remove_music_thumbnail.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 1 | 2016-10-19T14:48:52.000Z | 2016-10-19T14:48:52.000Z | from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('player', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='music',
name='thumbnail',
),
]
| 17.666667 | 40 | 0.603774 | 233 | 0.732704 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.125786 |
e888f8bf6414e902932d3653ddddb568bc3aa26b | 5,717 | py | Python | build_card_bank.py | lawrencesim/portuguese-verb-cards | 3d754981bc5195057cf88a27741bc2eea1722f13 | [
"MIT"
] | null | null | null | build_card_bank.py | lawrencesim/portuguese-verb-cards | 3d754981bc5195057cf88a27741bc2eea1722f13 | [
"MIT"
] | null | null | null | build_card_bank.py | lawrencesim/portuguese-verb-cards | 3d754981bc5195057cf88a27741bc2eea1722f13 | [
"MIT"
] | null | null | null | import os, csv, time, shutil
from bin import cardbank
from bin import builder
def add_build(add_cards):
    '''Build card bank by specifically adding new cards.'''
    if not add_cards:
        return

    # Index the incoming cards by infinitive for quick replacement.
    pending = {card["inf"]: card for card in add_cards}

    # Read in the existing built card bank, if any.
    existing = []
    if os.path.exists("bank/card-bank-built.csv"):
        existing = cardbank.read("bank/card-bank-built.csv", build_forms=False)

    # Start HTTP session for reuse across lookups.
    session = builder.session()
    print("Building new card bank..")

    new_cards = []
    updated_cards = []
    new_card_bank = []
    errored = []
    try:
        # Rebuild the bank: carry over untouched cards, rebuild replaced
        # ones from their new definitions.
        for card in existing:
            replacement = pending.pop(card["inf"], None)
            if replacement is None:
                new_card_bank.append(card)
            else:
                _build_card_and_add(replacement, new_card_bank, new_cards,
                                    errored, session)
        # Anything left in the map is a brand new card.
        for card in pending.values():
            _build_card_and_add(card, new_card_bank, new_cards, errored,
                                session)
    finally:
        session.close()

    finish_build(new_card_bank, new_cards, updated_cards, errored)
def build_from_difference(force_rebuild=None):
    '''Build card bank by rectifying differences in card bank basic and built.

    :param force_rebuild: optional iterable of infinitives whose cards
        are rebuilt even when an up-to-date built card already exists.
    '''
    # Use None as the default to avoid a shared mutable default list.
    if force_rebuild is None:
        force_rebuild = []

    # Read card bank basic (all basic definitions, not built out).
    card_bank_basic = cardbank.read("bank/card-bank-basic.csv", build_forms=False)

    # Read the existing built card bank and index it by infinitive.
    existing = []
    if os.path.exists("bank/card-bank-built.csv"):
        existing = cardbank.read("bank/card-bank-built.csv", build_forms=False)
    existing_map = {}
    for card in existing:
        existing_map[card["inf"]] = card

    # Start HTTP session for reuse; ensure it is always closed (this
    # function previously leaked the session, unlike add_build).
    session = builder.session()
    print("Building new card bank..")

    new_cards = []
    updated_cards = []
    new_card_bank = []
    errored = []
    try:
        for card in card_bank_basic:
            # If the card already exists, check whether updating the
            # supplied fields suffices (unless a rebuild is forced).
            if (card["inf"] not in force_rebuild) and (card["inf"] in existing_map):
                existing_card = existing_map[card["inf"]]
                # If any built field is missing, something's wrong:
                # rebuild the card entirely.
                rebuild = False
                for field in builder.BUILT_FIELDS:
                    if not field in existing_card or not existing_card[field]:
                        rebuild = True
                        break
                # Otherwise refresh only the supplied fields, which does
                # not affect built fields.
                update = False
                for field in builder.SUPPLIED_FIELDS:
                    if rebuild:
                        break
                    if field not in existing_card or existing_card[field] != card[field]:
                        update = True
                        existing_card[field] = card[field]
                # No rebuild needed: keep the (possibly updated) card.
                if not rebuild:
                    new_card_bank.append(existing_card)
                    if update:
                        updated_cards.append(existing_card)
                    continue
            # Card is new or needs rebuilding.
            _build_card_and_add(card, new_card_bank, new_cards, errored, session)
    finally:
        session.close()

    finish_build(new_card_bank, new_cards, updated_cards, errored)
def _build_card_and_add(card, new_card_bank, new_cards, errored, session=None):
    # Fetch the verb's tense tables from the web source.
    tenses = builder.get(card["inf"], session=session)
    if isinstance(tenses, Warning):
        # A Warning return value signals the verb was invalid somehow.
        errored.append((card, str(tenses)))
    else:
        # Build out the card and record it both in the bank and in the
        # list of newly created cards.
        builder.build(card, tenses)
        new_card_bank.append(card)
        new_cards.append(card)
    # Throttle requests so we don't spam the website.
    time.sleep(1)
def finish_build(new_card_bank, new_cards, updated_cards, errored):
    '''Finish build, save card bank, and print information about build.'''
    print("")
    if not (new_cards or updated_cards or errored):
        print("No changes")
        return

    if new_cards or updated_cards:
        # Keep a backup of the previous bank before overwriting it.
        if os.path.exists("bank/card-bank-built.csv"):
            shutil.copyfile("bank/card-bank-built.csv", "bank/card-bank-built.bkp.csv")
            print("Old card bank backed up as: bank/card-bank-built.bkp.csv")
        # Write the rebuilt bank.
        with open("bank/card-bank-built.csv", "w", newline="", encoding="utf-8") as csvf:
            writer = csv.DictWriter(csvf, fieldnames=builder.FIELDS)
            writer.writeheader()
            writer.writerows(new_card_bank)
        print("New card bank written to: bank/card-bank-built.csv")

    # Report what changed, grouped by category.
    if new_cards:
        print("\nNew cards created:")
        for card in new_cards:
            print("  {0}".format(card["inf"]))
    if updated_cards:
        print("\nCards updated:")
        for card in updated_cards:
            print("  {0}".format(card["inf"]))
    if errored:
        print("\nError building card(s) for:")
        for card, reason in errored:
            print("  {0} : {1}".format(card["inf"], reason))
# if called straight-up, build from difference between basic and build card bank
# (see build_from_difference for the reconciliation rules)
if __name__ == "__main__":
    build_from_difference()
| 35.955975 | 99 | 0.62078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,826 | 0.319398 |
e889b7bfe344006d80c5021c1a5f4296e429ca4d | 1,749 | py | Python | flaskapp/models.py | guillermosainz/instareplic | a4fb26269d54be1a140b1802a4d8013a6aa0ec70 | [
"MIT"
] | 53 | 2015-02-15T19:00:33.000Z | 2022-02-08T09:58:54.000Z | flaskapp/models.py | guillermosainz/instareplic | a4fb26269d54be1a140b1802a4d8013a6aa0ec70 | [
"MIT"
] | 5 | 2016-06-03T13:12:50.000Z | 2018-05-29T08:31:44.000Z | flaskapp/models.py | guillermosainz/instareplic | a4fb26269d54be1a140b1802a4d8013a6aa0ec70 | [
"MIT"
] | 15 | 2016-05-18T20:09:53.000Z | 2020-06-01T04:06:00.000Z | from mongoengine import StringField, EmailField, BooleanField
from flask.ext.login import UserMixin
import requests
import json
from mongoengine import Document
from social.apps.flask_app.me.models import FlaskStorage
class User(Document, UserMixin):
    """Application user stored in MongoDB, with Facebook Graph helpers."""
    username = StringField(max_length=200)
    password = StringField(max_length=200, default='')
    name = StringField(max_length=100)
    fullname = StringField(max_length=100)
    first_name = StringField(max_length=100)
    last_name = StringField(max_length=100)
    email = EmailField()
    active = BooleanField(default=True)

    def facebook_api(self, url, fields=None):
        # Authenticate with the stored Facebook access token; optionally
        # restrict the response to the requested fields.
        params = {
            'access_token': self.get_social_auth("facebook").extra_data['access_token']
        }
        if fields:
            params["fields"] = ",".join(fields)
        response = requests.get(url, params=params)
        if response.status_code != 200:
            raise Exception("Status was %s" % response.status_code)
        return json.loads(response.content)

    def get_facebook_albums(self):
        # Only the id and name of each album are requested.
        albums = self.facebook_api("https://graph.facebook.com/v2.2/me/albums",
                                   fields=["id", "name"])
        return albums["data"]

    def get_facebook_photos(self, album_id):
        # Follow the Graph API's paging links until exhausted.
        photos = []
        url = "https://graph.facebook.com/v2.2/%s/photos" % album_id
        while url:
            page = self.facebook_api(url, fields=[
                "id", "created_time", "from", "height", "width", "name", "source"
            ])
            photos += page["data"]
            url = page.get("paging", {}).get("next")
        return photos

    def get_social_auth(self, provider):
        return FlaskStorage.user.get_social_auth_for_user(self, provider=provider).get()

    def is_active(self):
        return self.active
| 30.155172 | 108 | 0.644368 | 1,527 | 0.87307 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.137793 |
e88a42379408c1158246f618ad6b8e62971bdb21 | 1,924 | py | Python | src/compas_blender/geometry/__init__.py | adacko/compas | 47c443ad3825897ec7ed932ec20734c2f08ef120 | [
"MIT"
] | null | null | null | src/compas_blender/geometry/__init__.py | adacko/compas | 47c443ad3825897ec7ed932ec20734c2f08ef120 | [
"MIT"
] | null | null | null | src/compas_blender/geometry/__init__.py | adacko/compas | 47c443ad3825897ec7ed932ec20734c2f08ef120 | [
"MIT"
] | 1 | 2022-01-16T02:32:43.000Z | 2022-01-16T02:32:43.000Z | """
********************************************************************************
compas_blender.geometry
********************************************************************************
.. currentmodule:: compas_blender.geometry
Object-oriented convenience wrappers for native Blender geometry.
.. autosummary::
:toctree: generated/
BlenderCurve
BlenderMesh
BlenderPoint
BlenderSurface
"""
try:
import bpy
except ImportError:
pass
class BlenderGeometry(object):
    """Base wrapper around a native Blender object and its data block.

    Parameters
    ----------
    obj : bpy.types.Object
        The Blender object to wrap.

    Notes
    -----
    Most operations are placeholders (``NotImplementedError``) meant to
    be provided by the concrete subclasses (point, curve, mesh, surface).
    """

    def __init__(self, obj):
        self.object = obj
        self.name = obj.name
        self.geometry = obj.data   # the object's data block
        self.otype = obj.type      # Blender object type string
        self.attributes = {}       # free-form attribute storage

    @property
    def location(self):
        """list: The object's location as a list of coordinates."""
        return list(self.object.location)

    @classmethod
    def from_selection(cls):
        raise NotImplementedError

    @classmethod
    def from_name(cls, name):
        """Construct a wrapper from the name of an existing Blender object.

        Fixed to use ``cls`` instead of hardcoding ``BlenderGeometry``,
        so subclasses return instances of themselves.
        """
        return cls(obj=bpy.data.objects[name])

    @staticmethod
    def find(guid):
        raise NotImplementedError

    @staticmethod
    def refresh():
        # Force a redraw of the Blender UI.
        bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)

    def delete(self):
        raise NotImplementedError

    def purge(self):
        raise NotImplementedError

    def hide(self):
        raise NotImplementedError

    def show(self):
        raise NotImplementedError

    def select(self):
        raise NotImplementedError

    def unselect(self):
        raise NotImplementedError

    def closest_point(self, *args, **kwargs):
        raise NotImplementedError

    def closest_points(self, *args, **kwargs):
        raise NotImplementedError
from .point import BlenderPoint
from .curve import BlenderCurve
from .mesh import BlenderMesh
from .surface import BlenderSurface
__all__ = [
'BlenderGeometry',
'BlenderPoint',
'BlenderCurve',
'BlenderMesh',
'BlenderSurface',
]
| 16.168067 | 80 | 0.596674 | 1,201 | 0.62422 | 0 | 0 | 423 | 0.219854 | 0 | 0 | 507 | 0.263514 |
e88a581db548ce41d35b785b7240edca51e28965 | 1,136 | py | Python | notes/reference/tutorials/an-introduction-to-asynch-programming-and-twisted/exercises/part3/ex2.py | aav789/study-notes | 34eca00cd48869ba7a79c0ea7d8948ee9bde72b9 | [
"MIT"
] | 43 | 2015-06-10T14:48:00.000Z | 2020-11-29T16:22:28.000Z | notes/reference/tutorials/an-introduction-to-asynch-programming-and-twisted/exercises/part3/ex2.py | aav789/study-notes | 34eca00cd48869ba7a79c0ea7d8948ee9bde72b9 | [
"MIT"
] | 1 | 2021-11-01T12:01:44.000Z | 2021-11-01T12:01:44.000Z | notes/reference/tutorials/an-introduction-to-asynch-programming-and-twisted/exercises/part3/ex2.py | lextoumbourou/notes | 5f94c59a467eb3eb387542bdce398abc0365e6a7 | [
"MIT"
] | 40 | 2015-03-02T10:33:59.000Z | 2020-05-24T12:17:05.000Z | from twisted.internet import reactor, task
class CounterManager(object):
counters = []
@classmethod
def add_counter(cls, counter):
cls.counters.append(counter)
@classmethod
def has_active_counters(cls):
return all([not c.is_active for c in cls.counters])
class Counter(object):
def __init__(self, name, between_time, counter=5):
self.name = name
self.between_time = between_time
self.counter = counter
self.is_active = True
CounterManager.add_counter(self)
def start(self):
self.loop_handler = task.LoopingCall(self.count)
self.loop_handler.start(self.between_time)
def count(self):
if self.counter == 0:
self.is_active = False
self.loop_handler.stop()
if CounterManager.has_active_counters():
print 'No counters active. Stopping!'
reactor.stop()
else:
print self.name + ':', self.counter
self.counter -= 1
# Demo: run three counters at different rates; the reactor stops when
# the last one reaches zero (see Counter.count).
print 'Start'
Counter('1', 0.5).start()
Counter('2', 1).start()
Counter('3', 0.1).start()
reactor.run()
e88a6019801aa572d384a6160bf098fc3d5f5bab | 6,889 | py | Python | tests/unit/test_validation_builder.py | shashank-google/professional-services-data-validator | db9c63add4a3ab40b09113ca7ed1c03b7c12e6f2 | [
"Apache-2.0"
] | 1 | 2021-12-24T10:01:31.000Z | 2021-12-24T10:01:31.000Z | tests/unit/test_validation_builder.py | shashank-google/professional-services-data-validator | db9c63add4a3ab40b09113ca7ed1c03b7c12e6f2 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_validation_builder.py | shashank-google/professional-services-data-validator | db9c63add4a3ab40b09113ca7ed1c03b7c12e6f2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import pytest
from data_validation import consts
from data_validation.config_manager import ConfigManager
# Base column-validation configuration shared by the tests below.
COLUMN_VALIDATION_CONFIG = {
    # BigQuery Specific Connection Config
    "source_conn": None,
    "target_conn": None,
    # Validation Type
    consts.CONFIG_TYPE: "Column",
    # Configuration Required Depending on Validator Type
    consts.CONFIG_SCHEMA_NAME: "bigquery-public-data.new_york_citibike",
    consts.CONFIG_TABLE_NAME: "citibike_trips",
    consts.CONFIG_CALCULATED_FIELDS: [],
    consts.CONFIG_GROUPED_COLUMNS: [],
    consts.CONFIG_FILTERS: [
        {
            consts.CONFIG_TYPE: consts.FILTER_TYPE_CUSTOM,
            consts.CONFIG_FILTER_SOURCE: "column_name > 100",
            consts.CONFIG_FILTER_TARGET: "column_name_target > 100",
        }
    ],
}
# Variant of the base config with a row limit applied.
QUERY_LIMIT = 100
COLUMN_VALIDATION_CONFIG_LIMIT = deepcopy(COLUMN_VALIDATION_CONFIG)
COLUMN_VALIDATION_CONFIG_LIMIT[consts.CONFIG_LIMIT] = QUERY_LIMIT
# A single grouped column (date-cast start time).
QUERY_GROUPS_TEST = [
    {
        consts.CONFIG_FIELD_ALIAS: "start_alias",
        consts.CONFIG_SOURCE_COLUMN: "starttime",
        consts.CONFIG_TARGET_COLUMN: "starttime",
        consts.CONFIG_CAST: "date",
    }
]
# A single sum aggregate over the start time column.
AGGREGATES_TEST = [
    {
        consts.CONFIG_FIELD_ALIAS: "sum_starttime",
        consts.CONFIG_SOURCE_COLUMN: "starttime",
        consts.CONFIG_TARGET_COLUMN: "starttime",
        consts.CONFIG_TYPE: "sum",
    }
]
# Several calculated fields, including one ("concat_calcs") that depends
# on other calculated fields at depth 1.
CALCULATED_MULTIPLE_TEST = [
    {
        consts.CONFIG_FIELD_ALIAS: "concat_start_station_name_end_station_name",
        consts.CONFIG_CALCULATED_SOURCE_COLUMNS: [
            "start_station_name",
            "end_station_name",
        ],
        consts.CONFIG_CALCULATED_TARGET_COLUMNS: [
            "start_station_name",
            "end_station_name",
        ],
        consts.CONFIG_TYPE: "concat",
    },
    {
        consts.CONFIG_FIELD_ALIAS: "concat_calcs",
        consts.CONFIG_CALCULATED_SOURCE_COLUMNS: [
            "ifnull_start_station_name",
            "rstrip_start_station_name",
            "upper_start_station_name",
        ],
        consts.CONFIG_CALCULATED_TARGET_COLUMNS: [
            "ifnull_start_station_name",
            "rstrip_start_station_name",
            "upper_start_station_name",
        ],
        consts.CONFIG_TYPE: "concat",
        "depth": 1,
    },
    {
        consts.CONFIG_FIELD_ALIAS: "ifnull_start_station_name",
        consts.CONFIG_CALCULATED_SOURCE_COLUMNS: ["start_station_name"],
        consts.CONFIG_CALCULATED_TARGET_COLUMNS: ["start_station_name"],
        consts.CONFIG_TYPE: "ifnull",
    },
    {
        consts.CONFIG_FIELD_ALIAS: "length_start_station_name",
        consts.CONFIG_CALCULATED_SOURCE_COLUMNS: ["start_station_name"],
        consts.CONFIG_CALCULATED_TARGET_COLUMNS: ["start_station_name"],
        consts.CONFIG_TYPE: "length",
    },
    {
        consts.CONFIG_FIELD_ALIAS: "rstrip_start_station_name",
        consts.CONFIG_CALCULATED_SOURCE_COLUMNS: ["start_station_name"],
        consts.CONFIG_CALCULATED_TARGET_COLUMNS: ["start_station_name"],
        consts.CONFIG_TYPE: "rstrip",
    },
    {
        consts.CONFIG_FIELD_ALIAS: "upper_start_station_name",
        consts.CONFIG_CALCULATED_SOURCE_COLUMNS: ["start_station_name"],
        consts.CONFIG_CALCULATED_TARGET_COLUMNS: ["start_station_name"],
        consts.CONFIG_TYPE: "upper",
    },
]
class MockIbisClient(object):
    """Minimal stand-in for an Ibis client object in these tests."""
@pytest.fixture
def module_under_test():
    """Provide the validation_builder module to each test."""
    from data_validation import validation_builder
    return validation_builder
def test_import(module_under_test):
    """The module under test should be importable."""
    assert module_under_test is not None
def test_column_validation(module_under_test):
    """A plain column validation is not verbose and has no query limit."""
    manager = ConfigManager(
        COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False
    )
    builder = module_under_test.ValidationBuilder(manager)

    assert not builder.verbose
    assert builder.config_manager.query_limit is None
def test_column_validation_aggregates(module_under_test):
    """Appending an aggregate should expose exactly its alias in metadata."""
    manager = ConfigManager(
        COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False
    )
    builder = module_under_test.ValidationBuilder(manager)

    manager.append_aggregates(AGGREGATES_TEST)
    builder.add_config_aggregates()
    assert list(builder.get_metadata().keys()) == ["sum_starttime"]
def test_validation_add_groups(module_under_test):
    """Appending query groups should expose the configured group alias."""
    manager = ConfigManager(
        COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False
    )
    builder = module_under_test.ValidationBuilder(manager)

    manager.append_query_groups(QUERY_GROUPS_TEST)
    builder.add_config_query_groups()
    assert list(builder.get_group_aliases()) == ["start_alias"]
def test_column_validation_calculate(module_under_test):
    """Calculated fields register one alias per configured calculation."""
    mock_config_manager = ConfigManager(
        COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False
    )
    builder = module_under_test.ValidationBuilder(mock_config_manager)

    mock_config_manager.append_calculated_fields(CALCULATED_MULTIPLE_TEST)
    builder.add_config_calculated_fields()

    # (Removed a leftover debug print of the aliases.)
    assert sorted(list(builder.get_calculated_aliases())) == [
        "concat_calcs",
        "concat_start_station_name_end_station_name",
        "ifnull_start_station_name",
        "length_start_station_name",
        "rstrip_start_station_name",
        "upper_start_station_name",
    ]
def test_column_validation_limit(module_under_test):
    """A config with a limit should propagate it to the source builder."""
    manager = ConfigManager(
        COLUMN_VALIDATION_CONFIG_LIMIT,
        MockIbisClient(),
        MockIbisClient(),
        verbose=False,
    )
    builder = module_under_test.ValidationBuilder(manager)

    builder.add_query_limit()
    assert builder.source_builder.limit == QUERY_LIMIT
def test_validation_add_filters(module_under_test):
    """Configured filters should be applied to the source builder."""
    manager = ConfigManager(
        COLUMN_VALIDATION_CONFIG, MockIbisClient(), MockIbisClient(), verbose=False
    )
    builder = module_under_test.ValidationBuilder(manager)

    builder.add_config_filters()
    filter_field = builder.source_builder.filters[0]
    assert filter_field.left == "column_name > 100"
e88aaaf26e2de6b48dfc79634b91841480741eb1 | 2,163 | py | Python | venv/lib/python3.8/site-packages/azureml/_restclient/models/private_endpoint_connection.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/_restclient/models/private_endpoint_connection.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/_restclient/models/private_endpoint_connection.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PrivateEndpointConnection(Model):
    """The Private Endpoint Connection resource.
    :param private_endpoint: The resource of private end point.
    :type private_endpoint: ~_restclient.models.PrivateEndpoint
    :param private_link_service_connection_state: A collection of information
     about the state of the connection between service consumer and provider.
    :type private_link_service_connection_state:
     ~_restclient.models.PrivateLinkServiceConnectionState
    :param provisioning_state: The provisioning state of the private endpoint
     connection resource. Possible values include: 'Succeeded', 'Creating',
     'Deleting', 'Failed'
    :type provisioning_state: str or
     ~_restclient.models.PrivateEndpointConnectionProvisioningState
    """
    # Serialization validation rules consumed by msrest (autogenerated).
    _validation = {
        'private_link_service_connection_state': {'required': True},
    }
    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
        'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(self, private_link_service_connection_state, private_endpoint=None, provisioning_state=None):
        super(PrivateEndpointConnection, self).__init__()
        self.private_endpoint = private_endpoint
        self.private_link_service_connection_state = private_link_service_connection_state
        self.provisioning_state = provisioning_state
| 47.021739 | 151 | 0.691632 | 1,624 | 0.750809 | 0 | 0 | 0 | 0 | 0 | 0 | 1,544 | 0.713823 |
e88e2ada84f6797bbe06ad18554f171bfb1b346a | 3,100 | py | Python | src/chainalytic_icon/provider/api_bundle.py | yudus-lab/chainalytic-icon | 2c4ef9dde9e94a98a2efabccfb84f853e3a85b0d | [
"Apache-2.0"
] | 1 | 2021-02-24T18:15:10.000Z | 2021-02-24T18:15:10.000Z | src/chainalytic_icon/provider/api_bundle.py | yudus-lab/chainalytic-icon | 2c4ef9dde9e94a98a2efabccfb84f853e3a85b0d | [
"Apache-2.0"
] | null | null | null | src/chainalytic_icon/provider/api_bundle.py | yudus-lab/chainalytic-icon | 2c4ef9dde9e94a98a2efabccfb84f853e3a85b0d | [
"Apache-2.0"
] | null | null | null | import traceback
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from chainalytic_icon.common import config, util
class ApiBundle(object):
    """
    The interface to external consumers/applications.

    API methods are looked up by name on this instance and dispatched
    through `call_api()`; all of them read from the attached collator.
    """

    def __init__(self, working_dir: str):
        super(ApiBundle, self).__init__()
        self.working_dir = working_dir
        self.collator = None
        self.logger = util.get_child_logger('provider.api_bundle')

    def set_collator(self, collator: 'Collator'):
        """Attach the collator instance that backs every API call."""
        self.collator = collator

    async def call_api(self, api_id: str, api_params: dict) -> Dict:
        """Dispatch `api_id` with `api_params` to the matching method.

        :return: dict with `status` (1 = success, -1 = unknown API,
                 0 = error) and `result` (API return value or error text).
        """
        ret = {'status': 0, 'result': None}

        # Idiom fix: one getattr with a default instead of hasattr+getattr.
        # NOTE(review): any attribute name is reachable here, so api_id must
        # come from a trusted caller -- confirm upstream validation.
        func = getattr(self, api_id, None)
        try:
            if func:
                self.logger.debug(f'Found API: {api_id}, calling...')
                ret['result'] = await func(api_params)
                ret['status'] = 1
            else:
                self.logger.warning(f'API not found: {api_id}')
                ret['status'] = -1
                ret['result'] = f'API not found: {api_id}'
        except Exception as e:
            # Report the failure to the caller instead of propagating.
            ret['status'] = 0
            ret['result'] = f'{str(e)}\n{traceback.format_exc()}'
            self.logger.error(f'ERROR when calling API: {api_id}')
            self.logger.error(f'{str(e)}\n{traceback.format_exc()}')

        return ret

    # #################
    # APIs to be called
    #
    async def last_block_height(self, api_params: dict) -> Optional[int]:
        # Returns None implicitly when transform_id is missing.
        if 'transform_id' in api_params:
            return await self.collator.last_block_height(api_params['transform_id'])

    async def latest_upstream_block_height(self, api_params: dict) -> Optional[int]:
        return await self.collator.latest_upstream_block_height()

    async def get_block(self, api_params: dict) -> Optional[dict]:
        if 'transform_id' in api_params:
            return await self.collator.get_block(api_params['height'], api_params['transform_id'])

    # ########################
    # For `stake_history` only
    #
    async def latest_unstake_state(self, api_params: dict) -> Optional[dict]:
        return await self.collator.latest_unstake_state()

    # ###########################
    # For `contract_history` only
    #
    async def contract_transaction(self, api_params: dict) -> Optional[dict]:
        return await self.collator.contract_transaction(
            api_params['address'], int(api_params['size'])
        )

    async def contract_internal_transaction(self, api_params: dict) -> Optional[dict]:
        return await self.collator.contract_internal_transaction(
            api_params['address'], int(api_params['size'])
        )

    async def contract_stats(self, api_params: dict) -> Optional[dict]:
        return await self.collator.contract_stats(api_params['address'])

    async def contract_list(self, api_params: dict) -> Optional[dict]:
        return await self.collator.contract_list()

    async def max_tx_per_contract(self, api_params: dict) -> Optional[dict]:
        return await self.collator.max_tx_per_contract()
e8910b54a876264e2bd19a91436e6a9f6f4eca70 | 447 | py | Python | __scraping__/just-eat.fr - robobrowser/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | __scraping__/just-eat.fr - robobrowser/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | __scraping__/just-eat.fr - robobrowser/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z |
# date: 2019.05.05
# author: Bartłomiej 'furas' Burek
import robobrowser
# Scrape the just-eat.fr landing page, then follow its embedded iframe.
browser = robobrowser.RoboBrowser(user_agent='Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0')
browser.parser = 'lxml'

browser.open("https://www.just-eat.fr")
print(browser.get_forms())

# The page embeds its real content in an iframe; grab its URL.
frame_src = browser.select('iframe')[0]['src']
print(frame_src)

browser.open("https://www.just-eat.fr" + frame_src)
print(browser.parsed)

browser.open("https://www.just-eat.fr")
print(browser.get_forms())
| 21.285714 | 109 | 0.709172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.479911 |
e89153be340928f8158c97cd385e8881185901f0 | 2,880 | py | Python | winning_ticket/src/model_utils.py | zankner/WinningTickets | fe40a6e74454e6ea96949f2c16263ed1a1aea6fa | [
"MIT"
] | null | null | null | winning_ticket/src/model_utils.py | zankner/WinningTickets | fe40a6e74454e6ea96949f2c16263ed1a1aea6fa | [
"MIT"
] | null | null | null | winning_ticket/src/model_utils.py | zankner/WinningTickets | fe40a6e74454e6ea96949f2c16263ed1a1aea6fa | [
"MIT"
] | null | null | null | import copy
import torch
from utils import helpers
from utils.layers import conv, linear, batch_norm
def ticketfy(model, split_rate, split_mode="kels"):
    """Replace every Conv/Linear/BatchNorm layer of *model* in place with its
    Split* counterpart, preserving the original layer hyper-parameters.

    :param model: network whose layers are swapped in place
    :param split_rate: fraction of weights kept by each split layer
    :param split_mode: splitting strategy forwarded to the split layers
    """
    conv_layers, linear_layers, bn_layers = helpers.get_layers(model)
    for n, _ in conv_layers:
        cur_conv = helpers.rgetattr(model, n)
        helpers.rsetattr(
            model, n,
            conv.SplitConv(cur_conv.in_channels,
                           cur_conv.out_channels,
                           kernel_size=cur_conv.kernel_size,
                           stride=cur_conv.stride,
                           padding=cur_conv.padding,
                           dilation=cur_conv.dilation,
                           groups=cur_conv.groups,
                           # fix: identity comparison, not `!= None` (E711)
                           bias=cur_conv.bias is not None,
                           padding_mode=cur_conv.padding_mode,
                           split_rate=split_rate,
                           split_mode=split_mode))
    for i, (n, _) in enumerate(linear_layers):
        cur_linear = helpers.rgetattr(model, n)
        helpers.rsetattr(
            model, n,
            linear.SplitLinear(cur_linear.in_features,
                               cur_linear.out_features,
                               bias=cur_linear.bias is not None,
                               split_rate=split_rate,
                               split_mode=split_mode,
                               # only the final linear layer is flagged
                               last_layer=i == len(linear_layers) - 1))
    for n, _ in bn_layers:
        cur_bn = helpers.rgetattr(model, n)
        helpers.rsetattr(
            model, n,
            batch_norm.SplitBatchNorm(
                cur_bn.num_features,
                eps=cur_bn.eps,
                momentum=cur_bn.momentum,
                track_running_stats=cur_bn.track_running_stats,
                split_rate=split_rate))
def regenerate(model, evolve_mode="rand", device="cpu"):
    """Re-initialize the masked-out (non-ticket) weights of each split layer.

    :param model: network containing SplitConv/SplitLinear layers
    :param evolve_mode: re-initialization strategy passed to split_reinitialize
    :param device: device the fresh weights are created on
    :raises NotImplementedError: if a masked layer is neither SplitConv
        nor SplitLinear
    """
    for _, m in model.named_modules():
        if hasattr(m, "weight") and m.weight is not None:
            if hasattr(m, "mask"):  ## Conv and Linear but not BN
                # A split layer must keep a strict subset of its weights.
                assert m.split_rate < 1.0
                if m.__class__ == conv.SplitConv or m.__class__ == linear.SplitLinear:
                    m.split_reinitialize(evolve_mode, device)
                else:
                    # BUG FIX: `NotImplemented` is a constant, not an
                    # exception type; calling it raised a TypeError instead
                    # of the intended error.
                    raise NotImplementedError('Invalid layer {}'.format(
                        m.__class__))
def extract_ticket(model, split_rate):
    """Return a deep copy of *model* with every maskable layer slimmed down.

    NOTE(review): *split_rate* is currently unused here -- each layer's own
    split configuration appears to drive `extract_slim()`; confirm before
    removing the parameter.
    """
    clone = copy.deepcopy(model)
    for name, module in clone.named_modules():
        weight = getattr(module, "weight", None)
        if weight is None:
            continue
        # Conv/Linear layers carry a `mask`; SplitBatchNorm keeps a bn_mask
        # instead, so it is matched by class.
        if hasattr(module, "mask") or module.__class__ == batch_norm.SplitBatchNorm:
            module.extract_slim()
    return clone
| 38.918919 | 93 | 0.542014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.076042 |
e892309adfae5627e160795d9ed6e92ccebca92c | 145 | py | Python | tests/test_get_term_list.py | vineetjohn/invest-o-scrape | 696a06538e0e2f1f4c2180bf43657861966a2685 | [
"MIT"
] | 5 | 2019-12-19T05:25:00.000Z | 2022-01-31T19:09:31.000Z | tests/test_get_term_list.py | vineetjohn/invest-o-scrape | 696a06538e0e2f1f4c2180bf43657861966a2685 | [
"MIT"
] | null | null | null | tests/test_get_term_list.py | vineetjohn/invest-o-scrape | 696a06538e0e2f1f4c2180bf43657861966a2685 | [
"MIT"
] | 3 | 2020-03-04T02:24:36.000Z | 2022-01-31T19:09:38.000Z | from utils import scrape_helper
url = "http://www.investopedia.com/terms/1/"
links = scrape_helper.get_term_links_from_page(url)
print(links)
| 18.125 | 51 | 0.786207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.262069 |
e8927003c1e8983886adadf1d66dd6660f512ad2 | 1,626 | py | Python | nototools/drop_hints.py | RoelN/nototools | 76b29f8f8f9beaff47b6922b3b664ab2c4c680f6 | [
"Apache-2.0"
] | 156 | 2015-06-11T00:03:49.000Z | 2019-03-12T10:05:14.000Z | nototools/drop_hints.py | RoelN/nototools | 76b29f8f8f9beaff47b6922b3b664ab2c4c680f6 | [
"Apache-2.0"
] | 323 | 2015-06-09T21:26:40.000Z | 2019-04-09T11:09:52.000Z | nototools/drop_hints.py | RoelN/nototools | 76b29f8f8f9beaff47b6922b3b664ab2c4c680f6 | [
"Apache-2.0"
] | 63 | 2015-06-09T19:21:58.000Z | 2019-03-27T21:52:30.000Z | #!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drop hints from a font."""
__author__ = "roozbeh@google.com (Roozbeh Pournader)"
import array
import sys
from fontTools import ttLib
def drop_hints_from_glyphs(font):
    """Drop the hint bytecode from every simple glyph in *font*.

    :param font: a TTFont-like mapping whose 'glyf' table exposes
        `glyphOrder` and per-glyph lookup
    """
    glyf_table = font["glyf"]
    # Iterate glyph names directly instead of indexing through range(len()).
    for glyph_name in glyf_table.glyphOrder:
        glyph = glyf_table[glyph_name]
        # Only simple glyphs (numberOfContours > 0) carry hint programs here;
        # empty bytecode is left untouched.
        if glyph.numberOfContours > 0 and glyph.program.bytecode:
            glyph.program.bytecode = array.array("B")
def drop_tables(font, tables):
    """Remove each listed table from the font, ignoring absent ones."""
    for tag in tables:
        if tag in font:
            del font[tag]
def main(argv):
    """Drop the hints from the first file specified and save as second."""
    # TrueType tables that only exist to support hinting.
    hint_tables = ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"]
    font = ttLib.TTFont(argv[1])
    drop_hints_from_glyphs(font)
    drop_tables(font, hint_tables)
    font.save(argv[2])
# Usage: drop_hints.py <input font path> <output font path>
if __name__ == "__main__":
    main(sys.argv)
| 28.526316 | 74 | 0.688807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 884 | 0.543665 |
e8928c1b30720641401724d98a6992e27f9c6712 | 5,438 | py | Python | imapclient/response_lexer.py | maxiimou/imapclient | 755936fb2ac4a3da9f898e504cd1a8f4b5da9b84 | [
"BSD-3-Clause"
] | null | null | null | imapclient/response_lexer.py | maxiimou/imapclient | 755936fb2ac4a3da9f898e504cd1a8f4b5da9b84 | [
"BSD-3-Clause"
] | 2 | 2019-05-01T08:41:02.000Z | 2020-01-03T21:54:51.000Z | imapclient/response_lexer.py | maxiimou/imapclient | 755936fb2ac4a3da9f898e504cd1a8f4b5da9b84 | [
"BSD-3-Clause"
] | 5 | 2015-12-03T03:17:52.000Z | 2021-01-31T13:10:25.000Z | # Copyright (c) 2014, Menno Smits
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
"""
A lexical analyzer class for IMAP responses.
Although Lexer does all the work, TokenSource is the class to use for
external callers.
"""
from __future__ import unicode_literals
from . import six
__all__ = ["TokenSource"]
# Byte-value character classes used by the IMAP tokenizer.
CTRL_CHARS = frozenset(c for c in range(32))
ALL_CHARS = frozenset(c for c in range(256))
# Bytes with special meaning that terminate or delimit tokens.
SPECIALS = frozenset(c for c in six.iterbytes(b' ()%"['))
NON_SPECIALS = ALL_CHARS - SPECIALS - CTRL_CHARS
WHITESPACE = frozenset(c for c in six.iterbytes(b' \t\r\n'))
# Individual byte values with tokenizer significance.
BACKSLASH = ord('\\')
OPEN_SQUARE = ord('[')
CLOSE_SQUARE = ord(']')
DOUBLE_QUOTE = ord('"')
class TokenSource(object):
    """
    Iterator facade over Lexer that also exposes the IMAP literal
    belonging to the response record currently being tokenised.
    """

    def __init__(self, text):
        self.lex = Lexer(text)
        self.src = iter(self.lex)

    @property
    def current_literal(self):
        # The literal lives on whichever record the lexer is consuming now.
        return self.lex.current_source.literal

    def __iter__(self):
        return self.src
class Lexer(object):
    """
    A lexical analyzer class for IMAP

    Splits response records into tokens: plain atoms, bracketed sections,
    and quoted strings with backslash escaping.
    """

    def __init__(self, text):
        # One lazily-consumed LiteralHandlingIter per response record.
        self.sources = (LiteralHandlingIter(self, chunk) for chunk in text)
        self.current_source = None

    def read_until(self, stream_i, end_char, escape=True):
        # Consume bytes up to and including end_char. When escape is set,
        # a backslash protects the following byte; a stream that ends
        # before end_char raises ValueError.
        token = bytearray()
        try:
            for nextchar in stream_i:
                if escape and nextchar == BACKSLASH:
                    escaper = nextchar
                    nextchar = six.next(stream_i)
                    if nextchar != escaper and nextchar != end_char:
                        token.append(escaper)  # Don't touch invalid escaping
                elif nextchar == end_char:
                    break
                token.append(nextchar)
            else:
                # for/else: the stream ran out without hitting end_char.
                raise ValueError("No closing '%s'" % chr(end_char))
        except StopIteration:
            # A trailing backslash consumed the final byte.
            raise ValueError("No closing '%s'" % chr(end_char))
        # The delimiter itself is kept as part of the token.
        token.append(end_char)
        return token

    def read_token_stream(self, stream_i):
        # Hoist lookups into locals; this loop runs per byte.
        whitespace = WHITESPACE
        wordchars = NON_SPECIALS
        read_until = self.read_until

        while True:
            # Whitespace
            for nextchar in stream_i:
                if nextchar not in whitespace:
                    stream_i.push(nextchar)
                    break  # done skipping over the whitespace

            # Non-whitespace
            token = bytearray()
            for nextchar in stream_i:
                if nextchar in wordchars:
                    token.append(nextchar)
                elif nextchar == OPEN_SQUARE:
                    # Bracketed sections are swallowed whole, no escaping.
                    token.append(nextchar)
                    token.extend(read_until(stream_i, CLOSE_SQUARE, escape=False))
                else:
                    if nextchar in whitespace:
                        yield token
                    elif nextchar == DOUBLE_QUOTE:
                        # Quotes only start a token; read the quoted string.
                        assert not token
                        token.append(nextchar)
                        token.extend(read_until(stream_i, nextchar))
                        yield token
                    else:
                        # Other punctuation, eg. "(". This ends the current token.
                        if token:
                            yield token
                        yield bytearray([nextchar])
                    break
            else:
                # for/else: record exhausted; flush any pending token.
                if token:
                    yield token
                break

    def __iter__(self):
        for source in self.sources:
            # Track the active record so TokenSource can reach its literal.
            self.current_source = source
            for tok in self.read_token_stream(iter(source)):
                yield bytes(tok)
# imaplib has poor handling of 'literals' - it both fails to remove the
# {size} marker, and fails to keep responses grouped into the same logical
# 'line'. What we end up with is a list of response 'records', where each
# record is either a simple string, or tuple of (str_with_lit, literal) -
# where str_with_lit is a string with the {xxx} marker at its end. Note
# that each element of this list does *not* correspond 1:1 with the
# untagged responses.
# (http://bugs.python.org/issue5045 also has comments about this)
# So: we have a special object for each of these records. When a
# string literal is processed, we peek into this object to grab the
# literal.
class LiteralHandlingIter:
    """Wraps one imaplib response record, exposing its text and any literal."""

    def __init__(self, lexer, resp_record):
        self.lexer = lexer
        if isinstance(resp_record, tuple):
            # A (text, literal) record: the text ends with a "{size}" marker
            # and the literal payload rides alongside it.
            text = resp_record[0]
            assert text.endswith(b"}"), text
            self.src_text = text
            self.literal = resp_record[1]
        else:
            # A plain line with no literal attached.
            self.src_text = resp_record
            self.literal = None

    def __iter__(self):
        return PushableIterator(six.iterbytes(self.src_text))
class PushableIterator(object):
    """Iterator wrapper that supports pushing consumed items back (LIFO)."""

    NO_MORE = object()

    def __init__(self, it):
        self.it = iter(it)
        self.pushed = []

    def __iter__(self):
        return self

    def __next__(self):
        # Pushed-back items are served before the underlying iterator.
        if self.pushed:
            return self.pushed.pop()
        # six.next is just an alias of the builtin next() (available since
        # Python 2.6), so the indirection is dropped.
        return next(self.it)

    # For Python 2 compatibility
    next = __next__

    def push(self, item):
        self.pushed.append(item)
| 31.433526 | 82 | 0.582383 | 4,042 | 0.743288 | 1,635 | 0.300662 | 87 | 0.015999 | 0 | 0 | 1,466 | 0.269584 |
e89384cd4e1e1e1278f38d2fac8d56157408b3f2 | 149 | py | Python | src/bert_summarizer/data/__init__.py | k-tahiro/bert-summarizer | 03a09676130e4706b42ba6ab53c545fd10b84bce | [
"MIT"
] | 8 | 2020-10-05T02:34:40.000Z | 2021-11-29T01:38:41.000Z | src/bert_summarizer/data/__init__.py | k-tahiro/bert-summarizer | 03a09676130e4706b42ba6ab53c545fd10b84bce | [
"MIT"
] | 5 | 2021-04-01T07:15:26.000Z | 2022-03-15T02:48:18.000Z | src/bert_summarizer/data/__init__.py | k-tahiro/bert-summarizer | 03a09676130e4706b42ba6ab53c545fd10b84bce | [
"MIT"
] | 2 | 2021-09-30T09:10:55.000Z | 2022-01-29T03:05:46.000Z | from .data_collator import (
DataCollatorWithPaddingWithAdditionalFeatures,
EncoderDecoderDataCollatorWithPadding,
)
from .datasets import *
| 24.833333 | 50 | 0.825503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e894d63a61f84c8bd811e9165930421a819cf713 | 274 | py | Python | app/api_docs/__init__.py | linrong/flask-server | 5f0896c6ccedb6b172b9af7e1018600e38a2df43 | [
"MIT"
] | null | null | null | app/api_docs/__init__.py | linrong/flask-server | 5f0896c6ccedb6b172b9af7e1018600e38a2df43 | [
"MIT"
] | 1 | 2019-09-06T10:06:47.000Z | 2019-09-10T07:18:47.000Z | app/api_docs/__init__.py | linrong/flask-server | 5f0896c6ccedb6b172b9af7e1018600e38a2df43 | [
"MIT"
] | null | null | null | # _*_ coding: utf-8 _*_
"""
Created by lr on 2019/08/29.
此模块用来编写flasgger中api列表下的详细操作信息
"""
from app.api_docs.v1 import user, client, token, \
banner, theme, product, category, \
address, order, pay
from app.api_docs.cms import cms_user, file
__author__ = 'lr' | 24.909091 | 50 | 0.69708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.422581 |
e8965b0b4a9f953e4d371e88df279eb8809cdfa7 | 5,512 | py | Python | floss/decoding_manager.py | fireeye/flare-floss | dccba79912b22c1f24267da276d9bb8653f40d05 | [
"Apache-2.0"
] | 2,067 | 2016-03-02T20:20:40.000Z | 2021-09-21T11:07:00.000Z | floss/decoding_manager.py | fireeye/flare-floss | dccba79912b22c1f24267da276d9bb8653f40d05 | [
"Apache-2.0"
] | 355 | 2016-03-05T02:28:59.000Z | 2021-09-10T19:27:48.000Z | floss/decoding_manager.py | fireeye/flare-floss | dccba79912b22c1f24267da276d9bb8653f40d05 | [
"Apache-2.0"
] | 371 | 2016-03-05T02:17:22.000Z | 2021-09-17T17:22:51.000Z | # Copyright (C) 2017 Mandiant, Inc. All Rights Reserved.
import logging
from typing import List, Tuple
from dataclasses import dataclass
import viv_utils
import envi.memory
import viv_utils.emulator_drivers
from envi import Emulator
from . import api_hooks
logger = logging.getLogger("floss")
MAX_MAPS_SIZE = 1024 * 1024 * 100 # 100MB max memory allocated in an emulator instance
def is_import(emu, va):
    """
    Return True if the given VA is that of an imported function.
    """
    # TODO: also check location type
    taint = emu.getVivTaint(va)
    return taint is not None and taint[1] == "import"
# type aliases for envi.memory maps:
# a descriptor is (va, size, perms, name)
MemoryMapDescriptor = Tuple[int, int, int, str]

# a map is (start, end, descriptor, content)
MemoryMap = Tuple[int, int, MemoryMapDescriptor, bytes]

# the full memory state is a list of maps
Memory = List[MemoryMap]


@dataclass
class Snapshot:
    """
    A point-in-time capture of emulator state.

    Attributes:
        memory: the memory contents at capture time
        sp: the stack counter
        pc: the instruction pointer
    """

    memory: Memory
    sp: int
    pc: int
def get_map_size(emu):
    """Return the total number of bytes currently mapped in the emulator.

    BUG FIX: the accumulator was never updated (the original did
    `mapsize += size`), so this always returned 0 and the MAX_MAPS_SIZE
    guard in make_snapshot could never fire.
    """
    size = 0
    for _mapva, mapsize, _mperm, _mfname in emu.getMemoryMaps():
        size += mapsize
    return size
class MapsTooLargeError(Exception):
    """Raised when the emulator has mapped more memory than MAX_MAPS_SIZE allows."""
def make_snapshot(emu: Emulator) -> Snapshot:
    """
    Create a snapshot of the current CPU and memory.

    :raises MapsTooLargeError: when the emulator has mapped more than
        MAX_MAPS_SIZE bytes (guards against runaway allocations).
    """
    # Compute the map size once instead of twice (it walks all maps).
    map_size = get_map_size(emu)
    if map_size > MAX_MAPS_SIZE:
        logger.debug("emulator mapped too much memory: 0x%x", map_size)
        raise MapsTooLargeError()
    return Snapshot(emu.getMemorySnap(), emu.getStackCounter(), emu.getProgramCounter())
@dataclass
class Delta:
    """
    Before/after pair of Snapshots taken around a single operation,
    enabling diffs of emulator state.
    """

    pre: Snapshot
    post: Snapshot
class DeltaCollectorHook(viv_utils.emulator_drivers.Hook):
    """Emulation hook that records a Delta at each imported API call."""

    def __init__(self, pre_snap: Snapshot):
        super(DeltaCollectorHook, self).__init__()
        self._pre_snap = pre_snap
        self.deltas: List[Delta] = []

    def hook(self, callname, driver, callconv, api, argv):
        emu = driver._emu
        if not is_import(emu, emu.getProgramCounter()):
            return
        try:
            self.deltas.append(Delta(self._pre_snap, make_snapshot(emu)))
        except MapsTooLargeError:
            # Best effort: skip this snapshot rather than abort emulation.
            logger.debug("despite call to import %s, maps too large, not extracting strings", callname)
def emulate_function(
    emu: Emulator, function_index, fva: int, return_address: int, max_instruction_count: int
) -> List[Delta]:
    """
    Emulate a function and collect snapshots at each interesting place.
    These interesting places include calls to imported API functions
    and the final state of the emulator.
    Emulation continues until the return address is hit, or
    the given max_instruction_count is hit.
    Some library functions are shimmed, such as memory allocation routines.
    This helps "normal" routines emulate correct using standard library function.
    These include:
      - GetProcessHeap
      - RtlAllocateHeap
      - AllocateHeap
      - malloc

    :type function_index: viv_utils.FunctionIndex
    :param fva: The start address of the function to emulate.
    :param return_address: The expected return address of the function.
     Emulation stops here.
    :param max_instruction_count: The max number of instructions to emulate.
     This helps avoid unexpected infinite loops.
    """
    try:
        pre_snap = make_snapshot(emu)
    except MapsTooLargeError:
        # FIX: Logger.warn is a deprecated alias of Logger.warning.
        logger.warning("initial snapshot mapped too much memory, can't extract strings")
        return []

    delta_collector = DeltaCollectorHook(pre_snap)

    try:
        logger.debug("Emulating function at 0x%08X", fva)
        driver = viv_utils.emulator_drivers.DebuggerEmulatorDriver(emu)
        monitor = api_hooks.ApiMonitor(emu.vw, function_index)
        driver.add_monitor(monitor)
        driver.add_hook(delta_collector)

        with api_hooks.defaultHooks(driver):
            driver.runToVa(return_address, max_instruction_count)
    except viv_utils.emulator_drivers.InstructionRangeExceededError:
        logger.debug("Halting as emulation has escaped!")
    except envi.InvalidInstruction:
        logger.debug("vivisect encountered an invalid instruction. will continue processing.", exc_info=True)
    except envi.UnsupportedInstruction:
        logger.debug("vivisect encountered an unsupported instruction. will continue processing.", exc_info=True)
    except envi.BreakpointHit:
        logger.debug(
            "vivisect encountered an unexpected emulation breakpoint. will continue processing.", exc_info=True
        )
    except viv_utils.emulator_drivers.StopEmulation:
        pass
    except Exception:
        logger.debug("vivisect encountered an unexpected exception. will continue processing.", exc_info=True)
    logger.debug("Ended emulation at 0x%08X", emu.getProgramCounter())

    deltas = delta_collector.deltas
    try:
        # Always include a final snapshot so callers can diff the end state.
        deltas.append(Delta(pre_snap, make_snapshot(emu)))
    except MapsTooLargeError:
        logger.debug("failed to create final snapshot, emulator mapped too much memory, skipping")
        pass
    return deltas
| 28.708333 | 113 | 0.690312 | 1,177 | 0.213534 | 0 | 0 | 457 | 0.08291 | 0 | 0 | 2,377 | 0.431241 |
e896f1a87a2c3adcfa47ef323b2471d56dc8264e | 4,575 | py | Python | data_provider/lanenet_hnet_data_processor.py | aj96/lanenet-lane-detection | d83250844d07aecae9515fe0acc7e6dde291177a | [
"Apache-2.0"
] | 10 | 2018-09-26T03:06:23.000Z | 2021-12-01T05:21:11.000Z | data_provider/lanenet_hnet_data_processor.py | aj96/lanenet-lane-detection | d83250844d07aecae9515fe0acc7e6dde291177a | [
"Apache-2.0"
] | null | null | null | data_provider/lanenet_hnet_data_processor.py | aj96/lanenet-lane-detection | d83250844d07aecae9515fe0acc7e6dde291177a | [
"Apache-2.0"
] | 8 | 2018-09-26T03:19:25.000Z | 2021-12-01T05:21:13.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 18-5-21 下午3:33
# @Author : Luo Yao
# @Site : http://icode.baidu.com/repos/baidu/personal-code/Luoyao
# @File : lanenet_hnet_data_processor.py
# @IDE: PyCharm Community Edition
"""
实现LaneNet中的HNet训练数据流
"""
import os.path as ops
import json
import cv2
import numpy as np
try:
from cv2 import cv2
except ImportError:
pass
class DataSet(object):
    """
    Dataset wrapper for TUSimple-style lane annotations (HNet training data).
    """
    def __init__(self, dataset_info_file):
        """
        :param dataset_info_file: list of json annotation file paths
        """
        self._label_image_path, self._label_gt_pts = self._init_dataset(dataset_info_file)
        self._random_dataset()
        # Index of the next batch within the current shuffled epoch.
        self._next_batch_loop_count = 0
    def _init_dataset(self, dataset_info_file):
        """
        Collect annotated sample info from the json annotation files.
        :param dataset_info_file: list of json annotation file paths
        :return: (array of image paths, array of per-image lane point lists)
        """
        label_image_path = []
        label_gt_pts = []
        for json_file_path in dataset_info_file:
            assert ops.exists(json_file_path), '{:s} not exist'.format(json_file_path)
            src_dir = ops.split(json_file_path)[0]
            with open(json_file_path, 'r') as file:
                # Each line of the annotation file is one json record.
                for line in file:
                    info_dict = json.loads(line)
                    # NOTE(review): image_dir_split is built but never used.
                    image_dir = ops.split(info_dict['raw_file'])[0]
                    image_dir_split = image_dir.split('/')[1:]
                    image_dir_split.append(ops.split(info_dict['raw_file'])[1])
                    image_path = ops.join(src_dir, info_dict['raw_file'])
                    assert ops.exists(image_path), '{:s} not exist'.format(image_path)
                    label_image_path.append(image_path)
                    h_samples = info_dict['h_samples']
                    lanes = info_dict['lanes']
                    gt_pts = []
                    for lane in lanes:
                        assert len(h_samples) == len(lane)
                        lane_pts = []
                        for index in range(len(lane)):
                            # -2 marks "no lane point at this sample row".
                            if lane[index] == -2:
                                continue
                            else:
                                ptx = lane[index]
                                pty = h_samples[index]
                                lane_pts.append([ptx, pty])
                        if not lane_pts:
                            continue
                        # Lanes with too few points cannot be fitted reliably.
                        if len(lane_pts) <= 3:
                            continue
                        gt_pts.append(lane_pts)
                    label_gt_pts.append(gt_pts)
        return np.array(label_image_path), np.array(label_gt_pts)
    def _random_dataset(self):
        """
        Shuffle image paths and labels with one shared permutation.
        :return:
        """
        assert self._label_image_path.shape[0] == self._label_gt_pts.shape[0]
        random_idx = np.random.permutation(self._label_image_path.shape[0])
        self._label_image_path = self._label_image_path[random_idx]
        self._label_gt_pts = self._label_gt_pts[random_idx]
    def next_batch(self, batch_size):
        """
        Return the next batch; reshuffles and restarts once the epoch ends.
        :param batch_size: number of samples per batch
        :return: (list of decoded BGR images, array of lane point labels)
        """
        assert self._label_gt_pts.shape[0] == self._label_image_path.shape[0]
        idx_start = batch_size * self._next_batch_loop_count
        idx_end = batch_size * self._next_batch_loop_count + batch_size
        if idx_end > self._label_image_path.shape[0]:
            # Epoch exhausted: reshuffle and recurse into the fresh epoch.
            self._random_dataset()
            self._next_batch_loop_count = 0
            return self.next_batch(batch_size)
        else:
            gt_img_list = self._label_image_path[idx_start:idx_end]
            gt_pts_list = self._label_gt_pts[idx_start:idx_end]
            gt_imgs = []
            for gt_img_path in gt_img_list:
                gt_imgs.append(cv2.imread(gt_img_path, cv2.IMREAD_COLOR))
            self._next_batch_loop_count += 1
            return gt_imgs, gt_pts_list
if __name__ == '__main__':
    # Smoke test: load TUSimple training annotations and render one label.
    import glob
    json_file_list = glob.glob('{:s}/*.json'.format('/home/baidu/DataBase/Semantic_Segmentation/'
                                                    'TUSimple_Lane_Detection/training'))
    json_file_list = [tmp for tmp in json_file_list if 'test' not in tmp]
    val = DataSet(json_file_list)
    a1, a2 = val.next_batch(1)
    print(a1)
    print(a2)
    # NOTE(review): next_batch returns decoded images (not paths), so this
    # cv2.imread(a1[0]) call looks wrong -- confirm against next_batch.
    src_image = cv2.imread(a1[0], cv2.IMREAD_COLOR)
    image = np.zeros(shape=[src_image.shape[0], src_image.shape[1]], dtype=np.uint8)
    # Paint each labelled lane point white on a blank canvas.
    for pt in a2[0]:
        ptx = pt[0]
        pty = pt[1]
        image[pty, ptx] = 255
    import matplotlib.pyplot as plt
    plt.imshow(image, cmap='gray')
    plt.show()
| 31.770833 | 97 | 0.555847 | 3,511 | 0.755867 | 0 | 0 | 0 | 0 | 0 | 0 | 801 | 0.172443 |
e898f497f22d8500e90feb973845a09707c2e56c | 994 | py | Python | schedule.py | kw90/drlnd_continuous-control | a28cc6071b7c36706c07dc01f343d941a7691f4e | [
"BSD-3-Clause"
] | 1 | 2021-01-06T02:15:49.000Z | 2021-01-06T02:15:49.000Z | schedule.py | kw90/drlnd_continuous-control | a28cc6071b7c36706c07dc01f343d941a7691f4e | [
"BSD-3-Clause"
] | 5 | 2020-06-04T05:06:51.000Z | 2022-03-12T00:33:08.000Z | baby_rl/utils/schedule.py | Sohojoe/UdacityDeepRL-Project2 | 7137eea0b606ea32d00424d23130ff213f03ecf1 | [
"MIT"
] | null | null | null | #######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
class ConstantSchedule:
    """Schedule that yields the same value regardless of step count."""

    def __init__(self, val):
        self.val = val

    def __call__(self, steps=1):
        # *steps* is accepted only for interface parity with LinearSchedule.
        return self.val
class LinearSchedule:
    """Schedule that moves linearly from *start* toward *end* over *steps* calls,
    clamping at *end* once reached."""

    def __init__(self, start, end=None, steps=None):
        # A single argument degenerates to a constant schedule.
        if end is None:
            end = start
            steps = 1
        self.inc = (end - start) / float(steps)
        self.current = start
        self.end = end
        # Clamp toward the end point from the correct side.
        self.bound = min if end > start else max

    def __call__(self, steps=1):
        # Return the current value, then advance by *steps* increments.
        value = self.current
        self.current = self.bound(self.current + self.inc * steps, self.end)
        return value
e89ab840c3de2e69c803f5aef5da15d296e1afb7 | 327 | py | Python | web scrapy/scrapy/criptoprice.py | douguedh/Project | 3a3569939d96eb7b36301fd84688fc72caf17e9a | [
"MIT"
] | null | null | null | web scrapy/scrapy/criptoprice.py | douguedh/Project | 3a3569939d96eb7b36301fd84688fc72caf17e9a | [
"MIT"
] | null | null | null | web scrapy/scrapy/criptoprice.py | douguedh/Project | 3a3569939d96eb7b36301fd84688fc72caf17e9a | [
"MIT"
] | null | null | null |
import requests
import bs4
dateList = []
higlist = []
lowlist= []
r = requests.get(
'https://coinmarketcap.com/currencies/bitcoin/historical-data/')
soup = bs4.BeautifulSoup(r.text, "lxml")
tr = soup.find_all('tr',{'class':'text-right'})
for item in tr:
dateList.append(item.find('td', {'class':'text-left'}).text) | 20.4375 | 68 | 0.672783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.348624 |
e89ad32707174026597cf08571266501ed12a036 | 1,517 | py | Python | pyQuARC/code/constants.py | NASA-IMPACT/pyQuARC | 9c174624a9d3e340cf91c7925aaae2203515e13f | [
"Apache-2.0"
] | 9 | 2021-03-12T18:04:25.000Z | 2022-03-22T01:30:56.000Z | pyQuARC/code/constants.py | NASA-IMPACT/pyQuARC | 9c174624a9d3e340cf91c7925aaae2203515e13f | [
"Apache-2.0"
] | 129 | 2021-04-19T15:42:12.000Z | 2022-03-28T16:50:39.000Z | pyQuARC/code/constants.py | NASA-IMPACT/pyQuARC | 9c174624a9d3e340cf91c7925aaae2203515e13f | [
"Apache-2.0"
] | 1 | 2022-03-30T20:33:30.000Z | 2022-03-30T20:33:30.000Z | import os
from colorama import Fore, Style
from pathlib import Path
# Supported metadata format identifiers.
DIF = "dif10"
ECHO10 = "echo10"
UMM_JSON = "umm-json"
ROOT_DIR = (
    # go up one directory
    Path(__file__).resolve().parents[1]
)
SCHEMAS_BASE_PATH = f"{ROOT_DIR}/schemas"
# Bundled schema/resource files, grouped by file extension.
SCHEMAS = {
    "json": [
        "checks",
        "check_messages",
        "check_messages_override",
        "checks_override",
        "rule_mapping",
        "rules_override",
        UMM_JSON
    ],
    "csv": [
        "granuledataformat",
        "instruments",
        "locations",
        "projects",
        "providers",
        "platforms",
        "sciencekeywords",
        "rucontenttype"
    ],
    "xsd": [ f"{DIF}_xml", f"{ECHO10}_xml" ],
    "xml": [ "catalog" ]
}
# Flat map: schema name -> absolute path of its bundled file.
SCHEMA_PATHS = {
    schema: f"{SCHEMAS_BASE_PATH}/{schema}.{filetype}"
    for filetype, schemas in SCHEMAS.items()
    for schema in schemas
}
VERSION_FILE = f"{SCHEMAS_BASE_PATH}/version.txt"
# Console color shortcuts backed by colorama.
COLOR = {
    "title": Fore.GREEN,
    "info": Fore.BLUE,
    "error": Fore.RED,
    "warning": Fore.YELLOW,
    "reset": Style.RESET_ALL,
    "bright": Style.BRIGHT
}
# GCMD keyword service endpoints (one CSV download link per keyword scheme).
GCMD_BASIC_URL = "https://gcmdservices.gsfc.nasa.gov/kms/concepts/concept_scheme/"
GCMD_KEYWORDS = [
    "granuledataformat",
    "instruments",
    "locations",
    "platforms",
    "projects",
    "providers",
    "rucontenttype",
    "sciencekeywords"
]
GCMD_LINKS = {
    keyword: f"{GCMD_BASIC_URL}{keyword}?format=csv" for keyword in GCMD_KEYWORDS
}
CMR_URL = "https://cmr.earthdata.nasa.gov"
| 19.960526 | 82 | 0.603164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.454845 |
e89be0e46a4e4f06c678ed62e880b446af83ee7d | 4,294 | py | Python | src/thresholding/Utilities.py | dsp-uga/Team-kieffer | f71ebcea3928d00496fb32156ebe990083795d29 | [
"MIT"
] | null | null | null | src/thresholding/Utilities.py | dsp-uga/Team-kieffer | f71ebcea3928d00496fb32156ebe990083795d29 | [
"MIT"
] | null | null | null | src/thresholding/Utilities.py | dsp-uga/Team-kieffer | f71ebcea3928d00496fb32156ebe990083795d29 | [
"MIT"
] | null | null | null | """
Author: Narinder Singh Project: Cilia Segmentation Date: 27 Feb 2019
Course: CSCI 8360 @ UGA Semester: Spring 2019 Module: Utilities.py
Description: This module contains methods and classes that make life easier.
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as matplot
from scipy.misc import imsave
from PIL import Image
from Config import *
# Directory layout for mask and frame data, rooted at Config.DATA_FILES_PATH.
MASKS_PATH = os.path.join(DATA_FILES_PATH, "masks/")
LIT_MASKS_PATH = os.path.join(MASKS_PATH, "lit/")
FRAMES_PATH = os.path.join(DATA_FILES_PATH, "data/frames")
# Stretching constant for masks to scale the range of grayscales from [0, 2] to [0, 255]
MASK_STRETCHING_CONSTANT = 127.5
class UtilitiesError(Exception):
    """Base class for errors raised by this module."""


class BadHashError(UtilitiesError):
    """Raised when a hash has no corresponding mask file."""
class ProgressBar:
    """
    A handrolled implementation of a progress bar. The bar displays the progress as a ratio like this: (1/360).

    Note: uses Python 2 print statements and an ANSI cursor-up escape to
    redraw in place; only works on ANSI-capable terminals.
    """

    def __init__(self, max = 100, message = "Initiating ....."):
        """
        Initialize the bar with the total number of units (scale).
        """
        self.max = max
        self.current = 0
        # Extra newline leaves a blank line for _clear() to move back over.
        print message + '\n'

    def update(self, add = 1):
        """
        Record progress.
        """
        self.current += add
        self._clear()
        self._display()

    def _display(self):
        """
        Print the completion ratio on the screen.
        """
        print "(" + str(self.current) + "/" + str(self.max) + ")"

    def _clear(self):
        """
        Erase the old ratio from the console.
        """
        # "\033[F" is the ANSI escape to move the cursor up one line.
        sys.stdout.write("\033[F")
        sys.stdout.flush()
def flen(filename):
"""
File LENgth computes and returns the number of lines in a file. @filename <string> is path to a file. This is an epensive method to call for the whole file is read to determine the number of lines.
returns: <integer> line count
"""
# Read and count lines.
with open(filename, 'r') as infile:
return sum((1 for line in infile))
def isImageFile(fpath):
"""
Returns whether or not the given path or filename is for an image file. The method is crude at the moment and just checks for some popular formats.
"""
path, fname = os.path.split(fpath)
if fname.endswith(("png", "jpeg", "gif", "tiff", "bmp")): return True
else: return False
def invertMask(mask):
"""
Inverts a numpy binary mask.
"""
return mask == False
def readMask(hash, binarize=True):
"""
Reads the mask for the given hash and if binarize flag is set, makes the mask binary (True/False : Cilia/Not-cilia)
"""
fpath = os.path.join(MASKS_PATH, hash + ".png")
if not os.path.isfile(fpath): raise BadHashError("Hash: " + hash + " does not exist OR does not have a mask against it.")
img = Image.open(fpath)
mat = np.asarray(img, np.int32)
mat.setflags(write=1)
if binarize:
ciliaMask = mat == CILIA_GRAYSCALE
backgroundMask = invertMask(ciliaMask)
mat[ciliaMask] = True
mat[backgroundMask] = False
return mat
def displayMask(hash, binarize=True):
"""
Displays the cilia mask against the given hash value.
"""
mask = readMask(hash, binarize)
if binarize: im = Image.fromarray(mask * 255)
else: im = Image.fromarray(mask * MASK_STRETCHING_CONSTANT)
im.show()
def displayHeatMap(mat):
"""
Dispalys the heat map for the given matrix.
"""
matplot.imshow(mat, cmap='hot')
matplot.show()
def readLines(filepath):
"""
Reads and returns the lines of the given file as a list.
"""
lines = []
with open(filepath, 'r') as infile:
for line in infile:
lines.append(line.strip())
return lines
def getVideoFramesDirectory(hash):
"""
Returns the video frames directory for the given hash.
"""
dir = os.path.join(FRAMES_PATH, hash)
if not os.path.isdir(dir): raise BadHashError("No frame directory found against the hash: " + hash)
else: return dir
def mean(collection):
"""
Mean for a numeric collection
"""
return sum(collection) / (len(collection) or 1)
def stretchAndSaveMasks(hashes):
"""
This method stretches the contrast for the masks by rescaling them to 0-255 grayscale making the white regions in the masks cilia cells.
"""
# Read each mask and hash
for hash in hashes:
mask = readMask(hash, binarize=False)
result = mask * MASK_STRETCHING_CONSTANT
imsave(os.path.join(LIT_MASKS_PATH, hash + ".png"), result)
if __name__ == '__main__':
# Quick testing etc.
hashes = readLines(TRAIN_FILE)
stretchAndSaveMasks(hashes)
| 24.678161 | 199 | 0.698882 | 790 | 0.183978 | 0 | 0 | 0 | 0 | 0 | 0 | 1,951 | 0.454355 |
e89cf0f4f8fc704184437ad6bef375fef0886990 | 3,467 | py | Python | main.py | Jemeni11/Fic-Retriever | 9f0d94e5b266a9c2469512bbb46c7099f61796f5 | [
"MIT"
] | null | null | null | main.py | Jemeni11/Fic-Retriever | 9f0d94e5b266a9c2469512bbb46c7099f61796f5 | [
"MIT"
] | null | null | null | main.py | Jemeni11/Fic-Retriever | 9f0d94e5b266a9c2469512bbb46c7099f61796f5 | [
"MIT"
] | null | null | null | # This example requires the 'members' and 'message_content' privileged intents
import re
import os
import discord
from discord.ext import commands
from embed_messages.SH_Embed import ScribbleHubEmbed
from embed_messages.AO3_Embed import ArchiveOfOurOwnEmbed
from embed_messages.FF_Embed import FanFictionDotNetEmbed
from embed_messages.FL_Embed import FictionDotLiveEmbed
from dotenv import load_dotenv
load_dotenv()
BOT_TOKEN = os.getenv('TOKEN')
description = """An example bot to showcase the discord.ext.commands extension
module.
There are a number of utility commands being showcased here."""
intents = discord.Intents.default()
intents.members = True
# intents.message_content = True
"""
This worked perfectly about an hour ago and now it throws the following error:
(virtualenv) nonso@HPEnvy:~/Documents/Code/Projects/Summarium$ python3 main.py
Traceback (most recent call last):
File "main.py", line 25, in <module>
intents.message_content = True
AttributeError: 'Intents' object has no attribute 'message_content'
(virtualenv) nonso@HPEnvy:~/Documents/Code/Projects/Summarium$
So I commented that line out and ran my code again and it worked
somehow even though it shouldn't.
Putting this comment here incase it causes chaos later on.
"""
bot = commands.Bot(command_prefix="?", description=description, intents=intents)
@bot.event
async def on_ready():
print(f"Logged in as {bot.user} (ID: {bot.user.id})")
print("____________________________________________")
@bot.event
async def on_message(message):
if message.author.id == bot.user.id:
return
if message.author.bot:
return # Do not reply to other bots
# Pulling out all URLs
URLs = re.findall(
r"""
\b((?:https?://)?(?:(?:www\.)?(?:[\da-z\.-]+)\.(?:[a-z]{2,6})
|(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]
|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:(?:[0-9a-fA-F]{1,4}:){7,7}
[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]
{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::
[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]
{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}
|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]
{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)
|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4})
{0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}
(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:)
{1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25
[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))(?::[0-9]{1,4}|[1-5][0-9]
{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])?
(?:/[\w\.-]*)*/?)\b
""",
message.content, re.VERBOSE)
for i in URLs:
if re.search(r"(^https://www\.scribblehub\.com/(series|read|profile))/\d+", i, re.IGNORECASE):
await message.reply(embed=ScribbleHubEmbed(i))
elif re.search(r"^https://archiveofourown\.org/(\bseries\b|\bworks\b|\bcollections\b)/", i, re.IGNORECASE):
await message.reply(embed=ArchiveOfOurOwnEmbed(i))
elif re.search(r"^https://(www|m)\.(\bfanfiction\b\.\bnet\b)/s/\d+/\d+/\w*", i, re.IGNORECASE):
await message.reply(file=FanFictionDotNetEmbed(i)[0], embed=FanFictionDotNetEmbed(i)[1])
elif re.search(r'^https?://fiction\.live/(?:stories|Sci-fi)/[^\/]+/([0-9a-zA-Z\-]+)/?.*', i, re.IGNORECASE):
await message.reply(embed=FictionDotLiveEmbed(i))
bot.run(BOT_TOKEN)
| 37.27957 | 110 | 0.627632 | 0 | 0 | 0 | 0 | 2,090 | 0.602827 | 2,068 | 0.596481 | 2,266 | 0.653591 |
e89d13c3f0f4c6c26ca619df2f3d4497d8221a2b | 76 | py | Python | utils/__init__.py | MaLiN2223/py_proj_transport | 4e0ee156ef4b42ddb5e0971ca0603b39b0796785 | [
"MIT"
] | null | null | null | utils/__init__.py | MaLiN2223/py_proj_transport | 4e0ee156ef4b42ddb5e0971ca0603b39b0796785 | [
"MIT"
] | null | null | null | utils/__init__.py | MaLiN2223/py_proj_transport | 4e0ee156ef4b42ddb5e0971ca0603b39b0796785 | [
"MIT"
] | null | null | null | """
This module contains utility classes and methods to be used in tests
""" | 25.333333 | 68 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 1 |
e89e57eee1002924427fb9529bc74acb259d5736 | 5,895 | py | Python | zm-jython/jylibs/ldap.py | hernad/zimbra9 | cf61ffa40d9600ab255ef4516ca25029fff6603b | [
"Apache-2.0"
] | null | null | null | zm-jython/jylibs/ldap.py | hernad/zimbra9 | cf61ffa40d9600ab255ef4516ca25029fff6603b | [
"Apache-2.0"
] | null | null | null | zm-jython/jylibs/ldap.py | hernad/zimbra9 | cf61ffa40d9600ab255ef4516ca25029fff6603b | [
"Apache-2.0"
] | null | null | null | #
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2010, 2012, 2013, 2014, 2015, 2016 Synacor, Inc.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software Foundation,
# version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with this program.
# If not, see <https://www.gnu.org/licenses/>.
# ***** END LICENSE BLOCK *****
#
import conf
from com.zimbra.cs.ldap.LdapServerConfig import GenericLdapConfig
from com.zimbra.cs.ldap import LdapClient
from com.zimbra.cs.ldap import LdapUsage
from com.zimbra.cs.ldap import ZAttributes
from com.zimbra.cs.ldap import ZLdapContext
from com.zimbra.cs.ldap import ZLdapFilter
from com.zimbra.cs.ldap import ZLdapFilterFactory
from com.zimbra.cs.ldap.ZLdapFilterFactory import FilterId
from com.zimbra.cs.ldap import ZSearchControls
from com.zimbra.cs.ldap import ZSearchResultEntry;
from com.zimbra.cs.ldap import ZMutableEntry
from com.zimbra.cs.ldap import ZSearchResultEnumeration
from com.zimbra.cs.ldap import ZSearchScope
from com.zimbra.cs.ldap.LdapException import LdapSizeLimitExceededException
from logmsg import *
# (Key, DN, requires_master)
keymap = {
"ldap_common_loglevel" : ("olcLogLevel", "cn=config", False),
"ldap_common_threads" : ("olcThreads", "cn=config", False),
"ldap_common_toolthreads" : ("olcToolThreads", "cn=config", False),
"ldap_common_require_tls" : ("olcSecurity", "cn=config", False),
"ldap_common_writetimeout" : ("olcWriteTimeout", "cn=config", False),
"ldap_common_tlsdhparamfile" : ("olcTLSDHParamFile", "cn=config", False),
"ldap_common_tlsprotocolmin" : ("olcTLSProtocolMin", "cn=config", False),
"ldap_common_tlsciphersuite" : ("olcTLSCipherSuite", "cn=config", False),
"ldap_db_maxsize" : ("olcDbMaxsize", "olcDatabase={3}mdb,cn=config", False),
"ldap_db_envflags" : ("olcDbEnvFlags", "olcDatabase={3}mdb,cn=config", False),
"ldap_db_rtxnsize" : ("olcDbRtxnSize", "olcDatabase={3}mdb,cn=config", False),
"ldap_accesslog_maxsize" : ("olcDbMaxsize", "olcDatabase={2}mdb,cn=config", True),
"ldap_accesslog_envflags" : ("olcDbEnvFlags", "olcDatabase={2}mdb,cn=config", True),
"ldap_overlay_syncprov_checkpoint" : ("olcSpCheckpoint", "olcOverlay={0}syncprov,olcDatabase={3}mdb,cn=config", True),
"ldap_overlay_syncprov_sessionlog" : ("olcSpSessionlog", "olcOverlay={0}syncprov,olcDatabase={3}mdb,cn=config", True),
"ldap_overlay_accesslog_logpurge" : ("olcAccessLogPurge", "olcOverlay={1}accesslog,olcDatabase={3}mdb,cn=config", True)
}
class Ldap:
master = False
mLdapConfig = None
@classmethod
def initLdap(cls, c = None):
if c:
cls.cf = c
Log.logMsg(5, "Creating ldap context")
ldapUrl = "ldapi:///"
bindDN = "cn=config"
try:
cls.mLdapConfig = GenericLdapConfig(ldapUrl, cls.cf.ldap_starttls_required, bindDN, cls.cf.ldap_root_password)
except Exception, e:
Log.logMsg(1, "LDAP CONFIG FAILURE (%s)" % e)
else:
cls.cf = conf.Config()
@classmethod
def modify_attribute(cls, key, value):
if cls.cf.ldap_is_master:
atbase = "cn=accesslog"
atfilter = "(objectClass=*)"
atreturn = ['1.1']
zfilter = ZLdapFilterFactory.getInstance().fromFilterString(FilterId.ZMCONFIGD, atfilter)
searchControls = ZSearchControls.createSearchControls(ZSearchScope.SEARCH_SCOPE_BASE, ZSearchControls.SIZE_UNLIMITED, atreturn)
mLdapContext = LdapClient.getContext(cls.mLdapConfig, LdapUsage.SEARCH)
try:
ne = mLdapContext.searchDir(atbase, zfilter, searchControls)
except:
cls.master = False
else:
cls.master = True
Log.logMsg(5, "Ldap config is master")
LdapClient.closeContext(mLdapContext)
(attr, dn, xform) = Ldap.lookupKey(key)
if attr is not None:
v = xform % (value,)
atreturn = [attr]
searchControls = ZSearchControls.createSearchControls(ZSearchScope.SEARCH_SCOPE_BASE, ZSearchControls.SIZE_UNLIMITED, atreturn)
mLdapContext = LdapClient.getContext(cls.mLdapConfig, LdapUsage.SEARCH)
ne = mLdapContext.searchDir(dn, zfilter, searchControls)
entry = ne.next()
entryAttrs = entry.getAttributes()
origValue = entryAttrs.getAttrString(attr)
attrPresent = entryAttrs.hasAttribute(attr)
LdapClient.closeContext(mLdapContext)
if origValue != v:
if attr == "olcSpSessionlog" and not attrPresent:
Log.logMsg(4, "olcSpSessionlog attribute is not present and can't replace it")
else:
Log.logMsg(4, "Setting %s to %s" % (key, v))
mLdapContext = LdapClient.getContext(cls.mLdapConfig, LdapUsage.MOD)
mEntry = LdapClient.createMutableEntry()
mEntry.setAttr(attr, v)
try:
mLdapContext.replaceAttributes(dn, mEntry.getAttributes())
LdapClient.closeContext(mLdapContext)
except:
return 1;
@classmethod
def lookupKey(cls, key):
if key in keymap:
(attr, dn, requires_master) = keymap[key]
if re.match("ldap_db_", key) and not cls.master:
dn = "olcDatabase={2}mdb,cn=config"
xform = "%s"
if key == "ldap_common_require_tls":
xform = "ssf=%s"
if requires_master and not cls.master:
Log.logMsg(5, "LDAP: Trying to modify key: %s when not a master" % (key,))
return (None, None, None)
else:
Log.logMsg(5, "Found key %s and dn %s for %s (%s)" % (attr, dn, key, cls.master))
return (attr, dn, xform)
else:
Log.logMsg(1, "UNKNOWN KEY %s" % (key,))
raise Exception("Key error")
Ldap.initLdap()
| 40.9375 | 130 | 0.709754 | 2,988 | 0.50687 | 0 | 0 | 2,931 | 0.497201 | 0 | 0 | 2,242 | 0.380322 |
e89f014ab74d401770b75b90a5239b085769b212 | 1,768 | py | Python | graphite/publishers/generic_publisher.py | bjwhite-fnal/decisionengine_modules | 1a2c3e4f57e60925dc374f386d8bca3ba9fa3e7e | [
"BSD-3-Clause"
] | null | null | null | graphite/publishers/generic_publisher.py | bjwhite-fnal/decisionengine_modules | 1a2c3e4f57e60925dc374f386d8bca3ba9fa3e7e | [
"BSD-3-Clause"
] | null | null | null | graphite/publishers/generic_publisher.py | bjwhite-fnal/decisionengine_modules | 1a2c3e4f57e60925dc374f386d8bca3ba9fa3e7e | [
"BSD-3-Clause"
] | null | null | null | """
Generic publisher for graphana
"""
import abc
import six
from decisionengine.framework.modules import Publisher
import decisionengine_modules.graphite_client as graphite
DEFAULT_GRAPHITE_HOST = 'fermicloud399.fnal.gov'
DEFAULT_GRAPHITE_PORT = 2004
DEFAULT_GRAPHITE_CONTEXT = ""
@six.add_metaclass(abc.ABCMeta)
class GenericPublisher(Publisher.Publisher):
def __init__(self, config):
self.graphite_host = config.get('graphite_host', DEFAULT_GRAPHITE_HOST)
self.graphite_port = config.get('graphite_port', DEFAULT_GRAPHITE_PORT)
self.graphite_context_header = config.get(
'graphite_context', DEFAULT_GRAPHITE_CONTEXT)
self.publush_to_graphite = config.get('publish_to_graphite')
self.output_file = config.get('output_file')
@abc.abstractmethod
def consumes(self): # this must be implemented by the inherited class
return None
@abc.abstractmethod
# this must be implemented by the inherited class
def graphite_context(self, data_block):
return None
def publish(self, data_block):
"""
Publish data
:type data_block: :obj:`~datablock.DataBlock`
:arg data_block: data block
"""
if not self.consumes():
return
data = data_block[self.consumes()[0]]
if self.graphite_host and self.publush_to_graphite:
end_point = graphite.Graphite(
host=self.graphite_host, pickle_port=self.graphite_port)
end_point.send_dict(self.graphite_context(data)[0], self.graphite_context(
data)[1], debug_print=False, send_data=True)
csv_data = data.to_csv(self.output_file, index=False)
if not self.output_file:
print(csv_data)
| 32.145455 | 86 | 0.690611 | 1,448 | 0.819005 | 0 | 0 | 1,480 | 0.837104 | 0 | 0 | 373 | 0.210973 |
e89f72335f42dd9c716112d702d16644f08e3501 | 2,864 | py | Python | datadog_checks_base/tests/openmetrics/test_interface.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | 663 | 2016-08-23T05:23:45.000Z | 2022-03-29T00:37:23.000Z | datadog_checks_base/tests/openmetrics/test_interface.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | 6,642 | 2016-06-09T16:29:20.000Z | 2022-03-31T22:24:09.000Z | datadog_checks_base/tests/openmetrics/test_interface.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | 1,222 | 2017-01-27T15:51:38.000Z | 2022-03-31T18:17:51.000Z | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.base import OpenMetricsBaseCheckV2
from datadog_checks.base.constants import ServiceCheck
from datadog_checks.dev.testing import requires_py3
from .utils import get_check
pytestmark = [requires_py3, pytest.mark.openmetrics, pytest.mark.openmetrics_interface]
def test_default_config(aggregator, dd_run_check, mock_http_response):
class Check(OpenMetricsBaseCheckV2):
__NAMESPACE__ = 'test'
def get_default_config(self):
return {'metrics': ['.+'], 'rename_labels': {'foo': 'bar'}}
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
"""
)
check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'bar:baz']
)
aggregator.assert_all_metrics_covered()
def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
# HELP state Node state
# TYPE state gauge
state{bar="baz"} 3
"""
)
check = get_check(
{'metrics': ['.+', {'state': {'type': 'service_check', 'status_map': {'3': 'ok'}}}], 'tags': ['foo:bar']}
)
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
aggregator.reset()
check.set_dynamic_tags('baz:foo')
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz', 'baz:foo'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
| 34.926829 | 114 | 0.687151 | 178 | 0.062151 | 0 | 0 | 0 | 0 | 0 | 0 | 1,117 | 0.390014 |
e8a059c82f837ee40d8d6e38defcf69fc241cbd4 | 566 | py | Python | db_core/env.py | dayvagrant/db_core | 01552110ce6b31228e59b45279642a39716e55e7 | [
"MIT"
] | null | null | null | db_core/env.py | dayvagrant/db_core | 01552110ce6b31228e59b45279642a39716e55e7 | [
"MIT"
] | null | null | null | db_core/env.py | dayvagrant/db_core | 01552110ce6b31228e59b45279642a39716e55e7 | [
"MIT"
] | null | null | null | """Set of configrations."""
_CONFIGS = {
"postgres": {
"host": "0.0.0.0",
"port": "5432",
"user": <USER>,
"pwd": <USER>,
"db": "postgres",
},
"mongodb": {
"host": "0.0.0.0",
"port": "27017",
"user": <USER>,
"pwd": <PASS>,
},
"clickhouse": {
"host": "0.0.0.0",
"port": "8123",
"user": <USER>,
"pwd": <PASS>,
"db": "db_live",
},
"aws-s3": {
"url": <URL>,
"login": <USER>,
"password": <PASS>,
},
}
| 18.258065 | 27 | 0.34629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.40636 |
e8a28d2c1e6b10e80aa524825e02d01157d1875e | 3,256 | py | Python | natasha/grammars/name.py | MaksMolodtsov/natasha | 08e9f1809111c1300db3ca45cc621ff5279a1bef | [
"MIT"
] | 1 | 2019-05-02T18:16:39.000Z | 2019-05-02T18:16:39.000Z | natasha/grammars/name.py | cheryomukhin/natasha | 08e9f1809111c1300db3ca45cc621ff5279a1bef | [
"MIT"
] | null | null | null | natasha/grammars/name.py | cheryomukhin/natasha | 08e9f1809111c1300db3ca45cc621ff5279a1bef | [
"MIT"
] | 1 | 2021-04-02T06:16:42.000Z | 2021-04-02T06:16:42.000Z | # coding: utf-8
from __future__ import unicode_literals
from yargy import (
rule,
and_, or_, not_,
)
from yargy.interpretation import fact
from yargy.predicates import (
eq, length_eq,
gram, tag,
is_single, is_capitalized
)
from yargy.predicates.bank import DictionaryPredicate as dictionary
from yargy.relations import gnc_relation
from natasha.data import load_dict
from yargy.rule.transformators import RuleTransformator
from yargy.rule.constructors import Rule
from yargy.predicates.constructors import AndPredicate
Name = fact(
'Name',
['first', 'middle', 'last', 'nick']
)
FIRST_DICT = set(load_dict('first.txt'))
MAYBE_FIRST_DICT = set(load_dict('maybe_first.txt'))
LAST_DICT = set(load_dict('last.txt'))
##########
#
# COMPONENTS
#
###########
IN_FIRST = dictionary(FIRST_DICT)
IN_MAYBE_FIRST = dictionary(MAYBE_FIRST_DICT)
IN_LAST = dictionary(LAST_DICT)
gnc = gnc_relation()
########
#
# FIRST
#
########
TITLE = is_capitalized()
NOUN = gram('NOUN')
NAME_CRF = tag('I')
ABBR = gram('Abbr')
SURN = gram('Surn')
NAME = and_(
gram('Name'),
not_(ABBR)
)
PATR = and_(
gram('Patr'),
not_(ABBR)
)
FIRST = and_(
NAME_CRF,
or_(
NAME,
IN_MAYBE_FIRST,
IN_FIRST
)
).interpretation(
Name.first.inflected()
).match(gnc)
FIRST_ABBR = and_(
ABBR,
TITLE
).interpretation(
Name.first
).match(gnc)
##########
#
# LAST
#
#########
LAST = and_(
NAME_CRF,
or_(
SURN,
IN_LAST
)
).interpretation(
Name.last.inflected()
).match(gnc)
########
#
# MIDDLE
#
#########
MIDDLE = PATR.interpretation(
Name.middle.inflected()
).match(gnc)
MIDDLE_ABBR = and_(
ABBR,
TITLE
).interpretation(
Name.middle
).match(gnc)
#########
#
# FI IF
#
#########
FIRST_LAST = rule(
FIRST,
LAST
)
LAST_FIRST = rule(
LAST,
FIRST
)
###########
#
# ABBR
#
###########
ABBR_FIRST_LAST = rule(
FIRST_ABBR,
'.',
LAST
)
LAST_ABBR_FIRST = rule(
LAST,
FIRST_ABBR,
'.',
)
ABBR_FIRST_MIDDLE_LAST = rule(
FIRST_ABBR,
'.',
MIDDLE_ABBR,
'.',
LAST
)
LAST_ABBR_FIRST_MIDDLE = rule(
LAST,
FIRST_ABBR,
'.',
MIDDLE_ABBR,
'.'
)
##############
#
# MIDDLE
#
#############
FIRST_MIDDLE = rule(
FIRST,
MIDDLE
)
FIRST_MIDDLE_LAST = rule(
FIRST,
MIDDLE,
LAST
)
LAST_FIRST_MIDDLE = rule(
LAST,
FIRST,
MIDDLE
)
##############
#
# SINGLE
#
#############
JUST_FIRST = FIRST
JUST_LAST = LAST
########
#
# FULL
#
########
NAME = or_(
FIRST_LAST,
LAST_FIRST,
ABBR_FIRST_LAST,
LAST_ABBR_FIRST,
ABBR_FIRST_MIDDLE_LAST,
LAST_ABBR_FIRST_MIDDLE,
FIRST_MIDDLE,
FIRST_MIDDLE_LAST,
LAST_FIRST_MIDDLE,
JUST_FIRST,
JUST_LAST,
).interpretation(
Name
)
class StripCrfTransformator(RuleTransformator):
def visit_term(self, item):
if isinstance(item, Rule):
return self.visit(item)
elif isinstance(item, AndPredicate):
predicates = [_ for _ in item.predicates if _ != NAME_CRF]
return AndPredicate(predicates)
else:
return item
SIMPLE_NAME = NAME.transform(
StripCrfTransformator
)
| 12.523077 | 70 | 0.596437 | 348 | 0.10688 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.128993 |
e8a30f812bcb19a63a69a2b83b8fc56667a700bf | 31,144 | py | Python | script/json2yaml.py | lunzhiPenxil/json2yaml-for-dice | 62bd66d3350b8a751f0dc8deff916f46fb4dda55 | [
"MIT"
] | 4 | 2020-01-14T13:47:28.000Z | 2022-01-22T12:11:03.000Z | script/json2yaml.py | lunzhiPenxil/json2yaml-for-dice | 62bd66d3350b8a751f0dc8deff916f46fb4dda55 | [
"MIT"
] | null | null | null | script/json2yaml.py | lunzhiPenxil/json2yaml-for-dice | 62bd66d3350b8a751f0dc8deff916f46fb4dda55 | [
"MIT"
] | null | null | null | #!/usr/bin/env python37
# -*- encoding: utf-8 -*-
'''
@File : json2yaml.py
@Time : 2020/01/12 16:44:48
@Author : BenzenPenxil
@Version : 1.0
@Contact : lunzhipenxil@gmail.com
@License : (C)Copyright 2017-2020, Penx.Studio
@Desc : None
'''
# here put the import lib
import json
import yaml
import codecs
import base64
import os
import re
import tkinter
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import webbrowser
import pyperclip
from j2y_data import *
# Converter version string; embedded into the "info" section of generated decks.
j2y_version = "1.0.9.20200605.1"
# Project homepage (presumably opened via webbrowser elsewhere in the file -- TODO confirm).
project_site = "http://benzenpenxil.xyz/json2yaml-for-dice/"

#class type_system_info:
#    def __init__(self, name):
#        self.name = name
#
#system_info = type_system_info(os.name)
class type_deck:
    """Mutable record holding the metadata of one deck being converted.

    Fields are stored verbatim with no validation or copying; the GUI fills
    them in after construction.
    """

    # Attribute names in constructor order.
    _FIELDS = ("name", "author", "version", "command", "desc",
               "includes", "info", "default", "import_list")

    def __init__(self, name, author, version, command, desc, includes, info, default, import_list):
        """Assign every constructor argument to the attribute of the same name."""
        values = (name, author, version, command, desc,
                  includes, info, default, import_list)
        for attr, value in zip(self._FIELDS, values):
            setattr(self, attr, value)
# Global deck record; all fields start empty and are filled in by the GUI.
deck = type_deck("","","","","",[],"","",[])

# Test data (disabled):
#deck.name = "彩六干员"
#deck.author = "仑质"
#deck.version = "191230"
#deck.command = "彩六干员"
#deck.desc = "抽取彩六干员"
#deck.includes = ["干员档案","干员性别"]
#deck.info = "牌堆转换器测试用"
#deck.default = "干员档案"

input_file_name = ""
output_file_name = ""
output_file_name += deck.command

# Conversion options (toggled elsewhere by the GUI):
giveback_flag = 0      # 0 -> bare {key} placeholders become "{%key}", non-zero -> "{$key}" (see item_tran)
versiontran_flag = 0   # non-zero -> dots in the version string are replaced by "_"
tabtran_flag = 0       # non-zero -> first item of each YAML section gets an extra leading space
infoadd_flag = 1       # non-zero -> append the converter-credit "info" section to the output
import_flag = 1        # non-zero -> auto-append fallback tables for placeholders the deck does not define

# Built-in fallback tables used to satisfy placeholders the deck itself does
# not define; extended with dict_from_shiki imported from j2y_data.
dict_import_default = {"性别": ["男", "女", "不明"]}
dict_import_default.update(dict_from_shiki)
dict_for_import = {}
dict_for_import.update(dict_import_default)
#dict_for_import.update({"测试": ["测试"]})
# Records placeholders already emitted so recursive dependency resolution
# does not import the same table twice (see str_get_import_list).
list_for_import_record = []
def filter_emoji(desstr, restr="[EMOJI]"):
    """Replace every character outside the BMP (emoji etc.) in desstr with restr."""
    try:
        pattern = re.compile(u'[\U00010000-\U0010ffff]')
    except re.error:
        # Narrow Unicode builds cannot express astral code points directly,
        # so fall back to matching surrogate pairs instead.
        pattern = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
    return pattern.sub(restr, desstr)
def item_get_import_list(dict_this):
    """Return every {name}/{%name} placeholder referenced by the deck's entries
    that is not itself a key of the deck, in first-seen order, without duplicates.
    """
    referenced = []
    for entries in dict_this.values():
        for entry in entries:
            for match in re.finditer("\{%{0,1}(.*?)\}", entry):
                token = match.group().lstrip("{").lstrip("%").rstrip("}")
                # Leftover braces mean the match was not a plain placeholder name.
                if "{" in token or "}" in token:
                    continue
                # Names defined inside the deck itself need no import.
                if token in dict_this:
                    continue
                if token not in referenced:
                    referenced.append(token)
    return referenced
def str_get_import_list(str_this, dict_this):
    """Scan a single entry for {name}/{%name} placeholders that are neither deck
    keys nor already recorded globally; return the newly found names.

    Side effect: appends each newly seen resolvable token to the module-level
    list_for_import_record, so recursive imports emit each table only once.
    """
    global list_for_import_record
    pending = []
    for placeholder in re.finditer("\{%{0,1}(.*?)\}", str_this):
        token = placeholder.group().lstrip("{").lstrip("%").rstrip("}")
        if token in list_for_import_record:
            continue
        # Leftover braces mean the match was not a plain placeholder name.
        if "{" in token or "}" in token:
            continue
        # Names defined by the deck itself need no import.
        if token in dict_this:
            continue
        if token not in pending:
            pending.append(token)
        list_for_import_record.append(token)
    return pending
def item_tran(item_this, flag):
dice_flag = 0
item_this_new=""
for i in range(0, len(item_this)):
if item_this[i] == "{":
if i + 1 <= len(item_this):
if item_this[i + 1] != "%":
if flag == 0:
item_this_new += "{%"
else:
item_this_new += "{$"
else:
item_this_new += "{$"
elif item_this[i] == "%":
if i - 1 >= 0:
if item_this[i - 1] == "{":
pass
else:
item_this_new += item_this[i]
else:
item_this_new += item_this[i]
elif item_this[i] == "\n":
item_this_new += "\\n"
elif item_this[i] == ",":
item_this_new += ","
elif item_this[i] == "D":
if dice_flag == 0:
item_this_new += item_this[i]
else:
item_this_new += "d"
elif item_this[i] == "[":
dice_flag += 1
item_this_new += item_this[i]
elif item_this[i] == "]":
dice_flag -= 1
item_this_new += item_this[i]
else:
item_this_new += item_this[i]
return item_this_new
def item_tran2show(item_this):
    """Prepare one deck entry for display in the GUI preview tree.

    Newlines become the two-character literal "\\n", half-width commas become
    fullwidth, and emoji are replaced by a placeholder via filter_emoji
    (presumably because the Tk widget cannot render them -- unconfirmed).
    """
    shown = item_this.replace("\n", "\\n").replace(",", ",")
    return filter_emoji(shown)
def add_import_work(dict_need_import, dict_for_import, output_str, dict_this):
    """Recursively append YAML sections for dependency placeholders.

    For every name in dict_need_import that has a fallback table in
    dict_for_import, emit that table as a YAML list section, collect any
    further placeholders its entries reference, and recurse until no new
    dependencies remain.

    :param dict_need_import: placeholder names still to be satisfied
    :param dict_for_import: built-in fallback tables (name -> list of entries)
    :param output_str: YAML document built so far
    :param dict_this: the parsed input deck (its keys never need importing)
    :return: output_str with all resolvable dependency sections appended
    """
    global tabtran_flag
    global giveback_flag
    global list_for_import_record
    dict_need_import_next = []
    for key_this in dict_need_import:
        if key_this in list(dict_for_import.keys()):
            output_str += key_this + ":\n"
            # BUGFIX: output_tran_flag_tmp was only assigned when
            # tabtran_flag != 0, so with the default tabtran_flag == 0 the
            # first item raised UnboundLocalError. Initialise it per section.
            output_tran_flag_tmp = 0
            if tabtran_flag != 0:
                output_tran_flag_tmp = 1
            for item_this in dict_for_import.get(key_this):
                # Imported entries may themselves reference further tables.
                dict_need_import_next += str_get_import_list(item_this, dict_this)
                item_this = item_tran(item_this, giveback_flag)
                if output_tran_flag_tmp == 0:
                    output_str += " - \"" + item_this + "\"\n"
                else:
                    # First item of the section gets an extra leading space
                    # (tab-conversion quirk).
                    output_tran_flag_tmp = 0
                    output_str += " - \" " + item_this + "\"\n"
    if dict_need_import_next != []:
        output_str = add_import_work(dict_need_import_next, dict_for_import, output_str, dict_this)
    return output_str
def json2yaml_work():
    """Convert the JSON deck at input_file_name into a Dice!-style YAML deck.

    Builds the whole YAML document as one string (metadata header, default
    section, one section per deck key, then auto-imported fallback tables)
    and writes it to output_file_name, driving the GUI progress bar as it
    goes. Relies on module-level GUI objects (root, progress_obj,
    file_name_str, file_name_head) being initialised elsewhere in the file.
    """
    global giveback_flag
    global versiontran_flag
    global infoadd_flag
    global import_flag
    global deck
    global input_file_name
    global output_file_name
    global root
    global progress_obj
    global list_for_import_record
    progress_obj["value"] = 0
    root.update()
    # --- YAML header: mandatory deck metadata ---
    output_str = "#必要信息\nname: " + deck.name
    output_str += "\nauthor: " + deck.author
    output_str += "(使用Json2Yaml转换生成)"
    output_str += "\nversion: "
    if versiontran_flag == 0:
        output_str += deck.version
    else:
        # Optionally replace dots with underscores in the version string
        # (presumably some consumers reject dots -- TODO confirm).
        output_str += deck.version.replace(".", "_")
    output_str += "\ncommand: " + deck.command
    output_str += "\ndesc: " + deck.desc
    if deck.includes != [""]:
        # NOTE(review): this attaches a new attribute (includes_str) onto the
        # deck object that type_deck.__init__ never defines.
        deck.includes_str = ""
        for str_now in deck.includes:
            deck.includes_str += " - \"" + str_now + "\"\n"
        output_str += "\nincludes:\n" + deck.includes_str
    else:
        output_str += "\n"
    if infoadd_flag != 0:
        # Optional "info" section crediting the converter and original author.
        output_str += "\n#作者信息\ninfo:\n - \"" + "本牌堆使用Json2Yaml(By BenzenPenxil)自动转换生成\\n转换器版本号:" + j2y_version + "\\n牌堆原作者:" + deck.author + "\"\n"
    output_str += "\n#牌堆部分\n"
    progress_obj["value"] = 5
    root.update()
    try:
        with open(input_file_name,"r",encoding="utf-8") as input_file_obj:
            input_str = input_file_obj.read()
            # Strip a UTF-8 BOM, if present, before parsing.
            # NOTE(review): the encoding kwarg to json.loads was deprecated and
            # removed in Python 3.9; harmless on 3.7 but will TypeError on
            # newer interpreters.
            if input_str.startswith(codecs.BOM_UTF8.decode("UTF-8")):
                input_dict = json.loads(input_str[1:], encoding="utf-8")
            else:
                input_dict = json.loads(input_str, encoding="utf-8")
            progress_obj["value"] = 10
            root.update()
    except json.decoder.JSONDecodeError as error_info:
        # Malformed JSON: reset the selection and show the parser error.
        input_file_name = ""
        file_name_str.set(file_name_head + "请确保文件的Json格式没有错误")
        tkinter.messagebox.showerror("json.decoder.JSONDecodeError", error_info)
        progress_obj["value"] = 0
        root.update()
    except UnicodeDecodeError as error_info:
        # File is not valid UTF-8: reset the selection and tell the user.
        input_file_name = ""
        file_name_str.set(file_name_head + "请确保文件编码格式是UTF-8")
        tkinter.messagebox.showerror("UnicodeDecodeError", error_info)
        progress_obj["value"] = 0
        root.update()
    else:
        # output_tran_flag_tmp == 1 means "the next emitted item is the first
        # of its section and gets an extra leading space" (tab-conversion quirk
        # enabled by tabtran_flag).
        output_tran_flag_tmp = 0
        if deck.default in input_dict:
            output_str += "default:\n"
            if tabtran_flag != 0:
                output_tran_flag_tmp = 1
            for item_this in input_dict.get(deck.default):
                item_this = item_tran(item_this, giveback_flag)
                if output_tran_flag_tmp == 0:
                    output_str += " - \"" + item_this + "\"\n"
                else:
                    output_tran_flag_tmp = 0
                    output_str += " - \" " + item_this + "\"\n"
        progress_obj["value"] = 15
        root.update()
        # Total work units (one per item plus one per key) drive the progress
        # bar, which spans 15..90 for this phase.
        count_work = 0
        for key_this in list(input_dict.keys()):
            count_work += len(input_dict.get(key_this)) + 1
        if count_work <= 75:
            count_step = 1
        else:
            count_step = int(count_work / 75)
        id_count_all = 0
        for key_this in list(input_dict.keys()):
            output_str += key_this + ":\n"
            id_count_all += 1
            progress_obj["value"] = int(id_count_all * 75 / count_work + 15)
            root.update()
            if tabtran_flag != 0:
                output_tran_flag_tmp = 1
            for item_this in input_dict.get(key_this):
                item_this = item_tran(item_this, giveback_flag)
                if output_tran_flag_tmp == 0:
                    output_str += " - \"" + item_this + "\"\n"
                else:
                    output_tran_flag_tmp = 0
                    output_str += " - \" " + item_this + "\"\n"
                id_count_all += 1
                # Only refresh the bar every count_step items to limit GUI churn.
                if id_count_all % (count_step) == 0:
                    progress_obj["value"] = int(id_count_all * 75 / count_work + 15)
                    root.update()
        if import_flag != 0:
            # Resolve placeholders the deck references but does not define,
            # using the built-in fallback tables in dict_for_import.
            list_for_import_record = []
            dict_need_import_next = []
            deck.import_list = item_get_import_list(input_dict)
            deck_import_list = deck.import_list.copy()
            for key_this in deck_import_list:
                if key_this in list(dict_for_import.keys()):
                    deck.import_list.remove(key_this)
                    output_str += key_this + ":\n"
                    if tabtran_flag != 0:
                        output_tran_flag_tmp = 1
                    for item_this in dict_for_import.get(key_this):
                        # Imported entries may themselves reference further tables.
                        dict_need_import_next += str_get_import_list(item_this, input_dict)
                        item_this = item_tran(item_this, giveback_flag)
                        if output_tran_flag_tmp == 0:
                            output_str += " - \"" + item_this + "\"\n"
                        else:
                            output_tran_flag_tmp = 0
                            output_str += " - \" " + item_this + "\"\n"
            if dict_need_import_next != []:
                output_str = add_import_work(dict_need_import_next, dict_for_import, output_str, input_dict)
            if len(deck.import_list) != 0:
                # Anything still unresolved would fail at draw time -- warn.
                tkinter.messagebox.showwarning("未解决的依赖项", "以下依赖项未找到:\n - " + "\n - ".join(deck.import_list) + "\n将会导致抽取时无法正常调用。")
        with open(output_file_name, "w", encoding="utf-8") as output_file_obj:
            output_file_obj.write(output_str)
        progress_obj["value"] = 100
        root.update()
def select_file():
    """Prompt for one Json deck file, parse it, and rebuild the treeview.

    Side effects: updates the module-level ``input_file_name``, the status
    label, and the progress bar.  When import resolution is enabled, warns
    about referenced decks missing from the loaded file.
    """
    global file_name_str
    global tree
    global input_file_name
    global root
    global progress_obj
    progress_obj["value"] = 0
    root.update()
    file_name = tkinter.filedialog.askopenfilenames(title="请选择Json文件", filetypes=[("Json", "*.json"), ("All Files", "*")])
    progress_obj["value"] = 5
    root.update()
    if len(file_name) == 1:
        for file_name_now in file_name:
            try:
                with open(file_name_now, "r", encoding="utf-8") as input_file_obj:
                    input_str = input_file_obj.read()
                # Strip a leading UTF-8 BOM before parsing.  The `encoding`
                # keyword of json.loads() was always a no-op and was removed
                # in Python 3.9, so it must not be passed here.
                if input_str.startswith(codecs.BOM_UTF8.decode("UTF-8")):
                    input_dict = json.loads(input_str[1:])
                else:
                    input_dict = json.loads(input_str)
                progress_obj["value"] = 25
                root.update()
            except json.decoder.JSONDecodeError as error_info:
                input_file_name = ""
                file_name_str.set(file_name_head + "请确保文件的Json格式没有错误")
                tkinter.messagebox.showerror("json.decoder.JSONDecodeError", error_info)
                progress_obj["value"] = 0
                root.update()
            except UnicodeDecodeError as error_info:
                input_file_name = ""
                file_name_str.set(file_name_head + "请确保文件编码格式是UTF-8")
                tkinter.messagebox.showerror("UnicodeDecodeError", error_info)
                progress_obj["value"] = 0
                root.update()
            else:
                # Pre-count total work units (one per deck plus one per item)
                # so progress-bar updates can be throttled via count_step.
                count_work = 0
                for key_this in list(input_dict.keys()):
                    count_work += len(input_dict.get(key_this)) + 1
                input_file_name = file_name_now
                file_name_str.set(file_name_head + "\"" + file_name_now + "\"")
                if len(tree.get_children()) != 0:
                    for tree_children_now in tree.get_children():
                        tree.delete(tree_children_now)
                if count_work <= 75:
                    count_step = 1
                else:
                    count_step = int(count_work / 75)
                tree_id_0_count = 0
                tree_id_all_count = 0
                for key_this in list(input_dict.keys()):
                    # Item ids embed a running counter after '#' so the tree
                    # handlers can recover the bare name with rfind('#').
                    tree_id_0 = tree.insert("", tree_id_0_count, key_this + "#" + str(tree_id_all_count), text=key_this, value=str(tree_id_all_count))
                    tree_id_all_count += 1
                    tree_id_1_count = 0
                    progress_obj["value"] = int(tree_id_all_count * 75 / count_work + 25)
                    root.update()
                    for item_this in input_dict.get(key_this):
                        item_this = item_tran2show(item_this)
                        tree_id_1 = tree.insert(tree_id_0, tree_id_1_count, item_this + "#" + str(tree_id_all_count), text=item_this, value=str(tree_id_all_count))
                        tree_id_all_count += 1
                        tree_id_1_count += 1
                        if tree_id_all_count % (count_step) == 0:
                            progress_obj["value"] = int(tree_id_all_count * 75 / count_work + 25)
                            root.update()
                    tree_id_0_count += 1
                if import_flag != 0:
                    import_list_tmp = item_get_import_list(input_dict)
                    if len(import_list_tmp) != 0:
                        tkinter.messagebox.showwarning("存在依赖项", "扫描中发现以下被引用项不包含于导入文件中:\n - " + "\n - ".join(import_list_tmp) + "\n请确保可以提供这些依赖项。")
                progress_obj["value"] = 100
                root.update()
    elif len(file_name) == 0:
        if len(input_file_name) == 0:
            file_name_str.set(file_name_head + "请选择文件!")
            tkinter.messagebox.showwarning("警告", "请选择文件!")
            progress_obj["value"] = 0
            root.update()
    else:
        if len(input_file_name) == 0:
            file_name_str.set(file_name_head + "请一次只选择一个文件!")
            tkinter.messagebox.showwarning("警告", "请一次只选择一个文件!")
            progress_obj["value"] = 0
            root.update()
def tran_save():
    """Collect deck settings from the entry fields and write the YAML output."""
    global t1
    global t2
    global t3
    global t4
    global t5
    global t6
    global deck
    global input_file_name
    global output_file_name
    global root
    # Guard: a Json file must have been imported first.
    if len(input_file_name) == 0:
        tkinter.messagebox.showwarning("警告", "请先选择要转换的Json文件!")
        progress_obj["value"] = 0
        root.update()
        return
    file_path = tkinter.filedialog.askdirectory(title="选择保存路径")
    # Guard: the user must pick an output directory.
    if len(file_path) == 0:
        tkinter.messagebox.showwarning("警告", "请选择保存路径!")
        progress_obj["value"] = 0
        root.update()
        return
    try:
        deck.name = t1.get()
        deck.author = t2.get()
        deck.version = t3.get()
        deck.command = t1.get()  # the deck name doubles as its command
        deck.desc = t4.get()
        deck.includes = t5.get().split(",")
        deck.default = t6.get()
    except UnicodeDecodeError as error_info:
        tkinter.messagebox.showerror("UnicodeDecodeError", error_info)
        progress_obj["value"] = 0
        root.update()
    else:
        output_file_name = file_path + "/" + deck.name
        json2yaml_work()
        tkinter.messagebox.showinfo("完成", "已保存到 " + output_file_name)
def load_import():
    """Load one or more Json files as a pool of importable dependency decks.

    Each successfully parsed file is merged into the module-level
    ``dict_for_import``; ``dict_import_default`` is re-applied after every
    merge so the built-in entries always win.  Reports how many files failed.
    """
    global dict_for_import
    global dict_import_default
    input_file_name = ""
    progress_obj["value"] = 0
    root.update()
    file_name = tkinter.filedialog.askopenfilenames(title="请选择Json文件", filetypes=[("Json", "*.json"), ("All Files", "*")])
    progress_obj["value"] = 5
    root.update()
    if len(file_name) > 0:
        count_error = 0
        for file_name_now in file_name:
            try:
                with open(file_name_now, "r", encoding="utf-8") as input_file_obj:
                    input_str = input_file_obj.read()
                # Strip a leading UTF-8 BOM before parsing.  The `encoding`
                # keyword of json.loads() was always a no-op and was removed
                # in Python 3.9, so it must not be passed here.
                if input_str.startswith(codecs.BOM_UTF8.decode("UTF-8")):
                    input_dict = json.loads(input_str[1:])
                else:
                    input_dict = json.loads(input_str)
                progress_obj["value"] = 25
                root.update()
            except json.decoder.JSONDecodeError as error_info:
                input_file_name = ""
                file_name_str.set(file_name_head + "请确保文件的Json格式没有错误")
                count_error += 1
                progress_obj["value"] = 0
                root.update()
            except UnicodeDecodeError as error_info:
                input_file_name = ""
                file_name_str.set(file_name_head + "请确保文件编码格式是UTF-8")
                count_error += 1
                progress_obj["value"] = 0
                root.update()
            else:
                dict_for_import.update(**input_dict)
                dict_for_import.update(**dict_import_default)
        progress_obj["value"] = 100
        root.update()
        tkinter.messagebox.showinfo("依赖项已更新", "当前已载入" + str(len(dict_for_import)) + "个备用依赖项\n尝试载入" + str(len(file_name)) + "个文件\n其中共有" + str(count_error) + "个加载失败")
    elif len(file_name) == 0:
        if len(input_file_name) == 0:
            file_name_str.set(file_name_head + "请选择文件!")
            tkinter.messagebox.showwarning("警告", "请选择文件!")
            progress_obj["value"] = 0
            root.update()
    else:
        if len(input_file_name) == 0:
            file_name_str.set(file_name_head + "请一次只选择一个文件!")
            tkinter.messagebox.showwarning("警告", "请一次只选择一个文件!")
            progress_obj["value"] = 0
            root.update()
def clear_conf():
    """Reset every deck-settings entry field to an empty string."""
    global t1
    global t2
    global t3
    global t4
    global t5
    global t6
    for entry_var in (t1, t2, t3, t4, t5, t6):
        entry_var.set("")
def giveback_switch():
    """Toggle the '忽略不放回' option and refresh its menu label."""
    global giveback_flag
    global root
    global set_menu
    giveback_flag = 0 if giveback_flag else 1
    new_label = "忽略不放回[√]" if giveback_flag else "忽略不放回[×]"
    set_menu.entryconfig(1, label=new_label)
def versiontran_switch():
    """Toggle the '版本号优化' option and refresh its menu label."""
    global versiontran_flag
    global root
    global set_menu
    versiontran_flag = 0 if versiontran_flag else 1
    new_label = "版本号优化[√]" if versiontran_flag else "版本号优化[×]"
    set_menu.entryconfig(2, label=new_label)
def tabtran_switch():
    """Toggle the '排版格式优化' option and refresh its menu label."""
    global tabtran_flag
    global root
    global set_menu
    tabtran_flag = 0 if tabtran_flag else 1
    new_label = "排版格式优化[√]" if tabtran_flag else "排版格式优化[×]"
    set_menu.entryconfig(3, label=new_label)
def infoadd_switch():
    """Toggle the '附加Info项' option and refresh its menu label."""
    global infoadd_flag
    global root
    global set_menu
    infoadd_flag = 0 if infoadd_flag else 1
    new_label = "附加Info项[√]" if infoadd_flag else "附加Info项[×]"
    set_menu.entryconfig(4, label=new_label)
def import_switch():
    """Toggle the '尝试解决依赖项' option and refresh its menu label."""
    global import_flag
    global root
    global set_menu
    import_flag = 0 if import_flag else 1
    new_label = "尝试解决依赖项[√]" if import_flag else "尝试解决依赖项[×]"
    set_menu.entryconfig(5, label=new_label)
def show_info():
    """Show the about dialog with project, author, and version information."""
    about_text = ("Json2Yaml基于Python\n\n项目主页:\n" + project_site
                  + "\n\n作者:仑质(BenzenPenxil)\n版本:" + j2y_version
                  + "\n有问题请联系QQ:137334701")
    tkinter.messagebox.showinfo("Json2Yaml By BenzenPenxil", about_text)
def show_project_site():
    """Notify the user, then open the project homepage in the default browser."""
    tkinter.messagebox.showinfo("提示", "将通过浏览器访问 " + project_site)
    try:
        webbrowser.open(project_site)
    except webbrowser.Error as error_info:
        # No usable browser available — surface the error instead of crashing.
        tkinter.messagebox.showerror("webbrowser.Error", error_info)
def tree_copy(obj, event=None):
    """Copy the selected tree item's name (text before its trailing '#id') to the clipboard."""
    selection = obj.selection()
    if len(selection) != 0:
        item_id = selection[0]
        # Item ids end in '#<counter>'; strip that suffix.  A '#' at index 0
        # is ignored, matching the original reverse scan that never reached it.
        marker = item_id.rfind("#")
        if marker > 0:
            pyperclip.copy(item_id[:marker])
def tree_set_name(obj, event=None):
    """Put the selected tree item's name into the 名称/指令 entry field."""
    global t1
    selection = obj.selection()
    if len(selection) != 0:
        item_id = selection[0]
        # Item ids end in '#<counter>'; strip that suffix (ignore '#' at 0).
        marker = item_id.rfind("#")
        if marker > 0:
            t1.set(item_id[:marker])
def tree_add_includes(obj, event=None):
    """Append the selected tree item's name to the comma-separated 子指令 field."""
    global t5
    selection = obj.selection()
    if len(selection) != 0:
        item_id = selection[0]
        # Item ids end in '#<counter>'; strip that suffix (ignore '#' at 0).
        marker = item_id.rfind("#")
        if marker > 0:
            name = item_id[:marker]
            current = t5.get().split(",")
            if current != [""]:
                # Only append when the name is not already listed.
                if name not in current:
                    current.append(name)
                    t5.set(",".join(current))
            else:
                # Field was empty — the name becomes the whole list.
                t5.set(name)
def tree_set_default(obj, event=None):
    """Set the selected item as Default and make sure 'default' is listed in 子指令."""
    global t5
    global t6
    selection = obj.selection()
    if len(selection) != 0:
        item_id = selection[0]
        # Item ids end in '#<counter>'; strip that suffix (ignore '#' at 0).
        marker = item_id.rfind("#")
        if marker > 0:
            t6.set(item_id[:marker])
            current = t5.get().split(",")
            if current != [""]:
                if "default" not in current:
                    current.append("default")
                    t5.set(",".join(current))
            else:
                t5.set("default")
def tree_rightKey(event, obj):
    """Rebuild and show the treeview context menu at the cursor position."""
    tree_rightkey_menu.delete(0, tkinter.END)
    menu_entries = (
        ("复制", tree_copy),
        ("设为名称/指令", tree_set_name),
        ("加入子指令", tree_add_includes),
        ("设为default", tree_set_default),
    )
    for label, handler in menu_entries:
        # Bind the handler as a default argument to avoid late binding.
        tree_rightkey_menu.add_command(label=label, command=lambda h=handler: h(obj, event))
    tree_rightkey_menu.post(event.x_root, event.y_root)
def entry_cut(editor, event=None):
    """Cut the current selection of *editor* to the clipboard via <<Cut>>."""
    editor.event_generate("<<Cut>>")
def entry_copy(editor, event=None):
    """Copy the current selection of *editor* to the clipboard via <<Copy>>."""
    editor.event_generate("<<Copy>>")
def entry_paste(editor, event=None):
    """Paste clipboard contents into *editor* via <<Paste>>."""
    editor.event_generate('<<Paste>>')
def entry_clear(editor, event=None):
    """Empty the StringVar bound to *editor*, clearing the entry widget."""
    global root
    root.globalsetvar(editor["textvariable"], "")
def entry_rightKey(event, editor):
    """Rebuild and show the entry-widget context menu at the cursor position."""
    entry_rightkey_menu.delete(0, tkinter.END)
    for label, handler in (('剪切', entry_cut), ('复制', entry_copy),
                           ('粘贴', entry_paste), ('清空', entry_clear)):
        # Bind the handler as a default argument to avoid late binding.
        entry_rightkey_menu.add_command(label=label, command=lambda h=handler: h(editor))
    entry_rightkey_menu.post(event.x_root, event.y_root)
if __name__ == "__main__":
    # ----- Root window ------------------------------------------------------
    root = tkinter.Tk()
    root.title("Json2Yaml By BenzenPenxil")
    root.geometry("560x624")
    root.resizable(width=False, height=False)
    # ----- Status line showing the currently selected Json file -------------
    file_name_head = "Json文件路径:"
    file_name_str = tkinter.StringVar()
    file_name_str.set(file_name_head + "请选择文件!")
    L1 = tkinter.Label(root, textvariable = file_name_str)
    L1.place(x=0, y=0, width=560,height=32)
    # ----- Deck-settings entry rows (all share one right-click menu) --------
    entry_rightkey_menu = tkinter.Menu(root,tearoff=False)
    EtL1 = tkinter.Label(root, text = "名称/指令")
    EtL1.place(x=0, y=432, width=60,height=32)
    t1 = tkinter.StringVar()
    t1.set("名称与指令")
    Et1 = tkinter.Entry(root, textvariable=t1)
    Et1.place(x=60, y=432, width=500, height=32)
    Et1.bind("<Button-3>", lambda x: entry_rightKey(x, Et1))
    EtL2 = tkinter.Label(root, text = "作者")
    EtL2.place(x=0, y=464, width=60,height=32)
    t2 = tkinter.StringVar()
    t2.set("作者")
    Et2 = tkinter.Entry(root, textvariable=t2)
    Et2.place(x=60, y=464, width=500, height=32)
    Et2.bind("<Button-3>", lambda x: entry_rightKey(x, Et2))
    EtL3 = tkinter.Label(root, text = "版本")
    EtL3.place(x=0, y=496, width=60,height=32)
    t3 = tkinter.StringVar()
    t3.set("版本")
    Et3 = tkinter.Entry(root, textvariable=t3)
    Et3.place(x=60, y=496, width=500, height=32)
    Et3.bind("<Button-3>", lambda x: entry_rightKey(x, Et3))
    EtL4 = tkinter.Label(root, text = "描述")
    EtL4.place(x=0, y=528, width=60,height=32)
    t4 = tkinter.StringVar()
    t4.set("描述")
    Et4 = tkinter.Entry(root, textvariable=t4)
    Et4.place(x=60, y=528, width=500, height=32)
    Et4.bind("<Button-3>", lambda x: entry_rightKey(x, Et4))
    EtL5 = tkinter.Label(root, text = "子指令")
    EtL5.place(x=0, y=560, width=60,height=32)
    t5 = tkinter.StringVar()
    t5.set("子指令")
    Et5 = tkinter.Entry(root, textvariable=t5)
    Et5.place(x=60, y=560, width=500, height=32)
    Et5.bind("<Button-3>", lambda x: entry_rightKey(x, Et5))
    EtL6 = tkinter.Label(root, text = "Default")
    EtL6.place(x=0, y=592, width=60,height=32)
    t6 = tkinter.StringVar()
    t6.set("Default")
    Et6 = tkinter.Entry(root, textvariable=t6)
    Et6.place(x=60, y=592, width=500, height=32)
    Et6.bind("<Button-3>", lambda x: entry_rightKey(x, Et6))
    # Earlier button-based UI, superseded by the menu bar below.
    #Btn1 = tkinter.Button(root, text = "选择文件", command = select_file)
    #Btn1.place(x=500, y=0, width=60, height=32)
    #Btn2 = tkinter.Button(root, text="开始转换", command = tran_save)
    #Btn2.place(x=500, y=592, width=60, height=32)
    #Btn3 = tkinter.Button(root, text="i", command = show_info)
    #Btn3.place(x=528, y=432, width=32, height=32)
    # ----- Menu bar: file actions, option toggles, about --------------------
    menu_bar = tkinter.Menu(root)
    file_menu = tkinter.Menu(menu_bar, tearoff=0)
    set_menu = tkinter.Menu(menu_bar, tearoff=0)
    info_menu = tkinter.Menu(menu_bar, tearoff=0)
    menu_bar.add_cascade(label="文件", menu=file_menu)
    menu_bar.add_cascade(label="操作", menu=set_menu)
    menu_bar.add_cascade(label="关于", menu=info_menu)
    file_menu.add_command(label="导入文件", command=select_file)
    file_menu.add_command(label="开始转换", command=tran_save)
    file_menu.add_command(label="加载依赖项", command=load_import)
    set_menu.add_command(label="清空所有设置栏", command=clear_conf)
    set_menu.add_command(label="忽略不放回[×]", command=giveback_switch)
    set_menu.add_command(label="版本号优化[√]", command=versiontran_switch)
    set_menu.add_command(label="排版格式优化[×]", command=tabtran_switch)
    set_menu.add_command(label="附加Info项[√]", command=infoadd_switch)
    set_menu.add_command(label="尝试解决依赖项[√]", command=import_switch)
    info_menu.add_command(label="关于", command=show_info)
    info_menu.add_command(label="查看项目", command=show_project_site)
    root.config(menu=menu_bar)
    # Sync the toggle labels to the actual flag values; the labels set by
    # add_command above are only placeholders.
    if giveback_flag != 0:
        set_menu.entryconfig(1, label="忽略不放回[√]")
    else:
        set_menu.entryconfig(1, label="忽略不放回[×]")
    if versiontran_flag != 0:
        set_menu.entryconfig(2, label="版本号优化[√]")
    else:
        set_menu.entryconfig(2, label="版本号优化[×]")
    if tabtran_flag != 0:
        set_menu.entryconfig(3, label="排版格式优化[√]")
    else:
        set_menu.entryconfig(3, label="排版格式优化[×]")
    if infoadd_flag != 0:
        set_menu.entryconfig(4, label="附加Info项[√]")
    else:
        set_menu.entryconfig(4, label="附加Info项[×]")
    if import_flag != 0:
        set_menu.entryconfig(5, label="尝试解决依赖项[√]")
    else:
        set_menu.entryconfig(5, label="尝试解决依赖项[×]")
    # Placeholder hints shown in the entry fields on startup.
    t1.set("填入牌堆名,这同时也将是该牌堆的对应指令")
    t2.set("填入作者")
    t3.set("填入版本号")
    t4.set("填入对于该牌堆的描述")
    t5.set(",".join(["填入子指令并用半角逗号隔开"]))
    t6.set("要设置子指令缺省时的调用项请设置此项")
    # for testing
    #t1.set(deck.name)
    #t2.set(deck.author)
    #t3.set(deck.version)
    #t4.set(deck.desc)
    #t5.set(",".join(deck.includes))
    #t6.set(deck.default)
    # ----- Treeview for browsing the loaded deck ----------------------------
    tree = ttk.Treeview(root)
    tree.place(x=0, y=32, width=545, height=401)
    tree_rightkey_menu = tkinter.Menu(root,tearoff=False)
    tree.bind("<Button-3>", lambda x: tree_rightKey(x, tree))
    # The progress bar overlaps the top edge of the tree area.
    progress_obj = ttk.Progressbar(root, orient="horizontal", length=560, mode="determinate")
    progress_obj.place(x=0, y=32, width=560, height=25)
    progress_obj["maximum"] = 100
    progress_obj["value"] = 0
    tree.columnconfigure(0, weight=1)
    tree_yscroll = ttk.Scrollbar(root, orient="vertical", command=tree.yview)
    tree_yscroll.place(x=544, y=57, width=16, height=375)
    tree.configure(yscrollcommand=tree_yscroll.set)
    # Materialize the embedded base64 icon to a temp file, apply it, clean up.
    # NOTE(review): favicon_ico is assumed to be defined elsewhere in this file.
    with open("tmp.ico", "wb+") as tmp:
        tmp.write(base64.b64decode(favicon_ico))
    root.iconbitmap("tmp.ico")
    os.remove("tmp.ico")
    root.mainloop()
    #json2yaml_work()
| 37.842041 | 178 | 0.58252 | 385 | 0.011803 | 0 | 0 | 0 | 0 | 0 | 0 | 5,692 | 0.174505 |
e8a3a3c194d9038995fdc352d8384c0dc7a408e0 | 1,058 | py | Python | metagen/utils.py | huaili-cid/metagen_cli | 8d80714bb839e8cd30a4ec241dcd57a22632f2b9 | [
"MIT"
] | null | null | null | metagen/utils.py | huaili-cid/metagen_cli | 8d80714bb839e8cd30a4ec241dcd57a22632f2b9 | [
"MIT"
] | null | null | null | metagen/utils.py | huaili-cid/metagen_cli | 8d80714bb839e8cd30a4ec241dcd57a22632f2b9 | [
"MIT"
] | null | null | null |
import logging
import os
import math
from metagen.helpers.exceptions import ValidationError
# Module-level loggers: `logger` for library internals; `cli_log` presumably
# for user-facing CLI output (it is named "metagen.cli") — confirm at call sites.
logger = logging.getLogger(__name__)
cli_log = logging.getLogger("metagen.cli")
def key_len(value, type_="ApiKey"):
    """Ensure an API Key or ID has valid length.

    Returns *value* unchanged when it is None or at least 36 characters
    long; otherwise raises ValidationError naming the offending field.
    """
    if value is None or len(value) >= 36:
        return value
    raise ValidationError("{} must be 36 characters long, not {}".format(type_.upper(), str(len(value))))
def collapse_path(path):
    """Convert a path back to ~/ from expanduser().

    Only a *leading* home-directory component is collapsed.  The previous
    implementation used ``str.replace``, which also rewrote paths that merely
    contained the home directory in the middle, and prefixes such as
    ``/home/user2`` when the home directory is ``/home/user``.
    """
    # abspath also strips a trailing separator a HOME env var may carry.
    home_dir = os.path.abspath(os.path.expanduser("~"))
    abs_path = os.path.abspath(path)
    if abs_path == home_dir or abs_path.startswith(home_dir + os.sep):
        return "~" + abs_path[len(home_dir):]
    return abs_path
def is_file(string):
    """Return True when *string* names an existing path (file OR directory).

    Note: despite the name, this checks existence via os.path.exists, so
    directories also count — callers rely on that behavior.
    """
    return os.path.exists(string)
def convert_size(size):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.5KB'."""
    if size == 0:
        return '0B'
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest 1024-power that fits, then scale down to it.
    exponent = int(math.floor(math.log(size, 1024)))
    scaled = round(size / math.pow(1024, exponent), 2)
    return '{}{}'.format(scaled, units[exponent])
| 25.190476 | 100 | 0.621928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.196597 |
e8a44e30435268e66fe7dcfc673cc83f8a07c67f | 5,387 | py | Python | features/steps/common.py | PolySync/kevlar-laces | a5f36d1e4955963b3f1be3fd0ad8d3839a0bee3b | [
"MIT"
] | 3 | 2017-10-05T23:25:40.000Z | 2018-06-06T10:03:56.000Z | features/steps/common.py | PolySync/kevlar-laces | a5f36d1e4955963b3f1be3fd0ad8d3839a0bee3b | [
"MIT"
] | 11 | 2017-10-05T23:22:30.000Z | 2018-07-10T21:13:40.000Z | features/steps/common.py | PolySync/kevlar-laces | a5f36d1e4955963b3f1be3fd0ad8d3839a0bee3b | [
"MIT"
] | 3 | 2018-06-22T21:43:07.000Z | 2021-02-01T12:02:43.000Z | from behave import *
from hamcrest import *
import subprocess
import shlex
import os
import tempfile
import utils
@given('a local copy of the repo on the {branch} branch')
def step_impl(context, branch):
    """Clone the mock GitHub repo into a fresh temp dir and set a test git identity."""
    context.mock_developer_dir = tempfile.mkdtemp(prefix='kevlar')
    setup_commands = (
        'git -C {0} clone -q file:///{1} . -b {2}'.format(context.mock_developer_dir, context.mock_github_dir, branch),
        'git -C {0} checkout -q {1}'.format(context.mock_developer_dir, branch),
        'git -C {0} config --local user.signingkey 794267AC'.format(context.mock_developer_dir),
        'git -C {0} config --local user.name "Local Test"'.format(context.mock_developer_dir),
        'git -C {0} config --local user.email "donut-reply@polysync.io"'.format(context.mock_developer_dir),
        'git -C {0} config --local gpg.program gpg2'.format(context.mock_developer_dir),
    )
    for command in setup_commands:
        utils.shell_command(command)
@given('I create a new {branch} branch')
def step_impl(context, branch):
    """Create and check out a new branch in the developer clone."""
    command = 'git -C {0} checkout -b {1}'.format(context.mock_developer_dir, branch)
    utils.run_with_project_in_path(command, context)
@given('the {release_tag} release tag already exists')
def step_impl(context, release_tag):
    """Pre-create a signed release tag in the mock GitHub repo."""
    command = 'git -C {0} tag -s {1} -m {1}'.format(context.mock_github_dir, release_tag)
    utils.run_with_project_in_path(command, context)
@given('the GPG signing key is not available')
def step_impl(context):
    """Point the developer clone at a nonexistent signing key id."""
    command = 'git -C {0} config --local user.signingkey 00000000'.format(context.mock_developer_dir)
    utils.run_with_project_in_path(command, context)
@given('I have done some work on the repo')
def step_impl(context):
    """Drop a known fixture file into the developer clone's working tree."""
    utils.shell_command('cp -a {0}/features/test_file.txt {1}/test_file.txt'.format(os.getcwd(), context.mock_developer_dir))
@given('the project contains subdirectory {directory}')
def step_impl(context, directory):
    """Create *directory* inside the developer clone and remember it as context.wd."""
    wd = '{0}/{1}'.format(context.mock_developer_dir, directory)
    utils.run_with_project_in_path('mkdir {0}/{1}'.format(context.mock_developer_dir, directory), context)
    context.wd = wd
@given('the {branch} branch contains unsigned commits')
def step_impl(context, branch):
    """Add an empty, explicitly unsigned commit to the current branch."""
    utils.run_with_project_in_path('git -C {0} commit --allow-empty --no-gpg-sign -m "creating an unsigned commit"'.format(context.mock_developer_dir), context)
@given('the {tag} tag is unsigned')
def step_impl(context, tag):
    """Create an annotated (but not GPG-signed) tag in the developer clone."""
    utils.run_with_project_in_path('git -C {0} tag -a {1} -m {1}'.format(context.mock_developer_dir, tag), context)
@given('the {tag} tag contains unsigned commits')
def step_impl(context, tag):
    """Create an unsigned commit, then a signed tag pointing at it."""
    utils.run_with_project_in_path('git -C {0} commit --allow-empty --no-gpg-sign -m "creating an unsigned commit"'.format(context.mock_developer_dir), context)
    utils.run_with_project_in_path('git -C {0} tag -s {1} -m {1}'.format(context.mock_developer_dir, tag), context)
@when('the {command} command is run with the -h flag')
def step_impl(context, command):
    """Run the named git subcommand with -h and capture stdout/stderr/rc."""
    cmd = 'git -C {0} {1} -h'.format(context.mock_developer_dir, command)
    context.out, context.err, context.rc = utils.run_with_project_in_path(cmd, context)
@when('I run git {action} from the {directory} directory')
def step_impl(context, action, directory):
    """Run a git action from the remembered working directory (context.wd)."""
    command = 'git -C {0} {1}'.format(context.wd, action)
    context.out, context.err, context.rc = utils.run_with_project_in_path(command, context)
@when('I run git-{action}')
def step_impl(context, action):
    """Run a git action from the developer clone and capture its output."""
    command = 'git -C {0} {1}'.format(context.mock_developer_dir, action)
    context.out, context.err, context.rc = utils.run_with_project_in_path(command, context)
@then('the script should return {exit_code}')
def step_impl(context, exit_code):
    """Assert the captured exit code matches the expected value."""
    assert_that(context.rc, equal_to(int(exit_code)))
@then('the merge commit should be signed')
def step_impl(context):
    """Verify the recorded commit hash carries a valid GPG signature."""
    command = "git -C {0} verify-commit {1}".format(context.mock_github_dir, context.sha_hash)
    unused, verify_output, rc = utils.run_with_project_in_path(command, context)
    # `git verify-commit` reports signature details on stderr.
    assert_that(verify_output, contains_string('Signature made'))
@then('the repo should be returned to the state it was in before I ran the script')
def step_impl(context):
    """Verify the developer clone's working file still holds its original text."""
    original_string = 'A working file with some text'
    with open('{0}/test_file.txt'.format(context.mock_developer_dir), 'r') as check_file:
        exists = any(original_string in line for line in check_file)
    # The original called assert_that(exists, True): PyHamcrest treats a
    # non-matcher second argument as the failure *reason*, not a matcher,
    # so use an explicit equal_to matcher for a proper assertion/message.
    assert_that(exists, equal_to(True))
@then('the repo should be returned to the {branch} branch when I am done')
def step_impl(context, branch):
    """Assert `git branch` output mentions the expected branch name."""
    out, err, rc = utils.run_with_project_in_path('git -C {0} branch'.format(context.mock_developer_dir), context)
    assert_that(out, contains_string(branch))
@then('the {directory} directory should exist when I am done')
def step_impl(context, directory):
    """Check that *directory* still exists inside the developer clone."""
    out, err, rc = utils.shell_command('ls {0}/{1}'.format(context.mock_developer_dir, directory))
    # Assert on the `ls` exit code just captured; the original asserted the
    # stale context.rc left over from a previous step, so this check could
    # never fail on a missing directory.
    assert_that(rc, equal_to(0))
@then('the terminal displays usage options for the {command} command')
def step_impl(context, command):
    """Assert the captured stdout contains a usage banner."""
    assert_that(context.out, contains_string('usage:'))
@then('the terminal prints an error')
def step_impl(context):
    """Assert the captured stdout contains an ERROR marker."""
    assert_that(context.out, contains_string('ERROR:'))
@then('the script exits with status 0')
def step_impl(context):
    """Assert the captured exit code is 0."""
    assert_that(context.rc, equal_to(0))
| 46.439655 | 160 | 0.733247 | 0 | 0 | 0 | 0 | 5,224 | 0.969742 | 0 | 0 | 1,767 | 0.328012 |
e8a6d1ef969a737bdcd7f9ccf6c5ef2da1f6db6f | 268 | py | Python | src/utils/utils.py | chokyzhou/gym-flappy-bird | ffe1089501f3e2e113a8868cd27480653dbe0ef7 | [
"MIT"
] | null | null | null | src/utils/utils.py | chokyzhou/gym-flappy-bird | ffe1089501f3e2e113a8868cd27480653dbe0ef7 | [
"MIT"
] | null | null | null | src/utils/utils.py | chokyzhou/gym-flappy-bird | ffe1089501f3e2e113a8868cd27480653dbe0ef7 | [
"MIT"
] | null | null | null | import math
def obs2state(obs, multiplier=1000):
    """Discretize a continuous observation into a state-key string.

    Parameters
    ----------
    obs : sequence of float
        (x position, y position, y velocity).  Positions are scaled by
        *multiplier* and floored; velocity is truncated to an int.
    multiplier : int, optional
        Scale factor applied to the positions before flooring.

    Returns
    -------
    str
        'x_y_vel' state key.
    """
    x_pos = int(math.floor(obs[0] * multiplier))
    y_pos = int(math.floor(obs[1] * multiplier))
    # Fixed the original's duplicated assignment ('y_vel = y_vel = ...').
    y_vel = int(obs[2])
    return '{}_{}_{}'.format(x_pos, y_pos, y_vel)
e8a71fdcaa8a95b7ce8a94c0bbae859056c52963 | 494 | py | Python | python_marketman/__version__.py | LukasKlement/python-marketman | c851b10765f87182949925230ab614620bbfd15b | [
"Apache-2.0"
] | null | null | null | python_marketman/__version__.py | LukasKlement/python-marketman | c851b10765f87182949925230ab614620bbfd15b | [
"Apache-2.0"
] | null | null | null | python_marketman/__version__.py | LukasKlement/python-marketman | c851b10765f87182949925230ab614620bbfd15b | [
"Apache-2.0"
] | null | null | null | """Version details for python-marketman
This file shamelessly taken from the requests library"""
__title__ = 'python-marketman'
__description__ = 'A basic Marketman.com REST API client.'
__url__ = 'https://github.com/LukasKlement/python-marketman'
__version__ = '0.1'
__author__ = 'Lukas Klement'
__author_email__ = 'lukas.klement@me.com'
__license__ = 'Apache 2.0'
__maintainer__ = 'Lukas Klement'
__maintainer_email__ = 'lukas.klement@me.com'
__keywords__ = 'python marketman marketman.com'
| 35.285714 | 60 | 0.779352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.661943 |
e8a9619f356f6c17c215a983117012929ea7b3f0 | 2,191 | py | Python | discharge_plot.py | amforte/Caucasus_Erosion | c839c90282f87256220abe390993b362b88b8b74 | [
"MIT"
] | 2 | 2021-05-15T05:04:57.000Z | 2021-12-10T02:25:29.000Z | discharge_plot.py | amforte/Caucasus_Erosion | c839c90282f87256220abe390993b362b88b8b74 | [
"MIT"
] | null | null | null | discharge_plot.py | amforte/Caucasus_Erosion | c839c90282f87256220abe390993b362b88b8b74 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plots exceedance frequency and the relationships between discharge and drainage
area for gauged basins
Written by Adam M. Forte for
"Low variability runoff inhibits coupling of climate, tectonics, and
topography in the Greater Caucasus"
If you use this code or derivatives, please cite the original paper.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from cmcrameri import cm
def survive(Q):
    """Build an exceedance-frequency curve for a discharge series.

    Returns the discharges normalized by their mean (sorted ascending) and
    the exceedance frequency associated with each sorted value.
    """
    normalized = np.sort(Q / np.mean(Q))
    count = len(normalized)
    ranks = np.arange(1, count + 1, 1)
    exceedance = (count + 1 - ranks) / count
    return normalized, exceedance
# Load data from gauged basins
qdf=pd.read_csv('data_tables/grdc_summary_values.csv')
mR=qdf['mean_runoff_mm_day'].to_numpy()
da=qdf['DA_km2'].to_numpy()
mRain=qdf['mnTRMM_mm_day'].to_numpy()
ID=qdf['ID'].to_numpy()
N=len(ID)
# Left panel: exceedance-frequency curves, one per gauged basin, colored by
# mean TRMM rainfall.
f1=plt.figure(num=1,figsize=(14,5))
ax1=plt.subplot(1,2,1)
ax1.set_ylim((10**-4,1))
ax1.set_xlim((0.01,500))
ax1.set_yscale('log')
ax1.set_xscale('log')
ax1.set_xlabel('Runoff [mm/day]')
ax1.set_ylabel('Exceedance Frequency')
rain_norm=colors.Normalize(vmin=1,vmax=6)
mQ=np.zeros(ID.shape)
for i in range(N):
    df=pd.read_csv('data_tables/grdc_discharge_time_series/GRDC_'+str(ID[i])+'.csv')
    Q=df['Q'].to_numpy()
    # NOTE(review): dividing the mean by 86400 presumably converts a per-day
    # quantity to per-second (m3/s per the axis label) — confirm the units of
    # the 'Q' column in the GRDC series files.
    mQ[i]=np.mean(Q)/(60*60*24)
    [Qstar_sort,Q_freq_excd]=survive(Q)
    Rainv=np.ones(Q.shape)*mRain[i]
    ax1.scatter(Qstar_sort*mR[i],Q_freq_excd,c=Rainv,norm=rain_norm,cmap=cm.batlow_r,s=2)
# Right panel: mean discharge vs drainage area, with reference lines for
# constant runoff rates of 1..6 mm/day.
ax2=plt.subplot(1,2,2)
ax2.set_ylim((10**-1,10**3))
ax2.set_xlim((50,5000))
ax2.set_yscale('log')
ax2.set_xscale('log')
ax2.set_xlabel('Catchment Area [km]')
ax2.set_ylabel('Mean Q [m3/s]')
runs=np.arange(1,7,1)
inter=np.linspace(0,1,len(runs))
# NOTE(review): this rebinds the name `colors`, shadowing the
# matplotlib.colors module imported above.  Harmless here because
# colors.Normalize was already called, but worth renaming.
colors=[cm.batlow_r(x) for x in inter]
sc1=ax2.scatter(da,mQ,c=mRain,marker='o',s=40,norm=rain_norm,cmap=cm.batlow_r)
xx=np.linspace(50,5000,100)
# (km^2 -> m^2) / (mm -> m) / (day -> s): converts mm/day over km^2 to m^3/s.
con=(1000**2)/(1000*24*60*60)
for i, color in enumerate(colors):
    ax2.plot(xx,xx*runs[i]*con,c=color,zorder=0,linestyle=':')
cbar1=plt.colorbar(sc1,ax=ax2)
cbar1.ax.set_ylabel('Mean Rainfall [mm/day]')
# (Output filename typo 'dischage' preserved from the original.)
f1.savefig('dischage.pdf')
f1.savefig('dischage.tif',dpi=300)
e8a9c1a7e5fb15f5604bf50940522a213e5cd010 | 131 | py | Python | app/api/__init__.py | gladuo/VideoShow | 544c6ccd98ee4da5950d914289f30b5e918aa1a6 | [
"MIT"
] | null | null | null | app/api/__init__.py | gladuo/VideoShow | 544c6ccd98ee4da5950d914289f30b5e918aa1a6 | [
"MIT"
] | null | null | null | app/api/__init__.py | gladuo/VideoShow | 544c6ccd98ee4da5950d914289f30b5e918aa1a6 | [
"MIT"
] | null | null | null | from flask import Blueprint
api = Blueprint('api', __name__)
from . import authentication, videos, shows, users, comments, errors | 26.2 | 68 | 0.770992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.038168 |
e8a9f52b1807abbd9ad1f04b35b11beb966b81b5 | 706 | py | Python | openatom/principal_quantum_number.py | 2556-AD/Open-Atom | 656e8167f156491143544eb6ed0d153e2073ff7a | [
"MIT"
] | 1 | 2022-02-04T20:18:21.000Z | 2022-02-04T20:18:21.000Z | openatom/principal_quantum_number.py | 2556-AD/OpenAtom | 656e8167f156491143544eb6ed0d153e2073ff7a | [
"MIT"
] | null | null | null | openatom/principal_quantum_number.py | 2556-AD/OpenAtom | 656e8167f156491143544eb6ed0d153e2073ff7a | [
"MIT"
] | null | null | null | from openatom.UNIVERSAL_CONSTANTS import *
from openatom.azimuthal_quantum_number import AzimuthalQNum
class PrincipalQNum():
def __init__(self, shellIdx):
self.label = self.assignShellLabel(shellIdx)
self.principalQuantumNumVal = shellIdx + 1
self.azimuthalArray = []
self.azimuthalArray = [AzimuthalQNum(len(self.azimuthalArray)) for i in range(self.principalQuantumNumVal)]
# shellArray.append(PrincipalQNum(len(shellArray)))
def assignShellLabel(self, shellIdx):
shellMap = {
0 : 'K',
1 : 'L',
2 : 'M',
3 : 'N',
4 : 'O',
5 : 'P'
}
return shellMap[shellIdx] | 33.619048 | 115 | 0.604816 | 602 | 0.852691 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.097734 |
e8aaccc9d7657be30449c0f2c759dfb4f94b6bc3 | 5,853 | py | Python | hissw/environment.py | binchensun/hissw | c03599b1d6c75dc7117318ecde20a49b8f14a97d | [
"MIT"
] | null | null | null | hissw/environment.py | binchensun/hissw | c03599b1d6c75dc7117318ecde20a49b8f14a97d | [
"MIT"
] | null | null | null | hissw/environment.py | binchensun/hissw | c03599b1d6c75dc7117318ecde20a49b8f14a97d | [
"MIT"
] | null | null | null | """
Build SSW scripts from Jinja 2 templates
"""
import os
import datetime
import subprocess
import tempfile
from jinja2 import (Environment as Env,
FileSystemLoader,
PackageLoader)
from scipy.io import readsav
from .read_config import defaults
from .util import SSWIDLError, IDLLicenseError
class Environment(object):
"""
Environment for running SSW and IDL scripts
Parameters
----------
ssw_packages : list, optional
List of SSW packages to load, e.g. 'sdo/aia', 'chianti'
ssw_paths : list, optional
List of SSW paths to pass to `ssw_path`
extra_paths : list, optional
Additional paths to add to the IDL namespace
ssw_home : str, optional
Root of SSW tree
idl_home : str, optional
Path to IDL executable
"""
def __init__(self, ssw_packages=None, ssw_paths=None, extra_paths=None,
ssw_home=None, idl_home=None,):
self.ssw_packages = ssw_packages if ssw_packages is not None else []
self.ssw_paths = ssw_paths if ssw_paths is not None else []
self.extra_paths = extra_paths if extra_paths is not None else []
self.env = Env(loader=PackageLoader('hissw', 'templates'))
self._setup_home(ssw_home, idl_home,)
def _setup_home(self, ssw_home, idl_home,):
"""
Setup SSW and IDL home locations
"""
self.ssw_home = defaults['ssw_home'] if ssw_home is None else ssw_home
if self.ssw_home is None:
raise ValueError('''ssw_home must be set at instantiation or in the hisswrc file.''')
self.idl_home = defaults['idl_home'] if idl_home is None else idl_home
if self.idl_home is None:
raise ValueError('''idl_home must be set at instantiation or in the hisswrc file.''')
def custom_script(self, script, args):
"""
Generate custom IDL scripts from templates
"""
if os.path.isfile(script):
env = Env(loader=FileSystemLoader(os.path.dirname(script)))
idl_script = env.get_template(os.path.basename(script)).render(**args)
else:
env = Env()
idl_script = env.from_string(script).render(**args)
return idl_script
def procedure_script(self, script, save_vars, save_filename):
"""
Render inner procedure file
"""
if save_vars is None:
save_vars = []
params = {'script': script, 'save_vars': save_vars, 'save_filename': save_filename}
return self.env.get_template('procedure.pro').render(**params)
def command_script(self, procedure_filename):
"""
Generate parent IDL script
"""
params = {'ssw_paths': self.ssw_paths,
'extra_paths': self.extra_paths,
'procedure_filename': procedure_filename}
return self.env.get_template('parent.pro').render(**params)
def shell_script(self, command_filename):
"""
Generate shell script for starting up SSWIDL
"""
params = {'ssw_home': self.ssw_home,
'ssw_packages': self.ssw_packages,
'idl_home': self.idl_home,
'command_filename': command_filename}
return self.env.get_template('startup.sh').render(**params)
    def run(self, script, args=None, save_vars=None, verbose=True):
        """
        Set up the SSWIDL environment and run the supplied scripts.

        Renders the procedure/parent/startup scripts into a temporary
        directory, executes the startup shell script as a subprocess,
        checks the captured output for IDL failures, then reads the
        saved IDL variables back with ``readsav``.

        Parameters
        ----------
        script : str
            Literal script or path to script file
        args : dict, optional
            Input arguments to script
        save_vars : list, optional
            Variables to save and return from the IDL namespace
        verbose : bool, optional
            If True, echo the subprocess stderr/stdout after the run.

        Returns
        -------
        The variables named in ``save_vars`` as loaded from the ``.sav``
        file (a ``readsav`` result object).
        """
        args = {} if args is None else args
        with tempfile.TemporaryDirectory() as tmpdir:
            # Get filenames
            # A timestamp in the names keeps runs distinguishable in logs,
            # even though the tmpdir itself is already unique per call.
            fn_template = os.path.join(
                tmpdir, '{name}_'+datetime.datetime.now().strftime('%Y%m%d-%H%M%S')+'.{ext}')
            save_filename = fn_template.format(name='idl_vars', ext='sav')
            procedure_filename = fn_template.format(name='idl_procedure', ext='pro')
            command_filename = fn_template.format(name='idl_script', ext='pro')
            shell_filename = fn_template.format(name='ssw_shell', ext='sh')
            # Render and save scripts
            idl_script = self.custom_script(script, args)
            with open(procedure_filename, 'w') as f:
                f.write(self.procedure_script(idl_script, save_vars, save_filename))
            with open(command_filename, 'w') as f:
                f.write(self.command_script(procedure_filename))
            with open(shell_filename, 'w') as f:
                f.write(self.shell_script(command_filename,))
            # Execute
            # Mark the startup script executable, then run it through a
            # shell; stdout/stderr are captured for error inspection below.
            subprocess.call(['chmod', 'u+x', shell_filename])
            cmd_output = subprocess.run([shell_filename], shell=True, stderr=subprocess.PIPE,
                                        stdout=subprocess.PIPE)
            self._check_for_errors(cmd_output, verbose)
            # Read the .sav file back while the tmpdir still exists.
            results = readsav(save_filename)
            return results
def _check_for_errors(self, output, verbose):
"""
Check IDL output to try and decide if an error has occurred
"""
stdout = output.stdout.decode('utf-8')
stderr = output.stderr.decode('utf-8')
# NOTE: For some reason, not only errors are output to stderr so we
# have to check it for certain keywords to see if an error occurred
if 'execution halted' in stderr.lower():
raise SSWIDLError(stderr)
if 'failed to acquire license' in stderr.lower():
raise IDLLicenseError(stderr)
if verbose:
print(f'{stderr}\n{stdout}')
| 39.02 | 97 | 0.612677 | 5,516 | 0.942423 | 0 | 0 | 0 | 0 | 0 | 0 | 2,006 | 0.34273 |
e8aaf8435a809b03ac25ed685b774a912883267c | 4,129 | py | Python | tools/erd.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
] | null | null | null | tools/erd.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
] | 14 | 2022-01-25T17:56:52.000Z | 2022-01-28T17:47:59.000Z | tools/erd.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
] | null | null | null | """
Entity-relation diagram (ERD) GraphViz dot-file generator.
Usage:
python -m erd db.json -o db.dot
Then pass the result to the GraphViz `dot` tool:
dot db.dot -T png -o db.png
Inspired by: https://github.com/ehne/ERDot
"""
import argparse
import json
import re
from pathlib import Path
from typing import Dict, List
import pydantic
class Spec(pydantic.BaseModel):
    """Parsed content of the input JSON ERD specification."""
    # table name -> {column key -> column type}; a column key may carry a
    # '*' (primary key) or '+' (foreign key) marker prefix.
    tables: Dict[str, Dict[str, str]] = {}
    # enum name -> list of member names
    enums: Dict[str, List[str]] = {}
    # relation strings, e.g. "src:dest_id *--1 dest:id"
    relations: List[str] = []
# NOTE: this would be simpler with a declarative templating tool (e.g. Jinja2),
# but we don't have one in the project at the moment of writing this tool.
# So, imperative we go...

# Font applied to graph, nodes and edges.
FONT = "Arial"
COLUMN_TYPE_COLOR = "gray40"  # See: https://graphviz.org/doc/info/colors.html

# Dot-file skeleton; {tables}, {enums} and {relations} are filled in by
# render().  Doubled braces escape literal dot braces for str.format().
# BUGFIX: the edge fontname attribute had a stray ']' inside the quotes
# ('fontname="{font}]"'), which produced a bogus font name; also
# "cencentrate" is corrected to graphviz's "concentrate" attribute.
GRAPHVIZ_TEMPLATE = """
digraph G {{
    graph [
        nodesep=0.5;
        rankdir="LR";
        concentrate=true;
        splines="spline";
        fontname="{font}";
        pad="0.2,0.2"
    ];
    node [shape=plain, fontname="{font}"];
    edge [
        dir=both,
        fontsize=12,
        arrowsize=0.9,
        penwidth=1.0,
        labelangle=32,
        labeldistance=1.8,
        fontname="{font}"
    ];
{tables}
{enums}
{relations}
}}
"""
def render_table(name: str, columns: Dict[str, str]) -> str:
    """Render one table as a GraphViz node with an HTML-like label.

    Column keys may be prefixed with ``*`` (primary key) or ``+``
    (foreign key); the marker is displayed as ``PK``/``FK`` and stripped
    from the port name that edges attach to.
    """
    rows = [f"<tr><td><i>{name}</i></td></tr>"]
    for key, type_ in columns.items():
        port = key.replace("+", "").replace("*", "")
        display_name = key.replace("*", "PK ").replace("+", "FK ")
        rows.append(
            f'<tr><td port="{port}" align="left" cellpadding="5">{display_name}'
            " "
            f'<font color="{COLUMN_TYPE_COLOR}">{type_}</font></td></tr>'
        )
    label = "\n".join(
        ['<table border="0" cellborder="1" cellspacing="0">', *rows, "</table>"]
    )
    return f'"{name}" [label=<{label}>];'
def render_enum(name: str, items: List[str]) -> str:
    """Render one enum as a GraphViz node with an HTML-like label."""
    rows = [
        '<table border="0" cellborder="1" cellspacing="0">',
        f"<tr><td><i>{name}</i></td></tr>",
    ]
    rows.extend(
        f'<tr><td align="left" cellpadding="5">{item}</td></tr>' for item in items
    )
    rows.append("</table>")
    return '"{}" [label=<{}>];'.format(name, "\n".join(rows))
def render_relation(relation: str) -> str:
    """Render one relation string into a GraphViz edge statement.

    Format: ``source:fk CARD--CARD dest:pk`` where each CARD is ``*``
    (zero or more), ``+`` (one or more) or a digit (exactly one).  The
    left cardinality decorates the tail (source) end of the edge, the
    right one the head (destination) end.
    """
    # Example: src:dest_id *--1 dest:id
    m = re.match(
        r"^(?P<source_name>\w+):(?P<source_fk>\w+) (?P<left_cardinality>[\d\+\*])--(?P<right_cardinality>[\d\+\*]) (?P<dest_name>\w+):(?P<dest_pk>\w+)$",  # noqa: E501
        relation,
    )
    assert m is not None, f"Invalid relation format: {relation!r}"
    (
        source_name,
        source_fk,
        left_cardinality,
        right_cardinality,
        dest_name,
        dest_pk,
    ) = m.groups()
    # Crow's-foot decorations; any digit falls through to "exactly one".
    tail_props = {
        "*": "arrowtail=ocrow",
        "+": "arrowtail=ocrowtee",
    }.get(left_cardinality, "arrowtail=noneotee")
    # BUGFIX: the destination end must be styled with `arrowhead`.  The
    # original emitted `arrowtail` for both ends, so the second attribute
    # overrode the first and the head-end cardinality was never drawn.
    head_props = {
        "*": "arrowhead=ocrow",
        "+": "arrowhead=ocrowtee",
    }.get(right_cardinality, "arrowhead=noneotee")
    return "\n".join(
        (
            f'"{source_name}":"{source_fk}"->"{dest_name}":"{dest_pk}" [',
            f"{head_props},",
            f"{tail_props},",
            "];",
        )
    )
def render(content: str) -> str:
    """Render the JSON spec in ``content`` into a complete dot file."""
    spec = Spec(**json.loads(content))
    table_block = "\n".join(
        render_table(table_name, columns)
        for table_name, columns in spec.tables.items()
    )
    enum_block = "\n".join(
        render_enum(enum_name, members)
        for enum_name, members in spec.enums.items()
    )
    relation_block = "\n".join(map(render_relation, spec.relations))
    return GRAPHVIZ_TEMPLATE.format(
        font=FONT,
        tables=table_block,
        enums=enum_block,
        relations=relation_block,
    )
def main() -> None:
    """CLI entry point: read the JSON spec, write the rendered dot file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file", type=Path)
    parser.add_argument("-o", "--output-file", type=Path)
    args = parser.parse_args()

    dot_source = render(args.input_file.read_text())
    args.output_file.write_text(dot_source)


if __name__ == "__main__":
    main()
| 25.487654 | 167 | 0.57302 | 141 | 0.034149 | 0 | 0 | 0 | 0 | 0 | 0 | 1,914 | 0.46355 |
e8ab8f69dbeef8fba3bea64a32742c5139ef4502 | 1,862 | py | Python | tests/test_hyponym_detector.py | phlobo/scispacy | e40e702770236ac35c41cbb3f1f7d1ac72f76a51 | [
"Apache-2.0"
] | 15 | 2018-09-24T23:35:18.000Z | 2019-02-01T02:33:47.000Z | tests/test_hyponym_detector.py | phlobo/scispacy | e40e702770236ac35c41cbb3f1f7d1ac72f76a51 | [
"Apache-2.0"
] | 23 | 2018-09-25T16:40:39.000Z | 2019-02-05T00:50:59.000Z | tests/test_hyponym_detector.py | phlobo/scispacy | e40e702770236ac35c41cbb3f1f7d1ac72f76a51 | [
"Apache-2.0"
] | 3 | 2018-09-24T22:11:33.000Z | 2019-01-29T18:17:50.000Z | # pylint: disable=no-self-use,invalid-name
import unittest
import spacy
from scispacy.hyponym_detector import HyponymDetector
class TestHyponymDetector(unittest.TestCase):
    """End-to-end tests for HyponymDetector on the en_core_sci_sm model.

    NOTE(review): the expected token slices (e.g. ``doc[21:23]``) are tied
    to this specific model's tokenizer/parser output — re-verify after any
    model upgrade.
    """
    def setUp(self):
        # Build a pipeline with the extended Hearst patterns enabled.
        super().setUp()
        self.nlp = spacy.load("en_core_sci_sm")
        # Standalone detector instance, used to call helper methods directly.
        self.detector = HyponymDetector(self.nlp, extended=True)
        # The registered pipe populates doc._.hearst_patterns during nlp().
        self.nlp.add_pipe("hyponym_detector", config={"extended": True}, last=True)
    def test_sentences(self):
        """Hearst patterns (such_as / other / include) are extracted."""
        text = (
            "Recognizing that the preferred habitats for the species "
            "are in the valleys, systematic planting of keystone plant "
            "species such as fig trees (Ficus) creates the best microhabitats."
        )
        doc = self.nlp(text)
        # Spans of the expected hypernym/hyponym pair in the parsed doc.
        fig_trees = doc[21:23]
        plant_species = doc[17:19]
        assert doc._.hearst_patterns == [("such_as", plant_species, fig_trees)]
        doc = self.nlp("SARS, or other coronaviruses, are bad.")
        assert doc._.hearst_patterns == [("other", doc[4:5], doc[0:1])]
        doc = self.nlp("Coronaviruses, including SARS and MERS, are bad.")
        assert doc._.hearst_patterns == [
            ("include", doc[0:1], doc[3:4]),
            ("include", doc[0:1], doc[5:6]),
        ]
    def test_find_noun_compound_head(self):
        """The compound head of a token is found (or the token itself)."""
        doc = self.nlp("The potassium channel is good.")
        head = self.detector.find_noun_compound_head(doc[1])
        assert head == doc[2]
        doc = self.nlp("Planting of large plants.")
        head = self.detector.find_noun_compound_head(doc[3])
        # Planting is a noun, but not a compound with 'plants'.
        assert head != doc[0]
        assert head == doc[3]
    def test_expand_noun_phrase(self):
        """A token expands to the full noun compound containing it."""
        doc = self.nlp("Keystone plant habitats are good.")
        chunk = self.detector.expand_to_noun_compound(doc[1], doc)
        assert chunk == doc[0:3]
| 36.509804 | 83 | 0.627282 | 1,732 | 0.930183 | 0 | 0 | 0 | 0 | 0 | 0 | 544 | 0.292159 |
e8ae561a8349a5665464d5b45bc60cb2a5fdf194 | 1,787 | py | Python | conftest.py | goalkeeer/boilerplate-django | f8866314fd8511a9cfe563b52259602ced6aa93c | [
"MIT"
] | null | null | null | conftest.py | goalkeeer/boilerplate-django | f8866314fd8511a9cfe563b52259602ced6aa93c | [
"MIT"
] | null | null | null | conftest.py | goalkeeer/boilerplate-django | f8866314fd8511a9cfe563b52259602ced6aa93c | [
"MIT"
] | null | null | null | import os
from contextlib import contextmanager
import pytest
from django import setup as django_setup
from django.core.cache import caches
from django.test import TransactionTestCase
# Transaction rollback emulation
# http://docs.djangoproject.com/en/2.0/topics/testing/overview/#rollback-emulation
# Globally enable re-serialization of DB state for TransactionTestCase so
# data created by migrations survives the per-test database flush.
TransactionTestCase.serialized_rollback = True
@pytest.fixture
def api_user():
    """Create and return an active user with a known test password."""
    from django.contrib.auth import get_user_model

    new_user = get_user_model()(username='test', email='test@test.ru', is_active=True)
    new_user.set_password('test_password')
    new_user.save()
    return new_user
def pytest_configure():
    """Point Django at the project settings and initialize it for tests."""
    settings_module = "_project_.settings"
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)
    django_setup()
@pytest.fixture(scope='session')
def base_url(live_server):
    """Session-scoped base URL of pytest-django's live test server."""
    server_url = live_server.url
    return server_url
@pytest.fixture(autouse=True)
def clear_caches():
    """Automatically empty every configured Django cache before each test."""
    for configured_cache in caches.all():
        configured_cache.clear()
# HELPERS
@pytest.fixture(scope='function')
def assert_num_queries_lte(pytestconfig):
    """
    Return a context manager that fails the test when more than ``num``
    SQL queries run inside the ``with`` block.

    With pytest's ``-v`` flag the captured SQL statements are appended to
    the failure message.
    """
    from django.db import connection
    from django.test.utils import CaptureQueriesContext

    @contextmanager
    def _assert_num_queries(num):
        with CaptureQueriesContext(connection) as context:
            yield
        queries = len(context)
        if queries > num:
            # BUGFIX: corrected "less then" -> "less than" in the message.
            msg = f"Expected to perform less than {num} queries" \
                  f" but {queries} were done"
            if pytestconfig.getoption('verbose') > 0:
                sqls = (q['sql'] for q in context.captured_queries)
                msg += '\n\nQueries:\n========\n\n%s' % '\n\n'.join(sqls)
            else:
                msg += " (add -v option to show queries)"
            pytest.fail(msg)

    return _assert_num_queries
| 27.921875 | 82 | 0.665921 | 0 | 0 | 809 | 0.452714 | 1,295 | 0.724678 | 0 | 0 | 378 | 0.211528 |
e8b03a2e2c4731bb8314e6a0c733af40e7fcd845 | 4,063 | py | Python | htdocs/plotting/auto/scripts100/p103.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | htdocs/plotting/auto/scripts100/p103.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | htdocs/plotting/auto/scripts100/p103.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | """Steps up and down"""
import calendar
import numpy as np
from pandas.io.sql import read_sql
from pyiem import network
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
# Season key -> human-readable date range shown in the season picker.
PDICT = {'spring': '1 January - 30 June',
         'fall': '1 July - 31 December'}
def get_description():
    """ Return a dict describing how to call this plotter """
    desc = {}
    desc['data'] = True
    desc['description'] = """This plot analyzes the number of steps down in
    low temperature during the fall season and the number of steps up in
    high temperature during the spring season. These steps are simply having
    a newer colder low or warmer high for the season to date period.
    """
    # Two user-facing inputs: the station and which half-year to analyze.
    desc['arguments'] = [
        {'type': 'station', 'name': 'station', 'default': 'IA2203',
         'label': 'Select Station', 'network': 'IACLIMATE'},
        {'type': 'select', 'name': 'season', 'options': PDICT,
         'label': 'Select which half of year', 'default': 'fall'},
    ]
    return desc
def plotter(fdict):
    """ Go

    Build a three-panel figure for the selected station/season:
    1) yearly count of step events, 2) distribution of the step
    temperature levels, 3) distribution of the day-of-year of events.
    Returns (figure, dataframe).
    """
    pgconn = get_dbconn('coop')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['station']
    season = ctx['season']
    # Per-state climate table, e.g. alldata_ia for an IA station id.
    table = "alldata_%s" % (station[:2],)
    nt = network.Table("%sCLIMATE" % (station[:2],))
    # Running seasonal max-high / min-low via window functions; rank=1
    # keeps the first day each new extreme level was reached ("step").
    df = read_sql("""
    WITH obs as (
        SELECT day, year, month, high, low,
        case when month > 6 then 'fall' else 'spring' end as season
        from """ + table + """ WHERE station = %s),
    data as (
        SELECT year, day, season,
        max(high) OVER (PARTITION by year, season ORDER by day ASC
            ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as mh,
        min(low) OVER (PARTITION by year, season ORDER by day ASC
            ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as ml
        from obs),
    lows as (
        SELECT year, day, ml as level, season,
        rank() OVER (PARTITION by year, ml ORDER by day ASC) from data
        WHERE season = 'fall'),
    highs as (
        SELECT year, day, mh as level, season,
        rank() OVER (PARTITION by year, mh ORDER by day ASC) from data
        WHERE season = 'spring')
    (SELECT year, day, extract(doy from day) as doy,
    level, season from lows WHERE rank = 1) UNION
    (SELECT year, day, extract(doy from day) as doy,
    level, season from highs WHERE rank = 1)
    """, pgconn, params=[station])
    # Keep only the half-year the user asked for.
    df2 = df[df['season'] == season]
    (fig, ax) = plt.subplots(3, 1, figsize=(7, 10))
    # Panel 1: events per year with the long-term average line.
    dyear = df2.groupby(['year']).count()
    ax[0].bar(dyear.index, dyear['level'], facecolor='tan', edgecolor='tan')
    ax[0].axhline(dyear['level'].mean(), lw=2)
    ax[0].set_ylabel("Yearly Events Avg: %.1f" % (dyear['level'].mean(), ))
    ax[0].set_xlim(dyear.index.min()-1, dyear.index.max()+1)
    title = "%s Steps %s" % (PDICT[season],
                             "Down" if season == 'fall' else 'Up')
    ax[0].set_title("%s [%s]\n%s in Temperature" % (nt.sts[station]['name'],
                                                    station, title))
    ax[0].grid(True)
    # Panel 2: temperature levels, 2-degree bins, freezing highlighted.
    # NOTE(review): `normed=` was removed in matplotlib 3.x in favor of
    # `density=` — confirm the pinned matplotlib version still accepts it.
    ax[1].hist(np.array(df2['level'], 'f'),
               bins=np.arange(df2['level'].min(),
                              df2['level'].max()+1, 2),
               normed=True, facecolor='tan')
    ax[1].set_ylabel("Probability Density")
    ax[1].axvline(32, lw=2)
    ax[1].grid(True)
    ax[1].set_xlabel(r"Temperature $^\circ$F, 32 degrees highlighted")
    # Panel 3: day-of-year distribution with month labels on the axis.
    ax[2].hist(np.array(df2['doy'], 'f'),
               bins=np.arange(df2['doy'].min(),
                              df2['doy'].max()+1, 3),
               normed=True, facecolor='tan')
    ax[2].set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274,
                      305, 335, 365))
    ax[2].set_xticklabels(calendar.month_abbr[1:])
    ax[2].set_xlim(df2['doy'].min() - 3,
                   df2['doy'].max() + 3)
    ax[2].set_ylabel("Probability Density")
    ax[2].grid(True)
    ax[2].set_xlabel("Day of Year, 3 Day Bins")
    return fig, df
if __name__ == '__main__':
    plotter(dict())
| 37.275229 | 77 | 0.575683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,986 | 0.488801 |
e8b070cce38d20c7a2e6fd86cc0080c388e9443f | 310 | py | Python | Module8/inheritance/02_task_IterInt.py | xm4dn355x/specialist_python3_2nd_lvl | 4ea8c82eb0f32aa92c82914f6599c2c47a2f7032 | [
"MIT"
] | null | null | null | Module8/inheritance/02_task_IterInt.py | xm4dn355x/specialist_python3_2nd_lvl | 4ea8c82eb0f32aa92c82914f6599c2c47a2f7032 | [
"MIT"
] | null | null | null | Module8/inheritance/02_task_IterInt.py | xm4dn355x/specialist_python3_2nd_lvl | 4ea8c82eb0f32aa92c82914f6599c2c47a2f7032 | [
"MIT"
] | null | null | null | # Разработать класс IterInt, который наследует функциональность стандартного типа int, но добавляет
# возможность итерировать по цифрам числа
class IterInt(int):
    """int subclass that supports iterating over its decimal digits.

    Iteration yields each digit (most significant first) as an int; the
    sign is ignored.  BUGFIX: the original body was ``pass``, so the
    advertised digit iteration raised TypeError — ``__iter__`` is now
    implemented.
    """
    def __iter__(self):
        # str() gives the decimal digits in order; abs() drops a leading '-'.
        for digit_char in str(abs(self)):
            yield int(digit_char)
# Demo: relies on IterInt providing digit iteration (__iter__); with a
# bare `pass` body this loop raises TypeError.
n = IterInt(12346)
for digit in n:
    print("digit = ", digit)
# Expected output:
# digit = 1
# digit = 2
# digit = 3
# digit = 4
# digit = 6
e8b2ef11a1743e630591f449491e6c4cb3e345c2 | 1,100 | py | Python | base/backprop_perceptron.py | tardatio/granary_ai | 1af8efe0f2f971d25763b9b457e9025a73bdfb0d | [
"MIT"
] | null | null | null | base/backprop_perceptron.py | tardatio/granary_ai | 1af8efe0f2f971d25763b9b457e9025a73bdfb0d | [
"MIT"
] | null | null | null | base/backprop_perceptron.py | tardatio/granary_ai | 1af8efe0f2f971d25763b9b457e9025a73bdfb0d | [
"MIT"
] | null | null | null |
def forward(w, s, b, y):
    """Forward pass of one linear unit with squared-error loss.

    Returns (loss, prediction) where prediction = w*s + b and
    loss = (prediction - y)**2.
    """
    prediction = w * s + b
    loss = (prediction - y) ** 2
    return loss, prediction
def derivative_W(x, output, Yhat, y):
    """Gradient of the squared-error loss with respect to the weight.

    Chain rule: d/dw (Yhat - y)**2 = 2 * (Yhat - y) * x.
    BUGFIX: the original multiplied by an extra ``output`` factor
    (yielding 2*(Yhat-y)**3 * x), which is not the derivative; ``output``
    stays in the signature for backward compatibility but is unused.
    """
    return 2 * (Yhat - y) * x
def derivative_B(b, output, Yhat, y):
    """Gradient of the squared-error loss with respect to the bias.

    Chain rule: d/db (Yhat - y)**2 = 2 * (Yhat - y).
    BUGFIX: the original multiplied by both ``output`` and ``b``, which is
    not the derivative; both parameters stay in the signature for backward
    compatibility but are unused.
    """
    return 2 * (Yhat - y)
def main():
    """Tiny gradient-descent demo: fit y = 2*x with one weight and bias.

    Runs up to ``epoch + 1`` steps, printing the state each step, and
    stops early once the loss reaches exactly 0.0.
    """
    w = 1.0 #weight
    x = 2.0 #sample
    b = 1.0 #bias
    y = 2.0*x #rule
    learning = 1e-1
    epoch = 3
    for i in range(epoch+1):
        output, Yhat = forward(w,x,b,y)
        print("-----------------------------------------------------------------------------")
        print("w:",w)
        # NOTE(review): the "w*b:" label actually prints w*x — the label
        # text looks wrong, but changing it would alter runtime output.
        print("\tw*b:",w*x)
        print("x:",x,"\t\tsum:", w*x+b)
        print("\tb:",b,"\t\t\tg1:",abs(Yhat-y),"\tg2:",abs(Yhat-y)**2,"\tloss:",output)
        print("\t\tY=2*x:", y)
        print("-----------------------------------------------------------------------------")
        if output == 0.0:
            break
        # Gradient-descent update of both parameters.
        gw = derivative_W(x, output, Yhat, y)
        gb = derivative_B(b, output, Yhat, y)
        w -= learning * gw
        b -= learning * gb
if __name__ == '__main__':
    main()
| 25 | 94 | 0.408182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.246364 |
e8b3cbd2eb084e2d31ca72bd36ee823a6ed4df75 | 4,273 | py | Python | os/pe.py | clayne/gef-extras | 1900ae30ab56da61cbb7821c7e1ef54d31480c55 | [
"MIT"
] | 76 | 2018-03-12T09:58:23.000Z | 2022-03-28T06:58:10.000Z | os/pe.py | clayne/gef-extras | 1900ae30ab56da61cbb7821c7e1ef54d31480c55 | [
"MIT"
] | 30 | 2018-04-14T05:47:21.000Z | 2022-03-23T12:37:14.000Z | os/pe.py | clayne/gef-extras | 1900ae30ab56da61cbb7821c7e1ef54d31480c55 | [
"MIT"
] | 38 | 2018-03-13T11:48:34.000Z | 2022-03-31T04:37:03.000Z | import struct
import os
# Module-level cache for the PE object of the currently debugged file;
# consulted by is_pe32/is_pe64 before parsing headers again.
current_pe = None
class PE:
    """Basic PE parsing.

    Reads just enough of the DOS/COFF/optional headers to identify the
    target machine, section count and image layout of a PE binary.

    Ref:
    - https://hshrzd.wordpress.com/pe-bear/
    - https://blog.kowalczyk.info/articles/pefileformat.html
    """
    # IMAGE_FILE_MACHINE_* constants from the COFF file header.
    X86_64 = 0x8664
    X86_32 = 0x14c
    ARM = 0x1c0
    ARM64 = 0xaa64
    ARMNT = 0x1c4
    AM33 = 0x1d3
    IA64 = 0x200
    EFI = 0xebc
    MIPS = 0x166
    MIPS16 = 0x266
    MIPSFPU = 0x366
    MIPSFPU16 = 0x466
    WCEMIPSV2 = 0x169
    POWERPC = 0x1f0
    POWERPCFP = 0x1f1
    SH3 = 0x1a2
    SH3DSP = 0x1a3
    SH4 = 0x1a6
    SH5 = 0x1a8
    THUMP = 0x1c2
    RISCV32 = 0x5032
    RISCV64 = 0x5064
    RISCV128 = 0x5128
    M32R = 0x9041

    # Parsed header fields; the class-level values double as defaults and,
    # for dos_magic/pe_magic, as the expected signature bytes.
    dos_magic = b'MZ'
    ptr_to_pe_header = None
    pe_magic = b'PE'
    machine = X86_32
    num_of_sections = None
    size_of_opt_header = None
    dll_charac = None
    opt_magic = b'\x02\x0b'
    entry_point = None
    base_of_code = None
    image_base = None

    def __init__(self, pe=""):
        """Parse the headers of the file at path ``pe``.

        Unreadable files are logged via the gef-provided ``err`` helper
        and leave the defaults in place; a bad DOS magic resets
        ``machine`` to None so callers can detect the failure.
        """
        if not os.access(pe, os.R_OK):
            err("'{0}' not found/readable".format(pe))
            err("Failed to get file debug information, most of gef features will not work")
            return
        with open(pe, "rb") as fd:
            # off 0x0: DOS header magic ("MZ")
            self.dos_magic = fd.read(2)
            if self.dos_magic != PE.dos_magic:
                self.machine = None
                return
            # off 0x3c: e_lfanew, file offset of the PE signature
            fd.seek(0x3c)
            self.ptr_to_pe_header, = struct.unpack("<I", fd.read(4))
            # off_pe + 0x0: PE signature
            fd.seek(self.ptr_to_pe_header)
            self.pe_magic = fd.read(2)
            # off_pe + 0x4: COFF header (machine, number of sections)
            fd.seek(self.ptr_to_pe_header + 0x4)
            self.machine, self.num_of_sections = struct.unpack("<HH", fd.read(4))
            # off_pe + 0x14: optional-header size and DLL characteristics
            fd.seek(self.ptr_to_pe_header + 0x14)
            self.size_of_opt_header, self.dll_charac = struct.unpack("<HH", fd.read(4))
            # off_pe + 0x18: optional header magic (PE32 vs PE32+)
            self.opt_magic = fd.read(2)
            # off_pe + 0x28: entry point RVA and base of code
            fd.seek(self.ptr_to_pe_header + 0x28)
            self.entry_point, self.base_of_code = struct.unpack("<II", fd.read(8))
            # off_pe + 0x30: preferred image base
            self.image_base, = struct.unpack("<I", fd.read(4))
        return

    def is_valid(self):
        """Return True when both the DOS and PE signatures matched.

        BUGFIX: the original compared against ``PE.DOS_MAGIC`` (uppercase),
        an attribute that does not exist, so this always raised
        AttributeError; the attribute is ``dos_magic``.
        """
        return self.dos_magic == PE.dos_magic and self.pe_magic == PE.pe_magic

    def get_machine_name(self):
        """Return a human-readable name for ``self.machine`` (None maps to None)."""
        return {
            0x14c: "X86",
            0x166: "MIPS",
            0x169: "WCEMIPSV2",
            0x1a2: "SH3",
            0x1a3: "SH3DSP",
            0x1a6: "SH4",
            0x1a8: "SH5",
            0x1c0: "ARM",
            0x1c2: "THUMP",
            0x1c4: "ARMNT",
            0x1d3: "AM33",
            0x1f0: "PowerPC",
            0x1f1: "PowerPCFP",
            0x200: "IA64",
            0x266: "MIPS16",
            0x366: "MIPSFPU",
            0x466: "MIPSFPU16",
            0xebc: "EFI",
            0x5032: "RISCV32",
            0x5064: "RISCV64",
            0x5128: "RISCV128",
            0x8664: "X86_64",
            0x9041: "M32R",
            None: None
        }[self.machine]
@lru_cache()
def get_pe_headers(filename=None):
    """Return an PE object with info from `filename`. If not provided, will return
    the currently debugged file.

    Returns None for remote ("target:") files; results are memoized per
    filename by lru_cache.
    """
    if filename is None:
        # gef-provided helper: path of the file being debugged.
        filename = get_filepath()
    if filename.startswith("target:"):
        warn("Your file is remote, you should try using `gef-remote` instead")
        return
    return PE(filename)
@lru_cache()
def is_pe64(filename=None):
    """Checks if `filename` is an PE64.

    The module-level `current_pe` cache, when set, takes precedence over
    parsing `filename`.
    """
    pe = current_pe or get_pe_headers(filename)
    return pe.machine == PE.X86_64
@lru_cache()
def is_pe32(filename=None):
    """Checks if `filename` is an PE32.

    The module-level `current_pe` cache, when set, takes precedence over
    parsing `filename`.
    """
    pe = current_pe or get_pe_headers(filename)
    return pe.machine == PE.X86_32
e8b5f3144bc1d3413522f60d4e6476ad9be6026a | 2,230 | py | Python | ecommerce/discounts_test.py | mitodl/mitxonline | adf6084b1f4addd57473153ed6bd08ea09bc4685 | [
"BSD-3-Clause"
] | null | null | null | ecommerce/discounts_test.py | mitodl/mitxonline | adf6084b1f4addd57473153ed6bd08ea09bc4685 | [
"BSD-3-Clause"
] | 420 | 2021-07-13T14:58:52.000Z | 2022-03-31T20:50:10.000Z | ecommerce/discounts_test.py | mitodl/mitx-online | adf6084b1f4addd57473153ed6bd08ea09bc4685 | [
"BSD-3-Clause"
] | 1 | 2021-07-25T21:28:32.000Z | 2021-07-25T21:28:32.000Z | import pytest
from decimal import Decimal, getcontext
from ecommerce.factories import ProductFactory, DiscountFactory
from ecommerce.discounts import (
DiscountType,
PercentDiscount,
FixedPriceDiscount,
DollarsOffDiscount,
)
# Every test in this module requires database access.
pytestmark = [pytest.mark.django_db]
@pytest.fixture()
def products():
    """Provide a batch of five factory-built products."""
    batch_size = 5
    return ProductFactory.create_batch(batch_size)
@pytest.fixture()
def discounts():
    """Provide a batch of ten factory-built discounts."""
    batch_size = 10
    return DiscountFactory.create_batch(batch_size)
def test_discount_factory_generation(discounts):
    """
    Runs through discounts and makes sure all the ones that come out of the
    factory are recognizable by the test suite. (This is a sort of sanity
    check - if a new discount type gets added and the tests aren't updated, this
    test will fail.)
    """
    known_types = (DollarsOffDiscount, FixedPriceDiscount, PercentDiscount)
    for discount in discounts:
        discount_logic = DiscountType.for_discount(discount)
        # Exact-type match (not isinstance) mirrors the factory contract.
        assert any(type(discount_logic) is known for known in known_types)
def test_discount_factory_adjustment(discounts, products):
    """
    Tests discounting products. Applies every generated discount to every
    generated product, computes the expected discounted price independently
    per discount type, and compares it with the discount logic's result.
    """
    for product in products:
        for discount in discounts:
            discount_logic = DiscountType.for_discount(discount)
            if type(discount_logic) is DollarsOffDiscount:
                # Dollars-off floors at zero.
                expected_price = product.price - discount.amount
                if expected_price < 0:
                    expected_price = 0
            elif type(discount_logic) is FixedPriceDiscount:
                expected_price = discount.amount
            elif type(discount_logic) is PercentDiscount:
                expected_price = round(
                    product.price * Decimal(discount.amount / 100), 2
                )
            else:
                expected_price = None
            assert expected_price > 0
            assert expected_price == discount_logic.get_product_price(product)
| 30.972222 | 81 | 0.663677 | 0 | 0 | 0 | 0 | 153 | 0.06861 | 0 | 0 | 496 | 0.222422 |
e8b7f72135ff17d1528a58a4b339193557a313ca | 3,952 | py | Python | web-cloudformation/lambda_function.py | ClarkAtAmazon/aws-media-services-application-mapper | ab074a0313cb87cefaf807794763c07553be7c04 | [
"Apache-2.0"
] | null | null | null | web-cloudformation/lambda_function.py | ClarkAtAmazon/aws-media-services-application-mapper | ab074a0313cb87cefaf807794763c07553be7c04 | [
"Apache-2.0"
] | null | null | null | web-cloudformation/lambda_function.py | ClarkAtAmazon/aws-media-services-application-mapper | ab074a0313cb87cefaf807794763c07553be7c04 | [
"Apache-2.0"
] | null | null | null | """
This module is the custom resource used by the MSAM's CloudFormation
templates to populate the web bucket with contents of the MSAM web archive.
"""
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
from subprocess import call
import boto3
from botocore.exceptions import ClientError
import resource_tools
# Scratch directory in the Lambda /tmp filesystem where the web archive
# is unpacked before being uploaded to the destination bucket.
WEB_FOLDER = "/tmp/msam"
def lambda_handler(event, context):
    """
    Lambda entry point. Print the event first.

    Handles CloudFormation custom-resource requests: Create/Update
    repopulate the web bucket, Delete empties it.  Always responds to
    CloudFormation via resource_tools.send with SUCCESS or FAILED.
    """
    print("Event Input: %s" % json.dumps(event))
    bucket_name = event["ResourceProperties"]["BucketName"]
    # Optimistic SUCCESS response; replaced below on failure.
    result = {'Status': 'SUCCESS', "StackId": event["StackId"], "RequestId": event["RequestId"], "LogicalResourceId": event["LogicalResourceId"], 'Data': {}, 'ResourceId': bucket_name}
    # Reuse the existing physical id on Update/Delete, otherwise derive one
    # from the stack and logical resource names.
    if event.get("PhysicalResourceId", False):
        result["PhysicalResourceId"] = event["PhysicalResourceId"]
    else:
        result["PhysicalResourceId"] = "{}-{}".format(resource_tools.stack_name(event), event["LogicalResourceId"])
    try:
        if event["RequestType"] == "Create" or event["RequestType"] == "Update":
            print(event["RequestType"])
            replace_bucket_contents(bucket_name)
        elif event["RequestType"] == "Delete":
            print(event["RequestType"])
            delete_bucket_contents(bucket_name)
    except ClientError as client_error:
        print("Exception: %s" % client_error)
        result = {
            'Status': 'FAILED',
            "StackId": event["StackId"],
            "RequestId": event["RequestId"],
            "LogicalResourceId": event["LogicalResourceId"],
            'Data': {
                "Exception": str(client_error)
            },
            'ResourceId': None
        }
    resource_tools.send(event, context, result['Status'], result['Data'], result["PhysicalResourceId"])
def replace_bucket_contents(bucket_name):
    """
    This function is responsible for removing any existing contents
    in the specified bucket, and adding contents from the zip archive.
    """
    client = boto3.client("s3")
    region = os.environ["AWS_REGION"]
    stamp = os.environ["BUILD_STAMP"]
    code_bucket = os.environ["BUCKET_BASENAME"]
    # NOTE(review): `source` is built but no command below downloads it —
    # the zip is expected at ./msam-web-{stamp}.zip already; confirm a
    # download step is not missing.
    source = "https://{code_bucket}-{region}.s3.amazonaws.com/msam/msam-web-{stamp}.zip".format(code_bucket=code_bucket, region=region, stamp=stamp)
    # empty the bucket
    delete_bucket_contents(bucket_name)
    # execute these commands to download the zip and extract it locally
    # NOTE(review): `unzip msam-web-{stamp}.zip` uses a path relative to the
    # Lambda working directory, while the rm uses /tmp — verify the paths.
    command_list = [
        "rm -f /tmp/msam-web-{stamp}.zip".format(stamp=stamp), "rm -rf {folder}".format(folder=WEB_FOLDER),
        "mkdir {folder}".format(folder=WEB_FOLDER), "unzip msam-web-{stamp}.zip -d {folder}".format(stamp=stamp, folder=WEB_FOLDER), "ls -l {folder}".format(folder=WEB_FOLDER)
    ]
    for command in command_list:
        print(call(command, shell=True))
    # upload each local file to the bucket, preserve folders
    for dirpath, _, filenames in os.walk(WEB_FOLDER):
        for name in filenames:
            local = "{}/{}".format(dirpath, name)
            # Strip the local unpack prefix so S3 keys mirror the archive.
            remote = local.replace("{}/".format(WEB_FOLDER), "")
            content_type = None
            if remote.endswith(".js"):
                content_type = "application/javascript"
            elif remote.endswith(".html"):
                content_type = "text/html"
            else:
                content_type = "binary/octet-stream"
            client.put_object(Bucket=bucket_name, Key=remote, Body=open(local, 'rb'), ContentType=content_type)
def delete_bucket_contents(bucket_name):
    """
    This function is responsible for removing all contents from the
    specified bucket.

    BUGFIX: a single list_objects_v2 call returns at most 1000 keys, so
    larger buckets were only partially emptied; the paginator walks every
    page of results.
    """
    client = boto3.client("s3")
    paginator = client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket_name):
        # "Contents" is absent from the page when the bucket is empty.
        for item in page.get("Contents", []):
            client.delete_object(Bucket=bucket_name, Key=item["Key"])
| 39.52 | 184 | 0.652328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,606 | 0.406377 |
e8b95468398dfbf5ccf8ee057f5e1a97d91c23c9 | 304 | py | Python | tailow/operators/size.py | sourcepirate/tailow | 5398e619ad9508ba89153e9cc505a80cdb65d5fd | [
"MIT"
] | 2 | 2018-06-13T06:01:09.000Z | 2018-10-19T12:00:50.000Z | tailow/operators/size.py | sourcepirate/tailow | 5398e619ad9508ba89153e9cc505a80cdb65d5fd | [
"MIT"
] | null | null | null | tailow/operators/size.py | sourcepirate/tailow | 5398e619ad9508ba89153e9cc505a80cdb65d5fd | [
"MIT"
] | 2 | 2018-08-01T06:13:47.000Z | 2018-10-20T07:15:11.000Z | from tailow.operators.base import Operator
class SizeOperator(Operator):
"""
operator to query for arrays by number of elements
"""
def to_query(self, field_name, value):
return {"$size": value}
def get_value(self, field, value):
return field.to_son(value)
| 20.266667 | 59 | 0.648026 | 258 | 0.848684 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.259868 |
e8bd7184378822445ccc65b6e322ca8a912026c5 | 385 | py | Python | src/cart/migrations/0005_orderitem_quantity.py | Bakhtiyar-Habib/CSE327-Project | 4126b40eb398e4cf13b49136e552775c5f3b0635 | [
"bzip2-1.0.6"
] | null | null | null | src/cart/migrations/0005_orderitem_quantity.py | Bakhtiyar-Habib/CSE327-Project | 4126b40eb398e4cf13b49136e552775c5f3b0635 | [
"bzip2-1.0.6"
] | null | null | null | src/cart/migrations/0005_orderitem_quantity.py | Bakhtiyar-Habib/CSE327-Project | 4126b40eb398e4cf13b49136e552775c5f3b0635 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.0.7 on 2020-05-18 05:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the integer `quantity` column
    # (default 1) to the cart app's OrderItem model.  Do not edit by hand.

    dependencies = [
        ('cart', '0004_auto_20200518_1139'),
    ]

    operations = [
        migrations.AddField(
            model_name='orderitem',
            name='quantity',
            field=models.IntegerField(default=1),
        ),
    ]
| 20.263158 | 49 | 0.597403 | 292 | 0.758442 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.257143 |
e8bd93991acde3b39cff9f04ff4419e3c6c936b3 | 2,097 | py | Python | odoo-13.0/odoo/addons/base/wizard/base_language_install.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 12 | 2021-03-26T08:39:40.000Z | 2022-03-16T02:20:10.000Z | odoo-13.0/odoo/addons/base/wizard/base_language_install.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 13 | 2020-12-20T16:00:21.000Z | 2022-03-14T14:55:30.000Z | odoo-13.0/odoo/addons/base/wizard/base_language_install.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | 17 | 2020-08-31T11:18:49.000Z | 2022-02-09T05:57:31.000Z | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class BaseLanguageInstall(models.TransientModel):
    """Transient wizard that installs/updates translation terms for a language."""
    _name = "base.language.install"
    _description = "Install Language"
    @api.model
    def _default_language(self):
        """ Display the selected language when using the 'Update Terms' action
            from the language list view
        """
        if self._context.get('active_model') == 'res.lang':
            lang = self.env['res.lang'].browse(self._context.get('active_id'))
            return lang.code
        return False
    @api.model
    def _get_languages(self):
        # [code, name] pairs for the Selection field; the middle element of
        # each get_available() tuple (the iso code) is intentionally dropped.
        return [[code, name] for code, _, name in self.env['res.lang'].get_available()]
    lang = fields.Selection(_get_languages, string='Language', required=True,
                            default=_default_language)
    overwrite = fields.Boolean('Overwrite Existing Terms',
                               default=True,
                               help="If you check this box, your customized translations will be overwritten and replaced by the official ones.")
    state = fields.Selection([('init', 'init'), ('done', 'done')],
                             string='Status', readonly=True, default='init')
    def lang_install(self):
        """Load the selected language's terms into all installed modules
        and reopen the wizard in its 'done' state."""
        self.ensure_one()
        mods = self.env['ir.module.module'].search([('state', '=', 'installed')])
        mods.with_context(overwrite=self.overwrite)._update_translations(self.lang)
        self.state = 'done'
        # Refresh planner statistics after the bulk translation insert.
        self.env.cr.execute('ANALYZE ir_translation')
        return {
            'name': _('Language Pack'),
            'view_mode': 'form',
            'view_id': False,
            'res_model': 'base.language.install',
            'domain': [],
            'context': dict(self._context, active_ids=self.ids),
            'type': 'ir.actions.act_window',
            'target': 'new',
            'res_id': self.id,
        }
    def reload(self):
        """Ask the web client to reload so the new language takes effect."""
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }
| 36.789474 | 145 | 0.57463 | 1,954 | 0.931807 | 0 | 0 | 491 | 0.234144 | 0 | 0 | 752 | 0.358608 |
e8be64b0dcdd575acf275b088a14909677a52a3e | 1,627 | py | Python | DataAnalysis/test.py | yuxiang-zhou/MarketAnalysor | 4d19d2589d07409cd699f394921d1a95f3097e94 | [
"MIT"
] | null | null | null | DataAnalysis/test.py | yuxiang-zhou/MarketAnalysor | 4d19d2589d07409cd699f394921d1a95f3097e94 | [
"MIT"
] | null | null | null | DataAnalysis/test.py | yuxiang-zhou/MarketAnalysor | 4d19d2589d07409cd699f394921d1a95f3097e94 | [
"MIT"
] | null | null | null | import urllib2
import threading
from bs4 import BeautifulSoup
import re
import json
import sys
import os
import django
from stock_list import getlist, getLSEList
from extract_stock_info import get_info, getLSEInfo
from extract_stock_history import get_historical_info
from extract_sector_history import get_sector_history, get_sector_dict
from extract_stock_news import get_stock_news
from extract_NT_transactions import get_NT_transactions
import time
from pymongo import MongoClient
import warnings
import exceptions
warnings.filterwarnings("ignore", category=exceptions.RuntimeWarning, module='django.db.backends.sqlite3.base', lineno=53)
if __name__ == '__main__':
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../MADjangoProject'))
if not path in sys.path:
sys.path.insert(1, path)
del path
os.environ['DJANGO_SETTINGS_MODULE'] = 'MADjangoProject.settings'
django.setup()
from market.models import Stock, StockHistory, SectorHistory
sec_dict = get_sector_dict()
print 'Fethcing Indices...'
ALL_Stocks = getLSEList(collection=Stock)
def get_share_info():
for share in ALL_Stocks:
print 'Fetching info of ' + share['name']
info = getLSEInfo(share['query'], share['symbol'],collection=Stock, sector_dict=sec_dict)
import threading
print 'Distributing Jobs ...'
threads = []
# callables = [get_nt]
callables = [get_share_info]
for f in callables:
t = threading.Thread(target=f)
t.setDaemon(True)
threads.append(t)
t.start()
for t in threads:
t.join()
| 29.053571 | 122 | 0.728949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.139521 |
e8bfced731c80842e64d71e3b64a2ac8f77ae7b9 | 566 | py | Python | icpc/2019-10-4/F-gen.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | 3 | 2018-08-30T09:43:20.000Z | 2019-12-03T04:53:43.000Z | icpc/2019-10-4/F-gen.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | null | null | null | icpc/2019-10-4/F-gen.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | null | null | null | #!/usr/bin/pypy
from sys import *
from random import *
n, m, CMAX, d1, d2 = map(int, argv[1:])
#print randint(0, n)
print 1
x0, y0 = randint(-20, -10), randint(-20, -10)
dx, dy = randint(-d1, -1), randint(1, d1)
x1, y1 = x0 + dx, y0 + dy
dx, dy = -dy, dx
print x0, y0
print x1, y1
print x1 + dx, y1 + dy
print x0 + dx, y0 + dy
print n
for i in xrange(n):
print m
x, y = randint(0, CMAX), randint(0, CMAX)
for j in xrange(m):
print x, y, 0
x += randint(-d2, d2)
y += randint(-d2, d2)
x = max(0, x)
y = max(0, y)
| 20.214286 | 45 | 0.533569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.061837 |
e8c0f817f68a5fd4dd066ca677c3c04387639adf | 3,787 | py | Python | aea/cli/run.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | aea/cli/run.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | aea/cli/run.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Implementation of the 'aea run' subcommand."""
import sys
from pathlib import Path
from typing import List, Optional
import click
from aea import __version__
from aea.aea import AEA
from aea.aea_builder import AEABuilder
from aea.cli.common import (
AEA_LOGO,
ConnectionsOption,
check_aea_project,
logger,
)
from aea.cli.install import install
from aea.configurations.base import PublicId
from aea.helpers.base import load_env_file
AEA_DIR = str(Path("."))
def _prepare_environment(click_context, env_file: str, is_install_deps: bool) -> None:
"""
Prepare the AEA project environment.
:param click_context: the click context
:param env_file: the path to the envrionemtn file.
:param is_install_deps: whether to install the dependencies
"""
load_env_file(env_file)
if is_install_deps:
if Path("requirements.txt").exists():
click_context.invoke(install, requirement="requirements.txt")
else:
click_context.invoke(install)
def _build_aea(
connection_ids: Optional[List[PublicId]], skip_consistency_check: bool
) -> AEA:
try:
builder = AEABuilder.from_aea_project(
Path("."), skip_consistency_check=skip_consistency_check
)
aea = builder.build(connection_ids=connection_ids)
return aea
except Exception as e:
# TODO use an ad-hoc exception class for predictable errors
# all the other exceptions should be logged with logger.exception
logger.error(str(e))
sys.exit(1)
def _run_aea(aea: AEA) -> None:
click.echo(AEA_LOGO + "v" + __version__ + "\n")
click.echo("{} starting ...".format(aea.name))
try:
aea.start()
except KeyboardInterrupt:
click.echo(" {} interrupted!".format(aea.name)) # pragma: no cover
except Exception as e:
logger.exception(e)
sys.exit(1)
finally:
click.echo("{} stopping ...".format(aea.name))
aea.stop()
@click.command()
@click.option(
"--connections",
"connection_ids",
cls=ConnectionsOption,
required=False,
default=None,
help="The connection names to use for running the agent. Must be declared in the agent's configuration file.",
)
@click.option(
"--env",
"env_file",
type=click.Path(),
required=False,
default=".env",
help="Specify an environment file (default: .env)",
)
@click.option(
"--install-deps",
"is_install_deps",
is_flag=True,
required=False,
default=False,
help="Install all the dependencies before running the agent.",
)
@click.pass_context
@check_aea_project
def run(
click_context, connection_ids: List[PublicId], env_file: str, is_install_deps: bool
):
"""Run the agent."""
skip_consistency_check = click_context.obj.config["skip_consistency_check"]
_prepare_environment(click_context, env_file, is_install_deps)
aea = _build_aea(connection_ids, skip_consistency_check)
_run_aea(aea)
| 30.055556 | 114 | 0.660681 | 0 | 0 | 0 | 0 | 997 | 0.263269 | 0 | 0 | 1,626 | 0.429364 |
e8c11fe4576975f7077b66d5affd1b1ac602cf2e | 4,817 | py | Python | analyze.py | irenicaa/hh-analytics | 03917a4e7616d048e16bab737202b64af12132ee | [
"MIT"
] | 4 | 2018-07-26T15:34:58.000Z | 2020-10-30T07:13:25.000Z | analyze.py | irenicaa/hh-analytics | 03917a4e7616d048e16bab737202b64af12132ee | [
"MIT"
] | null | null | null | analyze.py | irenicaa/hh-analytics | 03917a4e7616d048e16bab737202b64af12132ee | [
"MIT"
] | 2 | 2018-08-02T17:25:15.000Z | 2022-01-21T14:14:39.000Z | #!/usr/bin/env python3
import logging
import functools
import argparse
import collections
import fileinput
import json
import csv
import re
import requests
import box
TAX = 0.13
SPECIALIZATION_COLUMNS_INDEXES = {
'salary.average': 1,
'salary.minimum': 2,
'salary.maximum': 3,
'number': 4,
}
def save_stats(args, stats, filename_parts, headers, row_handler, sort_key_getter):
stats_rows = []
for number, stats_row in enumerate(stats.items()):
logging.info('process the stats row #%d', number)
name, data = stats_row
stats_rows.append(row_handler(name, data))
stats_rows.sort(key=sort_key_getter)
if args.query:
filename_parts.append(args.query.pattern)
if args.handicapped:
filename_parts.append('handicapped')
if args.remote:
filename_parts.append('remote')
if args.no_experience:
filename_parts.append('no_experience')
filename = '.'.join(filename_parts + ['csv'])
with open(filename, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(headers)
for number, stats_row in enumerate(stats_rows):
logging.info('output the stats row #%d', number)
csv_writer.writerow(stats_row)
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(message)s',
level=logging.INFO,
)
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--query', type=lambda query: re.compile(query, re.I))
parser.add_argument('-H', '--handicapped', action='store_true')
parser.add_argument('-r', '--remote', action='store_true')
parser.add_argument('-e', '--no_experience', action='store_true')
parser.add_argument('--data_1', required=True, choices=['profareas', 'specializations'])
parser.add_argument('-s', '--sort', required=True, choices=SPECIALIZATION_COLUMNS_INDEXES.keys())
parser.add_argument('--data_2', choices=['areas', 'cities'])
args = parser.parse_args()
currencies = {}
dictionaries = box.Box(requests.get('https://api.hh.ru/dictionaries').json())
for currency in dictionaries.currency:
currencies[currency.code] = currency.rate
specializations = collections.defaultdict(list)
cities = collections.Counter()
for number, line in enumerate(fileinput.input(files=[])):
logging.info('process the vacancy #%d', number)
try:
if args.query and args.query.search(line) is None:
continue
vacancy = box.Box(json.loads(line))
if args.handicapped and not vacancy.accept_handicapped:
continue
if args.remote and vacancy.schedule.id != 'remote':
continue
if args.no_experience and vacancy.experience.id != 'noExperience':
continue
if args.data_2 == 'areas':
cities[vacancy.area.name] += 1
elif args.data_2 == 'cities' and vacancy.address and vacancy.address.city:
cities[vacancy.address.city] += 1
salary = None
if vacancy.salary:
if vacancy.salary.to:
salary = vacancy.salary.to
# override the salary with its minimum, if there are specified both limits
if vacancy.salary['from']:
salary = vacancy.salary['from']
if not salary:
continue
salary /= currencies[vacancy.salary.currency]
if vacancy.salary.gross:
salary -= salary * TAX
for specialization in vacancy.specializations:
if args.data_1 == 'profareas':
name = specialization.profarea_name
else:
name = '{} / {}'.format(specialization.profarea_name, specialization.name)
specializations[name].append(salary)
except Exception as exception:
logging.error('error: %s', exception)
save_stats = functools.partial(save_stats, args)
if specializations:
logging.info('save the %s stats', args.data_1)
save_stats(
stats=specializations,
filename_parts=[args.data_1, args.sort],
headers=['Name', 'Salary, average', 'Salary, minimum', 'Salary, maximum', 'Number'],
row_handler=lambda name, salaries: [
name,
round(sum(salaries) / len(salaries)),
round(min(salaries)),
round(max(salaries)),
len(salaries),
],
sort_key_getter=lambda specialization_row: [
-specialization_row[SPECIALIZATION_COLUMNS_INDEXES[args.sort]],
specialization_row[0].lower(),
],
)
if cities:
logging.info('save the %s stats', args.data_2)
save_stats(
stats=cities,
filename_parts=[args.data_2],
headers=['Name', 'Number'],
row_handler=lambda name, number: [name, number],
sort_key_getter=lambda city_row: [-city_row[1], city_row[0].lower()],
)
| 34.407143 | 97 | 0.645215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 727 | 0.150924 |
e8c18148af4760bc98e8ab17fabe8787994adb73 | 1,895 | py | Python | src/environment/wrappers/max_frameskip_env.py | Kautenja/playing-mario-with-deep-reinforcement-learning | bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076 | [
"MIT"
] | 57 | 2018-04-24T07:07:29.000Z | 2022-01-19T17:07:13.000Z | src/environment/wrappers/max_frameskip_env.py | Kautenja/playing-mario-with-deep-reinforcement-learning | bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076 | [
"MIT"
] | 10 | 2018-06-07T14:29:19.000Z | 2019-07-29T13:48:03.000Z | src/environment/wrappers/max_frameskip_env.py | Kautenja/playing-mario-with-deep-reinforcement-learning | bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076 | [
"MIT"
] | 11 | 2018-09-11T23:14:37.000Z | 2021-06-30T03:56:55.000Z | """An environment to skip k frames and return a max between the last two."""
import gym
import numpy as np
class MaxFrameskipEnv(gym.Wrapper):
"""An environment to skip k frames and return a max between the last two."""
def __init__(self, env, skip: int=4) -> None:
"""
Initialize a new max frame skip env around an existing environment.
Args:
env: the environment to wrap around
skip: the number of frames to skip (i.e. hold an action for)
Returns:
None
"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2, *env.observation_space.shape), dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
# the total reward from `skip` frames having `action` held on them
total_reward = 0.0
done = None
# perform the action `skip` times
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
total_reward += reward
# assign the buffer with the last two frames
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
# break the loop if the game terminated
if done:
break
# Note that the observation on the done=True frame doesn't matter
# (because the next state isn't evaluated when done is true)
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
# explicitly define the outward facing API of this module
__all__ = [MaxFrameskipEnv.__name__]
| 34.454545 | 86 | 0.606332 | 1,688 | 0.890765 | 0 | 0 | 0 | 0 | 0 | 0 | 909 | 0.479683 |
e8c2719a32612a325b383c39169bc8d4bf73f6f4 | 125 | py | Python | mercury/plugin/smart_grid/__init__.py | greenlsi/mercury_mso_framework | 8b9639e5cb4b2c526a65861c93a9fe9db2460ea4 | [
"Apache-2.0"
] | 1 | 2020-07-21T11:22:39.000Z | 2020-07-21T11:22:39.000Z | mercury/plugin/smart_grid/__init__.py | greenlsi/mercury_mso_framework | 8b9639e5cb4b2c526a65861c93a9fe9db2460ea4 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:09:58.000Z | 2022-02-10T02:21:03.000Z | mercury/plugin/smart_grid/__init__.py | greenlsi/mercury_mso_framework | 8b9639e5cb4b2c526a65861c93a9fe9db2460ea4 | [
"Apache-2.0"
] | 1 | 2021-02-24T15:54:09.000Z | 2021-02-24T15:54:09.000Z | from .provider import EnergyProvider
from .pwr_source import PowerSource
from .consumption_manager import ConsumptionManager
| 31.25 | 51 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e8c30d54b333286bca5dd921414bc3d23b187ec5 | 4,236 | py | Python | hbaselines/envs/deeploco/envs.py | reufko/h-baselines | 5c5522b096ddcf3124ff2fcbfb6eb7fa4fc0fb57 | [
"MIT"
] | 186 | 2019-01-21T10:37:11.000Z | 2022-03-22T19:04:07.000Z | hbaselines/envs/deeploco/envs.py | ahlane/h-baselines | 767eb9a105deb0898248dcd7dbf2f118116cdbfa | [
"MIT"
] | 214 | 2018-10-17T06:53:22.000Z | 2022-02-09T23:59:03.000Z | hbaselines/envs/deeploco/envs.py | ahlane/h-baselines | 767eb9a105deb0898248dcd7dbf2f118116cdbfa | [
"MIT"
] | 37 | 2019-07-12T01:44:08.000Z | 2022-03-29T06:00:15.000Z | """Script containing the DeepLoco environments."""
import gym
import numpy as np
import os
import sys
import cv2
try:
sys.path.append(os.path.join(os.environ["TERRAINRL_PATH"], "simAdapter"))
import terrainRLSim # noqa: F401
except (KeyError, ImportError, ModuleNotFoundError):
pass
class BipedalSoccer(gym.Env):
"""Bipedal Soccer environment.
In this environment, a bipedal agent is placed in an open field with a
soccer ball. The agent is rewarded for moving to the ball, and additionally
dribbling the ball to the target. The reward function is a weighted sum of
the agent's distance from the ball and the distance of the ball from a
desired goal position. This reward is positive to discourage the agent from
falling prematurely.
Attributes
----------
wrapped_env : gym.Env
the original environment, which add more dimensions than wanted here
"""
def __init__(self):
"""Instantiate the environment."""
self.wrapped_env = terrainRLSim.getEnv(
"PD-Biped3D-HLC-Soccer-v1", render=False)
# Add the time horizon.
self.horizon = 512
@property
def observation_space(self):
"""See parent class."""
return self.wrapped_env.observation_space
@property
def action_space(self):
"""See parent class."""
return self.wrapped_env.action_space
def step(self, action):
"""See parent class."""
obs, rew, done, info = self.wrapped_env.step(np.array([action]))
return obs[0], rew[0][0], done, info
def reset(self):
"""See parent class."""
return self.wrapped_env.reset()[0]
def render(self, mode='human'):
"""See parent class."""
return self.wrapped_env.render(mode=mode)
class BipedalObstacles(gym.Env):
"""Bipedal Obstacles environment.
In this environment, a bipedal agent is placed in an open field with
obstacles scattered throughout the world. The goal of the agent is to
walk around the world and reach a goal position.
Attributes
----------
wrapped_env : gym.Env
the original environment, which add more dimensions than wanted here
"""
def __init__(self, render):
"""Instantiate the environment.
Parameters
----------
render : bool
whether to render the environment
"""
self.t = 0
if render:
self.wrapped_env = gym.make("PD-Biped3D-HLC-Obstacles-render-v2")
else:
self.wrapped_env = gym.make("PD-Biped3D-HLC-Obstacles-v2")
# Add the time horizon.
self.horizon = 2000
@property
def observation_space(self):
"""See parent class."""
return gym.spaces.Box(
low=20 * self.wrapped_env.observation_space.low[:-2],
high=20 * self.wrapped_env.observation_space.high[:-2],
dtype=np.float32)
@property
def context_space(self):
"""See parent class."""
return gym.spaces.Box(
low=20 * self.wrapped_env.observation_space.low[-2:],
high=20 * self.wrapped_env.observation_space.high[-2:],
dtype=np.float32)
@property
def action_space(self):
"""See parent class."""
return self.wrapped_env.action_space
@property
def current_context(self):
"""See parent class."""
return self.wrapped_env.env.getObservation()[-2:]
def step(self, action):
"""See parent class."""
self.t += 1
obs, rew, done, info = self.wrapped_env.step(action)
done = done or self.t >= self.horizon
return obs[:-2], rew, done, info
def reset(self):
"""See parent class."""
self.t = 0
return self.wrapped_env.reset()[:-2]
def render(self, mode='human'):
"""See parent class."""
image = self.wrapped_env.env.render(
headless_step=True)
if mode == 'human':
f = np.flip(image.astype(np.float32) / 255.0, axis=0)
f = np.flip(f, axis=2)
cv2.imshow("PD-Biped3D-HLC-Obstacles-v2", f)
cv2.waitKey(1)
elif mode == 'rgb_array':
return image
| 29.213793 | 79 | 0.611426 | 3,933 | 0.92847 | 0 | 0 | 1,016 | 0.239849 | 0 | 0 | 1,711 | 0.403919 |
e8c3672245371fce722c62fe43be41b167aa31e3 | 156 | py | Python | test/__init__.py | LanaMaidenbaum41/test | d9ffd610dc4b1dc940f444b2adf676b3c610d0bd | [
"MIT"
] | null | null | null | test/__init__.py | LanaMaidenbaum41/test | d9ffd610dc4b1dc940f444b2adf676b3c610d0bd | [
"MIT"
] | 183 | 2019-05-16T18:18:04.000Z | 2022-03-31T19:50:02.000Z | test/__init__.py | LanaMaidenbaum41/test | d9ffd610dc4b1dc940f444b2adf676b3c610d0bd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for Test."""
__author__ = """Lana Maidenbaum"""
__email__ = 'lana.maidenbaum@zeel.com'
__version__ = '0.1.1'
| 19.5 | 38 | 0.641026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.705128 |
e8c391255b56e4ff6feefeae210b186373cc9c99 | 372 | py | Python | pyml/crawler/minispider/mini_spider.py | onehao/opensource | e34784fcc294094a4ce0decd0e6809c11a96bbf7 | [
"Apache-2.0"
] | null | null | null | pyml/crawler/minispider/mini_spider.py | onehao/opensource | e34784fcc294094a4ce0decd0e6809c11a96bbf7 | [
"Apache-2.0"
] | 1 | 2015-07-15T02:11:41.000Z | 2015-07-15T02:30:34.000Z | pyml/crawler/minispider/mini_spider.py | onehao/opensource | e34784fcc294094a4ce0decd0e6809c11a96bbf7 | [
"Apache-2.0"
] | 1 | 2018-09-11T12:52:30.000Z | 2018-09-11T12:52:30.000Z | # -*- coding:utf-8 -*-
'''
Created on 2015年3月2日
@author: wanhao01
'''
import sys
from crawler.minispider import logerror
import main
reload(sys)
sys.setdefaultencoding('utf-8')
if __name__ == '__main__':
try:
main.main()
except Exception as exception:
logerror("error during running, details: " + str(exception))
pass | 16.909091 | 69 | 0.629032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.343915 |
e8c3e59f88d6793f930cb42c427e75383f6c77bd | 1,001 | py | Python | tensorflow/examples/functions/distributed/distr_fibonacci.py | acharal/tensorflow | c5d99169bea3f5d1e3cef973690d2ec7fdbac80f | [
"Apache-2.0"
] | null | null | null | tensorflow/examples/functions/distributed/distr_fibonacci.py | acharal/tensorflow | c5d99169bea3f5d1e3cef973690d2ec7fdbac80f | [
"Apache-2.0"
] | null | null | null | tensorflow/examples/functions/distributed/distr_fibonacci.py | acharal/tensorflow | c5d99169bea3f5d1e3cef973690d2ec7fdbac80f | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tensorflow.python.framework import function
cluster = tf.train.ClusterSpec({"local": ["localhost:2222", "localhost:2223"]})
fib = function.Declare("Fib", [("n", tf.int32)], [("ret", tf.int32)])
@function.Defun(tf.int32, func_name="Fib", out_names=["ret"])
def FibImpl(n):
def f1():
with tf.device("/job:local/replica:0/task:0/device:CPU:0"):
ret = tf.constant(1)
return ret
def f2():
with tf.device("/job:local/replica:0/task:0/device:CPU:0"):
fib1 = fib(n-1)
with tf.device("/job:local/replica:0/task:1/device:CPU:0"):
fib2 = fib(n-2)
return fib1 + fib2
return tf.cond(tf.less_equal(n, 1), f1, f2)
FibImpl.add_to_graph(tf.get_default_graph())
n = tf.placeholder(tf.int32, shape=[])
x = fib(n)
res = tf.add(x, 1)
#print(tf.get_default_graph().as_graph_def())
writer = tf.summary.FileWriter('./graphs', tf.get_default_graph())
with tf.Session("grpc://localhost:2222") as sess:
print(sess.run(res, feed_dict={n: 20}))
writer.close()
| 25.025 | 79 | 0.675325 | 0 | 0 | 0 | 0 | 434 | 0.433566 | 0 | 0 | 266 | 0.265734 |
e8c4971da7588ca0cd08099ba33608771a9b94d2 | 17,777 | py | Python | river/tree/hoeffding_tree.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 1,105 | 2019-01-24T15:15:30.000Z | 2020-11-10T18:27:00.000Z | river/tree/hoeffding_tree.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 328 | 2019-01-25T13:48:43.000Z | 2020-11-11T11:41:44.000Z | river/tree/hoeffding_tree.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 150 | 2019-01-29T19:05:21.000Z | 2020-11-11T11:50:14.000Z | import collections
import functools
import io
import math
import typing
from abc import ABC, abstractmethod
from river import base
from river.utils.skmultiflow_utils import (
calculate_object_size,
normalize_values_in_dict,
)
from .nodes.branch import (
DTBranch,
NominalBinaryBranch,
NominalMultiwayBranch,
NumericBinaryBranch,
NumericMultiwayBranch,
)
from .nodes.leaf import HTLeaf
# graphviz is an optional dependency: record whether it can be imported so
# that rendering features (e.g. ``draw``) can be guarded behind this flag
# instead of failing at import time.
try:
    import graphviz
    GRAPHVIZ_INSTALLED = True
except ImportError:
    GRAPHVIZ_INSTALLED = False
class HoeffdingTree(ABC):
"""Base class for Hoeffding Decision Trees.
This is an **abstract class**, so it cannot be used directly. It defines base operations
and properties that all the Hoeffding decision trees must inherit or implement according to
their own design.
Parameters
----------
max_depth
The maximum depth a tree can reach. If `None`, the tree will grow indefinitely.
binary_split
If True, only allow binary splits.
max_size
The max size of the tree, in Megabytes (MB).
memory_estimate_period
Interval (number of processed instances) between memory consumption checks.
stop_mem_management
If True, stop growing as soon as memory limit is hit.
remove_poor_attrs
If True, disable poor attributes to reduce memory usage.
merit_preprune
If True, enable merit-based tree pre-pruning.
"""
def __init__(
self,
max_depth: int = None,
binary_split: bool = False,
max_size: float = 100.0,
memory_estimate_period: int = 1000000,
stop_mem_management: bool = False,
remove_poor_attrs: bool = False,
merit_preprune: bool = True,
):
# Properties common to all the Hoeffding trees
self._split_criterion: str = ""
self._leaf_prediction: str = ""
self.max_depth: float = max_depth if max_depth is not None else math.inf
self.binary_split: bool = binary_split
self._max_size: float = max_size
self._max_byte_size: float = self._max_size * (2**20) # convert to byte
self.memory_estimate_period: int = memory_estimate_period
self.stop_mem_management: bool = stop_mem_management
self.remove_poor_attrs: bool = remove_poor_attrs
self.merit_preprune: bool = merit_preprune
self._root: typing.Union[DTBranch, HTLeaf, None] = None
self._n_active_leaves: int = 0
self._n_inactive_leaves: int = 0
self._inactive_leaf_size_estimate: float = 0.0
self._active_leaf_size_estimate: float = 0.0
self._size_estimate_overhead_fraction: float = 1.0
self._growth_allowed = True
self._train_weight_seen_by_model: float = 0.0
@staticmethod
def _hoeffding_bound(range_val, confidence, n):
r"""Compute the Hoeffding bound, used to decide how many samples are necessary at each
node.
Notes
-----
The Hoeffding bound is defined as:
$\\epsilon = \\sqrt{\\frac{R^2\\ln(1/\\delta))}{2n}}$
where:
$\\epsilon$: Hoeffding bound.
$R$: Range of a random variable. For a probability the range is 1, and for an
information gain the range is log *c*, where *c* is the number of classes.
$\\delta$: Confidence. 1 minus the desired probability of choosing the correct
attribute at any given node.
$n$: Number of samples.
Parameters
----------
range_val
Range value.
confidence
Confidence of choosing the correct attribute.
n
Number of processed samples.
"""
return math.sqrt(
(range_val * range_val * math.log(1.0 / confidence)) / (2.0 * n)
)
@property
def max_size(self):
"""Max allowed size tree can reach (in MB)."""
return self._max_size
@max_size.setter
def max_size(self, size):
self._max_size = size
self._max_byte_size = self._max_size * (2**20)
@property
def height(self) -> int:
if self._root:
return self._root.height
@property
def n_nodes(self):
if self._root:
return self._root.n_nodes
@property
def n_branches(self):
if self._root:
return self._root.n_branches
@property
def n_leaves(self):
if self._root:
return self._root.n_leaves
@property
def n_active_leaves(self):
return self._n_active_leaves
@property
def n_inactive_leaves(self):
return self._n_inactive_leaves
@property
def summary(self):
"""Collect metrics corresponding to the current status of the tree
in a string buffer.
"""
summary = {
"n_nodes": self.n_nodes,
"n_branches": self.n_branches,
"n_leaves": self.n_leaves,
"n_active_leaves": self.n_active_leaves,
"n_inactive_leaves": self.n_inactive_leaves,
"height": self.height,
"total_observed_weight": self._train_weight_seen_by_model,
}
return summary
def to_dataframe(self):
"""Return a representation of the current tree structure organized in a
`pandas.DataFrame` object.
In case the tree is empty or it only contains a single node (a leaf), `None` is returned.
Returns
-------
df
A `pandas.DataFrame` depicting the tree structure.
"""
if self._root is not None and isinstance(self._root, DTBranch):
return self._root.to_dataframe()
def _branch_selector(
self, numerical_feature=True, multiway_split=False
) -> typing.Type[DTBranch]:
"""Create a new split node."""
if numerical_feature:
if not multiway_split:
return NumericBinaryBranch
else:
return NumericMultiwayBranch
else:
if not multiway_split:
return NominalBinaryBranch
else:
return NominalMultiwayBranch
    @abstractmethod
    def _new_leaf(
        self, initial_stats: typing.Optional[dict] = None, parent: typing.Union[HTLeaf, DTBranch, None] = None
    ) -> HTLeaf:
        """Create a new learning node.

        The characteristics of the learning node depend on the tree algorithm:
        each concrete Hoeffding tree supplies its own leaf implementation.

        Parameters
        ----------
        initial_stats
            Target statistics set from the parent node.
        parent
            Parent node to inherit from.

        Returns
        -------
        A new learning node.
        """
@property
def split_criterion(self) -> str:
"""Return a string with the name of the split criterion being used by the tree."""
return self._split_criterion
@split_criterion.setter
@abstractmethod
def split_criterion(self, split_criterion):
"""Define the split criterion to be used by the tree."""
@property
def leaf_prediction(self) -> str:
"""Return the prediction strategy used by the tree at its leaves."""
return self._leaf_prediction
@leaf_prediction.setter
@abstractmethod
def leaf_prediction(self, leaf_prediction):
"""Define the prediction strategy used by the tree in its leaves."""
def _enforce_size_limit(self):
"""Track the size of the tree and disable/enable nodes if required.
This memory-management routine shared by all the Hoeffding Trees is based on [^1].
References
----------
[^1]: Kirkby, R.B., 2007. Improving hoeffding trees (Doctoral dissertation,
The University of Waikato).
"""
tree_size = self._size_estimate_overhead_fraction * (
self._active_leaf_size_estimate
+ self._n_inactive_leaves * self._inactive_leaf_size_estimate
)
if self._n_inactive_leaves > 0 or tree_size > self._max_byte_size:
if self.stop_mem_management:
self._growth_allowed = False
return
leaves = self._find_leaves()
leaves.sort(key=lambda leaf: leaf.calculate_promise())
max_active = 0
while max_active < len(leaves):
max_active += 1
if (
(
max_active * self._active_leaf_size_estimate
+ (len(leaves) - max_active) * self._inactive_leaf_size_estimate
)
* self._size_estimate_overhead_fraction
) > self._max_byte_size:
max_active -= 1
break
cutoff = len(leaves) - max_active
for i in range(cutoff):
if leaves[i].is_active():
leaves[i].deactivate()
self._n_inactive_leaves += 1
self._n_active_leaves -= 1
for i in range(cutoff, len(leaves)):
if not leaves[i].is_active() and leaves[i].depth < self.max_depth:
leaves[i].activate()
self._n_active_leaves += 1
self._n_inactive_leaves -= 1
    def _estimate_model_size(self):
        """Calculate the size of the model and trigger tracker function
        if the actual model size exceeds the max size in the configuration.

        This memory-management routine shared by all the Hoeffding Trees is based on [^1].

        References
        ----------
        [^1]: Kirkby, R.B., 2007. Improving hoeffding trees (Doctoral dissertation,
        The University of Waikato).
        """
        leaves = self._find_leaves()
        # Measure active and inactive leaves separately so each group gets its
        # own per-leaf average.
        total_active_size = 0
        total_inactive_size = 0
        for leaf in leaves:
            if leaf.is_active():
                total_active_size += calculate_object_size(leaf)
            else:
                total_inactive_size += calculate_object_size(leaf)
        # Only refresh an estimate when the corresponding group is non-empty;
        # otherwise the previously stored value is kept.
        if total_active_size > 0:
            self._active_leaf_size_estimate = total_active_size / self._n_active_leaves
        if total_inactive_size > 0:
            self._inactive_leaf_size_estimate = (
                total_inactive_size / self._n_inactive_leaves
            )
        # The overhead fraction captures how much of the model's real size is
        # not covered by the per-leaf estimates alone.
        actual_model_size = calculate_object_size(self)
        estimated_model_size = (
            self._n_active_leaves * self._active_leaf_size_estimate
            + self._n_inactive_leaves * self._inactive_leaf_size_estimate
        )
        self._size_estimate_overhead_fraction = actual_model_size / estimated_model_size
        if actual_model_size > self._max_byte_size:
            # Over budget: deactivate the least promising leaves.
            self._enforce_size_limit()
def _deactivate_all_leaves(self):
"""Deactivate all leaves."""
leaves = self._find_leaves()
for leaf in leaves:
leaf.deactivate()
self._n_inactive_leaves += 1
self._n_active_leaves -= 1
def _find_leaves(self) -> typing.List[HTLeaf]:
"""Find learning nodes in the tree.
Returns
-------
List of learning nodes in the tree.
"""
return [leaf for leaf in self._root.iter_leaves()]
# Adapted from creme's original implementation
def debug_one(self, x: dict) -> typing.Union[str, None]:
"""Print an explanation of how `x` is predicted.
Parameters
----------
x
A dictionary of features.
Returns
-------
A representation of the path followed by the tree to predict `x`; `None` if
the tree is empty.
Notes
-----
Currently, Label Combination Hoeffding Tree Classifier (for multi-label
classification) is not supported.
"""
if self._root is None:
return
# We'll redirect all the print statement to a buffer, we'll return the content of the
# buffer at the end
buffer = io.StringIO()
_print = functools.partial(print, file=buffer)
for node in self._root.walk(x, until_leaf=True):
if isinstance(node, HTLeaf):
_print(repr(node))
else:
try:
child_index = node.branch_no(x) # noqa
except KeyError:
child_index, _ = node.most_common_path()
_print(node.repr_branch(child_index)) # noqa
return buffer.getvalue()
    def draw(self, max_depth: int = None):
        """Draw the tree using the `graphviz` library.

        Since the tree is drawn without passing incoming samples, classification trees
        will show the majority class in their leaves, whereas regression trees will
        use the target mean.

        Parameters
        ----------
        max_depth
            Only the root will be drawn when set to `0`. Every node will be drawn when
            set to `None`.

        Notes
        -----
        Currently, Label Combination Hoeffding Tree Classifier (for multi-label
        classification) is not supported.

        Examples
        --------
        >>> from river import datasets
        >>> from river import tree
        >>> model = tree.HoeffdingTreeClassifier(
        ...    grace_period=5,
        ...    split_confidence=1e-5,
        ...    split_criterion='gini',
        ...    max_depth=10,
        ...    tie_threshold=0.05,
        ... )
        >>> for x, y in datasets.Phishing():
        ...    model = model.learn_one(x, y)
        >>> dot = model.draw()

        .. image:: ../../docs/img/dtree_draw.svg
            :align: center
        """
        # Unique id assigned to every drawn node, shared with iterate() below.
        counter = 0

        # DFS generator yielding (parent_no, parent, child, child_no, branch_index)
        # for every edge of the tree.  The initial call (node=None) bootstraps
        # the traversal by yielding the root with no parent; after the nested
        # `yield from`, node is still None so the isinstance check below is
        # skipped for that frame.
        def iterate(node=None):
            if node is None:
                yield None, None, self._root, 0, None
                yield from iterate(self._root)
            nonlocal counter
            parent_no = counter
            if isinstance(node, DTBranch):
                for branch_index, child in enumerate(node.children):
                    counter += 1
                    yield parent_no, node, child, counter, branch_index
                    if isinstance(child, DTBranch):
                        yield from iterate(child)
        if max_depth is None:
            max_depth = math.inf
        dot = graphviz.Digraph(
            graph_attr={"splines": "ortho", "forcelabels": "true", "overlap": "false"},
            node_attr={
                "shape": "box",
                "penwidth": "1.2",
                "fontname": "trebuchet",
                "fontsize": "11",
                "margin": "0.1,0.0",
            },
            edge_attr={"penwidth": "0.6", "center": "true", "fontsize": "7 "},
        )
        # One hue per class for classifiers; a single hue otherwise.
        if isinstance(self, base.Classifier):
            n_colors = len(self.classes) # noqa
        else:
            n_colors = 1
        # Pick a color palette which maps classes to colors
        new_color = functools.partial(next, iter(_color_brew(n_colors)))
        palette = collections.defaultdict(new_color)
        for parent_no, parent, child, child_no, branch_index in iterate():
            if child.depth > max_depth:
                continue
            if isinstance(child, DTBranch):
                text = f"{child.feature}" # noqa
            else:
                text = f"{repr(child)}\nsamples: {int(child.total_weight)}"
            # Pick a color, the hue depends on the class and the transparency on the distribution
            if isinstance(self, base.Classifier):
                class_proba = normalize_values_in_dict(child.stats, inplace=False)
                mode = max(class_proba, key=class_proba.get)
                p_mode = class_proba[mode]
                try:
                    # alpha grows with how dominant the majority class is;
                    # division by zero happens when n_colors == 1.
                    alpha = (p_mode - 1 / n_colors) / (1 - 1 / n_colors)
                    fillcolor = str(transparency_hex(color=palette[mode], alpha=alpha))
                except ZeroDivisionError:
                    fillcolor = "#FFFFFF"
            else:
                fillcolor = "#FFFFFF"
            dot.node(f"{child_no}", text, fillcolor=fillcolor, style="filled")
            if parent_no is not None:
                dot.edge(
                    f"{parent_no}",
                    f"{child_no}",
                    xlabel=parent.repr_branch(branch_index, shorten=True),
                )
        return dot
# Utility adapted from the original creme's implementation
def _color_brew(n: int) -> typing.List[typing.Tuple[int, int, int]]:
"""Generate n colors with equally spaced hues.
Parameters
----------
n
The number of required colors.
Returns
-------
List of n tuples of form (R, G, B) being the components of each color.
References
----------
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_export.py
"""
colors = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in [i for i in range(25, 385, int(360 / n))]:
# Calculate some intermediate values
h_bar = h / 60.0
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [
(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0),
]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
colors.append(
((int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m))))
)
return colors
# Utility adapted from the original creme's implementation
def transparency_hex(color: typing.Tuple[int, int, int], alpha: float) -> str:
    """Blend `color` towards white by `1 - alpha` and return it as a hex string."""
    blended = (
        int(round(alpha * channel + (1 - alpha) * 255, 0)) for channel in color
    )
    return "#{:02x}{:02x}{:02x}".format(*blended)
| 32.558608 | 97 | 0.582607 | 15,675 | 0.881757 | 3,868 | 0.217585 | 3,517 | 0.19784 | 0 | 0 | 6,835 | 0.384486 |
e8c50788ce4892af43a9be0933cf50a03caaf69d | 2,376 | py | Python | app/app.py | CLARIAH/wp6-missieven | 67e7d0123d37cce36be5353801b90010d4ad4be0 | [
"MIT"
] | null | null | null | app/app.py | CLARIAH/wp6-missieven | 67e7d0123d37cce36be5353801b90010d4ad4be0 | [
"MIT"
] | null | null | null | app/app.py | CLARIAH/wp6-missieven | 67e7d0123d37cce36be5353801b90010d4ad4be0 | [
"MIT"
] | null | null | null | import types
from tf.advanced.app import App
# Word-level markup flag names: for each name ``cf`` here, the corpus feature
# ``is<cf>`` is queried by TfApp._wrapHtml and, when set, ``cf`` becomes a CSS
# class on the rendered word.
MODIFIERS = """
remark folio note ref emph und super special q num den
""".strip().split()
def fmt_layoutFull(app, n, **kwargs):
    """Per-word formatter for the ``layoutFull`` text format (layer kind ``""``)."""
    kinds = ("",)
    return app._wrapHtml(n, kinds)
def fmt_layoutRemarks(app, n, **kwargs):
    """Per-word formatter for the ``layoutRemarks`` text format (layer kind ``"r"``)."""
    kinds = ("r",)
    return app._wrapHtml(n, kinds)
def fmt_layoutNotes(app, n, **kwargs):
    """Per-word formatter for the ``layoutNotes`` text format (layer kind ``"n"``)."""
    kinds = ("n",)
    return app._wrapHtml(n, kinds)
def fmt_layoutOrig(app, n, **kwargs):
    """Per-word formatter for the ``layoutOrig`` text format (layer kind ``"o"``)."""
    kinds = ("o",)
    return app._wrapHtml(n, kinds)
def fmt_layoutNoRemarks(app, n, **kwargs):
    """Per-word formatter for ``layoutNoRemarks`` (layer kinds ``"o"`` and ``"n"``)."""
    kinds = ("o", "n")
    return app._wrapHtml(n, kinds)
def fmt_layoutNoNotes(app, n, **kwargs):
    """Per-word formatter for ``layoutNoNotes`` (layer kinds ``"o"`` and ``"r"``)."""
    kinds = ("o", "r")
    return app._wrapHtml(n, kinds)
def fmt_layoutNonOrig(app, n, **kwargs):
    """Per-word formatter for ``layoutNonOrig`` (layer kinds ``"r"`` and ``"n"``)."""
    kinds = ("r", "n")
    return app._wrapHtml(n, kinds)
# Node types (otype values) used by TfApp._wrapHtml when querying the corpus.
NOTE = "note"
WORD = "word"
class TfApp(App):
    """Text-Fabric advanced app that installs the ``layout*`` text formats.

    Each ``fmt_layout*`` module function is bound to the instance so the TF
    machinery can call it as a method, and ``_wrapHtml`` does the per-word
    HTML rendering shared by all of them.
    """

    def __init__(app, *args, **kwargs):
        # Bind every layout formatter as an instance method; each function's
        # __name__ matches the attribute name TF looks up.
        for formatter in (
            fmt_layoutFull,
            fmt_layoutRemarks,
            fmt_layoutNotes,
            fmt_layoutOrig,
            fmt_layoutNoRemarks,
            fmt_layoutNoNotes,
            fmt_layoutNonOrig,
        ):
            setattr(app, formatter.__name__, types.MethodType(formatter, app))
        super().__init__(*args, **kwargs)

    def _wrapHtml(app, n, kinds):
        """Render word node ``n`` from the text layers named by ``kinds``."""
        api = app.api
        F = api.F
        Fs = api.Fs
        L = api.L

        # Opening/closing markers when n starts or ends a note.
        note_open = ""
        note_close = ""
        if "" in kinds or "n" in kinds:
            containing_notes = L.u(n, otype=NOTE)
            if containing_notes:
                note_node = containing_notes[0]
                mark = F.mark.v(note_node)
                words_of_note = L.d(note_node, otype=WORD)
                if words_of_note[0] == n:
                    note_open = f"«{mark}= "
                if words_of_note[-1] == n:
                    note_close = f" ={mark}»"

        # Concatenate the transcription and trailing punctuation of every
        # requested layer; missing feature values count as empty strings.
        text = "".join(Fs(f"trans{kind}").v(n) or "" for kind in kinds)
        punct = "".join(Fs(f"punc{kind}").v(n) or "" for kind in kinds)
        rendered = f"{note_open}{text}{punct}{note_close}"

        # Wrap in a span carrying one CSS class per modifier flag set on n.
        css_classes = " ".join(
            name for name in MODIFIERS if (feature := Fs(f"is{name}")) and feature.v(n)
        )
        if css_classes:
            rendered = f'<span class="{css_classes}">{rendered}</span>'
        return rendered
| 28.285714 | 76 | 0.570286 | 1,645 | 0.691758 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.113541 |
e8c55b0d0b7ee2952a20ba1c0326c0dd310acbe0 | 5,693 | py | Python | services/docker/webrecorder/local.py | rachelaus/perma | 36c05080520ea3ffce465dbc383795c060fa4112 | [
"MIT",
"Unlicense"
] | 317 | 2015-02-12T16:53:34.000Z | 2022-03-14T23:38:04.000Z | services/docker/webrecorder/local.py | rachelaus/perma | 36c05080520ea3ffce465dbc383795c060fa4112 | [
"MIT",
"Unlicense"
] | 2,069 | 2015-01-06T20:09:24.000Z | 2022-03-31T15:44:36.000Z | services/docker/webrecorder/local.py | rachelaus/perma | 36c05080520ea3ffce465dbc383795c060fa4112 | [
"MIT",
"Unlicense"
] | 69 | 2015-01-12T18:56:07.000Z | 2022-02-22T19:57:10.000Z | import hashlib
import logging
import os
import shutil
import traceback
from contextlib import closing
from pywb.utils.loaders import BlockLoader
from webrecorder.rec.storage.base import BaseStorage
from webrecorder.rec.storage.storagepaths import add_local_store_prefix, strip_prefix
logger = logging.getLogger('wr.io')
# ============================================================================
class DirectLocalFileStorage(BaseStorage):
    """Webrecorder storage backend working directly on the local filesystem.

    The storage root is read from the ``STORAGE_ROOT`` environment variable.
    """

    def __init__(self):
        """Initialize Webrecorder storage rooted at ``$STORAGE_ROOT``.

        :raises KeyError: if ``STORAGE_ROOT`` is not set in the environment
        """
        super(DirectLocalFileStorage, self).__init__(os.environ['STORAGE_ROOT'])

    def delete_collection_dir(self, dir_path):
        """Delete a collection directory and prune newly-empty parents.

        :param str dir_path: directory path, relative to the storage root
        :returns: whether successful or not
        :rtype: bool
        """
        local_dir = os.path.join(self.storage_root, dir_path)
        try:
            logger.debug('Local Store: Deleting Directory: ' + local_dir)
            parent_dir = os.path.dirname(local_dir)
            shutil.rmtree(local_dir)
            # NOTE(review): if parent_dir still holds other entries this call
            # raises and the method reports failure even though the collection
            # itself was removed — confirm whether callers rely on that.
            os.removedirs(parent_dir)
            return True
        except Exception as e:
            # errno 2 (ENOENT) means the directory was already gone, which is
            # silently tolerated.  getattr() guards exceptions without an
            # ``errno`` attribute — the previous code raised AttributeError
            # for any non-OSError failure instead of logging it.
            if getattr(e, 'errno', None) != 2:
                logger.error(str(e))
            return False

    def do_upload(self, target_url, full_filename):
        """Copy a file into local file storage.

        :param str target_url: destination path
        :param str full_filename: source path
        :returns: whether successful or not
        :rtype: bool
        """
        os.makedirs(os.path.dirname(target_url), exist_ok=True)
        try:
            if full_filename != target_url:
                shutil.copyfile(full_filename, target_url)
            else:
                logger.debug('Local Store: Same File, No Upload')
            return True
        except Exception as e:
            logger.error(str(e))
            return False

    def is_valid_url(self, target_url):
        """Return whether given target URL is an existing file.

        :param str target_url: target URL
        :returns: whether given target URL is an existing file
        :rtype: bool
        """
        return os.path.isfile(target_url)

    def get_client_url(self, target_url):
        """Convert a filesystem path into a client-facing, prefixed URL.

        :param str target_url: target URL
        :returns: client URL
        :rtype: str
        """
        return add_local_store_prefix(target_url.replace(os.path.sep, '/'))

    def client_url_to_target_url(self, client_url):
        """Get target URL (from client URL).

        :param str client_url: client URL
        :returns: target URL
        :rtype: str
        """
        return strip_prefix(client_url)

    def do_delete(self, target_url, client_url):
        """Delete a single file from storage.

        :param str target_url: target URL
        :param str client_url: client URL (unused)
        :returns: whether successful or not
        :rtype: bool
        """
        try:
            logger.debug('Local Store: Deleting: ' + target_url)
            os.remove(target_url)
            return True
        except Exception as e:
            # Same pattern as delete_collection_dir: ignore ENOENT quietly,
            # log everything else; getattr() avoids AttributeError on
            # exceptions that carry no ``errno``.
            if getattr(e, 'errno', None) != 2:
                logger.error(str(e))
            return False
# ============================================================================
class LocalFileStorage(DirectLocalFileStorage):
    """Local-file Webrecorder storage that delegates deletions to Redis.

    :ivar StrictRedis redis: Redis interface
    """

    def __init__(self, redis):
        """Keep the Redis handle, then initialize the local-file backend.

        :param StrictRedis redis: Redis interface
        """
        self.redis = redis
        super(LocalFileStorage, self).__init__()

    ### BEGIN PERMA CUSTOMIZATIONS
    ### First pass at https://github.com/harvard-lil/perma/issues/2614
    def delete_collection(self, collection):
        """Publish a request to delete the collection's directory.

        :param collection: collection
        :type: n.s.
        :returns: whether successful or not
        :rtype: bool
        """
        path = collection.get_dir_path()
        if not path:
            return False
        try:
            dirpath = os.path.join(self.storage_root, path)
            # Success means at least one subscriber received the message.
            return self.redis.publish('handle_delete_dir', dirpath) > 0
        except Exception:
            logger.error("Failed attempt to delete collection {}".format(collection), exc_info=True)
            return False
    ### END PERMA CUSTOMIZATIONS

    def do_delete(self, target_url, client_url):
        """Publish a request to delete a single file.

        :param str target_url: target URL
        :param str client_url: client URL (unused argument)
        :returns: whether successful or not
        :rtype: bool
        """
        subscribers = self.redis.publish('handle_delete_file', target_url)
        return subscribers > 0

    def get_checksum_and_size(self, filepath_or_url):
        """Returns the checksum of the supplied URL or filepath and the size of the resource

        :param str filepath_or_url: The URL or filepath to the resource that the checksum and size is desired for
        :return: A three tuple containing the kind of checksum, the checksum itself, and size
        :rtype: tuple[str|None, str|None, int|None]
        """
        digest = hashlib.md5()
        chunk_size = 1024 * 1024
        total_size = 0
        # Stream the resource in 1 MB chunks so arbitrarily large files can
        # be hashed without loading them into memory.
        with closing(BlockLoader().load(filepath_or_url)) as stream:
            while True:
                chunk = stream.read(chunk_size)
                if not chunk:
                    break
                total_size += len(chunk)
                digest.update(chunk)
        return 'md5', digest.hexdigest(), total_size
| 30.607527 | 113 | 0.59055 | 5,205 | 0.914281 | 0 | 0 | 0 | 0 | 0 | 0 | 2,551 | 0.448094 |
e8c5b2acc01726c22676df5fc3f9d17efb91642c | 1,424 | py | Python | password-cracking/leatspeak.py | cyberprogrammer/ctf-stuff | 6814ff819c51bb1cddae76ff611bede3909aa129 | [
"MIT"
] | null | null | null | password-cracking/leatspeak.py | cyberprogrammer/ctf-stuff | 6814ff819c51bb1cddae76ff611bede3909aa129 | [
"MIT"
] | null | null | null | password-cracking/leatspeak.py | cyberprogrammer/ctf-stuff | 6814ff819c51bb1cddae76ff611bede3909aa129 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from string import ascii_letters
import itertools
def includeDefault(charSet):
    """Seed every byte value 0-255 with itself as its only substitution."""
    for byte in range(256):
        charSet[byte] = {byte}
def includeInvertedCases(charSet):
    """Add the opposite-case variant to each ASCII letter's substitution set."""
    for letter in ascii_letters:
        swapped = letter.lower() if letter.isupper() else letter.upper()
        charSet[ord(letter)] |= {ord(swapped)}
def includeLeetSpeak(charSet):
    """Add common leetspeak substitutions for both cases of a, e, s, l and i."""
    substitutions = {
        "a": "@4",
        "e": "3",
        "s": "$5",
        "l": "1",
        "i": "!1",
    }
    for letter, replacements in substitutions.items():
        extra = {ord(ch) for ch in replacements}
        charSet[ord(letter)] |= extra
        charSet[ord(letter.upper())] |= extra
def findCombinations(word, charSet):
    """Write every substitution variant of *word* to stdout, unseparated.

    :param word: iterable of byte values (e.g. a ``bytes`` line from stdin;
        iterating bytes yields ints, which index into ``charSet``)
    :param charSet: mapping from byte value to its set of allowed replacements
    """
    # One set of candidate bytes per input position.  (The previous loop used
    # enumerate() but never used the index.)
    wordCombinations = [charSet[c] for c in word]
    # The Cartesian product over the positions enumerates every variant.
    for newWord in itertools.product(*wordCombinations):
        sys.stdout.buffer.write(bytearray(newWord))
        sys.stdout.flush()
if __name__ == "__main__":
    # Build the substitution table: every byte maps to itself, extended with
    # leetspeak variants for a/e/s/l/i.
    charSet = dict()
    includeDefault(charSet)
    #includeInvertedCases(charSet)  # optional: also try case-swapped letters
    includeLeetSpeak(charSet)
    # Expand each input line (read as raw bytes, trailing newline included)
    # into all of its substitution variants on stdout.
    for line in sys.stdin.buffer:
        findCombinations(line, charSet)
e8c6ae4b647e06cb7b69a59f506ee2ec7083e0ae | 1,621 | py | Python | tests/unit_tests/community/errors_upload/__init__.py | Trading-Bot/CryptoBot | b39401f093eadfe461d6f441ba31d3b8cdb3f877 | [
"Apache-2.0"
] | 9 | 2018-04-14T18:42:16.000Z | 2018-05-22T03:45:31.000Z | tests/unit_tests/community/errors_upload/__init__.py | Trading-Bot/CryptoBot | b39401f093eadfe461d6f441ba31d3b8cdb3f877 | [
"Apache-2.0"
] | 151 | 2018-04-03T14:37:17.000Z | 2018-05-29T14:15:31.000Z | tests/unit_tests/community/errors_upload/__init__.py | Trading-Bot/CryptoBot | b39401f093eadfe461d6f441ba31d3b8cdb3f877 | [
"Apache-2.0"
] | 3 | 2018-04-22T10:53:28.000Z | 2018-05-23T08:10:18.000Z | # This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot)
# Copyright (c) 2021 Drakkar-Software, All rights reserved.
#
# OctoBot is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# OctoBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>.
import pytest
import time
import octobot.community as community
# Shared fixture data for the error-upload tests.
# ERROR_TIME is captured once, at import time.
ERROR_TITLE = "An error happened"
ERROR_METRICS_ID = "1254xyz"
ERROR_TIME = time.time()
UPLOADER_URL = "http://upload_url"
@pytest.fixture
def basic_error():
    """Error fixture built without an attached exception."""
    error = community.Error(None, ERROR_TITLE, ERROR_TIME, ERROR_METRICS_ID)
    return error
@pytest.fixture
def exception_error():
    """Error fixture carrying a generated exception (with a real traceback)."""
    raised = _get_exception()
    return community.Error(raised, ERROR_TITLE, ERROR_TIME, ERROR_METRICS_ID)
@pytest.fixture
def error_uploader():
    """ErrorsUploader fixture pointed at the test upload URL."""
    uploader = community.ErrorsUploader(UPLOADER_URL)
    return uploader
def _get_exception():
    # Raise and capture a ZeroDivisionError through three nested calls so the
    # returned exception carries a multi-frame traceback.  The nested function
    # names appear in the traceback text, so renaming them would change what
    # the error uploader reports.
    def fake3():
        1/0
    def fake2():
        fake3()
    def fake_func():
        fake2()
    try:
        fake_func()
    except ZeroDivisionError as err:
        return err
| 24.19403 | 77 | 0.69525 | 0 | 0 | 0 | 0 | 440 | 0.271437 | 0 | 0 | 840 | 0.518199 |
e8c6d39ffcb80d752f8e489ba1c7a392d1ea7ce9 | 794 | py | Python | SCCSearch.py | FER-NASP/AdvancedAlgorithms | ce09c50b9d02fac53a09f6f0d0d099fe87aa7354 | [
"MIT"
] | 1 | 2021-12-24T19:30:13.000Z | 2021-12-24T19:30:13.000Z | SCCSearch.py | FER-NASP/AdvancedAlgorithms | ce09c50b9d02fac53a09f6f0d0d099fe87aa7354 | [
"MIT"
] | null | null | null | SCCSearch.py | FER-NASP/AdvancedAlgorithms | ce09c50b9d02fac53a09f6f0d0d099fe87aa7354 | [
"MIT"
] | null | null | null | import collections
def SCCSearch(G):
    """Return the strongly connected components of the directed graph ``G``.

    ``G`` maps each vertex to a dict with an ``'adj'`` list of successors.
    The ``'n'`` (discovery index) and ``'p'`` (lowlink) fields of every vertex
    are (re)written as a side effect, matching the original implementation.

    Tarjan's algorithm.  The previous version had two defects: the first
    visited vertex kept index 0 — the same value used as the "unvisited"
    sentinel — so it could be visited again and its SCC emitted twice; and
    edges into an already-emitted SCC could wrongly lower a lowlink because
    there was no "still on stack" check.  Indices now start at 1 and only
    on-stack vertices update lowlinks, so ``SCCSearch_r`` is no longer used.

    :returns: list of SCCs, each a list of vertices, in completion order
    """
    for v in G:
        G[v]['n'] = G[v]['p'] = 0
    S = collections.deque()
    on_stack = set()
    res = []
    counter = [0]  # boxed so the nested function can advance it

    def visit(u):
        counter[0] += 1
        G[u]['p'] = G[u]['n'] = counter[0]
        S.append(u)
        on_stack.add(u)
        for v in G[u]['adj']:
            if G[v]['n'] == 0:
                # Tree edge: visit v, then fold its lowlink into u's.
                visit(v)
                G[u]['p'] = min(G[u]['p'], G[v]['p'])
            elif v in on_stack:
                # Back/cross edge to a vertex still on the component stack.
                G[u]['p'] = min(G[u]['p'], G[v]['n'])
        if G[u]['p'] == G[u]['n']:
            # u is a component root: pop the stack down to u, inclusive.
            scc = []
            while True:
                w = S.pop()
                on_stack.remove(w)
                scc.append(w)
                if w == u:
                    break
            res.append(scc)

    for u in G:
        if G[u]['n'] == 0:
            visit(u)
    return res
def SCCSearch_r(G,u,step,S,res):
    # Recursive Tarjan-style visit used by SCCSearch.
    # NOTE(review): ``step`` is passed by value, so sibling subtrees reuse the
    # same discovery indices, and the first call receives step == 0 — the same
    # value used as the "unvisited" sentinel — so the starting vertex keeps
    # n == 0 and can be visited (and its SCC reported) again.  There is also
    # no "still on stack" check before the second lowlink update, so edges
    # into an already-emitted SCC can wrongly lower ``p``.  Verify against a
    # reference Tarjan implementation before trusting the output on graphs
    # with cross edges.
    G[u]['p']=G[u]['n']=step          # discovery index and provisional lowlink
    step=step+1
    S.append(u)                       # u is now on the component stack
    for v in G[u]['adj']:
        if (G[v]['n']==0):
            # Tree edge: visit v, then fold its lowlink into u's.
            SCCSearch_r(G,v,step,S,res)
            G[u]['p']=min(G[u]['p'],
                G[v]['p'])
        elif (G[v]['n']<G[u]['n']):
            # Edge to an earlier-indexed vertex.
            G[u]['p']=min(G[u]['p'],
                G[v]['n'])
    if (G[u]['p']==G[u]['n']):
        # u is a component root: pop the stack down to u, inclusive.
        scc=[]
        vx=S[-1]
        while(vx!=u):
            scc.append(S.pop())
            vx=S[-1]
        scc.append(S.pop())
        res.append(scc)
| 22.055556 | 39 | 0.377834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.066751 |
e8c780f21a3bf8a3de70d1cac8fa793d1b5d7adc | 973 | py | Python | tests/unit/test_transformers_token_classification.py | dreasysnail/nlp-recipes | fcd62f0d8a1495e9157a058ca90bab3dd2276dff | [
"MIT"
] | 1 | 2020-02-14T11:39:17.000Z | 2020-02-14T11:39:17.000Z | tests/unit/test_transformers_token_classification.py | KingDEV95/nlp-recipes | fcd62f0d8a1495e9157a058ca90bab3dd2276dff | [
"MIT"
] | null | null | null | tests/unit/test_transformers_token_classification.py | KingDEV95/nlp-recipes | fcd62f0d8a1495e9157a058ca90bab3dd2276dff | [
"MIT"
] | 1 | 2021-06-17T04:15:02.000Z | 2021-06-17T04:15:02.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
from utils_nlp.common.pytorch_utils import dataloader_from_dataset
from utils_nlp.models.transformers.named_entity_recognition import TokenClassificationProcessor, TokenClassifier
@pytest.mark.cpu
def test_token_classifier_fit_predict(tmpdir, ner_test_data):
    """Smoke-test TokenClassifier: preprocess, fit with no warmup, predict."""
    classifier = TokenClassifier(model_name="bert-base-uncased", num_labels=6, cache_dir=tmpdir)
    processor = TokenClassificationProcessor(model_name="bert-base-uncased", cache_dir=tmpdir)

    # Fit on the fixture data, without a warmup schedule.
    dataset = processor.preprocess_for_bert(
        text=ner_test_data["INPUT_TEXT"],
        labels=ner_test_data["INPUT_LABELS"],
        label_map=ner_test_data["LABEL_MAP"],
    )
    dataloader = dataloader_from_dataset(dataset)
    classifier.fit(dataloader)

    # Predict without labels; the result itself is ignored (smoke test only).
    _ = classifier.predict(dataloader, verbose=False)
| 40.541667 | 117 | 0.801644 | 0 | 0 | 0 | 0 | 680 | 0.698869 | 0 | 0 | 213 | 0.218911 |
e8c78661e1152f331d7f1edce69005199e0275a9 | 1,575 | py | Python | doc/examples/plot_peak_local_max.py | Teva/scikits.image | 12669d62e699313ca0f73de1b211bf438f4efb0c | [
"BSD-3-Clause"
] | 3 | 2015-11-12T06:34:49.000Z | 2017-09-22T07:47:50.000Z | doc/examples/plot_peak_local_max.py | amueller/scikit-image | a8bfc5c5814a3c7fe363cfcad0c68b935706cd3c | [
"BSD-3-Clause"
] | 1 | 2020-03-30T12:31:55.000Z | 2020-03-30T12:31:55.000Z | doc/examples/plot_peak_local_max.py | emmanuelle/scikit-image | eccc41907135cf81b99c4be18a480a9bc705485d | [
"BSD-3-Clause"
] | 8 | 2015-03-02T20:36:55.000Z | 2021-02-18T10:37:00.000Z | """
===============================================================================
Finding local maxima
===============================================================================
The ``peak_local_max`` function returns the coordinates of local peaks (maxima)
in an image. A maximum filter is used for finding local maxima. This operation
dilates the original image and merges neighboring local maxima closer than the
size of the dilation. Locations where the original image is equal to the
dilated image are returned as local maxima.
"""
from scipy import ndimage
import matplotlib.pyplot as plt
from skimage.feature import peak_local_max
from skimage import data, img_as_float
im = img_as_float(data.coins())

# image_max is the dilation of im with a 20*20 structuring element
# It is used within peak_local_max function
image_max = ndimage.maximum_filter(im, size=20, mode='constant')

# Comparison between image_max and im to find the coordinates of local maxima
coordinates = peak_local_max(im, min_distance=20)

# display results: original image, its maximum filter, and the detected peaks
plt.figure(figsize=(8, 3))

# left panel: the original image
plt.subplot(131)
plt.imshow(im, cmap=plt.cm.gray)
plt.axis('off')
plt.title('Original')

# middle panel: the dilated (maximum-filtered) image
plt.subplot(132)
plt.imshow(image_max, cmap=plt.cm.gray)
plt.axis('off')
plt.title('Maximum filter')

# right panel: peak coordinates drawn over the original image
plt.subplot(133)
plt.imshow(im, cmap=plt.cm.gray)
plt.autoscale(False)
# peak_local_max returns (row, col) pairs, so column -> x and row -> y
plt.plot([p[1] for p in coordinates], [p[0] for p in coordinates], 'r.')
plt.axis('off')
plt.title('Peak local max')

plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
                    bottom=0.02, left=0.02, right=0.98)
plt.show()
| 30.882353 | 79 | 0.676825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.52 |
e8c8708a26737af95fceb49444dae7c2904ee43c | 2,662 | py | Python | TimingPoint/notes_random.py | Fairy-Phy/Relium | 70ea037cea176f02e4768bde44dd5ee23af699b3 | [
"MIT"
] | null | null | null | TimingPoint/notes_random.py | Fairy-Phy/Relium | 70ea037cea176f02e4768bde44dd5ee23af699b3 | [
"MIT"
] | null | null | null | TimingPoint/notes_random.py | Fairy-Phy/Relium | 70ea037cea176f02e4768bde44dd5ee23af699b3 | [
"MIT"
] | null | null | null | import random
from Relium import calcurate, classes, parser, constant
"""
Displays notes at random positions, one line at a time.
"""
source_file = r""  # path to the source .osu beatmap (fill in before running)
target_start_offset = 31999  # first hit-object offset (ms) to process
target_end_offset = 34666  # last hit-object offset (ms) to process
avgbpm = 180  # base BPM restored on the final note
# Maximum note height (raising it too far makes the notes invisible)
max_laneheight = 370
beat = 4
sample_set = 1
sample_index = 0
volume = 64
effects = 1
## Main ##
parsed_map = parser.parsefile(source_file)

# Hit objects inside the requested time window.
target_hitobjects = [
    hitobject
    for hitobject in parsed_map.HitObjects
    if target_start_offset <= hitobject.offset <= target_end_offset
]

# Keep only the first hit object of each run sharing the same offset.
# (The previous code removed items from the list while iterating it, which
# skips the element following every removal, so consecutive duplicates could
# survive; it also treated an initial offset of 0 as a duplicate.)
deduped_hitobjects = []
last_process_offset = None
for target_hitobject in target_hitobjects:
    if target_hitobject.offset != last_process_offset:
        deduped_hitobjects.append(target_hitobject)
    last_process_offset = target_hitobject.offset
target_hitobjects = deduped_hitobjects

result_object = classes.ParsedBeatmap([], [])
last_index = len(target_hitobjects) - 1
for i, target_hitobject in enumerate(target_hitobjects):
    offset = target_hitobject.offset
    if i == 0:
        # First note: inf BPM at the note, zero BPM one millisecond later.
        result_object.TimingPoints.append(classes.TimingPoint(offset, constant.inf_bpm, beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(offset + 1, constant.zero_bpm, beat, sample_set, sample_index, volume, False, effects))
    elif i == last_index:
        # Last note: jump to a random height just before it, then restore the
        # base BPM on the note itself.  (max_laneheight replaces the previous
        # hard-coded 370; the unreachable "i == 0" sub-branches are removed —
        # i == 0 is always consumed by the first branch above.)
        result_object.TimingPoints.append(classes.TimingPoint(offset - 1, calcurate.line_notesposition(avgbpm, random.uniform(1, max_laneheight)), beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(offset, calcurate.timingpoint(avgbpm), beat, sample_set, sample_index, volume, False, effects))
    else:
        # Middle notes: random height just before the note, then the
        # inf/zero BPM pair around the note itself.
        result_object.TimingPoints.append(classes.TimingPoint(offset - 1, calcurate.line_notesposition(avgbpm, random.uniform(1, max_laneheight)), beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(offset, constant.inf_bpm, beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(offset + 1, constant.zero_bpm, beat, sample_set, sample_index, volume, False, effects))

# Apparently the save path is resolved relative to the console's working
# directory.
parser.parsesave(result_object, "export.txt")
e8c8a148dcb36dc45bfc001222c52fac5f7202b2 | 5,527 | py | Python | tests/graph_test.py | lwi19/graphe-simple | cf5ad96e13140d6357c2a9b12de71f1a0b53c86c | [
"MIT"
] | null | null | null | tests/graph_test.py | lwi19/graphe-simple | cf5ad96e13140d6357c2a9b12de71f1a0b53c86c | [
"MIT"
] | null | null | null | tests/graph_test.py | lwi19/graphe-simple | cf5ad96e13140d6357c2a9b12de71f1a0b53c86c | [
"MIT"
] | null | null | null | import unittest
"""
graphe_test.py
Created by lwi19
Copyright © 2020 Louis Plouffe. All rights reserved.
"""
"""
Test module for graph_theory
Call method: python3 -m unittest discover -p "*test.py" -s ./tests -v
Graphs are imported in this file,
"""
# import util.graph_lib as glb
from data.some_of_them import g_01, g_02, g_03, g_06, g_07, g_08
class Testfunctions(unittest.TestCase):

    def test_vertices(self):
        """
        Test the presence of a vertices list in a graph.
        :return: True if the list match.
        """
        stuff_to_find = {5, 1, 2, 4, 0, 3}
        stuff_calculated = g_01.vertices
        self.assertSetEqual(stuff_to_find, stuff_calculated,
                            f"error: vertices(), {stuff_to_find} is not expected")

    def test_edges(self):
        """
        Test the presence of a list of edges in a graph.
        :return: True if the list match
        """
        edges_tuple = (0, 3), (1, 2), (2, 3), (2, 4)
        stuff_to_find = set(edges_tuple)
        stuff_calculated = g_01.edges()
        self.assertSetEqual(stuff_to_find, stuff_calculated,
                            f"error: edges(), {stuff_to_find} is not expected")

    def test_is_edge(self):
        """
        Test several vertices in a graph.
        :return: True if they exist in a graph.
        """
        stuff_to_find_1 = (18, 19)
        stuff_to_find_2 = (19, 18)
        stuff_to_find_3 = (88, 151)  # this vertex must not pass the test
        self.assertTrue(g_02.is_edge(*stuff_to_find_1), f"error: is_edges() {stuff_to_find_1} is not expected")
        self.assertTrue(g_02.is_edge(*stuff_to_find_2), f"error: is_edges() {stuff_to_find_2} is not expected")
        self.assertFalse(g_02.is_edge(*stuff_to_find_3), f"error: is_edges() {stuff_to_find_3} is not expected")

    def test_breadth_or_depth(self):
        """
        Test two different methods of searching: BFS and DFS. They
        must return the same extraction path.
        :return:
        """
        # "ATTENTION: DFS is very long to calculate for a large graph"
        dfs_components = g_06.extract_components(style="DFS")[0]
        bfs_components = g_06.extract_components(style="BFS")[0]
        self.assertTrue([set(i) for i in dfs_components.values()] ==
                        [set(j) for j in bfs_components.values()],
                        "error: DFS and BFS mismatch")

    def test_find_shortest_path(self):
        """
        Test shortest path between two random vertices.
        The calculation is verified in the two possible direction
        They must return the same extraction path
        :return: True if the same result.
        """
        # Do the calculation on two distinct random numbers.
        # TODO: ensure that the vertices are connected (in the same components)
        if rep := g_06.two_random_vertices():
            vertex_one, vertex_two = rep
            if path_in_order := g_06.find_shortest_path(vertex_one, vertex_two):
                # list.reverse() works in place and returns None, so it cannot
                # be used inside a walrus assignment: the previous code made
                # the inner condition always falsy (the comparison never ran)
                # and raised AttributeError when find_shortest_path returned
                # False.  Fetch the path first, then reverse it.
                path_in_reverse_order = g_06.find_shortest_path(vertex_two, vertex_one)
                if path_in_reverse_order:
                    path_in_reverse_order.reverse()
                    print(path_in_order)
                    print(path_in_reverse_order)
                    self.assertEqual(path_in_order, path_in_reverse_order, "error : shortest path miscalculation ")
            else:
                # path_in_order is False when no path exist.
                self.assertTrue(path_in_order, f"error: no possible path between vertices {vertex_one} and {vertex_two}")
        else:
            # rep is False when vertices do not exist in the path
            self.assertTrue(rep, f"error: no valid vertices found")

    # Starting now, all method with pattern test*z could modified graphs.
    # the 'z' is used to ensure the execution after others methods
    # because graphs are modified.

    def test_z_add_edge(self):
        """
        Test the possibility to add an edge
        :return: True if it is added correctly
        """
        # g_01 is modified here
        node_to_add = (5, 1)
        g_01.add_edge(*node_to_add)
        self.assertTrue(g_01.is_edge(*node_to_add), "edge cannot be added")
        node_to_add = (0, "X")
        g_01.add_edge(*node_to_add)
        self.assertFalse(g_01.is_edge(*node_to_add), "ERROR: impossible edge are added ?")

    def test_z_delete_edge(self):
        """
        Test the possibility to delete an edge
        :return: True if it is deleted correctly
        """
        # g_03 is modified here
        edge_to_delete = (4, 5)
        edge_exist = g_03.is_edge(*edge_to_delete)
        g_03.delete_edge(*edge_to_delete)
        self.assertTrue(edge_exist, "cannot delete edge")
        self.assertFalse(g_03.is_edge(*edge_to_delete))

    def test_z_delete_vertex(self):
        """
        Test the possibility to delete one vertex
        :return: True if it is deleted correctly
        """
        # g_03 is modified here
        # not only the vertex is deleted, but also all edges connected to it.
        vertex_to_delete = g_03.one_random_vertex()
        self.assertTrue(vertex_to_delete in g_03.graph_dict.keys(), f"{vertex_to_delete} vertex is not part of the graph.")
        g_03.delete_vertex(vertex_to_delete)
        self.assertFalse(vertex_to_delete in g_03.graph_dict.keys())
        # when deleted, a vertex must not appear in the adjacency lists.
        self.assertFalse(bool([k for k in g_03.graph_dict.values() if vertex_to_delete in k]))
if __name__ == "__main__":
    # Allow running this test module directly: ``python graph_test.py``.
    unittest.main()
| 39.76259 | 123 | 0.631988 | 5,109 | 0.924204 | 0 | 0 | 0 | 0 | 0 | 0 | 2,592 | 0.468886 |
e8c9149709f8faee9ce978ba8f02bea4f6d7bb11 | 375 | py | Python | fbscraper.py | Woahisme/final_project | f94fe272f32bc7c1cac3c6939054c0aee6edb71a | [
"MIT"
] | null | null | null | fbscraper.py | Woahisme/final_project | f94fe272f32bc7c1cac3c6939054c0aee6edb71a | [
"MIT"
] | null | null | null | fbscraper.py | Woahisme/final_project | f94fe272f32bc7c1cac3c6939054c0aee6edb71a | [
"MIT"
] | null | null | null |
import json
from facebook_scraper import get_profile
# TODO: pass the profile name through from the webpage once that issue is fixed
json_data = get_profile("passparam", cookies="./fbcookies.json")

# Serialize once and reuse the same text for both the console and the file
# (the previous code serialized the profile a second time with json.dump).
json_formatted = json.dumps(json_data, indent=2)
print(json_formatted)
with open("fb_info.json", "w") as outfile:
    outfile.write(json_formatted)
e8c93152e18c071a0506078a1cc5a0656d4c674d | 1,106 | py | Python | ga4ghtest/controllers/plugins_controller.py | ga4gh/workflow-interop | e2b3422f6867ce632c65e017d7b558f3cce9fcf1 | [
"Apache-2.0"
] | 5 | 2019-04-15T17:37:33.000Z | 2020-04-24T15:18:06.000Z | ga4ghtest/controllers/plugins_controller.py | Sage-Bionetworks/synevalharness | e2b3422f6867ce632c65e017d7b558f3cce9fcf1 | [
"Apache-2.0"
] | 58 | 2018-12-07T15:55:35.000Z | 2022-02-18T15:42:27.000Z | ga4ghtest/controllers/plugins_controller.py | Sage-Bionetworks/synapse-orchestrator | e2b3422f6867ce632c65e017d7b558f3cce9fcf1 | [
"Apache-2.0"
] | 4 | 2018-07-18T23:00:09.000Z | 2018-07-31T20:01:21.000Z | import connexion
import six
from ga4ghtest.models import Plugin # noqa: E501
from ga4ghtest import util
from ga4ghtest.core.controllers import plugins_controller as controller
def create_plugin(
body
): # noqa: E501
"""Create a test plugin
Add a plugin for testing functionality of an API. # noqa: E501
:param body:
:type body: dict | bytes
:rtype: str
"""
if connexion.request.is_json:
body = Plugin.from_dict(connexion.request.get_json()) # noqa: E501
return controller.create_plugin(
body=body
)
def get_plugins(sort_by='created_at', order='desc', limit=3):  # noqa: E501
    """Get test plugins

    List the available test plugins. # noqa: E501

    :param sort_by: logic by which to sort matched records
    :type sort_by: str
    :param order: sort order (ascending or descending)
    :type order: str
    :param limit: maximum number of records to return
    :type limit: int

    :rtype: str
    """
    return controller.get_plugins(sort_by=sort_by, order=order, limit=limit)
| 21.686275 | 75 | 0.659132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.512658 |
e8cc90a1cdc2870db809982b1ae984cc1175ebc5 | 597 | py | Python | web/settings/__init__.py | EasySport/easysport | fa32ad6cdbf1f3e861d7eecf4d4f881deee43910 | [
"MIT"
] | 1 | 2018-05-23T15:58:40.000Z | 2018-05-23T15:58:40.000Z | web/settings/__init__.py | EasySport/easysport | fa32ad6cdbf1f3e861d7eecf4d4f881deee43910 | [
"MIT"
] | 1 | 2018-05-24T11:30:35.000Z | 2018-05-24T11:30:35.000Z | web/settings/__init__.py | EasySport/easysport | fa32ad6cdbf1f3e861d7eecf4d4f881deee43910 | [
"MIT"
] | null | null | null | import os
from split_settings.tools import include, optional
ENVIRONMENT = os.getenv('DJANGO_ENV') or 'development'
include(
# Load environment settings
'base/env.py',
optional('local/env.py'), # We can "patch" any settings from local folder env.py file.
# Here we should have the order because of dependencies
'base/paths.py',
'base/apps.py',
'base/middleware.py',
# Load all other settings
'base/*.py',
# Select the right env:
'environments/%s.py' % ENVIRONMENT,
optional('local/*.py'), # we can load any other settings from local folder
) | 25.956522 | 91 | 0.673367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 384 | 0.643216 |
e8ccfb7cc77d87449e215a705b8af7a4549c6e04 | 15,854 | py | Python | selfdrive/car/hyundai/values.py | agegold/OPKR080 | 3a12e00f4f0b95be9cd19d10f34eab1823b72252 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/values.py | agegold/OPKR080 | 3a12e00f4f0b95be9cd19d10f34eab1823b72252 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/values.py | agegold/OPKR080 | 3a12e00f4f0b95be9cd19d10f34eab1823b72252 | [
"MIT"
] | 2 | 2021-01-19T15:45:57.000Z | 2021-01-19T15:54:18.000Z | # flake8: noqa
from cereal import car
from selfdrive.car import dbc_dict
from common.params import Params
Ecu = car.CarParams.Ecu
# Steer torque limits
class SteerLimitParams:
    """Steering torque limits, read from persisted Params.

    NOTE(review): these Params reads execute once, when the class body runs
    at import time — changing the stored values afterwards has no effect
    until the process restarts.
    """
    params = Params()
    STEER_MAX = int(params.get('SteerMaxAdj'))  # 409 is the max, 255 is stock
    STEER_DELTA_UP = int(params.get('SteerDeltaUpAdj'))
    STEER_DELTA_DOWN = int(params.get('SteerDeltaDownAdj'))
    STEER_DRIVER_ALLOWANCE = 50
    STEER_DRIVER_MULTIPLIER = 2
    STEER_DRIVER_FACTOR = 1
class CAR:
# genesis
GENESIS = "GENESIS 2015-2016"
GENESIS_G70 = "GENESIS G70 2018"
GENESIS_G80 = "GENESIS G80 2017"
GENESIS_G90 = "GENESIS G90 2017"
# hyundai
ELANTRA = "HYUNDAI ELANTRA LIMITED ULTIMATE 2017"
ELANTRA_GT_I30 = "HYUNDAI I30 N LINE 2019 & GT 2018 DCT"
SONATA = "HYUNDAI SONATA 2020"
SONATA_HEV = "HYUNDAI SONATA HEV 2020"
SONATA19 = "HYUNDAI SONATA 2019"
SONATA19_HEV = "HYUNDAI SONATA 2019 HEV"
KONA = "HYUNDAI KONA 2019"
KONA_EV = "HYUNDAI KONA EV 2019"
KONA_HEV = "HYUNDAI KONA HEV 2019"
IONIQ_EV = "HYUNDAI IONIQ ELECTRIC LIMITED 2019"
IONIQ_HEV = "HYUNDAI IONIQ HYBRID PREMIUM 2017"
SANTA_FE = "HYUNDAI SANTA FE LIMITED 2019"
PALISADE = "HYUNDAI PALISADE 2020"
VELOSTER = "HYUNDAI VELOSTER 2019"
GRANDEUR = "GRANDEUR IG 2017-2020"
GRANDEUR_HEV = "GRANDEUR IG HEV 2019-2020"
NEXO = "HYUNDAI NEXO"
# kia
FORTE = "KIA FORTE E 2018"
OPTIMA = "KIA OPTIMA SX 2019 & 2016"
OPTIMA_HEV = "KIA OPTIMA HYBRID 2017 & SPORTS 2019"
SPORTAGE = "KIA SPORTAGE S 2020"
SORENTO = "KIA SORENTO GT LINE 2018"
STINGER = "KIA STINGER GT2 2018"
NIRO_EV = "KIA NIRO EV 2020 PLATINUM"
NIRO_HEV = "KIA NIRO HEV 2018"
CEED = "KIA CEED 2019"
CADENZA = "KIA K7 2016-2019"
CADENZA_HEV = "KIA K7 HEV 2016-2019"
class Buttons:
    """Integer codes for the cruise-control stalk buttons."""
    NONE = 0
    RES_ACCEL = 1  # resume / accelerate
    SET_DECEL = 2  # set / decelerate
    GAP_DIST = 3   # following-gap adjust
    CANCEL = 4
params = Params()
fingerprint_issued_fix = params.get('FingerprintIssuedFix') == "1"
if fingerprint_issued_fix: # 핑거인식문제 혹은 다른차량과 핑거프린트 충돌이 나는경우 여기다가 핑거를 넣으시고 개발자 메뉴에서 핑거프린트 이슈차량 전용을 켜면 적용됩니다.
FINGERPRINTS = {
# genesis
CAR.GENESIS: [{}],
CAR.GENESIS_G70: [{}],
CAR.GENESIS_G80: [{}],
CAR.GENESIS_G90: [{}],
# hyundai
CAR.ELANTRA: [{}],
CAR.ELANTRA_GT_I30: [{}],
CAR.SONATA: [{}],
CAR.SONATA_HEV: [{}],
CAR.SONATA19: [{}],
CAR.SONATA19_HEV: [{}],
CAR.KONA: [{}],
CAR.KONA_EV: [{}],
CAR.KONA_HEV: [{}],
CAR.IONIQ_HEV: [{}],
CAR.IONIQ_EV: [{}],
CAR.SANTA_FE: [{}],
CAR.PALISADE: [{}],
CAR.VELOSTER: [{}],
CAR.GRANDEUR: [{}],
CAR.GRANDEUR_HEV: [{}],
CAR.NEXO: [{}],
# kia
CAR.FORTE: [{}],
CAR.OPTIMA: [{}],
CAR.OPTIMA_HEV: [{}],
CAR.SPORTAGE: [{}],
CAR.SORENTO: [{}],
CAR.STINGER: [{}],
CAR.NIRO_EV: [{}],
CAR.NIRO_HEV: [{}],
CAR.CEED: [{}],
CAR.CADENZA: [{}],
CAR.CADENZA_HEV: [{}]
}
else: # 핑거 프린트 이슈 없는 차량은 이곳에 넣으세요.
FINGERPRINTS = {
# genesis
CAR.GENESIS: [{}],
CAR.GENESIS_G70: [{}],
CAR.GENESIS_G80: [{}],
CAR.GENESIS_G90: [{}],
# hyundai
CAR.ELANTRA: [{}],
CAR.ELANTRA_GT_I30: [{}],
CAR.SONATA: [{}],
CAR.SONATA_HEV: [{}],
CAR.SONATA19: [{}],
CAR.SONATA19_HEV: [{}],
CAR.KONA: [{}],
CAR.KONA_EV: [{}],
CAR.KONA_HEV: [{}],
CAR.IONIQ_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470:8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576:8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.IONIQ_EV: [{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 545: 8, 546: 8, 548: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 546: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8}],
CAR.SANTA_FE: [{}],
CAR.PALISADE: [{}],
CAR.VELOSTER: [{}],
CAR.GRANDEUR: [{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 516: 8, 524: 8, 528: 8, 532: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 854 : 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8 , 1151: 6, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312 : 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6 , 1456: 4, 1470: 8},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 549: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8}],
CAR.GRANDEUR_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8}],
CAR.NEXO: [{}],
# kia
CAR.FORTE: [{}],
CAR.OPTIMA: [{}],
CAR.OPTIMA_HEV: [{}],
CAR.SPORTAGE: [{}],
CAR.SORENTO: [{}],
CAR.STINGER: [{}],
CAR.NIRO_EV: [{}],
CAR.NIRO_HEV: [{}],
CAR.CEED: [{}],
CAR.CADENZA: [{}],
CAR.CADENZA_HEV: [{}]
}
# Don't use these fingerprints for fingerprinting, they are still used for ECU detection
IGNORED_FINGERPRINTS = [CAR.VELOSTER, CAR.GENESIS_G70, CAR.KONA]
CHECKSUM = {
"crc8": [CAR.SANTA_FE, CAR.SONATA, CAR.PALISADE, CAR.SONATA_HEV],
"6B": [CAR.SORENTO, CAR.GENESIS],
}
FEATURES = {
# 캔오류 관련, 오류가 발생하는 경우는 본인 차종에 맞지 않는 캔신호가 들어오기때문입니다. 대부분 이곳을 수정하면 해결되나, 부득이 판다코드를 수정해야 될수도 있습니다.
# debug 코드가 포함되어 있으면, /data/openpilot/selfdrive/debug 안에 몇가지 툴이 들어있습니다. 실행하시면 디버그에 도움이 되실겁니다. 팟팅!!!
# Use Cluster for Gear Selection, rather than Transmission
"use_cluster_gears": {CAR.ELANTRA, CAR.KONA, CAR.ELANTRA_GT_I30, CAR.CADENZA, CAR.GRANDEUR},
# Use TCU Message for Gear Selection
"use_tcu_gears": {CAR.OPTIMA, CAR.SONATA19, CAR.VELOSTER},
# Use E_GEAR Message for Gear Selection
"use_elect_gears": {CAR.SONATA_HEV, CAR.SONATA19_HEV, CAR.KONA_EV, CAR.KONA_HEV, CAR.IONIQ_EV, CAR.IONIQ_HEV, CAR.GRANDEUR_HEV, CAR.NEXO,
CAR.OPTIMA_HEV, CAR.CADENZA_HEV, CAR.NIRO_EV, CAR.NIRO_HEV}, # 전기차 or 하이브리드 기어인식 부분
# Use E_EMS11 Message for Gas and Brake for Hybrid/ELectric
"use_elect_ems": {CAR.SONATA_HEV, CAR.SONATA19_HEV, CAR.KONA_EV, CAR.KONA_HEV, CAR.IONIQ_EV, CAR.IONIQ_HEV, CAR.GRANDEUR_HEV, CAR.NEXO,
CAR.OPTIMA_HEV, CAR.CADENZA_HEV, CAR.NIRO_EV, CAR.NIRO_HEV}, # 전기차 or 하이브리드 차량 넣어주세요.(가속페달관련)
# send LFA MFA message for new HKG models
"send_lfa_mfa": {CAR.GRANDEUR_HEV, CAR.GRANDEUR, CAR.KONA_HEV}, #차량의 LFA아이콘(핸들모양 아이콘)을 켜지게 하려면 여기다가 본인 차종을 넣으세요.
"has_scc13": set([]),
"has_scc14": set([]),
# these cars use the FCA11 message for the AEB and FCW signals, all others use SCC12
"use_fca": {CAR.SONATA, CAR.ELANTRA, CAR.ELANTRA_GT_I30, CAR.PALISADE, CAR.GENESIS_G70, CAR.GRANDEUR_HEV, CAR.KONA_HEV}, # 전방추돌관련 계기판 오류가 발생할 경우 여기다 본인 차종을 넣어보세요.
"use_bsm": {CAR.SONATA, CAR.PALISADE, CAR.GENESIS, CAR.GENESIS_G70, CAR.GENESIS_G80, CAR.GENESIS_G90, CAR.NEXO,
CAR.KONA, CAR.SONATA_HEV, CAR.SONATA19_HEV, CAR.KONA_EV, CAR.KONA_HEV, CAR.IONIQ_EV, CAR.IONIQ_HEV, CAR.GRANDEUR_HEV,
CAR.OPTIMA_HEV, CAR.CADENZA_HEV, CAR.NIRO_EV, CAR.NIRO_HEV, CAR.ELANTRA, CAR.KONA, CAR.ELANTRA_GT_I30, CAR.CADENZA, CAR.GRANDEUR,
CAR.OPTIMA, CAR.SONATA19, CAR.VELOSTER}, #후측방 감지 BSM 옵션이 있는 차량의 경우 넣어주세요.
}
DBC = {
# genesis
CAR.GENESIS: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G70: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None),
# hyundai
CAR.ELANTRA: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA_GT_I30: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA19: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA19_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', None),
CAR.PALISADE: dbc_dict('hyundai_kia_generic', None),
CAR.VELOSTER: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.NEXO: dbc_dict('hyundai_kia_generic', None),
# kia
CAR.FORTE: dbc_dict('hyundai_kia_generic', None),
CAR.OPTIMA: dbc_dict('hyundai_kia_generic', None),
CAR.OPTIMA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SPORTAGE: dbc_dict('hyundai_kia_generic', None),
CAR.SORENTO: dbc_dict('hyundai_kia_generic', None),
CAR.STINGER: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.CEED: dbc_dict('hyundai_kia_generic', None),
CAR.CADENZA: dbc_dict('hyundai_kia_generic', None),
CAR.CADENZA_HEV: dbc_dict('hyundai_kia_generic', None),
}
STEER_THRESHOLD = 150
| 70.150442 | 656 | 0.595055 | 1,672 | 0.101481 | 0 | 0 | 0 | 0 | 0 | 0 | 3,313 | 0.20108 |
e8cd034e5a44f69d6d5a77a8bdd96d2912409673 | 1,104 | py | Python | builder/utils/util_dict.py | My-Novel-Management/storybuilderunite | c003d3451e237f574c54a87ea7d4fd8da8e833be | [
"MIT"
] | 1 | 2020-06-18T01:38:55.000Z | 2020-06-18T01:38:55.000Z | builder/utils/util_dict.py | My-Novel-Management/storybuilder | 1f36e56a74dbb55a25d60fce3ce81f3c650f521a | [
"MIT"
] | 143 | 2019-11-13T00:21:11.000Z | 2020-08-15T05:47:41.000Z | builder/utils/util_dict.py | My-Novel-Management/storybuilderunite | c003d3451e237f574c54a87ea7d4fd8da8e833be | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Utility methods for dictionary
==============================
'''
__all__ = (
'calling_dict_from',
'combine_dict',
'dict_sorted')
from itertools import chain
from typing import Tuple
from builder.utils import assertion
def calling_dict_from(calling: (str, dict), name: str) -> dict:
    ''' Construct a calling dictionary for Person class.

    Accepts either a ready-made dict or a "key:value" encoded string and
    always injects the subject name ('S') and the self reference ('M').
    '''
    from builder.utils.util_str import dict_from_string
    if isinstance(calling, dict):
        base = calling
    else:
        base = dict_from_string(assertion.is_str(calling), ':')
    me = base.get('me', '私')
    return combine_dict(base, {'S': name, 'M': me})
def combine_dict(a: dict, b: dict) -> dict:
    ''' Combine one dictionary from two dictionaries.

    On duplicate keys the value from *b* wins, matching dict-unpacking order.
    '''
    merged = dict(assertion.is_dict(a))
    merged.update(assertion.is_dict(b))
    return merged
def dict_sorted(origin: dict, is_reverse: bool=False) -> dict:
    ''' Sort dictionary by its keys.
    '''
    ordered_items = sorted(
        assertion.is_dict(origin).items(),
        key=lambda item: item[0],
        reverse=assertion.is_bool(is_reverse))
    return dict(ordered_items)
| 26.285714 | 70 | 0.615942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.275769 |
e8cdc8529cb089c6cb182638ad49d2d99e04381f | 924 | py | Python | scripts/hdfs_store.py | coastrock/CEBD1261-2019-fall-group-project | ef3c9dd841c9d57cb580cbd6650bb62d50dfc585 | [
"MIT"
] | 1 | 2020-05-03T04:57:48.000Z | 2020-05-03T04:57:48.000Z | scripts/hdfs_store.py | coastrock/CEBD1261-2019-fall-group-project | ef3c9dd841c9d57cb580cbd6650bb62d50dfc585 | [
"MIT"
] | null | null | null | scripts/hdfs_store.py | coastrock/CEBD1261-2019-fall-group-project | ef3c9dd841c9d57cb580cbd6650bb62d50dfc585 | [
"MIT"
] | null | null | null | try:
from zipfile import ZipFile
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
import os
except Exception as e:
print(e)
## http://www.hongyusu.com/imt/technology/spark-via-python-basic-setup-count-lines-and-word-counts.html
def push_acc():
    """Read the accidents CSV data from the shared volume and persist it to
    HDFS as Parquet.

    Side effects only: connects to the Spark master at spark://master:7077,
    reads every file under /volume/data/ as CSV, and writes the result to
    hdfs://hadoop/acc_data_parquet. NOTE(review): Spark's default save mode
    fails if the target parquet path already exists — confirm that is the
    desired behavior for reruns.
    """
    spark = SparkSession.builder \
        .master('spark://master:7077') \
        .appName("Push Accidents data to HDFS") \
        .getOrCreate()
    sc = spark.sparkContext
    sc.setLogLevel('WARN')
    # Unzipping is disabled; the data is assumed to be extracted already.
    # with ZipFile("/volume/data/accidents_2012_2018.zip", 'r') as zipObj:
    #    zipObj.extractall('/volume/data')
    # read the data from the volume
    acc_data = spark.read.csv("/volume/data/")
    # push the data on HDFS as parquet
    acc_data.write.parquet("hdfs://hadoop/acc_data_parquet")
if __name__ == "__main__":
push_acc()
| 27.176471 | 103 | 0.664502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.436147 |
e8ce7812ba16550644fb17607334947234116bcd | 966 | py | Python | examples/simple_bot.py | GeoffreyWesthoff/imgen-client.py | d57c00b8eeaf561962ffbb6c1f3ae9d6a4a6cde7 | [
"MIT"
] | 1 | 2020-06-25T19:38:19.000Z | 2020-06-25T19:38:19.000Z | examples/simple_bot.py | GeoffreyWesthoff/imgen-client.py | d57c00b8eeaf561962ffbb6c1f3ae9d6a4a6cde7 | [
"MIT"
] | null | null | null | examples/simple_bot.py | GeoffreyWesthoff/imgen-client.py | d57c00b8eeaf561962ffbb6c1f3ae9d6a4a6cde7 | [
"MIT"
] | null | null | null | """
DANK MEMER IMGEN API CLIENT
---------------------------
Copyright: Copyright 2019 Melms Media LLC
License: MIT
"""
from discord import Client
from imgen import AsyncClient
bot = Client()
memegen = AsyncClient(token='tokengoeshere')
@bot.event
async def on_ready():
    """Log the bot account once the Discord connection is established."""
    print('Logged in as %s' % bot.user)
@bot.event
async def on_message(msg):
    """Handle the two chat commands: !magik <no args> and !crab <top, bottom>.

    NOTE(review): there is no `msg.author == bot.user` guard, so the handler
    also runs on the bot's own messages; harmless here because replies never
    start with '!', but confirm before adding commands that echo input.
    """
    if msg.content.lower().startswith('!magik'):
        # Feed the author's avatar into the imgen "magik" distortion endpoint.
        magik = await memegen.magik.get_as_discord(avatar1=msg.author.avatar_url)
        return await msg.channel.send(file=magik)
    elif msg.content.lower().startswith('!crab'):
        # Strip the command prefix and normalize ", " so the API receives
        # exactly "upper,bottom".
        parsed = msg.content.replace('!crab ', '')
        parsed = parsed.replace(', ', ',')
        if len(parsed.split(',')) != 2:
            return await msg.channel.send('Please split the text with a comma, e.g. !crab upper, bottom')
        crab = await memegen.crab.get_as_discord(text=parsed)
        return await msg.channel.send(file=crab)
bot.run('bottokengoeshere')
| 26.833333 | 105 | 0.653209 | 0 | 0 | 0 | 0 | 691 | 0.715321 | 669 | 0.692547 | 266 | 0.275362 |
e8cf1815da91e995a850a952f368039cd746e5df | 89 | py | Python | src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py | SURGroup/UncertaintyQuantification | a94c8db47d07134ea2b3b0a3ca53ca818532c3e6 | [
"MIT"
] | null | null | null | src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py | SURGroup/UncertaintyQuantification | a94c8db47d07134ea2b3b0a3ca53ca818532c3e6 | [
"MIT"
] | null | null | null | src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py | SURGroup/UncertaintyQuantification | a94c8db47d07134ea2b3b0a3ca53ca818532c3e6 | [
"MIT"
] | null | null | null | from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import Correlation
| 44.5 | 88 | 0.898876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e8cf41221caeb2929a1bb122b724d01fc3c85985 | 7,552 | py | Python | discrete_ppo/ppo_goalgrid.py | sen-pai/pygame2gym | 1c8bc1422c1f3fb80054e77c03362e7eca2ef461 | [
"MIT"
] | 1 | 2020-11-04T18:02:13.000Z | 2020-11-04T18:02:13.000Z | discrete_ppo/ppo_goalgrid.py | sen-pai/pygame2gym | 1c8bc1422c1f3fb80054e77c03362e7eca2ef461 | [
"MIT"
] | null | null | null | discrete_ppo/ppo_goalgrid.py | sen-pai/pygame2gym | 1c8bc1422c1f3fb80054e77c03362e7eca2ef461 | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from torch.utils import tensorboard
import argparse
import numpy as np
import os
from statistics import mean, stdev
from tqdm import tqdm
import json
import gym
import simple_discrete_game
from models.cnn_agent import cnn_value_net, cnn_policy_net
from utils.memory import MainMemory
from utils.reproducibility import set_seed, log_params
from algo.ppo_step import calc_ppo_loss_gae
parser = argparse.ArgumentParser()
parser.add_argument("--env-name", default="GoalGrid-v0")
parser.add_argument("--exp-name", default="goalgrid_seed_1")
parser.add_argument("--batch-size", type=int, default=1000, help="batch_size")
parser.add_argument("--full-ppo-iters", type=int, default=500, help="num times whole thing is run")
parser.add_argument("--seed", type=int, default=1, help="set random seed for reproducibility ")
parser.add_argument("--num-value-updates", type=int, default=4, help="update critic per epoch")
parser.add_argument("--num-policy-updates", type=int, default=4, help="update agent per epoch")
parser.add_argument("--num-evaluate", type=int, default=20, help="eval per epoch")
parser.add_argument(
"--episode-max-lenght", type=int, default=100, help="max lenght to run an episode"
)
parser.add_argument("--save-interval", type=int, default=100, help="save weights every x episodes")
parser.add_argument("--agent-lr", type=int, default=0.002, help="agent learning rate")
parser.add_argument("--critic-lr", type=int, default=0.001, help="critic learing rate")
args = parser.parse_args()
json_log = log_params(args)
cuda = torch.device("cuda")
cpu = torch.device("cpu")
##Helper function
def flat_tensor(t):
    """Convert a numpy array into a flat (1-D) float32 torch tensor."""
    as_tensor = torch.from_numpy(t).to(torch.float32)
    return as_tensor.flatten()
def preprocess_obs_img(obs):
    """Turn an (H, W, C) uint8 image into a normalized (C, H, W) float tensor."""
    # Channels-first layout, then scale raw pixel values into [0, 1].
    chw = np.moveaxis(obs, 2, 0) / 255.0
    return torch.from_numpy(chw).float()
def calculate_gae(memory, gamma=0.99, lmbda=0.95):
    """Compute GAE(lambda) returns and normalized advantages in-place.

    Expects memory.values to hold one more entry than memory.rewards (the
    bootstrap value of the final state). Prepends the computed returns to
    memory.returns and stores normalized advantages on memory.advantages.
    """
    running_gae = 0
    computed = []
    for step in reversed(range(len(memory.rewards))):
        non_terminal = not memory.is_terminals[step]
        td_error = (
            memory.rewards[step]
            + gamma * memory.values[step + 1] * non_terminal
            - memory.values[step]
        )
        running_gae = td_error + gamma * lmbda * non_terminal * running_gae
        computed.append(running_gae + memory.values[step])
    # Reverse into time order and splice onto the front of memory.returns,
    # matching the original repeated insert(0, ...) behavior.
    computed.reverse()
    memory.returns[:0] = computed
    # Advantage = return - value baseline, normalized to zero mean / unit std.
    adv = np.array(memory.returns) - memory.values[:-1]
    memory.advantages = (adv - np.mean(adv)) / (np.std(adv) + 1e-10)
def collect_exp_single_actor(env, actor, memory, iters):
    """Roll out `actor` in `env` for `iters` steps, filling `memory`.

    Episodes reset on termination or when they exceed args.episode_max_lenght.
    Rewards are standardized (zero mean, unit std) over the collected batch
    before the filled memory is returned.
    """
    obs = env.reset()
    time_step = 0
    for _ in range(iters):
        obs = preprocess_obs_img(obs)
        memory.states.append(np.array(obs))
        action, log_prob = actor.act(obs.unsqueeze(0))
        next_obs, reward, done, info = env.step(action.item())
        memory.is_terminals.append(done)
        memory.actions.append(action.item())
        memory.logprobs.append(log_prob.item())
        memory.rewards.append(reward)
        obs = next_obs
        time_step += 1
        if done or time_step >= args.episode_max_lenght:
            obs = env.reset()
            # Fix: reset the step counter per episode. Previously it kept
            # growing, so once it passed episode_max_lenght the env reset on
            # every subsequent step (the eval loop in __main__ resets its
            # counter the same way).
            time_step = 0
    # Standardize rewards: subtract the mean and divide by the std.
    # Fix: was `(i + m) / std`, which shifts rewards in the wrong direction;
    # the previously commented-out line below showed the intended subtraction.
    m = mean(memory.rewards)
    std = stdev(memory.rewards) + 1e-5
    memory.rewards = [(i - m) / std for i in memory.rewards]
    return memory
def save_episode_as_gif(agent, env, episode_max_lenght, gif_name):
    """Run one episode with `agent` in `env` and save the frames as a GIF.

    The episode ends on `done` or after `episode_max_lenght` steps; frames
    come from env.render(mode="rgb_array").
    """
    frames = []
    obs = env.reset()
    for _ in range(episode_max_lenght):
        obs = preprocess_obs_img(obs)
        # Fix: use the `agent` argument instead of the `main_actor` global,
        # which silently ignored whichever policy the caller passed in.
        action, _ = agent.act(obs.unsqueeze(0))
        obs, _, done, _ = env.step(action.item())
        frames.append(env.render(mode="rgb_array"))
        if done:
            break
    # NOTE(review): `write_gif` is never imported in this file (it looks like
    # array2gif.write_gif) — confirm the import before relying on this helper.
    write_gif(frames, gif_name + ".gif", fps=30)
if __name__ == "__main__":
# creating environment
env = gym.make(args.env_name)
set_seed(args.seed, env)
n_actions = env.action_space.n
n_channels = 3
# create nn's
main_actor = cnn_policy_net(n_channels, n_actions)
critic = cnn_value_net(n_channels)
optim_actor = torch.optim.Adam(main_actor.parameters(), lr=args.agent_lr, betas=(0.9, 0.999))
optim_critic = torch.optim.Adam(critic.parameters(), lr=args.agent_lr, betas=(0.9, 0.999))
# create memory
main_memory = MainMemory(batch_size=args.batch_size)
# logging
tb_summary = tensorboard.SummaryWriter()
for iter in tqdm(range(args.full_ppo_iters + 1)):
main_memory = collect_exp_single_actor(env, main_actor, main_memory, args.batch_size)
critic.to(cuda)
main_actor.to(cuda)
main_memory.critic_values(critic, cuda)
calculate_gae(main_memory)
# print(main_memory.memory_size())
for k in range(args.num_policy_updates):
optim_actor.zero_grad()
ppo_loss = calc_ppo_loss_gae(main_actor, main_memory)
ppo_loss.backward()
optim_actor.step()
# value loss
value_loss_list = []
for j in range(args.num_value_updates):
batch_states, batch_returns = main_memory.get_value_batch()
batch_states, batch_returns = batch_states.to(cuda), batch_returns.to(cuda)
optim_critic.zero_grad()
pred_returns = critic(batch_states)
value_loss = F.mse_loss(pred_returns.view(-1), batch_returns.view(-1))
value_loss.backward()
optim_critic.step()
value_loss_list.append(value_loss.item())
tb_summary.add_scalar("loss/value_loss", mean(value_loss_list), global_step=iter)
main_actor.to(cpu)
main_memory.clear_memory()
# evaluation
eval_ep = 0
obs = env.reset()
eval_rewards = []
eval_timesteps = []
ep_reward = 0
ep_timestep = 0
num_done = 0
while args.num_evaluate > eval_ep:
ep_timestep += 1
obs = preprocess_obs_img(obs)
action, log_prob = main_actor.act(obs.unsqueeze(0))
obs, reward, done, info = env.step(action.item())
ep_reward += reward
if done or ep_timestep >= args.episode_max_lenght:
if done:
num_done += 1
obs = env.reset()
eval_ep += 1
eval_timesteps.append(ep_timestep)
eval_rewards.append(ep_reward)
ep_reward = 0
ep_timestep = 0
tb_summary.add_scalar("reward/eval_reward", mean(eval_rewards), global_step=iter)
tb_summary.add_scalar("time/eval_traj_len", mean(eval_timesteps), global_step=iter)
tb_summary.add_scalar("reward/prob_done", num_done / args.num_evaluate, global_step=iter)
json_log["rewards list"].append(mean(eval_rewards))
json_log["avg episode timesteps"].append(mean(eval_timesteps))
json_log["prob done"].append(num_done / args.num_evaluate)
print("eval_reward ", mean(eval_rewards), " eval_timesteps ", mean(eval_timesteps), "prob_done", num_done / args.num_evaluate)
if iter % args.save_interval == 0 and iter > 0:
torch.save(
main_actor.state_dict(), "ppo_" + args.exp_name + "_actor" + str(iter) + ".pth"
)
torch.save(critic.state_dict(), "ppo_" + args.exp_name + "_critic" + str(iter) + ".pth")
os.chdir(os.path.join(os.getcwd(), "jsons"))
with open(str(args.exp_name) + ".json", "w") as fp:
json.dump(json_log, fp, sort_keys=True, indent=4)
| 34.327273 | 134 | 0.651218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,024 | 0.135593 |
e8cf436b3f7cff830940245da46ee0d3379aa73a | 435 | py | Python | tinder/__init__.py | mzdravkov/elsys-ci-flask-example | 6543e1cc7e40db0535df6c4fe78120fd529d5cbb | [
"Apache-2.0"
] | 2 | 2019-12-30T13:26:55.000Z | 2020-01-18T14:03:25.000Z | tinder/__init__.py | mzdravkov/elsys-ci-flask-example | 6543e1cc7e40db0535df6c4fe78120fd529d5cbb | [
"Apache-2.0"
] | 3 | 2019-11-05T16:47:54.000Z | 2020-10-31T18:50:31.000Z | tinder/__init__.py | mzdravkov/elsys-ci-flask-example | 6543e1cc7e40db0535df6c4fe78120fd529d5cbb | [
"Apache-2.0"
] | 24 | 2019-10-10T19:17:40.000Z | 2020-10-25T10:42:00.000Z | from flask import Flask
from flask_socketio import SocketIO
from flask_sqlalchemy import SQLAlchemy
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/dev.db'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = "eusehuccuhosn23981pcgid1xth4dn"
socketio = SocketIO(app)
db = SQLAlchemy(app)
from tinder.routes import *
| 25.588235 | 63 | 0.770115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.287356 |
e8cfdb21fb6c07f6632ba57c031ad7697503326a | 1,910 | py | Python | pose/excerciseClass.py | San-B-09/BeFit | 7bd9cad51d6135fc5fab7221336f8969653b6af5 | [
"Apache-2.0"
] | null | null | null | pose/excerciseClass.py | San-B-09/BeFit | 7bd9cad51d6135fc5fab7221336f8969653b6af5 | [
"Apache-2.0"
] | null | null | null | pose/excerciseClass.py | San-B-09/BeFit | 7bd9cad51d6135fc5fab7221336f8969653b6af5 | [
"Apache-2.0"
] | null | null | null | from pose.poseClass import pose
from pose import helpers
class dumbbell_lateral_raises(pose):
angles={
('left_hip','left_shoulder','left_elbow'):None,
('right_hip','right_shoulder','right_elbow'):None,
('left_shoulder','left_elbow','left_wrist'):None,
('right_shoulder','right_elbow','right_wrist'):None
}
def __init__(self,coordinates,conf):
pose.__init__(self,coordinates,conf)
self.loadInfo()
class squats(pose):
angles={
('nose','shoulder','hip'):None,
('shoulder','hip','knee'):None,
('hip','knee','ankle'):None
}
def __init__(self,coordinates,conf):
pose.__init__(self,coordinates,conf)
self.loadInfo()
class dumbbell_upper_head(pose):
    # Joint triples whose angles characterize this exercise; the None values
    # are presumably populated via loadInfo() — verify in the pose base class.
    # NOTE(review): the last two keys both use the right shoulder/elbow
    # (('right_shoulder','right_elbow','left_wrist') and
    #  ('right_shoulder','right_elbow','right_wrist')); the first looks like
    # it was meant to be the left-side joints — confirm against the data.
    # NOTE(review): `angles` is a mutable class attribute shared by all
    # instances of this class.
    angles={
        ('right_shoulder','left_shoulder','left_elbow'):None,
        ('left_shoulder','right_shoulder','right_elbow'):None,
        ('right_shoulder','right_elbow','left_wrist'):None,
        ('right_shoulder','right_elbow','right_wrist'):None
    }
    def __init__(self,coordinates,conf):
        pose.__init__(self,coordinates,conf)
        self.loadInfo()
class push_up(pose):
    # Joint triples whose angles characterize this exercise; the None values
    # are presumably populated via loadInfo() — verify in the pose base class.
    # The body-alignment triples below were deliberately disabled, leaving
    # only the shoulder-ankle-wrist angle.
    angles={
        # ('nose','shoulder','hip'):None,
        # ('shoulder','hip','knee'):None,
        # ('hip','knee','ankle'):None,
        ('shoulder','ankle','wrist'):None
    }
    def __init__(self,coordinates,conf):
        pose.__init__(self,coordinates,conf)
        self.loadInfo()
class sit_up(pose):
angles={
('nose','shoulder','hip'):None,
('hip','knee','ankle'):None,
('shoulder','hip','knee'):None,
}
def __init__(self,coordinates,conf):
pose.__init__(self,coordinates,conf)
self.loadInfo()
class bicep_press(pose):
angles={
('shoulder','elbow','wrist'):None
}
def __init__(self,coordinates,conf):
pose.__init__(self,coordinates,conf)
self.loadInfo() | 30.31746 | 62 | 0.610471 | 1,842 | 0.964398 | 0 | 0 | 0 | 0 | 0 | 0 | 592 | 0.309948 |
e8d0887b58745afaab21c99b854c58a1db279ba6 | 3,850 | py | Python | pyMIDICapSense.py | midilab/pyMIDICapSense | 1993381a6fc8b2906ca1e4b1bb822df827952bea | [
"MIT"
] | 2 | 2020-05-08T18:15:28.000Z | 2020-07-19T00:42:47.000Z | pyMIDICapSense.py | midilab/pyMIDICapSense | 1993381a6fc8b2906ca1e4b1bb822df827952bea | [
"MIT"
] | null | null | null | pyMIDICapSense.py | midilab/pyMIDICapSense | 1993381a6fc8b2906ca1e4b1bb822df827952bea | [
"MIT"
] | null | null | null | import wiringpi2
import rtmidi
from defines import *
from config import *
#import config
# config.TIMEOUT
def Setup(outPin, inPin, ledPin):
    """Configure the GPIO pins for one capacitive sensor channel.

    outPin drives the RC sense line, inPin reads it back, ledPin gives
    per-sensor visual feedback.
    """
    # set send pin as output
    wiringpi2.pinMode(outPin, OUTPUT)
    # drive receive pin LOW while it is an output, to make sure any
    # pull-up is off before it is switched to an input
    wiringpi2.pinMode(inPin, OUTPUT)
    wiringpi2.digitalWrite(inPin, LOW)
    wiringpi2.pinMode(inPin, INPUT)
    # set ledPin as output and start with the LED off
    wiringpi2.pinMode(ledPin, OUTPUT)
    wiringpi2.digitalWrite(ledPin, LOW)
def CapRead(outPin, inPin, total=0, cycles=CYCLES):
    """Measure the capacitive sensor on (outPin, inPin).

    Charges and discharges the RC sense line `cycles` times while counting
    how long the receive pin lags the send pin. Returns 1 when the count,
    averaged over CYCLES, reaches TRIGGER (touch detected), 0 otherwise,
    and -2 on timeout.

    `total` and `cycles` keep their defaults for backward compatibility
    with the previous recursive implementation; the loop below replaces
    the recursion so a large CYCLES cannot exhaust the stack.
    """
    while cycles > 0:
        # Charge phase: drive the send pin high and count until the
        # receive pin follows.
        wiringpi2.digitalWrite(outPin, LOW)
        wiringpi2.digitalWrite(outPin, HIGH)
        while wiringpi2.digitalRead(inPin) == LOW and total < TIMEOUT:
            total += 1
        # bug fix: was 'total > TIMEOUT', which could never be true
        # because the loop above stops incrementing at TIMEOUT.
        if total >= TIMEOUT:
            return -2
        # Set receive pin HIGH briefly to charge up fully - the while
        # loop above exits when the pin is only ~ 2.5V.
        wiringpi2.digitalWrite(inPin, HIGH)
        # Discharge phase: send pin low, count until receive pin drops.
        wiringpi2.digitalWrite(outPin, LOW)
        while wiringpi2.digitalRead(inPin) == HIGH and total < TIMEOUT:
            total += 1
        if total >= TIMEOUT:
            return -2
        cycles -= 1
    if DEBUG:
        print("total unit count: %d" % total)
    # Average the accumulated count over all measurement cycles.
    total = round(total / CYCLES)
    return 1 if total >= TRIGGER else 0
def ChangeBank():
    """Advance the global BANK selector by one, wrapping back to 1
    once it would exceed BANK_MAX."""
    global BANK
    BANK = BANK + 1 if BANK < BANK_MAX else 1
    print("BANK: %d selected" % BANK)
# Main script: poll every sensor forever and translate touch transitions
# into MIDI note on/off messages.
note = 0;
# Configure GPIO for every sensor channel.
for sensor in SENSORS:
    Setup(sensor['output'], sensor['input'], sensor['led'])
# Open a virtual MIDI output port for other applications to connect to.
midi_out = rtmidi.MidiOut()
midi_out.open_virtual_port()
# Poll loop: compare each sensor reading with its previous value and act
# only on transitions.
while True:
    for sensor in SENSORS:
        if (DEBUG):
            print("############ %s" % sensor['name'])
        # NOTE(review): CapRead can also return -2 on timeout, which is
        # truthy and would be treated as a touch below — confirm intended.
        value = CapRead(sensor['output'], sensor['input'])
        if ( value and ( value != sensor['last_value'] ) ):
            # Rising edge: sensor newly touched.
            print("############ %s" % sensor['name'])
            #if (sensor['note'][BANK-1] == 0):
            if (sensor['type'] == BANK_CHANGE):
                # change bank request
                ChangeBank()
                note = 0
            elif (sensor['type'] == BANK_SELECT):
                # note depends on the currently selected bank
                note = sensor['note'][BANK-1]
            elif (sensor['type'] == RANDOM):
                # random type not implemented yet; keep previous note
                note = note
            elif (sensor['type'] == SEQUENTIAL):
                # cycle through the sensor's note list
                sensor['seq_next'] += 1
                if ( sensor['seq_next'] >= len(sensor['note']) ):
                    sensor['seq_next'] = 0
                note = sensor['note'][sensor['seq_next']]
            print ('Send Note ON: %d' % note)
            midi_out.send_message([0x90, note, 100]) # Note on
            # set sensor led ON
            wiringpi2.digitalWrite(sensor['led'], HIGH)
        elif ( value != sensor['last_value'] ):
            # Falling edge: sensor released; send the matching note off.
            #if (sensor['note'][BANK-1] != 0):
            print("############ %s" % sensor['name'])
            if (sensor['type'] == BANK_SELECT):
                note = sensor['note'][BANK-1]
            elif (sensor['type'] == RANDOM):
                # random type not implemented yet; keep previous note
                note = note
            elif (sensor['type'] == SEQUENTIAL):
                # release the note selected when the touch started
                note = sensor['note'][sensor['seq_next']]
            print ('Send Note OFF: %d' % note)
            midi_out.send_message([0x80, note, 100]) # Note off
            # set sensor led Off
            wiringpi2.digitalWrite(sensor['led'], LOW)
        sensor['last_value'] = value
| 24.522293 | 111 | 0.651948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,578 | 0.40987 |
e8d1a1267a50a6e4e81cbb82affa3e724bd9c390 | 841 | py | Python | src/losses.py | saman-codes/dldojo | 9fd828f1902ba3d46e9bb5f554ef37d07335b29e | [
"MIT"
] | null | null | null | src/losses.py | saman-codes/dldojo | 9fd828f1902ba3d46e9bb5f554ef37d07335b29e | [
"MIT"
] | null | null | null | src/losses.py | saman-codes/dldojo | 9fd828f1902ba3d46e9bb5f554ef37d07335b29e | [
"MIT"
] | null | null | null | import numpy as np
class Loss():
    """Base class for loss functions; concrete subclasses implement
    __call__ (the loss value) and override output_gradient."""

    def output_gradient(self):
        # Base implementation intentionally returns None; subclasses
        # provide the real gradient with (predicted, labels) arguments.
        return
class MSE(Loss):
    """Mean squared error: L = 0.5 * (predicted - labels)**2, elementwise."""

    def __call__(self, predicted, labels):
        residual = predicted - labels
        return 0.5 * residual * residual

    def output_gradient(self, predicted, labels):
        """dL/dpredicted = predicted - labels."""
        return predicted - labels
class BinaryCrossEntropy(Loss):
    """Binary cross-entropy loss; NaNs/infs from log(0) are zeroed out."""

    def __call__(self, predicted, labels):
        log_p = np.log(predicted)
        log_q = np.log(1 - predicted)
        return -np.nan_to_num(labels * log_p + (1 - labels) * log_q)

    def output_gradient(self, predicted, labels):
        """dL/dpredicted, with non-finite values clamped by nan_to_num."""
        return np.nan_to_num((1 - labels) / (1 - predicted) - labels / predicted)
class CategoricalCrossEntropy(Loss):
    """Categorical cross-entropy; classes are laid out along axis 0."""

    def __call__(self, predicted, labels):
        log_likelihood = labels * np.log(predicted)
        return -np.nan_to_num(np.sum(log_likelihood, axis=0, keepdims=True))

    def output_gradient(self, predicted, labels):
        """dL/dpredicted = -labels / predicted, with NaNs zeroed."""
        return -np.nan_to_num(labels / predicted)
| 30.035714 | 91 | 0.688466 | 813 | 0.966706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e8d389d59d2e598814e8742d6ed48d600f563413 | 436 | py | Python | cape_privacy/pandas/transformations/test_utils.py | vismaya-Kalaiselvan/cape-python | 2b93696cec43c4bab9098c35eccf6f2f66d9e5c0 | [
"Apache-2.0"
] | 144 | 2020-06-23T21:31:49.000Z | 2022-02-25T15:51:00.000Z | cape_privacy/pandas/transformations/test_utils.py | vismaya-Kalaiselvan/cape-python | 2b93696cec43c4bab9098c35eccf6f2f66d9e5c0 | [
"Apache-2.0"
] | 44 | 2020-06-24T14:42:23.000Z | 2022-02-21T03:30:58.000Z | cape_privacy/pandas/transformations/test_utils.py | vismaya-Kalaiselvan/cape-python | 2b93696cec43c4bab9098c35eccf6f2f66d9e5c0 | [
"Apache-2.0"
] | 16 | 2020-06-26T20:05:51.000Z | 2022-01-12T05:23:58.000Z | import pandas as pd
class PlusN:
    """A sample transform that adds a constant to a column.

    Attributes:
        n: The value added to every element of the column.
    """

    identifier = "plusN"
    type_signature = "col->col"

    def __init__(self, n: int = 1) -> None:
        self.n = n

    def __call__(self, column: pd.Series) -> pd.Series:
        return self.n + column
| 21.8 | 64 | 0.607798 | 413 | 0.947248 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.465596 |
e8d415fab5a4afde95fd5d9bc52286a0958b5e14 | 5,003 | py | Python | Redmash/redmash.py | zatherz/reddit | bd4378ff62e893d28fa824df5678c6de4021b123 | [
"MIT"
] | 4 | 2016-05-04T10:46:58.000Z | 2021-03-22T06:05:40.000Z | Redmash/redmash.py | zatherz/reddit | bd4378ff62e893d28fa824df5678c6de4021b123 | [
"MIT"
] | null | null | null | Redmash/redmash.py | zatherz/reddit | bd4378ff62e893d28fa824df5678c6de4021b123 | [
"MIT"
] | null | null | null | #/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import datetime
import traceback
import pickle
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "nsaleaks"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subs, use "sub1+sub2+sub3+...". For all use "all"
KEYWORDS = [" NSA", "NSA " "Snowden", "Greenwald"]
#Words to look for
KEYDOMAINS = []
#Domains to look for
KEYNAMES = []
#Names to look for
IGNORESELF = False
#Do you want the bot to dump selfposts? Use True or False (Use capitals! No quotations!)
TIMESTAMP = '%A %d %B %Y'
#The time format.
# "%A %d %B %Y" = "Wendesday 04 June 2014"
#http://docs.python.org/2/library/time.html#time.strftime
HEADER = ""
#Put this at the top of the .txt file
FORMAT = "_timestamp_: [_title_](_url_) - [r/_subreddit_](_nplink_)"
#USE THESE INJECTORS TO CREATE CUSTOM OUTPUT
#_timestamp_ which follows the TIMESTAMP format
#_title_
#_url_
#_subreddit_
#_nplink_
#_author_
PRINTFILE = "nsa"
#Name of the file that will be produced. Do not type the file extension
MAXPOSTS = 1000
#This is how many posts you want to retrieve all at once.
'''All done!'''
# Touch each output file once so later writes never hit a missing file.
for m in ["_date", "_author", "_subreddit", "_title"]:
    clistfile = open(PRINTFILE + m + '.txt', "a+")
    clistfile.close()
    # This is a hackjob way of creating the files if they do not exist.
# String form of MAXPOSTS, used for progress printing below.
MAXS = str(MAXPOSTS)
# Optionally pull the user agent from a local, uncommitted bot module.
try:
    import bot
    USERAGENT = bot.getaG()
except ImportError:
    pass
print('Logging in.')
# Authenticate against reddit via OAuth2.
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def work(lista):
    """Write every post in `lista` to the currently open `listfile`,
    formatted according to the FORMAT template.

    Relies on the module-level `listfile` being opened by the caller
    before each invocation.
    """
    global listfile
    if HEADER != "":
        print(HEADER, file=listfile)
    for post in lista:
        # Format the creation time with the configured TIMESTAMP pattern.
        timestamp = post.created_utc
        timestamp = datetime.datetime.fromtimestamp(int(timestamp)).strftime(TIMESTAMP)
        # Fill in the FORMAT template injectors one by one.
        final = FORMAT
        final = final.replace('_timestamp_', timestamp)
        final = final.replace('_title_', post.title)
        try:
            final = final.replace('_author_', post.author.name)
        except Exception:
            # Author may be a deleted account (None or a plain string).
            final = final.replace('_author_', '[DELETED]')
        final = final.replace('_subreddit_', post.subreddit.display_name)
        # Rewrite links to the no-participation domain.
        url = post.url
        url = url.replace('http://www.reddit.com', 'http://np.reddit.com')
        final = final.replace('_url_', url)
        slink = post.short_link
        slink = slink.replace('http://', 'http://np.')
        final = final.replace('_nplink_', slink)
        try:
            print(final, file=listfile)
        except:
            # Fallback for characters the output encoding cannot handle:
            # write the line one character at a time, skipping bad ones.
            print('\t' + post.id + ': Charstepping')
            for char in final:
                try:
                    print(char, file=listfile, end='')
                except:
                    pass
            print('',file=listfile)
# Collected matching submissions plus counters for progress reporting.
lista = []
count = 0
counta = 0
try:
    print('Scanning.')
    subreddit = r.get_subreddit(SUBREDDIT)
    posts = subreddit.get_new(limit=MAXPOSTS)
    for post in posts:
        if not post.is_self or IGNORESELF is False:
            try:
                author = post.author.name
            except Exception:
                # Deleted account; use a placeholder for name matching.
                author = '[DELETED]'
            # Keep the post when it matches any keyword, domain or author.
            if any(m.lower() in post.title.lower() for m in KEYWORDS) \
            or any(m.lower() in post.url.lower() for m in KEYDOMAINS) \
            or any(m.lower() == author.lower() for m in KEYNAMES):
                lista.append(post)
                counta += 1
        count += 1
        print(str(count) + ' / ' + MAXS + ' | ' + str(counta))
    # Normalize deleted authors so the sort/dump code below never sees None.
    for item in lista:
        if item.author is None:
            item.author = '[DELETED]'
except Exception:
    print('EMERGENCY')
print('Collected ' + str(counta) + ' items.')


def _author_sort_key(item):
    # item.author is either a Redditor object or the string '[DELETED]'
    # (set above); handle both when sorting by author name.
    author = item.author
    return (author if isinstance(author, str) else author.name).lower()


try:
    print('Writing Date file')
    lista.sort(key=lambda x: x.created_utc, reverse=False)
    listfile = open(PRINTFILE + '_date.txt', 'w')
    work(lista)
    listfile.close()
    print('Writing Subreddit file')
    lista.sort(key=lambda x: x.subreddit.display_name.lower(), reverse=False)
    listfile = open(PRINTFILE + '_subreddit.txt', 'w')
    work(lista)
    listfile.close()
    print('Writing Title file')
    lista.sort(key=lambda x: x.title.lower(), reverse=False)
    listfile = open(PRINTFILE + '_title.txt', 'w')
    work(lista)
    listfile.close()
    print('Writing Author file')
    # bug fix: the original key (x.author.name.lower()) crashed with an
    # AttributeError whenever the author had been replaced by '[DELETED]'.
    lista.sort(key=_author_sort_key, reverse=False)
    listfile = open(PRINTFILE + '_author.txt', 'w')
    work(lista)
    listfile.close()
except Exception:
    # bug fix: traceback.print_tb() requires a traceback argument and the
    # failure message below was a bare expression that was never printed.
    traceback.print_exc()
    print('EMERGENCY: txt writing failed')
print('Saving to Pickle.')
class Posted(object):
    """Plain attribute container used to detach post data from praw."""
    pass
# Convert each praw submission into a plain dict so the result can be
# pickled without keeping praw objects (and their sessions) alive.
listc = []
for item in lista:
    obj = Posted()
    obj.id = item.id
    obj.fullname = item.fullname
    obj.created_utc = item.created_utc
    obj.title = item.title
    obj.subreddit = item.subreddit.display_name
    obj.url = item.url
    obj.short_link = item.short_link
    try:
        obj.author = item.author.name
    except:
        # item.author may already be the string '[DELETED]'.
        obj.author = '[DELETED]'
    if item.is_self is True:
        obj.is_self = True
        obj.selftext = item.selftext
    else:
        obj.is_self = False
    listc.append(obj.__dict__)
filec = open(PRINTFILE + '.p', 'wb')
pickle.dump(listc, filec)
filec.close()
print('Done.')
e8d5737be4a024a7d11423b09416200eaeadcd17 | 8,222 | py | Python | src/pynadc/scia/db.py | rmvanhees/pynadc | c34d722dea8bfdf357271da18df0fb08d612a1ba | [
"BSD-3-Clause"
] | 1 | 2019-02-17T06:52:17.000Z | 2019-02-17T06:52:17.000Z | src/pynadc/scia/db.py | rmvanhees/pynadc | c34d722dea8bfdf357271da18df0fb08d612a1ba | [
"BSD-3-Clause"
] | null | null | null | src/pynadc/scia/db.py | rmvanhees/pynadc | c34d722dea8bfdf357271da18df0fb08d612a1ba | [
"BSD-3-Clause"
] | 1 | 2021-03-26T10:50:44.000Z | 2021-03-26T10:50:44.000Z | """
This file is part of pynadc
https://github.com/rmvanhees/pynadc
Methods to query the NADC Sciamachy SQLite database
Copyright (c) 2012-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from pathlib import Path
import sqlite3
# --------------------------------------------------
def get_product_by_name(args=None, dbname=None, product=None,
                        to_screen=False, dump=False, debug=False):
    """
    Query NADC Sciamachy SQLite database on product name

    Input
    -----
    args      : namespace with attributes dbname, product, dump, debug;
                overrides the keyword parameters when given
    dbname    : full path to Sciamachy SQLite database
    product   : name of product [value required]
    to_screen : print query result to standard output [default: False]
    dump      : return database content about product, instead of full-path
    debug     : do not query data base, but display SQL query [default: False]

    Output
    ------
    return full-path to product [default], the database row (dump=True),
    or [] when the product/database is not found
    """
    if args:
        dbname = args.dbname
        product = args.product
        dump = args.dump
        debug = args.debug

    if dbname is None:
        print('Fatal, SQLite database is not specified')
        return []

    if not Path(dbname).is_file():
        print('Fatal, can not find SQLite database: %s' % dbname)
        return []

    # The processing level is encoded in the product-name prefix.
    if product[0:10] == 'SCI_NL__0P':
        table = 'meta__0P'
    elif product[0:10] == 'SCI_NL__1P':
        table = 'meta__1P'
    else:
        table = 'meta__2P'

    select_str = '*' if dump else 'path,name,compression'
    # Parameterized query for the product name; the table name comes from
    # the fixed set above and is safe to interpolate.
    query_str = 'select {} from {} where name=?'.format(select_str, table)

    if debug:
        # Show the query without touching the database.
        print(query_str, (product,))
        return []

    # pylint: disable=no-member
    conn = sqlite3.connect(dbname)
    try:
        if dump:
            conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute(query_str, (product,))
        row = cur.fetchone()
    finally:
        # bug fix: the original leaked the connection on the success path.
        conn.close()

    if row is None:
        return []

    if to_screen:
        if dump:
            for name in row.keys():
                print(name, '\t', row[name])
        else:
            if row[2] == 0:
                print(Path(*row[:-1]))
            else:
                print(Path(*row[:-1]).with_suffix('.gz'))

    if dump:
        return row

    # compression flag (row[2]) selects plain vs gzipped file name.
    if row[2] == 0:
        return str(Path(*row[:-1]))
    return str(Path(*row[:-1]).with_suffix('.gz'))
# --------------------------------------------------
def get_product_by_type(args=None, dbname=None, prod_type=None,
                        proc_stage=None, proc_best=None,
                        orbits=None, date=None, rtime=None,
                        to_screen=False, dump=False, debug=False):
    """
    Query NADC Sciamachy SQLite database on product type with data selections

    Input
    -----
    args      : namespace with attributes dbname, type, proc, best, orbit,
                date, rtime, dump, debug; overrides the keyword parameters
    dbname    : full path to Sciamachy SQLite database
    prod_type : level of product, available 0, 1, 2 [value required]
    proc_stage: baseline of product (PROC_STAGE): N, R, P, U, W, ...
                [default: None]
    proc_best : select highest available baseline [default: None]
    orbits    : select on absolute orbit number [default: None]
    date      : select on dateTimeStart [default: None]
    rtime     : select on receiveTime [default: None]
    to_screen : print query result to standard output [default: False]
    debug     : do not query data base, but display SQL query [default: False]

    Output
    ------
    return full-path to selected products [default]
    """
    if args:
        dbname = args.dbname
        prod_type = args.type
        proc_stage = args.proc
        proc_best = args.best
        orbits = args.orbit
        date = args.date
        rtime = args.rtime
        dump = args.dump
        debug = args.debug

    if dbname is None:
        print('Fatal, SQLite database is not specified')
        return []

    if not Path(dbname).is_file():
        print('Fatal, can not find SQLite database: %s' % dbname)
        return []

    # The query is assembled as a list of fragments; the literal element
    # ' where' doubles as a marker for "WHERE clause already started".
    if dump:
        query_str = ['select * from meta__%sP' % prod_type]
    else:
        query_str = ['select path,name,compression from meta__%sP' % prod_type]

    if proc_best:
        # Self-join to keep only the row with the best quality flag
        # (level 0) or the highest processing stage (levels 1/2) per orbit.
        if prod_type == '0':
            query_str.append(' as s1 join (select absOrbit,MAX(q_flag)')
            query_str.append(' as qflag from meta__%sP' % prod_type)
        else:
            query_str.append(' as s1 join (select absOrbit,MAX(procStage)')
            query_str.append(' as proc from meta__%sP' % prod_type)

    if orbits:
        # Single orbit or inclusive orbit range.
        if ' where' not in query_str:
            query_str.append(' where')
        else:
            query_str.append(' and')

        if len(orbits) == 1:
            mystr = ' absOrbit=%-d' % orbits[0]
        else:
            mystr = ' absOrbit between %-d and %-d' % (orbits[0], orbits[1])
        query_str.append(mystr)

    if proc_stage:
        # Build "procStage in ('N','R',...)" from the given characters.
        if ' where' not in query_str:
            query_str.append(' where')
        else:
            query_str.append(' and')

        mystr = ' procStage in ('
        for _c in proc_stage:
            if mystr[-1] != '(':
                mystr += ','
            mystr += '\'' + _c + '\''
        mystr += ')'
        query_str.append(mystr)

    if date:
        # The length of the date string determines the selection window:
        # YYYY -> one year, YYYYMM -> one month, ... YYYYMMDDHHMM -> one minute.
        if ' where' not in query_str:
            query_str.append(' where')
        else:
            query_str.append(' and')

        dtime = '+1 second'
        year = int(date[0:4])
        dtime = '+1 year'

        if len(date) >= 6:
            month = int(date[4:6])
            dtime = '+1 month'
        else:
            month = 1

        if len(date) >= 8:
            day = int(date[6:8])
            dtime = '+1 day'
        else:
            day = 1

        if len(date) >= 10:
            hour = int(date[8:10])
            dtime = '+1 hour'
        else:
            hour = 0

        if len(date) >= 12:
            minu = int(date[10:12])
            dtime = '+1 minute'
        else:
            minu = 0
        _d1 = '{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}'.format(
            year, month, day, hour, minu, 0)
        mystr = ' dateTimeStart between \'%s\' and datetime(\'%s\',\'%s\')'
        query_str.append(mystr % (_d1, _d1, dtime))

    if rtime:
        # rtime is e.g. '3h' or '7d': products received in the last N hours/days.
        if ' where' not in query_str:
            query_str.append(' where')
        else:
            query_str.append(' and')

        mystr = ' receiveDate between datetime(\'now\',\'-%-d %s\')' \
                + ' and datetime(\'now\')'
        if rtime[-1] == 'h':
            query_str.append(mystr % (int(rtime[0:-1]), 'hour'))
        else:
            query_str.append(mystr % (int(rtime[0:-1]), 'day'))

    if proc_best:
        # Close the self-join started above.
        query_str.append(' GROUP by absOrbit) as s2 on')
        query_str.append(' s1.absOrbit=s2.absOrbit')
        if prod_type == '0':
            query_str.append(' and s1.q_flag=s2.qflag')
        else:
            query_str.append(' and s1.procStage=s2.proc')
    else:
        query_str.append(' order by absOrbit ASC, procStage DESC')

    if debug:
        print(''.join(query_str))
        return []

    # pylint: disable=no-member
    row_list = []
    conn = sqlite3.connect(dbname)
    if dump:
        conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    cur.execute(''.join(query_str))
    for row in cur:
        if to_screen:
            if dump:
                print(row)
            else:
                # compression flag (row[2]) selects plain vs gzipped name.
                if row[2] == 0:
                    print(Path(*row[:-1]))
                else:
                    print(Path(*row[:-1]).with_suffix('.gz'))
        else:
            if dump:
                row_list.append(row)
            else:
                if row[2] == 0:
                    row_list.append(str(Path(*row[:-1])))
                else:
                    row_list.append(str(Path(*row[:-1]).with_suffix('.gz')))
    conn.close()

    return row_list
| 29.57554 | 79 | 0.518973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,036 | 0.369253 |
e8d781ec22ac065bc93f930e69187121d239b5c5 | 22,889 | py | Python | project_1_2017/program/flatland.py | pveierland/permve-ntnu-it3708 | 1066d5c1af5c953dbaf129d7e05ce32f2d4292aa | [
"CC0-1.0"
] | null | null | null | project_1_2017/program/flatland.py | pveierland/permve-ntnu-it3708 | 1066d5c1af5c953dbaf129d7e05ce32f2d4292aa | [
"CC0-1.0"
] | null | null | null | project_1_2017/program/flatland.py | pveierland/permve-ntnu-it3708 | 1066d5c1af5c953dbaf129d7e05ce32f2d4292aa | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
import collections
import enum
import itertools
import numpy as np
import pickle
import random
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtSvg import *
from PyQt5.QtWidgets import *
from PyQt5.QtPrintSupport import *
class Action(enum.IntEnum):
    """Relative move the agent can choose on each step."""
    MOVE_LEFT = 0
    MOVE_FORWARD = 1
    MOVE_RIGHT = 2
class Direction(enum.IntEnum):
    """Index into the agent's percept rows (relative to its heading)."""
    LEFT = 0
    FORWARD = 1
    RIGHT = 2
class Entity(enum.IntEnum):
    """Grid-cell contents; the two lowest bits encode the visible class."""
    EMPTY = 0
    WALL = 1
    FOOD = 2
    POISON = 3
    # These entities are mapped to EMPTY by masking two lower bits:
    FOOD_EATEN = 4
    POISON_EATEN = 8
class Heading(enum.IntEnum):
    """Absolute compass heading of the agent (NORTH is towards row 0)."""
    NORTH = 0
    EAST = 1
    SOUTH = 2
    WEST = 3
# Reward received when the agent steps onto each entity type; eaten
# cells yield nothing on revisit.
REWARDS = {
    Entity.EMPTY: 0,
    Entity.WALL: -100,
    Entity.FOOD: 1,
    Entity.FOOD_EATEN: 0,
    Entity.POISON: -4,
    Entity.POISON_EATEN: 0
}
class BaselineAgent(object):
    """Hand-coded rule-based agent used as a reference policy.

    The boolean flags taken from `args` resolve the ambiguous percept
    situations (ties between equally good directions, walls next to food).
    """

    def __init__(self, args):
        self.baseline_go_sideways = args.baseline_go_sideways
        self.baseline_prefer_avoid_wall = args.baseline_prefer_avoid_wall
        self.baseline_prefer_right = args.baseline_prefer_right
        self.baseline_take_food_near_wall = args.baseline_take_food_near_wall

    def act(self, percepts):
        """Choose an Action from the immediate (distance-1) percepts.

        `percepts` rows are [LEFT, FORWARD, RIGHT]; only the adjacent
        cell (column 0) of each row is consulted.
        """
        ip = list(percepts[:,0])

        # L/F/R ambiguity:
        if ip[Direction.LEFT] == ip[Direction.FORWARD] and ip[Direction.FORWARD] == ip[Direction.RIGHT]:
            return (Action.MOVE_FORWARD if not self.baseline_go_sideways else
                    Action.MOVE_RIGHT if self.baseline_prefer_right else
                    Action.MOVE_LEFT)

        # Single side wall ambiguity:
        if ip.count(Entity.WALL) == 1 and ip[Direction.FORWARD] == Entity.FOOD:
            if ip.count(Entity.FOOD) == 2:
                return (Action.MOVE_FORWARD if self.baseline_take_food_near_wall else
                        Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL
                        else Action.MOVE_RIGHT)
            elif self.baseline_prefer_avoid_wall and ip.count(Entity.EMPTY) == 1:
                return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT
            else:
                return Action.MOVE_FORWARD

        # Single wall:
        if ip.count(Entity.WALL) == 1 and ip[Direction.FORWARD] != Entity.WALL:
            if ip.count(Entity.FOOD) == 2:
                return (Action.MOVE_FORWARD if self.baseline_take_food_near_wall else
                        Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL
                        else Action.MOVE_RIGHT)
            elif ip.count(Entity.FOOD) == 1:
                if ip[Direction.FORWARD] == Entity.FOOD:
                    if self.baseline_prefer_avoid_wall and ip.count(Entity.EMPTY) == 1:
                        return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT
                    else:
                        return Action.MOVE_FORWARD
                else:
                    return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT
            elif ip.count(Entity.EMPTY) == 2:
                return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT
            elif ip.count(Entity.EMPTY) == 1:
                return (Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.EMPTY else
                        Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else
                        Action.MOVE_RIGHT)
            elif ip.count(Entity.POISON) == 2:
                return Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else Action.MOVE_RIGHT
            elif ip.count(Entity.POISON) == 1:
                return (Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.POISON else
                        Action.MOVE_LEFT if ip[Direction.RIGHT] == Entity.WALL else
                        Action.MOVE_RIGHT)

        # Prefer food:
        if ip.count(Entity.FOOD) == 1:
            return (Action.MOVE_LEFT if ip[Direction.LEFT] == Entity.FOOD else
                    Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.FOOD else
                    Action.MOVE_RIGHT)
        elif ip.count(Entity.FOOD) == 2:
            if ip[Direction.FORWARD] != Entity.FOOD:
                # L/R ambiguity:
                return Action.MOVE_RIGHT if self.baseline_prefer_right else Action.MOVE_LEFT
            else:
                # S/F ambiguity:
                return (Action.MOVE_FORWARD if not self.baseline_go_sideways else
                        Action.MOVE_RIGHT if ip[Direction.RIGHT] == Entity.FOOD else
                        Action.MOVE_LEFT)

        # Prefer empty:
        if ip.count(Entity.EMPTY) == 1:
            return (Action.MOVE_LEFT if ip[Direction.LEFT] == Entity.EMPTY else
                    Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.EMPTY else
                    Action.MOVE_RIGHT)
        elif ip.count(Entity.EMPTY) == 2:
            if ip[Direction.FORWARD] != Entity.EMPTY:
                # L/R ambiguity:
                return Action.MOVE_RIGHT if self.baseline_prefer_right else Action.MOVE_LEFT
            else:
                # S/F ambiguity:
                return (Action.MOVE_FORWARD if not self.baseline_go_sideways else
                        Action.MOVE_RIGHT if ip[Direction.RIGHT] == Entity.EMPTY else
                        Action.MOVE_LEFT)

        # Prefer poison:
        if ip.count(Entity.POISON) == 1:
            return (Action.MOVE_LEFT if ip[Direction.LEFT] == Entity.POISON else
                    Action.MOVE_FORWARD if ip[Direction.FORWARD] == Entity.POISON else
                    Action.MOVE_RIGHT)
        elif ip.count(Entity.POISON) == 2:
            if ip[Direction.FORWARD] != Entity.POISON:
                # L/R ambiguity:
                return Action.MOVE_RIGHT if self.baseline_prefer_right else Action.MOVE_LEFT
            else:
                return (Action.MOVE_FORWARD if not self.baseline_go_sideways else
                        Action.MOVE_RIGHT if ip[Direction.RIGHT] == Entity.POISON else
                        Action.MOVE_LEFT)

        raise Exception('unknown scenario: {}'.format(ip))
class RandomAgent(object):
    """Agent that picks a uniformly random action each step."""

    def __init__(self, args):
        # No configuration needed; signature matches the other agents.
        pass

    def act(self, percepts):
        # NOTE(review): np.random.choice over a list of IntEnum yields a
        # numpy integer rather than an Action instance; callers compare
        # it numerically, so this works — confirm if Action is required.
        return np.random.choice(list(Action))
class LearningAgent(object):
    """Linear policy over one-hot encoded percepts; base for the
    supervised and reinforcement agents."""

    def __init__(self, args):
        # One weight row per action; inputs are 3 directions * 4 entity
        # classes * sensor_range cells, initialized near zero.
        self.weights = np.random.randn(3, 3 * 4 * args.sensor_range) * 0.001

    def act(self, percepts):
        """Return the action with the highest linear score."""
        return np.argmax(self.evaluate(percepts)[1])

    def evaluate(self, percepts):
        """Return (one-hot inputs, per-action scores) for `percepts`."""
        inputs = encode_percepts_as_one_hot(percepts)
        outputs = np.dot(self.weights, inputs)
        return inputs, outputs

    def update_weights(self, learning_rate, delta, inputs):
        # Outer product: each action's error scales the active inputs.
        self.weights += learning_rate * np.dot(delta.reshape((-1, 1)), inputs.reshape((1, -1)))
class ReinforcementAgent(LearningAgent):
    """Q-learning on top of the linear model in LearningAgent."""

    def __init__(self, args):
        super().__init__(args)

    def train(self, percepts, percepts_next, learning_rate, discount_factor, reward):
        """One temporal-difference update for the greedy action."""
        inputs, outputs = self.evaluate(percepts)
        action = np.argmax(outputs)
        q_current = outputs[action]
        # Bootstrap from the best Q-value of the successor state.
        q_next = np.amax(self.evaluate(percepts_next)[1])
        # Only the taken action's row receives the TD error.
        delta = (encode_int_as_one_hot(action, 3) *
                 (reward + discount_factor * q_next - q_current))
        self.update_weights(learning_rate, delta, inputs)
class SupervisedAgent(LearningAgent):
    """Softmax classifier trained on (percepts, target_action) pairs."""

    def __init__(self, args):
        super().__init__(args)

    def train(self, percepts, learning_rate, target_action):
        """One cross-entropy gradient step towards `target_action`."""
        inputs, outputs = self.evaluate(percepts)
        outputs -= np.max(outputs) # Shift values for numerical stability
        softmax = np.exp(outputs) / np.sum(np.exp(outputs))
        correct_choice = encode_int_as_one_hot(target_action, 3)
        # Gradient of cross-entropy w.r.t. the logits: target - softmax.
        delta = correct_choice - softmax
        self.update_weights(learning_rate, delta, inputs)
def apply_action(world, agent_position, agent_heading, action):
    """Advance the agent one step and mutate `world` in place.

    Returns (reward, done, new_position, new_heading). `done` is set when
    the agent walks into a wall; food/poison cells are rewritten to their
    eaten variants so they cannot be consumed twice.
    """
    # Turning rotates the heading; moving forward keeps it unchanged.
    if action == Action.MOVE_LEFT:
        agent_heading = (agent_heading + 3) % 4
    elif action == Action.MOVE_RIGHT:
        agent_heading = (agent_heading + 1) % 4

    # Row/column offset per heading (row 0 is the top of the grid).
    row_step, col_step = {
        Heading.NORTH: (-1, 0),
        Heading.EAST: (0, 1),
        Heading.SOUTH: (1, 0),
        Heading.WEST: (0, -1),
    }[agent_heading]

    # Move one cell, clamped to the grid bounds.
    agent_position = (
        np.clip(agent_position[0] + row_step, 0, world.shape[0] - 1),
        np.clip(agent_position[1] + col_step, 0, world.shape[1] - 1))

    entity = world[agent_position]
    reward = REWARDS[entity]
    done = entity == Entity.WALL

    if entity == Entity.FOOD:
        world[agent_position] = Entity.FOOD_EATEN
    elif entity == Entity.POISON:
        world[agent_position] = Entity.POISON_EATEN

    return reward, done, agent_position, agent_heading
def benchmark_agent(agent, iterations, args):
    """Return the agent's mean episode score over `iterations` freshly
    generated random worlds."""
    def episode_points():
        # One full episode in a new world; evaluate_agent returns the
        # accumulated points as its fifth element.
        world, position, heading = create_world(
            args.world_width, args.world_height,
            args.food_ratio, args.poison_ratio)
        return evaluate_agent(world, args.max_steps, args.sensor_range,
                              agent, position, heading)[4]

    return sum(episode_points() for _ in range(iterations)) / iterations
def create_world(width, height, food_ratio, poison_ratio):
    """Build a random grid world and an initial agent state.

    Returns (world, agent_position, agent_heading). The agent starts on
    the first empty cell in row-major order with a random heading.
    NOTE(review): `width` feeds axis 0 and `height` axis 1, the opposite
    of the usual row/column convention — confirm against the callers.
    """
    world = np.full((width + 2, height + 2), Entity.EMPTY, dtype=int)

    # Add wall border
    world[ 0, :] = Entity.WALL
    world[-1, :] = Entity.WALL
    world[ :, 0] = Entity.WALL
    world[ :,-1] = Entity.WALL

    # Sprinkle food over empty cells with probability food_ratio ...
    world[np.where(np.logical_and(
        world == Entity.EMPTY,
        np.random.choice([True, False], world.shape, p=[food_ratio, 1.0 - food_ratio])))] \
        = Entity.FOOD

    # ... then poison over the cells that are still empty.
    world[np.where(np.logical_and(
        world == Entity.EMPTY,
        np.random.choice([True, False], world.shape, p=[poison_ratio, 1.0 - poison_ratio])))] \
        = Entity.POISON

    agent_position = list(zip(*np.where(world == Entity.EMPTY)))[0]
    agent_heading = np.random.choice(list(Heading))

    return world, agent_position, agent_heading
def encode_int_as_one_hot(value, k):
    """Return a length-k float vector with a single 1.0 at index `value`."""
    return np.eye(k)[value]
def encode_percepts_as_one_hot(percepts):
    """Flatten a (3, sensor_range) percept grid of entity codes (0-3)
    into a concatenated one-hot vector with 4 classes per cell."""
    cells = np.concatenate(percepts)
    encoded = np.zeros((cells.shape[0], 4))
    encoded[np.arange(cells.shape[0]), cells] = 1
    return encoded.flatten()
def evaluate_agent(world, steps, sensor_range, agent, agent_position, agent_heading):
    """Run one episode of at most `steps` actions in a copy of `world`.

    The episode also ends early when the agent hits a wall. Returns
    (final world, position history, percepts history, action history,
    total points).
    """
    # Work on a copy so the caller's world is left untouched.
    world = np.copy(world)

    done = False
    points = 0

    position_history = [agent_position]
    percepts_history = []
    action_history = []

    while not done and steps > 0:
        percepts = get_percepts(world, sensor_range, agent_position, agent_heading)
        action = agent.act(percepts)

        reward, done, agent_position, agent_heading = apply_action(
            world, agent_position, agent_heading, action)

        # Copy percepts since get_percepts views may alias the world.
        percepts_history.append(np.copy(percepts))
        action_history.append(action)
        position_history.append(agent_position)

        points += reward
        steps -= 1

    return world, position_history, percepts_history, action_history, points
def get_percepts(world, sensor_range, agent_position, agent_heading):
    """Return the agent's (3, sensor_range) view of the world.

    Row 0/1/2 hold the cells to the agent's left/front/right, ordered
    from nearest to farthest.
    """
    # Pad the world with walls so views near the border stay in bounds.
    padded_world = np.full(
        (world.shape[0] + 2 * sensor_range, world.shape[1] + 2 * sensor_range),
        Entity.WALL, dtype=int)
    padded_world[sensor_range:sensor_range + world.shape[0],
                 sensor_range:sensor_range + world.shape[1]] = world

    # Rotate perception according to agent heading and mask two lower bits
    # such that FOOD_EATEN and POISON_EATEN are mapped to EMPTY.
    agent_percepts = np.rot90(
        padded_world[agent_position[0]:agent_position[0] + 2 * sensor_range + 1,
                     agent_position[1]:agent_position[1] + 2 * sensor_range + 1],
        agent_heading) & 0x3

    # Slice the rotated window into left/forward/right rays, each read
    # outward from the agent's cell at the window center.
    percepts = np.stack((
        agent_percepts[sensor_range, sensor_range - 1::-1], # Left
        agent_percepts[sensor_range - 1::-1, sensor_range], # Forward
        agent_percepts[sensor_range, sensor_range + 1:]))   # Right

    return percepts
def render(output_filename, world, agent_path):
    """Render `world` and the agent's path to a PDF file via Qt.

    `agent_path` is the sequence of (row, column) positions; the start
    cell is marked with an X and the path thickens as time advances.
    """
    # A QApplication must exist before any Qt painting; the offscreen
    # platform avoids requiring a display.
    app = QApplication([ '-platform', 'offscreen'])

    cell_size = 50
    margin_size = 5
    symbol_size = 0.35

    colors = {
        'line': QColor( 51, 51, 51),
        'path': QColor( 51, 51, 51),
        Entity.WALL: QColor( 88, 89, 91),
        Entity.FOOD: QColor( 28, 150, 32),
        Entity.FOOD_EATEN: QColor(135, 243, 132),
        Entity.POISON: QColor(255, 153, 0),
        Entity.POISON_EATEN: QColor(204, 51, 102)
    }

    # Size the PDF page to the grid plus margins (printer units are inches).
    printer = QPrinter()
    printer.setOutputFormat(QPrinter.PdfFormat)
    printer.setOutputFileName(output_filename)
    printer.setPageMargins(0, 0, 0, 0, QPrinter.Inch)
    printer.setPageSize(QPageSize(
        QSizeF(float(world.shape[1] * cell_size + 2 * margin_size) / printer.resolution(),
               float(world.shape[0] * cell_size + 2 * margin_size) / printer.resolution()),
        QPageSize.Inch))

    painter = QPainter(printer)
    painter.translate(margin_size, margin_size)
    painter.setPen(QPen(colors['line'], 0))

    # Grid lines.
    for y in range(world.shape[0] + 1):
        painter.drawLine(0,
                         cell_size * y,
                         cell_size * world.shape[1],
                         cell_size * y)

    for x in range(world.shape[1] + 1):
        painter.drawLine(cell_size * x,
                         0,
                         cell_size * x,
                         cell_size * world.shape[0])

    # Draw X marking starting location
    painter.drawLine(cell_size * agent_path[0][1],
                     cell_size * agent_path[0][0],
                     cell_size * (agent_path[0][1] + 1),
                     cell_size * (agent_path[0][0] + 1))
    painter.drawLine(cell_size * (agent_path[0][1] + 1),
                     cell_size * agent_path[0][0],
                     cell_size * agent_path[0][1],
                     cell_size * (agent_path[0][0] + 1))

    # One filled circle per non-empty cell, colored by entity type.
    for row in range(world.shape[0]):
        for column in range(world.shape[1]):
            entity = world[row, column]
            if entity != Entity.EMPTY:
                painter.setBrush(QBrush(colors[entity]))
                painter.drawEllipse(
                    QPointF(cell_size * (column + 0.5),
                            cell_size * (row + 0.5)),
                    symbol_size * float(cell_size),
                    symbol_size * float(cell_size))

    # Path segments grow thicker over time so direction is visible.
    pen_thickness_increment = 0.2
    pen_thickness = 2.0

    for i, (first, second) in enumerate(zip(agent_path, agent_path[1:])):
        pen_thickness += pen_thickness_increment
        painter.setPen(QPen(colors['path'], pen_thickness, Qt.SolidLine, Qt.RoundCap))
        painter.drawLine(
            QPointF(cell_size * (first[1] + 0.5),
                    cell_size * (first[0] + 0.5)),
            QPointF(cell_size * (second[1] + 0.5),
                    cell_size * (second[0] + 0.5)))

    painter.end()
def main():
    """Command-line entry point for the flatland experiment: build or load an
    agent, then optionally train, save, evaluate, report, compare against the
    baseline, and render a sample episode according to the parsed flags."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--agent',
                        choices=['baseline', 'random', 'supervised', 'reinforcement'])
    parser.add_argument('--baseline_go_sideways', action='store_true')
    parser.add_argument('--baseline_prefer_avoid_wall', action='store_true')
    parser.add_argument('--baseline_prefer_right', action='store_true')
    parser.add_argument('--baseline_take_food_near_wall', action='store_true')
    parser.add_argument('--compare', action='store_true')
    parser.add_argument('--discount_factor', type=float, default=0.9)
    parser.add_argument('--evaluate', type=int)
    parser.add_argument('--food_ratio', type=float, default=0.5)
    parser.add_argument('--learning_rate', type=float, default=0.01)
    parser.add_argument('--load', type=str)
    parser.add_argument('--max_steps', type=int, default=50)
    parser.add_argument('--poison_ratio', type=float, default=0.5)
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--render_filename', type=str, default='flatland.pdf')
    parser.add_argument('--report_output', action='store_true')
    parser.add_argument('--report_weights', action='store_true')
    parser.add_argument('--save', type=str)
    parser.add_argument('--sensor_range', type=int, default=1)
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--training_round_repetitions', type=int, default=1)
    parser.add_argument('--training_round_size', type=int, default=100)
    parser.add_argument('--training_rounds', type=int, default=25)
    parser.add_argument('--world_height', type=int, default=10)
    parser.add_argument('--world_width', type=int, default=10)
    args = parser.parse_args()
    # Either restore a pickled agent or construct one by class name
    # (e.g. --agent supervised -> SupervisedAgent).
    if args.load:
        agent = pickle.load(open(args.load, 'rb'))
    else:
        if not args.agent:
            print('Agent type must be specified')
            sys.exit(1)
        agent = globals()[args.agent.title() + 'Agent'](args)
    if args.train:
        if not issubclass(agent.__class__, LearningAgent):
            print('Agent class cannot be trained')
            sys.exit(1)
        if args.agent == 'supervised':
            # Supervised training imitates the baseline agent's action choices.
            baseline_agent = BaselineAgent(args)
        mean_agent_scores = np.zeros((args.training_round_repetitions, args.training_rounds))
        for training_round_repetition in range(args.training_round_repetitions):
            # A fresh agent per repetition keeps the score statistics independent.
            agent = globals()[args.agent.title() + 'Agent'](args)
            for training_round in range(args.training_rounds):
                total_points = 0
                for iteration in range(args.training_round_size):
                    world, agent_position, agent_heading = create_world(
                        args.world_width, args.world_height, args.food_ratio, args.poison_ratio)
                    done = False
                    steps = args.max_steps
                    points = 0
                    # One episode: act/train until done or the step budget runs out.
                    while not done and steps > 0:
                        percepts = get_percepts(
                            world, args.sensor_range, agent_position, agent_heading)
                        action = agent.act(percepts)
                        reward, done, agent_position, agent_heading = apply_action(
                            world, agent_position, agent_heading, action)
                        if args.agent == 'supervised':
                            target_action = baseline_agent.act(percepts)
                            agent.train(percepts, args.learning_rate, target_action)
                        elif args.agent == 'reinforcement':
                            updated_percepts = get_percepts(
                                world, args.sensor_range, agent_position, agent_heading)
                            agent.train(percepts, updated_percepts,
                                args.learning_rate, args.discount_factor, reward)
                        points += reward
                        steps -= 1
                    total_points += points
                mean_agent_scores[training_round_repetition, training_round] += \
                    total_points / args.training_round_size
        # Report per-round mean and standard deviation across repetitions.
        print('\n'.join('{} {} {}'.format(training_round + 1, mean, std)
                        for training_round, mean, std in zip(
                            range(args.training_rounds),
                            np.mean(mean_agent_scores, axis=0),
                            np.std(mean_agent_scores, axis=0))))
    if args.save:
        pickle.dump(agent, open(args.save, 'wb'))
    if args.evaluate:
        mean_agent_score = benchmark_agent(agent, args.evaluate, args)
        print(mean_agent_score)
    if args.report_output:
        # LaTeX table rows: one row per 3-cell percept scenario with the
        # agent outputs and the chosen action.
        for i, scenario in enumerate(itertools.product([Entity.EMPTY, Entity.WALL, Entity.FOOD, Entity.POISON], repeat=3)):
            percepts = np.array(scenario).reshape(3, 1)
            inputs, outputs = agent.evaluate(percepts)
            action = np.argmax(outputs)
            print(' & '.join(
                [str(i + 1)] +
                ['\\textsc{{{}}}'.format(str(Entity(entity))) for entity in scenario] +
                ['${:.5f}$'.format(float(output)) for output in outputs] +
                ['\\textsc{{{}}}'.format(str(Action(action))), '~ \\\\']))
    if args.report_weights:
        # LaTeX table of the agent's weight matrix, one row per action.
        print('&{}\\\\'.format('&'.join(' \\textsc{{{}}} '.format(
            ''.join(x)) for x in itertools.product('LFR', 'EWFP'))))
        for i, action in enumerate(['Left', 'Forward', 'Right']):
            print('\\textsc{{{}}} &{}\\\\'.format(
                action, '&'.join(' \\textsc{{{:.5f}}} '.format(weight)
                for weight in list(agent.weights[i, :]))))
    if args.compare:
        # Show every percept scenario where the agent disagrees with baseline.
        baseline_agent = BaselineAgent(args)
        for scenario in itertools.product([Entity.EMPTY, Entity.WALL, Entity.FOOD, Entity.POISON], repeat=3):
            percepts = np.array(scenario).reshape(3, 1)
            action = agent.act(percepts)
            baseline_action = baseline_agent.act(percepts)
            if action != baseline_action:
                print('{} -> Agent: {} Baseline: {}'.format(scenario, str(Action(action)), str(Action(baseline_action))))
    if args.render:
        # Run one episode and render the world plus the agent's path to PDF.
        world, agent_position, agent_heading = create_world(
            args.world_width, args.world_height, args.food_ratio, args.poison_ratio)
        world, position_history, percepts_history, action_history, points = evaluate_agent(
            world, args.max_steps, args.sensor_range, agent, agent_position, agent_heading)
        render(args.render_filename, world, position_history)
        print(points)
if __name__ == '__main__':
    main()
| 41.315884 | 123 | 0.590065 | 7,750 | 0.338591 | 0 | 0 | 0 | 0 | 0 | 0 | 1,607 | 0.070208 |
e8d83d7f2cf5e884bff990da5f5c7bb6b671dc63 | 745 | py | Python | cogs/Data.py | AmashiSenpai/AmashiDiscordBot | c5314572441ed820a26d987ba43e67624df7a97a | [
"MIT"
] | null | null | null | cogs/Data.py | AmashiSenpai/AmashiDiscordBot | c5314572441ed820a26d987ba43e67624df7a97a | [
"MIT"
] | null | null | null | cogs/Data.py | AmashiSenpai/AmashiDiscordBot | c5314572441ed820a26d987ba43e67624df7a97a | [
"MIT"
] | null | null | null | from pycord.discord.ext import commands
import pycord.discord as discord
from pycord.discord import Embed
import requests
import json
from discord import Embed
class Data(commands.Cog):
    """Cog exposing small diagnostic commands (bot latency, Discord status)."""

    def __init__(self, bot) -> None:
        self.bot: commands.Bot = bot

    @commands.command()
    async def ping(self, ctx):
        """Reply with the bot's current websocket latency."""
        latency_ms = round(self.bot.latency * 1000)
        await ctx.send(f"My Ping: {latency_ms}ws")

    @commands.command()
    async def discordstatus(self, ctx):
        """Fetch Discord's public status metrics and reply with the mean latency."""
        response = requests.get('https://discordstatus.com/metrics-display/5k2rt9f7pmny/day.json')
        payload = json.loads(response.text)
        mean_latency = round(payload['summary']['mean'])
        status_embed = Embed(description=f"Current: {mean_latency}")
        await ctx.send(embed=status_embed)
def setup(bot):
    # Entry point used by the discord extension loader to register this cog.
    bot.add_cog(Data(bot))
| 24.032258 | 89 | 0.699329 | 540 | 0.724832 | 0 | 0 | 439 | 0.589262 | 395 | 0.530201 | 147 | 0.197315 |
e8d86181d638617f52c86437ff758dca3b1df2e4 | 17,743 | py | Python | scripts/Run.py | ekg/shasta | e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01 | [
"BSD-3-Clause"
] | null | null | null | scripts/Run.py | ekg/shasta | e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01 | [
"BSD-3-Clause"
] | null | null | null | scripts/Run.py | ekg/shasta | e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
from SetupRunDirectory import verifyDirectoryFiles, setupRunDirectory
from CleanupRunDirectory import cleanUpRunDirectory
from RunAssembly import verifyConfigFiles, verifyFastaFiles, runAssembly, initializeAssembler
from SaveRun import saveRun
import configparser
from datetime import datetime
from shutil import copyfile
import subprocess
import signal
import traceback
import argparse
import sys
import gc
import os
def getDatetimeString():
    """
    Build a timestamp string (year_month_day_hour_minute_second_microsecond)
    suitable for uniquely naming output folders.
    """
    moment = datetime.now()
    fields = (moment.year, moment.month, moment.day,
              moment.hour, moment.minute, moment.second, moment.microsecond)
    return "_".join(str(field) for field in fields)
def ensureDirectoryExists(directoryPath, i=0):
    """
    Create *directoryPath* if missing, recursing upward to create absent
    parents first. Warns once the recursion gets suspiciously deep.

    :param directoryPath: directory to create if it does not already exist
    :param i: current recursion depth (internal bookkeeping)
    :return: None
    """
    if i > 3:
        print("WARNING: generating subdirectories of depth %d, please verify path is correct: %s" % (i, directoryPath))
    if os.path.exists(directoryPath):
        return
    try:
        os.mkdir(directoryPath)
    except FileNotFoundError:
        # The parent is missing: create it first, then retry this level.
        ensureDirectoryExists(os.path.dirname(directoryPath), i=i + 1)
        if not os.path.exists(directoryPath):
            os.mkdir(directoryPath)
def overrideDefaultConfig(config, args):
    """
    Copy any user-provided command-line overrides into their appropriate
    (section, option) slots of the parsed config.

    :param config: configparser.ConfigParser with the default shasta settings
    :param args: argparse.Namespace of command-line arguments; an attribute
        of None (or a missing attribute) means "keep the config default"
    :return: the same config object, mutated in place
    """
    # (config section, config option, args attribute). Data-driven instead of
    # the previous 23-branch if-chain, so adding a parameter is a one-liner.
    overrides = [
        ("Reads", "minReadLength", "minReadLength"),
        ("Kmers", "k", "k"),
        ("Kmers", "probability", "probability"),
        ("MinHash", "m", "m"),
        ("MinHash", "minHashIterationCount", "minHashIterationCount"),
        ("MinHash", "maxBucketSize", "maxBucketSize"),
        ("MinHash", "minFrequency", "minFrequency"),
        ("Align", "maxSkip", "maxSkip"),
        ("Align", "maxMarkerFrequency", "maxMarkerFrequency"),
        ("Align", "minAlignedMarkerCount", "minAlignedMarkerCount"),
        ("Align", "maxTrim", "maxTrim"),
        ("ReadGraph", "minComponentSize", "minComponentSize"),
        ("ReadGraph", "maxChimericReadDistance", "maxChimericReadDistance"),
        ("MarkerGraph", "minCoverage", "minCoverage"),
        ("MarkerGraph", "maxCoverage", "maxCoverage"),
        ("MarkerGraph", "lowCoverageThreshold", "lowCoverageThreshold"),
        ("MarkerGraph", "highCoverageThreshold", "highCoverageThreshold"),
        ("MarkerGraph", "maxDistance", "maxDistance"),
        ("MarkerGraph", "pruneIterationCount", "pruneIterationCount"),
        ("Assembly", "markerGraphEdgeLengthThresholdForConsensus",
         "markerGraphEdgeLengthThresholdForConsensus"),
        ("Assembly", "useMarginPhase", "useMarginPhase"),
        ("Assembly", "storeCoverageData", "storeCoverageData"),
    ]
    for section, option, attribute in overrides:
        # getattr default of None also tolerates a partially-populated
        # Namespace, which direct attribute access did not.
        value = getattr(args, attribute, None)
        if value is not None:
            config[section][option] = str(value)
    # consensusCaller is special: the config stores the full class name.
    consensusCaller = getattr(args, "consensusCaller", None)
    if consensusCaller is not None:
        config["Assembly"]["consensusCaller"] = str(consensusCaller) + "ConsensusCaller"
    return config
def main(readsSequencePath, outputParentDirectory, Data, largePagesMountPoint, processHandler, savePageMemory, performPageCleanUp, args):
    """Set up a timestamped run directory under *outputParentDirectory*, write
    the merged configuration there, launch RunAssembly.py as a subprocess on
    *readsSequencePath*, then optionally persist and/or clean up page memory.

    NOTE(review): Data and largePagesMountPoint are not referenced in this
    body; they look like interface symmetry with ProcessHandler — confirm
    before removing.
    """
    if not os.path.exists(readsSequencePath):
        raise Exception("ERROR: input file not found: %s" % readsSequencePath)
    # Make sure given sequence file path is absolute, because CWD will be changed later
    readsSequencePath = os.path.abspath(readsSequencePath)
    # Generate output directory to run shasta in
    outputDirectoryName = "run_" + getDatetimeString()
    outputDirectory = os.path.abspath(os.path.join(outputParentDirectory, outputDirectoryName))
    ensureDirectoryExists(outputDirectory)
    # Locate path of default configuration files relative to this script's "binary" file.
    # Use of realpath is needed to make sure symbolic links are resolved.
    scriptPath = os.path.dirname(os.path.realpath(__file__))
    confDirectory = os.path.join(os.path.dirname(scriptPath), "conf")
    defaultConfFilename = "shasta.conf"
    defaultConfPath = os.path.join(confDirectory, defaultConfFilename)
    localConfPath = os.path.join(outputDirectory, "shasta.conf")
    # Parse config file to fill in default parameters
    config = configparser.ConfigParser()
    if not config.read(defaultConfPath):
        raise Exception("Error reading config file %s." % defaultConfPath)
    # Check if any params were specified by user and override the default config
    config = overrideDefaultConfig(config, args)
    # Write updated config file to output directory so RunAssembly.py can be called as a separate process
    with open(localConfPath, "w") as file:
        config.write(file)
    # Add bayesian params file to the output directory if needed
    if args.consensusCaller == "SimpleBayesian":
        defaultMatrixPath = os.path.join(confDirectory, "SimpleBayesianConsensusCaller-1.csv")
        localMatrixPath = os.path.join(outputDirectory, "SimpleBayesianConsensusCaller.csv")
        copyfile(defaultMatrixPath, localMatrixPath)
    # Add marginphase params file to the output directory if needed
    if args.useMarginPhase:
        defaultParamsPath = os.path.join(confDirectory, "MarginPhase-allParams.np.json")
        localParamsPath = os.path.join(outputDirectory, "MarginPhase.json")
        copyfile(defaultParamsPath, localParamsPath)
    # Setup run directory according to SetupRunDirectory.py
    verifyDirectoryFiles(runDirectory=outputDirectory)
    setupRunDirectory(runDirectory=outputDirectory)
    # Ensure prerequisite files are present
    verifyConfigFiles(parentDirectory=outputDirectory)
    verifyFastaFiles(fastaFileNames=[readsSequencePath])
    # Set current working directory to the output dir
    os.chdir(outputDirectory)
    # Launch assembler as a separate process using the saved (updated) config file
    executablePath = os.path.join(scriptPath, "RunAssembly.py")
    arguments = [executablePath, readsSequencePath]
    processHandler.launchProcess(arguments=arguments, working_directory=outputDirectory, wait=True)
    # Save page memory to disk so it can be reused during RunServerFromDisk
    if savePageMemory:
        saveRun(outputDirectory)
    if performPageCleanUp:
        sys.stderr.write("Cleaning up page memory...")
        cleanUpRunDirectory(requireUserInput=False)
        sys.stderr.write("\rCleaning up page memory... Done\n")
class ProcessHandler:
    """Owns the assembler subprocess and cleans up page memory when the
    script is interrupted or terminated early.

    Data / largePagesMountPoint record the page-memory locations associated
    with the run so the signal handler can clean up after the child.
    """

    def __init__(self, Data, largePagesMountPoint, process=None):
        self.process = process
        self.Data = Data
        self.largePagesMountPoint = largePagesMountPoint

    def launchProcess(self, arguments, working_directory, wait):
        """Start the child process exactly once; optionally block until exit.

        :param arguments: argv list handed to subprocess.Popen
        :param working_directory: cwd for the child process
        :param wait: when True, block until the child finishes
        """
        if self.process is None:
            self.process = subprocess.Popen(arguments, cwd=working_directory)
            if wait:
                self.process.wait()
        else:
            exit("ERROR: process already launched")

    def handleExit(self, signum, frame):
        """
        Signal handler for SIGTERM/SIGINT: kill the child (if any), collect
        garbage, and clean up page memory. By default, the native "signal"
        handler passes 2 arguments signum and frame.

        :param signum: received signal number (unused)
        :param frame: current stack frame (unused)
        """
        # (A stray no-op `pass` statement was removed here.)
        if self.process is not None:
            self.process.kill()  # kill or terminate?
        gc.collect()
        self.cleanup()

    def cleanup(self):
        """Release page memory after an abnormal termination, then exit(1)."""
        sys.stderr.write("\nERROR: script terminated or interrupted\n")
        sys.stderr.write("Cleaning up page memory...")
        cleanUpRunDirectory(requireUserInput=False)
        sys.stderr.write("\rCleaning up page memory... Done\n")
        exit(1)
def stringAsBool(s):
    """Map a yes/no style string (case-insensitive) to a bool; exits the
    program with an error message for anything unrecognized."""
    normalized = s.lower()
    if normalized in {"t", "true", "1", "y", "yes"}:
        return True
    if normalized in {"f", "false", "0", "n", "no"}:
        return False
    exit("Error: invalid argument specified for boolean flag: %s" % normalized)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", stringAsBool) # add type keyword to registries
parser.add_argument(
"--inputSequences",
type=str,
required=True,
help="File path of FASTQ or FASTA sequence file containing sequences for assembly"
)
parser.add_argument(
"--savePageMemory",
type="bool",
# default=10,
required=False,
help="Save page memory to disk before clearing the ephemeral page data. \n \
Convenient for post-assembly analysis using RunServerFromDisk.py. \n\n \
Any case insensitive variant of the following is accepted: \n \
t, true, 1, y, yes, f, false, 0, n, no"
)
parser.add_argument(
"--performPageCleanUp",
type="bool",
default="True",
required=False,
help="Whether to perform post-assembly cleanup of page files. \n \
Any case insensitive variant of the following is accepted: \n \
t, true, 1, y, yes, f, false, 0, n, no"
)
parser.add_argument(
"--storeCoverageData",
type="bool",
# default=10,
required=False,
help="Whether to store read-level data: observed bases and run lengths. \n \
Any case insensitive variant of the following is accepted: \n \
t, true, 1, y, yes, f, false, 0, n, no"
)
parser.add_argument(
"--outputDir",
type=str,
default="./output/",
required=False,
help="Desired output directory path (will be created during run time if doesn't exist)"
)
parser.add_argument(
"--minReadLength",
type=int,
# default=1000,
required=False,
help="The minimum read length. Reads shorter than this are skipped on input."
)
parser.add_argument(
"--k",
type=int,
# default=10,
required=False,
help="The length of the k-mers used as markers. \n"
)
parser.add_argument(
"--probability",
type=float,
# default=0.1,
required=False,
help="The probability that a k-mer is a marker. \n \
This is approximately equal to the fraction\n \
of k-mers that will be used as markers."
)
parser.add_argument(
"--m",
type=int,
# default=4,
required=False,
help="The number of consecutive markers that define a MinHash feature."
)
parser.add_argument(
"--minHashIterationCount",
type=int,
# default=100,
required=False,
help="The number of MinHash iterations."
)
parser.add_argument(
"--maxBucketSize",
type=int,
# default=30,
required=False,
help="The maximum bucket size to be used by the MinHash algoritm. \n \
Buckets larger than this are ignored."
)
parser.add_argument(
"--minFrequency",
type=int,
# default=1,
required=False,
help="The minimum number of times a pair of oriented reads \n \
is found by the MinHash algorithm for the pair to \n \
generate an overlap."
)
parser.add_argument(
"--maxSkip",
type=int,
# default=30,
required=False,
help="The maximum number of markers that an alignment is allowed\n \
to skip on either of the oriented reads being aligned."
)
parser.add_argument(
"--maxMarkerFrequency",
type=int,
# default=10,
required=False,
help="Marker frequency threshold. \n \
When computing an alignment between two oriented reads, \n \
marker kmers that appear more than this number of times \n \
in either of the two oriented reads are discarded \n \
(in both oriented reads)."
)
parser.add_argument(
"--minAlignedMarkerCount",
type=int,
# default=100,
required=False,
help="The minimum number of aligned markers in an alignment \n \
in order for the alignment to be considered good and usable."
)
parser.add_argument(
"--maxTrim",
type=int,
# default=30,
required=False,
help="The maximum number of trim markers tolerated at the \n \
beginning and end of an alignment. There can be \n \
up this number of markers between the first/last aligned marker \n \
and the beginning/end of either oriented read \n \
for an alignment to be considered good and usable."
)
parser.add_argument(
"--minComponentSize",
type=int,
# default=100,
required=False,
help="The minimum size (number of oriented reads) of \n \
a connected component to be kept."
)
parser.add_argument(
"--maxChimericReadDistance",
type=int,
# default=2,
required=False,
help="Argument maxChimericReadDistance for flagChimericReads."
)
parser.add_argument(
"--minCoverage",
type=int,
# default=10,
required=False,
help="The minimum and maximum coverage (number of markers) \n \
for a vertex of the marker graph. \n \
Vertices with coverage outside this range are collapsed \n \
away and not generated by computeMarkerGraphVertices."
)
parser.add_argument(
"--maxCoverage",
type=int,
# default=100,
required=False,
help="The minimum and maximum coverage (number of markers) \n \
for a vertex of the marker graph. \n \
Vertices with coverage outside this range are collapsed \n \
away and not generated by computeMarkerGraphVertices."
)
parser.add_argument(
"--lowCoverageThreshold",
type=int,
# default=1,
required=False,
help="Parameters for flagMarkerGraphWeakEdges."
)
parser.add_argument(
"--highCoverageThreshold",
type=int,
# default=1000,
required=False,
help="Parameters for flagMarkerGraphWeakEdges."
)
parser.add_argument(
"--maxDistance",
type=int,
# default=30,
required=False,
help="Parameters for flagMarkerGraphWeakEdges."
)
parser.add_argument(
"--pruneIterationCount",
type=int,
# default=6,
required=False,
help="Number of iterations for pruneMarkerGraphStrongSubgraph."
)
parser.add_argument(
"--markerGraphEdgeLengthThresholdForConsensus",
type=int,
# default=100,
required=False,
help="Used during sequence assembly."
)
parser.add_argument(
"--consensusCaller",
type=str,
required=False,
choices=["Simple", "SimpleBayesian", "Median"],
help="Whether to use Bayesian inference on read lengths during consensus calling"
)
parser.add_argument(
"--useMarginPhase",
type="bool",
# default=True,
required=False,
help="Use margin polisher during consensus. \n\n \
Any case insensitive variant of the following is accepted: \n \
t, true, 1, y, yes, f, false, 0, n, no"
)
args = parser.parse_args()
# Assign default paths for page data
largePagesMountPoint = "/hugepages"
Data = os.path.join(largePagesMountPoint, "Data")
# Initialize a class to deal with the subprocess that is opened for the assembler
processHandler = ProcessHandler(Data=Data, largePagesMountPoint=largePagesMountPoint)
# Setup termination handling to deallocate large page memory, unmount on-disk page data, and delete disk data
# This is done by mapping the signal handler to the member function of an instance of ProcessHandler
signal.signal(signal.SIGTERM, processHandler.handleExit)
signal.signal(signal.SIGINT, processHandler.handleExit)
main(readsSequencePath=args.inputSequences,
outputParentDirectory=args.outputDir,
largePagesMountPoint=largePagesMountPoint,
Data=Data,
args=args,
processHandler=processHandler,
savePageMemory=args.savePageMemory,
performPageCleanUp=args.performPageCleanUp)
| 35.344622 | 137 | 0.650961 | 1,236 | 0.069661 | 0 | 0 | 0 | 0 | 0 | 0 | 7,706 | 0.434312 |
e8d9603c4f23969490a26fe940e3be9b6eaabe3e | 6,651 | py | Python | prestring/output.py | podhmo/prestring | 8a3499377d1b1b2b180809b31bd7536de5c3ec4d | [
"MIT"
] | 8 | 2015-03-05T07:32:52.000Z | 2022-03-11T09:28:21.000Z | prestring/output.py | podhmo/prestring | 8a3499377d1b1b2b180809b31bd7536de5c3ec4d | [
"MIT"
] | 19 | 2016-12-01T03:09:03.000Z | 2021-03-28T05:27:35.000Z | prestring/output.py | podhmo/prestring | 8a3499377d1b1b2b180809b31bd7536de5c3ec4d | [
"MIT"
] | 1 | 2017-07-19T12:39:43.000Z | 2017-07-19T12:39:43.000Z | import typing as t
import typing_extensions as tx
import sys
import logging
import os.path
import dataclasses
import filecmp
from io import StringIO
from .minifs import MiniFS, File, T, DefaultT
from .utils import reify
logger = logging.getLogger(__name__)
# Label for what writing a file would do, used in log lines.
ActionType = tx.Literal["update", "create"]
class Writer(tx.Protocol):
    # Structural interface implemented by the concrete writers in this module.
    def write(self, name: str, file: File[T], *, _retry: bool = False) -> None:
        ...
def cleanup_all(output: "output[DefaultT]") -> None:
    """Delete the whole output root tree (best effort); usable as the
    `cleanup` hook of an `output` instance."""
    import shutil
    logger.info("cleanup %s", output.root)
    shutil.rmtree(output.root, ignore_errors=True)  # todo: dryrun
@dataclasses.dataclass(frozen=False, unsafe_hash=False)
class output(t.Generic[DefaultT]):
    """Context manager that collects generated files in an in-memory MiniFS
    and flushes each of them through a Writer when the with-block exits."""
    root: str  # directory all written paths are joined onto
    prefix: str = ""  # prepended to every emitted file's basename
    suffix: str = ""  # appended to every emitted file's basename
    # for MiniFS
    opener: t.Optional[t.Callable[[], DefaultT]] = None
    sep: str = "/"
    store: t.Dict[str, t.Any] = dataclasses.field(default_factory=dict)
    cleanup: t.Optional[t.Callable[["output[DefaultT]"], None]] = None
    # Behavior toggles; the env vars are read once, when the class is defined.
    verbose: bool = os.environ.get("VERBOSE", "") != ""
    use_console: bool = os.environ.get("CONSOLE", "") != ""
    nocheck: bool = os.environ.get("NOCHECK", "") != ""
    def fullpath(self, name: str) -> str:
        """Join *name* onto root, applying prefix/suffix to the basename only."""
        dirname, basename = os.path.split(name)
        fname = "{}{}{}".format(self.prefix, basename, self.suffix)
        return os.path.join(self.root, os.path.join(dirname, fname))
    def guess_action(self, fullpath: str) -> ActionType:
        """Label what writing *fullpath* would do, based on its existence."""
        if os.path.exists(fullpath):
            return "update"
        else:
            return "create"
    @reify
    def fs(self) -> MiniFS[DefaultT]:
        # Lazily created virtual filesystem; StringIO is the default file factory.
        opener = self.opener or StringIO
        return MiniFS(opener=opener, sep=self.sep)  # type: ignore # xxx
    @reify
    def writer(self) -> Writer:
        # Lazily choose the output strategy: console preview vs real files.
        setup_logging(level=logging.INFO)  # xxx
        if self.use_console:
            return _ConsoleWriter(self)
        else:
            return _ActualWriter(self)
    def __enter__(self) -> MiniFS[DefaultT]:
        return self.fs
    def __exit__(
        self,
        exc: t.Optional[t.Type[BaseException]],
        value: t.Optional[BaseException],
        tb: t.Any,
    ) -> None:
        # Flush every collected file on exit. Note: exc/value/tb are ignored,
        # so files are written even when the with-block raised.
        writer = self.writer
        if not self.use_console and self.cleanup is not None:
            self.cleanup(self)
        for name, f in self.fs.walk():
            if name is None:
                raise RuntimeError("something wrong, name is None")
            writer.write(name, f)
class _ActualWriter:
    """Writer that persists files to disk, optionally skipping files whose
    content is unchanged (to avoid touching mtimes needlessly)."""
    TMP_SUFFIX = "_TMP"
    def __init__(self, output: output[DefaultT]):
        self.output = output
    def write(self, name: str, file: File[T], *, _retry: bool = False) -> None:
        """Write *file* at output.fullpath(name); NOCHECK skips the diff step."""
        if self.output.nocheck:
            self._write_without_check(name, file)
        else:
            self._write_with_check(name, file)
    def _write_with_check(self, name: str, file: File[T]) -> None:
        # Write to a sibling temp file first, then either discard it (content
        # unchanged) or atomically replace the target with it.
        fullpath = self.output.fullpath(name)
        if not os.path.exists(fullpath):
            self._write_without_check(name, file, action="create")
        else:
            tmppath = fullpath + self.TMP_SUFFIX
            with open(tmppath, "w") as wf:
                file.write(wf)
            # NOTE(review): shallow=True compares stat signatures first; the
            # freshly written temp file has a new mtime, so filecmp should
            # fall back to comparing contents — confirm this is intended.
            not_changed = filecmp.cmp(fullpath, tmppath, shallow=True)
            if not_changed:
                action = "no change"
                os.remove(tmppath)
                if self.output.verbose:
                    logger.info("[F]\t%s\t%s", action, fullpath)
            else:
                action = "update"
                os.replace(tmppath, fullpath)
                logger.info("[F]\t%s\t%s", action, fullpath)
    def _write_without_check(
        self,
        name: str,
        file: File[T],
        *,
        action: t.Optional[ActionType] = None,
        _retry: bool = False,
    ) -> None:
        # Direct write; on a missing parent directory, create it and retry
        # exactly once (_retry guards against infinite recursion).
        fullpath = self.output.fullpath(name)
        action = action or self.output.guess_action(fullpath)
        try:
            with open(fullpath, "w") as wf:
                file.write(wf)
            logger.info("[F]\t%s\t%s", action, fullpath)
        except FileNotFoundError:
            if _retry:
                raise
            logger.info("[D]\tcreate\t%s", os.path.dirname(fullpath))
            os.makedirs(os.path.dirname(fullpath), exist_ok=True)
            self._write_without_check(name, file, action="create", _retry=True)
class _ConsoleWriter:
    """Writer that previews output on the console instead of touching disk.

    In non-verbose mode it only logs the action that *would* happen; in
    verbose mode it dumps each file's content (indented) to stdout, sending
    decorations to stderr so stdout stays cleanly pipeable.
    """
    def __init__(
        self,
        output: output[DefaultT],
        *,
        stdout: t.IO[str] = sys.stdout,
        stderr: t.IO[str] = sys.stderr,
    ) -> None:
        self.output = output
        self.stdout = stdout
        self.stderr = stderr
    def write(self, name: str, f: File[T], *, _retry: bool = False) -> None:
        fullpath = self.output.fullpath(name)
        if not self.output.verbose:
            logger.info("[F]\t%s\t%s", self.output.guess_action(fullpath), fullpath)
            return
        print(f"# {fullpath}", file=self.stdout)
        # Dim (ANSI bright-black) separator line on stderr.
        print(
            "\x1b[90m----------------------------------------\x1b[0m", file=self.stderr
        )
        self.stderr.flush()
        o = StringIO()
        f.write(o)
        # Indent the whole body, then strip the indent-only trailing line.
        print(
            "  ",
            o.getvalue().rstrip().replace("\n", "\n  ").rstrip(" "),
            file=self.stdout,
            sep="",
        )
        self.stdout.flush()
        print("\n", file=self.stderr)
        self.stderr.flush()
class _MarkdownWriter:
    """Writer that renders each file as a Markdown section: a `##` heading
    plus a fenced code block on stdout, wrapped in a collapsible
    `<details>` element emitted on stderr."""

    def __init__(
        self,
        output: output[DefaultT],
        *,
        stdout: t.IO[str] = sys.stdout,
        stderr: t.IO[str] = sys.stderr,
    ) -> None:
        self.output = output
        self.stdout = stdout
        self.stderr = stderr

    def write(self, name: str, f: File[T], *, _retry: bool = False) -> None:
        """Emit *f* as a Markdown section for output.fullpath(name)."""
        fullpath = self.output.fullpath(name)
        buffer = StringIO()
        f.write(buffer)
        body = buffer.getvalue().strip()
        out, err = self.stdout, self.stderr
        out.write(f"## {fullpath}\n\n")
        out.flush()
        err.write("<details>\n\n")
        err.flush()
        out.write("```\n")
        out.write(f"{body}\n")
        out.write("```\n\n")
        out.flush()
        err.write("</details>\n\n")
        err.flush()
        out.flush()
def setup_logging(
    *, _logger: t.Optional[logging.Logger] = None, level: int = logging.INFO
) -> None:
    """Attach a bare message-only stderr handler to *_logger* (the module
    logger by default). Idempotent: does nothing if handlers already exist."""
    target = _logger if _logger is not None else logger
    if target.handlers:
        # Already configured; keep repeated calls from stacking handlers.
        return
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter(fmt="%(message)s"))
    target.addHandler(handler)
    target.propagate = False
    logging.basicConfig(level=level)
| 29.959459 | 87 | 0.568787 | 5,721 | 0.860171 | 0 | 0 | 1,846 | 0.277552 | 0 | 0 | 498 | 0.074876 |
2cd13739878101d7497f2cd47bc8682631e79c60 | 4,158 | py | Python | demo.py | Yijun-Mao/CGenerator | 74bf8952b759ad6b2cadcab1c4b92e192c82abeb | [
"MIT"
] | 9 | 2020-11-11T13:21:56.000Z | 2021-12-15T14:13:47.000Z | translate.py | superduduguan/CGenerator | 3d9159ad1dbece9e040d08b89775c7b4c19f7e5f | [
"MIT"
] | null | null | null | translate.py | superduduguan/CGenerator | 3d9159ad1dbece9e040d08b89775c7b4c19f7e5f | [
"MIT"
] | 2 | 2020-11-11T13:22:03.000Z | 2020-11-11T13:38:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import repeat
import os
import numpy as np
import json
from flask import Flask,render_template,url_for,request
import joblib
import traceback
import requests
from bs4 import BeautifulSoup
import re
from onmt.utils.logging import init_logger
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
app = Flask(__name__)
# Product categories the model supports; also used by extract_category to
# match the earliest category name occurring in a page title.
Categories = ["相机", "内存卡", "三脚架","麦克风","行车记录仪","充电器","笔记本电脑","遥控器",
              "音响","手机","智能手表","体脂秤","键盘","鼠标","显示器","打印机","平板电脑","电子书阅读器"]
@app.route('/index.html')
def index():
    # Landing page with the URL input form.
    return render_template('index.html')
@app.route('/about.html')
def about():
    # Static "about" page.
    return render_template('about.html')
@app.route('/contact.html')
def contact():
    # Static "contact" page.
    return render_template('contact.html')
def constraint_iter_func(f_iter):
    """Parse tagger-output JSON lines into per-sentence binary tag lists.

    Each element of *f_iter* is a JSON line containing ``words`` and
    per-token ``class_probabilities``. A token is tagged 1 when its
    positive-class probability exceeds the global ``opt.bu_threshold``.

    :param f_iter: iterable of JSON-encoded strings
    :return: list of lists of 0/1 tags, one inner list per input line
    """
    all_tags = []
    for json_line in f_iter:
        data = json.loads(json_line)
        words = data['words']
        # class_probabilities may be padded beyond the sentence length, so
        # truncate to len(words); p[1] is the positive-class probability.
        probs = [p[1] for p in data['class_probabilities'][:len(words)]]
        tags = [1 if p > opt.bu_threshold else 0 for p in probs]
        all_tags.append(tags)
    return all_tags
def _get_parser():
    # Build the OpenNMT argument parser with config + translation options.
    parser = ArgumentParser(description='translate.py')
    opts.config_opts(parser)
    opts.translate_opts(parser)
    return parser
def extract_category(title, categories=None):
    """Return the category name that occurs earliest in *title*.

    :param title: product page title to scan
    :param categories: optional list of category names to search for;
        defaults to the module-level ``Categories``. Ties on start index are
        broken in favor of the earlier list entry.
    :return: the earliest-occurring category name, or "不知道" (unknown)
        when no category appears in the title
    """
    if categories is None:
        categories = Categories
    min_start = len(title)
    target = None
    for category in categories:
        # str.find is enough here (category names are plain words, not regex
        # patterns) and avoids accidental metacharacter interpretation that
        # the previous re.search-based version was exposed to.
        position = title.find(category)
        if position != -1 and position < min_start:
            min_start = position
            target = category
    if target is None:
        target = "不知道"
    return target
# Module-level setup: parse command-line options and build the translator
# once at import time so every request reuses the same model.
parser = _get_parser()
opt = parser.parse_args()
model_path = opt.models[0]
# Derive the checkpoint step from a filename like ..._step_1000.pt.
step = os.path.basename(model_path)[:-3].split('step_')[-1]
temp = opt.random_sampling_temp
if opt.extra_output_str:
    opt.extra_output_str = '_'+opt.extra_output_str
if opt.output is None:
    # Default output path lives two levels above the model file.
    output_path = '/'.join(model_path.split('/')[:-2])+'/output_%s_%s%s.encoded' % (step, temp, opt.extra_output_str)
    opt.output = output_path
ArgumentParser.validate_translate_opts(opt)
logger = init_logger(opt.log_file)
translator = build_translator(opt, report_score=True)
BASE_LIB='html5lib'
# Desktop browser user-agent so product pages serve their full HTML.
UA='Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
HEADERS={'user-agent':UA}
print("prepared")
@app.route('/index.html',methods=['POST'])
def main():
    """Handle the form POST: fetch the submitted product page, guess its
    category from the page title, and render a generated review comment."""
    if request.method == 'POST':
        url= str(request.form['message']).strip()
        if len(url) == 0 or not url.startswith("https://"):
            return render_template('index.html',prediction = "请输入正确的网址")
        resp = requests.get(url, headers=HEADERS)
        text = resp.text
        soup = BeautifulSoup(text, 'lxml')
        # Drop the site-name suffix from the title (fixed 16-char tail).
        title=soup.title.string[:-16]
        print(title)
        src_shard = extract_category(title)
        print(src_shard)
        try:
            assert src_shard in Categories
            # Replicate the category across the batch so sampling yields
            # several candidate comments in one translator call.
            predictions = translator.translate(
                src=[src_shard.encode(encoding = "utf-8")]*opt.batch_size,
                tgt=None,
                src_dir=opt.src_dir,
                batch_size=opt.batch_size,
                attn_debug=opt.attn_debug,
                tag_shard=None
            )
            # Keep only the first sentence of each candidate, then pick the
            # candidate with the best (max of negated) model score.
            pred_comments = [prediction[0].replace(" ", "").split("。")[0] for prediction in predictions[1]]
            scores = [-torch_score[0].cpu().item() for torch_score in predictions[0]]
            pred_comment = pred_comments[scores.index(max(scores))]
            print(pred_comments)
            print(scores)
        except:
            # NOTE(review): bare except silently maps any failure (network,
            # parsing, model) to the fallback message — consider narrowing.
            traceback.print_exc()
            pred_comment = "不好意思,此类商品暂不支持"
        return render_template('index.html',prediction = pred_comment)
if __name__ == "__main__":
    app.run(host='127.0.0.1',port=5500,debug=True)
| 28.675862 | 117 | 0.642136 | 0 | 0 | 0 | 0 | 1,683 | 0.389223 | 0 | 0 | 824 | 0.190564 |