[Dataset-viewer column header. Fields per record: hexsha, size, ext, lang, max_stars_repo_path / name / head_hexsha / licenses, max_stars_count and star event min/max datetimes, the corresponding max_issues_* and max_forks_* fields, then content, avg_line_length, max_line_length, alphanum_fraction.]
[Record: uploadfile/views.py | repo Briliant-wang/GreaterWMS_Software @ 47a8825bb0da03a21ddfc5afc79cd6a70995c92e | license Apache-2.0 | Python | 45,514 bytes | 1,063 stars]
from rest_framework import viewsets, views
import pandas as pd
import numpy as np
from utils.datasolve import data_validate
from utils.datasolve import is_number
from goods.models import ListModel as goodslist
from goodsunit.models import ListModel as goodsunit
from goodsclass.models import ListModel as goodsclass
from goodsbrand.models import ListModel as goodsbrand
from goodscolor.models import ListModel as goodscolor
from goodsshape.models import ListModel as goodsshape
from goodsspecs.models import ListModel as goodsspecs
from goodsorigin.models import ListModel as goodsorigin
from goods import files as goodsfiles
from supplier.models import ListModel as supplier
from supplier import files as supplierfiles
from customer.models import ListModel as customer
from customer import files as customerfiles
from payment.models import TransportationFeeListModel as freight
from capital.models import ListModel as capital
from goods.serializers import GoodsGetSerializer
from supplier.serializers import SupplierGetSerializer
from customer.serializers import CustomerGetSerializer
from capital.serializers import CapitalGetSerializer
from payment.serializers import FreightGetSerializer
from rest_framework.response import Response
from rest_framework.exceptions import APIException
class GoodlistfileViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return goodslist.objects.filter(openid=self.request.auth.openid)
else:
return goodslist.objects.filter().none()
def get_lang(self):
if self.request.user:
lang = self.request.META.get('HTTP_LANGUAGE')
else:
lang = 'en-us'
if lang == 'zh-hans':
data_header = goodsfiles.cn_data_header()
elif lang == 'en-us':
data_header = goodsfiles.en_data_header()
else:
data_header = goodsfiles.en_data_header()
return data_header
def post(self, request, *args, **kwargs):
data_header = self.get_lang()
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
self.get_queryset().delete()
goodsunit.objects.all().delete()
goodsclass.objects.all().delete()
goodsbrand.objects.all().delete()
goodscolor.objects.all().delete()
goodsshape.objects.all().delete()
goodsspecs.objects.all().delete()
goodsorigin.objects.all().delete()
df = pd.read_excel(files)
df.drop_duplicates(keep='first', inplace=True)
data_list = df.drop_duplicates(subset=[data_header.get('goods_code')], keep='first').values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
continue
else:
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 'N/A'
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 'N/A'
if is_number(str(data_list[i][3])):
if str(data_list[i][3]) == 'nan':
data_list[i][3] = 0
else:
data_list[i][3] = 0
if is_number(str(data_list[i][4])):
if str(data_list[i][4]) == 'nan':
data_list[i][4] = 0
else:
data_list[i][4] = 0
if is_number(str(data_list[i][5])):
if str(data_list[i][5]) == 'nan':
data_list[i][5] = 0
else:
data_list[i][5] = 0
if is_number(str(data_list[i][6])):
if str(data_list[i][6]) == 'nan':
data_list[i][6] = 0
else:
data_list[i][6] = 0
if is_number(str(data_list[i][7])):
if str(data_list[i][7]) == 'nan':
data_list[i][7] = 0
else:
data_list[i][7] = 0
if str(data_list[i][8]) == 'nan':
data_list[i][8] = 'N/A'
if str(data_list[i][9]) == 'nan':
data_list[i][9] = 'N/A'
if str(data_list[i][10]) == 'nan':
data_list[i][10] = 'N/A'
if str(data_list[i][11]) == 'nan':
data_list[i][11] = 'N/A'
if str(data_list[i][12]) == 'nan':
data_list[i][12] = 'N/A'
if str(data_list[i][13]) == 'nan':
data_list[i][13] = 'N/A'
if str(data_list[i][14]) == 'nan':
data_list[i][14] = 'N/A'
if is_number(str(data_list[i][15])):
if str(data_list[i][15]) == 'nan':
data_list[i][15] = 0
else:
data_list[i][15] = 0
if is_number(str(data_list[i][16])):
if str(data_list[i][16]) == 'nan':
data_list[i][16] = 0
else:
data_list[i][16] = 0
goodslist.objects.create(openid=self.request.auth.openid,
goods_code=str(data_list[i][0]).strip(),
goods_desc=str(data_list[i][1]).strip(),
goods_supplier=str(data_list[i][2]).strip(),
goods_weight=data_list[i][3],
goods_w=data_list[i][4],
goods_d=data_list[i][5],
goods_h=data_list[i][6],
unit_volume=data_list[i][7],
goods_unit=str(data_list[i][8]).strip(),
goods_class=str(data_list[i][9]).strip(),
goods_brand=str(data_list[i][10]).strip(),
goods_color=str(data_list[i][11]).strip(),
goods_shape=str(data_list[i][12]).strip(),
goods_specs=str(data_list[i][13]).strip(),
goods_origin=str(data_list[i][14]).strip(),
goods_cost=data_list[i][15],
goods_price=data_list[i][16],
creater=self.request.auth.name
)
goods_unit_list = df.drop_duplicates(subset=[data_header.get('goods_unit')], keep='first').loc[:,
data_header.get('goods_unit')].values
for i in goods_unit_list:
if str(i) == 'nan':
i = 'N/A'
goodsunit.objects.create(openid=self.request.auth.openid,
goods_unit=str(i).strip(),
creater=self.request.auth.name
)
goods_class_list = df.drop_duplicates(subset=[data_header.get('goods_class')], keep='first').loc[:,
data_header.get('goods_class')].values
for i in goods_class_list:
if str(i) == 'nan':
i = 'N/A'
goodsclass.objects.create(openid=self.request.auth.openid,
goods_class=str(i).strip(),
creater=self.request.auth.name
)
goods_brand_list = df.drop_duplicates(subset=[data_header.get('goods_brand')], keep='first').loc[:,
data_header.get('goods_brand')].values
for i in goods_brand_list:
if str(i) == 'nan':
i = 'N/A'
goodsbrand.objects.create(openid=self.request.auth.openid,
goods_brand=str(i).strip(),
creater=self.request.auth.name
)
goods_color_list = df.drop_duplicates(subset=[data_header.get('goods_color')], keep='first').loc[:,
data_header.get('goods_color')].values
for i in goods_color_list:
if str(i) == 'nan':
i = 'N/A'
goodscolor.objects.create(openid=self.request.auth.openid,
goods_color=str(i).strip(),
creater=self.request.auth.name
)
goods_shape_list = df.drop_duplicates(subset=[data_header.get('goods_shape')], keep='first').loc[:,
data_header.get('goods_shape')].values
for i in goods_shape_list:
if str(i) == 'nan':
i = 'N/A'
goodsshape.objects.create(openid=self.request.auth.openid,
goods_shape=str(i).strip(),
creater=self.request.auth.name
)
goods_specs_list = df.drop_duplicates(subset=[data_header.get('goods_specs')], keep='first').loc[:,
data_header.get('goods_specs')].values
for i in goods_specs_list:
if str(i) == 'nan':
i = 'N/A'
goodsspecs.objects.create(openid=self.request.auth.openid,
goods_specs=str(i).strip(),
creater=self.request.auth.name
)
goods_origin_list = df.drop_duplicates(subset=[data_header.get('goods_origin')], keep='first').loc[:,
data_header.get('goods_origin')].values
for i in goods_origin_list:
if str(i) == 'nan':
i = 'N/A'
goodsorigin.objects.create(openid=self.request.auth.openid,
goods_origin=str(i).strip(),
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class SupplierfileViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return supplier.objects.filter(openid=self.request.auth.openid)
else:
return supplier.objects.filter().none()
def get_lang(self):
if self.request.user:
lang = self.request.META.get('HTTP_LANGUAGE')
else:
lang = 'en-us'
if lang == 'zh-hans':
data_header = supplierfiles.cn_data_header()
elif lang == 'en-us':
data_header = supplierfiles.en_data_header()
else:
data_header = supplierfiles.en_data_header()
return data_header
def post(self, request, *args, **kwargs):
data_header = self.get_lang()
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
self.get_queryset().delete()
df = pd.read_excel(files)
df.drop_duplicates(keep='first', inplace=True)
data_list = df.drop_duplicates(subset=[data_header.get('supplier_name')], keep='first').values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
continue
else:
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 'N/A'
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 'N/A'
if is_number(str(data_list[i][3])):
if str(data_list[i][3]) == 'nan':
data_list[i][3] = 0
else:
data_list[i][3] = 0
if str(data_list[i][4]) == 'nan':
data_list[i][4] = 'N/A'
if is_number(str(data_list[i][5])):
if str(data_list[i][5]) == 'nan':
data_list[i][5] = 0
else:
data_list[i][5] = 0
supplier.objects.create(openid=self.request.auth.openid,
supplier_name=str(data_list[i][0]).strip(),
supplier_city=str(data_list[i][1]).strip(),
supplier_address=str(data_list[i][2]).strip(),
supplier_contact=data_list[i][3],
supplier_manager=str(data_list[i][4]).strip(),
supplier_level=data_list[i][5],
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class CustomerfileViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return customer.objects.filter(openid=self.request.auth.openid)
else:
return customer.objects.filter().none()
def get_lang(self):
if self.request.user:
lang = self.request.META.get('HTTP_LANGUAGE')
else:
lang = 'en-us'
if lang == 'zh-hans':
data_header = customerfiles.cn_data_header()
elif lang == 'en-us':
data_header = customerfiles.en_data_header()
else:
data_header = customerfiles.en_data_header()
return data_header
def post(self, request, *args, **kwargs):
data_header = self.get_lang()
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
self.get_queryset().delete()
df = pd.read_excel(files)
df.drop_duplicates(keep='first', inplace=True)
data_list = df.drop_duplicates(subset=[data_header.get('customer_name')], keep='first').values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
continue
else:
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 'N/A'
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 'N/A'
if is_number(str(data_list[i][3])):
if str(data_list[i][3]) == 'nan':
data_list[i][3] = 0
else:
data_list[i][3] = 0
if str(data_list[i][4]) == 'nan':
data_list[i][4] = 'N/A'
if is_number(str(data_list[i][5])):
if str(data_list[i][5]) == 'nan':
data_list[i][5] = 0
else:
data_list[i][5] = 0
customer.objects.create(openid=self.request.auth.openid,
customer_name=str(data_list[i][0]).strip(),
customer_city=str(data_list[i][1]).strip(),
customer_address=str(data_list[i][2]).strip(),
customer_contact=data_list[i][3],
customer_manager=str(data_list[i][4]).strip(),
customer_level=data_list[i][5],
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class CapitalfileViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return capital.objects.filter(openid=self.request.auth.openid)
else:
return capital.objects.filter().none()
def post(self, request, *args, **kwargs):
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
self.get_queryset().delete()
df = pd.read_excel(files)
                # drop_duplicates(inplace=True) returns None, so dedupe in place and then take the values
                df.drop_duplicates(keep='first', inplace=True)
                data_list = df.values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
continue
else:
if is_number(str(data_list[i][1])):
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 0
else:
data_list[i][1] = 0
if is_number(str(data_list[i][2])):
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 0
else:
data_list[i][2] = 0
capital.objects.create(openid=self.request.auth.openid,
capital_name=str(data_list[i][0]).strip(),
capital_qty=data_list[i][1],
capital_cost=data_list[i][2],
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class FreightfileViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return freight.objects.filter(openid=self.request.auth.openid)
else:
return freight.objects.filter().none()
def post(self, request, *args, **kwargs):
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
self.get_queryset().delete()
df = pd.read_excel(files)
                data_list = df.drop_duplicates(keep='first').values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
data_list[i][0] = 'N/A'
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 'N/A'
if is_number(str(data_list[i][2])):
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 0
else:
data_list[i][2] = 0
if is_number(str(data_list[i][3])):
if str(data_list[i][3]) == 'nan':
data_list[i][3] = 0
else:
data_list[i][3] = 0
if is_number(str(data_list[i][4])):
if str(data_list[i][4]) == 'nan':
data_list[i][4] = 0
else:
data_list[i][4] = 0
if str(data_list[i][5]) == 'nan':
data_list[i][5] = 'N/A'
freight.objects.create(openid=self.request.auth.openid,
send_city=str(data_list[i][0]).strip(),
receiver_city=str(data_list[i][1]).strip(),
weight_fee=data_list[i][2],
volume_fee=data_list[i][3],
min_payment=data_list[i][4],
transportation_supplier=str(data_list[i][5]).strip(),
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class GoodlistfileAddViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return goodslist.objects.filter(openid=self.request.auth.openid)
else:
return goodslist.objects.filter().none()
def get_lang(self):
if self.request.user:
lang = self.request.META.get('HTTP_LANGUAGE')
else:
lang = 'en-us'
if lang == 'zh-hans':
data_header = goodsfiles.cn_data_header()
elif lang == 'en-us':
data_header = goodsfiles.en_data_header()
else:
data_header = goodsfiles.en_data_header()
return data_header
def post(self, request, *args, **kwargs):
data_header = self.get_lang()
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
df = pd.read_excel(files)
df.drop_duplicates(keep='first', inplace=True)
data_list = df.drop_duplicates(subset=[data_header.get('goods_code')], keep='first').values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
continue
else:
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 'N/A'
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 'N/A'
if is_number(str(data_list[i][3])):
if str(data_list[i][3]) == 'nan':
data_list[i][3] = 0
else:
data_list[i][3] = 0
if is_number(str(data_list[i][4])):
if str(data_list[i][4]) == 'nan':
data_list[i][4] = 0
else:
data_list[i][4] = 0
if is_number(str(data_list[i][5])):
if str(data_list[i][5]) == 'nan':
data_list[i][5] = 0
else:
data_list[i][5] = 0
if is_number(str(data_list[i][6])):
if str(data_list[i][6]) == 'nan':
data_list[i][6] = 0
else:
data_list[i][6] = 0
if is_number(str(data_list[i][7])):
if str(data_list[i][7]) == 'nan':
data_list[i][7] = 0
else:
data_list[i][7] = 0
if str(data_list[i][8]) == 'nan':
data_list[i][8] = 'N/A'
if str(data_list[i][9]) == 'nan':
data_list[i][9] = 'N/A'
if str(data_list[i][10]) == 'nan':
data_list[i][10] = 'N/A'
if str(data_list[i][11]) == 'nan':
data_list[i][11] = 'N/A'
if str(data_list[i][12]) == 'nan':
data_list[i][12] = 'N/A'
if str(data_list[i][13]) == 'nan':
data_list[i][13] = 'N/A'
if str(data_list[i][14]) == 'nan':
data_list[i][14] = 'N/A'
if is_number(str(data_list[i][15])):
if str(data_list[i][15]) == 'nan':
data_list[i][15] = 0
else:
data_list[i][15] = 0
if is_number(str(data_list[i][16])):
if str(data_list[i][16]) == 'nan':
data_list[i][16] = 0
else:
data_list[i][16] = 0
goodslist.objects.create(openid=self.request.auth.openid,
goods_code=str(data_list[i][0]).strip(),
goods_desc=str(data_list[i][1]).strip(),
goods_supplier=str(data_list[i][2]).strip(),
goods_weight=data_list[i][3],
goods_w=data_list[i][4],
goods_d=data_list[i][5],
goods_h=data_list[i][6],
unit_volume=data_list[i][7],
goods_unit=str(data_list[i][8]).strip(),
goods_class=str(data_list[i][9]).strip(),
goods_brand=str(data_list[i][10]).strip(),
goods_color=str(data_list[i][11]).strip(),
goods_shape=str(data_list[i][12]).strip(),
goods_specs=str(data_list[i][13]).strip(),
goods_origin=str(data_list[i][14]).strip(),
goods_cost=data_list[i][15],
goods_price=data_list[i][16],
creater=self.request.auth.name
)
goods_unit_list = df.drop_duplicates(subset=[data_header.get('goods_unit')], keep='first').loc[:,
data_header.get('goods_unit')].values
for i in goods_unit_list:
if str(i) == 'nan':
i = 'N/A'
goodsunit.objects.create(openid=self.request.auth.openid,
goods_unit=str(i).strip(),
creater=self.request.auth.name
)
goods_class_list = df.drop_duplicates(subset=[data_header.get('goods_class')], keep='first').loc[:,
data_header.get('goods_class')].values
for i in goods_class_list:
if str(i) == 'nan':
i = 'N/A'
goodsclass.objects.create(openid=self.request.auth.openid,
goods_class=str(i).strip(),
creater=self.request.auth.name
)
goods_brand_list = df.drop_duplicates(subset=[data_header.get('goods_brand')], keep='first').loc[:,
data_header.get('goods_brand')].values
for i in goods_brand_list:
if str(i) == 'nan':
i = 'N/A'
goodsbrand.objects.create(openid=self.request.auth.openid,
goods_brand=str(i).strip(),
creater=self.request.auth.name
)
goods_color_list = df.drop_duplicates(subset=[data_header.get('goods_color')], keep='first').loc[:,
data_header.get('goods_color')].values
for i in goods_color_list:
if str(i) == 'nan':
i = 'N/A'
goodscolor.objects.create(openid=self.request.auth.openid,
goods_color=str(i).strip(),
creater=self.request.auth.name
)
goods_shape_list = df.drop_duplicates(subset=[data_header.get('goods_shape')], keep='first').loc[:,
data_header.get('goods_shape')].values
for i in goods_shape_list:
if str(i) == 'nan':
i = 'N/A'
goodsshape.objects.create(openid=self.request.auth.openid,
goods_shape=str(i).strip(),
creater=self.request.auth.name
)
goods_specs_list = df.drop_duplicates(subset=[data_header.get('goods_specs')], keep='first').loc[:,
data_header.get('goods_specs')].values
for i in goods_specs_list:
if str(i) == 'nan':
i = 'N/A'
goodsspecs.objects.create(openid=self.request.auth.openid,
goods_specs=str(i).strip(),
creater=self.request.auth.name
)
goods_origin_list = df.drop_duplicates(subset=[data_header.get('goods_origin')], keep='first').loc[:,
data_header.get('goods_origin')].values
for i in goods_origin_list:
if str(i) == 'nan':
i = 'N/A'
goodsorigin.objects.create(openid=self.request.auth.openid,
goods_origin=str(i).strip(),
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class SupplierfileAddViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return supplier.objects.filter(openid=self.request.auth.openid)
else:
return supplier.objects.filter().none()
def get_lang(self):
if self.request.user:
lang = self.request.META.get('HTTP_LANGUAGE')
else:
lang = 'en-us'
if lang == 'zh-hans':
data_header = supplierfiles.cn_data_header()
elif lang == 'en-us':
data_header = supplierfiles.en_data_header()
else:
data_header = supplierfiles.en_data_header()
return data_header
def post(self, request, *args, **kwargs):
data_header = self.get_lang()
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
df = pd.read_excel(files)
df.drop_duplicates(keep='first', inplace=True)
data_list = df.drop_duplicates(subset=[data_header.get('supplier_name')], keep='first').values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
continue
else:
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 'N/A'
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 'N/A'
if is_number(str(data_list[i][3])):
if str(data_list[i][3]) == 'nan':
data_list[i][3] = 0
else:
data_list[i][3] = 0
if str(data_list[i][4]) == 'nan':
data_list[i][4] = 'N/A'
if is_number(str(data_list[i][5])):
if str(data_list[i][5]) == 'nan':
data_list[i][5] = 0
else:
data_list[i][5] = 0
supplier.objects.create(openid=self.request.auth.openid,
supplier_name=str(data_list[i][0]).strip(),
supplier_city=str(data_list[i][1]).strip(),
supplier_address=str(data_list[i][2]).strip(),
supplier_contact=data_list[i][3],
supplier_manager=str(data_list[i][4]).strip(),
supplier_level=data_list[i][5],
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class CustomerfileAddViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return customer.objects.filter(openid=self.request.auth.openid)
else:
return customer.objects.filter().none()
def get_lang(self):
if self.request.user:
lang = self.request.META.get('HTTP_LANGUAGE')
else:
lang = 'en-us'
if lang == 'zh-hans':
data_header = customerfiles.cn_data_header()
elif lang == 'en-us':
data_header = customerfiles.en_data_header()
else:
data_header = customerfiles.en_data_header()
return data_header
def post(self, request, *args, **kwargs):
data_header = self.get_lang()
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
df = pd.read_excel(files)
df.drop_duplicates(keep='first', inplace=True)
data_list = df.drop_duplicates(subset=[data_header.get('customer_name')], keep='first').values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
continue
else:
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 'N/A'
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 'N/A'
if is_number(str(data_list[i][3])):
if str(data_list[i][3]) == 'nan':
data_list[i][3] = 0
else:
data_list[i][3] = 0
if str(data_list[i][4]) == 'nan':
data_list[i][4] = 'N/A'
if is_number(str(data_list[i][5])):
if str(data_list[i][5]) == 'nan':
data_list[i][5] = 0
else:
data_list[i][5] = 0
customer.objects.create(openid=self.request.auth.openid,
customer_name=str(data_list[i][0]).strip(),
customer_city=str(data_list[i][1]).strip(),
customer_address=str(data_list[i][2]).strip(),
customer_contact=data_list[i][3],
customer_manager=str(data_list[i][4]).strip(),
customer_level=data_list[i][5],
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class CapitalfileAddViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return capital.objects.filter(openid=self.request.auth.openid)
else:
return capital.objects.filter().none()
def post(self, request, *args, **kwargs):
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
df = pd.read_excel(files)
                # drop_duplicates(inplace=True) returns None, so dedupe in place and then take the values
                df.drop_duplicates(keep='first', inplace=True)
                data_list = df.values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
continue
else:
if is_number(str(data_list[i][1])):
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 0
else:
data_list[i][1] = 0
if is_number(str(data_list[i][2])):
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 0
else:
data_list[i][2] = 0
capital.objects.create(openid=self.request.auth.openid,
capital_name=str(data_list[i][0]).strip(),
capital_qty=data_list[i][1],
capital_cost=data_list[i][2],
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
class FreightfileAddViewSet(views.APIView):
"""
create:
Upload One Excel(post)
"""
pagination_class = []
def get_queryset(self):
if self.request.user:
return freight.objects.filter(openid=self.request.auth.openid)
else:
return freight.objects.filter().none()
def post(self, request, *args, **kwargs):
files = self.request.FILES.get('file')
if files:
excel_type = files.name.split('.')[1]
if excel_type in ['xlsx', 'xls', 'csv']:
df = pd.read_excel(files)
                data_list = df.drop_duplicates(keep='first').values
for d in range(len(data_list)):
data_validate(str(data_list[d]))
for i in range(len(data_list)):
if str(data_list[i][0]) == 'nan':
data_list[i][0] = 'N/A'
if str(data_list[i][1]) == 'nan':
data_list[i][1] = 'N/A'
if is_number(str(data_list[i][2])):
if str(data_list[i][2]) == 'nan':
data_list[i][2] = 0
else:
data_list[i][2] = 0
if is_number(str(data_list[i][3])):
if str(data_list[i][3]) == 'nan':
data_list[i][3] = 0
else:
data_list[i][3] = 0
if is_number(str(data_list[i][4])):
if str(data_list[i][4]) == 'nan':
data_list[i][4] = 0
else:
data_list[i][4] = 0
if str(data_list[i][5]) == 'nan':
data_list[i][5] = 'N/A'
freight.objects.create(openid=self.request.auth.openid,
send_city=str(data_list[i][0]).strip(),
receiver_city=str(data_list[i][1]).strip(),
weight_fee=data_list[i][2],
volume_fee=data_list[i][3],
min_payment=data_list[i][4],
transportation_supplier=str(data_list[i][5]).strip(),
creater=self.request.auth.name
)
else:
raise APIException({"detail": "Can Not Support This File Type"})
else:
raise APIException({"detail": "Please Select One File"})
return Response({"detail": "success"})
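The nan/number normalization above is repeated column by column in every upload view. A purely illustrative sketch of how the same rule could be expressed once (the helper name normalize_cell is an assumption, not part of this module):

import math

def normalize_cell(value, numeric=False):
    # Illustrative helper mirroring the per-cell rule used in the views above.
    # Numeric columns: NaN or non-numeric text becomes 0, otherwise keep the value.
    # Text columns: pandas renders missing cells as 'nan', which becomes 'N/A'.
    text = str(value).strip()
    if numeric:
        try:
            number = float(text)
        except ValueError:
            return 0
        return 0 if math.isnan(number) else value
    return 'N/A' if text == 'nan' else value

# Example with a hypothetical row from the goods sheet:
# goods_desc = str(normalize_cell(row[1])).strip()
# goods_weight = normalize_cell(row[3], numeric=True)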
[Stats for uploadfile/views.py: avg_line_length 50.13, max_line_length 117, alphanum_fraction 0.42]
[Record: scrapping/wikipediaScrapper.py | repo marioallaa/imLazy_af (stars fields list marioallaa/imLazyaf) @ 00828fbed4ca8bc64a7cd7665255c1cd49c2fb84 | license MIT | Python | 839 bytes | 1 star]
import wikipedia as w
class WikiScrapping():
def __init__(self, lang='en'):
self.title = lang
w.set_lang(lang)
def search(self, query):
self.query = query
return w.search(query)
def openPage(self, title):
self.title = title
self.page = w.page(self.title)
self.summ = w.summary(title, 10)
return self.summ
def getContent(self):
self.content = self.page.content
return self.content
def getRef(self):
return self.page.url, self.page.references
def openImg(self):
import webbrowser
url = "https://www.google.com/search?q={0}i&source=lnms&tbm=isch&sa=X&ved=0ahUKEwig99z_ns_eAhWB_ywKHakgAQIQ_AUIDigB&biw=1662&bih=815&dpr=1.13".format([s for s in self.title.split(' ')])
return webbrowser.open(url)
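A short usage sketch of the class above; it needs the wikipedia package and network access, and the query string is only an example:

if __name__ == '__main__':
    wiki = WikiScrapping(lang='en')
    print(wiki.search('Alan Turing'))    # candidate page titles
    print(wiki.openPage('Alan Turing'))  # 10-sentence summary of the chosen page
    url, refs = wiki.getRef()            # page URL plus its reference links
    print(url, len(refs))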
[Stats for scrapping/wikipediaScrapper.py: avg_line_length 24.68, max_line_length 193, alphanum_fraction 0.63]
[Record: selfdrive/controls/radard.py | repo tb205gti/openpilot @ 4a468ae24e7b17ca124385919d9e40abe3f64066 | license MIT | Python | 16,491 bytes | 1 star]
#!/usr/bin/env python
import numpy as np
import numpy.matlib
import importlib
import zmq
from collections import defaultdict, deque
import selfdrive.messaging as messaging
from selfdrive.services import service_list
from selfdrive.controls.lib.radar_helpers import Track, Cluster
from selfdrive.config import RADAR_TO_CENTER
from selfdrive.controls.lib.cluster.fastcluster_py import cluster_points_centroid
from selfdrive.swaglog import cloudlog
from cereal import car,log,tesla
from common.params import Params
from common.realtime import set_realtime_priority, Ratekeeper, DT_MDL
from selfdrive.car.tesla.readconfig import read_config_file,CarSettings
DEBUG = False
#vision point
DIMSV = 2
XV, SPEEDV = 0, 1
VISION_POINT = -1
RDR_TO_LDR = 0.
# Time-alignment
rate = 1. / DT_MDL # model and radar are both at 20Hz
v_len = 20 # how many speed data points to remember for t alignment with rdr data
def laplacian_cdf(x, mu, b):
b = np.max([b, 1e-4])
return np.exp(-abs(x-mu)/b)
def match_vision_to_cluster(v_ego, lead, clusters):
# match vision point to best statistical cluster match
probs = []
offset_vision_dist = lead.dist - RADAR_TO_CENTER
for c in clusters:
prob_d = laplacian_cdf(c.dRel, offset_vision_dist, lead.std)
prob_y = laplacian_cdf(c.yRel, lead.relY, lead.relYStd)
prob_v = laplacian_cdf(c.vRel, lead.relVel, lead.relVelStd)
    # This isn't exactly right, but it's a good heuristic
combined_prob = prob_d * prob_y * prob_v
probs.append(combined_prob)
idx = np.argmax(probs)
# if no 'sane' match is found return -1
# stationary radar points can be false positives
dist_sane = abs(clusters[idx].dRel - offset_vision_dist) < max([(offset_vision_dist)*.25, 5.0])
vel_sane = (abs(clusters[idx].vRel - lead.relVel) < 10) or (v_ego + clusters[idx].vRel > 2)
if dist_sane and vel_sane:
return idx
else:
return None
def get_rrext_by_trackId(rrext,trackId):
if rrext is not None:
for p in rrext:
if p.trackId == trackId:
return p
return None
def get_lead(v_ego, ready, clusters, lead_msg, low_speed_override=True):
# Determine leads, this is where the essential logic happens
if len(clusters) > 0 and ready and lead_msg.prob > .5:
lead_idx = match_vision_to_cluster(v_ego, lead_msg, clusters)
else:
lead_idx = None
lead_dict = {'status': False}
lead_dict_ext = {'trackId': 1, 'oClass': 0, 'length': 0.}
if lead_idx is not None:
lead_dict,lead_dict_ext = clusters[lead_idx].get_RadarState(lead_msg.prob)
elif (lead_idx is None) and ready and (lead_msg.prob > .5):
lead_dict = Cluster().get_RadarState_from_vision(lead_msg, v_ego)
if low_speed_override:
low_speed_clusters = [c for c in clusters if c.potential_low_speed_lead(v_ego)]
if len(low_speed_clusters) > 0:
lead_idx = np.argmin([c.dRel for c in low_speed_clusters])
if (not lead_dict['status']) or (low_speed_clusters[lead_idx].dRel < lead_dict['dRel']):
lead_dict,lead_dict_ext = low_speed_clusters[lead_idx].get_RadarState()
return lead_dict,lead_dict_ext
class RadarD(object):
def __init__(self, mocked, RI):
self.current_time = 0
self.mocked = mocked
self.RI = RI
self.tracks = defaultdict(dict)
self.last_md_ts = 0
self.last_controls_state_ts = 0
self.active = 0
# v_ego
self.v_ego = 0.
self.v_ego_hist_t = deque([0], maxlen=v_len)
self.v_ego_hist_v = deque([0], maxlen=v_len)
self.v_ego_t_aligned = 0.
self.ready = False
self.icCarLR = None
if (RI.TRACK_RIGHT_LANE or RI.TRACK_LEFT_LANE) and CarSettings().get_value("useTeslaRadar"):
self.icCarLR = messaging.pub_sock(service_list['uiIcCarLR'].port)
self.lane_width = 3.0
#only used for left and right lanes
self.path_x = np.arange(0.0, 160.0, 0.1) # 160 meters is max
self.poller = zmq.Poller()
self.pathPlanSocket = messaging.sub_sock(service_list['pathPlan'].port, conflate=True, poller=self.poller)
self.dPoly = [0.,0.,0.,0.]
def update(self, frame, delay, sm, rr, has_radar,rrext):
self.current_time = 1e-9*max([sm.logMonoTime[key] for key in sm.logMonoTime.keys()])
use_tesla_radar = CarSettings().get_value("useTeslaRadar")
if sm.updated['controlsState']:
self.active = sm['controlsState'].active
self.v_ego = sm['controlsState'].vEgo
self.v_ego_hist_v.append(self.v_ego)
self.v_ego_hist_t.append(float(frame)/rate)
if sm.updated['model']:
self.ready = True
for socket, _ in self.poller.poll(0):
if socket is self.pathPlanSocket:
pp = messaging.recv_one(self.pathPlanSocket).pathPlan
self.lane_width = pp.laneWidth
self.dPoly = pp.dPoly
path_y = np.polyval(self.dPoly, self.path_x)
ar_pts = {}
for pt in rr.points:
extpt = get_rrext_by_trackId(rrext,pt.trackId)
ar_pts[pt.trackId] = [pt.dRel + RDR_TO_LDR, pt.yRel, pt.vRel, pt.measured, pt.aRel, pt.yvRel, extpt.objectClass, extpt.length, pt.trackId+2, extpt.movingState]
# *** remove missing points from meta data ***
for ids in self.tracks.keys():
if ids not in ar_pts:
self.tracks.pop(ids, None)
# *** compute the tracks ***
for ids in ar_pts:
rpt = ar_pts[ids]
# align v_ego by a fixed time to align it with the radar measurement
cur_time = float(frame)/rate
self.v_ego_t_aligned = np.interp(cur_time - delay, self.v_ego_hist_t, self.v_ego_hist_v)
# distance relative to path
d_path = np.sqrt(np.amin((self.path_x - rpt[0]) ** 2 + (path_y - rpt[1]) ** 2))
# add sign
d_path *= np.sign(rpt[1] - np.interp(rpt[0], self.path_x, path_y))
# create the track if it doesn't exist or it's a new track
if ids not in self.tracks:
self.tracks[ids] = Track()
self.tracks[ids].update(rpt[0], rpt[1], rpt[2], rpt[3], rpt[4],rpt[5],rpt[6],rpt[7],rpt[8],rpt[9], d_path, self.v_ego_t_aligned,use_tesla_radar)
idens = list(self.tracks.keys())
track_pts = np.array([self.tracks[iden].get_key_for_cluster() for iden in idens])
# If we have multiple points, cluster them
if len(track_pts) > 1:
cluster_idxs = cluster_points_centroid(track_pts, 2.5)
clusters = [None] * (max(cluster_idxs) + 1)
for idx in xrange(len(track_pts)):
cluster_i = cluster_idxs[idx]
if clusters[cluster_i] is None:
clusters[cluster_i] = Cluster()
clusters[cluster_i].add(self.tracks[idens[idx]])
elif len(track_pts) == 1:
# FIXME: cluster_point_centroid hangs forever if len(track_pts) == 1
cluster_idxs = [0]
clusters = [Cluster()]
clusters[0].add(self.tracks[idens[0]])
else:
clusters = []
# if a new point, reset accel to the rest of the cluster
for idx in xrange(len(track_pts)):
if self.tracks[idens[idx]].cnt <= 1:
aLeadK = clusters[cluster_idxs[idx]].aLeadK
aLeadTau = clusters[cluster_idxs[idx]].aLeadTau
self.tracks[idens[idx]].reset_a_lead(aLeadK, aLeadTau)
### START REVIEW SECTION
#################################################################
#BB For Tesla integration we will also track Left and Right lanes
#################################################################
if (self.RI.TRACK_RIGHT_LANE or self.RI.TRACK_LEFT_LANE) and use_tesla_radar:
datrl = tesla.ICCarsLR.new_message()
datrl.v1Type = int(0)
datrl.v1Dx = float(0.)
datrl.v1Vrel = float(0.)
datrl.v1Dy = float(0.)
datrl.v1Id = int(0)
datrl.v2Type = int(0)
datrl.v2Dx = float(0.)
datrl.v2Vrel = float(0.)
datrl.v2Dy = float(0.)
datrl.v2Id = int(0)
datrl.v3Type = int(0)
datrl.v3Dx = float(0.)
datrl.v3Vrel = float(0.)
datrl.v3Dy = float(0.)
datrl.v3Id = int(0)
datrl.v4Type = int(0)
datrl.v4Dx = float(0.)
datrl.v4Vrel = float(0.)
datrl.v4Dy = float(0.)
datrl.v4Id = int(0)
lane_offset = 0.
#LEFT LANE
if self.RI.TRACK_LEFT_LANE and use_tesla_radar:
ll_track_pts = np.array([self.tracks[iden].get_key_for_cluster_dy(-self.lane_width) for iden in idens])
# If we have multiple points, cluster them
if len(ll_track_pts) > 1:
ll_cluster_idxs = cluster_points_centroid(ll_track_pts, 2.5)
ll_clusters = [None] * (max(ll_cluster_idxs) + 1)
for idx in xrange(len(ll_track_pts)):
ll_cluster_i = ll_cluster_idxs[idx]
if ll_clusters[ll_cluster_i] == None:
ll_clusters[ll_cluster_i] = Cluster()
ll_clusters[ll_cluster_i].add(self.tracks[idens[idx]])
elif len(ll_track_pts) == 1:
# TODO: why do we need this?
ll_clusters = [Cluster()]
ll_clusters[0].add(self.tracks[idens[0]])
else:
ll_clusters = []
if DEBUG:
for i in ll_clusters:
print(i)
# *** extract the lead car ***
ll_lead_clusters = [c for c in ll_clusters
if c.is_potential_lead_dy(self.v_ego,-self.lane_width)]
ll_lead_clusters.sort(key=lambda x: x.dRel)
ll_lead_len = len(ll_lead_clusters)
ll_lead1_truck = (len([c for c in ll_lead_clusters
if c.is_truck(ll_lead_clusters)]) > 0)
# *** extract the second lead from the whole set of leads ***
ll_lead2_clusters = [c for c in ll_lead_clusters
if c.is_potential_lead2(ll_lead_clusters)]
ll_lead2_clusters.sort(key=lambda x: x.dRel)
ll_lead2_len = len(ll_lead2_clusters)
ll_lead2_truck = (len([c for c in ll_lead_clusters
if c.is_truck(ll_lead2_clusters)]) > 0)
# publish data
if ll_lead_len > 0:
datrl.v1Type = int(ll_lead_clusters[0].oClass)
if datrl.v1Type == 1 and ll_lead1_truck:
datrl.v1Type = 0
datrl.v1Dx = float(ll_lead_clusters[0].dRel)
datrl.v1Vrel = float(ll_lead_clusters[0].vRel)
datrl.v1Dy = float(-ll_lead_clusters[0].yRel - lane_offset)
datrl.v1Id = int(ll_lead_clusters[0].track_id % 32)
if ll_lead2_len > 0:
datrl.v2Type = int(ll_lead2_clusters[0].oClass)
if datrl.v2Type == 1 and ll_lead2_truck:
datrl.v2Type = 0
datrl.v2Dx = float(ll_lead2_clusters[0].dRel)
datrl.v2Vrel = float(ll_lead2_clusters[0].vRel)
datrl.v2Dy = float(-ll_lead2_clusters[0].yRel - lane_offset)
datrl.v2Id = int(ll_lead2_clusters[0].track_id % 32)
#RIGHT LANE
if self.RI.TRACK_RIGHT_LANE and use_tesla_radar:
rl_track_pts = np.array([self.tracks[iden].get_key_for_cluster_dy(self.lane_width) for iden in idens])
# If we have multiple points, cluster them
if len(rl_track_pts) > 1:
rl_cluster_idxs = cluster_points_centroid(rl_track_pts, 2.5)
rl_clusters = [None] * (max(rl_cluster_idxs) + 1)
for idx in xrange(len(rl_track_pts)):
rl_cluster_i = rl_cluster_idxs[idx]
if rl_clusters[rl_cluster_i] == None:
rl_clusters[rl_cluster_i] = Cluster()
rl_clusters[rl_cluster_i].add(self.tracks[idens[idx]])
elif len(rl_track_pts) == 1:
# TODO: why do we need this?
rl_clusters = [Cluster()]
rl_clusters[0].add(self.tracks[idens[0]])
else:
rl_clusters = []
if DEBUG:
for i in rl_clusters:
print(i)
# *** extract the lead car ***
rl_lead_clusters = [c for c in rl_clusters
if c.is_potential_lead_dy(self.v_ego,self.lane_width)]
rl_lead_clusters.sort(key=lambda x: x.dRel)
rl_lead_len = len(rl_lead_clusters)
rl_lead1_truck = (len([c for c in rl_lead_clusters
if c.is_truck(rl_lead_clusters)]) > 0)
# *** extract the second lead from the whole set of leads ***
rl_lead2_clusters = [c for c in rl_lead_clusters
if c.is_potential_lead2(rl_lead_clusters)]
rl_lead2_clusters.sort(key=lambda x: x.dRel)
rl_lead2_len = len(rl_lead2_clusters)
rl_lead2_truck = (len([c for c in rl_lead_clusters
if c.is_truck(rl_lead2_clusters)]) > 0)
# publish data
if rl_lead_len > 0:
datrl.v3Type = int(rl_lead_clusters[0].oClass)
if datrl.v3Type == 1 and rl_lead1_truck:
datrl.v3Type = 0
datrl.v3Dx = float(rl_lead_clusters[0].dRel)
datrl.v3Vrel = float(rl_lead_clusters[0].vRel)
datrl.v3Dy = float(-rl_lead_clusters[0].yRel+ lane_offset)
datrl.v3Id = int(rl_lead_clusters[0].track_id % 32)
if rl_lead2_len > 0:
datrl.v4Type = int(rl_lead2_clusters[0].oClass)
if datrl.v4Type == 1 and rl_lead2_truck:
datrl.v4Type = 0
datrl.v4Dx = float(rl_lead2_clusters[0].dRel)
datrl.v4Vrel = float(rl_lead2_clusters[0].vRel)
datrl.v4Dy = float(-rl_lead2_clusters[0].yRel + lane_offset)
datrl.v4Id = int(rl_lead2_clusters[0].track_id % 32)
if (self.RI.TRACK_RIGHT_LANE or self.RI.TRACK_LEFT_LANE) and use_tesla_radar:
self.icCarLR.send(datrl.to_bytes())
### END REVIEW SECTION
# *** publish radarState ***
dat = messaging.new_message()
dat.init('radarState')
dat.valid = sm.all_alive_and_valid(service_list=['controlsState', 'model'])
dat.radarState.mdMonoTime = self.last_md_ts
dat.radarState.canMonoTimes = list(rr.canMonoTimes)
dat.radarState.radarErrors = list(rr.errors)
dat.radarState.controlsStateMonoTime = self.last_controls_state_ts
datext = tesla.ICLeads.new_message()
l1x = tesla.TeslaLeadPoint.new_message()
l2x = tesla.TeslaLeadPoint.new_message()
if has_radar:
l1d,l1x = get_lead(self.v_ego, self.ready, clusters, sm['model'].lead, low_speed_override=True)
l2d,l2x = get_lead(self.v_ego, self.ready, clusters, sm['model'].leadFuture, low_speed_override=False)
dat.radarState.leadOne = l1d
dat.radarState.leadTwo = l2d
datext.lead1trackId = l1x['trackId']
datext.lead1oClass = l1x['oClass']
datext.lead1length = l1x['length']
datext.lead2trackId = l2x['trackId']
datext.lead2oClass = l2x['oClass']
datext.lead2length = l2x['length']
return dat, datext
# fuses camera and radar data for best lead detection
def radard_thread(gctx=None):
set_realtime_priority(2)
# wait for stats about the car to come in from controls
cloudlog.info("radard is waiting for CarParams")
CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
use_tesla_radar = CarSettings().get_value("useTeslaRadar")
mocked = (CP.carName == "mock") or ((CP.carName == "tesla") and not use_tesla_radar)
cloudlog.info("radard got CarParams")
# import the radar from the fingerprint
cloudlog.info("radard is importing %s", CP.carName)
RadarInterface = importlib.import_module('selfdrive.car.%s.radar_interface' % CP.carName).RadarInterface
can_sock = messaging.sub_sock(service_list['can'].port)
sm = messaging.SubMaster(['model', 'controlsState', 'liveParameters'])
RI = RadarInterface(CP)
# *** publish radarState and liveTracks
radarState = messaging.pub_sock(service_list['radarState'].port)
liveTracks = messaging.pub_sock(service_list['liveTracks'].port)
icLeads = messaging.pub_sock(service_list['uiIcLeads'].port)
rk = Ratekeeper(rate, print_delay_threshold=None)
RD = RadarD(mocked, RI)
has_radar = not CP.radarOffCan or mocked
last_md_ts = 0.
v_ego = 0.
while 1:
can_strings = messaging.drain_sock_raw(can_sock, wait_for_one=True)
rr,rrext = RI.update(can_strings)
if rr is None:
continue
sm.update(0)
if sm.updated['controlsState']:
v_ego = sm['controlsState'].vEgo
dat,datext = RD.update(rk.frame, RI.delay, sm, rr, has_radar, rrext)
dat.radarState.cumLagMs = -rk.remaining*1000.
radarState.send(dat.to_bytes())
icLeads.send(datext.to_bytes())
# *** publish tracks for UI debugging (keep last) ***
tracks = RD.tracks
dat = messaging.new_message()
dat.init('liveTracks', len(tracks))
for cnt, ids in enumerate(tracks.keys()):
dat.liveTracks[cnt] = {
"trackId": ids,
"dRel": float(tracks[ids].dRel),
"yRel": float(tracks[ids].yRel),
"vRel": float(tracks[ids].vRel),
}
liveTracks.send(dat.to_bytes())
rk.monitor_time()
def main(gctx=None):
radard_thread(gctx)
if __name__ == "__main__":
main()
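match_vision_to_cluster above scores every radar cluster with a product of unnormalized Laplace likelihoods over relative distance, lateral offset and relative speed, then keeps the best match only if it passes the sanity checks. A self-contained toy illustration of that scoring rule (the numbers are invented and no openpilot types are used):

import numpy as np

def laplace_score(x, mu, b):
    # Same form as laplacian_cdf above: exp(-|x - mu| / b), with b floored at 1e-4.
    return np.exp(-abs(x - mu) / max(b, 1e-4))

# Hypothetical vision lead estimate: distance, lateral offset, relative speed (with std devs).
lead = {'d': 42.0, 'd_std': 3.0, 'y': 0.4, 'y_std': 0.5, 'v': -1.2, 'v_std': 0.8}
# Hypothetical radar clusters as (dRel, yRel, vRel) tuples.
clusters = [(41.5, 0.3, -1.0), (60.0, -2.0, 3.0)]

scores = [laplace_score(d, lead['d'], lead['d_std'])
          * laplace_score(y, lead['y'], lead['y_std'])
          * laplace_score(v, lead['v'], lead['v_std'])
          for d, y, v in clusters]
print(int(np.argmax(scores)))  # 0: the cluster closest to the vision estimate wins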
[Stats for selfdrive/controls/radard.py: avg_line_length 37.65, max_line_length 165, alphanum_fraction 0.66]
[Record: experiments/simulation_experiments/hypothesis_testing/plot_roc_genesets.py | repo andrewcharlesjones/cplvm @ f4bbcfc4b2e9a9cec7d01eb5f7ff3a169d6e3ff6 | license MIT | Python | 2,374 bytes | 7 stars]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import os
from os.path import join as pjoin
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
import matplotlib
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
DATA_DIR = "../out/cai_good"
files = os.listdir(DATA_DIR)
p_list = [10, 100, 1000]
plt.figure(figsize=(7, 7))
######## CPLVM ##########
# Load CPLVM results
# cplvm_stats = np.load(
# pjoin(DATA_DIR, "bfs_targeted.npy")
# )
cplvm_stats = np.load(
pjoin("/Users/andrewjones/Desktop", "bfs_targeted.npy")
)
bfs_stimulated_set = cplvm_stats[:, 0]
bfs_unstimulated_set = np.ndarray.flatten(cplvm_stats[:, 1:])
# import ipdb; ipdb.set_trace()
# sklearn's roc_curve returns (fpr, tpr, thresholds), so unpack the false positive rate first
fpr_shuffled, tpr_shuffled, thresholds_shuffled = roc_curve(
    y_true=np.concatenate(
        [np.zeros(len(bfs_unstimulated_set)), np.ones(len(bfs_stimulated_set))]
    ),
    y_score=np.concatenate([bfs_unstimulated_set, bfs_stimulated_set]),
)
plt.plot(fpr_shuffled, tpr_shuffled, label="CPLVM", linestyle=":", color="black")
# plt.show()
cplvm_df = pd.DataFrame({"FPR": fpr_shuffled, "TPR": tpr_shuffled})
cplvm_df['method'] = "CPLVM"
######## Li 2012 ##########
li2012_stats_experiment = np.load(
"../out/li2012/test_stats_experiment.npy"
)
li2012_stats_null = np.load("../out/li2012/test_stats_shuffled.npy")
fpr_shuffled, tpr_shuffled, thresholds_shuffled = roc_curve(
    y_true=np.concatenate(
        [np.zeros(len(li2012_stats_null)), np.ones(len(li2012_stats_experiment))]
    ),
    y_score=np.concatenate([li2012_stats_null, li2012_stats_experiment]),
)
plt.plot(fpr_shuffled, tpr_shuffled, label="Li 2012", linestyle="--", color="black")
li2012_df = pd.DataFrame({"FPR": fpr_shuffled, "TPR": tpr_shuffled})
li2012_df['method'] = "Li 2012"
# plt.legend(prop={"size": 20})
# plt.xlabel("TPR")
# plt.ylabel("FPR")
# plt.plot([0, 1], [0, 1], "--", color="black")
# plot_df = pd.concat([cplvm_df, li2012_df], axis=0)
# g = sns.lineplot(data=plot_df, x="TPR", y="FPR", style="method", color="black", ci=95, err_style="band")
# g.legend_.set_title(None)
plt.legend(prop={"size": 20})
plt.xlabel("TPR")
plt.ylabel("FPR")
# import ipdb; ipdb.set_trace()
plt.tight_layout()
plt.savefig("../out/roc_comparison_genesets.png")
plt.show()
# import ipdb; ipdb.set_trace()
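scikit-learn's roc_curve returns (fpr, tpr, thresholds) in that order, which is what the unpacking in the script above relies on; a minimal standalone check:

from sklearn.metrics import roc_curve

fpr, tpr, thresholds = roc_curve(y_true=[0, 0, 1, 1], y_score=[0.1, 0.4, 0.35, 0.8])
print(fpr, tpr)  # the false positive rate is the first element of the returned tuple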
[Stats for plot_roc_genesets.py: avg_line_length 25.80, max_line_length 106, alphanum_fraction 0.70]
[Record: ecommerce/profiles/migrations/0002_profile_description.py | repo kurniantoska/ecommerce @ a30c51417dd8183517cb2feb8f0efdc8ec202421 | license MIT | Python | 401 bytes]
# Generated by Django 2.0.6 on 2018-06-10 13:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='description',
field=models.TextField(default='description default text'),
),
]
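For context, after this migration the Profile model carries the new field. A schematic sketch of how the declaration would look inside the profiles app's models.py; the other fields from 0001_initial are assumed and omitted here:

from django.db import models

class Profile(models.Model):
    # ...fields created by 0001_initial are omitted...
    description = models.TextField(default='description default text')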
[Stats for 0002_profile_description.py: avg_line_length 21.11, max_line_length 71, alphanum_fraction 0.60]
[Record: components/apt.py | repo RobInLabUJI/ROSLab @ 3a5047a204989dea108cb163fd1ca7516ec2f5c9 | license MIT | Python | 515 bytes | 10 stars]
DOCKER_CONTENTS = """
##################################### APT ######################################
RUN apt-get -o Acquire::ForceIPv4=true update \\
&& apt-get -o Acquire::ForceIPv4=true install -yq --no-install-recommends \\
%s && apt-get clean \\
&& rm -rf /var/lib/apt/lists/*
"""
def write(DOCKER_FILE, package_list):
pstr = ''
for p in package_list:
pstr += ' ' + p + ' \\\n'
with open(DOCKER_FILE, "a") as dockerfile:
dockerfile.write(DOCKER_CONTENTS % pstr)
return
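A brief usage sketch of write(); the file name and package list are only examples:

if __name__ == "__main__":
    # Appends an APT install block for the listed packages to ./Dockerfile
    # (the file is created if it does not exist, since it is opened in append mode).
    write("Dockerfile", ["build-essential", "python3-pip"])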
[Stats for components/apt.py: avg_line_length 28.61, max_line_length 80, alphanum_fraction 0.52]
[Record: tests/sanity_test.py | repo old-reliable/magics-python @ 141504926562cbe130ed26d69b38d479e400c8b6 | license Apache-2.0 | Python | 2,827 bytes]
# (C) Copyright 1996-2019 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
"""
A unittest script which dynamically adds tests based on the contents of the 'gallery'
directory.
"""
import os
import glob
import unittest
import subprocess
class MagicsSanityTest(unittest.TestCase):
"""
A class with dynamically-generated test methods.
"""
pass
def cleanup_backup(backup_name, original_name):
"""
Move a backed-up file back to its original name.
"""
print("Replacing {} with {}".format(original_name, backup_name))
if os.path.isfile(backup_name):
os.rename(backup_name, original_name)
def cleanup_output(output_name):
"""
Delete a file created by running a test script.
"""
print("Removing {}".format(output_name))
os.remove(output_name)
def generate_test_method(test_name):
"""
Generate a test method based on a given test name.
The test is simply to run a test script 'test_name.py' and check that an output file with the
name 'test_name.png' is generated.
"""
def run_test(self):
# backup any existing files with our expected output_name
output_name = "{}.png".format(test_name)
backup_name = output_name + ".backup"
if os.path.isfile(output_name):
os.rename(output_name, backup_name)
self.addCleanup(cleanup_backup, backup_name, output_name)
# run the test
ret = subprocess.call("python {}.py".format(test_name), shell=True)
self.assertEqual(ret, 0)
output_exists = os.path.isfile(output_name)
if output_exists:
self.addCleanup(cleanup_output, output_name)
ps_output_name = "{}.ps".format(test_name)
if os.path.isfile(ps_output_name):
# some tests may also generate postscript files which need to be deleted
self.addCleanup(cleanup_output, ps_output_name)
self.assertTrue(output_exists)
return run_test
# This code needs to be outside of `if __name__ == '__main__'` so the test methods are generated
# at import time so that pytest can find them
test_dir = os.getenv("MAGICS_PYTHON_TESTS")
if not test_dir:
test_dir = "./gallery"
os.chdir(test_dir)
for file_name in glob.glob("*.py"):
test_name = os.path.splitext(file_name)[0]
print("Adding test: {}".format(test_name))
method_name = "test_{}".format(test_name)
setattr(MagicsSanityTest, method_name, generate_test_method(test_name))
if __name__ == "__main__":
unittest.main()
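The dynamic-test pattern used above (a factory that closes over test_name and is attached with setattr so unittest or pytest can discover it) can be seen in isolation with a trivial, Magics-independent sketch:

import unittest

class DynamicExample(unittest.TestCase):
    pass

def make_test(value):
    def run(self):
        self.assertTrue(value.isalpha())
    return run

for name in ("alpha", "beta"):
    setattr(DynamicExample, "test_{}".format(name), make_test(name))

if __name__ == "__main__":
    unittest.main()  # discovers test_alpha and test_beta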
[Stats for tests/sanity_test.py: avg_line_length 30.73, max_line_length 97, alphanum_fraction 0.70]
[Record: pydis_site/apps/api/viewsets/bot/aoc_link.py | repo discord-python/site @ 4c6993fab16ed0ab6fe08000ab242f7c96e69839 | license MIT | Python | 1,919 bytes | 13 stars]
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.mixins import (
CreateModelMixin, DestroyModelMixin, ListModelMixin, RetrieveModelMixin
)
from rest_framework.viewsets import GenericViewSet
from pydis_site.apps.api.models.bot import AocAccountLink
from pydis_site.apps.api.serializers import AocAccountLinkSerializer
class AocAccountLinkViewSet(
GenericViewSet, CreateModelMixin, DestroyModelMixin, RetrieveModelMixin, ListModelMixin
):
"""
View providing management for Users who linked their AoC accounts to their Discord Account.
## Routes
### GET /bot/aoc-account-links
Returns all the AoC account links
#### Response format
>>> [
... {
... "user": 2,
... "aoc_username": "AoCUser1"
... },
... ...
... ]
### GET /bot/aoc-account-links/<user__id:int>
    Retrieve an AoC account link by User ID
#### Response format
>>>
... {
... "user": 2,
... "aoc_username": "AoCUser1"
... }
#### Status codes
- 200: returned on success
- 404: returned if an AoC account link with the given `user__id` was not found.
### POST /bot/aoc-account-links
Adds a single AoC account link block
#### Request body
>>> {
... 'user': int,
... 'aoc_username': str
... }
#### Status codes
- 204: returned on success
- 400: if one of the given fields was invalid
### DELETE /bot/aoc-account-links/<user__id:int>
Deletes the AoC account link item with the given `user__id`.
#### Status codes
- 204: returned on success
- 404: returned if the AoC account link with the given `user__id` was not found
"""
serializer_class = AocAccountLinkSerializer
queryset = AocAccountLink.objects.all()
filter_backends = (DjangoFilterBackend,)
filter_fields = ("user__id", "aoc_username")
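The routes documented in the docstring above can be exercised with any HTTP client. A hedged sketch using requests, where the base URL and token are placeholders for a local development setup and are not defined anywhere in this repository:

import requests

BASE = "http://localhost:8000"                 # placeholder host
HEADERS = {"Authorization": "Token <token>"}   # placeholder credentials

# POST /bot/aoc-account-links: create a link
requests.post(f"{BASE}/bot/aoc-account-links",
              json={"user": 2, "aoc_username": "AoCUser1"}, headers=HEADERS)

# GET /bot/aoc-account-links/<user__id>: retrieve it by user id
print(requests.get(f"{BASE}/bot/aoc-account-links/2", headers=HEADERS).json())

# DELETE /bot/aoc-account-links/<user__id>: remove it
requests.delete(f"{BASE}/bot/aoc-account-links/2", headers=HEADERS)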
[Stats for aoc_link.py: avg_line_length 26.65, max_line_length 95, alphanum_fraction 0.65]
[Record: AutomatedTesting/Gem/PythonTests/Multiplayer/tests/Multiplayer_AutoComponent_RPC.py | repo Perpixel/o3de @ b1df4c90d54839c44a6236d6fd3853e7e2af6404 | licenses Apache-2.0 and MIT | Python | 6,131 bytes | 1 star]
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test Case Title : Check that the four network RPCs can be sent and received
# fmt: off
class TestSuccessFailTuples():
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
find_network_player = ("Found network player", "Couldn't find network player")
# fmt: on
def Multiplayer_AutoComponent_RPC():
r"""
Summary:
Runs a test to make sure that RPCs can be sent and received via script canvas
Level Description:
- Dynamic
1. Although the level is nearly empty, when the server and editor connect the server will spawn and replicate the player network prefab.
a. The player network prefab has a NetworkTestPlayerComponent.AutoComponent and a script canvas attached which sends and receives various RPCs.
Print logs occur upon sending and receiving the RPCs; we are testing to make sure the expected events and values are received.
- Static
1. NetLevelEntity. This is a networked entity which has a script attached. Used for cross-entity communication. The net-player prefab will send this level entity Server->Authority RPCs
Expected Outcome:
We should see editor logs stating that RPCs have been sent and received.
However, if the script receives unexpected values for the Process event we will see print logs for bad data as well.
:return:
"""
import azlmbr.legacy.general as general
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import Tracer
from editor_python_test_tools.utils import TestHelper as helper
from ly_remote_console.remote_console_commands import RemoteConsole as RemoteConsole
level_name = "AutoComponent_RPC"
player_prefab_name = "Player"
player_prefab_path = f"levels/multiplayer/{level_name}/{player_prefab_name}.network.spawnable"
helper.init_idle()
# 1) Open Level
helper.open_level("Multiplayer", level_name)
with Tracer() as section_tracer:
# 2) Enter game mode
helper.multiplayer_enter_game_mode(TestSuccessFailTuples.enter_game_mode, player_prefab_path.lower())
# 3) Make sure the network player was spawned
player_id = general.find_game_entity(player_prefab_name)
Report.critical_result(TestSuccessFailTuples.find_network_player, player_id.IsValid())
# 4) Check the editor logs for expected and unexpected log output
# Authority->Autonomous RPC
PLAYERID_RPC_WAIT_TIME_SECONDS = 1.0 # The player id is sent from the server as soon as the player script is spawned. 1 second should be more than enough time to send/receive that RPC.
helper.succeed_if_log_line_found('EditorServer', 'Script: AutoComponent_RPC: Sending client PlayerNumber 1', section_tracer.prints, PLAYERID_RPC_WAIT_TIME_SECONDS)
helper.succeed_if_log_line_found('Script', "AutoComponent_RPC: I'm Player #1", section_tracer.prints, PLAYERID_RPC_WAIT_TIME_SECONDS)
# Authority->Client RPC
PLAYFX_RPC_WAIT_TIME_SECONDS = 1.1 # The server will send an RPC to play an fx on the client every second.
helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC_NetLevelEntity Activated on entity: NetLevelEntity", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC_NetLevelEntity: Authority sending RPC to play some fx.", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
helper.succeed_if_log_line_found('Script', "AutoComponent_RPC_NetLevelEntity: I'm a client playing some fx.", section_tracer.prints, PLAYFX_RPC_WAIT_TIME_SECONDS)
# Autonomous->Authority RPC
# Sending 2 RPCs: 1 containing a parameter and 1 without
AUTONOMOUS_TO_AUTHORITY_RPC_WAIT_TIME_SECONDS = 1.0 # This RPC is sent as soon as the autonomous player script is spawned. 1 second should be more than enough time to send/receive that RPC.
helper.succeed_if_log_line_found('Script', "AutoComponent_RPC: Sending AutonomousToAuthorityNoParam RPC.", section_tracer.prints, AUTONOMOUS_TO_AUTHORITY_RPC_WAIT_TIME_SECONDS)
helper.succeed_if_log_line_found('Script', "AutoComponent_RPC: Sending AutonomousToAuthority RPC (with float param).", section_tracer.prints, AUTONOMOUS_TO_AUTHORITY_RPC_WAIT_TIME_SECONDS)
helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC: Successfully received AutonomousToAuthorityNoParams RPC.", section_tracer.prints, AUTONOMOUS_TO_AUTHORITY_RPC_WAIT_TIME_SECONDS)
helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC: Successfully received AutonomousToAuthority RPC (with expected float param).", section_tracer.prints, AUTONOMOUS_TO_AUTHORITY_RPC_WAIT_TIME_SECONDS)
# Server->Authority RPC. Inter-Entity Communication.
SERVER_TO_AUTHORITY_RPC_WAIT_TIME_SECONDS = 1.0 # This RPC is sent as soon as the networked level entity finds the player in the level, and previous tests are relying on the player's existence. 1 second should be more than enough time to send/receive that RPC.
helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC_NetLevelEntity: Send ServerToAuthority RPC.", section_tracer.prints, SERVER_TO_AUTHORITY_RPC_WAIT_TIME_SECONDS)
helper.succeed_if_log_line_found('EditorServer', "Script: AutoComponent_RPC: Received ServerToAuthority RPC. Damage=42.", section_tracer.prints, SERVER_TO_AUTHORITY_RPC_WAIT_TIME_SECONDS)
# Exit game mode
helper.exit_game_mode(TestSuccessFailTuples.exit_game_mode)
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(Multiplayer_AutoComponent_RPC)
| 62.561224
| 269
| 0.761866
|
5e0e3eebc576ae955ba55a319bb39ccc270db402
| 200
|
py
|
Python
|
sample/fst_elem.py
|
iogf/ehp
|
14d2cae449f411863fea201c3a99802fa37f1696
|
[
"MIT"
] | 47
|
2015-09-11T20:44:05.000Z
|
2022-03-03T01:16:51.000Z
|
sample/fst_elem.py
|
iogf/ehp
|
14d2cae449f411863fea201c3a99802fa37f1696
|
[
"MIT"
] | 6
|
2016-06-30T18:55:08.000Z
|
2020-08-15T20:19:11.000Z
|
sample/fst_elem.py
|
iogf/ehp
|
14d2cae449f411863fea201c3a99802fa37f1696
|
[
"MIT"
] | 15
|
2016-05-08T01:17:10.000Z
|
2021-06-29T07:43:08.000Z
|
# Name: ex17.py
from ehp import *
html = Html()
data = '<body> <em> beta. </em></body>'
dom = html.feed(data)
root, item = dom.fst_with_root('em')
root.insert_after(item, Tag('p'))
print(root)
| 12.5
| 39
| 0.625
|
20cd83c0c5b1f95b20383139f751f0e15b46e3a9
| 21,583
|
py
|
Python
|
var/spack/repos/builtin/packages/openmpi/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/openmpi/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/openmpi/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
def _verbs_dir():
"""Try to find the directory where the OpenFabrics verbs package is
installed. Return None if not found.
"""
try:
# Try to locate Verbs by looking for a utility in the path
ibv_devices = which("ibv_devices")
# Run it (silently) to ensure it works
ibv_devices(output=str, error=str)
# Get path to executable
path = ibv_devices.exe[0]
# Remove executable name and "bin" directory
path = os.path.dirname(path)
path = os.path.dirname(path)
# There's usually no "/include" on Unix; use "/usr/include" instead
if path == "/":
path = "/usr"
return path
except TypeError:
return None
except ProcessError:
return None
def _mxm_dir():
"""Look for default directory where the Mellanox package is
installed. Return None if not found.
"""
# Only using default directory; make this more flexible in the future
path = "/opt/mellanox/mxm"
if os.path.isdir(path):
return path
else:
return None
class Openmpi(AutotoolsPackage):
"""An open source Message Passing Interface implementation.
The Open MPI Project is an open source Message Passing Interface
implementation that is developed and maintained by a consortium
of academic, research, and industry partners. Open MPI is
therefore able to combine the expertise, technologies, and
resources from all across the High Performance Computing
community in order to build the best MPI library available.
Open MPI offers advantages for system and software vendors,
application developers and computer science researchers.
"""
homepage = "http://www.open-mpi.org"
url = "https://www.open-mpi.org/software/ompi/v3.1/downloads/openmpi-3.1.2.tar.bz2"
list_url = "http://www.open-mpi.org/software/ompi/"
# Current
version('3.1.3', sha256='8be04307c00f51401d3fb9d837321781ea7c79f2a5a4a2e5d4eaedc874087ab6') # libmpi.so.40.10.3
version('3.1.2', sha256='c654ed847f34a278c52a15c98add40402b4a90f0c540779f1ae6c489af8a76c5') # libmpi.so.40.10.2
version('3.1.1', sha256='3f11b648dd18a8b878d057e9777f2c43bf78297751ad77ae2cef6db0fe80c77c') # libmpi.so.40.10.1
version('3.1.0', sha256='b25c044124cc859c0b4e6e825574f9439a51683af1950f6acda1951f5ccdf06c') # libmpi.so.40.10.0
# Still supported
version('3.0.3', sha256='fb228e42893fe6a912841a94cd8a0c06c517701ae505b73072409218a12cf066') # libmpi.so.40.00.4
version('3.0.2', sha256='d2eea2af48c1076c53cabac0a1f12272d7470729c4e1cb8b9c2ccd1985b2fb06') # libmpi.so.40.00.2
version('3.0.1', sha256='663450d1ee7838b03644507e8a76edfb1fba23e601e9e0b5b2a738e54acd785d') # libmpi.so.40.00.1
version('3.0.0', sha256='f699bff21db0125d8cccfe79518b77641cd83628725a1e1ed3e45633496a82d7') # libmpi.so.40.00.0
version('2.1.5', sha256='b807ccab801f27c3159a5edf29051cd3331d3792648919f9c4cee48e987e7794') # libmpi.so.20.10.3
version('2.1.4', sha256='3e03695ca8bd663bc2d89eda343c92bb3d4fc79126b178f5ddcb68a8796b24e2') # libmpi.so.20.10.3
version('2.1.3', sha256='285b3e2a69ed670415524474496043ecc61498f2c63feb48575f8469354d79e8') # libmpi.so.20.10.2
version('2.1.2', sha256='3cc5804984c5329bdf88effc44f2971ed244a29b256e0011b8deda02178dd635') # libmpi.so.20.10.2
version('2.1.1', sha256='bd7badd4ff3afa448c0d7f3ca0ee6ce003b957e9954aa87d8e4435759b5e4d16') # libmpi.so.20.10.1
version('2.1.0', sha256='b169e15f5af81bf3572db764417670f508c0df37ce86ff50deb56bd3acb43957') # libmpi.so.20.10.0
version('2.0.4', sha256='4f82d5f7f294becbd737319f74801206b08378188a95b70abe706fdc77a0c20b') # libmpi.so.20.0.4
version('2.0.3', sha256='b52c0204c0e5954c9c57d383bb22b4181c09934f97783292927394d29f2a808a') # libmpi.so.20.0.3
version('2.0.2', sha256='cae396e643f9f91f0a795f8d8694adf7bacfb16f967c22fb39e9e28d477730d3') # libmpi.so.20.0.2
version('2.0.1', sha256='fed74f4ae619b7ebcc18150bb5bdb65e273e14a8c094e78a3fea0df59b9ff8ff') # libmpi.so.20.0.1
version('2.0.0', sha256='08b64cf8e3e5f50a50b4e5655f2b83b54653787bd549b72607d9312be44c18e0') # libmpi.so.20.0.0
version('1.10.7', 'c87c613f9acb1a4eee21fa1ac8042579') # libmpi.so.12.0.7
version('1.10.6', '2e65008c1867b1f47c32f9f814d41706') # libmpi.so.12.0.6
version('1.10.5', 'd32ba9530a869d9c1eae930882ea1834') # libmpi.so.12.0.5
version('1.10.4', '9d2375835c5bc5c184ecdeb76c7c78ac') # libmpi.so.12.0.4
version('1.10.3', 'e2fe4513200e2aaa1500b762342c674b') # libmpi.so.12.0.3
version('1.10.2', 'b2f43d9635d2d52826e5ef9feb97fd4c') # libmpi.so.12.0.2
version('1.10.1', 'f0fcd77ed345b7eafb431968124ba16e') # libmpi.so.12.0.1
version('1.10.0', '280cf952de68369cebaca886c5ce0304') # libmpi.so.12.0.0
# Retired
version('1.8.8', '0dab8e602372da1425e9242ae37faf8c') # libmpi.so.1.6.3
version('1.8.7', '2485ed6fa0fab9bb5b4e7a9f63718630') # libmpi.so.1.6.2
version('1.8.6', 'eb569e7dc97eeaa5b1876cccf114f377') # libmpi.so.1.6.1
version('1.8.5', '93e958914ff0e4d9634d668d2d48c793') # libmpi.so.1.6.0
version('1.8.4', '93b7ea2c4ebae76947f942579608ae4a') # libmpi.so.1.6.0
version('1.8.3', '2067d00853e0c33d498153fc7d268d2b') # libmpi.so.1.6.0
version('1.8.2', '339a9fc199563bacbb359875ce8c9e20') # libmpi.so.1.5.2
version('1.8.1', '0e12c24a28a605f681ff9a19a1aca2f1') # libmpi.so.1.5.0
version('1.8', '5999cfb177a50c480b5d0bced379aff1') # libmpi.so.1.5.0
version('1.7.5', '321ab81147ac69a5bbca72652fb3b468') # libmpi.so.1.4.0
version('1.7.4', '4aea4fb00f8956dd56ccf50e5784e03f') # libmpi.so.1.3.0
version('1.7.3', '7d0779f73c43eb1d098ad037d60649bc') # libmpi.so.1.2.0
version('1.7.2', 'b897b92100bd13b367e651df483421d5') # libmpi.so.1.1.2
version('1.7.1', 'f25b446a9dcbbd6a105a99d926d34441') # libmpi.so.1.1.1
version('1.7', 'c0e3c4b3bfcd8b8bbd027f6f4c164acb') # libmpi.so.1.1.0
version('1.6.5', '03aed2a4aa4d0b27196962a2a65fc475') # libmpi.so.1.0.8
version('1.6.4', '62119579ab92b2592cd96b6a9d2a8cc3') # libmpi.so.1.0.7
version('1.6.3', 'eedb73155a7a40b0b07718494298fb25') # libmpi.so.1.0.6
version('1.6.2', '219754715a8e40beb468bbc8f0b3251a') # libmpi.so.1.0.3
version('1.6.1', '33d2782c20ff6be79130a703b85da8f0') # libmpi.so.1.0.3
version('1.6', 'dd6f5bd4b3cb14d93bbf530e50e46e60') # libmpi.so.1.0.3
# Ancient
version('1.5.5', 'f882fd61ff89db856bfd8f0dfa42e1bd') # libmpi.so.1.0.3
version('1.5.4', '51153d794c49ce6d275dba2793ab0c68') # libmpi.so.1.0.2
version('1.5.3', '0eb8ec2aa05c74a4bc7602b01847131e') # libmpi.so.1.0.1
version('1.5.2', 'faaee6a2777bf607d7fa1297c0b3a9ed') # libmpi.so.1.0.1
version('1.5.1', '3f9409f5d3b617c04dea48ba8fbd703a') # libmpi.so.1.0.0
version('1.5', '86bf5f9ef7337231abbca3350b31f112') # libmpi.so.1.0.0
version('1.4.5', '84ddd2772f46d35da79e1db8a274c99d') # libmpi.so.0.0.4
version('1.4.4', 'e58a1ea7b8af62453aaa0ddaee5f26a0') # libmpi.so.0.0.3
version('1.4.3', 'd2ead141c43b915343f5c5a18f3b5016') # libmpi.so.0.0.2
version('1.4.2', '53b26fa2586aedaf73cf40effbfcc2f3') # libmpi.so.0.0.2
version('1.4.1', '28a820c85e02973809df881fdeddd15e') # libmpi.so.0.0.1
version('1.4', '9786ec0698afed9498ce43dc3978a435') # libmpi.so.0.0.1
version('1.3.4', '978c29f3b671856daa0fc67459b73e01') # libmpi.so.0.0.1
version('1.3.3', 'f6cdc9c195daa8571b2e509e952d6755') # libmpi.so.0.0.0
version('1.3.2', '75781dc31255cd841701c065e239d994') # libmpi.so.0.0.0
version('1.3.1', 'd759523b0752139872c534714d641d64') # libmpi.so.0.0.0
version('1.3', 'efbba7d652d1e430d456f65d7a2e339b') # libmpi.so.0.0.0
version('1.2.9', '78c2aebbb746610ed12bdedcc2b6ec0e') # libmpi.so.0.0.0
version('1.2.8', '7f2d5af02101c5f01173f4f6de296549') # libmpi.so.0.0.0
version('1.2.7', 'b5ae3059fba71eba4a89a2923da8223f') # libmpi.so.0.0.0
version('1.2.6', 'f126793b68e71f5ec4a192c40675af2d') # libmpi.so.0.0.0
version('1.2.5', 'c6e82aab6cdcd425bf29217e8317d7dc') # libmpi.so.0.0.0
version('1.2.4', '311b38c597f54d8d6b277225ef458666') # libmpi.so.0.0.0
version('1.2.3', 'ae980bb00f9686934a1143701cc041e4') # libmpi.so.0.0.0
version('1.2.2', '7f553317e388c4efe479e908b66f910d') # libmpi.so.0.0.0
version('1.2.1', 'ceaa42891edba2324a94fdd0b87e46ca') # libmpi.so.0.0.0
version('1.2', '37e8d4edad54a8d8c3127fbef87ebda1') # libmpi.so.0.0.0
version('1.1.5', '6aada92896a1830ece6d3ba1e66a17fa') # libmpi.so.0.0.0
version('1.1.4', '28940b182156478fa442397b0c9660e1') # libmpi.so.0.0.0
version('1.1.3', 'bbaa7fe9d556212d877d872544a31569') # libmpi.so.0.0.0
version('1.1.2', '53877ec8bca5f6e505496b6b94b1d850') # libmpi.so.0.0.0
version('1.1.1', '498b9322ae0ad512026a008a30c7e0b5') # libmpi.so.0.0.0
version('1.1', '821af8bbb7a8e85ec707cb4c3b6bcbf6') # libmpi.so.0.0.0
version('1.0.2', 'fd32861d643f9fe539a01d0d5b836f41') # libmpi.so.0.0.0
version('1.0.1', '8abccca5cdddc81a6d9d9e22b3bb6db9') # libmpi.so.0.0.0
version('1.0', 'f5dcb5d3a98f2e5a9c2a0caaef54d806') # libmpi.so.0.0.0
patch('ad_lustre_rwcontig_open_source.patch', when="@1.6.5")
patch('llnl-platforms.patch', when="@1.6.5")
patch('configure.patch', when="@1.10.1")
patch('fix_multidef_pmi_class.patch', when="@2.0.0:2.0.1")
# Vader Bug: https://github.com/open-mpi/ompi/issues/5375
    # A fix has not been released for 2.1.x yet
patch('btl_vader.patch', when='@2.1.3:2.1.5')
# Fixed in 3.0.3 and 3.1.3
patch('btl_vader.patch', when='@3.0.1:3.0.2')
patch('btl_vader.patch', when='@3.1.0:3.1.2')
fabrics = ('psm', 'psm2', 'verbs', 'mxm', 'ucx', 'libfabric')
variant(
'fabrics',
default=None if _verbs_dir() is None else 'verbs',
description="List of fabrics that are enabled",
values=fabrics,
multi=True
)
variant(
'schedulers',
description='List of schedulers for which support is enabled',
values=('alps', 'lsf', 'tm', 'slurm', 'sge', 'loadleveler'),
multi=True
)
# Additional support options
variant('java', default=False, description='Build Java support')
variant('sqlite3', default=False, description='Build SQLite3 support')
variant('vt', default=True, description='Build VampirTrace support')
variant('thread_multiple', default=False,
description='Enable MPI_THREAD_MULTIPLE support')
variant('cuda', default=False, description='Enable CUDA support')
variant('pmi', default=False, description='Enable PMI support')
variant('cxx_exceptions', default=True, description='Enable C++ Exception support')
# Adding support to build a debug version of OpenMPI that activates
# Memchecker, as described here:
#
# https://www.open-mpi.org/faq/?category=debugging#memchecker_what
#
# This option degrades run-time support, and thus is disabled by default
variant(
'memchecker',
default=False,
description='Memchecker support for debugging [degrades performance]'
)
variant(
'legacylaunchers',
default=False,
description='Do not remove mpirun/mpiexec when building with slurm'
)
provides('mpi')
provides('mpi@:2.2', when='@1.6.5')
provides('mpi@:3.0', when='@1.7.5:')
provides('mpi@:3.1', when='@2.0.0:')
if sys.platform != 'darwin':
depends_on('numactl')
depends_on('hwloc')
# ompi@:3.0.0 doesn't support newer hwloc releases:
# "configure: error: OMPI does not currently support hwloc v2 API"
# Future ompi releases may support it, needs to be verified.
# See #7483 for context.
depends_on('hwloc@:1.999')
depends_on('hwloc +cuda', when='+cuda')
depends_on('java', when='+java')
depends_on('sqlite', when='+sqlite3@:1.11')
depends_on('zlib', when='@3.0.0:')
depends_on('valgrind~mpi', when='+memchecker')
depends_on('ucx', when='fabrics=ucx')
depends_on('libfabric', when='fabrics=libfabric')
depends_on('slurm', when='schedulers=slurm')
depends_on('binutils+libiberty', when='fabrics=mxm')
conflicts('+cuda', when='@:1.6') # CUDA support was added in 1.7
conflicts('fabrics=psm2', when='@:1.8') # PSM2 support was added in 1.10.0
conflicts('fabrics=mxm', when='@:1.5.3') # MXM support was added in 1.5.4
conflicts('+pmi', when='@:1.5.4') # PMI support was added in 1.5.5
conflicts('schedulers=slurm ~pmi', when='@1.5.4:',
msg='+pmi is required for openmpi(>=1.5.5) to work with SLURM.')
filter_compiler_wrappers('openmpi/*-wrapper-data*', relative_root='share')
conflicts('fabrics=libfabric', when='@:1.8') # libfabric support was added in 1.10.0
# It may be worth considering making libfabric an exclusive fabrics choice
def url_for_version(self, version):
url = "http://www.open-mpi.org/software/ompi/v{0}/downloads/openmpi-{1}.tar.bz2"
return url.format(version.up_to(2), version)
@property
def headers(self):
hdrs = HeaderList(find(self.prefix.include, 'mpi.h', recursive=False))
if not hdrs:
hdrs = HeaderList(find(self.prefix, 'mpi.h', recursive=True))
return hdrs or None
@property
def libs(self):
query_parameters = self.spec.last_query.extra_parameters
libraries = ['libmpi']
if 'cxx' in query_parameters:
libraries = ['libmpi_cxx'] + libraries
return find_libraries(
libraries, root=self.prefix, shared=True, recursive=True
)
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
spack_env.set('MPICC', join_path(self.prefix.bin, 'mpicc'))
spack_env.set('MPICXX', join_path(self.prefix.bin, 'mpic++'))
spack_env.set('MPIF77', join_path(self.prefix.bin, 'mpif77'))
spack_env.set('MPIF90', join_path(self.prefix.bin, 'mpif90'))
spack_env.set('OMPI_CC', spack_cc)
spack_env.set('OMPI_CXX', spack_cxx)
spack_env.set('OMPI_FC', spack_fc)
spack_env.set('OMPI_F77', spack_f77)
def setup_dependent_package(self, module, dependent_spec):
self.spec.mpicc = join_path(self.prefix.bin, 'mpicc')
self.spec.mpicxx = join_path(self.prefix.bin, 'mpic++')
self.spec.mpifc = join_path(self.prefix.bin, 'mpif90')
self.spec.mpif77 = join_path(self.prefix.bin, 'mpif77')
self.spec.mpicxx_shared_libs = [
join_path(self.prefix.lib, 'libmpi_cxx.{0}'.format(dso_suffix)),
join_path(self.prefix.lib, 'libmpi.{0}'.format(dso_suffix))
]
def with_or_without_verbs(self, activated):
        # Up through version 1.6, this option was named
        # --with-openib
opt = 'openib'
# In version 1.7, it was renamed to be --with-verbs
if self.spec.satisfies('@1.7:'):
opt = 'verbs'
# If the option has not been activated return
# --without-openib or --without-verbs
if not activated:
return '--without-{0}'.format(opt)
line = '--with-{0}'.format(opt)
path = _verbs_dir()
if (path is not None) and (path not in ('/usr', '/usr/local')):
line += '={0}'.format(path)
return line
def with_or_without_mxm(self, activated):
opt = 'mxm'
# If the option has not been activated return --without-mxm
if not activated:
return '--without-{0}'.format(opt)
line = '--with-{0}'.format(opt)
path = _mxm_dir()
if (path is not None):
line += '={0}'.format(path)
return line
@run_before('autoreconf')
def die_without_fortran(self):
# Until we can pass variants such as +fortran through virtual
# dependencies depends_on('mpi'), require Fortran compiler to
# avoid delayed build errors in dependents.
if (self.compiler.f77 is None) or (self.compiler.fc is None):
raise InstallError(
'OpenMPI requires both C and Fortran compilers!'
)
def configure_args(self):
spec = self.spec
config_args = [
'--enable-shared',
]
# Add extra_rpaths dirs from compilers.yaml into link wrapper
rpaths = [self.compiler.cc_rpath_arg + path
for path in self.compiler.extra_rpaths]
config_args.extend([
'--with-wrapper-ldflags={0}'.format(' '.join(rpaths))
])
# According to this comment on github:
#
# https://github.com/open-mpi/ompi/issues/4338#issuecomment-383982008
#
# adding --enable-static silently disables slurm support via pmi/pmi2
if spec.satisfies('schedulers=slurm'):
config_args.append('--with-pmi={0}'.format(spec['slurm'].prefix))
else:
config_args.append('--enable-static')
config_args.extend(self.with_or_without('pmi'))
if spec.satisfies('@2.0:'):
            # For Open MPI 2.0 and later, C++ bindings are disabled by default.
config_args.extend(['--enable-mpi-cxx'])
if spec.satisfies('@3.0.0:', strict=True):
config_args.append('--with-zlib={0}'.format(spec['zlib'].prefix))
# Fabrics
config_args.extend(self.with_or_without('fabrics'))
# Schedulers
config_args.extend(self.with_or_without('schedulers'))
config_args.extend(self.enable_or_disable('memchecker'))
if spec.satisfies('+memchecker', strict=True):
config_args.extend([
'--enable-debug',
'--with-valgrind={0}'.format(spec['valgrind'].prefix),
])
# Hwloc support
if spec.satisfies('@1.5.2:'):
config_args.append('--with-hwloc={0}'.format(spec['hwloc'].prefix))
# Java support
if spec.satisfies('@1.7.4:'):
if '+java' in spec:
config_args.extend([
'--enable-java',
'--enable-mpi-java',
'--with-jdk-dir={0}'.format(spec['java'].home)
])
else:
config_args.extend([
'--disable-java',
'--disable-mpi-java'
])
# SQLite3 support
if spec.satisfies('@1.7.3:1.999'):
if '+sqlite3' in spec:
config_args.append('--with-sqlite3')
else:
config_args.append('--without-sqlite3')
# VampirTrace support
if spec.satisfies('@1.3:1.999'):
if '+vt' not in spec:
config_args.append('--enable-contrib-no-build=vt')
# Multithreading support
if spec.satisfies('@1.5.4:2.999'):
if '+thread_multiple' in spec:
config_args.append('--enable-mpi-thread-multiple')
else:
config_args.append('--disable-mpi-thread-multiple')
# CUDA support
# See https://www.open-mpi.org/faq/?category=buildcuda
if spec.satisfies('@1.7:'):
if '+cuda' in spec:
# OpenMPI dynamically loads libcuda.so, requires dlopen
config_args.append('--enable-dlopen')
# Searches for header files in DIR/include
config_args.append('--with-cuda={0}'.format(
spec['cuda'].prefix))
if spec.satisfies('@1.7:1.7.2'):
# This option was removed from later versions
config_args.append('--with-cuda-libdir={0}'.format(
spec['cuda'].libs.directories[0]))
if spec.satisfies('@1.7.2'):
# There was a bug in 1.7.2 when --enable-static is used
config_args.append('--enable-mca-no-build=pml-bfo')
if spec.satisfies('%pgi^cuda@7.0:7.999'):
# OpenMPI has problems with CUDA 7 and PGI
config_args.append(
'--with-wrapper-cflags=-D__LP64__ -ta:tesla')
if spec.satisfies('%pgi@:15.8'):
# With PGI 15.9 and later compilers, the
# CFLAGS=-D__LP64__ is no longer needed.
config_args.append('CFLAGS=-D__LP64__')
else:
config_args.append('--without-cuda')
if '+cxx_exceptions' in spec:
config_args.append('--enable-cxx-exceptions')
else:
config_args.append('--disable-cxx-exceptions')
return config_args
@run_after('install')
def delete_mpirun_mpiexec(self):
# The preferred way to run an application when Slurm is the
# scheduler is to let Slurm manage process spawning via PMI.
#
# Deleting the links to orterun avoids users running their
# applications via mpirun or mpiexec, and leaves srun as the
# only sensible choice (orterun is still present, but normal
# users don't know about that).
if '@1.6: ~legacylaunchers schedulers=slurm' in self.spec:
os.remove(self.prefix.bin.mpirun)
os.remove(self.prefix.bin.mpiexec)
os.remove(self.prefix.bin.shmemrun)
os.remove(self.prefix.bin.oshrun)
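# Illustrative sketch (not part of the Spack package): the flag-building logic
# used by with_or_without_verbs()/with_or_without_mxm() above, reduced to plain
# Python so it can be read in isolation.
def _example_with_or_without(opt, activated, path=None):
    """Return the configure flag for an optional fabric dependency."""
    if not activated:
        return '--without-{0}'.format(opt)
    line = '--with-{0}'.format(opt)
    if path is not None and path not in ('/usr', '/usr/local'):
        line += '={0}'.format(path)
    return line

# _example_with_or_without('verbs', True, '/opt/ofed') -> '--with-verbs=/opt/ofed'
# _example_with_or_without('mxm', False)               -> '--without-mxm'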
| 45.921277
| 116
| 0.64333
|
e9c326dfc4b35ac45dbe1fc1bfaa83da0e1d65fc
| 5,714
|
py
|
Python
|
Code/InterLabDataAnalysis.py
|
andreschristen/BA_Dilution_Series
|
b9de412f5e71770a056c0412249ef8ce7764cf15
|
[
"MIT"
] | null | null | null |
Code/InterLabDataAnalysis.py
|
andreschristen/BA_Dilution_Series
|
b9de412f5e71770a056c0412249ef8ce7764cf15
|
[
"MIT"
] | null | null | null |
Code/InterLabDataAnalysis.py
|
andreschristen/BA_Dilution_Series
|
b9de412f5e71770a056c0412249ef8ce7764cf15
|
[
"MIT"
] | 1
|
2021-04-16T15:01:33.000Z
|
2021-04-16T15:01:33.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon 2018.02.28:00:00:00
@author: jac-nosm
Abstract:
Dilution experiments are performed routinely in microbial laboratories.
However, their results are not properly analyzed, with only very basic heuristic
formulas. We present a proper statistical analysis of these experiments and
also include a further analysis of the bacterial evolution under temperature stress.
"""
# Packages
from numpy import array, zeros, exp
from DilExp import DilExp, MultiDilExp
from matplotlib import pyplot as plt
def TransformTNTC( count, c=300):
if isinstance( count, int):
return count
else:
return c
def AnaBF( spreadsheet, lab, T=100000, control=False, data_all=True):
"""Analyse BF for the repetitions in all data
experiment in lab = 'Lab5', 'Lab6', 'Lab8' or
if control 'Lab5Cntrl', 'Lab6Cntrl', 'Lab8Cntrl'
not(data_all), that is, only include the first countable dilution.
"""
J=7 ## Dilutions < J
    c=300 ##maximum count for the drop or plated volume, TNTC threshold
d = DilExp( J=J, alpha0=4, alpha=10, alphap=100, c=c, Sc=4.13, Sc_unit=r"cm^2", q=0.05, pr_a=10)
""" alpha0, dilution factor for tube 1 from tube 0: 1ml from 4*10ml
alpha, dilution factor for each tube =10, 1ml from 10ml
    alphap, dilution factor for the drop or plated volume =100, 0.1ml from 10ml
    q, probability of miscounting (=0.05)
pr_a, prior mean of the count, given the dilution s to be counted (=20)
Sc, coupon surface area (=4.13)
Sc_unit, coupon surface area units (=r"cm^2").
"""
if control:
sheet = lab + 'Cntrl'
#print("Analizing control experiment of %s" % (lab,))
else:
sheet = lab
#print("Analizing experiment of %s" % (lab,))
rt = []
for k in range(3): #Repetitions 1, 2, 3
s = zeros(2)
y = zeros(2)
plate1 = array(spreadsheet[sheet].loc[ :, 'Plate 1'])
for j, count in enumerate(plate1[J*k:(J*(k+1))]):
if TransformTNTC(count) < c:
s[0] = spreadsheet[sheet].loc[ j+J*k, 'Dilution']
y[0] = count
break
plate2 = array(spreadsheet[sheet].loc[ :, 'Plate 2'])
for j, count in enumerate(plate2[J*k:(J*(k+1))]):
if TransformTNTC(count) < c:
s[1] = spreadsheet[sheet].loc[ j+J*k, 'Dilution']
y[1] = count
break
d.Data( s=s, y=y, calc_post=True)
rt += [d.BF]
print("%10s, %2d, %16g, %16g, %16g, %16f" %\
( sheet, k, d.k_betabinom, d.k_binom, exp(d.k_binom_c - d.k_betabinom_c), d.BF))
return rt
def InterLabGenMultiDilExp( spreadsheet, lab, T=100000, control=False, data_all=True):
"""Create a MultiDilExp object for the repetitions of
experiment in lab = 'Lab5', 'Lab6', 'Lab8' or
if control 'Lab5Cntrl', 'Lab6Cntrl', 'Lab8Cntrl'
if data_all include all the counts including TNTC
if not(data_all) only include first countable dilution.
"""
J=7 ## Dilutions < J
    c=300 ##maximum count for the drop or plated volume, TNTC threshold
md = MultiDilExp( K=3, J=J, alpha0=4, alpha=10, alphap=100, c=c, Sc=4.13, Sc_unit=r"cm^2", q=0.05, pr_a=10, b=500, M=10)
""" alpha0, dilution factor for tube 1 from tube 0: 1ml from 4*10ml
alpha, dilution factor for each tube =10, 1ml from 10ml
    alphap, dilution factor for the drop or plated volume =100, 0.1ml from 10ml
    q, probability of miscounting (=0.05)
pr_a, prior mean of the count, given the dilution s to be counted (=20)
Sc, coupon surface area (=4.13)
Sc_unit, coupon surface area units (=r"cm^2").
"""
if control:
sheet = lab + 'Cntrl'
print("Analizing control experiment of %s" % (lab,))
else:
sheet = lab
print("Analizing experiment of %s" % (lab,))
for k in range(3): #Repetitions 1, 2, 3
if data_all:
s = array([[d,d] for d in range(J)]).flatten() # Dilutions
y = zeros(2*J)
for d in range(J):
y[2*d] = TransformTNTC(spreadsheet[sheet].loc[ d+J*k, 'Plate 1'])
y[2*d + 1] = TransformTNTC(spreadsheet[sheet].loc[ d+J*k, 'Plate 2'])
print("Repetition %d" % (k,))
print(s)
print(y)
print("\n")
md.Data( k=k, s=s, y=y)
else:
s = zeros(2)
y = zeros(2)
plate1 = array(spreadsheet[sheet].loc[ :, 'Plate 1'])
for j, count in enumerate(plate1[J*k:(J*(k+1))]):
if TransformTNTC(count) < c:
s[0] = spreadsheet[sheet].loc[ j+J*k, 'Dilution']
y[0] = count
break
plate2 = array(spreadsheet[sheet].loc[ :, 'Plate 2'])
for j, count in enumerate(plate2[J*k:(J*(k+1))]):
if TransformTNTC(count) < c:
s[1] = spreadsheet[sheet].loc[ j+J*k, 'Dilution']
y[1] = count
break
print("Repetition %d" % (k,))
print(s)
print(y)
print("\n")
md.Data( k=k, s=s, y=y)
md.data_other = sheet
md.RunTwalk(T=T)
md.twalk.Ana()
return md
def InterLabMakeList( spreadsheet, T=100000, control=False, data_all=False):
mdlist = []
for lab in ['Lab5', 'Lab6', 'Lab8']:
mdlist += [InterLabGenMultiDilExp( spreadsheet, lab, T=T, control=control, data_all=data_all)]
return mdlist
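# Hypothetical driver sketch (not part of this script): the workbook name and
# sheet layout are assumptions; pandas.read_excel with sheet_name=None returns
# the dict-of-DataFrames that AnaBF()/InterLabMakeList() expect.
if __name__ == "__main__":
    import pandas as pd
    spreadsheet = pd.read_excel("InterLabData.xlsx", sheet_name=None)
    for lab in ['Lab5', 'Lab6', 'Lab8']:
        AnaBF(spreadsheet, lab, control=False)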
| 40.524823
| 124
| 0.560903
|
5d97afc603e014406003675d1948ea3e92ee9846
| 139
|
py
|
Python
|
app/api/__init__.py
|
Bobfrat/sequence-alignment-app
|
64cbd790705ccd5a328da2798445e7b9ce65d647
|
[
"BSD-3-Clause"
] | null | null | null |
app/api/__init__.py
|
Bobfrat/sequence-alignment-app
|
64cbd790705ccd5a328da2798445e7b9ce65d647
|
[
"BSD-3-Clause"
] | null | null | null |
app/api/__init__.py
|
Bobfrat/sequence-alignment-app
|
64cbd790705ccd5a328da2798445e7b9ce65d647
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
'''
app/api/__init__.py
'''
from flask import Blueprint
api = Blueprint('api', __name__)
from app.api import views
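# Minimal registration sketch (assumption: the real project wires this up in an
# application factory elsewhere in the app package; the '/api' prefix is illustrative).
def _example_register(app):
    """Attach the 'api' blueprint to a Flask application instance."""
    app.register_blueprint(api, url_prefix='/api')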
| 13.9
| 32
| 0.71223
|
e14c03468004e1217e46653088f78ec4852b9175
| 1,741
|
py
|
Python
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/ad_group_type.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/ad_group_type.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/ad_group_type.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'AdGroupTypeEnum',
},
)
class AdGroupTypeEnum(proto.Message):
r"""Defines types of an ad group, specific to a particular
campaign channel type. This type drives validations that
restrict which entities can be added to the ad group.
"""
class AdGroupType(proto.Enum):
r"""Enum listing the possible types of an ad group."""
UNSPECIFIED = 0
UNKNOWN = 1
SEARCH_STANDARD = 2
DISPLAY_STANDARD = 3
SHOPPING_PRODUCT_ADS = 4
HOTEL_ADS = 6
SHOPPING_SMART_ADS = 7
VIDEO_BUMPER = 8
VIDEO_TRUE_VIEW_IN_STREAM = 9
VIDEO_TRUE_VIEW_IN_DISPLAY = 10
VIDEO_NON_SKIPPABLE_IN_STREAM = 11
VIDEO_OUTSTREAM = 12
SEARCH_DYNAMIC_ADS = 13
SHOPPING_COMPARISON_LISTING_ADS = 14
PROMOTED_HOTEL_ADS = 15
VIDEO_RESPONSIVE = 16
VIDEO_EFFICIENT_REACH = 17
SMART_CAMPAIGN_ADS = 18
__all__ = tuple(sorted(__protobuf__.manifest))
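# Usage sketch (an assumption based on typical proto-plus behaviour, not taken
# from this generated file): enum members compare and convert like normal
# Python integer enums.
def _example_usage():
    """Illustrative only; assumes standard proto-plus enum behaviour."""
    member = AdGroupTypeEnum.AdGroupType.SEARCH_STANDARD
    return member.name, int(member)  # expected: ('SEARCH_STANDARD', 2)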
| 30.54386
| 74
| 0.686962
|
61ec36c3c4aed1e1b1d9150ea68fe3443e343e9f
| 1,142
|
py
|
Python
|
config.py
|
itzcy/PiDashboard-Python
|
1cd988163bd0265cf47e04834424c9b765936c40
|
[
"MIT"
] | 1
|
2019-10-22T04:41:29.000Z
|
2019-10-22T04:41:29.000Z
|
config.py
|
itzcy/PiDashboard-Python
|
1cd988163bd0265cf47e04834424c9b765936c40
|
[
"MIT"
] | 1
|
2021-06-01T22:35:17.000Z
|
2021-06-01T22:35:17.000Z
|
config.py
|
itzcy/PiDashboard-Python
|
1cd988163bd0265cf47e04834424c9b765936c40
|
[
"MIT"
] | null | null | null |
import os
# Current directory
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
HOST = '0.0.0.0'
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
@staticmethod
    # The staticmethod decorator means this can be called directly on the class
    def init_app(app):  # Perform the initialization required by the current environment
pass
class DevelopmentConfig(Config):  # Development environment
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):  # Testing environment
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):  # Production environment
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
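# Usage sketch (assumption: a Flask application factory exists elsewhere; shown
# only to illustrate how the `config` mapping above is meant to be consumed).
def _example_create_app(config_name='default'):
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    return app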
| 32.628571
| 125
| 0.707531
|
d4d287544ddbf53d7be9244867211c8993014058
| 1,941
|
py
|
Python
|
ash/webui/file_manager/resources/gen_main_html.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
ash/webui/file_manager/resources/gen_main_html.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 86
|
2015-10-21T13:02:42.000Z
|
2022-03-14T07:50:50.000Z
|
ash/webui/file_manager/resources/gen_main_html.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
#!/usr/bin/env python
#
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate SWA files app main.html from files app main.html"""
from __future__ import print_function
import fileinput
import optparse
import os
import shutil
import sys
_SWA = '<script type="module" src="chrome://file-manager/main.js"></script>'
def GenerateSwaMainHtml(source, target):
"""Copy source file to target, do SWA edits, then add BUILD time stamp."""
# Copy source (main.html) file to the target (main.html) file.
shutil.copyfile(source, target)
# Edit the target file.
for line in fileinput.input(target, inplace=True):
# Add _SWA <script> tag after the <head> tag.
if line.find('<head>') >= 0:
print(line + ' ' + _SWA)
# Add <meta> charset="utf-8" attribute.
elif line.find('<meta ') >= 0:
sys.stdout.write(line.replace('<meta ', '<meta charset="utf-8" '))
    # Ignore HTML Imports and its polyfill.
elif 'rel="import"' in line or 'html-imports' in line:
continue
# Remove files app foreground/js <script> tags: SWA app must load
# them after the SWA app has initialized needed resources.
elif line.find('src="foreground/js/main.') == -1:
sys.stdout.write(line)
# Create a BUILD time stamp for the target file.
open(target + '.stamp', 'a').close()
def main(args):
parser = optparse.OptionParser()
parser.add_option('--source', help='Files app main.html source file.')
parser.add_option('--target', help='Target SWA main.html for output.')
options, _ = parser.parse_args(args)
if options.source and options.target:
target = os.path.join(os.getcwd(), options.target)
GenerateSwaMainHtml(options.source, target)
return
raise SyntaxError('Usage: all arguments are required.')
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
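# Manual-run sketch (hypothetical paths; in the Chromium build this script is
# normally invoked with --source/--target by the build system instead).
def _example_manual_run():
    """Illustrative only: regenerate a SWA main.html from assumed paths."""
    GenerateSwaMainHtml('ui/file_manager/file_manager/main.html',
                        'gen/main.html')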
| 31.819672
| 76
| 0.688305
|
85427b01d7587adaa3b432e84fb738a8d798688d
| 1,281
|
py
|
Python
|
src/main/resources/csv_etl.py
|
justin2061/etc-data-lab
|
e995475747d95a7e2de1923f1ed848bdc80b34d5
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/csv_etl.py
|
justin2061/etc-data-lab
|
e995475747d95a7e2de1923f1ed848bdc80b34d5
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/csv_etl.py
|
justin2061/etc-data-lab
|
e995475747d95a7e2de1923f1ed848bdc80b34d5
|
[
"Apache-2.0"
] | null | null | null |
# Shell driver that runs this script over every CSV file (kept here as a
# comment so the module stays valid Python):
#
#   PYTHON=`which python`
#   for f in $(find . -name \*.csv -print); do
#       echo "File -> $f"
#       $PYTHON csv_etl.py $f
#   done
# -*- coding: utf-8 -*-
import csv
import mysql.connector
#import time
import sys
from datetime import date, datetime, timedelta
#print sys.argv
if len(sys.argv) < 2:
    print('no csv filename.')
sys.exit()
#print sys.argv[1]
add_m06a_data = ("insert into M06A(VehicleType, DetectionTime_O, GantryID_O, DetectionTime_D, GantryID_D, TripLength, TripEnd, TripInformation) "
                 "values (%s, %s, %s, %s, %s, %s, %s, %s)")
cnx = mysql.connector.connect(user='etc_user', password='1qaz!QAZ', host='10.64.32.48', database='etc')
cursor = cnx.cursor()
#csv_filepath = 'D:/etc/all_csv/TDCS_M06A_20140101_000000.csv'
csv_filepath = sys.argv[1]
f = open(csv_filepath, 'r')
try:
for row in csv.reader(f):
#print row[1]
r1 = datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S')
r3 = datetime.strptime(row[3], '%Y-%m-%d %H:%M:%S')
m06a_data = (row[0], r1, row[2], r3, row[4], row[5], row[6], row[7])
#print m06a_data
cursor.execute(add_m06a_data, m06a_data)
cnx.commit()
except Exception as e:
print(e)
cursor.close()
f.close()
cnx.close()
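# Batching sketch (not part of the original script; reuses the add_m06a_data
# statement defined above). cursor.executemany() sends rows in chunks instead
# of one execute()/commit() per row, which is usually much faster for bulk loads.
def _bulk_insert(cursor, rows, batch_size=1000):
    """Insert pre-parsed M06A rows in batches using an open MySQL cursor."""
    for i in range(0, len(rows), batch_size):
        cursor.executemany(add_m06a_data, rows[i:i + batch_size])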
| 28.466667
| 167
| 0.651054
|
f1781723f7857da106116e348e8a87396596e6be
| 2,964
|
py
|
Python
|
test/test_day13.py
|
daniel-stockhausen/adventofcode2021
|
e21969a9bf6fe3e9d520cf36dc34de02e24dd0d2
|
[
"MIT"
] | null | null | null |
test/test_day13.py
|
daniel-stockhausen/adventofcode2021
|
e21969a9bf6fe3e9d520cf36dc34de02e24dd0d2
|
[
"MIT"
] | null | null | null |
test/test_day13.py
|
daniel-stockhausen/adventofcode2021
|
e21969a9bf6fe3e9d520cf36dc34de02e24dd0d2
|
[
"MIT"
] | null | null | null |
import os
import unittest
from aoc.day13.day13 import count_dots_after_folds, do_fold, do_folds, get_example_data, get_input_data
def paper_str_to_lst(paper_str: str) -> list[list[str]]:
paper_expected1: list[list[str]] = []
for line in paper_str.split("\n"):
paper_expected1.append(list(line))
return paper_expected1
class TestDay13(unittest.TestCase):
def test_get_example_data(self):
paper_expected_str = (
"...#..#..#.\n"
"....#......\n"
"...........\n"
"#..........\n"
"...#....#.#\n"
"...........\n"
"...........\n"
"...........\n"
"...........\n"
"...........\n"
".#....#.##.\n"
"....#......\n"
"......#...#\n"
"#..........\n"
"#.#........"
)
paper_expected = paper_str_to_lst(paper_expected_str)
paper_actual, folds_actual = get_example_data()
self.assertEqual(paper_expected, paper_actual)
self.assertEqual([("y", 7), ("x", 5)], folds_actual)
def test_do_fold(self):
paper_expected1_str = (
"#.##..#..#.\n"
"#...#......\n"
"......#...#\n"
"#...#......\n"
".#.#..#.###\n"
"...........\n"
"..........."
)
paper_expected1 = paper_str_to_lst(paper_expected1_str)
paper, folds = get_example_data()
paper_folded1 = do_fold(paper, folds[0])
self.assertEqual(paper_expected1, paper_folded1)
paper_expected2_str = (
"#####\n"
"#...#\n"
"#...#\n"
"#...#\n"
"#####\n"
".....\n"
"....."
)
paper_expected2 = paper_str_to_lst(paper_expected2_str)
paper_folded2 = do_fold(paper_folded1, folds[1])
self.assertEqual(paper_expected2, paper_folded2)
def test_13a_example(self):
paper, folds = get_example_data()
paper_folded1 = count_dots_after_folds(paper, folds, 1)
self.assertEqual(17, paper_folded1)
def test_13a(self):
paper, folds = get_input_data()
self.assertEqual(814, count_dots_after_folds(paper, folds, 1))
def test_13b(self):
paper_expected_str = (
"###..####.####.#..#.###...##..####.###..\n"
"#..#....#.#....#..#.#..#.#..#.#....#..#.\n"
"#..#...#..###..####.#..#.#..#.###..#..#.\n"
"###...#...#....#..#.###..####.#....###..\n"
"#....#....#....#..#.#.#..#..#.#....#.#..\n"
"#....####.####.#..#.#..#.#..#.####.#..#."
) # PZEHRAER
paper_expected = paper_str_to_lst(paper_expected_str)
paper, folds = get_input_data()
self.assertEqual(paper_expected, do_folds(paper, folds))
def test_13_main(self):
self.assertEqual(0, os.system("python -m aoc.day13.day13"))
| 31.531915
| 103
| 0.437922
|
e89918a2486fa60ab48c408bc5ce2fde72c30568
| 7,422
|
py
|
Python
|
ansible/lib/ansible/modules/extras/web_infrastructure/ejabberd_user.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/extras/web_infrastructure/ejabberd_user.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/extras/web_infrastructure/ejabberd_user.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: ejabberd_user
version_added: "1.5"
author: "Peter Sprygada (@privateip)"
short_description: Manages users for ejabberd servers
requirements:
- ejabberd with mod_admin_extra
description:
- This module provides user management for ejabberd servers
options:
username:
description:
- the name of the user to manage
required: true
host:
description:
- the ejabberd host associated with this username
required: true
password:
description:
- the password to assign to the username
required: false
logging:
description:
- enables or disables the local syslog facility for this module
required: false
default: false
choices: [ 'true', 'false', 'yes', 'no' ]
state:
description:
- describe the desired state of the user to be managed
required: false
default: 'present'
choices: [ 'present', 'absent' ]
notes:
- Password parameter is required for state == present only
- Passwords must be stored in clear text for this release
- The ejabberd configuration file must include mod_admin_extra as a module.
'''
EXAMPLES = '''
Example playbook entries using the ejabberd_user module to manage user state.
tasks:
- name: create a user if it does not exists
action: ejabberd_user username=test host=server password=password
- name: delete a user if it exists
action: ejabberd_user username=test host=server state=absent
'''
import syslog
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
class EjabberdUserException(Exception):
""" Base exeption for EjabberdUser class object """
pass
class EjabberdUser(object):
""" This object represents a user resource for an ejabberd server. The
object manages user creation and deletion using ejabberdctl. The following
commands are currently supported:
* ejabberdctl register
    * ejabberdctl unregister
"""
def __init__(self, module):
self.module = module
self.logging = module.params.get('logging')
self.state = module.params.get('state')
self.host = module.params.get('host')
self.user = module.params.get('username')
self.pwd = module.params.get('password')
@property
def changed(self):
""" This method will check the current user and see if the password has
        changed. It will return a truthy value if the stored password does not
        match the supplied credentials and a falsy value if it does.
"""
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('check_password', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return rc
@property
def exists(self):
""" This method will check to see if the supplied username exists for
        the host specified. If the user exists True is returned, otherwise False
is returned
"""
try:
options = [self.user, self.host]
(rc, out, err) = self.run_command('check_account', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return not bool(int(rc))
def log(self, entry):
""" This method will log information to the local syslog facility """
if self.logging:
syslog.openlog('ansible-%s' % self.module._name)
syslog.syslog(syslog.LOG_NOTICE, entry)
def run_command(self, cmd, options):
""" This method will run the any command specified and return the
returns using the Ansible common module
"""
if not all(options):
raise EjabberdUserException
cmd = 'ejabberdctl %s ' % cmd
cmd += " ".join(options)
self.log('command: %s' % cmd)
return self.module.run_command(cmd.split())
def update(self):
""" The update method will update the credentials for the user provided
"""
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('change_password', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
def create(self):
""" The create method will create a new user on the host with the
password provided
"""
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('register', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
def delete(self):
""" The delete method will delete the user from the host
"""
try:
options = [self.user, self.host]
(rc, out, err) = self.run_command('unregister', options)
except EjabberdUserException:
e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
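# Command-assembly sketch (pure Python, no Ansible runtime needed): this is the
# argv list that run_command() above hands to module.run_command(); the values
# shown in the trailing comment are illustrative.
def _example_ejabberdctl_argv(action, options):
    """Return the ejabberdctl argv list for the given action and options."""
    cmd = 'ejabberdctl %s ' % action
    cmd += " ".join(options)
    return cmd.split()

# _example_ejabberdctl_argv('register', ['test', 'server', 'secret'])
# -> ['ejabberdctl', 'register', 'test', 'server', 'secret']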
def main():
module = AnsibleModule(
argument_spec = dict(
host=dict(default=None, type='str'),
username=dict(default=None, type='str'),
password=dict(default=None, type='str'),
state=dict(default='present', choices=['present', 'absent']),
logging=dict(default=False, type='bool')
),
supports_check_mode = True
)
obj = EjabberdUser(module)
rc = None
result = dict()
if obj.state == 'absent':
if obj.exists:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.delete()
if rc != 0:
module.fail_json(msg=err, rc=rc)
elif obj.state == 'present':
if not obj.exists:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.create()
elif obj.changed:
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = obj.update()
if rc is not None and rc != 0:
module.fail_json(msg=err, rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
module.exit_json(**result)
main()
| 33.736364
| 79
| 0.613716
|
c3dca51f98e1acdf42061288bc1a4299b1ea2f4b
| 4,424
|
py
|
Python
|
ucsmsdk/mometa/ape/ApeReading.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/ape/ApeReading.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/ape/ApeReading.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for ApeReading ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ApeReadingConsts:
pass
class ApeReading(ManagedObject):
"""This is ApeReading class."""
consts = ApeReadingConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("ApeReading", "apeReading", "reading-[id]", VersionMeta.Version101e, "InputOutput", 0xffff, [], ["read-only"], [u'apeMcTable'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"is_analog": MoPropertyMeta("is_analog", "is_analog", "ushort", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"sensor_type": MoPropertyMeta("sensor_type", "sensorType", "ushort", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], []),
"state": MoPropertyMeta("state", "state", "ushort", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"threshold_lc": MoPropertyMeta("threshold_lc", "thresholdLc", "float", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, [], []),
"threshold_lnc": MoPropertyMeta("threshold_lnc", "thresholdLnc", "float", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, [], []),
"threshold_lnr": MoPropertyMeta("threshold_lnr", "thresholdLnr", "float", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x800, None, None, None, [], []),
"threshold_uc": MoPropertyMeta("threshold_uc", "thresholdUc", "float", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x1000, None, None, None, [], []),
"threshold_unc": MoPropertyMeta("threshold_unc", "thresholdUnc", "float", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x2000, None, None, None, [], []),
"threshold_unr": MoPropertyMeta("threshold_unr", "thresholdUnr", "float", VersionMeta.Version311e, MoPropertyMeta.READ_WRITE, 0x4000, None, None, None, [], []),
"value": MoPropertyMeta("value", "value", "ushort", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x8000, None, None, None, [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"is_analog": "is_analog",
"rn": "rn",
"sacl": "sacl",
"sensorType": "sensor_type",
"state": "state",
"status": "status",
"thresholdLc": "threshold_lc",
"thresholdLnc": "threshold_lnc",
"thresholdLnr": "threshold_lnr",
"thresholdUc": "threshold_uc",
"thresholdUnc": "threshold_unc",
"thresholdUnr": "threshold_unr",
"value": "value",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.is_analog = None
self.sacl = None
self.sensor_type = None
self.state = None
self.status = None
self.threshold_lc = None
self.threshold_lnc = None
self.threshold_lnr = None
self.threshold_uc = None
self.threshold_unc = None
self.threshold_unr = None
self.value = None
ManagedObject.__init__(self, "ApeReading", parent_mo_or_dn, **kwargs)
| 58.210526
| 248
| 0.648734
|
b906e12d542ac4c65f279b97c4e0eb7329f3ed56
| 37,830
|
py
|
Python
|
analyzer/tests/functional/analyze/test_analyze.py
|
nettle/codechecker
|
1cc5855c7c2820b2a1dccb8c2a6fac1e19b344ab
|
[
"Apache-2.0"
] | null | null | null |
analyzer/tests/functional/analyze/test_analyze.py
|
nettle/codechecker
|
1cc5855c7c2820b2a1dccb8c2a6fac1e19b344ab
|
[
"Apache-2.0"
] | null | null | null |
analyzer/tests/functional/analyze/test_analyze.py
|
nettle/codechecker
|
1cc5855c7c2820b2a1dccb8c2a6fac1e19b344ab
|
[
"Apache-2.0"
] | null | null | null |
#
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Test case for the CodeChecker analyze command's direct functionality.
"""
import glob
import json
import os
import re
import shutil
import subprocess
import unittest
import zipfile
from libtest import env
from codechecker_analyzer.analyzers.clangsa import version
class TestAnalyze(unittest.TestCase):
_ccClient = None
def setUp(self):
        # TEST_WORKSPACE is automatically set by the test package __init__.py.
self.test_workspace = os.environ['TEST_WORKSPACE']
test_class = self.__class__.__name__
print('Running ' + test_class + ' tests in ' + self.test_workspace)
# Get the CodeChecker cmd if needed for the tests.
self._codechecker_cmd = env.codechecker_cmd()
self.report_dir = os.path.join(self.test_workspace, "reports")
self.test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
# Change working dir to testfile dir so CodeChecker can be run easily.
self.__old_pwd = os.getcwd()
os.chdir(self.test_dir)
self.missing_checker_regex = re.compile(
r"No checker\(s\) with these names was found")
def tearDown(self):
"""Restore environment after tests have ran."""
os.chdir(self.__old_pwd)
if os.path.isdir(self.report_dir):
shutil.rmtree(self.report_dir)
def __analyze_incremental(self, content_, build_json, reports_dir,
plist_count, failed_count):
"""
        Helper function to test analyze incremental mode. It creates a file
        with the given content, runs analyze on that file, and checks the
        counts of the plist and error files.
"""
source_file = os.path.join(self.test_workspace, "simple.cpp")
# Write content to the test file
with open(source_file, 'w',
encoding="utf-8", errors="ignore") as source:
source.write(content_)
# Create analyze command.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", reports_dir]
# Run analyze
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, err = process.communicate()
print(out)
print(err)
errcode = process.returncode
# This function checks incremental analysis. There are some test cases
        # for failed analysis during incremental analysis, so the error code
# can also be 3.
self.assertIn(errcode, [0, 3])
# Check the count of the plist files.
plist_files = [os.path.join(reports_dir, filename)
for filename in os.listdir(reports_dir)
if filename.endswith('.plist')]
self.assertEqual(len(plist_files), plist_count)
# Check the count of the error files.
failed_dir = os.path.join(reports_dir, "failed")
failed_file_count = 0
if os.path.exists(failed_dir):
failed_files = [os.path.join(failed_dir, filename)
for filename in os.listdir(failed_dir)
if filename.endswith('.zip')]
failed_file_count = len(failed_files)
for f in failed_files:
os.remove(f)
self.assertEqual(failed_file_count, failed_count)
@unittest.skipIf(version.get("gcc") is not None,
"If gcc or g++ is a symlink to clang this test should be "
"skipped. Option filtering is different for the two "
"compilers. This test is gcc/g++ specific.")
def test_compiler_info_files(self):
'''
Test that the compiler info files are generated
'''
# GIVEN
build_json = os.path.join(self.test_workspace, "build_simple.json")
reports_dir = self.report_dir
source_file_cpp = os.path.join(self.test_workspace, "simple.cpp")
source_file_c = os.path.join(self.test_workspace, "simple.c")
# Create a compilation database.
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file_c,
"file": source_file_c
},
{"directory": self.test_workspace,
"command": "clang++ -c " + source_file_cpp,
"file": source_file_cpp
}
]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
# Test file contents
simple_file_content = "int main() { return 0; }"
# Write content to the test file
with open(source_file_cpp, 'w',
encoding="utf-8", errors="ignore") as source:
source.write(simple_file_content)
with open(source_file_c, 'w',
encoding="utf-8", errors="ignore") as source:
source.write(simple_file_content)
# Create analyze command.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", reports_dir]
# WHEN
# Run analyze.
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
process.communicate()
# THEN
errcode = process.returncode
self.assertEqual(errcode, 0)
info_File = os.path.join(reports_dir, 'compiler_info.json')
self.assertEqual(os.path.exists(info_File), True)
self.assertNotEqual(os.stat(info_File).st_size, 0)
# Test the validity of the json files.
with open(info_File, 'r', encoding="utf-8", errors="ignore") as f:
try:
data = json.load(f)
self.assertEqual(len(data), 1)
# For clang we do not collect anything.
self.assertTrue("g++" in data)
except ValueError:
self.fail("json.load should successfully parse the file %s"
% info_File)
def test_compiler_info_file_is_loaded(self):
'''
Test that compiler info file is loaded if option is set.
'''
reports_dir = self.report_dir
build_json = os.path.join(self.test_workspace, "build_simple.json")
source_file = os.path.join(self.test_workspace, "simple.cpp")
compiler_info_file = os.path.join(self.test_workspace,
"compiler_info.json")
# Create a compilation database.
build_log = [{"directory": self.test_workspace,
"command": "clang++ -c " + source_file,
"file": source_file}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
# Test file contents
simple_file_content = "int main() { return 0; }"
# Write content to the test file
with open(source_file, 'w',
encoding="utf-8", errors="ignore") as source:
source.write(simple_file_content)
with open(compiler_info_file, 'w',
encoding="utf-8", errors="ignore") as source:
source.write('''{
"clang++": {
"c++": {
"compiler_standard": "-std=FAKE_STD",
"target": "FAKE_TARGET",
"compiler_includes": [
"-isystem /FAKE_INCLUDE_DIR"
]
}
}
}''')
# Create analyze command.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--compiler-info-file", compiler_info_file,
"--analyzers", "clangsa", "--verbose", "debug",
"-o", reports_dir]
# Run analyze.
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
print(out)
self.assertTrue("-std=FAKE_STD" in out)
self.assertTrue("--target=FAKE_TARGET" in out)
self.assertTrue("-isystem /FAKE_INCLUDE_DIR" in out)
def test_capture_analysis_output(self):
"""
Test if reports/success/<output_file>.[stdout,stderr].txt
files are created
"""
build_json = os.path.join(self.test_workspace, "build_success.json")
success_dir = os.path.join(self.report_dir, "success")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"--capture-analysis-output"]
source_file = os.path.join(self.test_dir, "success.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, err = process.communicate()
print(out)
print(err)
errcode = process.returncode
self.assertEqual(errcode, 0)
        # We expect the success stderr file in the success directory.
success_files = os.listdir(success_dir)
print(success_files)
self.assertEqual(len(success_files), 1)
self.assertIn("success.c", success_files[0])
os.remove(os.path.join(success_dir, success_files[0]))
def test_failure(self):
"""
Test if reports/failed/<failed_file>.zip file is created
"""
build_json = os.path.join(self.test_workspace, "build.json")
failed_dir = os.path.join(self.report_dir, "failed")
source_file = os.path.join(self.test_dir, "failure.c")
# Create a compilation database.
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
# Create and run analyze command.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "--verbose", "debug",
"-o", self.report_dir]
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, err = process.communicate()
print(out)
print(err)
errcode = process.returncode
self.assertEqual(errcode, 3)
self.assertNotIn("UserWarning: Duplicate name", err)
# We expect a failure archive to be in the failed directory.
failed_files = os.listdir(failed_dir)
self.assertEqual(len(failed_files), 1)
fail_zip = os.path.join(failed_dir, failed_files[0])
with zipfile.ZipFile(fail_zip, 'r') as archive:
files = archive.namelist()
self.assertIn("build-action", files)
self.assertIn("analyzer-command", files)
with archive.open("build-action", 'r') as archived_buildcmd:
self.assertEqual(archived_buildcmd.read().decode("utf-8"),
"gcc -c " + source_file)
source_in_archive = os.path.join("sources-root",
source_file.lstrip('/'))
self.assertIn(source_in_archive, files)
with archive.open(source_in_archive, 'r') as archived_code:
with open(source_file, 'r',
encoding="utf-8", errors="ignore") as source_code:
self.assertEqual(archived_code.read().decode("utf-8"),
source_code.read())
os.remove(os.path.join(failed_dir, failed_files[0]))
def test_robustness_for_dependencygen_failure(self):
"""
Test if failure ZIP is created even if the dependency generator creates
an invalid output.
"""
build_json = os.path.join(self.test_workspace, "build.json")
failed_dir = os.path.join(self.report_dir, "failed")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "--verbose", "debug",
"-o", self.report_dir]
source_file = os.path.join(self.test_dir, "failure.c")
build_log = [{"directory": self.test_workspace,
"command": "cc -c -std=c++11 " + source_file,
"file": source_file
}]
# cc -std=c++11 writes error "-std=c++11 valid for C++ but not for C"
# to its output when invoked as a dependency generator for this
# build command.
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
process.communicate()
errcode = process.returncode
self.assertEqual(errcode, 3)
# We expect a failure archive to be in the failed directory.
failed_files = os.listdir(failed_dir)
print(failed_files)
self.assertEqual(len(failed_files), 1)
self.assertIn("failure.c", failed_files[0])
os.remove(os.path.join(failed_dir, failed_files[0]))
def test_incremental_analyze(self):
"""
        Test the incremental mode of the analysis command, which overwrites
        only those plist files that were updated by the current build command.
"""
build_json = os.path.join(self.test_workspace, "build_simple.json")
reports_dir = os.path.join(self.test_workspace, "reports_incremental")
source_file = os.path.join(self.test_workspace, "simple.cpp")
# Create a compilation database.
build_log = [{"directory": self.test_workspace,
"command": "g++ -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
# Test file contents
simple_file_content = "int main() { return 0; }"
failed_file_content = "int main() { err; return 0; }"
# Run analyze on the simple file.
self.__analyze_incremental(simple_file_content, build_json,
reports_dir, 1, 0)
# Run analyze on the failed file.
self.__analyze_incremental(failed_file_content, build_json,
reports_dir, 0, 1)
# Run analyze on the simple file again.
self.__analyze_incremental(simple_file_content, build_json,
reports_dir, 1, 0)
def test_relative_include_paths(self):
"""
Test if the build json contains relative paths.
"""
build_json = os.path.join(self.test_workspace, "build_simple_rel.json")
report_dir = os.path.join(self.test_workspace, "reports_relative")
source_file = os.path.join(self.test_dir, "simple.c")
failed_dir = os.path.join(report_dir, "failed")
# Create a compilation database.
build_log = [{"directory": self.test_dir,
"command": "cc -c " + source_file + " -Iincludes",
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir]
# CodeChecker is executed in a different
# dir than the containing folder of simple.c.
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
process.communicate()
errcode = process.returncode
self.assertEqual(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
def test_tidyargs_saargs(self):
"""
Test tidyargs and saargs config files
"""
build_json = os.path.join(self.test_workspace, "build_extra_args.json")
report_dir = os.path.join(self.test_workspace, "reports_extra_args")
source_file = os.path.join(self.test_dir, "extra_args.cpp")
tidyargs_file = os.path.join(self.test_dir, "tidyargs")
saargs_file = os.path.join(self.test_dir, "saargs")
build_log = [{"directory": self.test_dir,
"command": "g++ -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"-o", report_dir, "--tidyargs", tidyargs_file,
"--analyzer-config", 'clang-tidy:HeaderFilterRegex=.*',
'clang-tidy:Checks=modernize-use-bool-literals']
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
process.communicate()
process = subprocess.Popen(
[self._codechecker_cmd, "parse", report_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
self.assertIn("division by zero", out)
self.assertIn("modernize-avoid-bind", out)
self.assertNotIn("performance-for-range-copy", out)
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"-o", report_dir, "--saargs", saargs_file]
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
process.communicate()
process = subprocess.Popen(
[self._codechecker_cmd, "parse", report_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
self.assertIn("Dereference of null pointer", out)
def unique_json_helper(self, unique_json, is_a, is_b, is_s):
with open(unique_json,
encoding="utf-8", errors="ignore") as json_file:
data = json.load(json_file)
simple_a = False
simple_b = False
success = False
for d in data:
if "simple_a.o" in d["command"]:
simple_a = True
if "simple_b.o" in d["command"]:
simple_b = True
if "success.o" in d["command"]:
success = True
self.assertEqual(simple_a, is_a)
self.assertEqual(simple_b, is_b)
self.assertEqual(success, is_s)
def test_compile_uniqueing(self):
"""
        Test compilation uniqueing.
"""
build_json = os.path.join(self.test_workspace, "build_simple_rel.json")
report_dir = os.path.join(self.test_workspace, "reports_relative")
source_file = os.path.join(self.test_dir, "simple.c")
source_file2 = os.path.join(self.test_dir, "success.c")
failed_dir = os.path.join(report_dir, "failed")
unique_json = os.path.join(report_dir, "unique_compile_commands.json")
# Create a compilation database.
build_log = [{"directory": self.test_dir,
"command": "cc -c " + source_file +
" -Iincludes -o simple_b.o",
"file": source_file},
{"directory": self.test_dir,
"command": "cc -c " + source_file +
" -Iincludes -o simple_a.o",
"file": source_file},
{"directory": self.test_dir,
"command": "cc -c " + source_file2 +
" -Iincludes -o success.o",
"file": source_file2}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
# Testing alphabetic uniqueing mode.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", "alpha"]
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
process.communicate()
errcode = process.returncode
self.assertEqual(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
self.unique_json_helper(unique_json, True, False, True)
# Testing regex mode.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", ".*_b.*"]
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
process.communicate()
errcode = process.returncode
self.assertEqual(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
self.unique_json_helper(unique_json, False, True, True)
        # Testing regex mode error handling.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", ".*simple.*"]
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
process.communicate()
errcode = process.returncode
        # Since .*simple.* matches 2 files, we get an error.
self.assertEqual(errcode, 1)
# Testing strict mode
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", "strict", "--verbose", "debug"]
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
process.communicate()
        # In strict mode the analysis must fail if there is
        # more than one build command for a single source.
errcode = process.returncode
self.assertEqual(errcode, 1)
self.assertFalse(os.path.isdir(failed_dir))
# Testing None mode.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", "none"]
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_workspace,
encoding="utf-8",
errors="ignore")
process.communicate()
errcode = process.returncode
self.assertEqual(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
self.unique_json_helper(unique_json, True, True, True)
def test_invalid_enabled_checker_name(self):
"""Warn in case of an invalid enabled checker."""
build_json = os.path.join(self.test_workspace, "build_success.json")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"-e", "non-existing-checker-name"]
source_file = os.path.join(self.test_dir, "success.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
match = self.missing_checker_regex.search(out)
self.assertIsNotNone(match)
self.assertTrue("non-existing-checker-name" in out)
errcode = process.returncode
self.assertEqual(errcode, 0)
def test_disable_all_warnings(self):
"""Test disabling warnings as checker groups."""
build_json = os.path.join(self.test_workspace, "build.json")
analyze_cmd = [self._codechecker_cmd, "check", "-l", build_json,
"--analyzers", "clang-tidy",
"-d", "clang-diagnostic",
"-e", "clang-diagnostic-unused"]
source_file = os.path.join(self.test_dir, "compiler_warning.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
self.assertNotIn("format specifies type 'int' but the argument has "
"type 'char *' [clang-diagnostic-format]", out)
self.assertIn("unused variable 'i' [clang-diagnostic-unused-variable]",
out)
def test_invalid_disabled_checker_name(self):
"""Warn in case of an invalid disabled checker."""
build_json = os.path.join(self.test_workspace, "build_success.json")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"-d", "non-existing-checker-name"]
source_file = os.path.join(self.test_dir, "success.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
match = self.missing_checker_regex.search(out)
self.assertIsNotNone(match)
self.assertTrue("non-existing-checker-name" in out)
errcode = process.returncode
self.assertEqual(errcode, 0)
def test_multiple_invalid_checker_names(self):
"""Warn in case of multiple invalid checker names."""
build_json = os.path.join(self.test_workspace, "build_success.json")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"-e", "non-existing-checker-name",
"-e", "non-existing-checker",
"-d", "missing.checker",
"-d", "other.missing.checker"]
source_file = os.path.join(self.test_dir, "success.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
match = self.missing_checker_regex.search(out)
self.assertIsNotNone(match)
self.assertTrue("non-existing-checker-name" in out)
self.assertTrue("non-existing-checker" in out)
self.assertTrue("missing.checker" in out)
self.assertTrue("other.missing.checker" in out)
errcode = process.returncode
self.assertEqual(errcode, 0)
def test_makefile_generation(self):
""" Test makefile generation. """
build_json = os.path.join(self.test_workspace, "build_extra_args.json")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"-o", self.report_dir, '--makefile']
source_file = os.path.join(self.test_dir, "extra_args.cpp")
build_log = [{"directory": self.test_workspace,
"command": "g++ -DTIDYARGS -c " + source_file,
"file": source_file
},
{"directory": self.test_workspace,
"command": "g++ -DSAARGS -DTIDYARGS -c " + source_file,
"file": source_file
}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
process.communicate()
errcode = process.returncode
self.assertEqual(errcode, 0)
# Check the existence of the Makefile.
makefile = os.path.join(self.report_dir, 'Makefile')
self.assertTrue(os.path.exists(makefile))
        # Run the generated Makefile and check its return code.
process = subprocess.Popen(["make"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.report_dir,
encoding="utf-8",
errors="ignore")
process.communicate()
errcode = process.returncode
self.assertEqual(errcode, 0)
plist_files = glob.glob(os.path.join(self.report_dir, '*.plist'))
self.assertEqual(len(plist_files), 4)
def test_analyzer_and_checker_config(self):
"""Test analyzer configuration through command line flags."""
build_json = os.path.join(self.test_workspace, "build_success.json")
source_file = os.path.join(self.test_dir, "checker_config.cpp")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file}]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
json.dump(build_log, outfile)
analyze_cmd = [self._codechecker_cmd, "check", "-l", build_json,
"--analyzers", "clang-tidy", "-o", self.report_dir,
"--analyzer-config",
"clang-tidy:Checks=hicpp-use-nullptr"]
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
print(out)
        # With this configuration the checker name is only expected to appear
        # once in the output.
self.assertEqual(out.count('hicpp-use-nullptr'), 1)
analyze_cmd = [self._codechecker_cmd, "check", "-l", build_json,
"--analyzers", "clang-tidy", "-o", self.report_dir,
"--analyzer-config",
"clang-tidy:Checks=hicpp-use-nullptr",
"--checker-config",
"clang-tidy:hicpp-use-nullptr.NullMacros=MY_NULL"]
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
        # With the NullMacros checker option also set, the checker name is
        # expected to appear twice in the output.
self.assertEqual(out.count('hicpp-use-nullptr'), 2)
analyze_cmd = [self._codechecker_cmd, "check", "-l", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"--checker-config",
"clangsa:optin.cplusplus.UninitializedObject:Pedantic"
"=true"]
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
print(out)
        # First it's printed as a member of the enabled checkers at the
        # beginning of the output, then as a found report.
self.assertEqual(out.count('UninitializedObject'), 2)
analyze_cmd = [self._codechecker_cmd, "check", "-l", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"--checker-config",
"clangsa:optin.cplusplus.UninitializedObject:Pedantic"
"=true",
"--analyzer-config",
"clangsa:max-nodes=1"]
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
out, _ = process.communicate()
print(out)
        # It is printed as a member of the enabled checkers, but it gives no
        # report.
self.assertEqual(out.count('UninitializedObject'), 1)
def test_invalid_compilation_database(self):
""" Warn in case of an invalid enabled checker. """
build_json = os.path.join(self.test_workspace, "build_corrupted.json")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"-o", self.report_dir]
with open(build_json, 'w',
encoding="utf-8", errors="ignore") as outfile:
outfile.write("Corrupted JSON file!")
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.test_dir,
encoding="utf-8",
errors="ignore")
process.communicate()
self.assertEqual(process.returncode, 1)
| 38.096677
| 79
| 0.555009
|
8244ccbdf190c12f950888b633ba7d483cd066f4
| 74
|
py
|
Python
|
venv/lib/python3.8/site-packages/smart_open/version.py
|
hongcuongnguyen/SearchEngine
|
2cb53999aac1d41aaae3e130bd018d29c9818e60
|
[
"MIT"
] | 3
|
2021-03-29T19:21:08.000Z
|
2021-12-31T09:30:11.000Z
|
venv/lib/python3.8/site-packages/smart_open/version.py
|
hongcuongnguyen/SearchEngine
|
2cb53999aac1d41aaae3e130bd018d29c9818e60
|
[
"MIT"
] | 1
|
2021-08-30T08:53:09.000Z
|
2021-08-30T08:53:09.000Z
|
venv/lib/python3.8/site-packages/smart_open/version.py
|
hongcuongnguyen/SearchEngine
|
2cb53999aac1d41aaae3e130bd018d29c9818e60
|
[
"MIT"
] | 1
|
2021-03-30T05:02:53.000Z
|
2021-03-30T05:02:53.000Z
|
__version__ = '5.0.0'
if __name__ == '__main__':
print(__version__)
| 12.333333
| 26
| 0.648649
|
91a02a373c8ba08501c5a10d365604f239228224
| 1,823
|
py
|
Python
|
sdk/python/setup.py
|
jkisk/multihook
|
86b3aae6ba40a3f3788693fe014cfd2f183eb2b5
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/setup.py
|
jkisk/multihook
|
86b3aae6ba40a3f3788693fe014cfd2f183eb2b5
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/setup.py
|
jkisk/multihook
|
86b3aae6ba40a3f3788693fe014cfd2f183eb2b5
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import errno
from setuptools import setup, find_packages
from setuptools.command.install import install
from subprocess import check_call
class InstallPluginCommand(install):
def run(self):
install.run(self)
try:
check_call(['pulumi', 'plugin', 'install', 'resource', 'multihook', '${PLUGIN_VERSION}'])
except OSError as error:
if error.errno == errno.ENOENT:
print("""
There was an error installing the multihook resource provider plugin.
It looks like `pulumi` is not installed on your system.
Please visit https://pulumi.com/ to install the Pulumi CLI.
You may try manually installing the plugin by running
`pulumi plugin install resource multihook ${PLUGIN_VERSION}`
""")
else:
raise
def readme():
try:
with open('README.md', encoding='utf-8') as f:
return f.read()
except FileNotFoundError:
return "multihook Pulumi Package - Development Version"
setup(name='pulumi_multihook',
version='${VERSION}',
long_description=readme(),
long_description_content_type='text/markdown',
cmdclass={
'install': InstallPluginCommand,
},
packages=find_packages(),
package_data={
'pulumi_multihook': [
'py.typed',
]
},
install_requires=[
'parver>=0.2.1',
'pulumi>=3.0.0,<4.0.0',
'pulumi-aws>=4.0.0,<5.0.0',
'pulumi-github>=3.0.0,<4.0.0',
'semver>=2.8.1'
],
zip_safe=False)
| 31.431034
| 101
| 0.580911
|
7e3968b5785f538bd8221f7d40e2e046c60acbb7
| 3,737
|
py
|
Python
|
management/commands/anonymize_db.py
|
mnieber/Django-Database-Anonymizer
|
228daa70560885b96fa49255c5506a980369d464
|
[
"MIT"
] | 9
|
2016-11-22T17:29:41.000Z
|
2021-09-03T10:16:21.000Z
|
management/commands/anonymize_db.py
|
mnieber/Django-Database-Anonymizer
|
228daa70560885b96fa49255c5506a980369d464
|
[
"MIT"
] | 1
|
2018-06-18T08:09:43.000Z
|
2019-02-19T10:23:03.000Z
|
management/commands/anonymize_db.py
|
mnieber/Django-Database-Anonymizer
|
228daa70560885b96fa49255c5506a980369d464
|
[
"MIT"
] | 2
|
2016-11-22T17:29:51.000Z
|
2018-06-16T15:10:21.000Z
|
# -*- coding: utf-8 -*-
import inspect
from importlib import import_module
import sys
import imp as _imp
from optparse import make_option
from django.conf import settings
from django.core.management import BaseCommand
from anonymizer.base import BaseAnonymizer, ANONYMIZER_MODULE_NAME
class Command(BaseCommand):
help = 'This tool replaces real (user-)data of model instances in your database with mock data.'
modules = None # List of anonymizers modules. They can be placed in every app
option_list = BaseCommand.option_list + (
make_option(
"-a",
"--app",
dest="app",
help="Only anonymize the given app.",
metavar="APP"
),
make_option(
"-m",
"--model",
"--models",
dest="models",
help="Models to anonymize. Separate multiples by comma.",
metavar="MODEL"
),
make_option(
"-b",
"--batch-size",
dest="batch_size",
help="batch size used in the bulk_update of the instances. Depends on the DB machine. Use 500 in vagrant.",
metavar="BATCH_SIZE"
),
)
def handle(self, *args, **options):
models = None
if options['models'] is not None:
models = [m.strip() for m in options['models'].split(',')]
        print('Autodiscovering anonymizers')
modules = self._autodiscover_module(ANONYMIZER_MODULE_NAME, app=options['app'])
        print('Found anonymizers for {} apps'.format(len(modules)))
total_replacements_count = 0
for module in modules:
            print('{}:'.format(module.__package__))
anonymizers = self._get_app_anonymizers(module, models=models)
if len(anonymizers) == 0:
print "- No anonymizers or skipped by --app or --model arguments"
continue
for anonymizer_class_name in anonymizers:
anonymizer = getattr(module, anonymizer_class_name)()
                print('- {}'.format(anonymizer.model.__name__))
number_of_replaced_fields = anonymizer.run(options['batch_size']) # Start the anonymizing process
                print('-- {} fields, {} model instances, {} total replacements'.format(
                    number_of_replaced_fields[0],
                    number_of_replaced_fields[1],
                    number_of_replaced_fields[2]))
total_replacements_count += number_of_replaced_fields[2]
print "DONE. Replaced {} values in total".format(total_replacements_count)
def _autodiscover_module(self, module_name, app=None):
apps_to_search = [app] if app else settings.INSTALLED_APPS
modules = []
for app in apps_to_search:
try:
import_module(app)
app_path = sys.modules[app].__path__
except AttributeError:
continue
except ImportError:
                print('ERROR: Cannot find app ' + app)
exit(1)
try:
_imp.find_module(module_name, app_path)
except ImportError:
continue
import_module('%s.%s' % (app, module_name))
modules.append(sys.modules['%s.%s' % (app, module_name)])
return modules
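    # For reference, a minimal sketch of an app-level "anonymizers" module that
    # the discovery above and the filter below would pick up (module and model
    # names are illustrative, not taken from this project):
    #
    #     # myapp/anonymizers.py
    #     from anonymizer.base import BaseAnonymizer
    #     from myapp.models import Customer
    #
    #     class CustomerAnonymizer(BaseAnonymizer):
    #         model = Customer
    #         # ... replacer configuration as required by BaseAnonymizer
    #
    # _get_app_anonymizers() keeps only classes whose direct base is
    # BaseAnonymizer and, when --model is given, whose model name matches.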
def _get_app_anonymizers(self, module, models=None):
if models:
return [m[0] for m in inspect.getmembers(module, inspect.isclass)
if BaseAnonymizer in m[1].__bases__ and m[1].model.__name__ in models]
else:
return [m[0] for m in inspect.getmembers(module, inspect.isclass) if BaseAnonymizer in m[1].__bases__]
| 37.747475
| 119
| 0.590313
|
6d8b3dd2bc35d247b90663fa6107104fa4a78d16
| 14,939
|
py
|
Python
|
dffml/operation/output.py
|
agriyakhetarpal/dffml
|
f76f2ce94c3972634053377b00e7c16530f7f0a4
|
[
"MIT"
] | 171
|
2019-03-08T19:02:06.000Z
|
2022-03-29T16:17:23.000Z
|
dffml/operation/output.py
|
agriyakhetarpal/dffml
|
f76f2ce94c3972634053377b00e7c16530f7f0a4
|
[
"MIT"
] | 1,158
|
2019-03-08T19:07:50.000Z
|
2022-03-25T08:28:27.000Z
|
dffml/operation/output.py
|
agriyakhetarpal/dffml
|
f76f2ce94c3972634053377b00e7c16530f7f0a4
|
[
"MIT"
] | 183
|
2019-03-10T02:40:56.000Z
|
2022-03-27T18:51:26.000Z
|
import copy
import collections
from typing import Dict, Any, NamedTuple, List
from ..df.types import Definition, Stage, DataFlow
from ..df.base import (
op,
OperationImplementationContext,
BaseInputSetContext,
BaseInputNetworkContext,
)
from ..df.exceptions import DefinitionNotInContext
from ..util.data import traverse_get
class GroupBySpec(NamedTuple):
group: Definition
by: Definition
@classmethod
async def resolve(
cls,
ctx: BaseInputSetContext,
ictx: BaseInputNetworkContext,
exported: Dict[str, Any],
):
# TODO Address the need to copy operation implementation inputs dict
# In case the input is used elsewhere in the network
exported = copy.deepcopy(exported)
        # Look up the definition for the group and by fields
for convert in ["group", "by"]:
exported[convert] = await ictx.definition(ctx, exported[convert])
return cls(**exported)
group_by_spec = Definition(name="group_by_spec", primitive="Dict[str, Any]")
group_by_output = Definition(
name="group_by_output", primitive="Dict[str, List[Any]]"
)
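# The "spec" input of the group_by operation below maps an output name to
# {"group": <definition name>, "by": <definition name>}; both names are
# resolved to Definition objects by GroupBySpec.resolve(). A hypothetical
# spec (names illustrative) could look like:
#
#     {"temps_by_city": {"group": "temperature", "by": "city"}}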
@op(
name="group_by",
inputs={"spec": group_by_spec},
outputs={"output": group_by_output},
stage=Stage.OUTPUT,
)
class GroupBy(OperationImplementationContext):
async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
# Convert group_by_spec into a dict with values being of the NamedTuple
# type GroupBySpec
outputs = {
key: await GroupBySpec.resolve(self.ctx, self.octx.ictx, value)
for key, value in inputs["spec"].items()
}
self.logger.debug("output spec: %s", outputs)
# Acquire all definitions within the context
async with self.octx.ictx.definitions(self.ctx) as od:
# Output dict
want = {}
# Group each requested output
for output_name, output in outputs.items():
# Create an array for this output data
want[output_name] = []
                # Create an ordered dict which will be keyed off of and
                # ordered by the values of the output.by definition as seen
                # in the input network context
group_by = {}
async for item in od.inputs(output.by):
group_by[item.value] = (item, {})
group_by = collections.OrderedDict(sorted(group_by.items()))
                # Find all inputs within the input network for the group definition
async for item in od.inputs(output.group):
# Get all the parents of the input
parents = list(item.get_parents())
for group, related in group_by.values():
# Ensure that the definition we need to group by is in
# the parents
if not group in parents:
continue
if not output.by.name in related:
related[output.by.name] = []
related[output.by.name].append(item.value)
for index, (_group, qdata) in group_by.items():
for def_name, results in qdata.items():
for value in results:
want[output_name].insert(index, value)
# # If only one and single is set then convert list to single
# # item for output dict
# if len(want[output_name]) == 1 and output.single:
# want[output_name] = want[output_name][0]
# # If the output needs to be a dict then make it one. This
# # will convert an array of arrays to a dict.
# elif output.ismap:
# want[output_name] = dict(want[output_name])
return want
get_multi_spec = Definition(name="get_multi_spec", primitive="array")
get_multi_output = Definition(name="get_multi_output", primitive="map")
@op(
name="get_multi",
inputs={"spec": get_multi_spec},
outputs={"output": get_multi_output},
stage=Stage.OUTPUT,
)
class GetMulti(OperationImplementationContext):
"""
Output operation to get all Inputs matching given definitions.
Parameters
----------
spec : list
List of definition names. Any Inputs with matching definition will be
returned.
Returns
-------
dict
Maps definition names to all the Inputs of that definition
Examples
--------
The following shows how to grab all Inputs with the URL definition. If we
    had run an operation which output a URL, that output URL would also have
    been returned to us.
>>> import asyncio
>>> from dffml import *
>>>
>>> URL = Definition(name="URL", primitive="string")
>>>
>>> dataflow = DataFlow.auto(GetMulti)
>>> dataflow.seed.append(
... Input(
... value=[URL.name],
... definition=GetMulti.op.inputs["spec"]
... )
... )
>>>
>>> async def main():
... async for ctx, results in MemoryOrchestrator.run(dataflow, [
... Input(
... value="https://github.com/intel/dffml",
... definition=URL
... ),
... Input(
... value="https://github.com/intel/cve-bin-tool",
... definition=URL
... )
... ]):
... print(results)
...
>>> asyncio.run(main())
{'URL': ['https://github.com/intel/dffml', 'https://github.com/intel/cve-bin-tool']}
"""
async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
# TODO Address the need to copy operation implementation inputs dict
# In case the input is used elsewhere in the network
exported = copy.deepcopy(inputs["spec"])
name_map = {}
for i, input_value in enumerate(exported):
if isinstance(input_value, dict):
name, value = list(input_value.items())[0]
name_map[value] = name
exported[i] = value
        # Look up the definition for each
for convert in range(0, len(exported)):
exported[convert] = await self.octx.ictx.definition(
self.ctx, exported[convert]
)
self.logger.debug("output spec: %s", exported)
# Acquire all definitions within the context
async with self.octx.ictx.definitions(self.ctx) as od:
# Output dict
want = {}
# Group each requested output
for definition in exported:
async for item in od.inputs(definition):
want.setdefault(definition.name, [])
want[definition.name].append(item.value)
# Rename outputs if present in name_map
for key, value in want.copy().items():
if name_map.get(key, None):
want[name_map[key]] = value
want.pop(key)
return want
get_single_spec = Definition(name="get_single_spec", primitive="array")
get_single_output = Definition(name="get_single_output", primitive="map")
@op(
name="get_single",
inputs={"spec": get_single_spec},
outputs={"output": get_single_output},
stage=Stage.OUTPUT,
)
class GetSingle(GetMulti):
"""
Output operation to get a single Input for each definition given.
Parameters
----------
spec : list
List of definition names. An Input with matching definition will be
returned.
Returns
-------
dict
Maps definition names to an Input of that definition
Examples
--------
    The following shows how to grab an Input with the URL definition. If we
    had run an operation which output a URL, that output URL could also have
    been returned to us.
>>> import asyncio
>>> from dffml import *
>>>
>>> URL = Definition(name="URL", primitive="string")
>>> ORG = Definition(name="ORG", primitive="string")
>>>
>>> dataflow = DataFlow.auto(GetSingle)
>>> dataflow.seed.append(
... Input(
... value=[{"Repo Link": URL.name}, ORG.name],
... definition=GetSingle.op.inputs["spec"]
... )
... )
>>>
>>> async def main():
... async for ctx, results in MemoryOrchestrator.run(dataflow, [
... Input(
... value="https://github.com/intel/dffml",
... definition=URL
... ),
... Input(
... value="Intel",
... definition=ORG
... )
... ]):
... print(results)
...
>>> asyncio.run(main())
{'ORG': 'Intel', 'Repo Link': 'https://github.com/intel/dffml'}
"""
async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
# Output dict
want = await super().run(inputs)
for key, value in want.items():
want[key] = value.pop()
return want
associate_spec = Definition(name="associate_spec", primitive="List[str]")
associate_output = Definition(
name="associate_output", primitive="Dict[str, Any]"
)
@op(
name="associate",
inputs={"spec": associate_spec},
outputs={"output": associate_output},
stage=Stage.OUTPUT,
)
class Associate(OperationImplementationContext):
async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
# TODO Address the need to copy operation implementation inputs dict
# In case the input is used elsewhere in the network
exported = copy.deepcopy(inputs["spec"])
        # Look up the definition for each
try:
for convert in range(0, len(exported)):
exported[convert] = await self.octx.ictx.definition(
self.ctx, exported[convert]
)
except DefinitionNotInContext:
return {exported[1]: {}}
        # Unpack exported into the key and value definitions used in the output
key, value = exported
# Acquire all definitions within the context
async with self.octx.ictx.definitions(self.ctx) as od:
# Output dict
want = {}
async for item in od.inputs(value):
parents = item.get_parents()
for parent in parents:
if key == parent.definition:
want[parent.value] = item.value
break
return {value.name: want}
@op(
name="associate_definition",
inputs={"spec": associate_spec},
outputs={"output": associate_output},
stage=Stage.OUTPUT,
)
class AssociateDefinition(OperationImplementationContext):
"""
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> feed_def = Definition(name="feed", primitive="string")
>>> dead_def = Definition(name="dead", primitive="string")
>>> output = Definition(name="output", primitive="string")
>>>
>>> feed_input = Input(value="my favorite value", definition=feed_def)
>>> face_input = Input(
... value="face", definition=output, parents=[feed_input]
... )
>>>
>>> dead_input = Input(
... value="my second favorite value", definition=dead_def
... )
>>> beef_input = Input(
... value="beef", definition=output, parents=[dead_input]
... )
>>>
>>> async def main():
... for value in ["feed", "dead"]:
... async for ctx, results in MemoryOrchestrator.run(
... DataFlow.auto(AssociateDefinition),
... [
... feed_input,
... face_input,
... dead_input,
... beef_input,
... Input(
... value={value: "output"},
... definition=AssociateDefinition.op.inputs["spec"],
... ),
... ],
... ):
... print(results)
>>>
>>> asyncio.run(main())
{'feed': 'face'}
{'dead': 'beef'}
"""
async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
# Look up the definition for each definition name given
try:
spec = {
await self.octx.ictx.definition(
self.ctx, key
): await self.octx.ictx.definition(self.ctx, value)
for key, value in inputs["spec"].items()
}
except DefinitionNotInContext:
return {key: {} for key in inputs["spec"]}
# Output dict
want = {}
# Acquire all definitions within the context
async with self.octx.ictx.definitions(self.ctx) as od:
            # For each key/value definition pair in the spec, find the matching input
for key, value in spec.items():
async for item in od.inputs(value):
parents = item.get_parents()
for parent in parents:
if key.name == parent.definition.name:
want[key.name] = item.value
break
return want
class RemapConfig(NamedTuple):
dataflow: DataFlow
@classmethod
def _fromdict(cls, **kwargs):
kwargs["dataflow"] = DataFlow._fromdict(**kwargs["dataflow"])
return cls(**kwargs)
class RemapFailure(Exception):
"""
    Raised when the results of a dataflow could not be remapped.
"""
# TODO Make it so that only one output operation gets run, the result of that
# operation is the result of the dataflow
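# The remap spec maps an output feature name to a path of keys that is walked
# through the sub-dataflow's results with traverse_get(). A hypothetical spec
# (names illustrative) could look like:
#
#     {"my_feature": ["some", "nested", "key"]}
#
# which would expose result["some"]["nested"]["key"] as "my_feature".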
@op(
inputs={"spec": Definition(name="remap_spec", primitive="map")},
outputs={"response": Definition(name="remap_output", primitive="map")},
stage=Stage.OUTPUT,
config_cls=RemapConfig,
)
async def remap(
self: OperationImplementationContext, spec: Dict[str, List[str]]
):
    # Create a new orchestrator context and have it reuse the existing input
    # set context, so that the output operations we are about to run have
    # access to the data from this dataflow rather than from a new sub flow.
async with self.octx.parent(
self.config.dataflow, reuse={"ictx": self.octx.ictx}
) as octx:
_ctx, result = [result async for result in octx.run(ctx=self.ctx)][0]
# Remap the output operations to their feature (copied logic
# from CLI)
remap = {}
for (feature_name, traverse) in spec.items():
try:
remap[feature_name] = traverse_get(result, *traverse)
except KeyError as error:
raise RemapFailure(
"failed to remap %r. Results do not contain %r: %s"
% (feature_name, ".".join(traverse), result)
) from error
# Results have been remapped
return remap
| 34.107306
| 88
| 0.564161
|
29f01f0e4219b5515dacef09a92469a49fdc5701
| 4,678
|
py
|
Python
|
Twins/barlow.py
|
MaxLikesMath/Barlow-Twins-Pytorch
|
fb74e0f5a78846ecfb1d42387a54f45fb909c314
|
[
"MIT"
] | 7
|
2021-05-18T11:46:36.000Z
|
2022-02-14T04:46:36.000Z
|
Twins/barlow.py
|
MaxLikesMath/Barlow-Twins-Pytorch
|
fb74e0f5a78846ecfb1d42387a54f45fb909c314
|
[
"MIT"
] | null | null | null |
Twins/barlow.py
|
MaxLikesMath/Barlow-Twins-Pytorch
|
fb74e0f5a78846ecfb1d42387a54f45fb909c314
|
[
"MIT"
] | 2
|
2021-07-06T16:24:58.000Z
|
2022-01-23T23:03:28.000Z
|
import torch
import torch.nn as nn
'''
Implementation of Barlow Twins (https://arxiv.org/abs/2103.03230), adapted for ease of use for experiments from
https://github.com/facebookresearch/barlowtwins, with some modifications using code from
https://github.com/lucidrains/byol-pytorch
'''
def flatten(t):
return t.reshape(t.shape[0], -1)
class NetWrapper(nn.Module):
# from https://github.com/lucidrains/byol-pytorch
def __init__(self, net, layer = -2):
super().__init__()
self.net = net
self.layer = layer
self.hidden = None
self.hook_registered = False
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _hook(self, _, __, output):
self.hidden = flatten(output)
def _register_hook(self):
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(self._hook)
self.hook_registered = True
def get_representation(self, x):
if self.layer == -1:
return self.net(x)
if not self.hook_registered:
self._register_hook()
_ = self.net(x)
hidden = self.hidden
self.hidden = None
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
def forward(self, x):
representation = self.get_representation(x)
return representation
def off_diagonal(x):
# return a flattened view of the off-diagonal elements of a square matrix
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
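# A quick sanity check of the view trick above, with illustrative values:
#
#     x = torch.arange(9.).view(3, 3)
#     off_diagonal(x)  # -> tensor([1., 2., 3., 5., 6., 7.])
#
# Dropping the last element and re-viewing the rest as (n - 1, n + 1) shifts
# each row by one position, so every remaining diagonal entry lands in the
# stripped first column and only the off-diagonal elements survive.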
class BarlowTwins(nn.Module):
'''
    Adapted from https://github.com/facebookresearch/barlowtwins for arbitrary backbones and an arbitrary choice of
    which latent representation to use. Designed for models which fit on a single GPU (though training can be
    parallelized across multiple GPUs as with any other model). Support for larger models can be added for individual
    use cases by following PyTorch's model parallelism best practices.
'''
def __init__(self, backbone, latent_id, projection_sizes, lambd, scale_factor=1):
'''
:param backbone: Model backbone
:param latent_id: name (or index) of the layer to be fed to the projection MLP
:param projection_sizes: size of the hidden layers in the projection MLP
        :param lambd: trade-off coefficient weighting the off-diagonal (redundancy reduction) term
:param scale_factor: Factor to scale loss by, default is 1
'''
super().__init__()
self.backbone = backbone
self.backbone = NetWrapper(self.backbone, latent_id)
self.lambd = lambd
self.scale_factor = scale_factor
# projector
sizes = projection_sizes
layers = []
for i in range(len(sizes) - 2):
layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
layers.append(nn.BatchNorm1d(sizes[i + 1]))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
self.projector = nn.Sequential(*layers)
# normalization layer for the representations z1 and z2
self.bn = nn.BatchNorm1d(sizes[-1], affine=False)
def forward(self, y1, y2):
z1 = self.backbone(y1)
z2 = self.backbone(y2)
z1 = self.projector(z1)
z2 = self.projector(z2)
# empirical cross-correlation matrix
c = torch.mm(self.bn(z1).T, self.bn(z2))
c.div_(z1.shape[0])
        # scale_factor multiplies the loss by a constant factor (the upstream
        # repo exposes this as --scale-loss; see the Issues section of its readme)
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag = off_diagonal(c).pow_(2).sum()
loss = self.scale_factor*(on_diag + self.lambd * off_diag)
return loss
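# The loss computed in forward() above is the Barlow Twins objective from the
# paper (https://arxiv.org/abs/2103.03230), scaled by scale_factor:
#
#     L_BT = sum_i (1 - C_ii)^2 + lambd * sum_i sum_{j != i} C_ij^2
#
# where C is the empirical cross-correlation matrix of the batch-normalized
# embeddings z1 and z2.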
if __name__ == '__main__':
import torchvision
model = torchvision.models.resnet18(zero_init_residual=True)
proj = [512, 512, 512, 512]
twins = BarlowTwins(model, 'avgpool', proj, 0.5)
inp1 = torch.rand(2,3,224,224)
inp2 = torch.rand(2,3,224,224)
    outs = twins(inp1, inp2)
#model = model_utils.extract_latent.LatentHook(model, ['avgpool'])
#out, dicti = model(inp1)
print(outs)
#print(model)
| 35.172932
| 121
| 0.619068
|
e17e3576c2660a5fe9223a0e1bb746c3bba5985d
| 448
|
py
|
Python
|
Car_Game.py
|
hammadihamza/PythonBasicCarGame
|
59ad1a54315658e620ccab3633f7dc58b76666b2
|
[
"MIT"
] | null | null | null |
Car_Game.py
|
hammadihamza/PythonBasicCarGame
|
59ad1a54315658e620ccab3633f7dc58b76666b2
|
[
"MIT"
] | null | null | null |
Car_Game.py
|
hammadihamza/PythonBasicCarGame
|
59ad1a54315658e620ccab3633f7dc58b76666b2
|
[
"MIT"
] | null | null | null |
command = ""
i = 0
j = 0
while True:
command = input("> ").lower()
if command == "start" and i == 0:
print('car starting...')
i += 1
j = 0
elif command == "stop" and j == 0 and i > 0:
print('car stopping...')
j += 1
i = 0
elif command == "quit":
break
elif command == "help":
print("""start
stop
quit""")
else:
print("Error")
| 18.666667
| 49
| 0.419643
|
2f4ea16168b8cbf619ae99d590d154397cd9edda
| 12,289
|
py
|
Python
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/nfs_nlm_sessions_session.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/nfs_nlm_sessions_session.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/nfs_nlm_sessions_session.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class NfsNlmSessionsSession(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'cluster_ip': 'str',
'delegates': 'list[int]',
'host_type': 'str',
'hostname': 'str',
'id': 'str',
'is_active': 'bool',
'last_modified': 'int',
'notify_attempts_remaining': 'int',
'notify_error': 'str',
'notify_last_attempt': 'int',
'zone': 'str'
}
attribute_map = {
'cluster_ip': 'cluster_ip',
'delegates': 'delegates',
'host_type': 'host_type',
'hostname': 'hostname',
'id': 'id',
'is_active': 'is_active',
'last_modified': 'last_modified',
'notify_attempts_remaining': 'notify_attempts_remaining',
'notify_error': 'notify_error',
'notify_last_attempt': 'notify_last_attempt',
'zone': 'zone'
}
def __init__(self, cluster_ip=None, delegates=None, host_type=None, hostname=None, id=None, is_active=None, last_modified=None, notify_attempts_remaining=None, notify_error=None, notify_last_attempt=None, zone=None): # noqa: E501
"""NfsNlmSessionsSession - a model defined in Swagger""" # noqa: E501
self._cluster_ip = None
self._delegates = None
self._host_type = None
self._hostname = None
self._id = None
self._is_active = None
self._last_modified = None
self._notify_attempts_remaining = None
self._notify_error = None
self._notify_last_attempt = None
self._zone = None
self.discriminator = None
if cluster_ip is not None:
self.cluster_ip = cluster_ip
if delegates is not None:
self.delegates = delegates
if host_type is not None:
self.host_type = host_type
if hostname is not None:
self.hostname = hostname
if id is not None:
self.id = id
if is_active is not None:
self.is_active = is_active
if last_modified is not None:
self.last_modified = last_modified
if notify_attempts_remaining is not None:
self.notify_attempts_remaining = notify_attempts_remaining
if notify_error is not None:
self.notify_error = notify_error
if notify_last_attempt is not None:
self.notify_last_attempt = notify_last_attempt
if zone is not None:
self.zone = zone
@property
def cluster_ip(self):
"""Gets the cluster_ip of this NfsNlmSessionsSession. # noqa: E501
An IP address for which NSM has client records # noqa: E501
:return: The cluster_ip of this NfsNlmSessionsSession. # noqa: E501
:rtype: str
"""
return self._cluster_ip
@cluster_ip.setter
def cluster_ip(self, cluster_ip):
"""Sets the cluster_ip of this NfsNlmSessionsSession.
An IP address for which NSM has client records # noqa: E501
:param cluster_ip: The cluster_ip of this NfsNlmSessionsSession. # noqa: E501
:type: str
"""
self._cluster_ip = cluster_ip
@property
def delegates(self):
"""Gets the delegates of this NfsNlmSessionsSession. # noqa: E501
:return: The delegates of this NfsNlmSessionsSession. # noqa: E501
:rtype: list[int]
"""
return self._delegates
@delegates.setter
def delegates(self, delegates):
"""Sets the delegates of this NfsNlmSessionsSession.
:param delegates: The delegates of this NfsNlmSessionsSession. # noqa: E501
:type: list[int]
"""
self._delegates = delegates
@property
def host_type(self):
"""Gets the host_type of this NfsNlmSessionsSession. # noqa: E501
The sort of host that this entry represents # noqa: E501
:return: The host_type of this NfsNlmSessionsSession. # noqa: E501
:rtype: str
"""
return self._host_type
@host_type.setter
def host_type(self, host_type):
"""Sets the host_type of this NfsNlmSessionsSession.
The sort of host that this entry represents # noqa: E501
:param host_type: The host_type of this NfsNlmSessionsSession. # noqa: E501
:type: str
"""
allowed_values = ["client", "server", "reverse", "expired"] # noqa: E501
if host_type not in allowed_values:
raise ValueError(
"Invalid value for `host_type` ({0}), must be one of {1}" # noqa: E501
.format(host_type, allowed_values)
)
self._host_type = host_type
@property
def hostname(self):
"""Gets the hostname of this NfsNlmSessionsSession. # noqa: E501
The host being monitored # noqa: E501
:return: The hostname of this NfsNlmSessionsSession. # noqa: E501
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""Sets the hostname of this NfsNlmSessionsSession.
The host being monitored # noqa: E501
:param hostname: The hostname of this NfsNlmSessionsSession. # noqa: E501
:type: str
"""
self._hostname = hostname
@property
def id(self):
"""Gets the id of this NfsNlmSessionsSession. # noqa: E501
Session ID # noqa: E501
:return: The id of this NfsNlmSessionsSession. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this NfsNlmSessionsSession.
Session ID # noqa: E501
:param id: The id of this NfsNlmSessionsSession. # noqa: E501
:type: str
"""
self._id = id
@property
def is_active(self):
"""Gets the is_active of this NfsNlmSessionsSession. # noqa: E501
Whether or not the client is actively being monitored # noqa: E501
:return: The is_active of this NfsNlmSessionsSession. # noqa: E501
:rtype: bool
"""
return self._is_active
@is_active.setter
def is_active(self, is_active):
"""Sets the is_active of this NfsNlmSessionsSession.
Whether or not the client is actively being monitored # noqa: E501
:param is_active: The is_active of this NfsNlmSessionsSession. # noqa: E501
:type: bool
"""
self._is_active = is_active
@property
def last_modified(self):
"""Gets the last_modified of this NfsNlmSessionsSession. # noqa: E501
Unix time in seconds that the client was last modified (monitored or unmonitored) # noqa: E501
:return: The last_modified of this NfsNlmSessionsSession. # noqa: E501
:rtype: int
"""
return self._last_modified
@last_modified.setter
def last_modified(self, last_modified):
"""Sets the last_modified of this NfsNlmSessionsSession.
Unix time in seconds that the client was last modified (monitored or unmonitored) # noqa: E501
:param last_modified: The last_modified of this NfsNlmSessionsSession. # noqa: E501
:type: int
"""
self._last_modified = last_modified
@property
def notify_attempts_remaining(self):
"""Gets the notify_attempts_remaining of this NfsNlmSessionsSession. # noqa: E501
Number of times we will attempt to notify this client before giving up # noqa: E501
:return: The notify_attempts_remaining of this NfsNlmSessionsSession. # noqa: E501
:rtype: int
"""
return self._notify_attempts_remaining
@notify_attempts_remaining.setter
def notify_attempts_remaining(self, notify_attempts_remaining):
"""Sets the notify_attempts_remaining of this NfsNlmSessionsSession.
Number of times we will attempt to notify this client before giving up # noqa: E501
:param notify_attempts_remaining: The notify_attempts_remaining of this NfsNlmSessionsSession. # noqa: E501
:type: int
"""
self._notify_attempts_remaining = notify_attempts_remaining
@property
def notify_error(self):
"""Gets the notify_error of this NfsNlmSessionsSession. # noqa: E501
Last error received attempting to notify this client # noqa: E501
:return: The notify_error of this NfsNlmSessionsSession. # noqa: E501
:rtype: str
"""
return self._notify_error
@notify_error.setter
def notify_error(self, notify_error):
"""Sets the notify_error of this NfsNlmSessionsSession.
Last error received attempting to notify this client # noqa: E501
:param notify_error: The notify_error of this NfsNlmSessionsSession. # noqa: E501
:type: str
"""
self._notify_error = notify_error
@property
def notify_last_attempt(self):
"""Gets the notify_last_attempt of this NfsNlmSessionsSession. # noqa: E501
        Unix time in seconds when we last attempted to notify this client # noqa: E501
:return: The notify_last_attempt of this NfsNlmSessionsSession. # noqa: E501
:rtype: int
"""
return self._notify_last_attempt
@notify_last_attempt.setter
def notify_last_attempt(self, notify_last_attempt):
"""Sets the notify_last_attempt of this NfsNlmSessionsSession.
        Unix time in seconds when we last attempted to notify this client # noqa: E501
:param notify_last_attempt: The notify_last_attempt of this NfsNlmSessionsSession. # noqa: E501
:type: int
"""
self._notify_last_attempt = notify_last_attempt
@property
def zone(self):
"""Gets the zone of this NfsNlmSessionsSession. # noqa: E501
Access zone name # noqa: E501
:return: The zone of this NfsNlmSessionsSession. # noqa: E501
:rtype: str
"""
return self._zone
@zone.setter
def zone(self, zone):
"""Sets the zone of this NfsNlmSessionsSession.
Access zone name # noqa: E501
:param zone: The zone of this NfsNlmSessionsSession. # noqa: E501
:type: str
"""
self._zone = zone
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NfsNlmSessionsSession):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.799499
| 234
| 0.617219
|
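For reference, a minimal usage sketch of a generated model like this one; it assumes the swagger-codegen constructor (not shown above) accepts every field as an optional keyword argument, which is the usual codegen pattern.

# Usage sketch only; constructor defaults are an assumption of the generated code style.
session = NfsNlmSessionsSession()
session.is_active = True
session.zone = "System"
session.notify_attempts_remaining = 3

print(session.to_dict())                   # plain dict built by walking swagger_types
print(session == NfsNlmSessionsSession())  # False: __eq__ compares the full __dict__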
ec0f41bf6a19be2c389b53837c34cf8132b93145
| 755
|
py
|
Python
|
powerline_shell/themes/solarized_dark.py
|
Dakedres/powerline-shell
|
352d26846aa5d3beb6c92ee26588536bc399117c
|
[
"MIT"
] | 2,656
|
2017-11-28T05:11:53.000Z
|
2022-03-31T14:10:18.000Z
|
powerline_shell/themes/solarized_dark.py
|
jceaser/powerline-shell
|
a9b8c9bb39dbfb7ec3c639e497b5a76fa6dcb8cc
|
[
"MIT"
] | 193
|
2017-11-28T23:20:17.000Z
|
2022-03-25T12:57:55.000Z
|
powerline_shell/themes/solarized_dark.py
|
jceaser/powerline-shell
|
a9b8c9bb39dbfb7ec3c639e497b5a76fa6dcb8cc
|
[
"MIT"
] | 387
|
2017-11-29T22:33:39.000Z
|
2022-03-31T18:24:35.000Z
|
from powerline_shell.themes.default import DefaultColor
class Color(DefaultColor):
USERNAME_FG = 15
USERNAME_BG = 4
USERNAME_ROOT_BG = 1
HOSTNAME_FG = 15
HOSTNAME_BG = 10
HOME_SPECIAL_DISPLAY = False
PATH_FG = 7
PATH_BG = 10
CWD_FG = 15
SEPARATOR_FG = 14
READONLY_BG = 1
READONLY_FG = 7
REPO_CLEAN_FG = 14
REPO_CLEAN_BG = 0
REPO_DIRTY_FG = 3
REPO_DIRTY_BG = 0
JOBS_FG = 4
JOBS_BG = 8
CMD_PASSED_FG = 15
CMD_PASSED_BG = 2
CMD_FAILED_FG = 15
CMD_FAILED_BG = 1
SVN_CHANGES_FG = REPO_DIRTY_FG
SVN_CHANGES_BG = REPO_DIRTY_BG
VIRTUAL_ENV_BG = 15
VIRTUAL_ENV_FG = 2
AWS_PROFILE_FG = 7
AWS_PROFILE_BG = 2
TIME_FG = 15
TIME_BG = 10
| 16.777778
| 55
| 0.651656
|
b101ae1d98e4d42e9d26550c270328bc4054d721
| 466
|
py
|
Python
|
hackerearth/Algorithms/Help Fredo/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/Help Fredo/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/Help Fredo/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'5',
'4 2 1 10 6',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(), '4\n')
if __name__ == '__main__':
unittest.main()
| 22.190476
| 53
| 0.650215
|
6413b2e0dfe089f3ca3a7af74159c313f9915488
| 11,527
|
py
|
Python
|
homeassistant/components/nest/climate_sdm.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 6
|
2017-08-02T19:26:39.000Z
|
2020-03-14T22:47:41.000Z
|
homeassistant/components/nest/climate_sdm.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 57
|
2020-10-15T06:47:00.000Z
|
2022-03-31T06:11:18.000Z
|
homeassistant/components/nest/climate_sdm.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Support for Google Nest SDM climate devices."""
from typing import Optional
from google_nest_sdm.device import Device
from google_nest_sdm.device_traits import FanTrait, TemperatureTrait
from google_nest_sdm.exceptions import GoogleNestException
from google_nest_sdm.thermostat_traits import (
ThermostatEcoTrait,
ThermostatHvacTrait,
ThermostatModeTrait,
ThermostatTemperatureSetpointTrait,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_OFF,
FAN_OFF,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_ECO,
PRESET_NONE,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.typing import HomeAssistantType
from .const import DATA_SUBSCRIBER, DOMAIN
from .device_info import DeviceInfo
# Mapping for sdm.devices.traits.ThermostatMode mode field
THERMOSTAT_MODE_MAP = {
"OFF": HVAC_MODE_OFF,
"HEAT": HVAC_MODE_HEAT,
"COOL": HVAC_MODE_COOL,
"HEATCOOL": HVAC_MODE_HEAT_COOL,
}
THERMOSTAT_INV_MODE_MAP = {v: k for k, v in THERMOSTAT_MODE_MAP.items()}
# Mode for sdm.devices.traits.ThermostatEco
THERMOSTAT_ECO_MODE = "MANUAL_ECO"
# Mapping for sdm.devices.traits.ThermostatHvac status field
THERMOSTAT_HVAC_STATUS_MAP = {
"OFF": CURRENT_HVAC_OFF,
"HEATING": CURRENT_HVAC_HEAT,
"COOLING": CURRENT_HVAC_COOL,
}
THERMOSTAT_RANGE_MODES = [HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO]
PRESET_MODE_MAP = {
"MANUAL_ECO": PRESET_ECO,
"OFF": PRESET_NONE,
}
PRESET_INV_MODE_MAP = {v: k for k, v in PRESET_MODE_MAP.items()}
FAN_MODE_MAP = {
"ON": FAN_ON,
"OFF": FAN_OFF,
}
FAN_INV_MODE_MAP = {v: k for k, v in FAN_MODE_MAP.items()}
async def async_setup_sdm_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the client entities."""
subscriber = hass.data[DOMAIN][DATA_SUBSCRIBER]
try:
device_manager = await subscriber.async_get_device_manager()
except GoogleNestException as err:
raise PlatformNotReady from err
entities = []
for device in device_manager.devices.values():
if ThermostatHvacTrait.NAME in device.traits:
entities.append(ThermostatEntity(device))
async_add_entities(entities)
class ThermostatEntity(ClimateEntity):
"""A nest thermostat climate entity."""
def __init__(self, device: Device):
"""Initialize ThermostatEntity."""
self._device = device
self._device_info = DeviceInfo(device)
self._supported_features = 0
@property
def should_poll(self) -> bool:
"""Disable polling since entities have state pushed via pubsub."""
return False
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
# The API "name" field is a unique device identifier.
return self._device.name
@property
def name(self):
"""Return the name of the entity."""
return self._device_info.device_name
@property
def device_info(self):
"""Return device specific attributes."""
return self._device_info.device_info
async def async_added_to_hass(self):
"""Run when entity is added to register update signal handler."""
self._supported_features = self._get_supported_features()
self.async_on_remove(
self._device.add_update_listener(self.async_write_ha_state)
)
@property
def temperature_unit(self):
"""Return the unit of temperature measurement for the system."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
if TemperatureTrait.NAME not in self._device.traits:
return None
trait = self._device.traits[TemperatureTrait.NAME]
return trait.ambient_temperature_celsius
@property
def target_temperature(self):
"""Return the temperature currently set to be reached."""
trait = self._target_temperature_trait
if not trait:
return None
if self.hvac_mode == HVAC_MODE_HEAT:
return trait.heat_celsius
if self.hvac_mode == HVAC_MODE_COOL:
return trait.cool_celsius
return None
@property
def target_temperature_high(self):
"""Return the upper bound target temperature."""
if self.hvac_mode != HVAC_MODE_HEAT_COOL:
return None
trait = self._target_temperature_trait
if not trait:
return None
return trait.cool_celsius
@property
def target_temperature_low(self):
"""Return the lower bound target temperature."""
if self.hvac_mode != HVAC_MODE_HEAT_COOL:
return None
trait = self._target_temperature_trait
if not trait:
return None
return trait.heat_celsius
@property
def _target_temperature_trait(self):
"""Return the correct trait with a target temp depending on mode."""
if self.preset_mode == PRESET_ECO:
if ThermostatEcoTrait.NAME in self._device.traits:
return self._device.traits[ThermostatEcoTrait.NAME]
if ThermostatTemperatureSetpointTrait.NAME in self._device.traits:
return self._device.traits[ThermostatTemperatureSetpointTrait.NAME]
return None
@property
def hvac_mode(self):
"""Return the current operation (e.g. heat, cool, idle)."""
hvac_mode = HVAC_MODE_OFF
if ThermostatModeTrait.NAME in self._device.traits:
trait = self._device.traits[ThermostatModeTrait.NAME]
if trait.mode in THERMOSTAT_MODE_MAP:
hvac_mode = THERMOSTAT_MODE_MAP[trait.mode]
if hvac_mode == HVAC_MODE_OFF and self.fan_mode == FAN_ON:
hvac_mode = HVAC_MODE_FAN_ONLY
return hvac_mode
@property
def hvac_modes(self):
"""List of available operation modes."""
supported_modes = []
for mode in self._get_device_hvac_modes:
if mode in THERMOSTAT_MODE_MAP:
supported_modes.append(THERMOSTAT_MODE_MAP[mode])
if self.supported_features & SUPPORT_FAN_MODE:
supported_modes.append(HVAC_MODE_FAN_ONLY)
return supported_modes
@property
def _get_device_hvac_modes(self):
"""Return the set of SDM API hvac modes supported by the device."""
modes = []
if ThermostatModeTrait.NAME in self._device.traits:
trait = self._device.traits[ThermostatModeTrait.NAME]
modes.extend(trait.available_modes)
return set(modes)
@property
def hvac_action(self):
"""Return the current HVAC action (heating, cooling)."""
trait = self._device.traits[ThermostatHvacTrait.NAME]
if trait.status in THERMOSTAT_HVAC_STATUS_MAP:
return THERMOSTAT_HVAC_STATUS_MAP[trait.status]
return None
@property
def preset_mode(self):
"""Return the current active preset."""
if ThermostatEcoTrait.NAME in self._device.traits:
trait = self._device.traits[ThermostatEcoTrait.NAME]
return PRESET_MODE_MAP.get(trait.mode, PRESET_NONE)
return PRESET_NONE
@property
def preset_modes(self):
"""Return the available presets."""
modes = []
if ThermostatEcoTrait.NAME in self._device.traits:
trait = self._device.traits[ThermostatEcoTrait.NAME]
for mode in trait.available_modes:
if mode in PRESET_MODE_MAP:
modes.append(PRESET_MODE_MAP[mode])
return modes
@property
def fan_mode(self):
"""Return the current fan mode."""
if FanTrait.NAME in self._device.traits:
trait = self._device.traits[FanTrait.NAME]
return FAN_MODE_MAP.get(trait.timer_mode, FAN_OFF)
return FAN_OFF
@property
def fan_modes(self):
"""Return the list of available fan modes."""
modes = []
if FanTrait.NAME in self._device.traits:
modes = list(FAN_INV_MODE_MAP)
return modes
@property
def supported_features(self):
"""Bitmap of supported features."""
return self._supported_features
def _get_supported_features(self):
"""Compute the bitmap of supported features from the current state."""
features = 0
if HVAC_MODE_HEAT_COOL in self.hvac_modes:
features |= SUPPORT_TARGET_TEMPERATURE_RANGE
if HVAC_MODE_HEAT in self.hvac_modes or HVAC_MODE_COOL in self.hvac_modes:
features |= SUPPORT_TARGET_TEMPERATURE
if ThermostatEcoTrait.NAME in self._device.traits:
features |= SUPPORT_PRESET_MODE
if FanTrait.NAME in self._device.traits:
            # Fan trait may be present without actually supporting fan mode
fan_trait = self._device.traits[FanTrait.NAME]
if fan_trait.timer_mode is not None:
features |= SUPPORT_FAN_MODE
return features
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
if hvac_mode not in self.hvac_modes:
raise ValueError(f"Unsupported hvac_mode '{hvac_mode}'")
if hvac_mode == HVAC_MODE_FAN_ONLY:
# Turn the fan on but also turn off the hvac if it is on
await self.async_set_fan_mode(FAN_ON)
hvac_mode = HVAC_MODE_OFF
api_mode = THERMOSTAT_INV_MODE_MAP[hvac_mode]
trait = self._device.traits[ThermostatModeTrait.NAME]
await trait.set_mode(api_mode)
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
if ThermostatTemperatureSetpointTrait.NAME not in self._device.traits:
return
trait = self._device.traits[ThermostatTemperatureSetpointTrait.NAME]
if self.preset_mode == PRESET_ECO or self.hvac_mode == HVAC_MODE_HEAT_COOL:
if low_temp and high_temp:
await trait.set_range(low_temp, high_temp)
elif self.hvac_mode == HVAC_MODE_COOL and temp:
await trait.set_cool(temp)
elif self.hvac_mode == HVAC_MODE_HEAT and temp:
await trait.set_heat(temp)
async def async_set_preset_mode(self, preset_mode):
"""Set new target preset mode."""
if preset_mode not in self.preset_modes:
raise ValueError(f"Unsupported preset_mode '{preset_mode}'")
trait = self._device.traits[ThermostatEcoTrait.NAME]
await trait.set_mode(PRESET_INV_MODE_MAP[preset_mode])
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
if fan_mode not in self.fan_modes:
raise ValueError(f"Unsupported fan_mode '{fan_mode}'")
trait = self._device.traits[FanTrait.NAME]
await trait.set_timer(FAN_INV_MODE_MAP[fan_mode])
| 35.358896
| 83
| 0.679361
|
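The THERMOSTAT/PRESET/FAN tables above are plain dicts inverted with comprehensions, and hvac_mode layers the fan state on top of them. A self-contained sketch of that translation, using the conventional string values of the Home Assistant constants (illustrative only, no Home Assistant imports):

# Mirrors ThermostatEntity.hvac_mode / async_set_hvac_mode without the HA dependencies.
THERMOSTAT_MODE_MAP = {"OFF": "off", "HEAT": "heat", "COOL": "cool", "HEATCOOL": "heat_cool"}
THERMOSTAT_INV_MODE_MAP = {v: k for k, v in THERMOSTAT_MODE_MAP.items()}

def to_ha_mode(sdm_mode, fan_on=False):
    """Translate an SDM mode string to a Home Assistant hvac_mode."""
    mode = THERMOSTAT_MODE_MAP.get(sdm_mode, "off")
    return "fan_only" if mode == "off" and fan_on else mode

print(to_ha_mode("HEATCOOL"))              # heat_cool
print(to_ha_mode("OFF", fan_on=True))      # fan_only
print(THERMOSTAT_INV_MODE_MAP["heat"])     # HEAT, the value sent back to the SDM API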
1225d93b5fbbd4ed9bdc33d349808777adb6e98a
| 7,261
|
py
|
Python
|
web3/utils/abi.py
|
bellaj/web3py
|
882335941a0781a3a3a14b7616f727005e88d88d
|
[
"MIT"
] | null | null | null |
web3/utils/abi.py
|
bellaj/web3py
|
882335941a0781a3a3a14b7616f727005e88d88d
|
[
"MIT"
] | null | null | null |
web3/utils/abi.py
|
bellaj/web3py
|
882335941a0781a3a3a14b7616f727005e88d88d
|
[
"MIT"
] | null | null | null |
import itertools
import re
from eth_utils import (
coerce_args_to_bytes,
coerce_args_to_text,
coerce_return_to_text,
to_tuple,
add_0x_prefix,
is_list_like,
is_string,
is_integer,
is_boolean,
is_address,
)
from eth_abi.abi import (
process_type,
)
def filter_by_type(_type, contract_abi):
return [abi for abi in contract_abi if abi['type'] == _type]
def filter_by_name(name, contract_abi):
return [abi for abi in contract_abi if abi['name'] == name]
def get_abi_input_types(abi):
return [arg['type'] for arg in abi['inputs']]
def get_abi_output_types(abi):
return [arg['type'] for arg in abi['outputs']]
def get_abi_input_names(abi):
return [arg['name'] for arg in abi['inputs']]
def get_indexed_event_inputs(event_abi):
return [arg for arg in event_abi['inputs'] if arg['indexed'] is True]
def exclude_indexed_event_inputs(event_abi):
return [arg for arg in event_abi['inputs'] if arg['indexed'] is False]
def filter_by_argument_count(num_arguments, contract_abi):
return [
abi
for abi
in contract_abi
if len(abi['inputs']) == num_arguments
]
def filter_by_argument_name(argument_names, contract_abi):
return [
abi
for abi in contract_abi
if set(argument_names).intersection(
get_abi_input_names(abi)
) == set(argument_names)
]
def is_encodable(_type, value):
try:
base, sub, arrlist = _type
except ValueError:
base, sub, arrlist = process_type(_type)
if arrlist:
if not is_list_like(value):
return False
if arrlist[-1] and len(value) != arrlist[-1][0]:
return False
sub_type = (base, sub, arrlist[:-1])
return all(is_encodable(sub_type, sub_value) for sub_value in value)
elif base == 'bool':
return is_boolean(value)
elif base == 'uint':
if not is_integer(value):
return False
exp = int(sub)
if value < 0 or value >= 2**exp:
return False
return True
elif base == 'int':
if not is_integer(value):
return False
exp = int(sub)
if value <= -1 * 2**(exp - 1) or value >= 2**(exp - 1):
return False
return True
elif base == 'string':
if not is_string(value):
return False
return True
elif base == 'bytes':
if not is_string(value):
return False
if not sub:
return True
max_length = int(sub)
return len(value) <= max_length
elif base == 'address':
if not is_address(value):
return False
return True
else:
raise ValueError("Unsupported type")
def filter_by_encodability(args, kwargs, contract_abi):
return [
function_abi
for function_abi
in contract_abi
if check_if_arguments_can_be_encoded(function_abi, args, kwargs)
]
@coerce_args_to_bytes
def check_if_arguments_can_be_encoded(function_abi, args, kwargs):
try:
arguments = merge_args_and_kwargs(function_abi, args, kwargs)
except TypeError:
return False
if len(function_abi['inputs']) != len(arguments):
return False
types = get_abi_input_types(function_abi)
return all(
is_encodable(_type, arg)
for _type, arg in zip(types, arguments)
)
@coerce_args_to_text
def merge_args_and_kwargs(function_abi, args, kwargs):
if len(args) + len(kwargs) != len(function_abi['inputs']):
raise TypeError(
"Incorrect argument count. Expected '{0}'. Got '{1}'".format(
len(function_abi['inputs']),
len(args) + len(kwargs),
)
)
if not kwargs:
return args
args_as_kwargs = {
arg_abi['name']: arg
for arg_abi, arg in zip(function_abi['inputs'], args)
}
duplicate_keys = set(args_as_kwargs).intersection(kwargs.keys())
if duplicate_keys:
raise TypeError(
"{fn_name}() got multiple values for argument(s) '{dups}'".format(
fn_name=function_abi['name'],
dups=', '.join(duplicate_keys),
)
)
sorted_arg_names = [arg_abi['name'] for arg_abi in function_abi['inputs']]
unknown_kwargs = {key for key in kwargs.keys() if key not in sorted_arg_names}
if unknown_kwargs:
raise TypeError(
"{fn_name}() got unexpected keyword argument(s) '{dups}'".format(
fn_name=function_abi['name'],
dups=', '.join(unknown_kwargs),
)
)
sorted_args = list(zip(
*sorted(
itertools.chain(kwargs.items(), args_as_kwargs.items()),
key=lambda kv: sorted_arg_names.index(kv[0])
)
))
if sorted_args:
return sorted_args[1]
else:
return tuple()
def get_constructor_abi(contract_abi):
candidates = [
abi for abi in contract_abi if abi['type'] == 'constructor'
]
if len(candidates) == 1:
return candidates[0]
elif len(candidates) == 0:
return None
elif len(candidates) > 1:
raise ValueError("Found multiple constructors.")
DYNAMIC_TYPES = ['bytes', 'string']
STATIC_TYPES = list(itertools.chain(
['address', 'bool'],
['uint{0}'.format(i) for i in range(8, 257, 8)],
['int{0}'.format(i) for i in range(8, 257, 8)],
['bytes{0}'.format(i) for i in range(1, 33)],
))
BASE_TYPE_REGEX = '|'.join((
_type + '(?![a-z0-9])'
for _type
in itertools.chain(STATIC_TYPES, DYNAMIC_TYPES)
))
SUB_TYPE_REGEX = (
    r'\['
    '[0-9]*'
    r'\]'
)
TYPE_REGEX = (
'^'
'(?:{base_type})'
'(?:(?:{sub_type})*)?'
'$'
).format(
base_type=BASE_TYPE_REGEX,
sub_type=SUB_TYPE_REGEX,
)
def is_recognized_type(abi_type):
return bool(re.match(TYPE_REGEX, abi_type))
NAME_REGEX = (
'[a-zA-Z_]'
'[a-zA-Z0-9_]*'
)
ENUM_REGEX = (
'^'
'{lib_name}'
    r'\.'
'{enum_name}'
'$'
).format(lib_name=NAME_REGEX, enum_name=NAME_REGEX)
def is_probably_enum(abi_type):
return bool(re.match(ENUM_REGEX, abi_type))
@to_tuple
def normalize_event_input_types(abi_args):
for arg in abi_args:
if is_recognized_type(arg['type']):
yield arg
elif is_probably_enum(arg['type']):
yield {k: 'uint8' if k == 'type' else v for k, v in arg.items()}
else:
yield arg
def abi_to_signature(abi):
function_signature = "{fn_name}({fn_input_types})".format(
fn_name=abi['name'],
fn_input_types=','.join([
arg['type'] for arg in normalize_event_input_types(abi.get('inputs', []))
]),
)
return function_signature
@coerce_return_to_text
def normalize_return_type(data_type, data_value):
try:
base, sub, arrlist = data_type
except ValueError:
base, sub, arrlist = process_type(data_type)
if arrlist:
sub_type = (base, sub, arrlist[:-1])
return [normalize_return_type(sub_type, sub_value) for sub_value in data_value]
elif base == 'address':
return add_0x_prefix(data_value)
else:
return data_value
| 24.447811
| 87
| 0.603498
|
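A short usage sketch of the pure helpers above, assuming the module is importable as web3.utils.abi (its path in this snapshot) and that the pinned eth-utils/eth-abi versions it was written against are installed; the ABI fragment is illustrative:

# Illustrative ABI fragment exercising filter_by_type, get_abi_input_types and abi_to_signature.
from web3.utils.abi import filter_by_type, get_abi_input_types, abi_to_signature

CONTRACT_ABI = [
    {"type": "constructor", "inputs": [{"name": "owner", "type": "address"}]},
    {"type": "function", "name": "transfer",
     "inputs": [{"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}],
     "outputs": [{"name": "", "type": "bool"}]},
]

functions = filter_by_type("function", CONTRACT_ABI)
print(get_abi_input_types(functions[0]))   # ['address', 'uint256']
print(abi_to_signature(functions[0]))      # transfer(address,uint256)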
3a1a2084954e4ccc19341b3370cda3705ff9152c
| 828
|
py
|
Python
|
tests/test_bad_tempfile_use.py
|
timgates42/dlint
|
501acbc53f710fed00d16b443076581d03f33163
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_bad_tempfile_use.py
|
timgates42/dlint
|
501acbc53f710fed00d16b443076581d03f33163
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_bad_tempfile_use.py
|
timgates42/dlint
|
501acbc53f710fed00d16b443076581d03f33163
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import unittest
import dlint
class TestBadTempfileUse(dlint.test.base.BaseTest):
def test_bad_tempfile_usage(self):
python_string = self.get_ast_node(
"""
import tempfile
tempfile.mktemp()
"""
)
linter = dlint.linters.BadTempfileUseLinter()
linter.visit(python_string)
result = linter.get_results()
expected = [
dlint.linters.base.Flake8Result(
lineno=4,
col_offset=0,
message=dlint.linters.BadTempfileUseLinter._error_tmpl
),
]
assert result == expected
if __name__ == "__main__":
unittest.main()
| 19.255814
| 70
| 0.583333
|
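For context, the linter exercised by this test flags tempfile.mktemp() because it only returns a file name, leaving a window for another process to create the path first. A small sketch of the standard-library replacements it nudges you toward, both of which create the file atomically:

# Safer alternatives to tempfile.mktemp().
import os
import tempfile

fd, path = tempfile.mkstemp()   # returns an open file descriptor plus the path
os.close(fd)
os.remove(path)

with tempfile.NamedTemporaryFile() as tmp:  # removed automatically when closed
    tmp.write(b"scratch data")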
b06ea480ddc9e39163897780f5f55e50bc66bcbd
| 3,943
|
py
|
Python
|
atto/bin/client.py
|
Darthone/atto
|
4d9093b45e82ed5e561878de6064ec7a1d57da05
|
[
"MIT"
] | 1
|
2015-12-09T21:00:49.000Z
|
2015-12-09T21:00:49.000Z
|
atto/bin/client.py
|
Darthone/atto
|
4d9093b45e82ed5e561878de6064ec7a1d57da05
|
[
"MIT"
] | null | null | null |
atto/bin/client.py
|
Darthone/atto
|
4d9093b45e82ed5e561878de6064ec7a1d57da05
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import logging
import os
import sys
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
import time
import util
import util.config
import plugins
from daemon import Daemon
logger = logging.getLogger()
class Client(Daemon):
def load_plugins(self):
self.plugs = [plugins.CPUPlugin()]
def run(self):
# These directories are generated by the generate_certificates script
keys_dir = self.config['certs']['certs']
public_keys_dir = self.config['certs']['public']
secret_keys_dir = self.config['certs']['private']
if not (util.check_dir(keys_dir) and util.check_dir(public_keys_dir) and util.check_dir(secret_keys_dir)):
logging.critical("Certificates are missing - run generate_certificates.py script first")
sys.exit(1)
logger.info("Keys: %s | Public: %s | Secret: %s", keys_dir, public_keys_dir, secret_keys_dir)
ctx = zmq.Context.instance()
client = ctx.socket(zmq.REQ)
client.RCVTIMEO = self.config['client']['timeout']
# We need two certificates, one for the client and one for
# the server. The client must know the server's public key
# to make a CURVE connection.
client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
client.curve_secretkey = client_secret
client.curve_publickey = client_public
server_public_file = os.path.join(public_keys_dir, "server.key")
server_public, _ = zmq.auth.load_certificate(server_public_file)
# The client must know the server's public key to make a CURVE connection.
client.curve_serverkey = server_public
connection_str = 'tcp://%s:%s' % (self.config['client']['dest'], self.config['client']['port'])
logger.info("Trying to connect to %s", connection_str)
client.connect(connection_str)
        logger.info("Loading plugins")
self.load_plugins()
logger.info("Starting Loop")
while True:
try:
for p in self.plugs:
logging.info("Running %s", p._name)
client.send(p.encode())
logging.debug("Waiting for ack")
message = client.recv()
                    logging.info("Received ack")
time.sleep(self.config['client']['sleep']/1000)
self.check_config()
except zmq.error.ZMQError as e:
logger.critical("ZMQError, Exiting: %s", e)
exit()
if __name__ == '__main__':
if zmq.zmq_version_info() < (4,0):
raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version()))
config = util.load_yaml_file(util.config.CLIENT["config"])
util.init_logging(**config['logging'])
daemon = Client(config['pid_file'], config_file=util.config.CLIENT["config"])
daemon.run()
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
| 41.946809
| 125
| 0.544002
|
feb9590fdb60a1fd11aaf4ea0a9b4a00b20bd9e3
| 9,660
|
py
|
Python
|
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/eval/eval_D.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1
|
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/eval/eval_D.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/eval/eval_D.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1
|
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
|
from __future__ import print_function
import argparse
import os
import random
import sys
sys.path.append(os.getcwd())
import pdb
import time
import numpy as np
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, decode_txt
import misc.dataLoader as dl
import misc.model as model
from misc.encoder_QIH import _netE
import datetime
import h5py
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='', help='folder to output images and model checkpoints')
parser.add_argument('--input_img_h5', default='vdl_img_vgg.h5', help='')
parser.add_argument('--input_ques_h5', default='visdial_data.h5', help='visdial_data.h5')
parser.add_argument('--input_json', default='visdial_params.json', help='visdial_params.json')
parser.add_argument('--model_path', default='', help='folder to output images and model checkpoints')
parser.add_argument('--cuda' , action='store_true', help='enables cuda')
opt = parser.parse_args()
opt.manualSeed = random.randint(1, 10000) # fix seed
if opt.cuda:
torch.cuda.manual_seed(opt.manualSeed)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
####################################################################################
# Data Loader
####################################################################################
if opt.model_path != '':
print("=> loading checkpoint '{}'".format(opt.model_path))
checkpoint = torch.load(opt.model_path)
model_path = opt.model_path
data_dir = opt.data_dir
input_img_h5 = opt.input_img_h5
input_ques_h5 = opt.input_ques_h5
input_json = opt.input_json
opt = checkpoint['opt']
opt.start_epoch = checkpoint['epoch']
opt.batchSize = 5
opt.data_dir = data_dir
opt.model_path = model_path
input_img_h5 = os.path.join(opt.data_dir, opt.input_img_h5)
input_ques_h5 = os.path.join(opt.data_dir, opt.input_ques_h5)
input_json = os.path.join(opt.data_dir, opt.input_json)
dataset_val = dl.validate(input_img_h5=input_img_h5, input_ques_h5=input_ques_h5,
input_json=input_json, negative_sample = opt.negative_sample,
num_val = opt.num_val, data_split = 'test')
dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=opt.batchSize,
shuffle=False, num_workers=int(opt.workers))
####################################################################################
# Build the Model
####################################################################################
n_words = dataset_val.vocab_size
ques_length = dataset_val.ques_length
ans_length = dataset_val.ans_length + 1
his_length = ques_length+dataset_val.ans_length
itow = dataset_val.itow
img_feat_size = 512
netE = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)
netW = model._netW(n_words, opt.ninp, opt.dropout)
netD = model._netD(opt.model, opt.ninp, opt.nhid, opt.nlayers, n_words, opt.dropout)
critD = model.nPairLoss(opt.nhid, 2)
if opt.model_path != '': # load the pre-trained model.
netW.load_state_dict(checkpoint['netW'])
netE.load_state_dict(checkpoint['netE'])
netD.load_state_dict(checkpoint['netD'])
print('Loading model Success!')
if opt.cuda: # ship to cuda, if has GPU
netW.cuda(), netE.cuda(), netD.cuda()
critD.cuda()
n_neg = 100
####################################################################################
# Some Functions
####################################################################################
def eval():
netW.eval()
netE.eval()
netD.eval()
data_iter_val = iter(dataloader_val)
ques_hidden = netE.init_hidden(opt.batchSize)
hist_hidden = netE.init_hidden(opt.batchSize)
opt_hidden = netD.init_hidden(opt.batchSize)
i = 0
display_count = 0
average_loss = 0
rank_all_tmp = []
result_all = []
img_atten = torch.FloatTensor(100 * 30, 10, 7, 7)
while i < len(dataloader_val):#len(1000):
        data = next(data_iter_val)
image, history, question, answer, answerT, questionL, opt_answer, \
opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data
batch_size = question.size(0)
image = image.view(-1, 512)
img_input.data.resize_(image.size()).copy_(image)
save_tmp = [[] for j in range(batch_size)]
for rnd in range(10):
# get the corresponding round QA and history.
ques = question[:,rnd,:].t()
his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
opt_ans = opt_answerT[:,rnd,:].clone().view(-1, ans_length).t()
gt_id = answer_ids[:,rnd]
ques_input.data.resize_(ques.size()).copy_(ques)
his_input.data.resize_(his.size()).copy_(his)
gt_index.data.resize_(gt_id.size()).copy_(gt_id)
opt_ans_input.data.resize_(opt_ans.size()).copy_(opt_ans)
opt_len = opt_answerLen[:,rnd,:].clone().view(-1)
ques_emb = netW(ques_input, format = 'index')
his_emb = netW(his_input, format = 'index')
ques_hidden = repackage_hidden(ques_hidden, batch_size)
hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))
featD, ques_hidden = netE(ques_emb, his_emb, img_input, \
ques_hidden, hist_hidden, rnd+1)
#img_atten[i*batch_size:(i+1)*batch_size, rnd, :] = img_atten_weight.data.view(batch_size, 7, 7)
opt_ans_emb = netW(opt_ans_input, format = 'index')
opt_hidden = repackage_hidden(opt_hidden, opt_ans_input.size(1))
opt_feat = netD(opt_ans_emb, opt_ans_input, opt_hidden, n_words)
opt_feat = opt_feat.view(batch_size, -1, opt.ninp)
featD = featD.view(-1, opt.ninp, 1)
score = torch.bmm(opt_feat, featD)
score = score.view(-1, 100)
for b in range(batch_size):
gt_index.data[b] = gt_index.data[b] + b*100
gt_score = score.view(-1).index_select(0, gt_index)
sort_score, sort_idx = torch.sort(score, 1, descending=True)
count = sort_score.gt(gt_score.view(-1,1).expand_as(sort_score))
rank = count.sum(1) + 1
rank_all_tmp += list(rank.view(-1).data.cpu().numpy())
i += 1
result_all += save_tmp
if i % 50 == 0:
R1 = np.sum(np.array(rank_all_tmp)==1) / float(len(rank_all_tmp))
R5 = np.sum(np.array(rank_all_tmp)<=5) / float(len(rank_all_tmp))
R10 = np.sum(np.array(rank_all_tmp)<=10) / float(len(rank_all_tmp))
ave = np.sum(np.array(rank_all_tmp)) / float(len(rank_all_tmp))
mrr = np.sum(1/(np.array(rank_all_tmp, dtype='float'))) / float(len(rank_all_tmp))
print ('%d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(1, len(dataloader_val), mrr, R1, R5, R10, ave))
    return img_atten, rank_all_tmp
####################################################################################
# Main
####################################################################################
img_input = torch.FloatTensor(opt.batchSize)
ques_input = torch.LongTensor(ques_length, opt.batchSize)
his_input = torch.LongTensor(his_length, opt.batchSize)
# answer input
opt_ans_input = torch.LongTensor(ans_length, opt.batchSize)
fake_ans_input = torch.FloatTensor(ques_length, opt.batchSize, n_words)
sample_ans_input = torch.LongTensor(1, opt.batchSize)
# answer index location.
opt_index = torch.LongTensor( opt.batchSize)
fake_index = torch.LongTensor(opt.batchSize)
batch_sample_idx = torch.LongTensor(opt.batchSize)
# answer len
fake_len = torch.LongTensor(opt.batchSize)
# noise
noise_input = torch.FloatTensor(opt.batchSize)
gt_index = torch.LongTensor(opt.batchSize)
if opt.cuda:
ques_input, his_input, img_input = ques_input.cuda(), his_input.cuda(), img_input.cuda()
opt_ans_input = opt_ans_input.cuda()
fake_ans_input, sample_ans_input = fake_ans_input.cuda(), sample_ans_input.cuda()
opt_index, fake_index = opt_index.cuda(), fake_index.cuda()
fake_len = fake_len.cuda()
noise_input = noise_input.cuda()
batch_sample_idx = batch_sample_idx.cuda()
gt_index = gt_index.cuda()
ques_input = Variable(ques_input)
img_input = Variable(img_input)
his_input = Variable(his_input)
opt_ans_input = Variable(opt_ans_input)
fake_ans_input = Variable(fake_ans_input)
sample_ans_input = Variable(sample_ans_input)
opt_index = Variable(opt_index)
fake_index = Variable(fake_index)
fake_len = Variable(fake_len)
noise_input = Variable(noise_input)
batch_sample_idx = Variable(batch_sample_idx)
gt_index = Variable(gt_index)
atten, rank_all = eval()
R1 = np.sum(np.array(rank_all)==1) / float(len(rank_all))
R5 = np.sum(np.array(rank_all)<=5) / float(len(rank_all))
R10 = np.sum(np.array(rank_all)<=10) / float(len(rank_all))
ave = np.sum(np.array(rank_all)) / float(len(rank_all))
mrr = np.sum(1/(np.array(rank_all, dtype='float'))) / float(len(rank_all))
print ('%d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(1, len(dataloader_val), mrr, R1, R5, R10, ave))
| 36.870229
| 113
| 0.63882
|
e416e30b742a496685687723d6c37aadc8add83f
| 945
|
py
|
Python
|
setup.py
|
IVRTech/simpleeval
|
6c6957e09815b7c9a5a08309a87b4ea2ea5b816c
|
[
"MIT"
] | null | null | null |
setup.py
|
IVRTech/simpleeval
|
6c6957e09815b7c9a5a08309a87b4ea2ea5b816c
|
[
"MIT"
] | null | null | null |
setup.py
|
IVRTech/simpleeval
|
6c6957e09815b7c9a5a08309a87b4ea2ea5b816c
|
[
"MIT"
] | null | null | null |
from setuptools import setup
__version__ = '0.9.11'
setup(
name='simpleeval',
py_modules=['simpleeval'],
version=__version__,
description='A simple, safe single expression evaluator library.',
long_description=open('README.rst', 'r').read(),
long_description_content_type='text/x-rst',
author='IVR Technology Group',
author_email='developers@ivrtechnology.com',
url='https://github.com/IVRTech/simpleeval/',
download_url='https://github.com/IVRTech/simpleeval/releases/tag/' + __version__,
keywords=['eval', 'simple', 'expression', 'parse', 'ast'],
test_suite='test_simpleeval',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
],
)
| 37.8
| 85
| 0.637037
|
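The package this setup.py ships is the single-module simpleeval; a minimal usage sketch for context, with names taken from the library's documented API rather than from the packaging code itself:

# Minimal simpleeval usage sketch.
from simpleeval import simple_eval, SimpleEval

print(simple_eval("21 + 21"))                        # 42
print(simple_eval("x + y", names={"x": 3, "y": 4}))  # 7

s = SimpleEval(functions={"double": lambda v: v * 2})
print(s.eval("double(10)"))                          # 20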
3f2597c216c1b101e8a8189cf235be9a99faa3aa
| 4,972
|
py
|
Python
|
slp_utils.py
|
harangju/roninpay
|
acdb7c6d57e61d3f32a9626d396ec47d14f3ece3
|
[
"MIT"
] | 60
|
2021-07-25T10:42:58.000Z
|
2022-03-21T21:35:20.000Z
|
slp_utils.py
|
harangju/roninpay
|
acdb7c6d57e61d3f32a9626d396ec47d14f3ece3
|
[
"MIT"
] | 15
|
2021-08-05T07:53:24.000Z
|
2022-03-23T22:12:57.000Z
|
slp_utils.py
|
harangju/roninpay
|
acdb7c6d57e61d3f32a9626d396ec47d14f3ece3
|
[
"MIT"
] | 66
|
2021-07-25T10:43:06.000Z
|
2022-03-16T12:03:23.000Z
|
from datetime import datetime, timedelta
from eth_account.messages import encode_defunct
from web3 import Web3
import json, requests
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36"
headers = {
"Content-Type": "application/json",
"User-Agent": USER_AGENT }
web3 = Web3(Web3.HTTPProvider('https://proxy.roninchain.com/free-gas-rpc', request_kwargs={ "headers": headers }))
web3_2 = Web3(Web3.HTTPProvider('https://api.roninchain.com/rpc', request_kwargs={ "headers": headers }))
with open('slp_abi.json') as f:
slp_abi = json.load(f)
slp_contract = web3.eth.contract(address=Web3.toChecksumAddress("0xa8754b9fa15fc18bb59458815510e40a12cd2014"), abi=slp_abi)
slp_contract_2 = web3_2.eth.contract(address=Web3.toChecksumAddress("0xa8754b9fa15fc18bb59458815510e40a12cd2014"), abi=slp_abi)
def get_claimed_slp(address):
return int(slp_contract_2.functions.balanceOf(address).call())
def get_unclaimed_slp(address):
response = requests.get(f"https://game-api.skymavis.com/game-api/clients/{address}/items/1", headers=headers, data="")
if (response.status_code != 200):
print(response.text)
assert(response.status_code == 200)
result = response.json()
total = int(result["total"]) - int(result["claimable_total"])
last_claimed_item_at = datetime.utcfromtimestamp(int(result["last_claimed_item_at"]))
if (datetime.utcnow() + timedelta(days=-14) < last_claimed_item_at):
total = 0
return total
def execute_slp_claim(claim, nonce):
if (claim.state["signature"] == None):
access_token = get_jwt_access_token(claim.address, claim.private_key)
custom_headers = headers.copy()
custom_headers["authorization"] = f"Bearer {access_token}"
response = requests.post(f"https://game-api.skymavis.com/game-api/clients/{claim.address}/items/1/claim", headers=custom_headers, json="")
if (response.status_code != 200):
print(response.text)
assert(response.status_code == 200)
result = response.json()["blockchain_related"]["signature"]
claim.state["signature"] = result["signature"].replace("0x", "")
claim.state["amount"] = result["amount"]
claim.state["timestamp"] = result["timestamp"]
claim_txn = slp_contract.functions.checkpoint(claim.address, claim.state["amount"], claim.state["timestamp"], claim.state["signature"]).buildTransaction({'gas': 1000000, 'gasPrice': 0, 'nonce': nonce})
signed_txn = web3.eth.account.sign_transaction(claim_txn, private_key = bytearray.fromhex(claim.private_key.replace("0x", "")))
web3.eth.send_raw_transaction(signed_txn.rawTransaction)
return web3.toHex(web3.keccak(signed_txn.rawTransaction)) # Returns transaction hash.
def transfer_slp(transaction, private_key, nonce):
transfer_txn = slp_contract.functions.transfer(
transaction.to_address,
transaction.amount).buildTransaction({
'chainId': 2020,
'gas': 100000,
'gasPrice': web3.toWei('0', 'gwei'),
'nonce': nonce,
})
signed_txn = web3.eth.account.sign_transaction(transfer_txn, private_key = bytearray.fromhex(private_key.replace("0x", "")))
web3.eth.send_raw_transaction(signed_txn.rawTransaction)
return web3.toHex(web3.keccak(signed_txn.rawTransaction)) # Returns transaction hash.
def sign_message(message, private_key):
message_encoded = encode_defunct(text = message)
message_signed = Web3().eth.account.sign_message(message_encoded, private_key = private_key)
return message_signed['signature'].hex()
def get_jwt_access_token(address, private_key):
random_message = create_random_message()
random_message_signed = sign_message(random_message, private_key)
payload = {
"operationName": "CreateAccessTokenWithSignature",
"variables": {
"input": {
"mainnet": "ronin",
"owner": f"{address}",
"message": f"{random_message}",
"signature": f"{random_message_signed}"
}
},
"query": "mutation CreateAccessTokenWithSignature($input: SignatureInput!) { createAccessTokenWithSignature(input: $input) { newAccount result accessToken __typename } } "
}
response = requests.post("https://axieinfinity.com/graphql-server-v2/graphql", headers=headers, json=payload)
if (response.status_code != 200):
print(response.text)
assert(response.status_code == 200)
return response.json()['data']['createAccessTokenWithSignature']['accessToken']
def create_random_message():
payload = {
"operationName": "CreateRandomMessage",
"variables": {},
"query": "mutation CreateRandomMessage { createRandomMessage } "
}
response = requests.post("https://axieinfinity.com/graphql-server-v2/graphql", headers=headers, json=payload)
if (response.status_code != 200):
print(response.text)
assert(response.status_code == 200)
return response.json()["data"]["createRandomMessage"]
| 44.392857
| 205
| 0.720233
|
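A hedged usage sketch of the helpers above: the address is a placeholder, slp_abi.json must sit next to slp_utils.py, and both calls hit live Ronin / Sky Mavis endpoints, so this is illustrative rather than something to run blindly.

# Illustrative only: requires network access and the ABI file loaded at import time.
from web3 import Web3
import slp_utils

account = Web3.toChecksumAddress("0x0000000000000000000000000000000000000000")  # placeholder address

claimed = slp_utils.get_claimed_slp(account)      # on-chain balanceOf via the SLP contract
unclaimed = slp_utils.get_unclaimed_slp(account)  # game API total, gated by the 14-day claim window
print(claimed, unclaimed)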
99ad443d062cfa94e6da5e094d4e97c96456f5d9
| 629
|
py
|
Python
|
ML/Recommendation_sys/product_based_search/data_processing.py
|
shauryajaggi/Team_Nodemon
|
7ffe9d02c53e6cbc96af6381803bd69619238da1
|
[
"MIT"
] | 9
|
2021-09-10T18:27:43.000Z
|
2021-12-18T14:12:14.000Z
|
ML/Recommendation_sys/product_based_search/data_processing.py
|
shauryajaggi/Team_Nodemon
|
7ffe9d02c53e6cbc96af6381803bd69619238da1
|
[
"MIT"
] | null | null | null |
ML/Recommendation_sys/product_based_search/data_processing.py
|
shauryajaggi/Team_Nodemon
|
7ffe9d02c53e6cbc96af6381803bd69619238da1
|
[
"MIT"
] | 4
|
2021-09-10T18:28:24.000Z
|
2021-10-01T07:51:40.000Z
|
import pandas as pd
def get_data_db(cnx, tag_input):
db_cursor = cnx.cursor()
tag_input = tag_input.strip()
query = f"SELECT * FROM shadowfax.products WHERE( product_name LIKE '%{tag_input}%' );"
df_tags = pd.read_sql(query, con=cnx)
df_tags['id'] = df_tags['inc_id']
df_tags.set_index('id', inplace=True)
return df_tags
def data_to_dict(df_tags):
df_json = df_tags.to_dict(orient='records')
return df_json
def final_dict_conversion(df_json, resp):
final_dict = {}
final_dict['status'] = resp
final_dict['data'] = df_json
# print(final_dict)
return final_dict
| 26.208333
| 91
| 0.674086
|
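A small self-contained sketch of the response shaping done by data_to_dict and final_dict_conversion, assuming the module is importable as data_processing; the DataFrame stands in for the MySQL result, since get_data_db needs a live connection (and note that it interpolates tag_input directly into the SQL string):

# Stand-in DataFrame so the shaping helpers can be exercised without a database.
import pandas as pd
from data_processing import data_to_dict, final_dict_conversion

df = pd.DataFrame(
    {"inc_id": [1, 2], "product_name": ["onion 1kg", "onion 5kg"], "price": [30, 120]}
)
df["id"] = df["inc_id"]
df.set_index("id", inplace=True)

payload = final_dict_conversion(data_to_dict(df), resp=200)
print(payload)  # {'status': 200, 'data': [{'inc_id': 1, ...}, {'inc_id': 2, ...}]}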
eb053aecfe9410859af2ae80ec39e8610f37bed8
| 3,906
|
py
|
Python
|
core/training/deepnn/train_wrappers.py
|
wumpusman/ComicTransfer
|
855eccc1c418a0aca12f44626c408a056cc67484
|
[
"MIT"
] | 2
|
2020-11-08T08:58:40.000Z
|
2021-07-24T05:32:31.000Z
|
core/training/deepnn/train_wrappers.py
|
wumpusman/ComicTransfer
|
855eccc1c418a0aca12f44626c408a056cc67484
|
[
"MIT"
] | 4
|
2021-06-08T22:34:29.000Z
|
2022-01-13T03:22:40.000Z
|
core/training/deepnn/train_wrappers.py
|
wumpusman/ComicTransfer
|
855eccc1c418a0aca12f44626c408a056cc67484
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import deprecated
from torch.utils import data as data_torch
#
class TrainModel():
#
def __init__(self,model_to_learn,loss_function):
"""A simple wrapper for handling loss functions
"""
self._loss_function=loss_function
self.model=model_to_learn
def set_loss_function(self,loss_function):
"""
Args:
loss_function:
Returns:
"""
self._loss_function=loss_function
def prepare_loss(self,batch):
"""
        Reshapes the batch tensors and computes the loss between the model prediction and the target
Args:
batch: a 2 by N Matrix where the first is the input, and the second is the output
Returns:
"""
x=batch[0]
y=batch[1]
x=x.permute((1,0,2,3,4))[0]
y=y.permute((1,0,2,3,4))[0]
predicted_y, features=self.model(x)
return self._loss_function(predicted_y,y)
def train(self,epochs,dataloader,optimizer,is_training=True)->[float]:
"""
        train a model, or just run through the loss without updating weights
Args:
epochs:
dataloader:
optimizer:
is_training:
Returns:
"""
loss_vals=[]
for epoch in range(epochs):
cycle=(iter(dataloader))
temp_losses_batch = []
for i in range(len(cycle)):
x,y,z=next(cycle)
x=x.cuda()
y=y.cuda()
z=z.cuda()
loss=None
if is_training==True:
loss=self.prepare_loss((x,y,z)) #array so it can. be arbitrary features
else:
with torch.no_grad():
loss=self.prepare_loss((x,y,z))
temp_losses_batch.append(loss.cpu().detach().numpy())
optimizer.zero_grad()
if is_training==True:
loss.backward()
optimizer.step()
del loss #for memory management on my very limited GPU
del x
del y
del z
torch.cuda.empty_cache()
loss_vals.append(np.array(temp_losses_batch).sum())
print(loss_vals[-1])
return loss_vals
class TrainWithFeatureChannels(TrainModel):
def __init__(self,model_to_learn,loss_function):
"""
        trains a model but also expects an additional feature channel from an earlier layer in the network;
        this still needs to be updated
Args:
model_to_learn: model that will be trained
loss_function: loss function to be used
"""
super().__init__(model_to_learn,loss_function)
def prepare_loss(self,batch):
x=batch[0]
y=batch[1]
z=batch[2]
x=x.permute((1,0,2,3,4))[0]
y=y.permute((1,0,2,3,4))[0]
z=z.permute((1,0,2,3,4))[0] #earlier layer
predicted_y, features=self.model(x)
l1= self._loss_function(predicted_y,y)
return l1
class TrainCoordWithUNET(TrainModel):
    def __init__(self, model_to_learn, loss_function):
"""
trains model with craft text but also uses appended coordconv that has a slightly different output shape
Args:
model_to_learn:
loss_function:
Returns:
"""
super().__init__(model_to_learn,loss_function)
def prepare_loss(self,batch):
x=batch[0]
y=batch[1]
x=x.permute((1,0,2,3,4))[0]
y=y.permute((1,0,2,3,4))[0]
predicted_y, features=self.model(x)
predicted_y=predicted_y.permute((0,2,3,1)) #shape is different
return self._loss_function(predicted_y,y)
| 26.04
| 112
| 0.545315
|
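A CPU-only sketch of the batch/loss contract TrainModel.prepare_loss expects: batches shaped (N, 1, C, H, W) and a model that returns a (prediction, features) pair. The tiny module, shapes, and import path are assumptions for illustration; train() itself additionally assumes CUDA tensors.

import torch
import torch.nn as nn
from core.training.deepnn.train_wrappers import TrainModel  # path as laid out in this repo

class TinyNet(nn.Module):
    """Identity model mimicking the (prediction, features) return TrainModel expects."""
    def forward(self, x):
        return x, None

trainer = TrainModel(TinyNet(), nn.MSELoss())
x = torch.rand(2, 1, 3, 8, 8)  # permute((1, 0, 2, 3, 4))[0] squeezes the singleton dim -> (2, 3, 8, 8)
y = torch.rand(2, 1, 3, 8, 8)
print(trainer.prepare_loss((x, y)))  # scalar MSE loss tensor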
dbd1d2afcd5b3e0c31ecb8dc31786d96102b0a1b
| 2,250
|
py
|
Python
|
main/auth/yahoo.py
|
ssxenon01/music-app
|
fcc562713a048f3a2d222756ce6fd9ed83386ff6
|
[
"MIT"
] | null | null | null |
main/auth/yahoo.py
|
ssxenon01/music-app
|
fcc562713a048f3a2d222756ce6fd9ed83386ff6
|
[
"MIT"
] | null | null | null |
main/auth/yahoo.py
|
ssxenon01/music-app
|
fcc562713a048f3a2d222756ce6fd9ed83386ff6
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import flask
import auth
import model
import util
from main import app
yahoo_config = dict(
access_token_url='https://api.login.yahoo.com/oauth/v2/get_token',
authorize_url='https://api.login.yahoo.com/oauth/v2/request_auth',
base_url='https://query.yahooapis.com/',
consumer_key=model.Config.get_master_db().yahoo_consumer_key,
consumer_secret=model.Config.get_master_db().yahoo_consumer_secret,
request_token_url='https://api.login.yahoo.com/oauth/v2/get_request_token',
)
yahoo = auth.create_oauth_app(yahoo_config, 'yahoo')
@app.route('/_s/callback/yahoo/oauth-authorized/')
def yahoo_authorized():
response = yahoo.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
fields = 'guid, emails, familyName, givenName, nickname'
me = yahoo.get(
'/v1/yql',
data={
'format': 'json',
'q': 'select %s from social.profile where guid = me;' % fields,
'realm': 'yahooapis.com',
},
)
user_db = retrieve_user_from_yahoo(me.data['query']['results']['profile'])
return auth.signin_user_db(user_db)
@yahoo.tokengetter
def get_yahoo_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/yahoo/')
def signin_yahoo():
return auth.signin_oauth(yahoo)
def retrieve_user_from_yahoo(response):
auth_id = 'yahoo_%s' % response['guid']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
names = [response.get('givenName', ''), response.get('familyName', '')]
emails = response.get('emails', {})
if not isinstance(emails, list):
emails = [emails]
emails = [e for e in emails if 'handle' in e]
emails.sort(key=lambda e: e.get('primary', False))
email = emails[0]['handle'] if emails else ''
return auth.create_user_db(
auth_id=auth_id,
name=' '.join(names).strip() or response['nickname'],
username=response['nickname'],
email=email,
verified=bool(email),
)
| 28.481013
| 79
| 0.658667
|
07cf9d21c0eeb53efad9f0984921c0b4086c3a34
| 10,324
|
py
|
Python
|
examples/rocky_test/multi_demos.py
|
Corsair-cxs/micropython-rocky
|
796008edfffb65e050218cbb98a540840db5942a
|
[
"MIT"
] | 181
|
2018-01-31T08:22:21.000Z
|
2021-12-25T12:46:18.000Z
|
examples/rocky_test/multi_demos.py
|
Corsair-cxs/micropython-rocky
|
796008edfffb65e050218cbb98a540840db5942a
|
[
"MIT"
] | 19
|
2018-01-31T10:07:19.000Z
|
2021-03-11T07:32:28.000Z
|
examples/rocky_test/multi_demos.py
|
Corsair-cxs/micropython-rocky
|
796008edfffb65e050218cbb98a540840db5942a
|
[
"MIT"
] | 87
|
2018-03-22T00:41:48.000Z
|
2022-02-05T10:09:59.000Z
|
import sensor, image, time, machine, pyb, nn
barCol = 0
def DrawPgsBar(img, barLen, loopCnt, startTick, width=5):
global barCol
lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
if (barCol & 0x80) == 0:
c = barCol & 0x7F
else:
c = 128 - (barCol & 0x7F)
img.draw_rectangle(2, 2, barLen + 2, width, color=(0,0,0))
img.draw_rectangle(2, 3, lnLen, width-2, fill=True, color=(c,c,c))
barCol += 16
def BlobTest(thresholds, loopCnt = 390, barLen = 120):
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.CIF)
#sensor.set_windowing((320,240))
sensor.set_auto_gain(True)
#sensor.set_auto_whitebal(True) # must be turned off for color tracking
clock = time.clock()
avg = 0.0
startTick = time.ticks()
while(True):
if time.ticks() - startTick > loopCnt:
break
clock.tick()
img = sensor.snapshot()
img.draw_string(4, 8, 'red,green,blue blob detect', color=(0,0,0))
t0 = time.ticks()
blobSet = img.find_blobs(thresholds, pixels_threshold=200, area_threshold=200)
t1 = time.ticks() - t0
avg = avg * 0.95 + t1 * 0.05
lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
DrawPgsBar(img, barLen, loopCnt, startTick)
for blob in blobSet:
img.draw_rectangle(blob.rect())
img.draw_cross(blob.cx(), blob.cy())
print('algo time cost : %.2f ms' % (avg))
def CorrTest(loopCnt = 220, barLen=120):
sensor.reset()
# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565)
#sensor.set_windowing((480,272))
clock = time.clock()
avg = 0.0
startTick = time.ticks()
corr = 0.3
while (True):
if time.ticks() - startTick > loopCnt:
break
clock.tick()
img = sensor.snapshot()
for i in range(7):
img.draw_rectangle(160-i*15, 120-i*15, i*15*2, i*15*2)
corr += 0.05
if corr >= 4.0:
corr = 0.3
img.lens_corr(corr)
lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
DrawPgsBar(img, barLen, loopCnt, startTick)
img.draw_string(4,4,'Lens correction %.2f' % (corr), color=(0,0,0))
def FaceTest(loopCnt = 220, barLen=120):
sensor.reset()
# Sensor settings
sensor.set_contrast(1)
#sensor.set_gainceiling(16)
# HQVGA and GRAYSCALE are the best for face tracking.
#sensor.set_framesize(sensor.VGA)
#sensor.set_windowing((320,240))
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320,240))
sensor.set_pixformat(sensor.GRAYSCALE)
#sensor.set_auto_gain(False)
#sensor.set_auto_whitebal(True) # must be turned off for color tracking
# Load Haar Cascade
    # By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
clock = time.clock()
avg = 0.0
startTick = time.ticks()
while (True):
if time.ticks() - startTick > loopCnt:
break
clock.tick()
img = sensor.snapshot()
img.draw_string(4,4,'Face Detect', color=(0,0,0))
t0 = time.ticks()
objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
t1 = time.ticks() - t0
avg = avg * 0.90 + t1 * 0.10
fID = 0
lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
DrawPgsBar(img, barLen, loopCnt, startTick)
for r in objects:
img.draw_rectangle(r, thickness=3)
img.draw_rectangle(r[0], r[1], 48, 10, fill=True)
fID += 1
s = 'face %d' % (fID)
img.draw_string(r[0], r[1], s, color = (0,0,0))
print('algo time cost : %.2f ms' % (avg))
def CIFAR10Test(loopCnt = 600, isFull = False, barLen = 105):
pyb.LED(1).off()
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.VGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((192, 192)) # Set window
sensor.skip_frames(time = 300) # Wait for settings take effect.
sensor.set_auto_gain(False)
#sensor.set_framerate(0<<9|1<<12)
if isFull:
net = nn.load('cifar10.network')
else:
net = nn.load('cifar10_fast.network')
labels = ['plane', 'auto', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
clock = time.clock()
tAvg = 0.0
startTick = time.ticks()
while (True):
if time.ticks() - startTick > loopCnt:
break
clock.tick()
img = sensor.snapshot()
t0 = time.ticks()
lst = net.search(img, threshold=0.640, min_scale=1, scale_mul=0.8, \
x_overlap=-1, y_overlap=-1, contrast_threshold=0.5)
t1 = time.ticks() - t0
tAvg = tAvg * 0.9 + t1 * 0.1
        img.draw_string(4,8,'CIFAR-10: classify:\nplane,auto,bird,cat,\ndeer,dog,frog,horse,\nship,truck', color=(0,0,0))
lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
DrawPgsBar(img, barLen, loopCnt, startTick)
for obj in lst:
print(' %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
rc = obj.rect()
#img.draw_rectangle(rc, color=(255,255,255))
img.draw_rectangle(barLen+10,1,50,8,fill=True,color=(0,0,0))
img.draw_string(barLen+10, 0, labels[obj.index()])
print('algo time cost : %.2f ms' % (tAvg))
def LENETTest(loopCnt = 1200, barLen=60):
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.VGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((84, 84)) # Set 128x128 window.
sensor.skip_frames(time = 1400) # Wait for settings take effect.
sensor.set_auto_gain(False)
sensor.set_framerate(2<<2)
#sensor.set_auto_whitebal(False)
#sensor.set_auto_exposure(False)
net = nn.load('/lenet.network')
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
clock = time.clock()
avg = 0.0
pyb.LED(1).on()
startTick = time.ticks()
while (True):
if time.ticks() - startTick > loopCnt:
break
clock.tick()
img = sensor.snapshot()
img.draw_string(3,8,'recg 0-9', color=(0,0,0))
t1 = time.ticks()
tmp_img = img.copy().binary([(120,255)], invert=True)
lst = net.search(tmp_img, threshold=0.8, min_scale=1, scale_mul=0.8, \
x_overlap=-1, y_overlap=-1, contrast_threshold=0.5, softmax=False)
t2 = time.ticks() - t1
avg = avg * 0.95 + t2 * 0.05
lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
img.draw_rectangle(0, 2, barLen + 1, 3)
img.draw_rectangle(0, 3, lnLen, 1, fill=True)
for obj in lst:
print('Detected %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
img.draw_rectangle(obj.rect())
img.draw_string(barLen+8,2,labels[obj.index()], color = (0,0,0))
# print(clock.fps())
print('algo time cost : %.2f ms' % (avg))
# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green things. You may wish to tune them...
thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds
(30, 100, -64, -8, -32, 32), # generic_green_thresholds
(0, 15, 0, 40, -80, -20)] # generic_blue_thresholds
# You may pass up to 16 thresholds above. However, it's not really possible to segment any
# scene with 16 thresholds before color thresholds start to overlap heavily.
thresholds2 = [(50, 92, -68, -16, 9, 119)]
thresholds3 = [(92, 64, -81, -9, 13, 75),(63, 25, 86, 63, 11, 127),(27, 69, -50,15, -67, -22)]
def LEDTest():
i = 0
while(True):
if i & 1:
pyb.LED(1).on()
else:
pyb.LED(1).off()
if i & 2:
pyb.LED(2).on()
else:
pyb.LED(2).off()
if i & 4:
pyb.LED(3).on()
else:
pyb.LED(3).off()
i += 1
time.sleep(5)
def QRCodeTest(loopCnt = 120, barLen = 120):
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((400,272))
sensor.skip_frames(time = 1000)
sensor.set_auto_gain(False)
clock = time.clock()
avg = 0.0
startTick = time.ticks()
while (True):
if time.ticks() - startTick > loopCnt:
break
clock.tick()
img = sensor.snapshot()
#img.lens_corr(1.5) # strength of 1.8 is good for the 2.8mm lens.
img.draw_string(4,8,'QR Code Scan', color=(0,0,0))
t1 = time.ticks()
codeSet = img.find_qrcodes()
t2 = time.ticks() - t1
lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
DrawPgsBar(img, barLen, loopCnt, startTick)
avg = avg * 0.92 + t2 * 0.08
for code in codeSet:
rc = code.rect()
img.draw_rectangle(rc, thickness = 2, color=(0,191,255))
#print(type(code))
#print(code.payload())
sPayload = code.payload()
#print(len(sPayload))
lnLen = len(sPayload) * 8
if rc[0] + lnLen >= 400:
x = 400 - lnLen
else:
x = rc[0]
img.draw_rectangle(x - 1, rc[1]+1, lnLen+2, 8, color=(0,0,0), fill=True)
img.draw_string(x, rc[1], sPayload)
print('algo time cost : %.2f ms' % (avg))
while (True):
#LEDTest()
pyb.LED(1).on()
#pyb.LED(2).on()
#pyb.LED(3).on()
#pyb.LED(4).off()
CorrTest(9000)
CIFAR10Test(40000, True)
QRCodeTest(40000)
BlobTest(thresholds3, 30000)
FaceTest(40000)
#LENETTest(20000)
| 37.136691
| 122
| 0.573905
|
37d37c06b3158942879be82a292b3ac327a6b657
| 923
|
py
|
Python
|
forms.py
|
torjeikenes/Lokomaten
|
130af52b13c3c6bb07c8e3fec558ae9a11d5ed7e
|
[
"MIT"
] | 1
|
2020-06-08T16:40:24.000Z
|
2020-06-08T16:40:24.000Z
|
forms.py
|
torjeikenes/Lokomaten
|
130af52b13c3c6bb07c8e3fec558ae9a11d5ed7e
|
[
"MIT"
] | null | null | null |
forms.py
|
torjeikenes/Lokomaten
|
130af52b13c3c6bb07c8e3fec558ae9a11d5ed7e
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms import RadioField
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired
import datetime
class LokForm(FlaskForm):
navn = StringField('Navn', validators=[DataRequired()])
lokstreker = RadioField('Løkstreker',
choices=[('1','1'),('2','2'),('3','3'),('4','4'),('5','5'),('6','6'),('10','Superløk (10)'),('20','Megaløk (20)')],
default='1',
validators=[DataRequired()])
forklaring = StringField('Forklaring',
validators=[DataRequired()])
dato = DateField('Dato',
format='%Y-%m-%d',
default=datetime.datetime.today,
validators=[DataRequired()])
submit = SubmitField('Send inn')
| 41.954545
| 143
| 0.569881
|
f15428733e7b1aa68c0998ffc05e36c81acf0c3b
| 225
|
py
|
Python
|
ex11.py
|
bsdnetworks/pthw
|
0502a249bf12b4b2c576d15b586d61ea73ce1103
|
[
"BSD-3-Clause"
] | null | null | null |
ex11.py
|
bsdnetworks/pthw
|
0502a249bf12b4b2c576d15b586d61ea73ce1103
|
[
"BSD-3-Clause"
] | null | null | null |
ex11.py
|
bsdnetworks/pthw
|
0502a249bf12b4b2c576d15b586d61ea73ce1103
|
[
"BSD-3-Clause"
] | null | null | null |
print "How old are you?",
age = raw_input()
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
print "So, you're %r old, %r tall and %r heavy." % (
age, height, weight)
| 22.5
| 52
| 0.64
|
c78138ce64d01e1c4f5f06f4532700d415d3d49f
| 1,934
|
py
|
Python
|
var/spack/repos/builtin/packages/r-clusterprofiler/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2020-09-10T22:50:08.000Z
|
2021-01-12T22:18:54.000Z
|
var/spack/repos/builtin/packages/r-clusterprofiler/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 14
|
2021-07-20T01:04:53.000Z
|
2022-03-02T01:08:36.000Z
|
var/spack/repos/builtin/packages/r-clusterprofiler/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-05-06T00:17:46.000Z
|
2021-05-06T00:17:46.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RClusterprofiler(RPackage):
"""statistical analysis and visualization of functional profiles for genes
and gene clusters.
This package implements methods to analyze and visualize functional
profiles (GO and KEGG) of gene and gene clusters."""
homepage = "https://bioconductor.org/packages/clusterProfiler"
git = "https://git.bioconductor.org/packages/clusterProfiler.git"
version('3.12.0', commit='6ec88d10832bdfd938e9c065b377015eedb7eee2')
version('3.10.1', commit='39927ef7ff6f97e27557bcf4147e2133b364fd3c')
version('3.8.1', commit='81e1a7ac49e4713703c55f87f945b20de5e7ab36')
version('3.6.0', commit='ff15e3dba69b93bc872d5f5d07821cd9ae20d829')
version('3.4.4', commit='b86b00e8405fe130e439362651a5567736e2d9d7')
depends_on('r@3.3.1:', type=('build', 'run'))
depends_on('r-dose@3.1.3:', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-gosemsim@2.0.0:', when='@3.4.4:3.6.0', type=('build', 'run'))
depends_on('r-gosemsim', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-plyr', type=('build', 'run'))
depends_on('r-qvalue', type=('build', 'run'))
depends_on('r-rvcheck', type=('build', 'run'))
depends_on('r-tidyr', type=('build', 'run'))
depends_on('r-dose@3.3.2:', when='@3.6.0:', type=('build', 'run'))
depends_on('r@3.4.0:', when='@3.8.1:', type=('build', 'run'))
depends_on('r-dose@3.5.1:', when='@3.8.1:', type=('build', 'run'))
depends_on('r-enrichplot@0.99.7:', when='@3.8.1:', type=('build', 'run'))
| 44.976744
| 79
| 0.660807
|
4790d00d7fadae725e817247a70ba93018ea5647
| 290
|
py
|
Python
|
app/scripts/scrape_hemnet.py
|
hsadler/scrapers_scrapers_scrapers
|
35db63c51dbb1cc4d81c6e61b789147af31e3f47
|
[
"MIT"
] | 1
|
2021-08-16T16:35:51.000Z
|
2021-08-16T16:35:51.000Z
|
app/scripts/scrape_hemnet.py
|
hsadler/scrapers_scrapers_scrapers
|
35db63c51dbb1cc4d81c6e61b789147af31e3f47
|
[
"MIT"
] | null | null | null |
app/scripts/scrape_hemnet.py
|
hsadler/scrapers_scrapers_scrapers
|
35db63c51dbb1cc4d81c6e61b789147af31e3f47
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('..')
import json
from service.scrape import Scrape
scrape_result = Scrape.scrape_hemnet_api()
filepath = '/hemnet_scrape_results.json'
with open(filepath, "w") as f:
json_string = json.dumps(scrape_result, indent=2, sort_keys=True)
f.write(json_string)
| 26.363636
| 69
| 0.758621
|
bd5163559153c2e1afcfc88bbd0f8f148bec95d5
| 5,481
|
py
|
Python
|
mittmedia-dl.py
|
Armandur/mittmedia-dl
|
193d9b657ebb6b4902ece3dff533a08e3886bd36
|
[
"CC0-1.0"
] | 1
|
2020-10-28T11:10:26.000Z
|
2020-10-28T11:10:26.000Z
|
mittmedia-dl.py
|
Armandur/mittmedia-dl
|
193d9b657ebb6b4902ece3dff533a08e3886bd36
|
[
"CC0-1.0"
] | null | null | null |
mittmedia-dl.py
|
Armandur/mittmedia-dl
|
193d9b657ebb6b4902ece3dff533a08e3886bd36
|
[
"CC0-1.0"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver import Firefox
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.firefox.options import Options
from selenium.common import exceptions
import requests
import time
import os
import sys
from pathlib import Path
import shutil
from datetime import date
from PyPDF2 import PdfFileMerger
from PyPDF2 import PdfFileReader
from PyPDF2 import utils
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("email", help="Email", type=str)
parser.add_argument("password", help="Password", type=str)
parser.add_argument("secret", help="Secret (x-textalk-content-client-authorize)", type=str)
# parser.add_argument("-o", "--output", nargs=1, help="Output directory", type=str, default=os.getcwd())
parser.add_argument("-v", "--verbose", help="Verbose", action="store_true")
parser.add_argument("--not-headless", help="Don't run driver in headless mode", action="store_true")
args = parser.parse_args()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0',
'Accept': 'application/pdf',
'Accept-Language': 'sv-SE,sv;q=0.8,en-US;q=0.5,en;q=0.3',
'x-textalk-content-client-authorize': args.secret,
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
}
output_path = os.getcwd()
options = Options()
if not args.not_headless:
options.add_argument('-headless')
driver = Firefox(executable_path='geckodriver', options=options)
driver.implicitly_wait(2)
driver.get("https://etidning.allehanda.se/#sign-in")
form_email = driver.find_element_by_name("prenlyLogin")
form_password = driver.find_element_by_name("prenlyPassword")
form_email.send_keys(args.email)
form_password.send_keys(args.password)
form_password.submit()
if args.verbose:
print("Submitting credentials")
time.sleep(2)
latest_issue = driver.find_element_by_xpath("//a[contains(@href,'/369/Tidningen-Angermanland')]")
latest_issue.click()
print("Logged in")
if args.verbose:
print("Opening latest issue")
# Todo: Add check for if issue corresponds with current date, otherwise sleep/die
time.sleep(2)
right_btn = driver.find_element_by_xpath("//button[@title='Nästa']")
left_btn = driver.find_element_by_xpath("//button[@title='Föregående']")
if args.verbose:
print("Locating navigation buttons")
stored_requests = []
try:
transverse_time = 1 # Sometimes not all blobs with pdf-requests load in time so we wait a bit.
while True:
time.sleep(transverse_time)
# Page has 3 blobs loaded at a time
current_img_blobs = driver.find_elements_by_xpath("//img[contains(@src, 'blob:')]")
for blob in current_img_blobs:
request = blob.get_attribute("data-src")
if request not in stored_requests: # Only add unique hashes
if args.verbose:
print(f"Adding request {request}")
stored_requests.append(request)
if args.verbose:
print("Advancing to next page")
right_btn.click() # Advance to next set of pages
except exceptions.ElementClickInterceptedException:
print(f"Found {len(stored_requests)} pages")
driver.quit()
temp_path = os.path.join(output_path, "tmp")
Path(temp_path).mkdir(parents=True, exist_ok=True)
current_date = date.today().strftime("%Y-%m-%d")
i = 1 # Indexer for individual pages, TODO: maybe make it prettier?
for request in stored_requests:
if args.verbose:
print(f"GET:ing response for {request}, writing file {current_date} - {i}.pdf to {temp_path}")
response = requests.get(request, headers=headers)
# Writing files to tempdir for merging
with open(os.path.join(temp_path, f"{current_date} - {i}.pdf"), "wb") as file:
file.write(response.content)
i = i+1
# List only files and not dirs in temp_path
files_in_temp = [f for f in os.listdir(temp_path) if os.path.isfile(os.path.join(temp_path, f))]
errors_found = False
try:
while True: # We loop until we have removed all eventual error files.
mergedPDF = PdfFileMerger()
current_file = ""
try:
for file in files_in_temp:
current_file = file
mergedPDF.append(PdfFileReader(os.path.join(temp_path, file), "rb")) # Merge files
print(f"Writing output: Tidningen Allehanda - {current_date}.pdf")
mergedPDF.write(os.path.join(output_path, f"Tidningen Allehanda - {current_date}.pdf")) # Write output
break
except utils.PdfReadError:
broken_path = os.path.join(output_path, "broken")
print(f"File error found with {current_file}, we'll leave it alone, you can find it in {broken_path}")
Path(broken_path).mkdir(parents=True, exist_ok=True)
shutil.copy2(os.path.join(temp_path, current_file), os.path.join(broken_path, current_file))
files_in_temp.remove(current_file)
errors_found = True
except: # TODO: Add some better exceptions...
print("Oops, something went wrong, we'll still clean up after ourselves though")
if errors_found:
print("Broken pdfs were found, maybe try https://pdf2go.com/repair-pdf) and merge them manually...")
if args.verbose:
print("Cleaning up")
shutil.rmtree(temp_path) # delete /tmp
if args.verbose:
print("Bye, bye!")
| 38.328671
| 115
| 0.705163
|
da09e7fd2e980a0718a2792e8bbebb9c5165b482
| 210
|
py
|
Python
|
autocom/__init__.py
|
avcopan/autocom
|
aae7f9288df9144350e1d793bbcf310b2fc10fa7
|
[
"Apache-2.0"
] | null | null | null |
autocom/__init__.py
|
avcopan/autocom
|
aae7f9288df9144350e1d793bbcf310b2fc10fa7
|
[
"Apache-2.0"
] | null | null | null |
autocom/__init__.py
|
avcopan/autocom
|
aae7f9288df9144350e1d793bbcf310b2fc10fa7
|
[
"Apache-2.0"
] | null | null | null |
""" command-line interface module
"""
from . import arg
from ._autocom import call_subcommand
from ._autocom import values_with_logger
__all__ = [
'arg',
'call_subcommand',
'values_with_logger',
]
| 17.5
| 40
| 0.719048
|
31e39c48ffd5b3c54856969b985bdc8d77d2a113
| 1,737
|
py
|
Python
|
src/pycture/commands/edit_commands/gamma_correction.py
|
miguel-martinr/Pycture
|
f174699f620244dd188cb1650e3455f553cb5090
|
[
"MIT"
] | null | null | null |
src/pycture/commands/edit_commands/gamma_correction.py
|
miguel-martinr/Pycture
|
f174699f620244dd188cb1650e3455f553cb5090
|
[
"MIT"
] | 51
|
2021-10-06T02:40:17.000Z
|
2022-01-13T12:45:43.000Z
|
src/pycture/commands/edit_commands/gamma_correction.py
|
miguel-martinr/Pycture
|
f174699f620244dd188cb1650e3455f553cb5090
|
[
"MIT"
] | 1
|
2022-01-17T16:10:25.000Z
|
2022-01-17T16:10:25.000Z
|
from PyQt5.QtWidgets import QMainWindow, QWidget
from pycture.dialogs.gamma_correction_dialog import GammaCorrectionDialog, gamma
from ..command import Command
class GammaCorrection(Command):
def __init__(self, parent: QWidget):
super().__init__(parent, "Gamma correction")
def execute(self, main_window: QMainWindow):
dialog = GammaCorrectionDialog(main_window, main_window.get_editor_list())
dialog.applied.connect(lambda editor, gamma_value, color_options:
self.apply(main_window, editor, gamma_value, color_options)
)
editor = main_window.get_active_editor_name()
if editor is not None:
dialog.set_dropdown_image(editor)
def apply(self, main_window: QMainWindow, editor: str,
gamma_value: int, color_options: (int, int, int)
):
image = main_window.get_editor(editor).get_image()
if image is None:
return
lut = self.get_LUT(gamma_value)
lut_or_none = lambda condition: lut if condition else None
new_image = image.apply_LUTs((
lut_or_none(color_options[0]),
lut_or_none(color_options[1]),
lut_or_none(color_options[2])
))
main_window.add_editor(
new_image, editor + f" (Gamma corrected - {gamma_value})"
)
def get_LUT(self, gamma_value: int):
# put 256 values in the x axis between 0 and 1
x_values = [x / 255 for x in range(256)]
# get the equivalent y with the respective gamma function
y_values = [gamma(x, gamma_value) for x in x_values]
        # map the y values to the interval [0, 255]
lut = [round(y * 255) for y in y_values]
return lut
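# Worked example (sketch, not part of the original class): assuming the imported gamma()
# helper computes x ** gamma_value on the normalized input, a gamma of 2 maps level 128 to
# round((128 / 255) ** 2 * 255) == 64, i.e. mid-tones darken, while a gamma below 1
# brightens them. The resulting 256-entry LUT is applied per channel according to
# color_options.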
| 36.957447
| 82
| 0.651698
|
a86e868eacbc813bd30439c407f9987c9e527358
| 342
|
py
|
Python
|
optimization/utilities/__init__.py
|
miecio190/optimization
|
a5ce621ae2ba142c8fac0788be3654304b4cd2cb
|
[
"MIT"
] | 2
|
2020-08-06T10:14:07.000Z
|
2021-06-14T08:20:36.000Z
|
optimization/utilities/__init__.py
|
miecio190/optimization
|
a5ce621ae2ba142c8fac0788be3654304b4cd2cb
|
[
"MIT"
] | 16
|
2020-08-06T10:59:42.000Z
|
2021-01-31T11:44:33.000Z
|
optimization/utilities/__init__.py
|
miecio190/optimization
|
a5ce621ae2ba142c8fac0788be3654304b4cd2cb
|
[
"MIT"
] | null | null | null |
"""
Helper function package.
It contains:
- abstraction layer for random values generation
- binary search algorithm
"""
from .random_values import generate_random_int, generate_random_float, \
choose_random_value, choose_random_values, choose_random_value_with_weights, \
shuffle, shuffled
from .other import binary_search
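# Usage sketch (illustrative only -- the exact signatures are defined in .random_values
# and .other; the calls below assume their conventional forms):
#
#   from optimization.utilities import generate_random_int, binary_search
#   value = generate_random_int(0, 10)            # assumed: integer drawn from a given range
#   index = binary_search(sorted_values, target)  # assumed: index of target in a sorted list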
| 26.307692
| 82
| 0.789474
|
34240e6ca61b55b046c2d2a36206c8a3c0301a43
| 4,286
|
py
|
Python
|
networking_mlnx/eswitchd/cli/conn_utils.py
|
mail2nsrajesh/networking-mlnx
|
9051eac0c2bc6abf3c8790e01917e405dc479922
|
[
"Apache-2.0"
] | null | null | null |
networking_mlnx/eswitchd/cli/conn_utils.py
|
mail2nsrajesh/networking-mlnx
|
9051eac0c2bc6abf3c8790e01917e405dc479922
|
[
"Apache-2.0"
] | null | null | null |
networking_mlnx/eswitchd/cli/conn_utils.py
|
mail2nsrajesh/networking-mlnx
|
9051eac0c2bc6abf3c8790e01917e405dc479922
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networking_mlnx.eswitchd.common import constants
from oslo_serialization import jsonutils
import zmq
from networking_mlnx.eswitchd.cli import exceptions
from networking_mlnx.eswitchd.utils.helper_utils import set_conn_url
REQUEST_TIMEOUT = 50000
class ConnUtil(object):
def __init__(self):
transport = constants.SOCKET_OS_TRANSPORT
port = constants.SOCKET_OS_PORT
addr = constants.SOCKET_OS_ADDR
self.conn_url = set_conn_url(transport, addr, port)
def send_msg(self, msg):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.setsockopt(zmq.LINGER, 0)
socket.connect(self.conn_url)
try:
socket.send(msg)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
conn = dict(poller.poll(REQUEST_TIMEOUT))
if conn:
if conn.get(socket) == zmq.POLLIN:
response_msg = socket.recv(zmq.NOBLOCK)
response = self.parse_response_msg(response_msg)
return response
else:
print('no result received')
finally:
socket.close()
context.term()
def parse_response_msg(self, recv_msg):
msg = jsonutils.loads(recv_msg)
error_msg = " "
if msg['status'] == 'OK':
if 'response' in msg:
return msg['response']
return
elif msg['status'] == 'FAIL':
error_msg = "Action %s failed: %s" % (msg['action'], msg['reason'])
else:
error_msg = "Unknown operation status %s" % msg['status']
raise exceptions.MlxException(error_msg)
def allocate_nic(self, vnic_mac, device_id, fabric, vnic_type,
dev_name=None):
msg = jsonutils.dumps({'action': 'create_port',
'vnic_mac': vnic_mac,
'device_id': device_id,
'fabric': fabric,
'vnic_type': vnic_type,
'dev_name': dev_name})
recv_msg = self.send_msg(msg)
try:
dev = recv_msg['dev']
except Exception:
error_msg = "Failed to allocate %s on %s" % (vnic_mac, fabric)
raise exceptions.MlxException(error_msg)
return dev
def plug_nic(self, vnic_mac, device_id, fabric, vif_type, dev_name):
msg = jsonutils.dumps({'action': 'plug_nic',
'vnic_mac': vnic_mac,
'device_id': device_id,
'fabric': fabric,
'vnic_type': vif_type,
'dev_name': dev_name})
recv_msg = self.send_msg(msg)
try:
dev = recv_msg['dev']
except Exception:
error_msg = "Failed to plug_nic %s on %s" % (vnic_mac, fabric)
raise exceptions.MlxException(error_msg)
return dev
def deallocate_nic(self, vnic_mac, fabric):
msg = jsonutils.dumps({'action': 'delete_port',
'fabric': fabric,
'vnic_mac': vnic_mac})
recv_msg = self.send_msg(msg)
try:
dev = recv_msg['dev']
except Exception:
error_msg = "Failed to deallocate %s on %s" % (vnic_mac, fabric)
raise exceptions.MlxException(error_msg)
return dev
def get_tables(self, fabric):
msg = jsonutils.dumps({'action': 'get_eswitch_tables',
'fabric': fabric})
recv_msg = self.send_msg(msg)
tables = recv_msg['tables']
return tables
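# Example usage (sketch; the fabric, MAC and device id values below are illustrative):
#
#   conn = ConnUtil()
#   tables = conn.get_tables('default')
#   dev = conn.allocate_nic(vnic_mac='fa:16:3e:00:00:01', device_id='vm-uuid',
#                           fabric='default', vnic_type='hostdev')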
| 36.016807
| 79
| 0.576995
|
5f822f8df545cdbaff64fc3f97beb8e0cb23a039
| 5,783
|
py
|
Python
|
Peter_section2.py
|
clarype/PythonFunctions
|
2594623e99abe33d49271602e6f3166965fb75fc
|
[
"MIT"
] | null | null | null |
Peter_section2.py
|
clarype/PythonFunctions
|
2594623e99abe33d49271602e6f3166965fb75fc
|
[
"MIT"
] | null | null | null |
Peter_section2.py
|
clarype/PythonFunctions
|
2594623e99abe33d49271602e6f3166965fb75fc
|
[
"MIT"
] | null | null | null |
import Peter_lab4_cookbook as Pcbk4
a = raw_input('Enter a path to a folder with no spaces. Examples: for Mac, something like /Users/katie/Downloads/lab44; for Windows, C:\\\Users\\\clarype\\\Desktop\\\New folder ')
#
def headfootfile(path,filename):
    test = Pcbk4.check_filename(path, filename) #returns a tuple. element 0 tells us whether the path/filename checks out
if not(test[0]): # if the path/filename doesn't test out, get out
print("Error: headfootfile cannot operate on the path, file you gave")
print path
print filename
return False, None #return the bad news.
fn = test[1] #assign the full path/filename to the variable fn
file_in_object = open(fn) #fn becomes the object we operate on
outfn = fn[:-4]+'_headfoot'+fn[-4:] #Interpose the headfoot!
try:
file_out_object = open(outfn, 'w') #NOTE the 'w' syntax.
except:
# A problem! Report and bail
print("Error: problem writing the file:")
print(outfn) #show the user what was tried
file_in_object.close() #closes the file we opened earlier
return False, None #return empty
from datetime import datetime
timestamp = datetime.now()
header = "FileWritten on "+str(timestamp.day)+"_"+str(timestamp.month)+"_"+str(timestamp.year)+" at "+ str(timestamp.hour)+"_"+str(timestamp.minute)+".\n"
try:
file_out_object.write(header)
except:
print("Error: I could open the file, but not write to it.")
#Clean both up by closing
file_in_object.close()
file_out_object.close()
return False, None
#-----------------------------------problem 4------------------------------------
count = 0
for line in file_in_object:
try:
count +=1
file_out_object.write(str(count)+"\t" + "Hi, we are indented. :)" + line)
#-----------------------------------------End of Problem-----------------------------------------
except:
print("Error: Error writing the file.")
file_in_object.close()
file_out_object.close()
return False, None
try:
message = "\nA parting thought: Have a great day coding!" #again, note the
# \n as a carriage return to add a blank line.
file_out_object.write(message) #just write the line
except:
print("Error: Error writing the file.")
file_in_object.close()
file_out_object.close()
return False, None
print("Successfully wrote the file")
print(outfn) #remember -- this was the filename we constructed
file_in_object.close()
file_out_object.close()
return True, outfn
#-------------------------Problem 5-------------------------------
import Peter_lab4_cookbook as pckb # imports codebook
def folder_dir(path): # function
import os
p = pckb.check_path(path) #checks file path
    if p[0] == True: #checks to make sure that the tuple returned by the check_path function is true
filelist = [] # creates an empty list for each file in the folder
formt = raw_input('Enter a file type. examples .txt, .shp, .pdf, etc. ') # user input for type of file to be tested
for file in os.listdir(path): # scan directory for files.
            if file.endswith(formt): # scan directory for a type of file
filelist.append(file) # adds found type of file to the empty list
if len(filelist) > 0: # tells users if a file of a type was found
print "You have at least one file"
else:
return 'There are no files of that format. '
        pathlist = [] # creates an empty list for each file path to be used in os.stat().st_size functions
sizeA = raw_input('Enter the upper end of the file size you are looking for. example 50000 ') # user input to help find a certain sized file
sizeB = raw_input('Enter the lower end of the file size you are looking for. example 1 ') # user input to help find a certain sized file
        for i in filelist: # loops through filelist to join file name and path
dfg = path + "/" + i # ''
pathlist.append(dfg) # adds joined path and file name to empty list
        for o in pathlist: # loops through pathlist for file size conditions
            if os.stat(o).st_size < int(sizeA) and os.stat(o).st_size > int(sizeB): # checks file size with upper and lower conditions
tt = open(path + '\WEEE.txt', 'w') # creates a new doc to write on
count = 0 # this is to list each file on the doc
for n in range(len(pathlist)): # loop writes each file on to doc
count += 1 # numbering each file
                    tt.write(str(count) + '. ' + pathlist[n] + ' has a size less than {0} but higher than {1}.'.format(sizeA, sizeB) + '\n') # message to write on doc
tt.close() # closes file
print "A file meets these qulifcations! And it is printed to a text called WEEE in the folder at the end of the path. "
else:
print "A file does not meet these qualifcations"
else:
return "Check your file path format."
#print headfootfile(a, b)
print folder_dir(a)
#------------------------------------------------------------------------------------------------------------------------
| 46.264
| 187
| 0.550233
|
931eba6eb64fafc451ccf1a7564aaa5a5f62b582
| 14,549
|
py
|
Python
|
train.py
|
hukefei/DistanceWeightedSampling
|
7a7d377150ececdaf2171a4178cfdec1a1ee992d
|
[
"MIT"
] | null | null | null |
train.py
|
hukefei/DistanceWeightedSampling
|
7a7d377150ececdaf2171a4178cfdec1a1ee992d
|
[
"MIT"
] | null | null | null |
train.py
|
hukefei/DistanceWeightedSampling
|
7a7d377150ececdaf2171a4178cfdec1a1ee992d
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import time
import os
import random
import warnings
import numpy as np
from bottleneck import argpartition
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch import nn
from torch.autograd import Variable
from torch.nn import Parameter
from model import *
from resnet import *
from sampler import BalancedBatchSampler
logging.basicConfig(level=logging.INFO)
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def parse_argument():
parser = argparse.ArgumentParser(description='train a model for image classification.')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--data-path', type=str, required=True,
help='path of data, which contains train,val subdirectory')
parser.add_argument('--embed-dim', type=int, default=128,
help='dimensionality of image embedding. default is 128.')
parser.add_argument('--feat-dim', type=int, default=512,
help='dimensionality of base_net output. default is 512.')
parser.add_argument('--classes', type=int, required=True,
help='number of classes in dataset')
parser.add_argument('--batch-num', type=int, required=True,
help='number of batches in one epoch')
parser.add_argument('--batch-size', type=int, default=70,
help='total batch_size on all gpus.')
parser.add_argument('--batch-k', type=int, default=5,
help='number of images per class in a batch. default is 5.')
parser.add_argument('--gpus', type=str, default='',
help='list of gpus to use, e.g. 0 or 0,2,5.')
parser.add_argument('--epochs', type=int, default=80,
                        help='number of training epochs. default is 80.')
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate. default is 0.0001.')
parser.add_argument('--lr-beta', type=float, default=0.1,
help='learning rate for the beta in margin based loss. default is 0.1.')
parser.add_argument('--margin', type=float, default=0.2,
help='margin for the margin based loss. default is 0.2.')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
parser.add_argument('--beta', type=float, default=1.2,
help='initial value for beta. default is 1.2.')
parser.add_argument('--nu', type=float, default=0.0,
help='regularization parameter for beta. default is 0.0.')
parser.add_argument('--factor', type=float, default=0.5,
help='learning rate schedule factor. default is 0.5.')
parser.add_argument('--steps', type=str, default='20,40,60',
help='epochs to update learning rate. default is 20,40,60.')
parser.add_argument('--resume', type=str, default=None,
help='path to checkpoint')
parser.add_argument('--wd', type=float, default=0.0001,
help='weight decay rate. default is 0.0001.')
parser.add_argument('--seed', type=int, default=None,
help='random seed to use')
parser.add_argument('--model', type=str, default='resnet50', choices=model_names,
help='type of model to use. see vision_model for options.')
parser.add_argument('--save-prefix', type=str, required=True,
help='prefix of saved checkpoint.')
parser.add_argument('--use-pretrained', action='store_true',
help='enable using pretrained model.')
parser.add_argument('--normalize-weights', action='store_true',
help='normalize log weights .')
    parser.add_argument('--print-freq', type=int, default=20,
                        help='number of batches to wait before logging.')
    parser.add_argument('--loss', type=str, default='margin', choices=['margin', 'triplet'],
                        help='loss function to use (margin or triplet). default is margin.')
args = parser.parse_args()
logging.info(args)
return args
def main(args):
# checking
assert args['batch_size'] % args['batch_k'] == 0
assert args['batch_size'] > 0 and args['batch_k'] > 0
assert args['batch_size'] // args['batch_k'] < args['classes']
# seed
if args['seed'] is not None:
random.seed(args['seed'])
torch.manual_seed(args['seed'])
cudnn.deterministic = True
warnings.warn(
'''You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.''')
# gpus setting
os.environ['CUDA_VISIBLE_DEVICES'] = args['gpus']
# construct model
if not args['use_pretrained']:
model = resnet50(num_classes=args['feat_dim'])
else:
model = resnet50(pretrained=True)
try:
model.fc = nn.Linear(model.fc.in_features, args['feat_dim'])
except NameError as e:
print("Error: current works only with model having fc layer as the last layer, try modify the code")
exit(-1)
model = MarginNet(base_net=model, emb_dim=args['embed_dim'], batch_k=args['batch_k'], feat_dim=args['feat_dim'],
normalize=args['normalize_weights'])
print(model.state_dict().keys())
model.cuda()
if args['loss'] == 'margin':
criterion = MarginLoss(margin=args['margin'], nu=args['nu'])
elif args['loss'] == 'triplet':
criterion = TripletLoss(margin=args['margin'], nu=args['nu'])
else:
raise NotImplementedError
optimizer = torch.optim.SGD(model.parameters(), args['lr'], momentum=args['momentum'],
weight_decay=args['wd'])
beta = Parameter(torch.ones((args['classes'],), dtype=torch.float32, device=torch.device('cuda')) * args['beta'])
optimizer_beta = torch.optim.SGD([beta], args['lr_beta'], momentum=args['momentum'], weight_decay=args['wd'])
if args['resume']:
if os.path.isfile(args['resume']):
print("=> loading checkpoint '{}'".format(args['resume']))
checkpoint = torch.load(args['resume'])
args['start_epoch'] = checkpoint['epoch']
state_dict = {}
for k, v in checkpoint['state_dict'].items():
if k.startswith('module.'):
k = k[7:]
state_dict[k] = v
model.load_state_dict(state_dict)
optimizer.load_state_dict(checkpoint['optimizer'])
optimizer_beta.load_state_dict(checkpoint['optimizer_beta'])
beta = checkpoint['beta']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args['resume'], checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args['resume']))
# if len(args.gpus.split(',')) > 1:
# model = torch.nn.DataParallel(model)
# dataset
traindir = os.path.join(args['data_path'], 'train')
valdir = os.path.join(args['data_path'], 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
# transforms.RandomResizedCrop(224),
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
)
val_dataset = datasets.ImageFolder(
valdir,
transforms.Compose([
# transforms.RandomResizedCrop(224),
transforms.Resize((224, 224)),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
)
batch_sampler = BalancedBatchSampler(train_dataset, args['batch_size'], args['batch_k'], length=args['batch_num'])
train_loader = torch.utils.data.DataLoader(
batch_sampler=batch_sampler,
dataset=train_dataset,
num_workers=args['workers'],
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
batch_sampler=batch_sampler,
dataset=val_dataset,
num_workers=args['workers'],
pin_memory=True
)
if not os.path.exists('checkpoints/'):
os.mkdir('checkpoints/')
for epoch in range(args['start_epoch'], args['epochs']):
adjust_learning_rate(optimizer, epoch, args)
adjust_learning_rate(optimizer_beta, epoch, args, beta=True)
# print(optimizer.state_dict()['param_groups'][0]['lr'])
# print(optimizer_beta.state_dict()['param_groups'][0]['lr'])
# train for one epoch
train(train_loader, model, criterion, optimizer, optimizer_beta, beta, epoch, args)
# evaluate
#
state = {
'epoch': epoch + 1,
'arch': args['model'],
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'optimizer_beta': optimizer_beta.state_dict(),
'beta': beta
}
torch.save(state, 'checkpoints/%s_checkpoint_%d.pth.tar' % (args['save_prefix'], epoch + 1))
def get_distance_matrix(x):
"""Get distance matrix given a matrix. Used in testing."""
    square = torch.sum(x ** 2.0, dim=1, keepdim=True)
    distance_square = square + square.t() - (2.0 * torch.mm(x, x.t()))
return torch.sqrt(distance_square)
def evaluate_emb(emb, labels):
"""Evaluate embeddings based on Recall@k."""
d_mat = get_distance_matrix(emb)
    d_mat = d_mat.cpu().numpy()
    labels = labels.cpu().numpy()
names = []
accs = []
for k in [1, 2, 4, 8, 16]:
names.append('Recall@%d' % k)
correct, cnt = 0.0, 0.0
for i in range(emb.shape[0]):
d_mat[i, i] = 1e10
nns = argpartition(d_mat[i], k)[:k]
if any(labels[i] == labels[nn] for nn in nns):
correct += 1
cnt += 1
accs.append(correct / cnt)
return names, accs
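# Example (sketch; random CPU tensors for illustration, matching the PyTorch helpers above):
#
#   emb = torch.randn(32, 128)
#   labels = torch.randint(0, 4, (32,))
#   names, accs = evaluate_emb(emb, labels)
#   print(dict(zip(names, accs)))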
# def validate(val_loader, model, criterion, args):
# """Test a model."""
# outputs = []
# labels = []
# for batch in val_loader:
# data = torch.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
# label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
# for x in data:
# outputs.append(net(x)[-1])
# labels += label
#
# outputs = torch.concatenate(outputs, axis=0)[:val_data.n_test]
# labels = torch.concatenate(labels, axis=0)[:val_data.n_test]
# return evaluate_emb(outputs, labels)
# def validate(val_loader, model, criterion, args):
# outputs = []
# labels = []
#
# model.eval()
#
# with torch.no_grad():
# end = time.time()
# for i, (input, target) in enumerate(val_loader):
# outpus += model(input)[-1].cpu().tolist()
# labels += target.cpu().tolist()
#
def train(train_loader, model, criterion, optimizer, optimizer_beta, beta, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
pair_cnts = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (x, y) in enumerate(train_loader):
if i == args['batch_num']:
return
# measure data loading time
data_time.update(time.time() - end)
y = y.cuda()
x = x.cuda()
optimizer.zero_grad()
optimizer_beta.zero_grad()
# compute output
a_indices, anchors, positives, negatives, _ = model(x)
if args['lr_beta'] > 0.0:
loss, pair_cnt = criterion(anchors, positives, negatives, beta, y[a_indices])
else:
loss, pair_cnt = criterion(anchors, positives, negatives, args['beta'], None)
# measure accuracy and record loss
losses.update(loss.item(), x.size(0))
# compute gradient and do SGD step
loss.backward()
optimizer.step()
optimizer_beta.step()
# print(beta)
pair_cnts.update(pair_cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args['print_freq'] == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Lr: {3} Lr_beta: {4}\t'
'BetaMax: {5:.2f} BetaMin: {6:.2f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'PairNum {pair_cnt.val:.2f} ({pair_cnt.avg: .2f}) '.format(
epoch, i, len(train_loader), optimizer.state_dict()['param_groups'][0]['lr'],
optimizer_beta.state_dict()['param_groups'][0]['lr'], beta.detach().cpu().numpy().max(),
beta.detach().cpu().numpy().min(),
batch_time=batch_time, data_time=data_time, loss=losses, pair_cnt=pair_cnts))
def adjust_learning_rate(optimizer, epoch, args, beta=False):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
steps = [int(step) for step in args['steps'].split(',')]
if not beta:
lr = args['lr']
else:
lr = args['lr_beta']
for i in range(epoch + 1):
if i in steps:
lr *= args['factor']
for param_group in optimizer.param_groups:
# param_group['lr'] = lr
param_group['lr'] = lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == "__main__":
args = parse_argument().__dict__
main(args)
| 38.086387
| 217
| 0.592412
|
d1394ea05f20151e0d01e43a536f203c24751f6b
| 5,041
|
py
|
Python
|
stellar/operations.py
|
abitrolly/stellar5
|
15b2904fa36c8acbe188324f7e58032e81f2484e
|
[
"MIT"
] | 932
|
2015-01-01T16:51:32.000Z
|
2022-03-14T05:12:52.000Z
|
stellar/operations.py
|
abitrolly/stellar5
|
15b2904fa36c8acbe188324f7e58032e81f2484e
|
[
"MIT"
] | 43
|
2015-01-05T13:31:30.000Z
|
2021-10-12T17:24:07.000Z
|
stellar/operations.py
|
abitrolly/stellar5
|
15b2904fa36c8acbe188324f7e58032e81f2484e
|
[
"MIT"
] | 64
|
2015-01-02T14:44:22.000Z
|
2021-12-10T18:47:44.000Z
|
import logging
import sqlalchemy_utils
logger = logging.getLogger(__name__)
SUPPORTED_DIALECTS = (
'postgresql',
'mysql'
)
class NotSupportedDatabase(Exception):
pass
def get_engine_url(raw_conn, database):
url = str(raw_conn.engine.url)
if url.count('/') == 3 and url.endswith('/'):
return '%s%s' % (url, database)
else:
if not url.endswith('/'):
url += '/'
return '%s/%s' % ('/'.join(url.split('/')[0:-2]), database)
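# Example (illustrative): with an engine bound to 'postgresql://user@localhost/stellar',
# get_engine_url(raw_conn, 'stellar_snapshot') swaps the last path component and returns
# 'postgresql://user@localhost/stellar_snapshot'; a URL that already ends in '/' (no
# database selected) simply gets the name appended.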
def _get_pid_column(raw_conn):
# Some distros (e.g Debian) may inject their branding into server_version
server_version = raw_conn.execute('SHOW server_version;').first()[0]
version_string, _, _ = server_version.partition(' ')
version = [int(x) for x in version_string.split('.')]
return 'pid' if version >= [9, 2] else 'procpid'
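# Example (illustrative): 'SHOW server_version;' may return '9.1.24', '9.6.3' or
# '13.4 (Debian 13.4-1.pgdg100+1)'; only the numeric prefix is compared, so the first
# yields 'procpid' while the other two yield 'pid' (the column was renamed in 9.2).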
def terminate_database_connections(raw_conn, database):
logger.debug('terminate_database_connections(%r)', database)
if raw_conn.engine.dialect.name == 'postgresql':
pid_column = _get_pid_column(raw_conn)
raw_conn.execute(
'''
SELECT pg_terminate_backend(pg_stat_activity.%(pid_column)s)
FROM pg_stat_activity
WHERE
pg_stat_activity.datname = '%(database)s' AND
%(pid_column)s <> pg_backend_pid();
''' % {'pid_column': pid_column, 'database': database}
)
else:
# NotYetImplemented
pass
def create_database(raw_conn, database):
logger.debug('create_database(%r)', database)
return sqlalchemy_utils.functions.create_database(
get_engine_url(raw_conn, database)
)
def copy_database(raw_conn, from_database, to_database):
logger.debug('copy_database(%r, %r)', from_database, to_database)
terminate_database_connections(raw_conn, from_database)
if raw_conn.engine.dialect.name == 'postgresql':
raw_conn.execute(
'''
CREATE DATABASE "%s" WITH TEMPLATE "%s";
''' %
(
to_database,
from_database
)
)
elif raw_conn.engine.dialect.name == 'mysql':
# Horribly slow implementation.
create_database(raw_conn, to_database)
for row in raw_conn.execute('SHOW TABLES in %s;' % from_database):
raw_conn.execute('''
CREATE TABLE %s.%s LIKE %s.%s
''' % (
to_database,
row[0],
from_database,
row[0]
))
raw_conn.execute('ALTER TABLE %s.%s DISABLE KEYS' % (
to_database,
row[0]
))
raw_conn.execute('''
INSERT INTO %s.%s SELECT * FROM %s.%s
''' % (
to_database,
row[0],
from_database,
row[0]
))
raw_conn.execute('ALTER TABLE %s.%s ENABLE KEYS' % (
to_database,
row[0]
))
else:
raise NotSupportedDatabase()
def database_exists(raw_conn, database):
logger.debug('database_exists(%r)', database)
return sqlalchemy_utils.functions.database_exists(
get_engine_url(raw_conn, database)
)
def remove_database(raw_conn, database):
logger.debug('remove_database(%r)', database)
terminate_database_connections(raw_conn, database)
return sqlalchemy_utils.functions.drop_database(
get_engine_url(raw_conn, database)
)
def rename_database(raw_conn, from_database, to_database):
logger.debug('rename_database(%r, %r)', from_database, to_database)
terminate_database_connections(raw_conn, from_database)
if raw_conn.engine.dialect.name == 'postgresql':
raw_conn.execute(
'''
ALTER DATABASE "%s" RENAME TO "%s"
''' %
(
from_database,
to_database
)
)
elif raw_conn.engine.dialect.name == 'mysql':
create_database(raw_conn, to_database)
for row in raw_conn.execute('SHOW TABLES in %s;' % from_database):
raw_conn.execute('''
RENAME TABLE %s.%s TO %s.%s;
''' % (
from_database,
row[0],
to_database,
row[0]
))
remove_database(raw_conn, from_database)
else:
raise NotSupportedDatabase()
def list_of_databases(raw_conn):
logger.debug('list_of_databases()')
if raw_conn.engine.dialect.name == 'postgresql':
return [
row[0]
for row in raw_conn.execute('''
SELECT datname FROM pg_database
WHERE datistemplate = false
''')
]
elif raw_conn.engine.dialect.name == 'mysql':
return [
row[0]
for row in raw_conn.execute('''SHOW DATABASES''')
]
else:
raise NotSupportedDatabase()
| 29.828402
| 77
| 0.564571
|
3f09ed2513aa0fe4df19211c860c4fd7a357a8b2
| 9,996
|
py
|
Python
|
neurom/io/neurolucida.py
|
Asjidkalam/NeuroM
|
c3b0447c34994d12a77db5136838df50caea72bc
|
[
"BSD-3-Clause"
] | 1
|
2016-10-25T09:23:16.000Z
|
2016-10-25T09:23:16.000Z
|
neurom/io/neurolucida.py
|
Asjidkalam/NeuroM
|
c3b0447c34994d12a77db5136838df50caea72bc
|
[
"BSD-3-Clause"
] | 1
|
2021-02-16T20:53:48.000Z
|
2021-02-16T20:53:48.000Z
|
neurom/io/neurolucida.py
|
Asjidkalam/NeuroM
|
c3b0447c34994d12a77db5136838df50caea72bc
|
[
"BSD-3-Clause"
] | 1
|
2021-02-16T20:50:02.000Z
|
2021-02-16T20:50:02.000Z
|
# Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Reader for Neurolucida .ASC files, v3.
Reverse engineered from looking at output from Neurolucida.
"""
import warnings
from io import open
import numpy as np
from neurom.core.dataformat import COLS, POINT_TYPE
from .datawrapper import DataWrapper
WANTED_SECTIONS = {
'CellBody': POINT_TYPE.SOMA,
'Axon': POINT_TYPE.AXON,
'Dendrite': POINT_TYPE.BASAL_DENDRITE,
'Apical': POINT_TYPE.APICAL_DENDRITE,
}
UNWANTED_SECTION_NAMES = [
# Meta-data?
'Closed', 'Color', 'FillDensity', 'GUID', 'ImageCoords', 'MBFObjectType',
'Marker', 'Name', 'Resolution', 'Set', 'Description',
# Marker names?
'Asterisk', 'Cross', 'Dot', 'DoubleCircle', 'FilledCircle', 'FilledDownTriangle',
'FilledSquare', 'FilledStar', 'FilledUpTriangle', 'FilledUpTriangle', 'Flower',
'Flower2', 'OpenCircle', 'OpenDiamond', 'OpenDownTriangle', 'OpenSquare', 'OpenStar',
'OpenUpTriangle', 'Plus', 'ShadedStar', 'Splat', 'TriStar',
]
UNWANTED_SECTIONS = {name: True for name in UNWANTED_SECTION_NAMES}
def _match_section(section, match):
"""Checks whether the `type` of section is in the `match` dictionary.
Works around the unknown ordering of s-expressions in each section.
For instance, the `type` is the 3-rd one in for CellBodies
("CellBody"
(Color Yellow)
(CellBody)
(Set "cell10")
)
Returns:
value associated with match[section_type], None if no match
"""
# TODO: rewrite this so it is more clear, and handles sets & dictionaries for matching
for i in range(5):
if i >= len(section):
return None
if isinstance(section[i], str) and section[i] in match:
return match[section[i]]
return None
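# Example (illustrative): during parsing this is mostly used to drop metadata
# sub-sections, e.g. _match_section(['Color', 'Yellow'], UNWANTED_SECTIONS) returns True
# (the first element matches), while _match_section(['CellBody'], UNWANTED_SECTIONS)
# returns None, so the CellBody marker survives into the parsed tree.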
def _get_tokens(morph_fd):
"""Split a file-like into tokens: split on whitespace.
Note: this also strips newlines and comments
"""
for line in morph_fd:
line = line.rstrip() # remove \r\n
line = line.split(';', 1)[0] # strip comments
squash_token = [] # quoted strings get squashed into one token
if '<(' in line: # skip spines, which exist on a single line
assert ')>' in line, 'Missing end of spine'
            # The following line is covered but 'tox -e coverage' does not see it
# TODO: find out why
continue # pragma: no cover
for token in line.replace('(', ' ( ').replace(')', ' ) ').split():
if squash_token:
squash_token.append(token)
if token.endswith('"'):
token = ' '.join(squash_token)
squash_token = []
yield token
elif token.startswith('"') and not token.endswith('"'):
squash_token.append(token)
else:
yield token
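# Example (illustrative): the line '("CellBody" (Color Yellow)) ; comment' yields the
# tokens '(', '"CellBody"', '(', 'Color', 'Yellow', ')', ')' -- the comment is stripped
# and the quoted name stays a single token.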
def _parse_section(token_iter):
"""Create a tree structure (defined by the s-expressions) from a stream of tokens."""
sexp = []
for token in token_iter:
if token == '(':
new_sexp = _parse_section(token_iter)
if not _match_section(new_sexp, UNWANTED_SECTIONS):
sexp.append(new_sexp)
elif token == ')':
return sexp
else:
sexp.append(token)
return sexp
def _parse_sections(morph_fd):
"""Returns array of all the sections that exist.
The format is nested lists that correspond to the s-expressions
"""
sections = []
token_iter = _get_tokens(morph_fd)
for token in token_iter:
if token == '(': # find top-level sections
section = _parse_section(token_iter)
if not _match_section(section, UNWANTED_SECTIONS):
sections.append(section)
return sections
def _flatten_subsection(subsection, _type, offset, parent):
"""Flatten a subsection from its nested version.
Args:
subsection: Nested subsection as produced by _parse_section, except one level in
_type: type of section, ie: AXON, etc
        parent: first element has this as its parent
offset: position in the final array of the first element
Returns:
Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
"""
for row in subsection:
# TODO: Figure out what these correspond to in neurolucida
if row in ('Low', 'Generated', 'High', ):
continue
if isinstance(row[0], str):
if len(row) in (4, 5, ):
if len(row) == 5:
assert row[4][0] == 'S', \
'Only known usage of a fifth member is Sn, found: %s' % row[4][0]
yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2.,
_type, offset, parent)
parent = offset
offset += 1
elif isinstance(row[0], list):
split_parent = offset - 1
start_offset = 0
slices = []
start = 0
for i, value in enumerate(row):
if value == '|':
slices.append(slice(start + start_offset, i))
start = i + 1
slices.append(slice(start + start_offset, len(row)))
for split_slice in slices:
for _row in _flatten_subsection(row[split_slice], _type, offset,
split_parent):
offset += 1
yield _row
def _extract_section(section):
"""Find top level sections, and get their flat contents, and append them all.
Returns a numpy array with the row format:
[X, Y, Z, R, TYPE, ID, PARENT_ID]
Note: PARENT_ID starts at -1 for soma and 0 for neurites
"""
# sections with only one element will be skipped,
if len(section) == 1:
assert section[0] == 'Sections', \
('Only known usage of a single Section content is "Sections", found %s' %
section[0])
return None
# try and detect type
_type = WANTED_SECTIONS.get(section[0][0], None)
start = 1
# CellBody often has [['"CellBody"'], ['CellBody'] as its first two elements
if _type is None:
_type = WANTED_SECTIONS.get(section[1][0], None)
if _type is None: # can't determine the type
return None
start = 2
parent = -1 if _type == POINT_TYPE.SOMA else 0
subsections = list(_flatten_subsection(section[start:], _type, offset=0,
parent=parent))
return np.array(subsections)
def _sections_to_raw_data(sections):
"""Convert list of sections into the `raw_data` format used in neurom.
This finds the soma, and attaches the neurites
"""
soma = None
neurites = []
for section in sections:
neurite = _extract_section(section)
if neurite is None:
continue
if neurite[0][COLS.TYPE] == POINT_TYPE.SOMA:
assert soma is None, 'Multiple somas defined in file'
soma = neurite
else:
neurites.append(neurite)
assert soma is not None, 'Missing CellBody element (ie. soma)'
total_length = len(soma) + sum(len(neurite) for neurite in neurites)
ret = np.zeros((total_length, 7,), dtype=np.float64)
pos = len(soma)
ret[0:pos, :] = soma
for neurite in neurites:
end = pos + len(neurite)
ret[pos:end, :] = neurite
ret[pos:end, COLS.P] += pos
ret[pos:end, COLS.ID] += pos
# TODO: attach the neurite at the closest point on the soma
ret[pos, COLS.P] = len(soma) - 1
pos = end
return ret
def read(morph_file, data_wrapper=DataWrapper):
"""Return a DataWrapper object.
It is 'raw_data' np.array with the full neuron, and the format of the file
suitable to be wrapped by DataWrapper
"""
warnings.warn('This is an experimental reader. '
'There are no guarantees regarding ability to parse '
'Neurolucida .asc files or correctness of output.')
with open(morph_file, encoding='utf-8', errors='replace') as morph_fd:
sections = _parse_sections(morph_fd)
raw_data = _sections_to_raw_data(sections)
return data_wrapper(raw_data, 'NL-ASCII')
| 36.615385
| 90
| 0.623349
|
8caa41bc209588bd5ee483d42aed4405987f24ef
| 2,727
|
py
|
Python
|
sleepin/tests/test_views.py
|
turtle1229/private_sleepin
|
fdc352aff7430ab324fcaa30d181545037dd95c4
|
[
"MIT"
] | null | null | null |
sleepin/tests/test_views.py
|
turtle1229/private_sleepin
|
fdc352aff7430ab324fcaa30d181545037dd95c4
|
[
"MIT"
] | 2
|
2021-02-15T04:41:03.000Z
|
2021-02-15T04:48:43.000Z
|
sleepin/tests/test_views.py
|
turtle1229/private_sleepin
|
fdc352aff7430ab324fcaa30d181545037dd95c4
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse_lazy
from ..models import Sleeptime, Meal, Health, Diary
class LoggedInTestCase(TestCase):
"""各テストクラスで共通の事前準備処理をオーバーライドした独自TestCaseクラス"""
def setUp(self):
"""テストメソッド実行前の事前設定"""
#テストユーザーのパスワード
self.password = 'test0000'
        # Create the test user used by the instance methods and keep it in an instance variable
self.test_user = get_user_model().objects.create_user(
username='test',
email='test@2test2.com',
password=self.password)
        # Log in as the test user
self.client.login(email=self.test_user.email, password=self.password)
class TestSleeptimeRegisterView(LoggedInTestCase):
"""SleeptimeRegisterView用のテストクラス"""
def test_register_sleeptime_success(self):
"""睡眠時間登録処理が成功することを検証する"""
#Postパラメータ
params = {'create_date': '日付',
'sleep_at': '就寝時間',
'wakeup_at': '起床時間'}
        # Execute the new sleep-time registration (POST)
response = self.client.post(reverse_lazy('sleepin:sleeptime_register'), params)
        # Verify the redirect to the registration-complete page
self.assertRedirects(response, reverse_lazy('sleepin:registered'))
        # Verify that the sleep-time data was stored in the DB
self.assertEqual(Sleeptime.objects.filter(create_date='日付').count(), 1)
def test_register_sleeptime_failure(self):
"""新規睡眠時間登録処理が失敗することを検証する"""
#新規睡眠時間登録処理(Post)を実行
response = self.client.post(reverse_lazy('sleepin:sleeptime_register'))
        # Verify that leaving a required form field empty raises an error
self.assertFormError(response, 'form', 'create_date', 'このフィールドは必須です。')
class TestSleeptimeEditView(LoggedInTestCase):
"""SleeptimeEditView用のテストクラス"""
def test_edit_sleeptime_success(self):
"""睡眠時間編集処理が成功することを検証する"""
#テスト用睡眠時間データの作成
sleeptime = Sleeptime.objects.create(user=self.test_user, create_date='日付編集前')
        # POST parameters
params = {'create_date': '日付編集後'}
        # Execute the sleep-time edit (POST)
response = self.client.post(reverse_lazy('sleepin:sleeptime_edit', kwargs={'pk': sleeptime.pk}), params)
        # Verify the redirect to the sleep-time detail page
self.assertRedirects(response, reverse_lazy('sleepin:sleeptime_detail', kwargs={'pk': sleeptime.pk}))
        # Verify that the sleep-time data was edited
self.assertEqual(Sleeptime.objects.get(pk=sleeptime.pk).create_date, '日付編集後')
def test_edit_sleeptime_failure(self):
"""睡眠時間編集処理が失敗することを検証する"""
#睡眠時間編集処理(Post)を実行
response = self.client.post(reverse_lazy('sleepin:sleeptime_edit', kwargs={'pk': 999}))
        # Verify that attempting to edit non-existent sleep-time data results in an error
self.assertEqual(response.status_code, 404)
| 30.988636
| 112
| 0.6685
|
f686da2a2f55ed2f09249bc9db825511b5cdfecd
| 4,475
|
py
|
Python
|
otrrentworker/azurestorage/tablemodels.py
|
omza/otrrentworker
|
faa70884a29dd271a33346ed45b3f43e5d7a7a21
|
[
"MIT"
] | null | null | null |
otrrentworker/azurestorage/tablemodels.py
|
omza/otrrentworker
|
faa70884a29dd271a33346ed45b3f43e5d7a7a21
|
[
"MIT"
] | null | null | null |
otrrentworker/azurestorage/tablemodels.py
|
omza/otrrentworker
|
faa70884a29dd271a33346ed45b3f43e5d7a7a21
|
[
"MIT"
] | null | null | null |
""" imports & Gloabls """
import datetime
from azure.common import AzureException
from azure.storage.table import Entity, TableService, EntityProperty, EdmType
from azurestorage.wrapper import StorageTableModel, StorageTableCollection
from helpers.helper import safe_cast
""" configure logging """
from config import log, config
class Torrent(StorageTableModel):
_tablename = 'torrents'
_dateformat = '%d.%m.%Y'
_datetimeformat = '%d.%m.%Y %H:%M:%S'
Id = 0
Resolution = ''
TorrentFile = ''
TorrentLink = ''
finished = 0
loading = 0
loaded = 0
def __setPartitionKey__(self):
self.PartitionKey = self.Id
return super().__setPartitionKey__()
def __setRowKey__(self):
self.RowKey = self.Resolution
return super().__setRowKey__()
class Recording(StorageTableModel):
_tablename = 'recordings'
_dateformat = '%d.%m.%Y'
_datetimeformat = '%d.%m.%Y %H:%M:%S'
Id = 0
beginn = datetime.datetime.strptime('01.01.1900 00:00:00', _datetimeformat)
ende = datetime.datetime.strptime('01.01.1900 00:00:00', _datetimeformat)
dauer = 0
sender = ''
titel = ''
typ = ''
text = ''
genre_id = 0
genre = ''
fsk = ''
language = ''
weekday = ''
zusatz = ''
wdh = ''
downloadlink = ''
infolink = ''
programlink = ''
rating = ''
previewimagelink = ''
torrentCount = 0
Torrents = StorageTableCollection(_tablename)
def __setPartitionKey__(self):
self.PartitionKey = self.beginn.strftime('%Y_%m_%d')
return super().__setPartitionKey__()
def __setRowKey__(self):
self.RowKey = str(self.Id)
return super().__setRowKey__()
def __setCollections__(self):
self.Torrents = StorageTableCollection('torrents', "PartitionKey eq '{}'".format(self.RowKey))
return super().__setCollections__()
class Genre(StorageTableModel):
_tablename = 'genres'
Genre_Id = 0
Genre = ''
class Genres():
_tablename = 'genres'
_collection = []
def __init__(self, tableservice, filter):
"""Initializes the GenresList with the specified settings dict.
Required settings are:
- db = Azure Table Storage tableservice
"""
self._tableservice = tableservice
self._tablename = self.__class__._tablename
self._filter = filter
self._collection = []
self.__loadcollection__()
def __loadcollection__(self):
allentities = self._tableservice.query_entities(self._tablename, self._filter)
for entity in allentities:
self._collection.append(entity)
def getgenrefromid(self, id):
""" has to be overwritten """
for genre in self._collection:
if genre['Genre_Id'] == safe_cast(id, int,0):
return genre['Genre']
break
return 'Sonstiges'
class History(StorageTableModel):
_tablename = 'history'
_datetimeformat = '%d.%m.%Y %H:%M:%S'
taskid = ''
tasktype = ''
epgid = 0
beginn = datetime.datetime.strptime('01.01.1900 00:00:00', _datetimeformat)
sender = ''
titel = ''
genre = ''
previewimagelink = ''
resolution = ''
sourcefile = ''
ip = ''
platform = ''
browser = ''
version = ''
language = ''
status = ''
created = datetime.datetime.strptime('01.01.1900 00:00:00', _datetimeformat)
updated = datetime.datetime.strptime('01.01.1900 00:00:00', _datetimeformat)
class User(StorageTableModel):
_tablename = 'userprofile'
_datetimeformat = '%d.%m.%Y %H:%M:%S'
AdsRemoved = False
ProUser = False
PushVideo = False
OtrUser = ''
OtrPassword = ''
UseCutlist = True
UseSubfolder = False
Protocol = 'ftp'
Server = ''
Port = 21
FtpUser = ''
FtpPassword = ''
ServerPath = '/'
created = datetime.datetime.strptime('01.01.1900 00:00:00', _datetimeformat)
updated = datetime.datetime.strptime('01.01.1900 00:00:00', _datetimeformat)
FtpConnectionChecked = None
OtrCredentialsChecked = None
def __setEncryptedProperties__(self):
self._encryptedproperties = ['OtrUser', 'OtrPassword', 'Server', 'FtpUser', 'FtpPassword']
return super().__setEncryptedProperties__()
| 27.121212
| 109
| 0.605587
|
16b5e5f5ae89f63ff1246ff975563b9b74f157ea
| 2,236
|
py
|
Python
|
video_compression/scripts/generate_video_cfg.py
|
AyanKumarBhunia/STDF-PyTorch
|
5a11613fa0dfad4697b495fadd5d4b718a1d7fb6
|
[
"OLDAP-2.2.1"
] | null | null | null |
video_compression/scripts/generate_video_cfg.py
|
AyanKumarBhunia/STDF-PyTorch
|
5a11613fa0dfad4697b495fadd5d4b718a1d7fb6
|
[
"OLDAP-2.2.1"
] | null | null | null |
video_compression/scripts/generate_video_cfg.py
|
AyanKumarBhunia/STDF-PyTorch
|
5a11613fa0dfad4697b495fadd5d4b718a1d7fb6
|
[
"OLDAP-2.2.1"
] | null | null | null |
# No maximum nfs (number of frames) restriction; intended for test videos.
# video name: VideoName_widthxheight_nfs.yuv
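# Example (illustrative): a raw file named 'BasketballPass_416x240_500.yuv' is parsed
# below into width=416, height=240 and nfs=500 (frames to encode), and produces
# './video_cfg/<tab>/BasketballPass_416x240_500.cfg' alongside the other per-video configs.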
import glob
import os
import sys
import os.path as op
dir_dataset = sys.argv[1]
tab = sys.argv[2] # test_18 | train_108
def generate_vid_cfg(dir_dataset, tab):
raw_video_dir = op.join(dir_dataset, f'{tab}/raw')
cfg_save_dir = f'./video_cfg/{tab}'
if not os.path.exists(cfg_save_dir):
os.makedirs(cfg_save_dir)
# find all raw videos
raw_video_list = glob.glob(os.path.join(raw_video_dir, "*.yuv"))
num_videos = len(raw_video_list)
print(f'{num_videos} videos found.')
# generate VideoName.cfg and save
for ite_vid in range(num_videos):
raw_video_path = raw_video_list[ite_vid]
raw_video_name = os.path.basename(raw_video_path).split(".")[0]
_res = raw_video_name.split("_")[1]
width = _res.split("x")[0]
height = _res.split("x")[1]
nfs = raw_video_name.split("_")[2]
cfg_path = os.path.join(cfg_save_dir, raw_video_name + ".cfg")
fp = open(cfg_path, 'w')
_str = "#======== File I/O ===============\n"
fp.write(_str)
video_path = os.path.join(raw_video_dir, raw_video_name + ".yuv")
_str = "InputFile : " + video_path + "\n"
fp.write(_str)
_str = "InputBitDepth : 8 # Input bitdepth\n"
fp.write(_str)
_str = "InputChromaFormat : 420 # Ratio of luminance to chrominance samples\n"
fp.write(_str)
_str = "FrameRate : 50 # Frame Rate per second\n"
fp.write(_str)
_str = "FrameSkip : 0 # Number of frames to be skipped in input\n"
fp.write(_str)
_str = "SourceWidth : " + width + " # Input frame width\n"
fp.write(_str)
_str = "SourceHeight : " + height + " # Input frame height\n"
fp.write(_str)
_str = "FramesToBeEncoded : " + nfs + " # Number of frames to be coded\n"
fp.write(_str)
_str = "Level : 3.1\n"
fp.write(_str)
fp.close()
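# Note: the function above is only defined, so a guarded call is needed for the
# script to actually emit the .cfg files. A minimal sketch, assuming the intended
# CLI is `python generate_video_cfg.py <dataset_dir> <test_18|train_108>`, so that
# e.g. a name like "BasketballDrill_832x480_500.yuv" parses into width=832,
# height=480 and nfs=500:
if __name__ == '__main__':
    generate_vid_cfg(dir_dataset, tab)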
| 37.266667
| 106
| 0.54517
|
cf4812009598b2678e867c1c20fc7dc1530a64d7
| 9,457
|
py
|
Python
|
examples/simple_example.py
|
thetestgame/panda3d_astron
|
9ceeba53a1f497fcc601905a5c87a4eddb281714
|
[
"MIT"
] | 2
|
2022-02-18T23:22:48.000Z
|
2022-02-21T13:15:33.000Z
|
examples/simple_example.py
|
thetestgame/panda3d_astron
|
9ceeba53a1f497fcc601905a5c87a4eddb281714
|
[
"MIT"
] | null | null | null |
examples/simple_example.py
|
thetestgame/panda3d_astron
|
9ceeba53a1f497fcc601905a5c87a4eddb281714
|
[
"MIT"
] | null | null | null |
# For UberDOGs
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.distributed.DistributedObjectGlobalAI import DistributedObjectGlobalAI
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
# For regular DOs
from direct.distributed.DistributedObject import DistributedObject
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.distributed.DistributedObjectUD import DistributedObjectUD
from direct.distributed.DistributedObjectOV import DistributedObjectOV
# For DOs that are also Panda3D scene graph nodes
from direct.distributed.DistributedNode import DistributedNode
from direct.distributed.DistributedNodeAI import DistributedNodeAI
# AI tasks
from direct.task import Task
from datetime import datetime
# Constant DO and channel IDs
from simple_example_globals import LoginManagerId
# Game settings
avatar_speed = 3.0
avatar_rotation_speed = 90.0
# -------------------------------------------------------------------
# LoginManager
# * Registers a DistributedMaproot
# * Authenticates Clients
# * Makes DistributedMaproot create an avatar for new Clients.
# -------------------------------------------------------------------
class LoginManager(DistributedObjectGlobal):
def generateInit(self):
print(datetime.now().strftime("%H:%M:%S")+" LoginManager.generateInit() for "+str(self.doId))
def login(self, username, password):
# FIXME: Use TLS so that these are encrypted!
print(datetime.now().strftime("%H:%M:%S")+" LoginManager.login("+username+", <password>) in "+str(self.doId))
self.sendUpdate("login", [username, password])
class LoginManagerAI(DistributedObjectGlobalAI):
def generate(self):
print(datetime.now().strftime("%H:%M:%S")+" LoginManagerAI.generate() for "+str(self.doId))
def set_maproot(self, maproot_doId):
print(datetime.now().strftime("%H:%M:%S")+" LoginManagerAI.set_maproot("+str(maproot_doId)+") in "+str(self.doId))
self.sendUpdate("set_maproot", [maproot_doId])
class LoginManagerUD(DistributedObjectGlobalUD):
def generate(self):
print(datetime.now().strftime("%H:%M:%S")+" LoginManagerUD.generate() for "+str(self.doId))
def set_maproot(self, maproot_doId):
"""Tells the LoginManagerUD what maproot to notify on login."""
print(datetime.now().strftime("%H:%M:%S")+" LoginManagerUD.set_maproot("+str(maproot_doId)+") in "+str(self.doId))
self.maproot = DistributedMaprootUD(self.air)
self.maproot.generateWithRequiredAndId(maproot_doId, 0, 1)
def login(self, username, password):
clientId = self.air.get_msg_sender()
print(datetime.now().strftime("%H:%M:%S")+" LoginManagerUD.login("+username+", <password>) in "+str(self.doId)+" for client "+str(clientId))
if (username == "guest") and (password == "guest"):
# Authenticate a client
# FIXME: "2" is the magic number for CLIENT_STATE_ESTABLISHED,
# for which currently no mapping exists.
self.air.setClientState(clientId, 2)
# The client is now authenticated; create an Avatar
#self.maproot.sendUpdate("createAvatar", # Field to call
# [clientId]) # Arguments
self.maproot.create_avatar(clientId)
# log login
self.notify.info("Login successful (user: %s)" % (username,))
else:
# Disconnect for bad auth
# FIXME: "122" is the magic number for login problems.
# See https://github.com/Astron/Astron/blob/master/doc/protocol/10-client.md
self.air.eject(clientId, 122, "Bad credentials")
# log login attempt
self.notify.info("Ejecting client for bad credentials (user: %s)" % (username,))
# -------------------------------------------------------------------
# DistributedMaproot
# * has all avatars in its zone 0
# * generates new avatars
# -------------------------------------------------------------------
#class DistributedMaproot(DistributedObject):
# def generateInit(self):
# print(datetime.now().strftime("%H:%M:%S")+" DistributedMaproot.generateInit() for "+str(self.doId))
#class DistributedMaprootOV(DistributedObjectOV):
# def generate(self):
# print(datetime.now().strftime("%H:%M:%S")+" DistributedMaprootOV.generate() for "+str(self.doId))
class DistributedMaproot(DistributedObjectAI):
pass
class DistributedMaprootAI(DistributedObjectAI):
def generate(self):
print(datetime.now().strftime("%H:%M:%S")+" DistributedMaprootAI.generate() for "+str(self.doId))
def set_maproot(self):
print(datetime.now().strftime("%H:%M:%S")+" DistributedMaprootAI.set_maproot() in "+str(self.doId))
login_manager = self.air.generateGlobalObject(LoginManagerId, 'LoginManager')
login_manager.set_maproot(self.doId)
def createAvatar(self, clientId):
print(datetime.now().strftime("%H:%M:%S")+" DistributedMaprootAI.createAvatar("+str(clientId)+") in "+str(self.doId))
# Create the avatar
avatar = DistributedAvatarAI(self.air)
avatar.generateWithRequiredAndId(self.air.allocateChannel(), self.getDoId(), 0) # random doId, parentId, zoneId
self.air.setAI(avatar.doId, self.air.ourChannel)
# Set the client to be interested in our zone 0. He can't do
# that himself (or rather: shouldn't be allowed to) as he has
# no visibility of this object.
# We're always using the interest_id 0 because different
# clients use different ID spaces, so why make things more
# complicated?
self.air.clientAddInterest(clientId, 0, self.getDoId(), 0) # client, interest, parent, zone
# Set its owner to the client, upon which in the Clients repo
# magically OV (OwnerView) is generated.
self.air.setOwner(avatar.getDoId(), clientId)
# Declare this to be a session object.
self.air.clientAddSessionObject(clientId, self.getDoId())
class DistributedMaprootUD(DistributedObjectUD):
def generate(self):
print(datetime.now().strftime("%H:%M:%S")+" DistributedMaprootUD.generate() for "+str(self.doId))
def create_avatar(self, clientId):
print(datetime.now().strftime("%H:%M:%S")+" DistributedMaprootUD.create_avatar("+str(clientId)+") in "+str(self.doId))
self.sendUpdate("createAvatar", # Field to call
[clientId]) # Arguments
# -------------------------------------------------------------------
# DistributedAvatar
# * represents players in the scene graph
# * routes indications of movement intents to AI
# * updates the actual position and orientation
# -------------------------------------------------------------------
class DistributedAvatar(DistributedNode):
def generateInit(self):
print(datetime.now().strftime("%H:%M:%S")+" DistributedAvatar.generateInit() for "+str(self.doId))
model = base.loader.loadModel("models/smiley")
model.reparent_to(self)
model.setH(180.0)
# Signal app that this is its avatar
base.messenger.send("distributed_avatar", [self])
def delete(self):
print(datetime.now().strftime("%H:%M:%S")+" DistributedAvatar.delete() for "+str(self.doId))
def setXYZH(self, *args):
DistributedNode.setXYZH(self, *args)
class DistributedAvatarOV(DistributedObjectOV):
def generateInit(self):
# Make yourself known to the client
print(datetime.now().strftime("%H:%M:%S")+" DistributedAvatarOV.generate() for "+str(self.doId))
base.messenger.send("avatar", [self])
def delete(self):
print(datetime.now().strftime("%H:%M:%S")+" DistributedAvatarOV.delete() for "+str(self.doId))
def indicateIntent(self, heading, speed):
self.sendUpdate("indicateIntent", [heading, speed])
class DistributedAvatarAI(DistributedNodeAI):
def generate(self, repository=None):
print(datetime.now().strftime("%H:%M:%S")+" DistributedAvatarAI.generate() for "+str(self.doId))
self.heading = 0.0
self.speed = 0.0
self.update_task = base.taskMgr.add(self.update_position, "Avatar position update")
def delete(self):
print(datetime.now().strftime("%H:%M:%S")+" DistributedAvatarAI.delete() for "+str(self.doId))
base.taskMgr.remove(self.update_task)
def indicateIntent(self, heading, speed):
if (heading < -1.0) or (heading > 1.0) or (speed < -1.0) or (speed > 1.0):
# Client is cheating!
# FIXME: Eject client
return
self.heading = heading
self.speed = speed
def update_position(self, task):
if (self.heading != 0.0) or (self.speed != 0.0):
dt = globalClock.getDt()
self.setH((self.getH() + self.heading * avatar_rotation_speed * dt) % 360.0)
self.setY(self, self.speed * avatar_speed * dt)
if self.getX() < -10.0:
self.setX(-10.0)
if self.getX() > 10.0:
self.setX(10.0)
if self.getY() < -10.0:
self.setY(-10.0)
if self.getY() > 10.0:
self.setY(10.0)
self.b_setXYZH(self.getX(), self.getY(), self.getZ(), self.getH())
return Task.cont
| 45.68599
| 149
| 0.633922
|
5b5818e458c5699a3f260646250c5ecae00beb63
| 1,019
|
py
|
Python
|
test/final/old/PythonSocket-1/client.py
|
Riteme/test
|
b511d6616a25f4ae8c3861e2029789b8ee4dcb8d
|
[
"BSD-Source-Code"
] | 3
|
2018-08-30T09:43:20.000Z
|
2019-12-03T04:53:43.000Z
|
test/final/old/PythonSocket-1/client.py
|
Riteme/test
|
b511d6616a25f4ae8c3861e2029789b8ee4dcb8d
|
[
"BSD-Source-Code"
] | null | null | null |
test/final/old/PythonSocket-1/client.py
|
Riteme/test
|
b511d6616a25f4ae8c3861e2029789b8ee4dcb8d
|
[
"BSD-Source-Code"
] | null | null | null |
#
# Copyright 2015 riteme
#
import random
import time
import socket
import sys
SERVER_IP = '127.0.0.1'
SERVER_PORT = 2048
DATA_START = 1
if not __name__ == '__main__' or len(sys.argv) == 1:
print 'No parameters given.'
else:
if len(sys.argv) > 1:
SERVER_IP = sys.argv[1]
DATA_START += 1
if len(sys.argv) > 2:
SERVER_PORT = int(sys.argv[2])
DATA_START += 1
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((SERVER_IP, SERVER_PORT))
print 'Connected to the server.'
BUF_MAXSIZE = 1024
print client_socket.recv(BUF_MAXSIZE)
for data in sys.argv[DATA_START:]:
client_socket.send(data)
if data == 'exit':
print 'Exited.'
break
if data == 'exitserver':
print 'Client and server exited.'
break
print client_socket.recv(BUF_MAXSIZE)
time.sleep(random.random() * 2)
if not 'exit' in sys.argv and not 'exitserver' in sys.argv:
client_socket.send('exit')
client_socket.close()
| 21.680851
| 65
| 0.662414
|
fb8a6ad885184d2374ac72230c33cf2189555a8d
| 15,168
|
py
|
Python
|
wavefront_api_client/api/query_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/api/query_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/api/query_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: support@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from wavefront_api_client.api_client import ApiClient
class QueryApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def query_api(self, q, s, g, **kwargs): # noqa: E501
"""Perform a charting query against Wavefront servers that returns the appropriate points in the specified time window and granularity # noqa: E501
Long time spans and small granularities can take a long time to calculate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_api(q, s, g, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str q: the query expression to execute (required)
:param str s: the start time of the query window in epoch milliseconds (required)
:param str g: the granularity of the points returned (required)
:param str n: name used to identify the query
:param str e: the end time of the query window in epoch milliseconds (null to use now)
:param str p: the approximate maximum number of points to return (may not limit number of points exactly)
:param bool i: whether series with only points that are outside of the query window will be returned (defaults to true)
:param bool auto_events: whether events for sources included in the query will be automatically returned by the query
:param str summarization: summarization strategy to use when bucketing points together
:param bool list_mode: retrieve events more optimally displayed for a list
:param bool strict: do not return points outside the query window [s;e), defaults to false
:param bool include_obsolete_metrics: include metrics that have not been reporting recently, defaults to false
:param bool sorted: sorts the output so that returned series are in order, defaults to false
:param bool cached: whether the query cache is used, defaults to true
:return: QueryResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.query_api_with_http_info(q, s, g, **kwargs) # noqa: E501
else:
(data) = self.query_api_with_http_info(q, s, g, **kwargs) # noqa: E501
return data
def query_api_with_http_info(self, q, s, g, **kwargs): # noqa: E501
"""Perform a charting query against Wavefront servers that returns the appropriate points in the specified time window and granularity # noqa: E501
Long time spans and small granularities can take a long time to calculate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_api_with_http_info(q, s, g, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str q: the query expression to execute (required)
:param str s: the start time of the query window in epoch milliseconds (required)
:param str g: the granularity of the points returned (required)
:param str n: name used to identify the query
:param str e: the end time of the query window in epoch milliseconds (null to use now)
:param str p: the approximate maximum number of points to return (may not limit number of points exactly)
:param bool i: whether series with only points that are outside of the query window will be returned (defaults to true)
:param bool auto_events: whether events for sources included in the query will be automatically returned by the query
:param str summarization: summarization strategy to use when bucketing points together
:param bool list_mode: retrieve events more optimally displayed for a list
:param bool strict: do not return points outside the query window [s;e), defaults to false
:param bool include_obsolete_metrics: include metrics that have not been reporting recently, defaults to false
:param bool sorted: sorts the output so that returned series are in order, defaults to false
:param bool cached: whether the query cache is used, defaults to true
:return: QueryResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['q', 's', 'g', 'n', 'e', 'p', 'i', 'auto_events', 'summarization', 'list_mode', 'strict', 'include_obsolete_metrics', 'sorted', 'cached'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method query_api" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'q' is set
if ('q' not in params or
params['q'] is None):
raise ValueError("Missing the required parameter `q` when calling `query_api`") # noqa: E501
# verify the required parameter 's' is set
if ('s' not in params or
params['s'] is None):
raise ValueError("Missing the required parameter `s` when calling `query_api`") # noqa: E501
# verify the required parameter 'g' is set
if ('g' not in params or
params['g'] is None):
raise ValueError("Missing the required parameter `g` when calling `query_api`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'n' in params:
query_params.append(('n', params['n'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
if 's' in params:
query_params.append(('s', params['s'])) # noqa: E501
if 'e' in params:
query_params.append(('e', params['e'])) # noqa: E501
if 'g' in params:
query_params.append(('g', params['g'])) # noqa: E501
if 'p' in params:
query_params.append(('p', params['p'])) # noqa: E501
if 'i' in params:
query_params.append(('i', params['i'])) # noqa: E501
if 'auto_events' in params:
query_params.append(('autoEvents', params['auto_events'])) # noqa: E501
if 'summarization' in params:
query_params.append(('summarization', params['summarization'])) # noqa: E501
if 'list_mode' in params:
query_params.append(('listMode', params['list_mode'])) # noqa: E501
if 'strict' in params:
query_params.append(('strict', params['strict'])) # noqa: E501
if 'include_obsolete_metrics' in params:
query_params.append(('includeObsoleteMetrics', params['include_obsolete_metrics'])) # noqa: E501
if 'sorted' in params:
query_params.append(('sorted', params['sorted'])) # noqa: E501
if 'cached' in params:
query_params.append(('cached', params['cached'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/x-javascript; charset=UTF-8', 'application/javascript; charset=UTF-8']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/chart/api', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def query_raw(self, metric, **kwargs): # noqa: E501
"""Perform a raw data query against Wavefront servers that returns second granularity points grouped by tags # noqa: E501
An API to check if ingested points are as expected. Points ingested within a single second are averaged when returned. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_raw(metric, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str metric: metric to query ingested points for (cannot contain wildcards) (required)
:param str host: host to query ingested points for (cannot contain wildcards). host or source is equivalent, only one should be used.
:param str source: source to query ingested points for (cannot contain wildcards). host or source is equivalent, only one should be used.
:param int start_time: start time in epoch milliseconds (cannot be more than a day in the past) null to use an hour before endTime
:param int end_time: end time in epoch milliseconds (cannot be more than a day in the past) null to use now
:return: list[RawTimeseries]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.query_raw_with_http_info(metric, **kwargs) # noqa: E501
else:
(data) = self.query_raw_with_http_info(metric, **kwargs) # noqa: E501
return data
def query_raw_with_http_info(self, metric, **kwargs): # noqa: E501
"""Perform a raw data query against Wavefront servers that returns second granularity points grouped by tags # noqa: E501
An API to check if ingested points are as expected. Points ingested within a single second are averaged when returned. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_raw_with_http_info(metric, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str metric: metric to query ingested points for (cannot contain wildcards) (required)
:param str host: host to query ingested points for (cannot contain wildcards). host or source is equivalent, only one should be used.
:param str source: source to query ingested points for (cannot contain wildcards). host or source is equivalent, only one should be used.
:param int start_time: start time in epoch milliseconds (cannot be more than a day in the past) null to use an hour before endTime
:param int end_time: end time in epoch milliseconds (cannot be more than a day in the past) null to use now
:return: list[RawTimeseries]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['metric', 'host', 'source', 'start_time', 'end_time'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method query_raw" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'metric' is set
if ('metric' not in params or
params['metric'] is None):
raise ValueError("Missing the required parameter `metric` when calling `query_raw`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'host' in params:
query_params.append(('host', params['host'])) # noqa: E501
if 'source' in params:
query_params.append(('source', params['source'])) # noqa: E501
if 'metric' in params:
query_params.append(('metric', params['metric'])) # noqa: E501
if 'start_time' in params:
query_params.append(('startTime', params['start_time'])) # noqa: E501
if 'end_time' in params:
query_params.append(('endTime', params['end_time'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/chart/raw', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[RawTimeseries]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 50.392027
| 409
| 0.643064
|
8d6a785982b1f48468669c0657cf63d5bc90e027
| 740
|
py
|
Python
|
vvn_sample.py
|
YD-CRDG/VoicevoxEngine_Negotiator_Python
|
542ed90930b5a031f798041e229e836a49b4b6cf
|
[
"MIT"
] | null | null | null |
vvn_sample.py
|
YD-CRDG/VoicevoxEngine_Negotiator_Python
|
542ed90930b5a031f798041e229e836a49b4b6cf
|
[
"MIT"
] | null | null | null |
vvn_sample.py
|
YD-CRDG/VoicevoxEngine_Negotiator_Python
|
542ed90930b5a031f798041e229e836a49b4b6cf
|
[
"MIT"
] | null | null | null |
import vvx_nego
if __name__ == "__main__":
    # Change the 'hoge' part to the path where the engine (run.exe) is located, then run
vvn = vvx_nego.VoicevoxNegotiation("hoge\\run.exe")
vvn.request_audio_query("これは", speaker=1)
vvn.request_synthesis(vvn.audio_query, speaker=1)
vvn.multi_synthesis.append(vvn.synthesis)
vvn.request_audio_query("読み上げを実行する", speaker=3)
vvn.request_synthesis(vvn.audio_query, speaker=3)
vvn.multi_synthesis.append(vvn.synthesis)
vvn.request_audio_query("サンプルコードです", speaker=5)
vvn.request_synthesis(vvn.audio_query, speaker=5)
vvn.multi_synthesis.append(vvn.synthesis)
vvn.request_connect_waves(vvn.multi_synthesis)
    # Audio will play
vvn.local_play_synthesis(vvn.synthesis)
input()
| 32.173913
| 56
| 0.724324
|
c18fbe22aae976259ca87ddb8676b020bc2f54c6
| 669
|
py
|
Python
|
turismo/sitio/migrations/0019_auto_20170525_0052.py
|
giocastagno/I.W._Delpuppo_Kopech_Castagno
|
821cf8e11eb78d9f478e642e9eb0db1534dc28b3
|
[
"MIT"
] | null | null | null |
turismo/sitio/migrations/0019_auto_20170525_0052.py
|
giocastagno/I.W._Delpuppo_Kopech_Castagno
|
821cf8e11eb78d9f478e642e9eb0db1534dc28b3
|
[
"MIT"
] | null | null | null |
turismo/sitio/migrations/0019_auto_20170525_0052.py
|
giocastagno/I.W._Delpuppo_Kopech_Castagno
|
821cf8e11eb78d9f478e642e9eb0db1534dc28b3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-25 00:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sitio', '0018_auto_20170524_2155'),
]
operations = [
migrations.RemoveField(
model_name='perfil_usuario',
name='localidad',
),
migrations.AlterField(
model_name='perfil_usuario',
name='telefono',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.DeleteModel(
name='Localidad',
),
]
| 23.892857
| 73
| 0.590433
|
6c2ea45174bb7c7bd38c2a8914b76c64a88e92cc
| 4,662
|
py
|
Python
|
readthedocs/core/history.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,054
|
2015-01-01T00:58:07.000Z
|
2019-06-28T05:50:49.000Z
|
readthedocs/core/history.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,282
|
2015-01-01T21:38:49.000Z
|
2019-06-28T15:41:00.000Z
|
readthedocs/core/history.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 3,224
|
2015-01-01T07:38:45.000Z
|
2019-06-28T09:19:10.000Z
|
import structlog
from functools import partial
from django import forms
from django.db import models
from django.utils.translation import gettext_lazy as _
from simple_history.admin import SimpleHistoryAdmin
from simple_history.models import HistoricalRecords
from simple_history.utils import update_change_reason
log = structlog.get_logger(__name__)
def set_change_reason(instance, reason):
"""
Set the change reason for the historical record created from the instance.
This method should be called before calling ``save()`` or ``delete``.
It sets `reason` to the `_change_reason` attribute of the instance,
that's used to create the historical record on the save/delete signals.
https://django-simple-history.readthedocs.io/en/latest/historical_model.html#change-reason # noqa
"""
instance._change_reason = reason
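# A minimal usage sketch (``project`` and the reason string are illustrative):
#
#     set_change_reason(project, "origin=api class=ProjectSerializer")
#     project.save()
#
# The save/delete signal then records the reason on the historical entry.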
def safe_update_change_reason(instance, reason):
"""
Wrapper around update_change_reason to catch exceptions.
.. warning::
The implementation of django-simple-history's `update_change_reason`
is very brittle, as it queries for a previous historical record
that matches the attributes of the instance to update the ``change_reason``,
which could end up updating the wrong record, or not finding it.
If you already have control over the object, use `set_change_reason`
before updating/deleting the object instead.
That's more safe, since the attribute is passed to the signal
and used at the creation time of the record.
https://django-simple-history.readthedocs.io/en/latest/historical_model.html#change-reason # noqa
"""
try:
update_change_reason(instance=instance, reason=reason)
except Exception:
log.exception(
'An error occurred while updating the change reason of the instance.',
instance=instance,
)
class ExtraFieldsHistoricalModel(models.Model):
"""
Abstract model to allow history models track extra data.
Extra data includes:
- User information to retain after they have been deleted
- IP & browser
"""
extra_history_user_id = models.IntegerField(
_('ID'),
blank=True,
null=True,
db_index=True,
)
extra_history_user_username = models.CharField(
_('username'),
max_length=150,
null=True,
db_index=True,
)
extra_history_ip = models.CharField(
_('IP address'),
blank=True,
null=True,
max_length=250,
)
extra_history_browser = models.CharField(
_('Browser user-agent'),
max_length=250,
blank=True,
null=True,
)
class Meta:
abstract = True
ExtraHistoricalRecords = partial(HistoricalRecords, bases=[ExtraFieldsHistoricalModel])
"""Helper partial to use instead of HistoricalRecords."""
class ExtraSimpleHistoryAdmin(SimpleHistoryAdmin):
"""Set the change_reason on the model changed through this admin view."""
change_reason = None
def get_change_reason(self):
if self.change_reason:
return self.change_reason
klass = self.__class__.__name__
return f'origin=admin class={klass}'
def save_model(self, request, obj, form, change):
if obj:
set_change_reason(obj, self.get_change_reason())
super().save_model(request, obj, form, change)
def delete_model(self, request, obj):
if obj:
set_change_reason(obj, self.get_change_reason())
super().delete_model(request, obj)
class SimpleHistoryModelForm(forms.ModelForm):
"""Set the change_reason on the model changed through this form."""
change_reason = None
def get_change_reason(self):
if self.change_reason:
return self.change_reason
klass = self.__class__.__name__
return f'origin=form class={klass}'
def save(self, commit=True):
if self.instance:
set_change_reason(self.instance, self.get_change_reason())
return super().save(commit=commit)
class UpdateChangeReasonPostView:
"""
Set the change_reason on the model changed through the POST method of this view.
Use this class for views that don't use a form, like ``DeleteView``.
"""
change_reason = None
def get_change_reason(self):
if self.change_reason:
return self.change_reason
klass = self.__class__.__name__
return f'origin=form class={klass}'
def get_object(self):
obj = super().get_object()
set_change_reason(obj, self.get_change_reason())
return obj
| 29.320755
| 106
| 0.68254
|
effbae8821da7a2fddb378cb5cace0d1c6d047d7
| 6,384
|
py
|
Python
|
delta/data/preprocess/text_ops.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 1,442
|
2019-07-09T07:34:28.000Z
|
2020-11-15T09:52:09.000Z
|
delta/data/preprocess/text_ops.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 93
|
2019-07-22T09:20:20.000Z
|
2020-11-13T01:59:30.000Z
|
delta/data/preprocess/text_ops.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 296
|
2019-07-09T07:35:28.000Z
|
2020-11-16T02:27:51.000Z
|
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' Text related pre-process in ops.'''
import delta.compat as tf
from absl import logging
from delta import utils
from core.ops import py_x_ops
from delta.data.utils import read_lines_from_text_file
def tokenize_label(label, maxlen, label_vocab_file_path, pad_id):
"""Tokenize labels"""
vocabs = read_lines_from_text_file(label_vocab_file_path)
label_id, _ = py_x_ops.sentence_to_ids(
label,
maxlen=maxlen,
use_vocab_file=False,
vocab=vocabs,
load_token_ids_from_vocab=True,
pad_id=pad_id,
check_tokens=False)
return label_id
def tokenize_sentence(texts, max_seq_len, vocab_path):
"""Tokenize sentence"""
vocabs = read_lines_from_text_file(vocab_path)
token_ids, _ = py_x_ops.sentence_to_ids(
texts,
maxlen=max_seq_len,
use_vocab_file=False,
vocab=vocabs,
load_token_ids_from_vocab=True,
pad_id=utils.PAD_IDX,
check_tokens=False)
return token_ids
def chinese_word_cut_tf(input_str, use_file=False):
""""""
output_str = py_x_ops.jieba_cut(input_str, use_file=use_file, hmm=True)
return output_str
def clean_english_str_tf(input_str):
"""Clean English string with tensorflow oprations."""
# pylint: disable=anomalous-backslash-in-string
string = tf.regex_replace(input_str, r"[^A-Za-z0-9(),!?\'\`<>/]", " ")
string = tf.regex_replace(string, "\'s", " \'s")
string = tf.regex_replace(string, "\'ve", " \'ve")
string = tf.regex_replace(string, "n\'t", " n\'t")
string = tf.regex_replace(string, "\'re", " \'re")
string = tf.regex_replace(string, "\'d", " \'d")
string = tf.regex_replace(string, "\'ll", " \'ll")
string = tf.regex_replace(string, ",", " , ")
string = tf.regex_replace(string, "!", " ! ")
string = tf.regex_replace(string, "\(", " ( ")
string = tf.regex_replace(string, "\)", " ) ")
string = tf.regex_replace(string, "\?", " ? ")
string = tf.regex_replace(string, "\s{2,}", " ")
string = tf.string_strip(string)
string = py_x_ops.str_lower(string)
return string
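# Illustrative behaviour of the cleaning op above (assuming eager execution):
# tf.constant("It's a test, isn't it?") becomes "it 's a test , is n't it ?".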
def char_cut_tf(input_str):
"""Cut sentence char by char with tensoflow operations."""
input_str = tf.convert_to_tensor(input_str)
rank = len(input_str.get_shape())
if rank == 1:
output_str = tf.strings.unicode_split(input_str,
"UTF-8").to_tensor(default_value="")
output_str = tf.strings.reduce_join(output_str, axis=1, separator=" ")
elif rank == 0:
output_str = tf.strings.unicode_split(input_str, "UTF-8")
output_str = tf.strings.reduce_join(output_str, axis=0, separator=" ")
else:
logging.error("Please check the shape of input_str!")
raise Exception("Error input shape for input_str.")
output_str = tf.strings.strip(output_str)
return output_str
def load_textline_dataset(paths, column_num):
"""Load raw data for text task."""
ds = tf.data.TextLineDataset(paths)
ds = ds.map(lambda x: tf.squeeze(
tf.strings.split(x, sep="\t", result_type="RaggedTensor"), axis=0))
ds = ds.filter(lambda line: tf.equal(tf.size(line), column_num))
ds_list = []
for i in range(column_num):
ds_list.append(ds.map(lambda x: x[i]))
return tuple(ds_list)
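# Usage sketch (the file name and two-column "label<TAB>sentence" layout are
# assumptions for illustration):
#
#     label_ds, text_ds = load_textline_dataset(["train.tsv"], column_num=2)
#
# Lines that do not split into exactly column_num fields are filtered out above.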
def process_one_label_dataset(label_ds, config, output_index=None):
"""process one-label data set."""
logging.info("Loading one label dataset...")
num_parallel_calls = config["data"]["task"]["num_parallel_calls"]
classes = config["data"]["task"]["classes"]
if isinstance(classes, list):
if output_index is None or output_index not in range(len(classes)):
raise IndexError("output_index:{} not in the range of classes length: "
"{}!".format(output_index, len(classes)))
num_classes = classes[output_index]["num_classes"]
label_vocab_file_path = config["data"]["task"]["label_vocab"][output_index]
else:
num_classes = classes["num_classes"]
label_vocab_file_path = config["data"]["task"]["label_vocab"]
label_ds = label_ds.map(
lambda x: tokenize_label(
x, maxlen=1, label_vocab_file_path=label_vocab_file_path, pad_id=0),
num_parallel_calls=num_parallel_calls)
label_ds = label_ds.map(
lambda l: tf.one_hot(l, num_classes, dtype=tf.int32),
num_parallel_calls=num_parallel_calls)
label_ds = label_ds.map(tf.squeeze, num_parallel_calls=num_parallel_calls)
return label_ds
def process_multi_label_dataset(label_ds, config, output_index=None):
"""process multi-label data set."""
logging.info("Loading multi label dataset...")
label_vocab_file_path = config["data"]["task"]["label_vocab"]
num_parallel_calls = config["data"]["task"]["num_parallel_calls"]
max_seq_len = config["data"]["task"]["max_seq_len"]
label_vocab_file_path = config["data"]["task"]["label_vocab"]
if isinstance(label_vocab_file_path, list):
if output_index is None or output_index not in range(
len(label_vocab_file_path)):
raise IndexError("output_index:{} not in the range of classes length: "
"{}!".format(output_index, len(label_vocab_file_path)))
label_vocab_file_path = label_vocab_file_path[output_index]
else:
label_vocab_file_path = label_vocab_file_path
label_ds = label_ds.map(
lambda x: tokenize_label(
x,
maxlen=max_seq_len,
label_vocab_file_path=label_vocab_file_path,
pad_id=0),
num_parallel_calls=num_parallel_calls)
label_ds = label_ds.map(tf.squeeze, num_parallel_calls=num_parallel_calls)
return label_ds
def load_dense_dataset(dense_feature):
"""Load dense data set"""
dataset = tf.data.Dataset.from_tensor_slices(dense_feature)
return dataset
| 36.067797
| 80
| 0.693452
|
d875e29fb0fea10a5202e3bf3b0039acfa02aa90
| 4,137
|
py
|
Python
|
slot/w/sword.py
|
Zeiin/dl
|
bce5e239dc751baa9266aa5adbe7c8d078d8a9ac
|
[
"Apache-2.0"
] | null | null | null |
slot/w/sword.py
|
Zeiin/dl
|
bce5e239dc751baa9266aa5adbe7c8d078d8a9ac
|
[
"Apache-2.0"
] | null | null | null |
slot/w/sword.py
|
Zeiin/dl
|
bce5e239dc751baa9266aa5adbe7c8d078d8a9ac
|
[
"Apache-2.0"
] | null | null | null |
from slot import WeaponBase
from slot.w import agito_buffs
class HDT1_Crimson(WeaponBase):
ele = ['flame']
wt = 'sword'
att = 765
s3 = {
"dmg" : 5*1.65 ,
"sp" : 6847 ,
"startup" : 0.1 ,
"recovery" : 3.1 ,
"hit" : 5 ,
} # Crimson Storm
a = [('k', 0.3, 'vs HMS')]
class HDT2_Absolute_Crimson(WeaponBase):
ele = ['flame']
wt = 'sword'
att = 1530
s3 = {
"dmg" : 5*1.65 ,
"sp" : 6847 ,
"startup" : 0.1 ,
"recovery" : 3.1 ,
"hit" : 5 ,
} # Infinite Crimson
a = []
class HDT1_Aqua(WeaponBase):
ele = ['water']
wt = 'sword'
att = 765
s3 = {
"dmg" : 3*2.48 ,
"sp" : 6418 ,
"startup" : 0.1 ,
"recovery" : 1.9 ,
"hit" : 3 ,
} # Aqua Storm
a = [('k', 0.3, 'vs HBH')]
class HDT2_Absolute_Aqua(WeaponBase):
ele = ['water']
wt = 'sword'
att = 1530
s3 = {
"dmg" : 3*2.48 ,
"sp" : 6418 ,
"startup" : 0.1 ,
"recovery" : 1.9 ,
"hit" : 3 ,
} # Infinite Aqua
a = []
class HDT1_Tempest(WeaponBase):
ele = ['wind']
wt = 'sword'
att = 705
s3 = {
"dmg" : 3*2.48 ,
"sp" : 6418 ,
"startup" : 0.1 ,
"recovery" : 1.9 ,
"hit" : 3 ,
} # Tempest Storm
a = [('k', 0.3, 'vs HMC')]
class HDT2_Absolute_Tempest(WeaponBase):
ele = ['wind']
wt = 'sword'
att = 1411
s3 = {
"dmg" : 3*2.48 ,
"sp" : 6418 ,
"startup" : 0.1 ,
"recovery" : 1.9 ,
"hit" : 3 ,
} # Infinite Tempest
a = []
class HDT1_Lightning(WeaponBase):
ele = ['light']
wt = 'sword'
att = 743
s3 = {
"dmg" : 3*2.48 ,
"sp" : 6418 ,
"startup" : 0.1 ,
"recovery" : 1.9 ,
"hit" : 3 ,
} # Lightning Storm
a = [('k', 0.3, 'vs HZD')]
class HDT2_Absolute_Lightning(WeaponBase):
ele = ['light']
wt = 'sword'
att = 1485
s3 = {
"dmg" : 3*2.48 ,
"sp" : 6418 ,
"startup" : 0.1 ,
"recovery" : 1.9 ,
"hit" : 3 ,
} # Infinite Lightning
a = []
class HDT1_Hex(WeaponBase):
ele = ['shadow']
wt = 'sword'
att = 743
s3 = {
"dmg" : 5*1.65 ,
"sp" : 6163 ,
"startup" : 0.1 ,
"recovery" : 3.1 ,
"hit" : 5 ,
} # Hexing Storm
a = [('k', 0.3, 'vs HJP')]
class HDT2_Absolute_Hex(WeaponBase):
ele = ['shadow']
wt = 'sword'
att = 1485
s3 = {
"dmg" : 5*1.65 ,
"sp" : 6163 ,
"startup" : 0.1 ,
"recovery" : 3.1 ,
"hit" : 5 ,
} # Infinite Hexes
a = []
class Chimeratech_Sword(WeaponBase):
ele = ['flame', 'shadow', 'wind']
wt = 'sword'
att = 972
s3 = {} #
a = [('uo', 0.04)]
class Agito2_Nothung(WeaponBase):
ele = ['flame']
wt = 'sword'
att = 1696
s3 = agito_buffs['flame'][1]
class Agito1_Yitian_Jian(WeaponBase):
ele = ['shadow']
wt = 'sword'
att = 1544
s3 = agito_buffs['shadow'][1]
class Agito1_Nothung(WeaponBase):
ele = ['flame']
wt = 'sword'
att = 1544
s3 = agito_buffs['flame'][1]
class Agito1_Excalibur(WeaponBase):
ele = ['wind']
wt = 'sword'
att = 1544
s3 = agito_buffs['wind'][1]
class Agito1_Ame_no_Murakumo(WeaponBase):
ele = ['water']
wt = 'sword'
att = 1544
s3 = agito_buffs['water'][1]
class UnreleasedAgitoStr_LightSword(Agito1_Nothung):
ele = ['light']
class UnreleasedAgitoSpd_LightSword(Agito1_Yitian_Jian):
ele = ['light']
flame = Agito2_Nothung
water = Agito1_Ame_no_Murakumo
wind = Agito1_Excalibur
light = HDT2_Absolute_Lightning
shadow = Agito1_Yitian_Jian
| 22.856354
| 56
| 0.434856
|
a951e83f920ac96654b0090e5b7490e3a7787c0c
| 1,277
|
py
|
Python
|
DjangoFirst/GrayScale/views.py
|
FromF/GrayScaleDjango
|
538af606d75ace4a873295a792e523ade10287bc
|
[
"MIT"
] | null | null | null |
DjangoFirst/GrayScale/views.py
|
FromF/GrayScaleDjango
|
538af606d75ace4a873295a792e523ade10287bc
|
[
"MIT"
] | null | null | null |
DjangoFirst/GrayScale/views.py
|
FromF/GrayScaleDjango
|
538af606d75ace4a873295a792e523ade10287bc
|
[
"MIT"
] | null | null | null |
#from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import DocumentForm
from .models import Document
import cv2
from django.conf import settings
def index(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('index')
else:
form = DocumentForm()
if Document.objects.count() > 0:
max_id = Document.objects.latest('id').id
obj = Document.objects.get(id = max_id)
input_path = settings.BASE_DIR + obj.photo.url
output_path = settings.BASE_DIR + "/media/output/output.jpg"
gray(input_path,output_path)
return render(request, 'grayscale/index.html', {
'form': form,
'obj':obj,
})
return render(request,'grayscale/index.html', {
'form': form,
})
########### Customize here ############
def gray(input_path,output_path):
img = cv2.imread(input_path)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite(output_path, img_gray)
######################################
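# Illustrative call (paths are hypothetical):
#   gray("media/input/photo.jpg", "media/output/output.jpg")
# reads the input image, converts it to grayscale and writes it to output_path.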
| 30.404762
| 72
| 0.593579
|
6f16350ac01eb845c8a62de5b60eb40669a3a4db
| 12,374
|
py
|
Python
|
mmpose/datasets/datasets/bottom_up/bottom_up_mpii.py
|
yeshaokai/mmpose_for_maDLC
|
84efe0ff00de3d916086c8c5579eae17c1ef43cb
|
[
"Apache-2.0"
] | 5
|
2022-01-13T15:06:45.000Z
|
2022-01-28T19:39:54.000Z
|
mmpose/datasets/datasets/bottom_up/bottom_up_mpii.py
|
yeshaokai/mmpose_for_maDLC
|
84efe0ff00de3d916086c8c5579eae17c1ef43cb
|
[
"Apache-2.0"
] | null | null | null |
mmpose/datasets/datasets/bottom_up/bottom_up_mpii.py
|
yeshaokai/mmpose_for_maDLC
|
84efe0ff00de3d916086c8c5579eae17c1ef43cb
|
[
"Apache-2.0"
] | 1
|
2022-01-13T11:46:55.000Z
|
2022-01-13T11:46:55.000Z
|
import os
from collections import OrderedDict, defaultdict
import json_tricks as json
import numpy as np
import xtcocotools
from xtcocotools.coco import COCO
from xtcocotools.cocoeval import COCOeval
from mmpose.datasets.builder import DATASETS
from .bottom_up_base_dataset import BottomUpBaseDataset
@DATASETS.register_module()
class BottomUpMPIIDataset(BottomUpBaseDataset):
"""COCO dataset for bottom-up pose estimation.
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
MPII keypoint indexes::
0: 'right_ankle'
1: 'right_knee',
2: 'right_hip',
3: 'left_hip',
4: 'left_knee',
5: 'left_ankle',
6: 'pelvis',
7: 'thorax',
8: 'upper_neck',
9: 'head_top',
10: 'right_wrist',
11: 'right_elbow',
12: 'right_shoulder',
13: 'left_shoulder',
14: 'left_elbow',
15: 'left_wrist'
Args:
ann_file (str): Path to the annotation file.
img_prefix (str): Path to a directory where images are held.
Default: None.
data_cfg (dict): config
pipeline (list[dict | callable]): A sequence of data transforms.
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
ann_file,
img_prefix,
data_cfg,
pipeline,
test_mode=False):
super().__init__(ann_file, img_prefix, data_cfg, pipeline, test_mode)
self.ann_info['flip_index'] = [
5, 4, 3, 2, 1, 0, 6, 7, 8, 9, 15, 14, 13, 12, 11, 10
]
self.ann_info['use_different_joint_weights'] = False
self.ann_info['joint_weights'] = np.array(
[
1.5, 1.2, 1., 1., 1.2, 1.5, 1., 1., 1., 1., 1.5, 1.2, 1., 1.,
1.2, 1.5
],
dtype=np.float32).reshape((self.ann_info['num_joints'], 1))
# 'https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/'
# 'pycocotools/cocoeval.py#L523'
self.sigmas = np.array([
.89, .83, 1.07, 1.07, .83, .89, .26, .26, .26, .26, .62, .72, 1.79,
1.79, .72, .62
]) / 10.0
#self.sigmas = np.ones(16)*16/10.0
self.coco = COCO(ann_file)
cats = [
cat['name'] for cat in self.coco.loadCats(self.coco.getCatIds())
]
self.classes = ['__background__'] + cats
self.num_classes = len(self.classes)
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))
self._coco_ind_to_class_ind = dict(
(self._class_to_coco_ind[cls], self._class_to_ind[cls])
for cls in self.classes[1:])
self.img_ids = self.coco.getImgIds()
if not test_mode:
self.img_ids = [
img_id for img_id in self.img_ids
]
self.num_images = len(self.img_ids)
self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)
self.dataset_name = 'mpii'
print(f'=> num_images: {self.num_images}')
@staticmethod
def _get_mapping_id_name(imgs):
"""
Args:
imgs (dict): dict of image info.
Returns:
tuple: Image name & id mapping dicts.
- id2name (dict): Mapping image id to name.
- name2id (dict): Mapping image name to id.
"""
id2name = {}
name2id = {}
for image_id, image in imgs.items():
file_name = image['file_name']
id2name[image_id] = file_name
name2id[file_name] = image_id
return id2name, name2id
def _get_single(self, idx):
"""Get anno for a single image.
Args:
idx (int): image idx
Returns:
dict: info for model training
"""
coco = self.coco
img_id = self.img_ids[idx]
ann_ids = coco.getAnnIds(imgIds=img_id)
anno = coco.loadAnns(ann_ids)
mask = self._get_mask(anno, idx)
anno = [
obj for obj in anno
if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0
]
joints = self._get_joints(anno)
mask_list = [mask.copy() for _ in range(self.ann_info['num_scales'])]
joints_list = [
joints.copy() for _ in range(self.ann_info['num_scales'])
]
db_rec = {}
db_rec['dataset'] = self.dataset_name
db_rec['image_file'] = os.path.join(self.img_prefix,
self.id2name[img_id])
db_rec['mask'] = mask_list
db_rec['joints'] = joints_list
return db_rec
def _get_joints(self, anno):
"""Get joints for all people in an image."""
num_people = len(anno)
if self.ann_info['scale_aware_sigma']:
joints = np.zeros((num_people, self.ann_info['num_joints'], 4),
dtype=np.float32)
else:
joints = np.zeros((num_people, self.ann_info['num_joints'], 3),
dtype=np.float32)
for i, obj in enumerate(anno):
joints[i, :self.ann_info['num_joints'], :3] = \
np.array(obj['keypoints']).reshape([-1, 3])
if self.ann_info['scale_aware_sigma']:
# get person box
box = obj['bbox']
size = max(box[2], box[3])
sigma = size / self.base_size * self.base_sigma
if self.int_sigma:
sigma = int(np.ceil(sigma))
assert sigma > 0, sigma
joints[i, :, 3] = sigma
return joints
def _get_mask(self, anno, idx):
"""Get ignore masks to mask out losses."""
coco = self.coco
img_info = coco.loadImgs(self.img_ids[idx])[0]
m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)
for obj in anno:
if 'segmentation' in obj:
if obj['iscrowd']:
rle = xtcocotools.mask.frPyObjects(obj['segmentation'],
img_info['height'],
img_info['width'])
m += xtcocotools.mask.decode(rle)
elif obj['num_keypoints'] == 0:
rles = xtcocotools.mask.frPyObjects(
obj['segmentation'], img_info['height'],
img_info['width'])
for rle in rles:
m += xtcocotools.mask.decode(rle)
return m < 0.5
#def evaluate(self, outputs, res_folder, metric='PCKh', **kwargs):
def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
"""Evaluate coco keypoint results. The pose prediction results will be
saved in `${res_folder}/result_keypoints.json`.
Note:
num_people: P
num_keypoints: K
Args:
outputs (list(preds, scores, image_path, heatmap)):
* preds (list[np.ndarray(P, K, 3+tag_num)]):
Pose predictions for all people in images.
* scores (list[P]):
* image_path (list[str]): For example, ['coco/images/
val2017/000000397133.jpg']
* heatmap (np.ndarray[N, K, H, W]): model outputs.
res_folder (str): Path of directory to save the results.
metric (str | list[str]): Metric to be performed. Defaults: 'mAP'.
Returns:
dict: Evaluation results for evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['mAP']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
res_file = os.path.join(res_folder, 'result_keypoints.json')
preds = []
scores = []
image_paths = []
for output in outputs:
preds.append(output['preds'])
scores.append(output['scores'])
image_paths.append(output['image_paths'][0])
kpts = defaultdict(list)
# iterate over images
for idx, _preds in enumerate(preds):
str_image_path = image_paths[idx]
image_id = self.name2id[os.path.join('images',os.path.basename(str_image_path))]
# iterate over people
for idx_person, kpt in enumerate(_preds):
# use bbox area
area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (
np.max(kpt[:, 1]) - np.min(kpt[:, 1]))
kpts[image_id].append({
'keypoints': kpt[:, 0:3],
'score': scores[idx][idx_person],
'tags': kpt[:, 3],
'image_id': image_id,
'area': area,
})
oks_nmsed_kpts = []
for img in kpts.keys():
img_kpts = kpts[img]
keep = []
if len(keep) == 0:
oks_nmsed_kpts.append(img_kpts)
else:
oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])
self._write_coco_keypoint_results(oks_nmsed_kpts, res_file)
info_str = self._do_python_keypoint_eval(res_file)
name_value = OrderedDict(info_str)
return name_value
def _write_coco_keypoint_results(self, keypoints, res_file):
"""Write results into a json file."""
data_pack = [{
'cat_id': self._class_to_coco_ind[cls],
'cls_ind': cls_ind,
'cls': cls,
'ann_type': 'keypoints',
'keypoints': keypoints
} for cls_ind, cls in enumerate(self.classes)
if not cls == '__background__']
results = self._coco_keypoint_results_one_category_kernel(data_pack[0])
with open(res_file, 'w') as f:
json.dump(results, f, sort_keys=True, indent=4)
def _coco_keypoint_results_one_category_kernel(self, data_pack):
"""Get coco keypoint results."""
cat_id = data_pack['cat_id']
keypoints = data_pack['keypoints']
cat_results = []
for img_kpts in keypoints:
if len(img_kpts) == 0:
continue
_key_points = np.array(
[img_kpt['keypoints'] for img_kpt in img_kpts])
key_points = _key_points.reshape(-1,
self.ann_info['num_joints'] * 3)
for img_kpt, key_point in zip(img_kpts, key_points):
kpt = key_point.reshape((self.ann_info['num_joints'], 3))
left_top = np.amin(kpt, axis=0)
right_bottom = np.amax(kpt, axis=0)
w = right_bottom[0] - left_top[0]
h = right_bottom[1] - left_top[1]
cat_results.append({
'image_id': img_kpt['image_id'],
'category_id': cat_id,
'keypoints': key_point.tolist(),
'score': img_kpt['score'],
'bbox': [left_top[0], left_top[1], w, h]
})
return cat_results
def _do_python_keypoint_eval(self, res_file):
"""Keypoint evaluation using COCOAPI."""
stats_names = [
'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',
'AR .75', 'AR (M)', 'AR (L)'
]
with open(res_file, 'r') as file:
res_json = json.load(file)
if not res_json:
info_str = list(zip(stats_names, [
0,
] * len(stats_names)))
return info_str
coco_det = self.coco.loadRes(res_file)
coco_eval = COCOeval(self.coco, coco_det, 'keypoints', self.sigmas)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
info_str = list(zip(stats_names, coco_eval.stats))
return info_str
| 33.3531
| 92
| 0.528851
|
800ce10d5765dfdc104e7e439f1ab12ffa36e7bf
| 343
|
py
|
Python
|
tests/scripts/test_pclean.py
|
thesamesam/pkgcore
|
be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/test_pclean.py
|
thesamesam/pkgcore
|
be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/test_pclean.py
|
thesamesam/pkgcore
|
be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf
|
[
"BSD-3-Clause"
] | null | null | null |
from pkgcore.scripts import pclean
from pkgcore.test.scripts.helpers import ArgParseMixin
from snakeoil.test import TestCase
class CommandlineTest(TestCase, ArgParseMixin):
_argparser = pclean.argparser
suppress_domain = True
def test_parser(self):
self.assertError('the following arguments are required: subcommand')
| 24.5
| 76
| 0.781341
|
11522c21538644675d3e57063583db9f72bbc551
| 4,639
|
py
|
Python
|
models/slim/export_inference_graph.py
|
SanderVanhove/tensorflow-visual-inspection
|
74ae46ecb4ef1ef08028d51316c4a2b8b5c8a276
|
[
"Apache-2.0"
] | 62
|
2018-01-10T02:27:32.000Z
|
2021-03-02T21:59:24.000Z
|
slim/export_inference_graph.py
|
jraiskin/TF_screenshots_object_detector
|
512b2cd376be20e2eb3dc25f8fd2e3a487605290
|
[
"MIT"
] | 3
|
2018-01-23T06:33:13.000Z
|
2020-02-17T16:25:01.000Z
|
slim/export_inference_graph.py
|
jraiskin/TF_screenshots_object_detector
|
512b2cd376be20e2eb3dc25f8fd2e3a487605290
|
[
"MIT"
] | 23
|
2018-03-10T12:14:56.000Z
|
2020-06-15T06:43:02.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Saves out a GraphDef containing the architecture of the model.
To use it, run something like this, with a model name defined by slim:
bazel build tensorflow_models/slim:export_inference_graph
bazel-bin/tensorflow_models/slim/export_inference_graph \
--model_name=inception_v3 --output_file=/tmp/inception_v3_inf_graph.pb
If you then want to use the resulting model with your own or pretrained
checkpoints as part of a mobile model, you can run freeze_graph to get a graph
def with the variables inlined as constants using:
bazel build tensorflow/python/tools:freeze_graph
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=/tmp/inception_v3_inf_graph.pb \
--input_checkpoint=/tmp/checkpoints/inception_v3.ckpt \
--input_binary=true --output_graph=/tmp/frozen_inception_v3.pb \
--output_node_names=InceptionV3/Predictions/Reshape_1
The output node names will vary depending on the model, but you can inspect and
estimate them using the summarize_graph tool:
bazel build tensorflow/tools/graph_transforms:summarize_graph
bazel-bin/tensorflow/tools/graph_transforms/summarize_graph \
--in_graph=/tmp/inception_v3_inf_graph.pb
To run the resulting graph in C++, you can look at the label_image sample code:
bazel build tensorflow/examples/label_image:label_image
bazel-bin/tensorflow/examples/label_image/label_image \
--image=${HOME}/Pictures/flowers.jpg \
--input_layer=input \
--output_layer=InceptionV3/Predictions/Reshape_1 \
--graph=/tmp/frozen_inception_v3.pb \
--labels=/tmp/imagenet_slim_labels.txt \
--input_mean=0 \
--input_std=255
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.platform import gfile
from datasets import dataset_factory
from nets import nets_factory
slim = tf.contrib.slim
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to save.')
tf.app.flags.DEFINE_boolean(
'is_training', False,
'Whether to save out a training-focused version of the model.')
tf.app.flags.DEFINE_integer(
'image_size', None,
'The image size to use, otherwise use the model default_image_size.')
tf.app.flags.DEFINE_integer(
'batch_size', None,
'Batch size for the exported model. Defaulted to "None" so batch size can '
'be specified at model runtime.')
tf.app.flags.DEFINE_string('dataset_name', 'imagenet',
'The name of the dataset to use with the model.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'output_file', '', 'Where to save the resulting file to.')
tf.app.flags.DEFINE_string(
'dataset_dir', '', 'Directory to save intermediate dataset files to')
FLAGS = tf.app.flags.FLAGS
def main(_):
if not FLAGS.output_file:
raise ValueError('You must supply the path to save to with --output_file')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default() as graph:
dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
FLAGS.dataset_dir)
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
is_training=FLAGS.is_training)
image_size = FLAGS.image_size or network_fn.default_image_size
placeholder = tf.placeholder(name='input', dtype=tf.float32,
shape=[FLAGS.batch_size, image_size,
image_size, 3])
network_fn(placeholder)
graph_def = graph.as_graph_def()
with gfile.GFile(FLAGS.output_file, 'wb') as f:
f.write(graph_def.SerializeToString())
if __name__ == '__main__':
tf.app.run()
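# Editor's note (illustrative addition, not part of the original script): a
# minimal sketch of serializing a GraphDef and parsing it back to list node
# names, assuming TensorFlow 1.x to match the tf.app / slim usage above. A toy
# graph is built in memory so no exported .pb file is required.
_toy_graph = tf.Graph()
with _toy_graph.as_default():
  tf.placeholder(tf.float32, shape=[None, 3], name='input')
_serialized = _toy_graph.as_graph_def().SerializeToString()
_parsed = tf.GraphDef()
_parsed.ParseFromString(_serialized)
print([node.name for node in _parsed.node])  # -> ['input']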
| 37.41129
| 80
| 0.733132
|
a7e3a7db3c6de9d2f3b4cbc2a9022df2548b9699
| 2,255
|
py
|
Python
|
minion_files/edna/fileDiff.py
|
Orange-OpenSource/EDNA
|
38ef05dbdbd7326e8196e25a04e9563b83b30570
|
[
"Apache-2.0"
] | null | null | null |
minion_files/edna/fileDiff.py
|
Orange-OpenSource/EDNA
|
38ef05dbdbd7326e8196e25a04e9563b83b30570
|
[
"Apache-2.0"
] | null | null | null |
minion_files/edna/fileDiff.py
|
Orange-OpenSource/EDNA
|
38ef05dbdbd7326e8196e25a04e9563b83b30570
|
[
"Apache-2.0"
] | 1
|
2021-11-20T16:39:44.000Z
|
2021-11-20T16:39:44.000Z
|
"""
Copyright 2021 Orange
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pyinotify, subprocess, salt.client
str1 = ""
cont = 0
path_attacker = './network_event'
path_healer = './response_action'
def sendAttackEvent(ip, mask, next_hop):
caller = salt.client.Caller('/etc/salt/minion')
caller.sminion.functions['event.send'](
'salt/beacon/minion/inotify//attacker',
{
"change": "IN_MODIFY",
"id": "minion",
"path": path_attacker,
"ip": ip,
"mask": mask,
"next_hop": next_hop
}
)
def sendHealingEvent(ip, mask):
caller = salt.client.Caller('/etc/salt/minion')
caller.sminion.functions['event.send'](
'salt/beacon/minion/inotify//healer',
{
"change": "IN_MODIFY",
"id": "minion",
"path": path_healer,
"ip": ip,
"mask": mask
}
)
def onAttackerChange(ev):
fo = open(path_attacker, "r")
str1 = fo.read();
fo.close()
announce = str1.split("-")[0]
ip = announce.split("/")[0]
mask = announce.split("/")[1]
next_hop = str1.split("-")[1][:-1]
print path_attacker
print str1[:-1]
print ip
print mask
print next_hop
sendAttackEvent(ip, mask, next_hop)
def onHealerChange(ev):
fo = open(path_healer, "r")
str1 = fo.read();
fo.close()
ip = str1.split("/")[0]
mask = str1.split("/")[1][:]
print path_healer
print str1[:]
print ip
print mask
sendHealingEvent(ip, mask)
wm = pyinotify.WatchManager()
wm.add_watch(path_attacker, pyinotify.IN_MODIFY, onAttackerChange)
wm.add_watch(path_healer, pyinotify.IN_MODIFY, onHealerChange)
notifier = pyinotify.Notifier(wm)
notifier.loop()
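# Editor's note (illustrative addition, not part of fileDiff.py): the attacker
# handler above parses the watched file as "<ip>/<mask>-<next_hop>\n", so an
# event can be simulated by writing that format from a second process while the
# notifier loop runs. The addresses below are made up.
with open(path_attacker, 'w') as trigger:
    trigger.write('10.0.0.0/8-192.168.1.254\n')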
| 26.529412
| 75
| 0.636364
|
733f29a74fb536c43ce92f35e764729485ed2339
| 2,120
|
py
|
Python
|
models/LL.py
|
Tarheel-Formal-Methods/kaa
|
5bffede6bdfb11b4108d9373012dfa475fb420b3
|
[
"MIT"
] | 1
|
2020-07-14T16:04:09.000Z
|
2020-07-14T16:04:09.000Z
|
models/LL.py
|
Tarheel-Formal-Methods/sapo-python
|
5bffede6bdfb11b4108d9373012dfa475fb420b3
|
[
"MIT"
] | null | null | null |
models/LL.py
|
Tarheel-Formal-Methods/sapo-python
|
5bffede6bdfb11b4108d9373012dfa475fb420b3
|
[
"MIT"
] | 1
|
2021-03-26T21:11:48.000Z
|
2021-03-26T21:11:48.000Z
|
import sympy as sp
import numpy as np
from kaa.bundle import Bundle
from kaa.model import Model
class LL(Model):
def __init__(self):
x1, x2, x3, x4, x5, x6, x7 = sp.Symbol('x1'), sp.Symbol('x2'), sp.Symbol('x3'), sp.Symbol('x4'),sp.Symbol('x5'), sp.Symbol('x6'), sp.Symbol('x7')
delta = 0.05
dx1 = x1 + (1.4*x3 - 0.9*x1)*delta
dx2 = x2 + (2.5*x5 - 1.5*x2)*delta
dx3 = x3 + (0.6*x7 - 0.8*x2*x3)*delta
dx4 = x4 + (2 - 1.3*x3*x4)*delta
dx5 = x5 + (0.7*x1 - x4*x5)*delta
dx6 = x6 + (0.3*x1 - 3.1*x6)*delta
dx7 = x7 + (1.8*x6 - 1.5*x2*x7)*delta
vars = [x1, x2, x3, x4, x5, x6, x7]
dyns = [dx1, dx2, dx3, dx4, dx5, dx6, dx7]
dim_sys = 7
num_dirs = 9
num_temps = 2
L = np.zeros([num_dirs, dim_sys])
for i in range(dim_sys):
L[i][i] = 1
L[7][2] = 1; L[7][3] = 1;
L[8][3] = 1; L[8][4] = 1;
T = np.zeros([num_temps, dim_sys])
T[0][0] = 0; T[0][1] = 1; T[0][2] = 2; T[0][3] = 3; T[0][4] = 4; T[0][5] = 5; T[0][6] = 6;
#T[0][0] = 1; T[0][1] = 2; T[0][2] = 3; T[0][3] = 4; T[0][4] = 5; T[0][5] = 6; T[0][6] = 7;
T[1][0] = 2; T[1][1] = 3; T[1][2] = 4; T[1][3] = 5; T[1][4] = 6; T[1][5] = 7; T[1][6] = 8;
offu = np.zeros(num_dirs)
offl = np.zeros(num_dirs)
'''
offu[0] = 1.3; offl[0] = -1.1;
offu[1] = 1.15; offl[1] = -0.95;
offu[2] = 1.6; offl[2] = -1.4;
offu[3] = 2.5; offl[3] = -2.3;
offu[4] = 1.1; offl[4] = -0.9;
offu[5] = 0.2; offl[5] = 0.0;
offu[6] = 0.55; offl[6] = -0.35;
'''
offu[0] = 1.21; offl[0] = -1.19;
offu[1] = 1.06; offl[1] = -1.04;
offu[2] = 1.51; offl[2] = -1.49;
offu[3] = 2.41; offl[3] = -2.39;
offu[4] = 1.01; offl[4] = -0.99;
offu[5] = 0.11; offl[5] = -0.09;
offu[6] = 0.46; offl[6] = -0.44;
offu[7] = 2.87; offl[7] = -2.81;
offu[8] = 2.66; offl[8] = -2.58;
b = Bundle(T, L, offu, offl, vars)
super().__init__(b, dyns, vars)
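# Editor's note (illustrative addition, not part of the model class): one
# explicit-Euler update of the first state variable with the same delta used
# above, evaluated at made-up values, to show what the dx* expressions compute.
_x1, _x3 = sp.symbols('x1 x3')
_step = _x1 + (1.4 * _x3 - 0.9 * _x1) * 0.05
print(_step.subs({_x1: 1.2, _x3: 1.5}))  # -> 1.251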
| 31.176471
| 153
| 0.420755
|
6a5678bfeacf834c99af3a2021e0b6a7f6ef429d
| 1,743
|
py
|
Python
|
test/client/network/test_connection.py
|
redmic-project/device-oag-buoy-buoy-client
|
d1644a8a1de63671c8757155333370385f2f02ec
|
[
"MIT"
] | null | null | null |
test/client/network/test_connection.py
|
redmic-project/device-oag-buoy-buoy-client
|
d1644a8a1de63671c8757155333370385f2f02ec
|
[
"MIT"
] | 2
|
2021-03-25T23:09:49.000Z
|
2021-06-02T00:44:01.000Z
|
test/client/network/test_connection.py
|
redmic-project/device-oag-buoy-buoy-client
|
d1644a8a1de63671c8757155333370385f2f02ec
|
[
"MIT"
] | null | null | null |
import unittest
from subprocess import CalledProcessError
from unittest.mock import patch
from nose.tools import eq_
from buoy.client.network import connection
path_ping = '/bin/ping'
class TestConnectionInternet(unittest.TestCase):
def setUp(self):
pass
@patch.object(connection, 'check_call', side_effect=CalledProcessError(1, path_ping))
def test_internet_conection_fail(self, mock_method):
max_attempts = 5
eq_(False, connection.is_connected_to_internet(max_attempts=max_attempts, time_between_attempts=1))
eq_(mock_method.call_count, max_attempts)
@patch.object(connection, 'check_call', side_effect=[CalledProcessError(1, path_ping),
CalledProcessError(1, path_ping),
CalledProcessError(1, path_ping),
CalledProcessError(1, path_ping),
CalledProcessError(1, path_ping),
CalledProcessError(1, path_ping),
CalledProcessError(1, path_ping),
CalledProcessError(1, path_ping),
CalledProcessError(1, path_ping), 0])
def test_internet_conection_return_ok_after_various_fails(self, mock_method):
max_attempts = 10
eq_(True, connection.is_connected_to_internet(max_attempts=max_attempts, time_between_attempts=1))
eq_(mock_method.call_count, max_attempts)
if __name__ == '__main__':
unittest.main()
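# Editor's note (hedged sketch, not the real buoy.client.network.connection
# module): one plausible shape of the function under test, consistent with the
# mocks above -- retry check_call on /bin/ping until it succeeds or the attempt
# budget is exhausted. The host argument is an assumption.
import time
from subprocess import check_call
def _is_connected_to_internet_sketch(max_attempts=3, time_between_attempts=1, host='8.8.8.8'):
    for _ in range(max_attempts):
        try:
            check_call([path_ping, '-c', '1', host])
            return True
        except CalledProcessError:
            time.sleep(time_between_attempts)
    return False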
| 42.512195
| 107
| 0.57315
|
49054df3b18f78f4d5bb898b952539107f50d643
| 504
|
py
|
Python
|
classgrade/gradapp/migrations/0013_auto_20160928_1134.py
|
classgrade/classgrade
|
144dcfc9579e6858ff4aa79835c76b9611ed73b2
|
[
"MIT"
] | 5
|
2016-11-15T17:46:27.000Z
|
2022-01-10T08:06:17.000Z
|
classgrade/gradapp/migrations/0013_auto_20160928_1134.py
|
classgrade/classgrade
|
144dcfc9579e6858ff4aa79835c76b9611ed73b2
|
[
"MIT"
] | 21
|
2016-11-07T14:58:22.000Z
|
2021-02-02T21:41:12.000Z
|
classgrade/gradapp/migrations/0013_auto_20160928_1134.py
|
classgrade/classgrade
|
144dcfc9579e6858ff4aa79835c76b9611ed73b2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-28 11:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gradapp', '0012_auto_20160927_1134'),
]
operations = [
migrations.AlterField(
model_name='evalassignment',
name='grade_assignment_comments',
field=models.TextField(blank=True, default='', max_length=3000),
),
]
| 24
| 76
| 0.640873
|
054007ee80d875453e51b3d798960f09e336c821
| 6,971
|
py
|
Python
|
Code/Lanes1 - Copy - Copy.py
|
rutwik777/ML-and-IoT-based-Self-Driving-car
|
007a41774c0aa3b72e9c2c43c1cfc188573ad55e
|
[
"CC0-1.0"
] | null | null | null |
Code/Lanes1 - Copy - Copy.py
|
rutwik777/ML-and-IoT-based-Self-Driving-car
|
007a41774c0aa3b72e9c2c43c1cfc188573ad55e
|
[
"CC0-1.0"
] | null | null | null |
Code/Lanes1 - Copy - Copy.py
|
rutwik777/ML-and-IoT-based-Self-Driving-car
|
007a41774c0aa3b72e9c2c43c1cfc188573ad55e
|
[
"CC0-1.0"
] | null | null | null |
import numpy as np
from PIL import ImageGrab
import cv2
import time
from numpy import ones,vstack
from numpy.linalg import lstsq
#from directkeys import PressKey, W, A, S, D
from statistics import mean
import mss
import serial
import os
import sys
arduinoSerial = serial.Serial('com4',9600)
time.sleep(1)
def roi(img, vertices):
#blank mask:
mask = np.zeros_like(img)
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, 255)
#returning the image only where mask pixels are nonzero
masked = cv2.bitwise_and(img, mask)
return masked
def draw_lanes(img, lines, color=[0, 255, 255], thickness=1.5):
# if this fails, go with some default line
try:
# finds the maximum y value for a lane marker
# (since we cannot assume the horizon will always be at the same point.)
ys = []
for i in lines:
for ii in i:
ys += [ii[1],ii[3]]
min_y = min(ys)
max_y = 600
new_lines = []
line_dict = {}
for idx,i in enumerate(lines):
for xyxy in i:
# These four lines:
# modified from http://stackoverflow.com/questions/21565994/method-to-return-the-equation-of-a-straight-line-given-two-points
# Used to calculate the definition of a line, given two sets of coords.
x_coords = (xyxy[0],xyxy[2])
y_coords = (xyxy[1],xyxy[3])
A = vstack([x_coords,ones(len(x_coords))]).T
m, b = lstsq(A, y_coords)[0]
# Calculating our new, and improved, xs
x1 = (min_y-b) / m
x2 = (max_y-b) / m
line_dict[idx] = [m,b,[int(x1), min_y, int(x2), max_y]]
new_lines.append([int(x1), min_y, int(x2), max_y])
final_lanes = {}
for idx in line_dict:
final_lanes_copy = final_lanes.copy()
m = line_dict[idx][0]
b = line_dict[idx][1]
line = line_dict[idx][2]
if len(final_lanes) == 0:
final_lanes[m] = [ [m,b,line] ]
else:
found_copy = False
for other_ms in final_lanes_copy:
if not found_copy:
if abs(other_ms*1.2) > abs(m) > abs(other_ms*0.8):
if abs(final_lanes_copy[other_ms][0][1]*1.2) > abs(b) > abs(final_lanes_copy[other_ms][0][1]*0.8):
final_lanes[other_ms].append([m,b,line])
found_copy = True
break
else:
final_lanes[m] = [ [m,b,line] ]
line_counter = {}
for lanes in final_lanes:
line_counter[lanes] = len(final_lanes[lanes])
top_lanes = sorted(line_counter.items(), key=lambda item: item[1])[::-1][:2]
lane1_id = top_lanes[0][0]
lane2_id = top_lanes[1][0]
def average_lane(lane_data):
x1s = []
y1s = []
x2s = []
y2s = []
for data in lane_data:
x1s.append(data[2][0])
y1s.append(data[2][1])
x2s.append(data[2][2])
y2s.append(data[2][3])
return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(mean(y2s))
l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])
return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2, l2_y2], lane1_id, lane2_id
except Exception as e:
print(str(e))
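# Editor's note (illustrative addition): the vstack/ones/lstsq recipe above fits
# y = m*x + b through two points; shown standalone here on made-up coordinates
# (0, 1) and (4, 9), which give slope 2 and intercept 1.
_xc, _yc = (0, 4), (1, 9)
_A = vstack([_xc, ones(len(_xc))]).T
_m, _b = lstsq(_A, _yc, rcond=None)[0]
print(_m, _b)  # -> approximately 2.0 1.0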
def process_img(image):
original_image = image
# convert to gray
processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# edge detection
processed_img = cv2.Canny(processed_img, threshold1 = 50, threshold2=150)
processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
], np.int32)
processed_img = roi(processed_img, [vertices])
# more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
# rho theta thresh min length, max gap:
lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, 150, 200)
m1 = 0
m2 = 0
try:
l1, l2, m1,m2 = draw_lanes(original_image,lines)
cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0,255,0], 30)
cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0,255,0], 30)
except Exception as e:
print(str(e))
pass
try:
for coords in lines:
coords = coords[0]
try:
cv2.line(processed_img, (coords[0], coords[1]), (coords[2], coords[3]), [255,0,0], 3)
except Exception as e:
pass #print(str(e))
except Exception as e:
pass
return processed_img,original_image, m1, m2
def main():
with mss.mss() as sct:
monitor = {"top": 60, "left": 0, "width": 800, "height": 640}
last_time = time.time()
i = 1
while(i < 100):
screen = np.array(sct.grab(monitor))
#pts1 = np.float32([[300, 300], [500, 300], [40, 600], [770, 600]])
#pts2 = np.float32([[0, 0], [800, 0], [0, 640], [800, 640]])
#matrix = cv2.getPerspectiveTransform(pts1, pts2)
#result = cv2.warpPerspective(screen, matrix, (800, 640))
new_screen,original_image,m1,m2 = process_img(screen)
#print('FPS {} seconds'.format(1/(time.time()-last_time)))
last_time = time.time()
cv2.imshow('window', new_screen)
#cv2.imshow('window2', original_image)
if m1 < 0 and m2 < 0:
print ('right')
arduinoSerial.write(b'6') #send 6
elif m1 > 0 and m2 > 0:
print ('Left')
arduinoSerial.write(b'4') #send 4
else:
print ('Straight')
arduinoSerial.write(b'8') #send 8
if (i == 99):
cv2.destroyAllWindows()
if __name__== '__main__':
main()
os.execv(__file__, sys.argv)
else:
i += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
main()
##cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
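# Editor's note (illustrative, self-contained addition): the Canny + HoughLinesP
# step used in process_img, run on a synthetic image so it works without a
# screen grab or serial port. Parameters are illustrative, not tuned.
_canvas = np.zeros((200, 200), dtype=np.uint8)
cv2.line(_canvas, (20, 180), (180, 20), 255, 2)   # one synthetic "lane" edge
_edges = cv2.Canny(_canvas, 50, 150)
_segments = cv2.HoughLinesP(_edges, 1, np.pi / 180, 50, minLineLength=50, maxLineGap=10)
print(None if _segments is None else _segments[:3])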
| 34.509901
| 142
| 0.501793
|
2d2a7166b5fc6d0391c3b941aaf3f909395376bc
| 1,072
|
py
|
Python
|
lagtraj/domain/__init__.py
|
BuildJet/lagtraj
|
a49bff9c165b225b37e212dec4c1d319452cc3f3
|
[
"MIT"
] | 4
|
2020-04-16T22:57:00.000Z
|
2021-10-05T02:37:58.000Z
|
lagtraj/domain/__init__.py
|
BuildJet/lagtraj
|
a49bff9c165b225b37e212dec4c1d319452cc3f3
|
[
"MIT"
] | 112
|
2020-05-21T09:47:14.000Z
|
2022-03-20T16:00:27.000Z
|
lagtraj/domain/__init__.py
|
BuildJet/lagtraj
|
a49bff9c165b225b37e212dec4c1d319452cc3f3
|
[
"MIT"
] | 5
|
2020-05-14T11:04:07.000Z
|
2022-03-11T16:38:35.000Z
|
from collections import namedtuple
from .. import build_data_path
from .sources import (
interpolate_to_height_levels,
interpolate_to_pressure_levels,
) # noqa
from .sources import calc_auxiliary_variable # noqa
from ..input_definitions.examples import LAGTRAJ_EXAMPLES_PATH_PREFIX
LatLonBoundingBox = namedtuple(
"LatLonBoundingBox", ["lat_min", "lat_max", "lon_min", "lon_max"]
)
LatLonSamplingResolution = namedtuple("LatLonSamplingResolution", ["lat", "lon"])
INPUT_REQUIRED_FIELDS = dict(
source=str,
lat_min=float,
lat_max=float,
lon_min=float,
lon_max=float,
lat_samp=float,
lon_samp=float,
)
def build_domain_data_path(root_data_path, domain_name):
# we need to strip the `lagtraj://` prefix before we construct the path
# since the data is stored locally
if domain_name.startswith(LAGTRAJ_EXAMPLES_PATH_PREFIX):
domain_name = domain_name[len(LAGTRAJ_EXAMPLES_PATH_PREFIX) :]
return build_data_path(root_data_path=root_data_path, data_type="domain") / (
f"{domain_name}_data"
)
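# Editor's note (illustrative addition): the prefix stripping performed in
# build_domain_data_path above, shown on a made-up domain name using the same
# LAGTRAJ_EXAMPLES_PATH_PREFIX constant imported at the top of the module.
_example_name = LAGTRAJ_EXAMPLES_PATH_PREFIX + "some_domain"
print(_example_name[len(LAGTRAJ_EXAMPLES_PATH_PREFIX):])  # -> "some_domain"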
| 28.210526
| 81
| 0.747201
|
136d7842d806e91c4d411fa9f213485531273c51
| 10,789
|
py
|
Python
|
salt/modules/trafficserver.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-01-21T00:18:25.000Z
|
2021-07-11T07:35:26.000Z
|
salt/modules/trafficserver.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
salt/modules/trafficserver.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-01-05T09:50:42.000Z
|
2019-08-19T01:43:40.000Z
|
# -*- coding: utf-8 -*-
'''
Apache Traffic Server execution module.
.. versionadded:: 2015.8.0
``traffic_ctl`` is used to execute individual Traffic Server commands and to
script multiple commands in a shell.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import subprocess
# Import salt libs
import salt.utils.path
import salt.utils.stringutils
__virtualname__ = 'trafficserver'
log = logging.getLogger(__name__)
def __virtual__():
if salt.utils.path.which('traffic_ctl') or salt.utils.path.which('traffic_line'):
return __virtualname__
return (False, 'trafficserver execution module not loaded: '
'neither traffic_ctl nor traffic_line was found.')
_TRAFFICLINE = salt.utils.path.which('traffic_line')
_TRAFFICCTL = salt.utils.path.which('traffic_ctl')
def _traffic_ctl(*args):
return [_TRAFFICCTL] + list(args)
def _traffic_line(*args):
return [_TRAFFICLINE] + list(args)
def _statuscmd():
if _TRAFFICCTL:
cmd = _traffic_ctl('server', 'status')
else:
cmd = _traffic_line('--status')
return _subprocess(cmd)
def _subprocess(cmd):
'''
Function to standardize the subprocess call
'''
log.debug('Running: "%s"', ' '.join(cmd))
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()
retcode = proc.wait()
if ret:
return ret
elif retcode != 1:
return True
else:
return False
except OSError as err:
log.error(err)
return False
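# Editor's note (hedged, standalone sketch): the result-normalising behaviour of
# _subprocess above -- return the captured output if there is any, otherwise a
# boolean derived from the return code -- without the Salt logging/encoding
# helpers. Not called here; e.g. _subprocess_sketch(['echo', 'hi']) would
# return the string 'hi'.
def _subprocess_sketch(cmd):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out = proc.communicate()[0].decode().strip()
    retcode = proc.wait()
    if out:
        return out
    return retcode != 1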
def bounce_cluster():
'''
Bounce all Traffic Server nodes in the cluster. Bouncing Traffic Server
shuts down and immediately restarts Traffic Server, node-by-node.
.. code-block:: bash
salt '*' trafficserver.bounce_cluster
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('cluster', 'restart')
else:
cmd = _traffic_line('-B')
return _subprocess(cmd)
def bounce_local(drain=False):
'''
Bounce Traffic Server on the local node. Bouncing Traffic Server shuts down
and immediately restarts the Traffic Server node.
drain
This option modifies the restart behavior such that traffic_server
is not shut down until the number of active client connections
drops to the number given by the
proxy.config.restart.active_client_threshold configuration
variable.
.. code-block:: bash
salt '*' trafficserver.bounce_local
salt '*' trafficserver.bounce_local drain=True
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('server', 'restart')
else:
cmd = _traffic_line('-b')
if drain:
cmd = cmd + ['--drain']
return _subprocess(cmd)
def clear_cluster():
'''
Clears accumulated statistics on all nodes in the cluster.
.. code-block:: bash
salt '*' trafficserver.clear_cluster
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('metric', 'clear', '--cluster')
else:
cmd = _traffic_line('-C')
return _subprocess(cmd)
def clear_node():
'''
Clears accumulated statistics on the local node.
.. code-block:: bash
salt '*' trafficserver.clear_node
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('metric', 'clear')
else:
cmd = _traffic_line('-c')
return _subprocess(cmd)
def restart_cluster():
'''
Restart the traffic_manager process and the traffic_server process on all
the nodes in a cluster.
.. code-block:: bash
salt '*' trafficserver.restart_cluster
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('cluster', 'restart', '--manager')
else:
cmd = _traffic_line('-M')
return _subprocess(cmd)
def restart_local(drain=False):
'''
Restart the traffic_manager and traffic_server processes on the local node.
drain
This option modifies the restart behavior such that
``traffic_server`` is not shut down until the number of
active client connections drops to the number given by the
``proxy.config.restart.active_client_threshold`` configuration
variable.
.. code-block:: bash
salt '*' trafficserver.restart_local
salt '*' trafficserver.restart_local drain=True
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('server', 'restart', '--manager')
else:
cmd = _traffic_line('-L')
if drain:
cmd = cmd + ['--drain']
return _subprocess(cmd)
def match_metric(regex):
'''
Display the current values of all metrics whose names match the
given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_metric regex
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('metric', 'match', regex)
else:
cmd = _traffic_ctl('-m', regex)
return _subprocess(cmd)
def match_config(regex):
'''
Display the current values of all configuration variables whose
names match the given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_config regex
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'match', regex)
else:
cmd = _traffic_line('-m', regex)
return _subprocess(cmd)
def read_config(*args):
'''
Read Traffic Server configuration variable definitions.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.read_config proxy.config.http.keep_alive_post_out
'''
ret = {}
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'get')
else:
cmd = _traffic_line('-r')
try:
for arg in args:
log.debug('Querying: %s', arg)
ret[arg] = _subprocess(cmd + [arg])
except KeyError:
pass
return ret
def read_metric(*args):
'''
Read Traffic Server one or more metrics.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.read_metric proxy.process.http.tcp_hit_count_stat
'''
ret = {}
if _TRAFFICCTL:
cmd = _traffic_ctl('metric', 'get')
else:
cmd = _traffic_line('-r')
try:
for arg in args:
log.debug('Querying: %s', arg)
ret[arg] = _subprocess(cmd + [arg])
except KeyError:
pass
return ret
def set_config(variable, value):
'''
Set the value of a Traffic Server configuration variable.
variable
Name of a Traffic Server configuration variable.
value
The new value to set.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.set_config proxy.config.http.keep_alive_post_out 0
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'set', variable, value)
else:
cmd = _traffic_line('-s', variable, '-v', value)
log.debug('Setting %s to %s', variable, value)
return _subprocess(cmd)
def shutdown():
'''
Shut down Traffic Server on the local node.
.. code-block:: bash
salt '*' trafficserver.shutdown
'''
# Earlier versions of traffic_ctl do not support
# "server stop", so we prefer traffic_line here.
if _TRAFFICLINE:
cmd = _traffic_line('-S')
else:
cmd = _traffic_ctl('server', 'stop')
_subprocess(cmd)
return _statuscmd()
def startup():
'''
Start Traffic Server on the local node.
.. code-block:: bash
        salt '*' trafficserver.startup
'''
# Earlier versions of traffic_ctl do not support
# "server start", so we prefer traffic_line here.
if _TRAFFICLINE:
cmd = _traffic_line('-U')
else:
cmd = _traffic_ctl('server', 'start')
_subprocess(cmd)
return _statuscmd()
def refresh():
'''
Initiate a Traffic Server configuration file reread. Use this command to
update the running configuration after any configuration file modification.
The timestamp of the last reconfiguration event (in seconds since epoch) is
published in the proxy.node.config.reconfigure_time metric.
.. code-block:: bash
salt '*' trafficserver.refresh
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'reload')
else:
cmd = _traffic_line('-x')
return _subprocess(cmd)
def zero_cluster():
'''
Reset performance statistics to zero across the cluster.
.. code-block:: bash
salt '*' trafficserver.zero_cluster
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('metric', 'clear', '--cluster')
else:
cmd = _traffic_line('-Z')
return _subprocess(cmd)
def zero_node():
'''
Reset performance statistics to zero on the local node.
.. code-block:: bash
        salt '*' trafficserver.zero_node
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('metric', 'clear')
else:
cmd = _traffic_line('-z')
return _subprocess(cmd)
def offline(path):
'''
Mark a cache storage device as offline. The storage is identified by a path
which must match exactly a path specified in storage.config. This removes
the storage from the cache and redirects requests that would have used this
storage to other storage. This has exactly the same effect as a disk
failure for that storage. This does not persist across restarts of the
traffic_server process.
.. code-block:: bash
salt '*' trafficserver.offline /path/to/cache
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('storage', 'offline', path)
else:
cmd = _traffic_line('--offline', path)
return _subprocess(cmd)
def alarms():
'''
List all alarm events that have not been acknowledged (cleared).
.. code-block:: bash
salt '*' trafficserver.alarms
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('alarm', 'list')
else:
cmd = _traffic_line('--alarms')
return _subprocess(cmd)
def clear_alarms(alarm):
'''
    Clear (acknowledge) an alarm event. The arguments are ``all`` for all current
    alarms, a specific alarm number (e.g. ``1``), or an alarm string identifier
    (e.g. ``MGMT_ALARM_PROXY_CONFIG_ERROR``).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name]
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('alarm', 'clear', alarm)
else:
cmd = _traffic_line('--clear_alarms', alarm)
return _subprocess(cmd)
def status():
'''
Show the current proxy server status, indicating if we’re running or not.
.. code-block:: bash
salt '*' trafficserver.status
'''
return _statuscmd()
| 22.524008
| 85
| 0.629994
|
cd7830b157914af215c4fa4e2c01e50712083022
| 29,100
|
py
|
Python
|
.github/scripts/trymerge.py
|
arthuryuan1987/pytorch
|
bf730e5039e8bf1614de6ad08ac6a24a1622dcbe
|
[
"Intel"
] | null | null | null |
.github/scripts/trymerge.py
|
arthuryuan1987/pytorch
|
bf730e5039e8bf1614de6ad08ac6a24a1622dcbe
|
[
"Intel"
] | null | null | null |
.github/scripts/trymerge.py
|
arthuryuan1987/pytorch
|
bf730e5039e8bf1614de6ad08ac6a24a1622dcbe
|
[
"Intel"
] | null | null | null |
#!/usr/bin/env python3
import base64
import json
import os
import re
from dataclasses import dataclass
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from typing import cast, Any, Callable, Dict, List, Optional, Tuple, Union
from gitutils import get_git_remote_name, get_git_repo_dir, patterns_to_regex, GitRepo
from functools import lru_cache
from warnings import warn
GH_GET_PR_INFO_QUERY = """
query ($owner: String!, $name: String!, $number: Int!) {
repository(owner: $owner, name: $name) {
pullRequest(number: $number) {
closed
isCrossRepository
author {
login
}
title
body
headRefName
headRepository {
nameWithOwner
}
baseRefName
baseRepository {
nameWithOwner
isPrivate
defaultBranchRef {
name
}
}
mergeCommit {
oid
}
commits_with_authors:commits(first: 100) {
nodes {
commit {
author {
user {
login
}
email
name
}
oid
}
}
totalCount
}
commits(last: 1) {
nodes {
commit {
checkSuites(first: 10) {
nodes {
app {
name
databaseId
}
workflowRun {
workflow {
name
}
}
checkRuns(first: 50) {
nodes {
name
conclusion
}
pageInfo {
endCursor
hasNextPage
}
}
conclusion
}
pageInfo {
endCursor
hasNextPage
}
}
oid
}
}
}
changedFiles
files(first: 100) {
nodes {
path
}
pageInfo {
endCursor
hasNextPage
}
}
reviews(last: 100) {
nodes {
author {
login
}
state
}
totalCount
}
comments(last: 5) {
nodes {
bodyText
author {
login
}
authorAssociation
editor {
login
}
databaseId
}
pageInfo {
startCursor
hasPreviousPage
}
}
}
}
}
"""
GH_GET_PR_NEXT_FILES_QUERY = """
query ($owner: String!, $name: String!, $number: Int!, $cursor: String!) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
files(first: 100, after: $cursor) {
nodes {
path
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
GH_GET_PR_NEXT_CHECK_RUNS = """
query ($owner: String!, $name: String!, $number: Int!, $cursor: String!) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
commits(last: 1) {
nodes {
commit {
oid
checkSuites(first: 10, after: $cursor) {
nodes {
app {
name
databaseId
}
workflowRun {
workflow {
name
}
}
checkRuns(first: 50) {
nodes {
name
conclusion
}
pageInfo {
endCursor
hasNextPage
}
}
conclusion
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
}
}
}
"""
GH_GET_PR_PREV_COMMENTS = """
query ($owner: String!, $name: String!, $number: Int!, $cursor: String!) {
repository(name: $name, owner: $owner) {
pullRequest(number: $number) {
comments(last: 100, before: $cursor) {
nodes {
bodyText
author {
login
}
authorAssociation
editor {
login
}
databaseId
}
pageInfo {
startCursor
hasPreviousPage
}
}
}
}
}
"""
# This query needs read-org permission
GH_GET_TEAM_MEMBERS_QUERY = """
query($org: String!, $name: String!, $cursor: String) {
organization(login: $org) {
team(slug: $name) {
members(first: 100, after: $cursor) {
nodes {
login
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
}
"""
RE_GHSTACK_HEAD_REF = re.compile(r"^(gh/[^/]+/[0-9]+/)head$")
RE_GHSTACK_SOURCE_ID = re.compile(r'^ghstack-source-id: (.+)\n?', re.MULTILINE)
RE_PULL_REQUEST_RESOLVED = re.compile(
r'Pull Request resolved: '
r'https://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)/pull/(?P<number>[0-9]+)',
re.MULTILINE
)
RE_REVERT_CMD = re.compile(r"@pytorch(merge|)bot\s+revert\s+this")
RE_DIFF_REV = re.compile(r'^Differential Revision:.+?(D[0-9]+)', re.MULTILINE)
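# Editor's note (illustrative self-check, not part of the original script): the
# regexes above pull the owner/repo/number triple and a Phabricator diff id out
# of commit-message text. The message below is made up.
_example_msg = (
    "Fix bug\n\n"
    "Pull Request resolved: https://github.com/pytorch/pytorch/pull/12345\n"
    "Differential Revision: D1234567\n"
)
assert RE_PULL_REQUEST_RESOLVED.search(_example_msg).group("number") == "12345"
assert RE_DIFF_REV.search(_example_msg).group(1) == "D1234567"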
def _fetch_url(url: str, *,
headers: Optional[Dict[str, str]] = None,
data: Optional[Dict[str, Any]] = None,
method: Optional[str] = None,
reader: Callable[[Any], Any] = lambda x: x.read()) -> Any:
if headers is None:
headers = {}
token = os.environ.get("GITHUB_TOKEN")
if token is not None and url.startswith('https://api.github.com/'):
headers['Authorization'] = f'token {token}'
data_ = json.dumps(data).encode() if data is not None else None
try:
with urlopen(Request(url, headers=headers, data=data_, method=method)) as conn:
return reader(conn)
except HTTPError as err:
if err.code == 403 and all(key in err.headers for key in ['X-RateLimit-Limit', 'X-RateLimit-Used']):
print(f"Rate limit exceeded: {err.headers['X-RateLimit-Used']}/{err.headers['X-RateLimit-Limit']}")
raise
def fetch_json(url: str,
params: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
headers = {'Accept': 'application/vnd.github.v3+json'}
if params is not None and len(params) > 0:
url += '?' + '&'.join(f"{name}={val}" for name, val in params.items())
return cast(List[Dict[str, Any]], _fetch_url(url, headers=headers, data=data, reader=json.load))
def gh_post_comment(org: str, project: str, pr_num: int, comment: str, dry_run: bool = False) -> List[Dict[str, Any]]:
if dry_run:
print(comment)
return []
return fetch_json(f'https://api.github.com/repos/{org}/{project}/issues/{pr_num}/comments',
data={"body": comment})
def gh_add_labels(org: str, project: str, pr_num: int, labels: Union[str, List[str]]) -> None:
fetch_json(f'https://api.github.com/repos/{org}/{project}/issues/{pr_num}/labels',
data={"labels": labels})
def gh_graphql(query: str, **kwargs: Any) -> Dict[str, Any]:
rc = _fetch_url("https://api.github.com/graphql", data={"query": query, "variables": kwargs}, reader=json.load)
if "errors" in rc:
raise RuntimeError(f"GraphQL query {query}, args {kwargs} failed: {rc['errors']}")
return cast(Dict[str, Any], rc)
def gh_get_pr_info(org: str, proj: str, pr_no: int) -> Any:
rc = gh_graphql(GH_GET_PR_INFO_QUERY, name=proj, owner=org, number=pr_no)
return rc["data"]["repository"]["pullRequest"]
@lru_cache(maxsize=None)
def gh_get_team_members(org: str, name: str) -> List[str]:
rc: List[str] = []
team_members: Dict[str, Any] = {"pageInfo": {"hasNextPage": "true", "endCursor": None}}
while bool(team_members["pageInfo"]["hasNextPage"]):
query = gh_graphql(GH_GET_TEAM_MEMBERS_QUERY, org=org, name=name, cursor=team_members["pageInfo"]["endCursor"])
team = query["data"]["organization"]["team"]
if team is None:
warn(f"Requested non-existing team {org}/{name}")
return []
team_members = team["members"]
rc += [member["login"] for member in team_members["nodes"]]
return rc
def parse_args() -> Any:
from argparse import ArgumentParser
parser = ArgumentParser("Merge PR into default branch")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--revert", action="store_true")
parser.add_argument("--force", action="store_true")
parser.add_argument("--comment-id", type=int)
parser.add_argument("pr_num", type=int)
return parser.parse_args()
@dataclass
class GitHubComment:
body_text: str
author_login: str
author_association: str
editor_login: Optional[str]
database_id: int
class GitHubPR:
def __init__(self, org: str, project: str, pr_num: int) -> None:
assert isinstance(pr_num, int)
self.org = org
self.project = project
self.pr_num = pr_num
self.info = gh_get_pr_info(org, project, pr_num)
self.changed_files: Optional[List[str]] = None
self.conclusions: Optional[Dict[str, str]] = None
self.comments: Optional[List[GitHubComment]] = None
def is_closed(self) -> bool:
return bool(self.info["closed"])
def is_cross_repo(self) -> bool:
return bool(self.info["isCrossRepository"])
def base_ref(self) -> str:
return cast(str, self.info["baseRefName"])
def default_branch(self) -> str:
return cast(str, self.info["baseRepository"]["defaultBranchRef"]["name"])
def head_ref(self) -> str:
return cast(str, self.info["headRefName"])
def is_ghstack_pr(self) -> bool:
return RE_GHSTACK_HEAD_REF.match(self.head_ref()) is not None
def is_base_repo_private(self) -> bool:
return bool(self.info["baseRepository"]["isPrivate"])
def get_changed_files_count(self) -> int:
return int(self.info["changedFiles"])
def get_changed_files(self) -> List[str]:
if self.changed_files is None:
info = self.info
self.changed_files = []
# Do not try to fetch more than 10K files
for _ in range(100):
self.changed_files += [x["path"] for x in info["files"]["nodes"]]
if not info["files"]["pageInfo"]["hasNextPage"]:
break
rc = gh_graphql(GH_GET_PR_NEXT_FILES_QUERY,
name=self.project,
owner=self.org,
number=self.pr_num,
cursor=info["files"]["pageInfo"]["endCursor"])
info = rc["data"]["repository"]["pullRequest"]
if len(self.changed_files) != self.get_changed_files_count():
raise RuntimeError("Changed file count mismatch")
return self.changed_files
def _get_reviewers(self) -> List[Tuple[str, str]]:
reviews_count = int(self.info["reviews"]["totalCount"])
nodes = self.info["reviews"]["nodes"]
if len(nodes) != reviews_count:
raise RuntimeError("Can't fetch all PR reviews")
reviews = {}
for node in nodes:
author = node["author"]["login"]
state = node["state"]
if state != "COMMENTED":
reviews[author] = state
return list(reviews.items())
def get_approved_by(self) -> List[str]:
return [login for (login, state) in self._get_reviewers() if state == "APPROVED"]
def get_commit_count(self) -> int:
return int(self.info["commits_with_authors"]["totalCount"])
def get_pr_creator_login(self) -> str:
return cast(str, self.info["author"]["login"])
def get_committer_login(self, num: int = 0) -> str:
user = self.info["commits_with_authors"]["nodes"][num]["commit"]["author"]["user"]
# If author is not github user, user node will be null
if user is None:
return ""
return cast(str, user["login"])
def get_committer_author(self, num: int = 0) -> str:
node = self.info["commits_with_authors"]["nodes"][num]["commit"]["author"]
return f"{node['name']} <{node['email']}>"
def get_checkrun_conclusions(self) -> Dict[str, str]:
""" Returns list of checkrun / conclusions """
if self.conclusions is not None:
return self.conclusions
orig_last_commit = self.info["commits"]["nodes"][-1]["commit"]
checksuites = orig_last_commit["checkSuites"]
conclusions = {}
def add_conclusions(nodes: List[Dict[str, Any]]) -> None:
for node in nodes:
workflow_run = node["workflowRun"]
checkruns = node["checkRuns"]
if workflow_run is not None:
conclusions[workflow_run["workflow"]["name"]] = node["conclusion"]
continue
if checkruns is not None:
for checkrun_node in checkruns["nodes"]:
conclusions[checkrun_node["name"]] = checkrun_node["conclusion"]
add_conclusions(checksuites["nodes"])
while bool(checksuites["pageInfo"]["hasNextPage"]):
rc = gh_graphql(GH_GET_PR_NEXT_CHECK_RUNS,
name=self.project,
owner=self.org,
number=self.pr_num,
cursor=checksuites["pageInfo"]["endCursor"])
info = rc["data"]["repository"]["pullRequest"]
last_commit = info["commits"]["nodes"][-1]["commit"]
if last_commit["oid"] != orig_last_commit["oid"]:
raise RuntimeError("Last commit changed on PR")
checksuites = last_commit["checkSuites"]
add_conclusions(checksuites["nodes"])
self.conclusions = conclusions
return conclusions
def get_authors(self) -> Dict[str, str]:
rc = {}
for idx in range(self.get_commit_count()):
rc[self.get_committer_login(idx)] = self.get_committer_author(idx)
return rc
def get_author(self) -> str:
authors = self.get_authors()
if len(authors) == 1:
return next(iter(authors.values()))
return self.get_authors()[self.get_pr_creator_login()]
def get_title(self) -> str:
return cast(str, self.info["title"])
def get_body(self) -> str:
return cast(str, self.info["body"])
def get_merge_commit(self) -> Optional[str]:
mc = self.info["mergeCommit"]
return mc["oid"] if mc is not None else None
def get_pr_url(self) -> str:
return f"https://github.com/{self.org}/{self.project}/pull/{self.pr_num}"
@staticmethod
def _comment_from_node(node: Any) -> GitHubComment:
editor = node["editor"]
return GitHubComment(body_text=node["bodyText"],
author_login=node["author"]["login"],
author_association=node["authorAssociation"],
editor_login=editor["login"] if editor else None,
database_id=node["databaseId"]
)
def get_comments(self) -> List[GitHubComment]:
if self.comments is not None:
return self.comments
self.comments = []
info = self.info["comments"]
# Do not try to fetch more than 10K comments
for _ in range(100):
self.comments = [self._comment_from_node(node) for node in info["nodes"]] + self.comments
if not info["pageInfo"]["hasPreviousPage"]:
break
rc = gh_graphql(GH_GET_PR_PREV_COMMENTS,
name=self.project,
owner=self.org,
number=self.pr_num,
cursor=info["pageInfo"]["startCursor"])
info = rc["data"]["repository"]["pullRequest"]["comments"]
return self.comments
def get_last_comment(self) -> GitHubComment:
return self._comment_from_node(self.info["comments"]["nodes"][-1])
def get_comment_by_id(self, database_id: int) -> GitHubComment:
if self.comments is None:
# Fastpath - try searching in partial prefetched comments
for node in self.info["comments"]["nodes"]:
comment = self._comment_from_node(node)
if comment.database_id == database_id:
return comment
for comment in self.get_comments():
if comment.database_id == database_id:
return comment
raise RuntimeError(f"Comment with id {database_id} not found")
def get_diff_revision(self) -> Optional[str]:
rc = RE_DIFF_REV.search(self.get_body())
return rc.group(1) if rc is not None else None
def has_internal_changes(self) -> bool:
checkrun_name = "Meta Internal-Only Changes Check"
if self.get_diff_revision() is None:
return False
checks = self.get_checkrun_conclusions()
if checks is None or checkrun_name not in checks:
return False
return checks[checkrun_name] != "SUCCESS"
def merge_ghstack_into(self, repo: GitRepo, force: bool) -> None:
assert self.is_ghstack_pr()
approved_by = self.get_approved_by()
# For ghstack, cherry-pick commits based from origin
orig_ref = f"{repo.remote}/{re.sub(r'/head$', '/orig', self.head_ref())}"
rev_list = repo.revlist(f"{self.default_branch()}..{orig_ref}")
for idx, rev in enumerate(reversed(rev_list)):
msg = repo.commit_message(rev)
m = RE_PULL_REQUEST_RESOLVED.search(msg)
if m is None:
raise RuntimeError(f"Could not find PR-resolved string in {msg} of ghstacked PR {self.pr_num}")
if self.org != m.group('owner') or self.project != m.group('repo'):
raise RuntimeError(f"PR {m.group('number')} resolved to wrong owner/repo pair")
pr_num = int(m.group('number'))
if pr_num != self.pr_num:
pr = GitHubPR(self.org, self.project, pr_num)
if pr.is_closed():
print(f"Skipping {idx+1} of {len(rev_list)} PR (#{pr_num}) as its already been merged")
continue
approved_by = pr.get_approved_by()
# Raises exception if matching rule is not found
find_matching_merge_rule(pr, repo, force=force)
# Adding the url here makes it clickable within the Github UI
approved_by_urls = ', '.join(prefix_with_github_url(login) for login in approved_by)
repo.cherry_pick(rev)
msg = re.sub(RE_GHSTACK_SOURCE_ID, "", msg)
msg += f"\nApproved by: {approved_by_urls}\n"
repo.amend_commit_message(msg)
def merge_into(self, repo: GitRepo, *, force: bool = False, dry_run: bool = False) -> None:
# Raises exception if matching rule is not found
find_matching_merge_rule(self, repo, force=force)
if self.has_internal_changes():
raise RuntimeError("This PR must be landed via phabricator")
if repo.current_branch() != self.default_branch():
repo.checkout(self.default_branch())
if not self.is_ghstack_pr():
# Adding the url here makes it clickable within the Github UI
approved_by_urls = ', '.join(prefix_with_github_url(login) for login in self.get_approved_by())
msg = self.get_title() + "\n\n" + self.get_body()
msg += f"\nPull Request resolved: {self.get_pr_url()}\n"
msg += f"Approved by: {approved_by_urls}\n"
pr_branch_name = f"__pull-request-{self.pr_num}__init__"
repo.fetch(f"pull/{self.pr_num}/head", pr_branch_name)
repo._run_git("merge", "--squash", pr_branch_name)
repo._run_git("commit", f"--author=\"{self.get_author()}\"", "-m", msg)
else:
self.merge_ghstack_into(repo, force)
repo.push(self.default_branch(), dry_run)
@dataclass
class MergeRule:
name: str
patterns: List[str]
approved_by: List[str]
mandatory_checks_name: Optional[List[str]]
def read_merge_rules(repo: Optional[GitRepo], org: str, project: str) -> List[MergeRule]:
from pathlib import Path
repo_relative_rules_path = Path(".github") / "merge_rules.json"
if repo is None:
json_data = _fetch_url(
f"https://api.github.com/repos/{org}/{project}/contents/{repo_relative_rules_path}",
headers={'Accept': 'application/vnd.github.v3+json'},
reader=json.load,
)
content = base64.b64decode(json_data["content"])
return cast(List[MergeRule], json.loads(content, object_hook=lambda x: MergeRule(**x)))
else:
rules_path = Path(repo.repo_dir) / repo_relative_rules_path
if not rules_path.exists():
print(f"{rules_path} does not exist, returning empty rules")
return []
with open(rules_path) as fp:
rc = json.load(fp, object_hook=lambda x: MergeRule(**x))
return cast(List[MergeRule], rc)
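# Editor's note (illustrative addition): the object_hook trick used in
# read_merge_rules turns each JSON object directly into a MergeRule dataclass.
# The rule below is made up and is not taken from the repository's real
# merge_rules.json.
_example_rules = json.loads(
    '[{"name": "Docs", "patterns": ["docs/**"], "approved_by": ["someuser"],'
    ' "mandatory_checks_name": ["Lint"]}]',
    object_hook=lambda x: MergeRule(**x),
)
assert _example_rules[0].name == "Docs" and _example_rules[0].patterns == ["docs/**"]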
def find_matching_merge_rule(pr: GitHubPR,
repo: Optional[GitRepo] = None,
force: bool = False,
skip_internal_checks: bool = False
) -> MergeRule:
"""Returns merge rule matching to this pr or raises an exception"""
changed_files = pr.get_changed_files()
approved_by = set(pr.get_approved_by())
rules = read_merge_rules(repo, pr.org, pr.project)
reject_reason = f"PR {pr.pr_num} does not match merge rules"
# Used to determine best rejection reason
# Score 0 to 10K - how many files rule matched
# Score 10K - matched all files, but no overlapping approvers
# Score 20K - matched all files and approvers, but lacks mandatory checks
reject_reason_score = 0
for rule in rules:
rule_name = rule.name
patterns_re = patterns_to_regex(rule.patterns)
non_matching_files = []
for fname in changed_files:
if not patterns_re.match(fname):
non_matching_files.append(fname)
if len(non_matching_files) > 0:
num_matching_files = len(changed_files) - len(non_matching_files)
if num_matching_files > reject_reason_score:
reject_reason_score = num_matching_files
reject_reason = (f"{num_matching_files} files matched rule {rule_name}, but there are still non-matching files: " +
f"{','.join(non_matching_files[:5])}{', ...' if len(non_matching_files) > 5 else ''}")
continue
# If rule needs approvers but PR has not been reviewed, skip it
if len(rule.approved_by) > 0 and len(approved_by) == 0:
if reject_reason_score < 10000:
reject_reason_score = 10000
reject_reason = f"Matched rule {rule_name}, but PR has not been reviewed yet"
continue
rule_approvers_set = set()
for approver in rule.approved_by:
if "/" in approver:
org, name = approver.split("/")
rule_approvers_set.update(gh_get_team_members(org, name))
else:
rule_approvers_set.add(approver)
approvers_intersection = approved_by.intersection(rule_approvers_set)
# If rule requires approvers but they aren't the ones that reviewed PR
if len(approvers_intersection) == 0 and len(rule_approvers_set) > 0:
if reject_reason_score < 10000:
reject_reason_score = 10000
reject_reason = (f"Matched rule {rule_name}, but it was not reviewed yet by any of:" +
f"{','.join(list(rule_approvers_set)[:5])}{', ...' if len(rule_approvers_set) > 5 else ''}")
continue
if rule.mandatory_checks_name is not None:
pass_checks = True
checks = pr.get_checkrun_conclusions()
# HACK: We don't want to skip CLA check, even when forced
for checkname in filter(lambda x: force is False or "CLA Check" in x, rule.mandatory_checks_name):
if checkname not in checks or checks[checkname] != "SUCCESS":
if reject_reason_score < 20000:
reject_reason_score = 20000
reject_reason = f"Refusing to merge as mandatory check {checkname} "
reject_reason += "has not been run" if checkname not in checks else "failed"
reject_reason += f" for rule {rule_name}"
pass_checks = False
if not pass_checks:
continue
if not skip_internal_checks and pr.has_internal_changes():
raise RuntimeError("This PR has internal changes and must be landed via Phabricator")
return rule
raise RuntimeError(reject_reason)
def try_revert(repo: GitRepo, pr: GitHubPR, *, dry_run: bool = False, comment_id: Optional[int] = None) -> None:
def post_comment(msg: str) -> None:
gh_post_comment(pr.org, pr.project, pr.pr_num, msg, dry_run=dry_run)
if not pr.is_closed():
return post_comment(f"Can't revert open PR #{pr.pr_num}")
comment = pr.get_last_comment() if comment_id is None else pr.get_comment_by_id(comment_id)
if not RE_REVERT_CMD.match(comment.body_text):
raise RuntimeError(f"Comment {comment.body_text} does not seem to be a valid revert command")
if comment.editor_login is not None:
return post_comment("Don't want to revert based on edited command")
author_association = comment.author_association
author_login = comment.author_login
# For some reason, one can not be a member of private repo, only CONTRIBUTOR
expected_association = "CONTRIBUTOR" if pr.is_base_repo_private() else "MEMBER"
if author_association != expected_association and author_association != "OWNER":
return post_comment(f"Will not revert as @{author_login} is not a {expected_association}, but {author_association}")
# Raises exception if matching rule is not found, but ignores all status checks
find_matching_merge_rule(pr, repo, force=True)
commit_sha = pr.get_merge_commit()
if commit_sha is None:
commits = repo.commits_resolving_gh_pr(pr.pr_num)
if len(commits) == 0:
raise RuntimeError("Can't find any commits resolving PR")
commit_sha = commits[0]
msg = repo.commit_message(commit_sha)
rc = RE_DIFF_REV.search(msg)
if rc is not None:
raise RuntimeError(f"Can't revert PR that was landed via phabricator as {rc.group(1)}")
repo.checkout(pr.default_branch())
repo.revert(commit_sha)
msg = repo.commit_message("HEAD")
msg = re.sub(RE_PULL_REQUEST_RESOLVED, "", msg)
msg += f"\nReverted {pr.get_pr_url()} on behalf of {prefix_with_github_url(author_login)}\n"
repo.amend_commit_message(msg)
repo.push(pr.default_branch(), dry_run)
if not dry_run:
gh_add_labels(pr.org, pr.project, pr.pr_num, ["reverted"])
def prefix_with_github_url(suffix_str: str) -> str:
return f"https://github.com/{suffix_str}"
def main() -> None:
args = parse_args()
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
org, project = repo.gh_owner_and_name()
pr = GitHubPR(org, project, args.pr_num)
if args.revert:
try:
try_revert(repo, pr, dry_run=args.dry_run, comment_id=args.comment_id)
except Exception as e:
msg = f"Reverting PR {args.pr_num} failed due to {e}"
run_url = os.getenv("GH_RUN_URL")
if run_url is not None:
msg += f"\nRaised by {run_url}"
gh_post_comment(org, project, args.pr_num, msg, dry_run=args.dry_run)
return
if pr.is_closed():
gh_post_comment(org, project, args.pr_num, f"Can't merge closed PR #{args.pr_num}", dry_run=args.dry_run)
return
if pr.is_cross_repo() and pr.is_ghstack_pr():
gh_post_comment(org, project, args.pr_num, "Cross-repo ghstack merges are not supported", dry_run=args.dry_run)
return
try:
pr.merge_into(repo, dry_run=args.dry_run, force=args.force)
except Exception as e:
msg = f"Merge failed due to {e}"
run_url = os.getenv("GH_RUN_URL")
if run_url is not None:
msg += f"\nRaised by {run_url}"
gh_post_comment(org, project, args.pr_num, msg, dry_run=args.dry_run)
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()
| 37.164751
| 131
| 0.578797
|
5987e72b254ac255443c1f347f89e2bb8b1d74aa
| 16,825
|
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py
|
aalapatirvbd/azure-sdk-for-python
|
aae867a31f53286b123cf008a43cf0cd3746f8ba
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py
|
aalapatirvbd/azure-sdk-for-python
|
aae867a31f53286b123cf008a43cf0cd3746f8ba
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py
|
aalapatirvbd/azure-sdk-for-python
|
aae867a31f53286b123cf008a43cf0cd3746f8ba
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ServiceRequestError, ClientAuthenticationError, HttpResponseError
from azure.ai.formrecognizer import FormContentType
from azure.ai.formrecognizer.aio import FormRecognizerClient, FormTrainingClient
from azure.ai.formrecognizer._generated.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_form_result
from testcase import GlobalFormRecognizerAccountPreparer
from testcase import GlobalTrainingAccountPreparer as _GlobalTrainingAccountPreparer
from asynctestcase import AsyncFormRecognizerTest
GlobalTrainingAccountPreparer = functools.partial(_GlobalTrainingAccountPreparer, FormTrainingClient)
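# Editor's note (standalone aside): functools.partial above pre-binds
# FormTrainingClient so the preparer decorator can be reused without repeating
# the client class. The same pattern in miniature, with made-up names:
def _describe(client_cls, region):
    return "%s in %s" % (client_cls.__name__, region)
_bound_describe = functools.partial(_describe, dict)  # `dict` stands in for a client class
assert _bound_describe("westus") == "dict in westus"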
class TestCustomFormsAsync(AsyncFormRecognizerTest):
@GlobalFormRecognizerAccountPreparer()
async def test_custom_form_none_model_id(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key))
with self.assertRaises(ValueError):
await client.begin_recognize_custom_forms(model_id=None, form=b"xx")
@GlobalFormRecognizerAccountPreparer()
async def test_custom_form_empty_model_id(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key))
with self.assertRaises(ValueError):
await client.begin_recognize_custom_forms(model_id="", form=b"xx")
@GlobalFormRecognizerAccountPreparer()
async def test_custom_form_bad_endpoint(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
with open(self.form_jpg, "rb") as fd:
myfile = fd.read()
with self.assertRaises(ServiceRequestError):
client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(form_recognizer_account_key))
poller = await client.begin_recognize_custom_forms(model_id="xx", form=myfile)
result = await poller.result()
@GlobalFormRecognizerAccountPreparer()
async def test_authentication_bad_key(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential("xxxx"))
with self.assertRaises(ClientAuthenticationError):
poller = await client.begin_recognize_custom_forms(model_id="xx", form=b"xx", content_type="image/jpeg")
result = await poller.result()
@GlobalFormRecognizerAccountPreparer()
async def test_passing_unsupported_url_content_type(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key))
with self.assertRaises(TypeError):
poller = await client.begin_recognize_custom_forms(model_id="xx", form="https://badurl.jpg", content_type="application/json")
result = await poller.result()
@GlobalFormRecognizerAccountPreparer()
async def test_auto_detect_unsupported_stream_content(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key))
with open(self.unsupported_content_py, "rb") as fd:
myfile = fd.read()
with self.assertRaises(ValueError):
poller = await client.begin_recognize_custom_forms(
model_id="xxx",
form=myfile,
)
result = await poller.result()
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer()
async def test_custom_form_damaged_file(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_poller = await client.begin_training(container_sas_url, use_training_labels=False)
model = await training_poller.result()
with self.assertRaises(HttpResponseError):
poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
b"\x25\x50\x44\x46\x55\x55\x55",
)
result = await poller.result()
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer()
async def test_custom_form_unlabeled(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_poller = await client.begin_training(container_sas_url, use_training_labels=False)
model = await training_poller.result()
with open(self.form_jpg, "rb") as fd:
myfile = fd.read()
poller = await fr_client.begin_recognize_custom_forms(model.model_id, myfile, content_type=FormContentType.image_jpeg)
form = await poller.result()
self.assertEqual(form[0].form_type, "form-0")
self.assertFormPagesHasValues(form[0].pages)
for label, field in form[0].fields.items():
self.assertIsNotNone(field.confidence)
self.assertIsNotNone(field.name)
self.assertIsNotNone(field.value)
self.assertIsNotNone(field.value_data.text)
self.assertIsNotNone(field.label_data.text)
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer(multipage=True)
async def test_custom_form_multipage_unlabeled(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_poller = await client.begin_training(container_sas_url, use_training_labels=False)
model = await training_poller.result()
with open(self.multipage_invoice_pdf, "rb") as fd:
myfile = fd.read()
poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
myfile,
content_type=FormContentType.application_pdf
)
forms = await poller.result()
for form in forms:
self.assertEqual(form.form_type, "form-0")
self.assertFormPagesHasValues(form.pages)
for label, field in form.fields.items():
self.assertIsNotNone(field.confidence)
self.assertIsNotNone(field.name)
self.assertIsNotNone(field.value)
self.assertIsNotNone(field.value_data.text)
self.assertIsNotNone(field.label_data.text)
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer()
async def test_custom_form_labeled(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_poller = await client.begin_training(container_sas_url, use_training_labels=True)
model = await training_poller.result()
with open(self.form_jpg, "rb") as fd:
myfile = fd.read()
poller = await fr_client.begin_recognize_custom_forms(model.model_id, myfile, content_type=FormContentType.image_jpeg)
form = await poller.result()
self.assertEqual(form[0].form_type, "form-"+model.model_id)
self.assertFormPagesHasValues(form[0].pages)
for label, field in form[0].fields.items():
self.assertIsNotNone(field.confidence)
self.assertIsNotNone(field.name)
self.assertIsNotNone(field.value_data.text)
self.assertIsNotNone(field.value_data.bounding_box)
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer(multipage=True)
async def test_custom_form_multipage_labeled(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_poller = await client.begin_training(
container_sas_url,
use_training_labels=True
)
model = await training_poller.result()
with open(self.multipage_invoice_pdf, "rb") as fd:
myfile = fd.read()
poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
myfile,
content_type=FormContentType.application_pdf
)
forms = await poller.result()
for form in forms:
self.assertEqual(form.form_type, "form-"+model.model_id)
self.assertFormPagesHasValues(form.pages)
for label, field in form.fields.items():
self.assertIsNotNone(field.confidence)
self.assertIsNotNone(field.name)
self.assertIsNotNone(field.value_data.text)
self.assertIsNotNone(field.value_data.bounding_box)
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer()
async def test_form_unlabeled_transform(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_poller = await client.begin_training(container_sas_url, use_training_labels=False)
model = await training_poller.result()
responses = []
def callback(raw_response, _, headers):
analyze_result = fr_client._client._deserialize(AnalyzeOperationResult, raw_response)
form = prepare_form_result(analyze_result, model.model_id)
responses.append(analyze_result)
responses.append(form)
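        # Descriptive note: passing cls=callback to begin_recognize_custom_forms below lets
        # the test capture both the raw AnalyzeOperationResult and the transformed
        # RecognizedForm list, so the assertions can compare the service payload against
        # the SDK model field by field.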
with open(self.form_jpg, "rb") as fd:
myfile = fd.read()
poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
myfile,
include_text_content=True,
cls=callback
)
form = await poller.result()
actual = responses[0]
recognized_form = responses[1]
read_results = actual.analyze_result.read_results
page_results = actual.analyze_result.page_results
actual_fields = actual.analyze_result.page_results[0].key_value_pairs
self.assertFormPagesTransformCorrect(recognized_form[0].pages, read_results, page_results)
self.assertEqual(recognized_form[0].page_range.first_page_number, page_results[0].page)
self.assertEqual(recognized_form[0].page_range.last_page_number, page_results[0].page)
self.assertUnlabeledFormFieldDictTransformCorrect(recognized_form[0].fields, actual_fields, read_results)
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer(multipage=True)
async def test_custom_forms_multipage_unlabeled_transform(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_poller = await client.begin_training(container_sas_url, use_training_labels=False)
model = await training_poller.result()
responses = []
def callback(raw_response, _, headers):
analyze_result = fr_client._client._deserialize(AnalyzeOperationResult, raw_response)
form = prepare_form_result(analyze_result, model.model_id)
responses.append(analyze_result)
responses.append(form)
with open(self.multipage_invoice_pdf, "rb") as fd:
myfile = fd.read()
poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
myfile,
include_text_content=True,
cls=callback
)
form = await poller.result()
actual = responses[0]
recognized_form = responses[1]
read_results = actual.analyze_result.read_results
page_results = actual.analyze_result.page_results
# bug_skip_text_content should be removed after bug fix: https://github.com/Azure/azure-sdk-for-python/issues/11014
self.assertFormPagesTransformCorrect(recognized_form, read_results, page_results, bug_skip_text_content=True)
for form, actual in zip(recognized_form, page_results):
self.assertEqual(form.page_range.first_page_number, actual.page)
self.assertEqual(form.page_range.last_page_number, actual.page)
self.assertUnlabeledFormFieldDictTransformCorrect(form.fields, actual.key_value_pairs, read_results, bug_skip_text_content=True)
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer()
async def test_form_labeled_transform(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_polling = await client.begin_training(container_sas_url, use_training_labels=True)
model = await training_polling.result()
responses = []
def callback(raw_response, _, headers):
analyze_result = fr_client._client._deserialize(AnalyzeOperationResult, raw_response)
form = prepare_form_result(analyze_result, model.model_id)
responses.append(analyze_result)
responses.append(form)
with open(self.form_jpg, "rb") as fd:
myfile = fd.read()
poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
myfile,
include_text_content=True,
cls=callback
)
form = await poller.result()
actual = responses[0]
recognized_form = responses[1]
read_results = actual.analyze_result.read_results
page_results = actual.analyze_result.page_results
actual_fields = actual.analyze_result.document_results[0].fields
self.assertFormPagesTransformCorrect(recognized_form[0].pages, read_results, page_results)
self.assertEqual(recognized_form[0].page_range.first_page_number, page_results[0].page)
self.assertEqual(recognized_form[0].page_range.last_page_number, page_results[0].page)
self.assertLabeledFormFieldDictTransformCorrect(recognized_form[0].fields, actual_fields, read_results)
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer(multipage=True)
async def test_custom_forms_multipage_labeled_transform(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
training_poller = await client.begin_training(container_sas_url, use_training_labels=True)
model = await training_poller.result()
responses = []
def callback(raw_response, _, headers):
analyze_result = fr_client._client._deserialize(AnalyzeOperationResult, raw_response)
form = prepare_form_result(analyze_result, model.model_id)
responses.append(analyze_result)
responses.append(form)
with open(self.multipage_invoice_pdf, "rb") as fd:
myfile = fd.read()
poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
myfile,
include_text_content=True,
cls=callback
)
form = await poller.result()
actual = responses[0]
recognized_form = responses[1]
read_results = actual.analyze_result.read_results
page_results = actual.analyze_result.page_results
document_results = actual.analyze_result.document_results
self.assertFormPagesTransformCorrect(recognized_form, read_results, page_results)
for form, actual in zip(recognized_form, document_results):
self.assertEqual(form.page_range.first_page_number, actual.page_range[0])
self.assertEqual(form.page_range.last_page_number, actual.page_range[1])
self.assertEqual(form.form_type, "form-"+model.model_id)
self.assertLabeledFormFieldDictTransformCorrect(form.fields, actual.fields, read_results)
@GlobalFormRecognizerAccountPreparer()
@GlobalTrainingAccountPreparer()
@pytest.mark.live_test_only
async def test_custom_form_continuation_token(self, client, container_sas_url):
fr_client = client.get_form_recognizer_client()
poller = await client.begin_training(container_sas_url, use_training_labels=False)
model = await poller.result()
with open(self.form_jpg, "rb") as fd:
myfile = fd.read()
initial_poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
myfile
)
cont_token = initial_poller.continuation_token()
poller = await fr_client.begin_recognize_custom_forms(
model.model_id,
myfile,
continuation_token=cont_token
)
result = await poller.result()
self.assertIsNotNone(result)
await initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error
| 44.986631
| 144
| 0.710728
|
07ec650881bccf4084d626900a5f1943fbc705c5
| 7,654
|
py
|
Python
|
imagenet/models/resnet.py
|
matej-ulicny/harmonic-networks
|
0fccf674806a0b876e641ef5271aad520ff90739
|
[
"BSD-3-Clause"
] | 41
|
2019-05-02T02:58:20.000Z
|
2022-03-28T22:37:16.000Z
|
imagenet/models/resnet.py
|
matej-ulicny/harmonic-networks
|
0fccf674806a0b876e641ef5271aad520ff90739
|
[
"BSD-3-Clause"
] | 4
|
2020-09-14T12:50:24.000Z
|
2021-03-19T04:36:46.000Z
|
imagenet/models/resnet.py
|
matej-ulicny/harmonic-networks
|
0fccf674806a0b876e641ef5271aad520ff90739
|
[
"BSD-3-Clause"
] | 8
|
2019-05-04T09:37:06.000Z
|
2021-08-15T15:38:45.000Z
|
"""
Definition of harmonic Residual Networks.
Licensed under the BSD License [see LICENSE for details].
Written by Matej Ulicny, based on torchvision implementation:
https://github.com/pytorch/vision/tree/master/torchvision/models
"""
import torch.nn as nn
from utils import load_pretrained
from harmonic import Harm2d
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
def harm3x3(in_planes, out_planes, stride=1, level=None):
"""3x3 harmonic convolution with padding"""
return Harm2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,
bias=False, use_bn=False, level=level)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, harm=True, level=None):
super(BasicBlock, self).__init__()
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
if harm:
            self.harm1 = harm3x3(inplanes, planes, stride, level=level)
self.harm2 = harm3x3(planes, planes, level=level)
else:
            self.conv1 = conv3x3(inplanes, planes, stride)
self.conv2 = conv3x3(planes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
        out = self.harm1(x) if hasattr(self, 'harm1') else self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.harm2(out) if hasattr(self, 'harm2') else self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, harm=True, level=None):
super(Bottleneck, self).__init__()
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
if harm:
self.harm2 = harm3x3(planes, planes, stride, level=level)
else:
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.harm2(out) if hasattr(self, 'harm2') else self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, harm_root=True, harm_res_blocks=True, pool=None, levels=[None, None, None, None]):
super(ResNet, self).__init__()
self.inplanes = 64
root_stride = 2 if pool in ['avg', 'max'] else 4
if harm_root:
self.harm1 = Harm2d(3, 64, kernel_size=7, stride=root_stride, padding=3,
bias=False, use_bn=True)
else:
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=root_stride, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
if pool == 'avg':
self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
elif pool == 'max':
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], harm=harm_res_blocks, level=levels[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, harm=harm_res_blocks, level=levels[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, harm=harm_res_blocks, level=levels[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, harm=harm_res_blocks, level=levels[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d) and m.affine:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, harm=True, level=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, harm, level))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, harm=harm, level=level))
return nn.Sequential(*layers)
def forward(self, x):
x = self.harm1(x) if hasattr(self, 'harm1') else self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if hasattr(self, 'pool'):
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained and kwargs['harm_root'] and kwargs['harm_res_blocks'] and (not 'pool' in kwargs or not kwargs['pool'] in ['avg', 'max']) \
and (not 'levels' in kwargs or kwargs['levels'] == [None, None, None, None]):
load_pretrained(model, 'https://github.com/matej-ulicny/harmonic-networks/releases/download/0.1.0/harm_resnet50-eec30392.pth')
return model
def resnet101(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained and kwargs['harm_root'] and kwargs['harm_res_blocks'] and (not 'pool' in kwargs or not kwargs['pool'] in ['avg', 'max']) \
and (not 'levels' in kwargs or kwargs['levels'] == [None, None, None, None]):
load_pretrained(model, 'https://github.com/matej-ulicny/harmonic-networks/releases/download/0.1.0/harm_resnet101-62e185b1.pth')
return model
def resnet152(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
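# Hedged usage sketch (illustrative, not part of the original file): build a fully
# harmonic ResNet-50 and run a dummy forward pass. Assumes the repository's `harmonic`
# and `utils` modules are importable, as already required by the imports above.
if __name__ == "__main__":
    import torch

    model = resnet50(pretrained=False, harm_root=True, harm_res_blocks=True)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])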
| 36.103774
| 140
| 0.617978
|
231b66365a4574453f7243555be73c889858c617
| 12,052
|
py
|
Python
|
components/handlers/modified_modules.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 1
|
2017-04-30T17:59:08.000Z
|
2017-04-30T17:59:08.000Z
|
components/handlers/modified_modules.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 87
|
2017-02-13T09:06:13.000Z
|
2017-04-14T09:23:08.000Z
|
components/handlers/modified_modules.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 1
|
2017-04-11T05:26:00.000Z
|
2017-04-11T05:26:00.000Z
|
'''
This module contains the handler for web requests pertaining to
retrieving modules that have been modified.
'''
from app import RENDER
import web
from components import model, session
from components.handlers.fixed_module_mountings import Fixed
from components.handlers.tentative_module_mountings import Tentative
class Modified(object):
'''
This class contains the implementations of the GET and POST requests.
    It retrieves a list of modified modules and determines which attributes
have been modified.
'''
def get_modules_with_modified_mounting(self):
'''
Get all modules whose mounting has been modified in a future AY.
Return the module code, current AY-Sem, target AY-Sem, and mounting change
'''
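        # Illustrative shape of each returned row:
        #   [module_code, module_name, current_ay_sem, target_ay_sem, mounting_change]
        # where mounting_change is 0 for Mounted --> Unmounted and 1 for Unmounted --> Mounted.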
# Generate fixed mounting plan
fixed_mounting_handler = Fixed()
current_ay = model.get_current_ay()
fixed_mounting_handler.populate_module_code_and_name()
fixed_mounting_handler.populate_module_ay_sem_data()
fixed_full_mounting_plan = fixed_mounting_handler.full_mounting_plan
modified_modules = []
target_ay = current_ay
# Loop through each future AY
for i in range(model.get_number_of_ay_in_system()-1):
target_ay = model.get_next_ay(target_ay)
# Generate tentative mounting plan
tenta_mounting_handler = Tentative()
tenta_mounting_handler.populate_module_code_and_name()
tenta_mounting_handler.populate_module_ay_sem_data(target_ay)
tenta_full_mounting_plan = tenta_mounting_handler.full_mounting_plan
# Compare the fixed and tentative mounting of each module for each semester
            # to see if there is any difference (if there is, the module has been modified)
for i in range(len(fixed_full_mounting_plan)):
fixed_subplan = fixed_full_mounting_plan[i]
tenta_subplan = tenta_full_mounting_plan[i]
module_code = fixed_subplan[0]
module_name = fixed_subplan[1]
fixed_sem_1_mounting = fixed_subplan[2]
tenta_sem_1_mounting = tenta_subplan[2]
fixed_sem_2_mounting = fixed_subplan[3]
tenta_sem_2_mounting = tenta_subplan[3]
if tenta_sem_1_mounting == 0:
modified_modules.append([module_code, module_name, current_ay+" Sem 1",
target_ay+" Sem 1", 0]) # Mounted --> Unmounted
elif tenta_sem_1_mounting == 1 and fixed_sem_1_mounting == -1:
modified_modules.append([module_code, module_name, current_ay+" Sem 1",
target_ay+" Sem 1", 1]) # Unmounted --> Mounted
if tenta_sem_2_mounting == 0:
modified_modules.append([module_code, module_name, current_ay+" Sem 2",
target_ay+" Sem 2", 0]) # Mounted --> Unmounted
elif tenta_sem_2_mounting == 1 and fixed_sem_2_mounting == -1:
modified_modules.append([module_code, module_name, current_ay+" Sem 2",
target_ay+" Sem 2", 1]) # Unmounted --> Mounted
return modified_modules
def get_modules_with_modified_quota(self):
'''
Get all modules whose quota has been modified in a future AY.
Return the module code, module name, current AY-Sem, target AY-Sem,
current AY-Sem's quota, target AY-Sem's quota, and quota change
'''
modified_modules = model.get_modules_with_modified_quota()
modified_modules = [list(module) for module in modified_modules]
for module in modified_modules:
current_quota = module[4]
modified_quota = module[5]
if current_quota is None:
quota_change = '+' + str(modified_quota)
elif modified_quota is None:
quota_change = str(-current_quota)
else:
quota_change = modified_quota - current_quota
if quota_change > 0:
quota_change = '+' + str(quota_change)
else:
quota_change = str(quota_change)
module.append(quota_change)
# Include modules with specified quota that have mounting changes
modules_wth_modified_mountings = self.get_modules_with_modified_mounting()
for module in modules_wth_modified_mountings:
mounting_change = module[4]
if mounting_change == 1: # Unmounted --> Mounted
code = module[0]
name = module[1]
current_ay = module[2]
target_ay = module[3]
quota = model.get_quota_of_target_tenta_ay_sem(code, target_ay)
if quota is not None and quota > 0:
modified_modules.append((code, name, current_ay, target_ay,
"Unmounted", quota, '+'+str(quota)))
elif mounting_change == 0: # Mounted --> Unmounted
code = module[0]
name = module[1]
current_ay = module[2]
target_ay = module[3]
quota = model.get_quota_of_target_fixed_ay_sem(code, current_ay)
if quota is not None and quota > 0:
modified_modules.append((code, name, current_ay, target_ay,
quota, "Unmounted", '-'+str(quota)))
return modified_modules
def get_modules_with_modified_details(self):
'''
        Get all modules whose details (name/description/MC) have been modified.
Return the module details
(code, old name, old desc, old MC, new name, new desc, new MC),
and whether the details have been modified
        (is_name_modified, is_desc_modified, is_mc_modified)
'''
modified_modules = model.get_modules_with_modified_details()
modified_modules = [list(module) for module in modified_modules]
i = 0
while i < len(modified_modules):
module_details = modified_modules[i]
module_code = module_details[0]
old_module_name = module_details[1]
old_module_desc = module_details[2]
old_module_mc = module_details[3]
current_module_name = module_details[4]
current_module_desc = module_details[5]
current_module_mc = module_details[6]
is_name_modified = (current_module_name.rstrip() != old_module_name.rstrip())
is_desc_modified = (current_module_desc.rstrip() != old_module_desc.rstrip())
is_mc_modified = (current_module_mc != old_module_mc)
if not is_name_modified and not is_desc_modified and not is_mc_modified:
model.remove_original_module_info(module_code)
del modified_modules[i]
continue
modifications = [is_name_modified, is_desc_modified, is_mc_modified]
modified_modules[i] = (module_details, modifications)
i += 1
return modified_modules
def get_all_modified_modules(self):
'''
        Get all modules that have been modified in one way or another.
Return the module code, whether mounting is modified,
whether quota is modified, and whether module details are modified
'''
modified_mounting_modules = self.get_modules_with_modified_mounting()
modified_quota_modules = self.get_modules_with_modified_quota()
modified_details_modules = self.get_modules_with_modified_details()
modified_mounting_module_codes_names = [module[0:2] for module in modified_mounting_modules]
modified_quota_module_codes_names = [module[0:2] for module in modified_quota_modules]
modified_details_module_codes_names = [module[0][0::4]
for module in modified_details_modules]
modified_module_codes_names = modified_mounting_module_codes_names +\
modified_quota_module_codes_names +\
modified_details_module_codes_names
modified_modules = []
for modified_module in modified_module_codes_names:
module_code = modified_module[0]
if module_code in [module[0] for module in modified_modules]:
continue
module_name = modified_module[1]
is_mounting_modified = any(module[0] == module_code
for module in modified_mounting_module_codes_names)
is_quota_modified = any(module[0] == module_code
for module in modified_quota_module_codes_names)
is_details_modified = any(module[0] == module_code
for module in modified_details_module_codes_names)
modified_modules.append((module_code, module_name, is_mounting_modified,
is_quota_modified, is_details_modified))
return modified_modules
def GET(self):
'''
Renders the modified modules page if users requested
for the page through the GET method.
'''
web.header('X-Frame-Options', 'SAMEORIGIN')
web.header('X-Content-Type-Options', 'nosniff')
web.header('X-XSS-Protection', '1')
if not session.validate_session():
raise web.seeother('/login')
# User can select the type of modified information they want to see
# By default, the page will show ALL modified modules
modify_type = None
module_code = None
input_data = model.validate_input(web.input(), ["code", "modify_type"], attr_required=False)
try:
modify_type = input_data.modifyType
except AttributeError:
modify_type = None
try:
module_code = input_data.code
except AttributeError:
module_code = None
# If module code is specified, only return data for the specified module
modified_modules_summary = []
modified_modules_mounting = []
modified_modules_quota = []
modified_modules_details = []
modified_modules = []
if module_code is not None:
if modify_type.lower() == "mounting":
modified_modules = self.get_modules_with_modified_mounting()
module = [module for module in modified_modules if module[0] == module_code]
elif modify_type.lower() == "quota":
modified_modules = self.get_modules_with_modified_quota()
module = [module for module in modified_modules if module[0] == module_code]
elif modify_type.lower() == "moduledetails":
module = None
modified_modules = self.get_modules_with_modified_details()
module = None
for mm in modified_modules:
if mm[0][0] == module_code:
module = mm
break
modified_modules = module
# Else return all 4 modification tables, for all the modified modules
else:
modified_modules_summary = self.get_all_modified_modules()
modified_modules_mounting = self.get_modules_with_modified_mounting()
modified_modules_quota = self.get_modules_with_modified_quota()
modified_modules_details = self.get_modules_with_modified_details()
return RENDER.moduleModified(modify_type, modified_modules_summary,
modified_modules_mounting, modified_modules_quota,
modified_modules_details, module_code, modified_modules)
| 46.713178
| 100
| 0.613591
|
bcb2bddf2cab84cb28140329f4867de883642226
| 3,981
|
py
|
Python
|
tests/micro/arduino/conftest.py
|
TNBase/tvm
|
9824d2e2c047cb366eca81fb54829390615a6285
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 2
|
2021-08-12T17:27:40.000Z
|
2021-11-17T10:56:52.000Z
|
tests/micro/arduino/conftest.py
|
TNBase/tvm
|
9824d2e2c047cb366eca81fb54829390615a6285
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
tests/micro/arduino/conftest.py
|
TNBase/tvm
|
9824d2e2c047cb366eca81fb54829390615a6285
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1
|
2021-04-19T06:59:59.000Z
|
2021-04-19T06:59:59.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import pathlib
import pytest
import tvm.target.target
# The models that should pass this configuration. Maps a short, identifying platform string to
# (model, arduino_board).
PLATFORMS = {
"due": ("sam3x8e", "due"),
"feathers2": ("esp32", "feathers2"),
"nano33ble": ("nrf52840", "nano33ble"),
"pybadge": ("atsamd51", "pybadge"),
"spresense": ("cxd5602gg", "spresense"),
"teensy40": ("imxrt1060", "teensy40"),
"teensy41": ("imxrt1060", "teensy41"),
"wioterminal": ("atsamd51", "wioterminal"),
}
TEMPLATE_PROJECT_DIR = (
pathlib.Path(__file__).parent
/ ".."
/ ".."
/ ".."
/ "apps"
/ "microtvm"
/ "arduino"
/ "template_project"
).resolve()
def pytest_addoption(parser):
parser.addoption(
"--microtvm-platforms",
nargs="+",
required=True,
choices=PLATFORMS.keys(),
help="Target platforms for microTVM tests.",
)
parser.addoption(
"--arduino-cli-cmd",
default="arduino-cli",
help="Path to `arduino-cli` command for flashing device.",
)
parser.addoption(
"--test-build-only",
action="store_true",
help="Only run tests that don't require physical hardware.",
)
parser.addoption(
"--tvm-debug",
action="store_true",
default=False,
help="If given, enable a debug session while the test is running. Before running the test, in a separate shell, you should run: <python -m tvm.exec.microtvm_debug_shell>",
)
def pytest_configure(config):
config.addinivalue_line(
"markers", "requires_hardware: mark test to run only when an Arduino board is connected"
)
def pytest_collection_modifyitems(config, items):
if config.getoption("--test-build-only"):
skip_hardware_tests = pytest.mark.skip(reason="--test-build-only was passed")
for item in items:
if "requires_hardware" in item.keywords:
item.add_marker(skip_hardware_tests)
# We might do project generation differently for different boards in the future
# (to take advantage of multiple cores / external memory / etc.), so all tests
# are parameterized by board
def pytest_generate_tests(metafunc):
platforms = metafunc.config.getoption("microtvm_platforms")
metafunc.parametrize("platform", platforms, scope="session")
@pytest.fixture(scope="session")
def arduino_cli_cmd(request):
return request.config.getoption("--arduino-cli-cmd")
@pytest.fixture(scope="session")
def tvm_debug(request):
return request.config.getoption("--tvm-debug")
def make_workspace_dir(test_name, platform):
_, arduino_board = PLATFORMS[platform]
filepath = pathlib.Path(__file__)
board_workspace = (
filepath.parent
/ f"workspace_{test_name}_{arduino_board}"
/ datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
)
number = 0
while board_workspace.exists():
number += 1
board_workspace = pathlib.Path(str(board_workspace) + f"-{number}")
board_workspace.parent.mkdir(exist_ok=True, parents=True)
t = tvm.contrib.utils.tempdir(board_workspace)
# time.sleep(200)
return t
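# Hedged usage sketch (illustrative, not part of the original conftest): a test module in
# this directory could consume the fixtures and helper defined above roughly as follows,
# where `platform` is injected by pytest_generate_tests from --microtvm-platforms:
#
#     def test_project_build(platform, arduino_cli_cmd, tvm_debug):
#         model, arduino_board = PLATFORMS[platform]
#         workspace_dir = make_workspace_dir("project_build", platform)
#         ...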
| 32.104839
| 179
| 0.680482
|
49728a471fcc8fc3d74dbfaa8360f85829aea594
| 40
|
py
|
Python
|
audit_service/api/base/tests/__init__.py
|
LucasBiason/LoggingPy
|
aa73a59842c74ae2b11031875be11beb3da54048
|
[
"MIT"
] | null | null | null |
audit_service/api/base/tests/__init__.py
|
LucasBiason/LoggingPy
|
aa73a59842c74ae2b11031875be11beb3da54048
|
[
"MIT"
] | null | null | null |
audit_service/api/base/tests/__init__.py
|
LucasBiason/LoggingPy
|
aa73a59842c74ae2b11031875be11beb3da54048
|
[
"MIT"
] | null | null | null |
from .logger_test import LoggerNoSQLTest
| 40
| 40
| 0.9
|
a12e7180865fedfa67746ee7b322c157f26a535c
| 1,511
|
py
|
Python
|
example files/flask_apps/upload/controller.py
|
nikku1234/InbreastData-Html-Page
|
5f02b2e03e5f2f8f9fe9e2ce1b089b4dd2e36323
|
[
"Apache-2.0"
] | 1
|
2020-07-02T06:06:18.000Z
|
2020-07-02T06:06:18.000Z
|
example files/flask_apps/upload/controller.py
|
nikku1234/InbreastData-Html-Page
|
5f02b2e03e5f2f8f9fe9e2ce1b089b4dd2e36323
|
[
"Apache-2.0"
] | 6
|
2020-06-17T14:19:47.000Z
|
2022-03-12T00:36:20.000Z
|
example files/flask_apps/upload/controller.py
|
nikku1234/InbreastData-Html-Page
|
5f02b2e03e5f2f8f9fe9e2ce1b089b4dd2e36323
|
[
"Apache-2.0"
] | null | null | null |
from compute import compute_mean_std as compute_function
from flask import Flask, render_template, request
from model import Average
from werkzeug.utils import secure_filename
import os
# Application object
app = Flask(__name__)
# Relative path of directory for uploaded files
UPLOAD_DIR = 'uploads/'
app.config['UPLOAD_FOLDER'] = UPLOAD_DIR
app.secret_key = 'MySecretKey'
if not os.path.isdir(UPLOAD_DIR):
os.mkdir(UPLOAD_DIR)
# Allowed file types for file upload
ALLOWED_EXTENSIONS = set(['txt', 'dat', 'npy'])
def allowed_file(filename):
"""Does filename have the right extension?"""
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
# Path to the web application
@app.route('/', methods=['GET', 'POST'])
def index():
form = Average(request.form)
filename = None # default
if request.method == 'POST':
# Save uploaded file on server if it exists and is valid
if request.files:
file = request.files[form.filename.name]
if file and allowed_file(file.filename):
                # Make a valid version of filename for any file system
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'],
filename))
result = compute_function(filename)
else:
result = None
return render_template("view.html", form=form, result=result)
if __name__ == '__main__':
app.run(debug=True)
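# Usage note (illustrative, not part of the original file): running this module directly
# starts Flask's development server on its default address (http://127.0.0.1:5000/).
# Valid uploads are saved under the uploads/ directory configured above before
# compute_function is called on the stored filename.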
| 29.057692
| 69
| 0.662475
|
26c398a70e5d597d4a35b912c4b1dbece52d3295
| 3,278
|
py
|
Python
|
tests/demoapp/demo/admin.py
|
saxix/django-admin-extra-urls
|
295112cbc194c20de0c176c535d0cddf3b3cb680
|
[
"BSD-1-Clause"
] | 24
|
2015-05-01T03:27:53.000Z
|
2022-02-20T07:45:02.000Z
|
tests/demoapp/demo/admin.py
|
saxix/django-admin-extra-urls
|
295112cbc194c20de0c176c535d0cddf3b3cb680
|
[
"BSD-1-Clause"
] | 19
|
2015-03-28T09:54:56.000Z
|
2022-01-07T14:56:09.000Z
|
tests/demoapp/demo/admin.py
|
saxix/django-admin-extra-urls
|
295112cbc194c20de0c176c535d0cddf3b3cb680
|
[
"BSD-1-Clause"
] | 15
|
2015-10-20T10:15:18.000Z
|
2022-02-01T16:25:53.000Z
|
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from django.http import HttpResponseRedirect
from django.urls import reverse
from admin_extra_urls.api import ExtraUrlMixin, UrlButton, confirm_action, url
from admin_extra_urls.decorators import button
from .models import DemoModel1, DemoModel2, DemoModel3, DemoModel4
from .upload import UploadMixin
class TestFilter(SimpleListFilter):
parameter_name = 'filter'
title = "Dummy filter for testing"
def lookups(self, request, model_admin):
return (
('on', "On"),
('off', "Off"),
)
def queryset(self, request, queryset):
return queryset
class Admin1(ExtraUrlMixin, admin.ModelAdmin):
list_filter = [TestFilter]
@url(permission='demo.add_demomodel1', button=True)
def refresh(self, request):
opts = self.model._meta
self.message_user(request, 'refresh called')
return HttpResponseRedirect(reverse(admin_urlname(opts, 'changelist')))
@url(button=UrlButton(label='Refresh'), permission=lambda request, object: False)
def refresh_callable(self, request):
opts = self.model._meta
self.message_user(request, 'refresh called')
return HttpResponseRedirect(reverse(admin_urlname(opts, 'changelist')))
@button(path='a/b/', button=True)
def custom_path(self, request):
opts = self.model._meta
self.message_user(request, 'refresh called')
return HttpResponseRedirect(reverse(admin_urlname(opts, 'changelist')))
@url(button=True)
def no_response(self, request):
self.message_user(request, 'No_response')
@url(button=True)
def confirm(self, request):
def _action(request):
pass
return confirm_action(self, request, _action, "Confirm action",
"Successfully executed", )
class Admin2(ExtraUrlMixin, admin.ModelAdmin):
list_filter = [TestFilter]
@url(permission='demo_delete_demomodel2', button=True, details=True)
def update(self, request, pk):
opts = self.model._meta
self.message_user(request, 'action called')
return HttpResponseRedirect(reverse(admin_urlname(opts, 'changelist')))
@url(button=True)
def no_response(self, request, object_id):
self.message_user(request, 'No_response')
@url(permission=lambda request, obj: False)
def update_callable_permission(self, request, object_id):
opts = self.model._meta
self.message_user(request, 'action called')
return HttpResponseRedirect(reverse(admin_urlname(opts, 'changelist')))
@url(path='a/b/<path:object_id>', button=True)
def custom_update(self, request, object_id):
opts = self.model._meta
self.message_user(request, 'action called')
return HttpResponseRedirect(reverse(admin_urlname(opts, 'changelist')))
class Admin3(admin.ModelAdmin):
pass
class Admin4(UploadMixin, admin.ModelAdmin):
upload_handler = lambda *args: [1, 2, 3]
admin.site.register(DemoModel1, Admin1)
admin.site.register(DemoModel2, Admin2)
admin.site.register(DemoModel3, Admin3)
admin.site.register(DemoModel4, Admin4)
| 32.78
| 85
| 0.702257
|
170dbf6c3634a671c97361697a96ee92c3398b1d
| 50,646
|
py
|
Python
|
network/datadog_checks/network/network.py
|
tcpatterson/integrations-core
|
3692601de09f8db60f42612b0d623509415bbb53
|
[
"BSD-3-Clause"
] | null | null | null |
network/datadog_checks/network/network.py
|
tcpatterson/integrations-core
|
3692601de09f8db60f42612b0d623509415bbb53
|
[
"BSD-3-Clause"
] | null | null | null |
network/datadog_checks/network/network.py
|
tcpatterson/integrations-core
|
3692601de09f8db60f42612b0d623509415bbb53
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2010-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""
Collects network metrics.
"""
import array
import distutils.spawn
import os
import re
import socket
import struct
from collections import defaultdict
import psutil
from six import PY3, iteritems, itervalues
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.utils.common import pattern_filter
from datadog_checks.base.utils.platform import Platform
from datadog_checks.base.utils.subprocess_output import SubprocessOutputEmptyError, get_subprocess_output
try:
import datadog_agent
except ImportError:
from datadog_checks.base.stubs import datadog_agent
try:
import fcntl
except ImportError:
fcntl = None
if PY3:
long = int
BSD_TCP_METRICS = [
(re.compile(r"^\s*(\d+) data packets \(\d+ bytes\) retransmitted\s*$"), 'system.net.tcp.retrans_packs'),
(re.compile(r"^\s*(\d+) packets sent\s*$"), 'system.net.tcp.sent_packs'),
(re.compile(r"^\s*(\d+) packets received\s*$"), 'system.net.tcp.rcv_packs'),
]
SOLARIS_TCP_METRICS = [
(re.compile(r"\s*tcpRetransSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.retrans_segs'),
(re.compile(r"\s*tcpOutDataSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.in_segs'),
(re.compile(r"\s*tcpInSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.out_segs'),
]
# constants for extracting ethtool data via ioctl
SIOCETHTOOL = 0x8946
ETHTOOL_GSTRINGS = 0x0000001B
ETHTOOL_GSSET_INFO = 0x00000037
ETHTOOL_GSTATS = 0x0000001D
ETH_SS_STATS = 0x1
ETH_GSTRING_LEN = 32
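# These values mirror the corresponding constants in linux/ethtool.h; they are used to
# issue SIOCETHTOOL ioctls that enumerate per-driver statistic names and read their
# counters for the ENA metrics collected further below.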
# ENA metrics that we're collecting
# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-network-performance.html
ENA_METRIC_PREFIX = "aws.ec2."
ENA_METRIC_NAMES = [
"bw_in_allowance_exceeded",
"bw_out_allowance_exceeded",
"conntrack_allowance_exceeded",
"linklocal_allowance_exceeded",
"pps_allowance_exceeded",
]
class Network(AgentCheck):
SOURCE_TYPE_NAME = 'system'
PSUTIL_TYPE_MAPPING = {socket.SOCK_STREAM: 'tcp', socket.SOCK_DGRAM: 'udp'}
PSUTIL_FAMILY_MAPPING = {socket.AF_INET: '4', socket.AF_INET6: '6'}
def check(self, instance):
if instance is None:
instance = {}
self._excluded_ifaces = instance.get('excluded_interfaces', [])
if not isinstance(self._excluded_ifaces, list):
raise ConfigurationError(
"Expected 'excluded_interfaces' to be a list, got '{}'".format(type(self._excluded_ifaces).__name__)
)
self._collect_cx_state = instance.get('collect_connection_state', False)
self._collect_cx_queues = instance.get('collect_connection_queues', False)
self._collect_rate_metrics = instance.get('collect_rate_metrics', True)
self._collect_count_metrics = instance.get('collect_count_metrics', False)
self._collect_ena_metrics = instance.get('collect_aws_ena_metrics', False)
if fcntl is None and self._collect_ena_metrics:
raise ConfigurationError("fcntl not importable, collect_aws_ena_metrics should be disabled")
# This decides whether we should split or combine connection states,
# along with a few other things
self._setup_metrics(instance)
self._exclude_iface_re = None
exclude_re = instance.get('excluded_interface_re', None)
if exclude_re:
self.log.debug("Excluding network devices matching: %s", exclude_re)
self._exclude_iface_re = re.compile(exclude_re)
if Platform.is_linux():
self._check_linux(instance)
elif Platform.is_bsd():
self._check_bsd(instance)
elif Platform.is_solaris():
self._check_solaris(instance)
elif Platform.is_windows():
self._check_psutil(instance)
def _setup_metrics(self, instance):
self._combine_connection_states = instance.get('combine_connection_states', True)
if self._combine_connection_states:
self.cx_state_gauge = {
('udp4', 'connections'): 'system.net.udp4.connections',
('udp6', 'connections'): 'system.net.udp6.connections',
('tcp4', 'established'): 'system.net.tcp4.established',
('tcp4', 'opening'): 'system.net.tcp4.opening',
('tcp4', 'closing'): 'system.net.tcp4.closing',
('tcp4', 'listening'): 'system.net.tcp4.listening',
('tcp4', 'time_wait'): 'system.net.tcp4.time_wait',
('tcp6', 'established'): 'system.net.tcp6.established',
('tcp6', 'opening'): 'system.net.tcp6.opening',
('tcp6', 'closing'): 'system.net.tcp6.closing',
('tcp6', 'listening'): 'system.net.tcp6.listening',
('tcp6', 'time_wait'): 'system.net.tcp6.time_wait',
}
self.tcp_states = {
"ss": {
"ESTAB": "established",
"SYN-SENT": "opening",
"SYN-RECV": "opening",
"FIN-WAIT-1": "closing",
"FIN-WAIT-2": "closing",
"TIME-WAIT": "time_wait",
"UNCONN": "closing",
"CLOSE-WAIT": "closing",
"LAST-ACK": "closing",
"LISTEN": "listening",
"CLOSING": "closing",
},
"netstat": {
"ESTABLISHED": "established",
"SYN_SENT": "opening",
"SYN_RECV": "opening",
"FIN_WAIT1": "closing",
"FIN_WAIT2": "closing",
"TIME_WAIT": "time_wait",
"CLOSE": "closing",
"CLOSE_WAIT": "closing",
"LAST_ACK": "closing",
"LISTEN": "listening",
"CLOSING": "closing",
},
"psutil": {
psutil.CONN_ESTABLISHED: "established",
psutil.CONN_SYN_SENT: "opening",
psutil.CONN_SYN_RECV: "opening",
psutil.CONN_FIN_WAIT1: "closing",
psutil.CONN_FIN_WAIT2: "closing",
psutil.CONN_TIME_WAIT: "time_wait",
psutil.CONN_CLOSE: "closing",
psutil.CONN_CLOSE_WAIT: "closing",
psutil.CONN_LAST_ACK: "closing",
psutil.CONN_LISTEN: "listening",
psutil.CONN_CLOSING: "closing",
psutil.CONN_NONE: "connections", # CONN_NONE is always returned for udp connections
},
}
else:
self.cx_state_gauge = {
('udp4', 'connections'): 'system.net.udp4.connections',
('udp6', 'connections'): 'system.net.udp6.connections',
('tcp4', 'estab'): 'system.net.tcp4.estab',
('tcp4', 'syn_sent'): 'system.net.tcp4.syn_sent',
('tcp4', 'syn_recv'): 'system.net.tcp4.syn_recv',
('tcp4', 'fin_wait_1'): 'system.net.tcp4.fin_wait_1',
('tcp4', 'fin_wait_2'): 'system.net.tcp4.fin_wait_2',
('tcp4', 'time_wait'): 'system.net.tcp4.time_wait',
('tcp4', 'unconn'): 'system.net.tcp4.unconn',
('tcp4', 'close'): 'system.net.tcp4.close',
('tcp4', 'close_wait'): 'system.net.tcp4.close_wait',
('tcp4', 'closing'): 'system.net.tcp4.closing',
('tcp4', 'listen'): 'system.net.tcp4.listen',
('tcp4', 'last_ack'): 'system.net.tcp4.time_wait',
('tcp6', 'estab'): 'system.net.tcp6.estab',
('tcp6', 'syn_sent'): 'system.net.tcp6.syn_sent',
('tcp6', 'syn_recv'): 'system.net.tcp6.syn_recv',
('tcp6', 'fin_wait_1'): 'system.net.tcp6.fin_wait_1',
('tcp6', 'fin_wait_2'): 'system.net.tcp6.fin_wait_2',
('tcp6', 'time_wait'): 'system.net.tcp6.time_wait',
('tcp6', 'unconn'): 'system.net.tcp6.unconn',
('tcp6', 'close'): 'system.net.tcp6.close',
('tcp6', 'close_wait'): 'system.net.tcp6.close_wait',
('tcp6', 'closing'): 'system.net.tcp6.closing',
('tcp6', 'listen'): 'system.net.tcp6.listen',
('tcp6', 'last_ack'): 'system.net.tcp6.time_wait',
}
self.tcp_states = {
"ss": {
"ESTAB": "estab",
"SYN-SENT": "syn_sent",
"SYN-RECV": "syn_recv",
"FIN-WAIT-1": "fin_wait_1",
"FIN-WAIT-2": "fin_wait_2",
"TIME-WAIT": "time_wait",
"UNCONN": "unconn",
"CLOSE-WAIT": "close_wait",
"LAST-ACK": "last_ack",
"LISTEN": "listen",
"CLOSING": "closing",
},
"netstat": {
"ESTABLISHED": "estab",
"SYN_SENT": "syn_sent",
"SYN_RECV": "syn_recv",
"FIN_WAIT1": "fin_wait_1",
"FIN_WAIT2": "fin_wait_2",
"TIME_WAIT": "time_wait",
"CLOSE": "close",
"CLOSE_WAIT": "close_wait",
"LAST_ACK": "last_ack",
"LISTEN": "listen",
"CLOSING": "closing",
},
"psutil": {
psutil.CONN_ESTABLISHED: "estab",
psutil.CONN_SYN_SENT: "syn_sent",
psutil.CONN_SYN_RECV: "syn_recv",
psutil.CONN_FIN_WAIT1: "fin_wait_1",
psutil.CONN_FIN_WAIT2: "fin_wait_2",
psutil.CONN_TIME_WAIT: "time_wait",
psutil.CONN_CLOSE: "close",
psutil.CONN_CLOSE_WAIT: "close_wait",
psutil.CONN_LAST_ACK: "last_ack",
psutil.CONN_LISTEN: "listen",
psutil.CONN_CLOSING: "closing",
psutil.CONN_NONE: "connections", # CONN_NONE is always returned for udp connections
},
}
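        # Note: with combine_connection_states left at its default (True), the raw TCP
        # states reported by ss/netstat/psutil are folded into the coarser established/
        # opening/closing/listening/time_wait buckets; otherwise each state keeps its own
        # system.net.tcp4.* / system.net.tcp6.* gauge as mapped above.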
def _submit_netmetric(self, metric, value, tags=None):
if self._collect_rate_metrics:
self.rate(metric, value, tags=tags)
if self._collect_count_metrics:
self.monotonic_count('{}.count'.format(metric), value, tags=tags)
def _submit_devicemetrics(self, iface, vals_by_metric, tags):
if iface in self._excluded_ifaces or (self._exclude_iface_re and self._exclude_iface_re.match(iface)):
# Skip this network interface.
return False
# adding the device to the tags as device_name is deprecated
metric_tags = [] if tags is None else tags[:]
metric_tags.append('device:{}'.format(iface))
expected_metrics = self._get_expected_metrics()
for m in expected_metrics:
assert m in vals_by_metric
assert len(vals_by_metric) == len(expected_metrics)
count = 0
for metric, val in iteritems(vals_by_metric):
self.rate('system.net.%s' % metric, val, tags=metric_tags)
count += 1
self.log.debug("tracked %s network metrics for interface %s", count, iface)
def _get_expected_metrics(self):
expected_metrics = [
'bytes_rcvd',
'bytes_sent',
'packets_in.count',
'packets_in.error',
'packets_out.count',
'packets_out.error',
]
if Platform.is_linux() or Platform.is_windows():
expected_metrics.extend(
[
'packets_in.drop',
'packets_out.drop',
]
)
return expected_metrics
def _submit_ena_metrics(self, iface, vals_by_metric, tags):
if iface in self._excluded_ifaces or (self._exclude_iface_re and self._exclude_iface_re.match(iface)):
# Skip this network interface.
return False
metric_tags = [] if tags is None else tags[:]
metric_tags.append('device:{}'.format(iface))
allowed = [ENA_METRIC_PREFIX + m for m in ENA_METRIC_NAMES]
for m in vals_by_metric:
assert m in allowed
count = 0
for metric, val in iteritems(vals_by_metric):
self.gauge('system.net.%s' % metric, val, tags=metric_tags)
count += 1
self.log.debug("tracked %s network ena metrics for interface %s", count, iface)
def _parse_value(self, v):
try:
return long(v)
except ValueError:
return 0
def _submit_regexed_values(self, output, regex_list, tags):
lines = output.splitlines()
for line in lines:
for regex, metric in regex_list:
value = re.match(regex, line)
if value:
self._submit_netmetric(metric, self._parse_value(value.group(1)), tags=tags)
def _is_collect_cx_state_runnable(self, proc_location):
"""
Determine if collect_connection_state is set and can effectively run.
If self._collect_cx_state is True and a custom proc_location is provided, the system cannot
run `ss` or `netstat` over a custom proc_location
:param proc_location: str
:return: bool
"""
if self._collect_cx_state is False:
return False
if proc_location != "/proc":
# If we have `ss`, we're fine with a non-standard `/proc` location
if distutils.spawn.find_executable("ss") is None:
self.warning(
"Cannot collect connection state: `ss` cannot be found and "
"currently with a custom /proc path: %s",
proc_location,
)
return False
else:
return True
return True
def _check_linux(self, instance):
"""
_check_linux can be run inside a container and still collects the network metrics from the host
For that procfs_path can be set to something like "/host/proc"
When a custom procfs_path is set, the collect_connection_state option is ignored
"""
proc_location = datadog_agent.get_config('procfs_path')
if not proc_location:
proc_location = '/proc'
proc_location = proc_location.rstrip('/')
custom_tags = instance.get('tags', [])
self._get_iface_sys_metrics(custom_tags)
net_proc_base_location = self._get_net_proc_base_location(proc_location)
if self._is_collect_cx_state_runnable(net_proc_base_location):
try:
self.log.debug("Using `ss` to collect connection state")
# Try using `ss` for increased performance over `netstat`
ss_env = {"PROC_ROOT": net_proc_base_location}
# By providing the environment variables in ss_env, the PATH will be overridden. In CentOS,
# datadog-agent PATH is "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", while sh PATH
# will be '/usr/local/bin:/usr/bin'. In CentOS, ss is located in /sbin and /usr/sbin, not
# in the sh PATH, which will result in network metric collection failure.
#
# The line below will set sh PATH explicitly as the datadog-agent PATH to fix that issue.
if "PATH" in os.environ:
ss_env["PATH"] = os.environ["PATH"]
metrics = self._get_metrics()
for ip_version in ['4', '6']:
# Call `ss` for each IP version because there's no built-in way of distinguishing
# between the IP versions in the output
# Also calls `ss` for each protocol, because on some systems (e.g. Ubuntu 14.04), there is a
                    # bug that prints `tcp` even if it's `udp`
# The `-H` flag isn't available on old versions of `ss`.
cmd = "ss --numeric --tcp --all --ipv{} | cut -d ' ' -f 1 | sort | uniq -c".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
# 7624 CLOSE-WAIT
# 72 ESTAB
# 9 LISTEN
# 1 State
# 37 TIME-WAIT
lines = output.splitlines()
self._parse_short_state_lines(lines, metrics, self.tcp_states['ss'], ip_version=ip_version)
cmd = "ss --numeric --udp --all --ipv{} | wc -l".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
metric = self.cx_state_gauge[('udp{}'.format(ip_version), 'connections')]
metrics[metric] = int(output) - 1 # Remove header
if self._collect_cx_queues:
cmd = "ss --numeric --tcp --all --ipv{}".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
for (state, recvq, sendq) in self._parse_queues("ss", output):
self.histogram('system.net.tcp.recv_q', recvq, custom_tags + ["state:" + state])
self.histogram('system.net.tcp.send_q', sendq, custom_tags + ["state:" + state])
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
except OSError as e:
self.log.info("`ss` invocation failed: %s. Using `netstat` as a fallback", str(e))
output, _, _ = get_subprocess_output(["netstat", "-n", "-u", "-t", "-a"], self.log)
lines = output.splitlines()
# Active Internet connections (w/o servers)
# Proto Recv-Q Send-Q Local Address Foreign Address State
# tcp 0 0 46.105.75.4:80 79.220.227.193:2032 SYN_RECV
# tcp 0 0 46.105.75.4:143 90.56.111.177:56867 ESTABLISHED
# tcp 0 0 46.105.75.4:50468 107.20.207.175:443 TIME_WAIT
# tcp6 0 0 46.105.75.4:80 93.15.237.188:58038 FIN_WAIT2
# tcp6 0 0 46.105.75.4:80 79.220.227.193:2029 ESTABLISHED
# udp 0 0 0.0.0.0:123 0.0.0.0:*
# udp6 0 0 :::41458 :::*
metrics = self._parse_linux_cx_state(lines[2:], self.tcp_states['netstat'], 5)
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
if self._collect_cx_queues:
for (state, recvq, sendq) in self._parse_queues("netstat", output):
self.histogram('system.net.tcp.recv_q', recvq, custom_tags + ["state:" + state])
self.histogram('system.net.tcp.send_q', sendq, custom_tags + ["state:" + state])
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection states.")
proc_dev_path = "{}/net/dev".format(net_proc_base_location)
try:
with open(proc_dev_path, 'r') as proc:
lines = proc.readlines()
except IOError:
            # On Openshift, /proc/net/dev is only readable by root
self.log.debug("Unable to read %s.", proc_dev_path)
lines = []
# Inter-| Receive | Transmit
# face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed # noqa: E501
# lo:45890956 112797 0 0 0 0 0 0 45890956 112797 0 0 0 0 0 0 # noqa: E501
# eth0:631947052 1042233 0 19 0 184 0 1206 1208625538 1320529 0 0 0 0 0 0 # noqa: E501
# eth1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 # noqa: E501
for line in lines[2:]:
cols = line.split(':', 1)
x = cols[1].split()
# Filter inactive interfaces
if self._parse_value(x[0]) or self._parse_value(x[8]):
iface = cols[0].strip()
metrics = {
'bytes_rcvd': self._parse_value(x[0]),
'bytes_sent': self._parse_value(x[8]),
'packets_in.count': self._parse_value(x[1]),
'packets_in.drop': self._parse_value(x[3]),
'packets_in.error': self._parse_value(x[2]) + self._parse_value(x[3]),
'packets_out.count': self._parse_value(x[9]),
'packets_out.drop': self._parse_value(x[11]),
'packets_out.error': self._parse_value(x[10]) + self._parse_value(x[11]),
}
self._submit_devicemetrics(iface, metrics, custom_tags)
# read ENA metrics, if configured and available
if self._collect_ena_metrics:
ena_metrics = self._collect_ena(iface)
if ena_metrics:
self._submit_ena_metrics(iface, ena_metrics, custom_tags)
netstat_data = {}
for f in ['netstat', 'snmp']:
proc_data_path = "{}/net/{}".format(net_proc_base_location, f)
try:
with open(proc_data_path, 'r') as netstat:
while True:
n_header = netstat.readline()
if not n_header:
break # No more? Abort!
n_data = netstat.readline()
h_parts = n_header.strip().split(' ')
h_values = n_data.strip().split(' ')
ns_category = h_parts[0][:-1]
netstat_data[ns_category] = {}
# Turn the data into a dictionary
for idx, hpart in enumerate(h_parts[1:]):
netstat_data[ns_category][hpart] = h_values[idx + 1]
except IOError:
# On Openshift, /proc/net/snmp is only readable by root
self.log.debug("Unable to read %s.", proc_data_path)
nstat_metrics_names = {
'Tcp': {
'RetransSegs': 'system.net.tcp.retrans_segs',
'InSegs': 'system.net.tcp.in_segs',
'OutSegs': 'system.net.tcp.out_segs',
},
'TcpExt': {
'ListenOverflows': 'system.net.tcp.listen_overflows',
'ListenDrops': 'system.net.tcp.listen_drops',
'TCPBacklogDrop': 'system.net.tcp.backlog_drops',
'TCPRetransFail': 'system.net.tcp.failed_retransmits',
'IPReversePathFilter': 'system.net.ip.reverse_path_filter',
'PruneCalled': 'system.net.tcp.prune_called',
'RcvPruned': 'system.net.tcp.prune_rcv_drops',
'OfoPruned': 'system.net.tcp.prune_ofo_called',
'PAWSActive': 'system.net.tcp.paws_connection_drops',
'PAWSEstab': 'system.net.tcp.paws_established_drops',
'SyncookiesSent': 'system.net.tcp.syn_cookies_sent',
'SyncookiesRecv': 'system.net.tcp.syn_cookies_recv',
'SyncookiesFailed': 'system.net.tcp.syn_cookies_failed',
'TCPAbortOnTimeout': 'system.net.tcp.abort_on_timeout',
'TCPSynRetrans': 'system.net.tcp.syn_retrans',
'TCPFromZeroWindowAdv': 'system.net.tcp.from_zero_window',
'TCPToZeroWindowAdv': 'system.net.tcp.to_zero_window',
'TWRecycled': 'system.net.tcp.tw_reused',
},
'Udp': {
'InDatagrams': 'system.net.udp.in_datagrams',
'NoPorts': 'system.net.udp.no_ports',
'InErrors': 'system.net.udp.in_errors',
'OutDatagrams': 'system.net.udp.out_datagrams',
'RcvbufErrors': 'system.net.udp.rcv_buf_errors',
'SndbufErrors': 'system.net.udp.snd_buf_errors',
'InCsumErrors': 'system.net.udp.in_csum_errors',
},
}
# Skip the first line, as it's junk
for k in nstat_metrics_names:
for met in nstat_metrics_names[k]:
if met in netstat_data.get(k, {}):
self._submit_netmetric(
nstat_metrics_names[k][met], self._parse_value(netstat_data[k][met]), tags=custom_tags
)
# Get the conntrack -S information
conntrack_path = instance.get('conntrack_path')
use_sudo_conntrack = is_affirmative(instance.get('use_sudo_conntrack', True))
if conntrack_path is not None:
self._add_conntrack_stats_metrics(conntrack_path, use_sudo_conntrack, custom_tags)
# Get the rest of the metric by reading the files. Metrics available since kernel 3.6
conntrack_files_location = os.path.join(proc_location, 'sys', 'net', 'netfilter')
        # By default, only max and count are reported. However, if the blacklist is set,
        # the whitelist loses its default value.
blacklisted_files = instance.get('blacklist_conntrack_metrics')
whitelisted_files = instance.get('whitelist_conntrack_metrics')
if blacklisted_files is None and whitelisted_files is None:
whitelisted_files = ['max', 'count']
available_files = []
# Get the metrics to read
try:
for metric_file in os.listdir(conntrack_files_location):
if (
os.path.isfile(os.path.join(conntrack_files_location, metric_file))
and 'nf_conntrack_' in metric_file
):
available_files.append(metric_file[len('nf_conntrack_') :])
except Exception as e:
self.log.debug("Unable to list the files in %s. %s", conntrack_files_location, e)
filtered_available_files = pattern_filter(
available_files, whitelist=whitelisted_files, blacklist=blacklisted_files
)
for metric_name in filtered_available_files:
metric_file_location = os.path.join(conntrack_files_location, 'nf_conntrack_{}'.format(metric_name))
value = self._read_int_file(metric_file_location)
if value is not None:
self.gauge('system.net.conntrack.{}'.format(metric_name), value, tags=custom_tags)
@staticmethod
def _get_net_proc_base_location(proc_location):
if Platform.is_containerized() and proc_location != "/proc":
net_proc_base_location = "%s/1" % proc_location
else:
net_proc_base_location = proc_location
return net_proc_base_location
def _read_int_file(self, file_location):
try:
with open(file_location, 'r') as f:
try:
value = int(f.read().rstrip())
return value
except ValueError:
self.log.debug("Content of %s is not an integer", file_location)
except IOError as e:
self.log.debug("Unable to read %s, skipping %s.", file_location, e)
return None
def _get_iface_sys_metrics(self, custom_tags):
sys_net_location = '/sys/class/net'
sys_net_metrics = ['mtu', 'tx_queue_len']
try:
ifaces = os.listdir(sys_net_location)
except OSError as e:
self.log.debug("Unable to list %s, skipping system iface metrics: %s.", sys_net_location, e)
return None
for iface in ifaces:
for metric_name in sys_net_metrics:
metric_file_location = os.path.join(sys_net_location, iface, metric_name)
value = self._read_int_file(metric_file_location)
if value is not None:
self.gauge('system.net.iface.{}'.format(metric_name), value, tags=custom_tags + ["iface:" + iface])
iface_queues_location = os.path.join(sys_net_location, iface, 'queues')
self._collect_iface_queue_metrics(iface, iface_queues_location, custom_tags)
def _collect_iface_queue_metrics(self, iface, iface_queues_location, custom_tags):
try:
iface_queues = os.listdir(iface_queues_location)
except OSError as e:
self.log.debug("Unable to list %s, skipping %s.", iface_queues_location, e)
return
num_rx_queues = len([q for q in iface_queues if q.startswith('rx-')])
num_tx_queues = len([q for q in iface_queues if q.startswith('tx-')])
self.gauge('system.net.iface.num_tx_queues', num_tx_queues, tags=custom_tags + ["iface:" + iface])
self.gauge('system.net.iface.num_rx_queues', num_rx_queues, tags=custom_tags + ["iface:" + iface])
def _add_conntrack_stats_metrics(self, conntrack_path, use_sudo_conntrack, tags):
"""
Parse the output of conntrack -S
Add the parsed metrics
"""
try:
cmd = [conntrack_path, "-S"]
if use_sudo_conntrack:
cmd.insert(0, "sudo")
output, _, _ = get_subprocess_output(cmd, self.log)
# conntrack -S sample:
# cpu=0 found=27644 invalid=19060 ignore=485633411 insert=0 insert_failed=1 \
# drop=1 early_drop=0 error=0 search_restart=39936711
# cpu=1 found=21960 invalid=17288 ignore=475938848 insert=0 insert_failed=1 \
# drop=1 early_drop=0 error=0 search_restart=36983181
lines = output.splitlines()
for line in lines:
cols = line.split()
cpu_num = cols[0].split('=')[-1]
cpu_tag = ['cpu:{}'.format(cpu_num)]
cols = cols[1:]
for cell in cols:
metric, value = cell.split('=')
self.monotonic_count('system.net.conntrack.{}'.format(metric), int(value), tags=tags + cpu_tag)
except SubprocessOutputEmptyError:
self.log.debug("Couldn't use %s to get conntrack stats", conntrack_path)
def _get_metrics(self):
return {val: 0 for val in itervalues(self.cx_state_gauge)}
def _parse_short_state_lines(self, lines, metrics, tcp_states, ip_version):
for line in lines:
value, state = line.split()
proto = "tcp{0}".format(ip_version)
if state in tcp_states:
metric = self.cx_state_gauge[proto, tcp_states[state]]
metrics[metric] += int(value)
def _parse_linux_cx_state(self, lines, tcp_states, state_col, protocol=None, ip_version=None):
"""
Parse the output of the command that retrieves the connection state (either `ss` or `netstat`)
Returns a dict metric_name -> value
"""
metrics = self._get_metrics()
for l in lines:
cols = l.split()
if cols[0].startswith('tcp') or protocol == 'tcp':
proto = "tcp{0}".format(ip_version) if ip_version else ("tcp4", "tcp6")[cols[0] == "tcp6"]
if cols[state_col] in tcp_states:
metric = self.cx_state_gauge[proto, tcp_states[cols[state_col]]]
metrics[metric] += 1
elif cols[0].startswith('udp') or protocol == 'udp':
proto = "udp{0}".format(ip_version) if ip_version else ("udp4", "udp6")[cols[0] == "udp6"]
metric = self.cx_state_gauge[proto, 'connections']
metrics[metric] += 1
return metrics
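    # Illustrative sketch (not part of the original check): for a netstat line such as
    #   "tcp6  0  0  46.105.75.4:80  93.15.237.188:58038  ESTABLISHED"
    # split() gives cols[0] == "tcp6" and cols[state_col] == "ESTABLISHED"; if that state is a
    # key of tcp_states, the matching cx_state_gauge[('tcp6', <mapped state>)] counter is
    # incremented. UDP lines carry no state column and are counted under the
    # ('udp4'/'udp6', 'connections') entries instead.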
def _check_bsd(self, instance):
netstat_flags = ['-i', '-b']
custom_tags = instance.get('tags', [])
# FreeBSD's netstat truncates device names unless you pass '-W'
if Platform.is_freebsd():
netstat_flags.append('-W')
try:
output, _, _ = get_subprocess_output(["netstat"] + netstat_flags, self.log)
lines = output.splitlines()
# Name Mtu Network Address Ipkts Ierrs Ibytes Opkts Oerrs Obytes Coll
# lo0 16384 <Link#1> 318258 0 428252203 318258 0 428252203 0
# lo0 16384 localhost fe80:1::1 318258 - 428252203 318258 - 428252203 -
# lo0 16384 127 localhost 318258 - 428252203 318258 - 428252203 -
# lo0 16384 localhost ::1 318258 - 428252203 318258 - 428252203 -
# gif0* 1280 <Link#2> 0 0 0 0 0 0 0
# stf0* 1280 <Link#3> 0 0 0 0 0 0 0
# en0 1500 <Link#4> 04:0c:ce:db:4e:fa 20801309 0 13835457425 15149389 0 11508790198 0
# en0 1500 seneca.loca fe80:4::60c:ceff: 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 192.168.1 192.168.1.63 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# p2p0 2304 <Link#5> 06:0c:ce:db:4e:fa 0 0 0 0 0 0 0
# ham0 1404 <Link#6> 7a:79:05:4d:bf:f5 30100 0 6815204 18742 0 8494811 0
# ham0 1404 5 5.77.191.245 30100 - 6815204 18742 - 8494811 -
# ham0 1404 seneca.loca fe80:6::7879:5ff: 30100 - 6815204 18742 - 8494811 -
# ham0 1404 2620:9b::54 2620:9b::54d:bff5 30100 - 6815204 18742 - 8494811 -
headers = lines[0].split()
# Given the irregular structure of the table above, better to parse from the end of each line
# Verify headers first
# -7 -6 -5 -4 -3 -2 -1
for h in ("Ipkts", "Ierrs", "Ibytes", "Opkts", "Oerrs", "Obytes", "Coll"):
if h not in headers:
self.log.error("%s not found in %s; cannot parse", h, headers)
return False
current = None
for l in lines[1:]:
# Another header row, abort now, this is IPv6 land
if "Name" in l:
break
x = l.split()
if len(x) == 0:
break
iface = x[0]
if iface.endswith("*"):
iface = iface[:-1]
if iface == current:
# skip multiple lines of same interface
continue
else:
current = iface
# Filter inactive interfaces
if self._parse_value(x[-5]) or self._parse_value(x[-2]):
iface = current
metrics = {
'bytes_rcvd': self._parse_value(x[-5]),
'bytes_sent': self._parse_value(x[-2]),
'packets_in.count': self._parse_value(x[-7]),
'packets_in.error': self._parse_value(x[-6]),
'packets_out.count': self._parse_value(x[-4]),
'packets_out.error': self._parse_value(x[-3]),
}
self._submit_devicemetrics(iface, metrics, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection stats.")
try:
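            # Note: the adjacent string literals "-p" "tcp" below concatenate into the single
            # argument "-ptcp" (implicit Python string-literal concatenation).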
netstat, _, _ = get_subprocess_output(["netstat", "-s", "-p" "tcp"], self.log)
# 3651535 packets sent
# 972097 data packets (615753248 bytes)
# 5009 data packets (2832232 bytes) retransmitted
# 0 resends initiated by MTU discovery
# 2086952 ack-only packets (471 delayed)
# 0 URG only packets
# 0 window probe packets
# 310851 window update packets
# 336829 control packets
# 0 data packets sent after flow control
# 3058232 checksummed in software
# 3058232 segments (571218834 bytes) over IPv4
# 0 segments (0 bytes) over IPv6
# 4807551 packets received
# 1143534 acks (for 616095538 bytes)
# 165400 duplicate acks
# ...
self._submit_regexed_values(netstat, BSD_TCP_METRICS, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting TCP stats.")
proc_location = self.agentConfig.get('procfs_path', '/proc').rstrip('/')
net_proc_base_location = self._get_net_proc_base_location(proc_location)
if self._is_collect_cx_state_runnable(net_proc_base_location):
try:
self.log.debug("Using `netstat` to collect connection state")
output_TCP, _, _ = get_subprocess_output(["netstat", "-n", "-a", "-p", "tcp"], self.log)
output_UDP, _, _ = get_subprocess_output(["netstat", "-n", "-a", "-p", "udp"], self.log)
lines = output_TCP.splitlines() + output_UDP.splitlines()
# Active Internet connections (w/o servers)
# Proto Recv-Q Send-Q Local Address Foreign Address State
# tcp 0 0 46.105.75.4:80 79.220.227.193:2032 SYN_RECV
# tcp 0 0 46.105.75.4:143 90.56.111.177:56867 ESTABLISHED
# tcp 0 0 46.105.75.4:50468 107.20.207.175:443 TIME_WAIT
# tcp6 0 0 46.105.75.4:80 93.15.237.188:58038 FIN_WAIT2
# tcp6 0 0 46.105.75.4:80 79.220.227.193:2029 ESTABLISHED
# udp 0 0 0.0.0.0:123 0.0.0.0:*
# udp6 0 0 :::41458 :::*
metrics = self._parse_linux_cx_state(lines[2:], self.tcp_states['netstat'], 5)
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection states.")
def _check_solaris(self, instance):
# Can't get bytes sent and received via netstat
# Default to kstat -p link:0:
custom_tags = instance.get('tags', [])
try:
netstat, _, _ = get_subprocess_output(["kstat", "-p", "link:0:"], self.log)
metrics_by_interface = self._parse_solaris_netstat(netstat)
for interface, metrics in iteritems(metrics_by_interface):
self._submit_devicemetrics(interface, metrics, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting kstat stats.")
try:
netstat, _, _ = get_subprocess_output(["netstat", "-s", "-P" "tcp"], self.log)
# TCP: tcpRtoAlgorithm= 4 tcpRtoMin = 200
# tcpRtoMax = 60000 tcpMaxConn = -1
# tcpActiveOpens = 57 tcpPassiveOpens = 50
# tcpAttemptFails = 1 tcpEstabResets = 0
# tcpCurrEstab = 0 tcpOutSegs = 254
# tcpOutDataSegs = 995 tcpOutDataBytes =1216733
# tcpRetransSegs = 0 tcpRetransBytes = 0
# tcpOutAck = 185 tcpOutAckDelayed = 4
# ...
self._submit_regexed_values(netstat, SOLARIS_TCP_METRICS, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting TCP stats.")
def _parse_solaris_netstat(self, netstat_output):
"""
Return a mapping of network metrics by interface. For example:
{ interface:
{'bytes_sent': 0,
'bytes_rcvd': 0,
...
}
}
"""
# Here's an example of the netstat output:
#
# link:0:net0:brdcstrcv 527336
# link:0:net0:brdcstxmt 1595
# link:0:net0:class net
# link:0:net0:collisions 0
# link:0:net0:crtime 16359935.2637943
# link:0:net0:ierrors 0
# link:0:net0:ifspeed 10000000000
# link:0:net0:ipackets 682834
# link:0:net0:ipackets64 682834
# link:0:net0:link_duplex 0
# link:0:net0:link_state 1
# link:0:net0:multircv 0
# link:0:net0:multixmt 1595
# link:0:net0:norcvbuf 0
# link:0:net0:noxmtbuf 0
# link:0:net0:obytes 12820668
# link:0:net0:obytes64 12820668
# link:0:net0:oerrors 0
# link:0:net0:opackets 105445
# link:0:net0:opackets64 105445
# link:0:net0:rbytes 113983614
# link:0:net0:rbytes64 113983614
# link:0:net0:snaptime 16834735.1607669
# link:0:net0:unknowns 0
# link:0:net0:zonename 53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
# link:0:net1:brdcstrcv 4947620
# link:0:net1:brdcstxmt 1594
# link:0:net1:class net
# link:0:net1:collisions 0
# link:0:net1:crtime 16359935.2839167
# link:0:net1:ierrors 0
# link:0:net1:ifspeed 10000000000
# link:0:net1:ipackets 4947620
# link:0:net1:ipackets64 4947620
# link:0:net1:link_duplex 0
# link:0:net1:link_state 1
# link:0:net1:multircv 0
# link:0:net1:multixmt 1594
# link:0:net1:norcvbuf 0
# link:0:net1:noxmtbuf 0
# link:0:net1:obytes 73324
# link:0:net1:obytes64 73324
# link:0:net1:oerrors 0
# link:0:net1:opackets 1594
# link:0:net1:opackets64 1594
# link:0:net1:rbytes 304384894
# link:0:net1:rbytes64 304384894
# link:0:net1:snaptime 16834735.1613302
# link:0:net1:unknowns 0
# link:0:net1:zonename 53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
# A mapping of solaris names -> datadog names
metric_by_solaris_name = {
'rbytes64': 'bytes_rcvd',
'obytes64': 'bytes_sent',
'ipackets64': 'packets_in.count',
'ierrors': 'packets_in.error',
'opackets64': 'packets_out.count',
'oerrors': 'packets_out.error',
}
lines = [l for l in netstat_output.splitlines() if len(l) > 0]
metrics_by_interface = {}
for l in lines:
# Parse the metric & interface.
cols = l.split()
link, n, iface, name = cols[0].split(":")
assert link == "link"
# Get the datadog metric name.
ddname = metric_by_solaris_name.get(name, None)
if ddname is None:
continue
# Add it to this interface's list of metrics.
metrics = metrics_by_interface.get(iface, {})
metrics[ddname] = self._parse_value(cols[1])
metrics_by_interface[iface] = metrics
return metrics_by_interface
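    # Illustrative sketch (not part of the original check): the kstat line
    #   "link:0:net0:rbytes64   113983614"
    # splits on ":" into ("link", "0", "net0", "rbytes64"), so metrics_by_interface ends up
    # containing {'net0': {'bytes_rcvd': <parsed 113983614>, ...}} once _parse_value converts
    # the second column.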
def _check_psutil(self, instance):
"""
Gather metrics about connections states and interfaces counters
using psutil facilities
"""
custom_tags = instance.get('tags', [])
if self._collect_cx_state:
self._cx_state_psutil(tags=custom_tags)
self._cx_counters_psutil(tags=custom_tags)
def _cx_state_psutil(self, tags=None):
"""
Collect metrics about connections state using psutil
"""
metrics = defaultdict(int)
tags = [] if tags is None else tags
for conn in psutil.net_connections():
protocol = self._parse_protocol_psutil(conn)
status = self.tcp_states['psutil'].get(conn.status)
metric = self.cx_state_gauge.get((protocol, status))
if metric is None:
self.log.warning('Metric not found for: %s,%s', protocol, status)
else:
metrics[metric] += 1
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=tags)
def _cx_counters_psutil(self, tags=None):
"""
Collect metrics about interfaces counters using psutil
"""
tags = [] if tags is None else tags
for iface, counters in iteritems(psutil.net_io_counters(pernic=True)):
metrics = {
'bytes_rcvd': counters.bytes_recv,
'bytes_sent': counters.bytes_sent,
'packets_in.count': counters.packets_recv,
'packets_in.drop': counters.dropin,
'packets_in.error': counters.errin,
'packets_out.count': counters.packets_sent,
'packets_out.drop': counters.dropout,
'packets_out.error': counters.errout,
}
self._submit_devicemetrics(iface, metrics, tags)
def _parse_protocol_psutil(self, conn):
"""
Returns a string describing the protocol for the given connection
in the form `tcp4`, 'udp4` as in `self.cx_state_gauge`
"""
protocol = self.PSUTIL_TYPE_MAPPING.get(conn.type, '')
family = self.PSUTIL_FAMILY_MAPPING.get(conn.family, '')
return '{}{}'.format(protocol, family)
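    # Example (mappings assumed for illustration only): a connection with type=SOCK_STREAM and
    # family=AF_INET would map to 'tcp' + '4' == 'tcp4', matching the keys of cx_state_gauge.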
def _parse_queues(self, tool, ss_output):
"""
for each line of `ss_output`, returns a triplet with:
* a connection state (`established`, `listening`)
* the receive queue size
* the send queue size
"""
for line in ss_output.splitlines():
fields = line.split()
if len(fields) < (6 if tool == "netstat" else 3):
continue
state_column = 0 if tool == "ss" else 5
try:
state = self.tcp_states[tool][fields[state_column]]
except KeyError:
continue
yield (state, fields[1], fields[2])
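    # Illustrative sketch (not part of the original check), assuming the usual `ss` column
    # order (State, Recv-Q, Send-Q, ...): a line like
    #   "ESTAB  0  0  10.0.0.1:22  10.0.0.2:55044"
    # has its first field looked up in tcp_states['ss'], and the generator yields
    # (<mapped state>, '0', '0'), i.e. the state plus the receive and send queue sizes as strings.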
def _collect_ena(self, iface):
"""
Collect ENA metrics for given interface.
ENA metrics are collected via the ioctl SIOCETHTOOL call. At the time of writing
this method, there are no maintained Python libraries that do this. The solution
is based on:
* https://github.com/safchain/ethtool
* https://gist.github.com/yunazuno/d7cd7e1e127a39192834c75d85d45df9
"""
ethtool_socket = None
try:
ethtool_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
return self._get_ena_metrics(iface, ethtool_socket)
except OSError as e:
# this will happen for interfaces that don't support SIOCETHTOOL - e.g. loopback or docker
self.log.debug('OSError while trying to collect ENA metrics for interface %s: %s', iface, str(e))
except Exception:
self.log.exception('Unable to collect ENA metrics for interface %s', iface)
finally:
if ethtool_socket is not None:
ethtool_socket.close()
return {}
def _send_ethtool_ioctl(self, iface, sckt, data):
"""
Send an ioctl SIOCETHTOOL call for given interface with given data.
"""
ifr = struct.pack('16sP', iface.encode('utf-8'), data.buffer_info()[0])
fcntl.ioctl(sckt.fileno(), SIOCETHTOOL, ifr)
def _get_ethtool_gstringset(self, iface, sckt):
"""
Retrieve names of all ethtool stats for given interface.
"""
sset_info = array.array('B', struct.pack('IIQI', ETHTOOL_GSSET_INFO, 0, 1 << ETH_SS_STATS, 0))
self._send_ethtool_ioctl(iface, sckt, sset_info)
sset_mask, sset_len = struct.unpack('8xQI', sset_info)
if sset_mask == 0:
sset_len = 0
strings = array.array('B', struct.pack('III', ETHTOOL_GSTRINGS, ETH_SS_STATS, sset_len))
strings.extend([0] * sset_len * ETH_GSTRING_LEN)
self._send_ethtool_ioctl(iface, sckt, strings)
all_names = []
for i in range(sset_len):
offset = 12 + ETH_GSTRING_LEN * i
s = strings[offset : offset + ETH_GSTRING_LEN]
s = s.tobytes() if PY3 else s.tostring()
s = s.partition(b'\x00')[0].decode('utf-8')
all_names.append(s)
return all_names
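    # Note on the offsets above: the ETHTOOL_GSTRINGS reply starts with a 12-byte header
    # (three 32-bit values: command, string set, length), which is why string data is read
    # starting at offset 12 in ETH_GSTRING_LEN-sized chunks.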
def _get_ena_metrics(self, iface, sckt):
"""
Get all ENA metrics specified in ENA_METRICS_NAMES list and their values from ethtool.
"""
stats_names = list(self._get_ethtool_gstringset(iface, sckt))
stats_count = len(stats_names)
stats = array.array('B', struct.pack('II', ETHTOOL_GSTATS, stats_count))
# we need `stats_count * (length of uint64)` for the result
stats.extend([0] * len(struct.pack('Q', 0)) * stats_count)
self._send_ethtool_ioctl(iface, sckt, stats)
metrics = {}
for i, stat_name in enumerate(stats_names):
if stat_name in ENA_METRIC_NAMES:
offset = 8 + 8 * i
value = struct.unpack('Q', stats[offset : offset + 8])[0]
metrics[ENA_METRIC_PREFIX + stat_name] = value
return metrics
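    # Illustrative note (not part of the original check) on how the two ethtool helpers fit
    # together: the ETHTOOL_GSTATS reply begins with an 8-byte header (two 32-bit values:
    # command and stat count), so stat i is the unsigned 64-bit value at byte offset 8 + 8*i;
    # its name comes from the string set fetched by _get_ethtool_gstringset(), and only names
    # listed in ENA_METRIC_NAMES are reported, prefixed with ENA_METRIC_PREFIX.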
| 46.294333 | 149 | 0.548829 |
80d17fcb83a1e64b803b1b61cd3b7ed367858927 | 9,659 | py | Python | official/nlp/tools/export_tfhub.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | ["Apache-2.0"] | 1 | 2021-05-22T12:50:50.000Z | 2021-05-22T12:50:50.000Z | official/nlp/tools/export_tfhub.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | ["MIT"] | null | null | null | official/nlp/tools/export_tfhub.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | ["MIT"] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Exports a BERT-like encoder and its preprocessing as SavedModels for TF Hub.
This tool creates preprocessor and encoder SavedModels suitable for uploading
to https://tfhub.dev that implement the preprocessor and encoder APIs defined
at https://www.tensorflow.org/hub/common_saved_model_apis/text.
For a full usage guide, see
https://github.com/tensorflow/models/blob/master/official/nlp/docs/tfhub.md
Minimal usage examples:
1) Exporting an Encoder from checkpoint and config.
```
export_tfhub \
--encoder_config_file=${BERT_DIR:?}/bert_encoder.yaml \
--model_checkpoint_path=${BERT_DIR:?}/bert_model.ckpt \
--vocab_file=${BERT_DIR:?}/vocab.txt \
--export_type=model \
--export_path=/tmp/bert_model
```
An --encoder_config_file can specify encoder types other than BERT.
For BERT, a --bert_config_file in the legacy JSON format can be passed instead.
Flag --vocab_file (and flag --do_lower_case, whose default value is guessed
from the vocab_file path) capture how BertTokenizer was used in pre-training.
Use flag --sp_model_file instead if SentencepieceTokenizer was used.
Changing --export_type to model_with_mlm additionally creates an `.mlm`
subobject on the exported SavedModel that can be called to produce
the logits of the Masked Language Model task from pretraining.
The help string for flag --model_checkpoint_path explains the checkpoint
formats required for each --export_type.
2) Exporting a preprocessor SavedModel
```
export_tfhub \
--vocab_file ${BERT_DIR:?}/vocab.txt \
--export_type preprocessing --export_path /tmp/bert_preprocessing
```
Be sure to use flag values that match the encoder and how it has been
pre-trained (see above for --vocab_file vs --sp_model_file).
If your encoder has been trained with text preprocessing for which tfhub.dev
already has SavedModel, you could guide your users to reuse that one instead
of exporting and publishing your own.
TODO(b/175369555): When exporting to users of TensorFlow 2.4, add flag
`--experimental_disable_assert_in_preprocessing`.
"""
from absl import app
from absl import flags
import gin
from official.modeling import hyperparams
from official.nlp.bert import configs
from official.nlp.configs import encoders
from official.nlp.tools import export_tfhub_lib
FLAGS = flags.FLAGS
flags.DEFINE_enum(
"export_type", "model",
["model", "model_with_mlm", "preprocessing"],
"The overall type of SavedModel to export. Flags "
"--bert_config_file/--encoder_config_file and --vocab_file/--sp_model_file "
"control which particular encoder model and preprocessing are exported.")
flags.DEFINE_string(
"export_path", None,
"Directory to which the SavedModel is written.")
flags.DEFINE_string(
"encoder_config_file", None,
"A yaml file representing `encoders.EncoderConfig` to define the encoder "
"(BERT or other). "
"Exactly one of --bert_config_file and --encoder_config_file can be set. "
"Needed for --export_type model and model_with_mlm.")
flags.DEFINE_string(
"bert_config_file", None,
"A JSON file with a legacy BERT configuration to define the BERT encoder. "
"Exactly one of --bert_config_file and --encoder_config_file can be set. "
"Needed for --export_type model and model_with_mlm.")
flags.DEFINE_bool(
"copy_pooler_dense_to_encoder", False,
"When the model is trained using `BertPretrainerV2`, the pool layer "
"of next sentence prediction task exists in `ClassificationHead` passed "
"to `BertPretrainerV2`. If True, we will copy this pooler's dense layer "
"to the encoder that is exported by this tool (as in classic BERT). "
"Using `BertPretrainerV2` and leaving this False exports an untrained "
"(randomly initialized) pooling layer, which some authors recommend for "
"subsequent fine-tuning,")
flags.DEFINE_string(
"model_checkpoint_path", None,
"File path to a pre-trained model checkpoint. "
"For --export_type model, this has to be an object-based (TF2) checkpoint "
"that can be restored to `tf.train.Checkpoint(encoder=encoder)` "
"for the `encoder` defined by the config file."
"(Legacy checkpoints with `model=` instead of `encoder=` are also "
"supported for now.) "
"For --export_type model_with_mlm, it must be restorable to "
"`tf.train.Checkpoint(**BertPretrainerV2(...).checkpoint_items)`. "
"(For now, `tf.train.Checkpoint(pretrainer=BertPretrainerV2(...))` is also "
"accepted.)")
flags.DEFINE_string(
"vocab_file", None,
"For encoders trained on BertTokenzier input: "
"the vocabulary file that the encoder model was trained with. "
"Exactly one of --vocab_file and --sp_model_file can be set. "
"Needed for --export_type model, model_with_mlm and preprocessing.")
flags.DEFINE_string(
"sp_model_file", None,
"For encoders trained on SentencepieceTokenzier input: "
"the SentencePiece .model file that the encoder model was trained with. "
"Exactly one of --vocab_file and --sp_model_file can be set. "
"Needed for --export_type model, model_with_mlm and preprocessing.")
flags.DEFINE_bool(
"do_lower_case", None,
"Whether to lowercase before tokenization. "
"If left as None, and --vocab_file is set, do_lower_case will be enabled "
"if 'uncased' appears in the name of --vocab_file. "
"If left as None, and --sp_model_file set, do_lower_case defaults to true. "
"Needed for --export_type model, model_with_mlm and preprocessing.")
flags.DEFINE_integer(
"default_seq_length", 128,
"The sequence length of preprocessing results from "
"top-level preprocess method. This is also the default "
"sequence length for the bert_pack_inputs subobject."
"Needed for --export_type preprocessing.")
flags.DEFINE_bool(
"tokenize_with_offsets", False, # TODO(b/181866850)
"Whether to export a .tokenize_with_offsets subobject for "
"--export_type preprocessing.")
flags.DEFINE_multi_string(
"gin_file", default=None,
help="List of paths to the config files.")
flags.DEFINE_multi_string(
"gin_params", default=None,
help="List of Gin bindings.")
flags.DEFINE_bool( # TODO(b/175369555): Remove this flag and its use.
"experimental_disable_assert_in_preprocessing", False,
"Export a preprocessing model without tf.Assert ops. "
"Usually, that would be a bad idea, except TF2.4 has an issue with "
"Assert ops in tf.functions used in Dataset.map() on a TPU worker, "
"and omitting the Assert ops lets SavedModels avoid the issue.")
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
if bool(FLAGS.vocab_file) == bool(FLAGS.sp_model_file):
raise ValueError("Exactly one of `vocab_file` and `sp_model_file` "
"can be specified, but got %s and %s." %
(FLAGS.vocab_file, FLAGS.sp_model_file))
do_lower_case = export_tfhub_lib.get_do_lower_case(
FLAGS.do_lower_case, FLAGS.vocab_file, FLAGS.sp_model_file)
if FLAGS.export_type in ("model", "model_with_mlm"):
if bool(FLAGS.bert_config_file) == bool(FLAGS.encoder_config_file):
raise ValueError("Exactly one of `bert_config_file` and "
"`encoder_config_file` can be specified, but got "
"%s and %s." %
(FLAGS.bert_config_file, FLAGS.encoder_config_file))
if FLAGS.bert_config_file:
bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
encoder_config = None
else:
bert_config = None
encoder_config = encoders.EncoderConfig()
encoder_config = hyperparams.override_params_dict(
encoder_config, FLAGS.encoder_config_file, is_strict=True)
export_tfhub_lib.export_model(
FLAGS.export_path,
bert_config=bert_config,
encoder_config=encoder_config,
model_checkpoint_path=FLAGS.model_checkpoint_path,
vocab_file=FLAGS.vocab_file,
sp_model_file=FLAGS.sp_model_file,
do_lower_case=do_lower_case,
with_mlm=FLAGS.export_type == "model_with_mlm",
copy_pooler_dense_to_encoder=FLAGS.copy_pooler_dense_to_encoder)
elif FLAGS.export_type == "preprocessing":
export_tfhub_lib.export_preprocessing(
FLAGS.export_path,
vocab_file=FLAGS.vocab_file,
sp_model_file=FLAGS.sp_model_file,
do_lower_case=do_lower_case,
default_seq_length=FLAGS.default_seq_length,
tokenize_with_offsets=FLAGS.tokenize_with_offsets,
experimental_disable_assert=
FLAGS.experimental_disable_assert_in_preprocessing)
else:
raise app.UsageError(
"Unknown value '%s' for flag --export_type" % FLAGS.export_type)
if __name__ == "__main__":
app.run(main)
| 43.904545 | 81 | 0.719847 |
7a554b5a51d19abfd439467f261ec053abfd489b | 21,850 | py | Python | classification/main_dmt.py | voldemortX/DST-CBC | e392313c129f6814c1a1c0f20c0abbd5505c3d7d | ["BSD-3-Clause"] | 103 | 2020-04-21T01:25:16.000Z | 2022-03-24T07:45:45.000Z | classification/main_dmt.py | voldemortX/DST-CBC | e392313c129f6814c1a1c0f20c0abbd5505c3d7d | ["BSD-3-Clause"] | 13 | 2021-03-24T06:52:21.000Z | 2022-01-18T08:17:50.000Z | classification/main_dmt.py | voldemortX/DST-CBC | e392313c129f6814c1a1c0f20c0abbd5505c3d7d | ["BSD-3-Clause"] | 12 | 2020-04-29T02:33:11.000Z | 2021-12-28T07:59:20.000Z |
import os
import time
import torch
import argparse
import random
import pickle
import numpy as np
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from models.wideresnet import wrn_28_2
from utils.common import num_classes_cifar10, mean_cifar10, std_cifar10, input_sizes_cifar10, base_cifar10, \
load_checkpoint, save_checkpoint, EMA, rank_label_confidence
from utils.datasets import CIFAR10
from utils.mixup import mixup_data
from utils.losses import SigmoidAscendingMixupDMTLoss as MixupDynamicMutualLoss, DynamicMutualLoss
from torchvision.transforms import Compose, RandomCrop, RandomHorizontalFlip, Normalize, ToTensor
from utils.randomrandaugment import RandomRandAugment
from utils.cutout import Cutout
from utils.autoaugment import CIFAR10Policy
from accelerate import Accelerator
from torch.cuda.amp import autocast, GradScaler
def get_transforms(auto_augment, input_sizes, m, mean, n, std):
if auto_augment:
# AutoAugment + Cutout
train_transforms = Compose([
RandomCrop(size=input_sizes, padding=4, fill=128),
RandomHorizontalFlip(p=0.5),
CIFAR10Policy(),
ToTensor(),
Normalize(mean=mean, std=std),
Cutout(n_holes=1, length=16)
])
else:
# RandAugment + Cutout
train_transforms = Compose([
RandomCrop(size=input_sizes, padding=4, fill=128),
RandomHorizontalFlip(p=0.5),
RandomRandAugment(n=n, m_max=m), # This version includes cutout
ToTensor(),
Normalize(mean=mean, std=std)
])
test_transforms = Compose([
ToTensor(),
Normalize(mean=mean, std=std)
])
return test_transforms, train_transforms
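# Illustrative sketch (hypothetical call, not part of the original script): for CIFAR-10 the
# transforms would typically be built as
#   test_tf, train_tf = get_transforms(auto_augment=False, input_sizes=input_sizes_cifar10,
#                                      m=10, mean=mean_cifar10, n=1, std=std_cifar10)
# giving a RandAugment + Cutout training pipeline and a plain ToTensor + Normalize test pipeline.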
def generate_pseudo_labels(net, device, loader, label_ratio, num_images, filename, is_mixed_precision):
k = rank_label_confidence(net=net, device=device, loader=loader, ratio=label_ratio, num_images=num_images,
is_mixed_precision=is_mixed_precision)
print(k)
# 1 forward pass (build pickle file)
selected_files = None
selected_predictions = None
net.eval()
with torch.no_grad():
for images, original_file in tqdm(loader):
# Inference
images = images.to(device)
with autocast(is_mixed_precision):
outputs = net(images)
temp = torch.nn.functional.softmax(input=outputs, dim=-1) # ! softmax
pseudo_probabilities = temp.max(dim=-1).values
# Select
temp_predictions = temp[pseudo_probabilities > k].cpu().numpy()
temp_files = original_file[pseudo_probabilities.cpu() > k].numpy()
# Append
selected_files = temp_files if selected_files is None else np.concatenate((selected_files, temp_files))
selected_predictions = temp_predictions if selected_predictions is None else \
np.concatenate((selected_predictions, temp_predictions))
# Save (label format: softmax results in numpy)
with open(filename, 'wb') as f:
pickle.dump({'data': selected_files, 'labels': selected_predictions}, f)
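# Illustrative note (assumption, not part of the original script): the pickle written above can
# be read back with
#   with open(filename, 'rb') as f:
#       pseudo = pickle.load(f)
# where pseudo['data'] holds the selected sample identifiers from the unlabeled loader and
# pseudo['labels'] the corresponding softmax vectors, i.e. the label format consumed by the
# '<prefix>_pseudo' split loaded in init().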
def init(mean, std, input_sizes, base, num_workers, prefix, val_set, train, batch_size_labeled, batch_size_pseudo,
auto_augment=False, n=1, m=1, dataset='cifar10'):
test_transforms, train_transforms = get_transforms(auto_augment, input_sizes, m, mean, n, std)
# Data sets
if dataset == 'cifar10':
unlabeled_set = CIFAR10(root=base, set_name=prefix + '_unlabeled', transform=test_transforms, label=False)
if train:
labeled_set = CIFAR10(root=base, set_name=prefix + '_labeled', transform=train_transforms, label=True)
pseudo_labeled_set = CIFAR10(root=base, set_name=prefix + '_pseudo', transform=train_transforms,
label=True)
val_set = CIFAR10(root=base, set_name=val_set, transform=test_transforms, label=True)
else:
raise NotImplementedError
# Data loaders
unlabeled_loader = torch.utils.data.DataLoader(dataset=unlabeled_set, batch_size=batch_size_labeled,
num_workers=num_workers * 4, shuffle=False)
if train:
val_loader = torch.utils.data.DataLoader(dataset=val_set, batch_size=batch_size_labeled,
num_workers=num_workers, shuffle=False)
labeled_loader = torch.utils.data.DataLoader(dataset=labeled_set, batch_size=batch_size_labeled,
num_workers=num_workers, shuffle=True)
pseudo_labeled_loader = torch.utils.data.DataLoader(dataset=pseudo_labeled_set, batch_size=batch_size_pseudo,
num_workers=num_workers * 3, shuffle=True)
else:
val_loader = None
labeled_loader = None
pseudo_labeled_loader = None
return labeled_loader, unlabeled_loader, pseudo_labeled_loader, val_loader, unlabeled_set.__len__()
def test(loader, device, net, fine_grain=False, is_mixed_precision=False):
# Evaluate
net.eval()
test_correct = 0
fine_grain_correct = 0.0
test_all = 0
with torch.no_grad():
for image, target in tqdm(loader):
image, target = image.to(device), target.to(device)
with autocast(is_mixed_precision):
output = net(image)
test_all += target.shape[0]
if fine_grain:
predictions = output.softmax(1)
temp = predictions.max(1)
indices = temp.indices
values = temp.values
fine_grain_correct += values[indices == target].sum().item()
test_correct += (target == output.argmax(1)).sum().item()
test_acc = test_correct / test_all * 100
print('%d images tested.' % int(test_all))
print('Test accuracy: %.4f' % test_acc)
if fine_grain:
fine_grain_acc = fine_grain_correct / test_all * 100
print('Fine-grained accuracy: %.4f' % fine_grain_acc)
return fine_grain_acc
return test_acc
def train(writer, labeled_loader, pseudo_labeled_loader, val_loader, device, criterion, net, optimizer, lr_scheduler,
num_epochs, tensorboard_prefix, gamma1, gamma2, labeled_weight, start_at, num_classes, decay=0.999,
alpha=-1, is_mixed_precision=False, loss_freq=10, val_num_steps=None, best_acc=0, fine_grain=False):
# Define validation and loss value print frequency
# Pseudo labeled defines epoch
min_len = len(pseudo_labeled_loader)
if min_len > loss_freq:
loss_num_steps = int(min_len / loss_freq)
else: # For extremely small sets
loss_num_steps = min_len
if val_num_steps is None:
val_num_steps = min_len
if is_mixed_precision:
scaler = GradScaler()
net.train()
    # Use EMA to report final performance instead of selecting the best checkpoint with valtiny
ema = EMA(net=net, decay=decay)
epoch = 0
# Training
running_loss = 0.0
running_stats = {'disagree': -1, 'current_win': -1, 'avg_weights': 1.0, 'gamma1': 0, 'gamma2': 0}
iter_labeled = iter(labeled_loader)
while epoch < num_epochs:
train_correct = 0
train_all = 0
time_now = time.time()
for i, data in enumerate(pseudo_labeled_loader, 0):
# Pseudo labeled data
inputs_pseudo, labels_pseudo = data
inputs_pseudo, labels_pseudo = inputs_pseudo.to(device), labels_pseudo.to(device)
# Hard labels
probs_pseudo = labels_pseudo.clone().detach()
labels_pseudo = labels_pseudo.argmax(-1) # data type?
# Labeled data
inputs_labeled, labels_labeled = next(iter_labeled, (0, 0))
if type(inputs_labeled) == type(labels_labeled) == int:
iter_labeled = iter(labeled_loader)
inputs_labeled, labels_labeled = next(iter_labeled, (0, 0))
inputs_labeled, labels_labeled = inputs_labeled.to(device), labels_labeled.to(device)
# To probabilities (in fact, just one-hot)
probs_labeled = torch.nn.functional.one_hot(labels_labeled.clone().detach(), num_classes=num_classes) \
.float()
# Combine
inputs = torch.cat([inputs_pseudo, inputs_labeled])
labels = torch.cat([labels_pseudo, labels_labeled])
probs = torch.cat([probs_pseudo, probs_labeled])
optimizer.zero_grad()
train_all += labels.shape[0]
# mixup data within the batch
if alpha != -1:
dynamic_weights, stats = criterion.dynamic_weights_calc(
net=net, inputs=inputs, targets=probs,
split_index=inputs_pseudo.shape[0], labeled_weight=labeled_weight)
inputs, dynamic_weights, labels_a, labels_b, lam = mixup_data(x=inputs, w=dynamic_weights, y=labels,
alpha=alpha, keep_max=True)
with autocast(is_mixed_precision):
outputs = net(inputs)
if alpha != -1:
# Pseudo training accuracy & interesting loss
predicted = outputs.argmax(1)
train_correct += (lam * (predicted == labels_a).sum().float().item()
+ (1 - lam) * (predicted == labels_b).sum().float().item())
loss, true_loss = criterion(pred=outputs, y_a=labels_a, y_b=labels_b, lam=lam,
dynamic_weights=dynamic_weights)
else:
train_correct += (labels == outputs.argmax(1)).sum().item()
loss, true_loss, stats = criterion(inputs=outputs, targets=probs, split_index=inputs_pseudo.shape[0],
gamma1=gamma1, gamma2=gamma2)
if is_mixed_precision:
accelerator.backward(scaler.scale(loss))
scaler.step(optimizer)
scaler.update()
else:
accelerator.backward(loss)
optimizer.step()
criterion.step()
if lr_scheduler is not None:
lr_scheduler.step()
# EMA update
ema.update(net=net)
# Logging
running_loss += true_loss
for key in stats.keys():
running_stats[key] += stats[key]
current_step_num = int(epoch * len(pseudo_labeled_loader) + i + 1)
if current_step_num % loss_num_steps == (loss_num_steps - 1):
print('[%d, %d] loss: %.4f' % (epoch + 1, i + 1, running_loss / loss_num_steps))
writer.add_scalar(tensorboard_prefix + 'training loss',
running_loss / loss_num_steps,
current_step_num)
running_loss = 0.0
for key in stats.keys():
print('[%d, %d] ' % (epoch + 1, i + 1) + key + ' : %.4f' % (running_stats[key] / loss_num_steps))
writer.add_scalar(tensorboard_prefix + key,
running_stats[key] / loss_num_steps,
current_step_num)
running_stats[key] = 0.0
# Validate and find the best snapshot
if current_step_num % val_num_steps == (val_num_steps - 1) or \
current_step_num == num_epochs * len(pseudo_labeled_loader) - 1:
# Apex bug https://github.com/NVIDIA/apex/issues/706, fixed in PyTorch1.6, kept here for BC
test_acc = test(loader=val_loader, device=device, net=net, fine_grain=fine_grain,
is_mixed_precision=is_mixed_precision)
writer.add_scalar(tensorboard_prefix + 'test accuracy',
test_acc,
current_step_num)
net.train()
                # Record best model (straight to disk)
if test_acc >= best_acc:
best_acc = test_acc
save_checkpoint(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,
is_mixed_precision=is_mixed_precision)
# Evaluate training accuracies (same metric as validation, but must be on-the-fly to save time)
train_acc = train_correct / train_all * 100
print('Train accuracy: %.4f' % train_acc)
writer.add_scalar(tensorboard_prefix + 'train accuracy',
train_acc,
epoch + 1)
epoch += 1
print('Epoch time: %.2fs' % (time.time() - time_now))
ema.fill_in_bn(state_dict=net.state_dict())
save_checkpoint(net=ema, optimizer=None, lr_scheduler=None, is_mixed_precision=False,
filename='temp-ema.pt')
return best_acc
if __name__ == '__main__':
# Settings
parser = argparse.ArgumentParser(description='PyTorch 1.6.0 && torchvision 0.7.0')
parser.add_argument('--exp-name', type=str, default='auto',
help='Name of the experiment (default: auto)')
parser.add_argument('--dataset', type=str, default='cifar10',
help='Train/Evaluate on Cifar10 (default: cifar10)')
parser.add_argument('--val-num-steps', type=int, default=1000,
help='How many steps between validations (default: 1000)')
parser.add_argument('--seed', type=int, default=1,
help='Random seed (default: 1)')
parser.add_argument('--gamma1', type=float, default=1,
help='Gamma for entropy minimization in agreement (default: 1)')
parser.add_argument('--gamma2', type=float, default=1,
help='Gamma for learning in disagreement (default: 1)')
parser.add_argument('--aa', action='store_true', default=False,
help='Use AutoAugment instead of RandAugment (default: False)')
parser.add_argument('--n', type=int, default=1,
help='N in RandAugment (default: 1)')
parser.add_argument('--m', type=int, default=10,
help='Max M in RandAugment (default: 10)')
parser.add_argument('--alpha', type=float, default=0.75,
help='Alpha for mixup, -1 -> no mixup (default: 0.75)')
parser.add_argument('--lr', type=float, default=0.2,
help='Initial learning rate (default: 0.2)')
parser.add_argument('--labeled-weight', type=float, default=1,
help='Weight for labeled loss (default: 1)')
parser.add_argument('--weight-decay', type=float, default=0.0005,
help='Weight decay for SGD (default: 0.0005)')
parser.add_argument('--epochs', type=int, default=300,
help='Number of training epochs (default: 300)')
parser.add_argument('--start-at', type=int, default=0,
                        help='Start dynamic weighting at which epoch (default: 0)')
parser.add_argument('--num-workers', type=int, default=1,
help='Number of workers for loading (default: 1)')
parser.add_argument('--batch-size-labeled', type=int, default=64,
help='Batch size for labeled data (default: 64)')
parser.add_argument('--batch-size-pseudo', type=int, default=448,
help='Batch size for pseudo labeled data (default: 448)')
parser.add_argument('--do-not-save', action='store_false', default=True,
help='Save model (default: True)')
parser.add_argument('--mixed-precision', action='store_true', default=False,
help='Enable mixed precision training (default: False)')
parser.add_argument('--valtiny', action='store_true', default=False,
help='Use valtiny as validation/Directly use the test set (default: False)')
parser.add_argument('--fine-grain', action='store_true', default=False,
                        help='Use fine-grain testing, i.e. 90%% correct counts as 0.9 (default: False)')
parser.add_argument('--labeling', action='store_true', default=False,
help='Just pseudo labeling (default: False)')
parser.add_argument('--label-ratio', type=float, default=1,
help='Pseudo labeling ratio (default: 1)')
parser.add_argument('--train-set', type=str, default='400_seed1',
help='The training set file name prefix')
parser.add_argument('--continue-from', type=str, default=None,
help='Training begins from a previous checkpoint')
args = parser.parse_args()
with open(args.exp_name + '_cfg.txt', 'w') as f:
f.write(str(vars(args)))
# Basic configurations
exp_name = str(int(time.time()))
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# torch.backends.cudnn.deterministic = True # Might hurt performance
# torch.backends.cudnn.benchmark = False # Might hurt performance
if args.exp_name != 'auto':
exp_name = args.exp_name
# device = torch.device('cpu')
# if torch.cuda.is_available():
# device = torch.device('cuda:0')
accelerator = Accelerator(split_batches=True)
device = accelerator.device
if args.valtiny:
val_set = 'valtiny_seed1'
else:
val_set = 'test'
if args.dataset == 'cifar10':
num_classes = num_classes_cifar10
mean = mean_cifar10
std = std_cifar10
input_sizes = input_sizes_cifar10
base = base_cifar10
else:
raise ValueError
net = wrn_28_2(num_classes=num_classes)
print(device)
net.to(device)
params_to_optimize = net.parameters()
optimizer = torch.optim.SGD(params_to_optimize, lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
# optimizer = torch.optim.Adam(params_to_optimize, lr=args.lr, weight_decay=args.weight_decay)
if args.continue_from is not None:
load_checkpoint(net=net, optimizer=None, lr_scheduler=None,
is_mixed_precision=args.mixed_precision, filename=args.continue_from)
labeled_loader, unlabeled_loader, pseudo_labeled_loader, val_loader, num_images = init(
batch_size_labeled=args.batch_size_labeled, mean=mean, base=base, prefix=args.train_set, val_set=val_set,
dataset=args.dataset, n=args.n, m=args.m, auto_augment=args.aa, input_sizes=input_sizes, std=std,
        num_workers=args.num_workers, batch_size_pseudo=args.batch_size_pseudo, train=not args.labeling)
net, optimizer, labeled_loader, pseudo_labeled_loader = accelerator.prepare(net, optimizer,
labeled_loader,
pseudo_labeled_loader)
# Pseudo labeling
if args.labeling:
time_now = time.time()
sub_base = CIFAR10.base_folder
filename = os.path.join(base, sub_base, args.train_set + '_pseudo')
generate_pseudo_labels(net=net, device=device, loader=unlabeled_loader, filename=filename,
label_ratio=args.label_ratio, num_images=num_images,
is_mixed_precision=args.mixed_precision)
print('Pseudo labeling time: %.2fs' % (time.time() - time_now))
else:
# Mutual-training
if args.alpha == -1:
criterion = DynamicMutualLoss()
else:
criterion = MixupDynamicMutualLoss(gamma1=args.gamma1, gamma2=args.gamma2,
T_max=args.epochs * len(pseudo_labeled_loader))
writer = SummaryWriter('logs/' + exp_name)
best_acc = test(loader=val_loader, device=device, net=net, fine_grain=args.fine_grain,
is_mixed_precision=args.mixed_precision)
save_checkpoint(net=net, optimizer=None, lr_scheduler=None, is_mixed_precision=args.mixed_precision)
print('Original acc: ' + str(best_acc))
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer,
T_max=args.epochs * len(pseudo_labeled_loader))
# lr_scheduler = None
# Retraining (i.e. fine-tuning)
best_acc = train(writer=writer, labeled_loader=labeled_loader, pseudo_labeled_loader=pseudo_labeled_loader,
val_loader=val_loader, device=device, criterion=criterion, net=net, optimizer=optimizer,
lr_scheduler=lr_scheduler, fine_grain=args.fine_grain, alpha=args.alpha,
num_epochs=args.epochs, is_mixed_precision=args.mixed_precision, best_acc=best_acc,
val_num_steps=args.val_num_steps, tensorboard_prefix='', start_at=args.start_at,
labeled_weight=args.labeled_weight, gamma1=args.gamma1, gamma2=args.gamma2,
num_classes=num_classes)
# Tidy up
# --do-not-save => args.do_not_save = False
if args.do_not_save: # Rename the checkpoint
os.rename('temp.pt', exp_name + '.pt')
os.rename('temp-ema.pt', exp_name + '--ema.pt')
else: # Since the checkpoint is already saved, it should be deleted
os.remove('temp.pt')
os.remove('temp-ema.pt')
writer.close()
with open('log.txt', 'a') as f:
f.write(exp_name + ': ' + str(best_acc) + '\n')
| 48.663697 | 119 | 0.609611 |
831a295f5634214b538490530227b2867fa73313 | 169 | py | Python | Regs/Block_0/R0990.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | ["MIT"] | 1 | 2021-04-25T13:53:20.000Z | 2021-04-25T13:53:20.000Z | Regs/Block_0/R0990.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | ["MIT"] | null | null | null | Regs/Block_0/R0990.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | ["MIT"] | null | null | null |
from ..IReg import IReg
class R0990(IReg):
def __init__(self):
self._header = ['REG',
'QTD_LIN_0']
self._hierarchy = "1"
| 15.363636 | 36 | 0.502959 |
40063234fae9c458d994869a82696f9e1ee8ef00 | 27,423 | py | Python | netbox/dcim/models/device_components.py | waterdrops/netbox | cb9478e0eaa5853e73718ef6bc52c1f1171678e5 | ["Apache-2.0"] | 2 | 2021-07-08T03:58:12.000Z | 2022-02-11T21:50:46.000Z | netbox/dcim/models/device_components.py | waterdrops/netbox | cb9478e0eaa5853e73718ef6bc52c1f1171678e5 | ["Apache-2.0"] | 25 | 2019-09-17T19:40:50.000Z | 2022-03-11T04:01:55.000Z | netbox/dcim/models/device_components.py | waterdrops/netbox | cb9478e0eaa5853e73718ef6bc52c1f1171678e5 | ["Apache-2.0"] | 1 | 2022-02-11T21:50:58.000Z | 2022-02-11T21:50:58.000Z |
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Sum
from django.urls import reverse
from mptt.models import MPTTModel, TreeForeignKey
from taggit.managers import TaggableManager
from dcim.choices import *
from dcim.constants import *
from dcim.fields import MACAddressField
from extras.models import ObjectChange, TaggedItem
from extras.utils import extras_features
from utilities.fields import NaturalOrderingField
from utilities.mptt import TreeManager
from utilities.ordering import naturalize_interface
from utilities.querysets import RestrictedQuerySet
from utilities.query_functions import CollateAsChar
from utilities.utils import serialize_object
__all__ = (
'BaseInterface',
'CableTermination',
'ConsolePort',
'ConsoleServerPort',
'DeviceBay',
'FrontPort',
'Interface',
'InventoryItem',
'PathEndpoint',
'PowerOutlet',
'PowerPort',
'RearPort',
)
class ComponentModel(models.Model):
"""
An abstract model inherited by any model which has a parent Device.
"""
device = models.ForeignKey(
to='dcim.Device',
on_delete=models.CASCADE,
related_name='%(class)ss'
)
name = models.CharField(
max_length=64
)
_name = NaturalOrderingField(
target_field='name',
max_length=100,
blank=True
)
label = models.CharField(
max_length=64,
blank=True,
help_text="Physical label"
)
description = models.CharField(
max_length=200,
blank=True
)
objects = RestrictedQuerySet.as_manager()
class Meta:
abstract = True
def __str__(self):
if self.label:
return f"{self.name} ({self.label})"
return self.name
def to_objectchange(self, action):
# Annotate the parent Device
try:
device = self.device
except ObjectDoesNotExist:
# The parent Device has already been deleted
device = None
return ObjectChange(
changed_object=self,
object_repr=str(self),
action=action,
related_object=device,
object_data=serialize_object(self)
)
@property
def parent(self):
return getattr(self, 'device', None)
class CableTermination(models.Model):
"""
An abstract model inherited by all models to which a Cable can terminate (certain device components, PowerFeed, and
CircuitTermination instances). The `cable` field indicates the Cable instance which is terminated to this instance.
`_cable_peer` is a GenericForeignKey used to cache the far-end CableTermination on the local instance; this is a
shortcut to referencing `cable.termination_b`, for example. `_cable_peer` is set or cleared by the receivers in
dcim.signals when a Cable instance is created or deleted, respectively.
"""
cable = models.ForeignKey(
to='dcim.Cable',
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
_cable_peer_type = models.ForeignKey(
to=ContentType,
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
_cable_peer_id = models.PositiveIntegerField(
blank=True,
null=True
)
_cable_peer = GenericForeignKey(
ct_field='_cable_peer_type',
fk_field='_cable_peer_id'
)
# Generic relations to Cable. These ensure that an attached Cable is deleted if the terminated object is deleted.
_cabled_as_a = GenericRelation(
to='dcim.Cable',
content_type_field='termination_a_type',
object_id_field='termination_a_id'
)
_cabled_as_b = GenericRelation(
to='dcim.Cable',
content_type_field='termination_b_type',
object_id_field='termination_b_id'
)
class Meta:
abstract = True
def get_cable_peer(self):
return self._cable_peer
class PathEndpoint(models.Model):
"""
An abstract model inherited by any CableTermination subclass which represents the end of a CablePath; specifically,
these include ConsolePort, ConsoleServerPort, PowerPort, PowerOutlet, Interface, PowerFeed, and CircuitTermination.
`_path` references the CablePath originating from this instance, if any. It is set or cleared by the receivers in
dcim.signals in response to changes in the cable path, and complements the `origin` GenericForeignKey field on the
CablePath model. `_path` should not be accessed directly; rather, use the `path` property.
`connected_endpoint()` is a convenience method for returning the destination of the associated CablePath, if any.
"""
_path = models.ForeignKey(
to='dcim.CablePath',
on_delete=models.SET_NULL,
null=True,
blank=True
)
class Meta:
abstract = True
def trace(self):
if self._path is None:
return []
# Construct the complete path
path = [self, *self._path.get_path()]
while (len(path) + 1) % 3:
# Pad to ensure we have complete three-tuples (e.g. for paths that end at a RearPort)
path.append(None)
path.append(self._path.destination)
# Return the path as a list of three-tuples (A termination, cable, B termination)
return list(zip(*[iter(path)] * 3))
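    # Note on the grouping above: zip(*[iter(path)] * 3) feeds one shared iterator to zip()
    # three times, so consecutive elements of the padded list are grouped into
    # (A termination, cable, B termination) triples, e.g.
    #   [a, c1, b, a2, c2, b2] -> [(a, c1, b), (a2, c2, b2)].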
@property
def path(self):
return self._path
@property
def connected_endpoint(self):
"""
Caching accessor for the attached CablePath's destination (if any)
"""
if not hasattr(self, '_connected_endpoint'):
self._connected_endpoint = self._path.destination if self._path else None
return self._connected_endpoint
#
# Console ports
#
@extras_features('export_templates', 'webhooks', 'custom_links')
class ConsolePort(CableTermination, PathEndpoint, ComponentModel):
"""
A physical console port within a Device. ConsolePorts connect to ConsoleServerPorts.
"""
type = models.CharField(
max_length=50,
choices=ConsolePortTypeChoices,
blank=True,
help_text='Physical port type'
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:consoleport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.type,
self.description,
)
#
# Console server ports
#
@extras_features('webhooks', 'custom_links')
class ConsoleServerPort(CableTermination, PathEndpoint, ComponentModel):
"""
A physical port within a Device (typically a designated console server) which provides access to ConsolePorts.
"""
type = models.CharField(
max_length=50,
choices=ConsolePortTypeChoices,
blank=True,
help_text='Physical port type'
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:consoleserverport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.type,
self.description,
)
#
# Power ports
#
@extras_features('export_templates', 'webhooks', 'custom_links')
class PowerPort(CableTermination, PathEndpoint, ComponentModel):
"""
A physical power supply (intake) port within a Device. PowerPorts connect to PowerOutlets.
"""
type = models.CharField(
max_length=50,
choices=PowerPortTypeChoices,
blank=True,
help_text='Physical port type'
)
maximum_draw = models.PositiveSmallIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1)],
help_text="Maximum power draw (watts)"
)
allocated_draw = models.PositiveSmallIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1)],
help_text="Allocated power draw (watts)"
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:powerport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.maximum_draw,
self.allocated_draw,
self.description,
)
def clean(self):
super().clean()
if self.maximum_draw is not None and self.allocated_draw is not None:
if self.allocated_draw > self.maximum_draw:
raise ValidationError({
'allocated_draw': f"Allocated draw cannot exceed the maximum draw ({self.maximum_draw}W)."
})
def get_power_draw(self):
"""
Return the allocated and maximum power draw (in VA) and child PowerOutlet count for this PowerPort.
"""
# Calculate aggregate draw of all child power outlets if no numbers have been defined manually
if self.allocated_draw is None and self.maximum_draw is None:
poweroutlet_ct = ContentType.objects.get_for_model(PowerOutlet)
outlet_ids = PowerOutlet.objects.filter(power_port=self).values_list('pk', flat=True)
utilization = PowerPort.objects.filter(
_cable_peer_type=poweroutlet_ct,
_cable_peer_id__in=outlet_ids
).aggregate(
maximum_draw_total=Sum('maximum_draw'),
allocated_draw_total=Sum('allocated_draw'),
)
ret = {
'allocated': utilization['allocated_draw_total'] or 0,
'maximum': utilization['maximum_draw_total'] or 0,
'outlet_count': len(outlet_ids),
'legs': [],
}
# Calculate per-leg aggregates for three-phase feeds
if getattr(self._cable_peer, 'phase', None) == PowerFeedPhaseChoices.PHASE_3PHASE:
for leg, leg_name in PowerOutletFeedLegChoices:
outlet_ids = PowerOutlet.objects.filter(power_port=self, feed_leg=leg).values_list('pk', flat=True)
utilization = PowerPort.objects.filter(
_cable_peer_type=poweroutlet_ct,
_cable_peer_id__in=outlet_ids
).aggregate(
maximum_draw_total=Sum('maximum_draw'),
allocated_draw_total=Sum('allocated_draw'),
)
ret['legs'].append({
'name': leg_name,
'allocated': utilization['allocated_draw_total'] or 0,
'maximum': utilization['maximum_draw_total'] or 0,
'outlet_count': len(outlet_ids),
})
return ret
# Default to administratively defined values
return {
'allocated': self.allocated_draw or 0,
'maximum': self.maximum_draw or 0,
'outlet_count': PowerOutlet.objects.filter(power_port=self).count(),
'legs': [],
}
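    # Illustrative sketch (not part of the original model): for a PowerPort with
    # maximum_draw=500 and allocated_draw=350, get_power_draw() returns
    #   {'allocated': 350, 'maximum': 500, 'outlet_count': <child PowerOutlet count>, 'legs': []}
    # and the aggregate-over-outlets branch only runs when neither value is set.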
#
# Power outlets
#
@extras_features('webhooks', 'custom_links')
class PowerOutlet(CableTermination, PathEndpoint, ComponentModel):
"""
A physical power outlet (output) within a Device which provides power to a PowerPort.
"""
type = models.CharField(
max_length=50,
choices=PowerOutletTypeChoices,
blank=True,
help_text='Physical port type'
)
power_port = models.ForeignKey(
to='dcim.PowerPort',
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='poweroutlets'
)
feed_leg = models.CharField(
max_length=50,
choices=PowerOutletFeedLegChoices,
blank=True,
help_text="Phase (for three-phase feeds)"
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:poweroutlet', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.power_port.name if self.power_port else None,
self.get_feed_leg_display(),
self.description,
)
def clean(self):
super().clean()
# Validate power port assignment
if self.power_port and self.power_port.device != self.device:
raise ValidationError(
"Parent power port ({}) must belong to the same device".format(self.power_port)
)
#
# Interfaces
#
class BaseInterface(models.Model):
"""
Abstract base class for fields shared by dcim.Interface and virtualization.VMInterface.
"""
enabled = models.BooleanField(
default=True
)
mac_address = MACAddressField(
null=True,
blank=True,
verbose_name='MAC Address'
)
mtu = models.PositiveIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1), MaxValueValidator(65536)],
verbose_name='MTU'
)
mode = models.CharField(
max_length=50,
choices=InterfaceModeChoices,
blank=True
)
class Meta:
abstract = True
def save(self, *args, **kwargs):
# Remove untagged VLAN assignment for non-802.1Q interfaces
if not self.mode:
self.untagged_vlan = None
# Only "tagged" interfaces may have tagged VLANs assigned. ("tagged all" implies all VLANs are assigned.)
if self.pk and self.mode != InterfaceModeChoices.MODE_TAGGED:
self.tagged_vlans.clear()
return super().save(*args, **kwargs)
@extras_features('export_templates', 'webhooks', 'custom_links')
class Interface(CableTermination, PathEndpoint, ComponentModel, BaseInterface):
"""
A network interface within a Device. A physical Interface can connect to exactly one other Interface.
"""
# Override ComponentModel._name to specify naturalize_interface function
_name = NaturalOrderingField(
target_field='name',
naturalize_function=naturalize_interface,
max_length=100,
blank=True
)
lag = models.ForeignKey(
to='self',
on_delete=models.SET_NULL,
related_name='member_interfaces',
null=True,
blank=True,
verbose_name='Parent LAG'
)
type = models.CharField(
max_length=50,
choices=InterfaceTypeChoices
)
mgmt_only = models.BooleanField(
default=False,
verbose_name='Management only',
help_text='This interface is used only for out-of-band management'
)
untagged_vlan = models.ForeignKey(
to='ipam.VLAN',
on_delete=models.SET_NULL,
related_name='interfaces_as_untagged',
null=True,
blank=True,
verbose_name='Untagged VLAN'
)
tagged_vlans = models.ManyToManyField(
to='ipam.VLAN',
related_name='interfaces_as_tagged',
blank=True,
verbose_name='Tagged VLANs'
)
ip_addresses = GenericRelation(
to='ipam.IPAddress',
content_type_field='assigned_object_type',
object_id_field='assigned_object_id',
related_query_name='interface'
)
tags = TaggableManager(through=TaggedItem)
csv_headers = [
'device', 'name', 'label', 'lag', 'type', 'enabled', 'mac_address', 'mtu', 'mgmt_only', 'description', 'mode',
]
class Meta:
ordering = ('device', CollateAsChar('_name'))
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:interface', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier if self.device else None,
self.name,
self.label,
self.lag.name if self.lag else None,
self.get_type_display(),
self.enabled,
self.mac_address,
self.mtu,
self.mgmt_only,
self.description,
self.get_mode_display(),
)
def clean(self):
super().clean()
# Virtual interfaces cannot be connected
if self.type in NONCONNECTABLE_IFACE_TYPES and (
self.cable or getattr(self, 'circuit_termination', False)
):
raise ValidationError({
'type': "Virtual and wireless interfaces cannot be connected to another interface or circuit. "
"Disconnect the interface or choose a suitable type."
})
# An interface's LAG must belong to the same device or virtual chassis
if self.lag and self.lag.device != self.device:
if self.device.virtual_chassis is None:
raise ValidationError({
'lag': f"The selected LAG interface ({self.lag}) belongs to a different device ({self.lag.device})."
})
elif self.lag.device.virtual_chassis != self.device.virtual_chassis:
raise ValidationError({
'lag': f"The selected LAG interface ({self.lag}) belongs to {self.lag.device}, which is not part "
f"of virtual chassis {self.device.virtual_chassis}."
})
# A virtual interface cannot have a parent LAG
if self.type == InterfaceTypeChoices.TYPE_VIRTUAL and self.lag is not None:
raise ValidationError({'lag': "Virtual interfaces cannot have a parent LAG interface."})
# A LAG interface cannot be its own parent
if self.pk and self.lag_id == self.pk:
raise ValidationError({'lag': "A LAG interface cannot be its own parent."})
# Validate untagged VLAN
if self.untagged_vlan and self.untagged_vlan.site not in [self.parent.site, None]:
raise ValidationError({
'untagged_vlan': "The untagged VLAN ({}) must belong to the same site as the interface's parent "
"device, or it must be global".format(self.untagged_vlan)
})
@property
def parent(self):
return self.device
@property
def is_connectable(self):
return self.type not in NONCONNECTABLE_IFACE_TYPES
@property
def is_virtual(self):
return self.type in VIRTUAL_IFACE_TYPES
@property
def is_wireless(self):
return self.type in WIRELESS_IFACE_TYPES
@property
def is_lag(self):
return self.type == InterfaceTypeChoices.TYPE_LAG
@property
def count_ipaddresses(self):
return self.ip_addresses.count()
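# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Standalone restatement of the LAG rule enforced in Interface.clean() above: a
# member interface may use a LAG on a different device only when both devices
# belong to the same (non-null) virtual chassis. Plain strings stand in for the
# real Device/VirtualChassis model instances.
def _lag_assignment_ok(device, lag_device, device_vc=None, lag_vc=None):
    if lag_device == device:
        return True
    return device_vc is not None and device_vc == lag_vc
assert _lag_assignment_ok('sw1', 'sw1')                                   # LAG on the same device
assert _lag_assignment_ok('sw1', 'sw2', device_vc='vc1', lag_vc='vc1')    # same virtual chassis
assert not _lag_assignment_ok('sw1', 'sw2')                               # unrelated devices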
#
# Pass-through ports
#
@extras_features('webhooks', 'custom_links')
class FrontPort(CableTermination, ComponentModel):
"""
A pass-through port on the front of a Device.
"""
type = models.CharField(
max_length=50,
choices=PortTypeChoices
)
rear_port = models.ForeignKey(
to='dcim.RearPort',
on_delete=models.CASCADE,
related_name='frontports'
)
rear_port_position = models.PositiveSmallIntegerField(
default=1,
validators=[
MinValueValidator(REARPORT_POSITIONS_MIN),
MaxValueValidator(REARPORT_POSITIONS_MAX)
]
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = (
('device', 'name'),
('rear_port', 'rear_port_position'),
)
def get_absolute_url(self):
return reverse('dcim:frontport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.rear_port.name,
self.rear_port_position,
self.description,
)
def clean(self):
super().clean()
# Validate rear port assignment
if self.rear_port.device != self.device:
raise ValidationError({
"rear_port": f"Rear port ({self.rear_port}) must belong to the same device"
})
# Validate rear port position assignment
if self.rear_port_position > self.rear_port.positions:
raise ValidationError({
"rear_port_position": f"Invalid rear port position ({self.rear_port_position}): Rear port "
f"{self.rear_port.name} has only {self.rear_port.positions} positions"
})
@extras_features('webhooks', 'custom_links')
class RearPort(CableTermination, ComponentModel):
"""
A pass-through port on the rear of a Device.
"""
type = models.CharField(
max_length=50,
choices=PortTypeChoices
)
positions = models.PositiveSmallIntegerField(
default=1,
validators=[
MinValueValidator(REARPORT_POSITIONS_MIN),
MaxValueValidator(REARPORT_POSITIONS_MAX)
]
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'positions', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:rearport', kwargs={'pk': self.pk})
def clean(self):
super().clean()
# Check that positions count is greater than or equal to the number of associated FrontPorts
frontport_count = self.frontports.count()
if self.positions < frontport_count:
raise ValidationError({
"positions": f"The number of positions cannot be less than the number of mapped front ports "
f"({frontport_count})"
})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.positions,
self.description,
)
#
# Device bays
#
@extras_features('webhooks', 'custom_links')
class DeviceBay(ComponentModel):
"""
An empty space within a Device which can house a child device
"""
installed_device = models.OneToOneField(
to='dcim.Device',
on_delete=models.SET_NULL,
related_name='parent_bay',
blank=True,
null=True
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'installed_device', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:devicebay', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.installed_device.identifier if self.installed_device else None,
self.description,
)
def clean(self):
super().clean()
# Validate that the parent Device can have DeviceBays
if not self.device.device_type.is_parent_device:
raise ValidationError("This type of device ({}) does not support device bays.".format(
self.device.device_type
))
# Cannot install a device into itself, obviously
if self.device == self.installed_device:
raise ValidationError("Cannot install a device into itself.")
# Check that the installed device is not already installed elsewhere
if self.installed_device:
current_bay = DeviceBay.objects.filter(installed_device=self.installed_device).first()
if current_bay and current_bay != self:
raise ValidationError({
'installed_device': "Cannot install the specified device; device is already installed in {}".format(
current_bay
)
})
#
# Inventory items
#
@extras_features('export_templates', 'webhooks', 'custom_links')
class InventoryItem(MPTTModel, ComponentModel):
"""
An InventoryItem represents a serialized piece of hardware within a Device, such as a line card or power supply.
InventoryItems are used only for inventory purposes.
"""
parent = TreeForeignKey(
to='self',
on_delete=models.CASCADE,
related_name='child_items',
blank=True,
null=True,
db_index=True
)
manufacturer = models.ForeignKey(
to='dcim.Manufacturer',
on_delete=models.PROTECT,
related_name='inventory_items',
blank=True,
null=True
)
part_id = models.CharField(
max_length=50,
verbose_name='Part ID',
blank=True,
help_text='Manufacturer-assigned part identifier'
)
serial = models.CharField(
max_length=50,
verbose_name='Serial number',
blank=True
)
asset_tag = models.CharField(
max_length=50,
unique=True,
blank=True,
null=True,
verbose_name='Asset tag',
help_text='A unique tag used to identify this item'
)
discovered = models.BooleanField(
default=False,
help_text='This item was automatically discovered'
)
tags = TaggableManager(through=TaggedItem)
objects = TreeManager()
csv_headers = [
'device', 'name', 'label', 'manufacturer', 'part_id', 'serial', 'asset_tag', 'discovered', 'description',
]
class Meta:
ordering = ('device__id', 'parent__id', '_name')
unique_together = ('device', 'parent', 'name')
def get_absolute_url(self):
return reverse('dcim:inventoryitem', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.name or '{{{}}}'.format(self.device.pk),
self.name,
self.label,
self.manufacturer.name if self.manufacturer else None,
self.part_id,
self.serial,
self.asset_tag,
self.discovered,
self.description,
)
| 31.340571
| 120
| 0.617985
|
e83adba353793dd5f3784cf716f1f6243ffc4e4a
| 7,280
|
py
|
Python
|
src/transformers/configuration_squeezebert.py
|
WERimagin/transformers
|
cc7d14511c647f8147494df72f8b0575015e37ab
|
[
"Apache-2.0"
] | 16
|
2021-09-09T02:09:41.000Z
|
2022-03-22T15:38:11.000Z
|
src/transformers/configuration_squeezebert.py
|
WERimagin/transformers
|
cc7d14511c647f8147494df72f8b0575015e37ab
|
[
"Apache-2.0"
] | 3
|
2021-11-08T04:40:28.000Z
|
2022-03-30T09:10:31.000Z
|
src/transformers/configuration_squeezebert.py
|
WERimagin/transformers
|
cc7d14511c647f8147494df72f8b0575015e37ab
|
[
"Apache-2.0"
] | 6
|
2021-11-01T09:45:53.000Z
|
2022-01-06T06:18:45.000Z
|
# coding=utf-8
# Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SqueezeBERT model configuration """
from .configuration_utils import PretrainedConfig
from .utils import logging
logger = logging.get_logger(__name__)
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"squeezebert/squeezebert-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/squeezebert/squeezebert-uncased/config.json",
"squeezebert/squeezebert-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/squeezebert/squeezebert-mnli/config.json",
"squeezebert/squeezebert-mnli-headless": "https://s3.amazonaws.com/models.huggingface.co/bert/squeezebert/squeezebert-mnli-headless/config.json",
}
class SqueezeBertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.SqueezeBertModel`.
It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model
architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 30522):
Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be
represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.SqueezeBertModel`.
hidden_size (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler.
If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, `optional`, defaults to 2):
The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.BertModel` or
:class:`~transformers.TFBertModel`.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (:obj:`int`, `optional`, defaults to 0):
The ID of the token in the word embedding to use as padding.
embedding_size (:obj:`int`, `optional`, defaults to 768):
The dimension of the word embedding vectors.
q_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in Q layer.
k_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in K layer.
v_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in V layer.
post_attention_groups (:obj:`int`, `optional`, defaults to 1):
The number of groups in the first feed forward network layer.
intermediate_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in the second feed forward network layer.
output_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in the third feed forward network layer.
Example:
>>> from transformers import SqueezeBertModel, SqueezeBertConfig
>>> # Initializing a SqueezeBERT configuration
>>> configuration = SqueezeBertConfig()
>>> # Initializing a model from the configuration above
>>> model = SqueezeBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
Attributes:
pretrained_config_archive_map (Dict[str, str]):
A dictionary containing all the available pre-trained checkpoints.
"""
pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "squeezebert"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
embedding_size=768,
q_groups=4,
k_groups=4,
v_groups=4,
post_attention_groups=1,
intermediate_groups=4,
output_groups=4,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.embedding_size = embedding_size
self.q_groups = q_groups
self.k_groups = k_groups
self.v_groups = v_groups
self.post_attention_groups = post_attention_groups
self.intermediate_groups = intermediate_groups
self.output_groups = output_groups
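# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Builds a smaller configuration than the defaults to show how the grouped-layer
# arguments are passed; all numeric values below are arbitrary choices for
# illustration, not recommended settings.
if __name__ == "__main__":
    config = SqueezeBertConfig(
        hidden_size=512,
        num_hidden_layers=6,
        num_attention_heads=8,
        q_groups=2,
        k_groups=2,
        v_groups=2,
    )
    print(config.hidden_size, config.num_hidden_layers, config.q_groups)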
| 47.581699
| 149
| 0.690659
|
56f3578863b62de0b55233af6dd556b68197439c
| 1,073
|
py
|
Python
|
tests/drf/controller_with_serializer.py
|
DmitryKhursevich/winter
|
9f3bf462f963059bab1f1bbb309ca57f8a43b46f
|
[
"MIT"
] | 9
|
2019-01-24T11:50:19.000Z
|
2019-07-05T07:58:46.000Z
|
tests/drf/controller_with_serializer.py
|
mikhaillazko/winter
|
cd4f11aaf28d500aabb59cec369817bfdb5c2fc1
|
[
"MIT"
] | 100
|
2019-01-29T08:11:38.000Z
|
2020-04-03T12:00:42.000Z
|
tests/drf/controller_with_serializer.py
|
mikhaillazko/winter
|
cd4f11aaf28d500aabb59cec369817bfdb5c2fc1
|
[
"MIT"
] | 8
|
2020-07-16T13:56:50.000Z
|
2021-12-27T03:33:23.000Z
|
import dataclasses
from rest_framework import serializers
import winter
from winter_django import BodyWithContext
from winter_django import input_serializer
from winter_django import output_serializer
@dataclasses.dataclass
class SimpleDTO:
number: int
class SimpleSerializer(serializers.Serializer):
number = serializers.IntegerField()
class SerializerWithContext(serializers.Serializer):
number = serializers.SerializerMethodField()
def get_number(self, data) -> int:
return self.context['additional_data']
@winter.controller
@winter.route('with-serializer')
class ControllerWithSerializer:
@winter.route_post('/')
@input_serializer(SimpleSerializer, argument_name='input_data')
@output_serializer(SimpleSerializer)
def post(self, input_data: dict) -> SimpleDTO:
return SimpleDTO(input_data['number'] + 1)
@winter.route_get('/with-context/')
@output_serializer(SerializerWithContext)
def post_back_with_context(self) -> BodyWithContext:
return BodyWithContext({}, {'additional_data': 123})
| 26.170732
| 67
| 0.767008
|
f7c202a18a3cb8c131a94852638378454e7f312e
| 1,482
|
py
|
Python
|
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/DescribeDomainsBySourceRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/DescribeDomainsBySourceRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/DescribeDomainsBySourceRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDomainsBySourceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'DescribeDomainsBySource')
def get_Sources(self):
return self.get_query_params().get('Sources')
def set_Sources(self,Sources):
self.add_query_param('Sources',Sources)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
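# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Minimal sketch of sending this request through the core SDK client. The
# credentials, region id and origin host below are placeholders; AcsClient and
# do_action_with_exception come from aliyunsdkcore.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = DescribeDomainsBySourceRequest()
    request.set_Sources('origin.example.com')
    # do_action_with_exception returns the raw response body
    print(client.do_action_with_exception(request))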
| 35.285714
| 76
| 0.765857
|
6a615faed84c44d51e2cc785b4a71ba4c0b25c69
| 3,743
|
py
|
Python
|
responsebot/utils/config_utils.py
|
openjusticebaltimore/ResponseBot
|
e224fe5251190f2f4a8901afbd622c411601e86e
|
[
"Apache-2.0"
] | 16
|
2016-05-11T00:04:18.000Z
|
2020-07-21T20:32:11.000Z
|
responsebot/utils/config_utils.py
|
openjusticebaltimore/ResponseBot
|
e224fe5251190f2f4a8901afbd622c411601e86e
|
[
"Apache-2.0"
] | 6
|
2016-05-10T03:34:17.000Z
|
2016-06-14T02:42:45.000Z
|
responsebot/utils/config_utils.py
|
openjusticebaltimore/ResponseBot
|
e224fe5251190f2f4a8901afbd622c411601e86e
|
[
"Apache-2.0"
] | 6
|
2016-05-09T20:17:43.000Z
|
2020-08-15T04:43:45.000Z
|
"""
Utilities for handling configurations
"""
from responsebot.common.exceptions import MissingConfigError
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import SafeConfigParser
class ResponseBotConfig(object):
"""Get config values and validate them."""
REQUIRED_CONFIGS = ['handlers_package', 'consumer_key', 'consumer_secret', 'token_key', 'token_secret']
CONFIG_FILE = '.responsebot'
def __init__(self, *args, **kwargs):
"""
:param kwargs: Config from CLI arguments
"""
self._config = {
'min_seconds_between_errors': 30,
'sleep_seconds_on_consecutive_errors': 300
}
self.load_config_file()
self.load_config_from_cli_arguments(*args, **kwargs)
self.validate_configs()
def load_config_file(self):
"""Parse configuration file and get config values."""
config_parser = SafeConfigParser()
config_parser.read(self.CONFIG_FILE)
if config_parser.has_section('handlers'):
self._config['handlers_package'] = config_parser.get('handlers', 'package')
if config_parser.has_section('auth'):
self._config['consumer_key'] = config_parser.get('auth', 'consumer_key')
self._config['consumer_secret'] = config_parser.get('auth', 'consumer_secret')
self._config['token_key'] = config_parser.get('auth', 'token_key')
self._config['token_secret'] = config_parser.get('auth', 'token_secret')
if config_parser.has_section('stream'):
self._config['user_stream'] = config_parser.get('stream', 'user_stream').lower() == 'true'
else:
self._config['user_stream'] = False
if config_parser.has_option('general', 'min_seconds_between_errors'):
self._config['min_seconds_between_errors'] = config_parser.get('general', 'min_seconds_between_errors')
if config_parser.has_option('general', 'sleep_seconds_on_consecutive_errors'):
self._config['sleep_seconds_on_consecutive_errors'] = config_parser.get(
'general', 'sleep_seconds_on_consecutive_errors')
def load_config_from_cli_arguments(self, *args, **kwargs):
"""
Get config values of passed in CLI options.
:param dict kwargs: CLI options
"""
self._load_config_from_cli_argument(key='handlers_package', **kwargs)
self._load_config_from_cli_argument(key='auth', **kwargs)
self._load_config_from_cli_argument(key='user_stream', **kwargs)
self._load_config_from_cli_argument(key='min_seconds_between_errors', **kwargs)
self._load_config_from_cli_argument(key='sleep_seconds_on_consecutive_errors', **kwargs)
def _load_config_from_cli_argument(self, key, **kwargs):
if kwargs.get(key):
if key == 'auth':
self._config['consumer_key'], self._config['consumer_secret'],\
self._config['token_key'], self._config['token_secret'] = kwargs.get(key)
else:
self._config[key] = kwargs.get(key)
def validate_configs(self):
"""
Check that required config are set.
:raises :class:`~responsebot.common.exceptions.MissingConfigError`: if a required config is missing
"""
# Check required arguments, validate values
for conf in self.REQUIRED_CONFIGS:
if conf not in self._config:
raise MissingConfigError('Missing required configuration %s' % conf)
def get(self, key):
"""
Get config value specify by key.
:param str key: config key
:return: config value
"""
return self._config[key]
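# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Constructs the config purely from CLI-style keyword arguments, so no
# .responsebot file is required as long as every required key is supplied.
# All literal values below are placeholders invented for illustration.
if __name__ == '__main__':
    config = ResponseBotConfig(
        handlers_package='my_bot.handlers',
        auth=('consumer-key', 'consumer-secret', 'token-key', 'token-secret'),
    )
    print(config.get('handlers_package'))
    print(config.get('min_seconds_between_errors'))  # default of 30 when not overridden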
| 40.247312
| 115
| 0.657761
|
fe8c25a152cb00138062b1dc9c4653762b221243
| 11,694
|
py
|
Python
|
medspacy/context/context_modifier.py
|
lusterck/medspacy
|
ef151ac163e468159c58cfd7ec391900fac0d809
|
[
"MIT"
] | 197
|
2020-07-22T12:37:03.000Z
|
2022-03-29T19:07:41.000Z
|
medspacy/context/context_modifier.py
|
lusterck/medspacy
|
ef151ac163e468159c58cfd7ec391900fac0d809
|
[
"MIT"
] | 77
|
2020-09-11T19:01:06.000Z
|
2022-03-29T16:57:21.000Z
|
medspacy/context/context_modifier.py
|
lusterck/medspacy
|
ef151ac163e468159c58cfd7ec391900fac0d809
|
[
"MIT"
] | 32
|
2020-07-30T21:30:16.000Z
|
2022-03-27T16:01:30.000Z
|
class ConTextModifier:
"""Represents a concept found by ConText in a document.
Is the result of ConTextRule matching a span of text in a Doc.
"""
def __init__(self, context_rule, start, end, doc, _use_context_window=False):
"""Create a new ConTextModifier from a document span.
        context_rule (ConTextRule): The ConTextRule object which defines the modifier.
start (int): The start token index.
end (int): The end token index (non-inclusive).
doc (Doc): The spaCy Doc which contains this span.
"""
self._context_rule = context_rule
self.start = start
self.end = end
self.doc = doc
self._targets = []
self._num_targets = 0
self._use_context_window = _use_context_window
self._scope_start = None
self._scope_end = None
self.set_scope()
@property
def span(self):
"""The spaCy Span object, which is a view of self.doc, covered by this match."""
return self.doc[self.start : self.end]
@property
def rule(self):
"""Returns the associated direction."""
return self._context_rule
@property
def direction(self):
return self.rule.direction
@property
def category(self):
"""Returns the associated category."""
return self.rule.category
@property
def scope(self):
"""Returns the associated scope."""
return self.doc[self._scope_start : self._scope_end]
@property
def allowed_types(self):
"""Returns the associated allowed types."""
return self.rule.allowed_types
@property
def excluded_types(self):
"""Returns the associated excluded types."""
return self.rule.excluded_types
@property
def num_targets(self):
"""Returns the associated number of targets."""
return self._num_targets
@property
def max_targets(self):
"""Returns the associated maximum number of targets."""
return self.rule.max_targets
@property
def max_scope(self):
"""Returns the associated maximum scope."""
return self.rule.max_scope
def set_scope(self):
"""Applies the direction of the ConTextRule which generated
this ConTextModifier to define a scope.
        If self.max_scope is None, the default scope is the sentence in which the modifier occurs,
        extending in whichever direction is defined by self.direction.
For example, if the direction is "forward", the scope will be [self.end: sentence.end].
If the direction is "backward", it will be [self.start: sentence.start].
If self.max_scope is not None and the length of the default scope is longer than self.max_scope,
it will be reduced to self.max_scope.
"""
# If ConText is set to use defined windows, do that instead of sentence splitting
if self._use_context_window:
full_scope_span = self.span._.window(n=self.rule.max_scope)
# # Up to the beginning of the doc
# full_scope_start = max(
# (0, self.start - self.rule.max_scope)
# )
# # Up to the end of the doc
# full_scope_end = min(
# (len(self.span.doc), self.end + self.rule.max_scope)
# )
# full_scope_span = self.span.doc[full_scope_start:full_scope_end]
# Otherwise, use the sentence
else:
full_scope_span = self.doc[self.start].sent
if full_scope_span is None:
raise ValueError(
"ConText failed because sentence boundaries have not been set and 'use_context_window' is set to False. "
"Add an upstream component such as the dependency parser, Sentencizer, or PyRuSH to detect sentence "
"boundaries or initialize ConTextComponent with 'use_context_window=True.'"
)
if self.direction.lower() == "forward":
self._scope_start, self._scope_end = self.end, full_scope_span.end
if self.max_scope is not None and (self._scope_end - self._scope_start) > self.max_scope:
self._scope_end = self.end + self.max_scope
elif self.direction.lower() == "backward":
self._scope_start, self._scope_end = (
full_scope_span.start,
self.start,
)
if self.max_scope is not None and (self._scope_end - self._scope_start) > self.max_scope:
self._scope_start = self.start - self.max_scope
else: # bidirectional
self._scope_start, self._scope_end = (
full_scope_span.start,
full_scope_span.end,
)
# Set the max scope on either side
# Backwards
if self.max_scope is not None and (self.start - self._scope_start) > self.max_scope:
self._scope_start = self.start - self.max_scope
# Forwards
if self.max_scope is not None and (self._scope_end - self.end) > self.max_scope:
self._scope_end = self.end + self.max_scope
def update_scope(self, span):
"""Change the scope of self to be the given spaCy span.
span (Span): a spaCy Span which contains the scope
which a modifier should cover.
"""
self._scope_start, self._scope_end = span.start, span.end
def limit_scope(self, other):
"""If self and obj have the same category
or if obj has a directionality of 'terminate',
use the span of obj to update the scope of self.
Limiting the scope of two modifiers of the same category
reduces the number of modifiers. For example, in
'no evidence of CHF, no pneumonia', 'pneumonia' will only
be modified by 'no', not 'no evidence of'.
'terminate' modifiers limit the scope of a modifier
like 'no evidence of' in 'no evidence of CHF, **but** there is pneumonia'
other (ConTextModifier)
        Returns True if other modified the scope of self.
"""
if self.span.sent != other.span.sent:
return False
if self.direction.upper() == "TERMINATE":
return False
# Check if the other modifier is a type which can modify self
# or if they are the same category. If not, don't reduce scope.
if (
(other.direction.upper() != "TERMINATE")
and (other.category.upper() not in self.rule.terminated_by)
and (other.category.upper() != self.category.upper())
):
return False
# If two modifiers have the same category but modify different target types,
# don't limit scope.
if self.category == other.category and (
(self.allowed_types != other.allowed_types) or (self.excluded_types != other.excluded_types)
):
return False
orig_scope = self.scope
if self.direction.lower() in ("forward", "bidirectional"):
if other > self:
self._scope_end = min(self._scope_end, other.start)
if self.direction.lower() in ("backward", "bidirectional"):
if other < self:
self._scope_start = max(self._scope_start, other.end)
return orig_scope != self.scope
def modifies(self, target):
"""Returns True if the target is within the modifier scope
and self is allowed to modify target.
target (Span): a spaCy span representing a target concept.
"""
        # If the target and modifier overlap, meaning at least one token
        # was extracted as both a target and a modifier, return False
        # to avoid self-modifying concepts.
if self.overlaps_target(target):
return False
if self.direction in ("TERMINATE", "PSEUDO"):
return False
if not self.allows(target.label_.upper()):
return False
if target[0] in self.scope or target[-1] in self.scope:
if not self.on_modifies(target):
return False
else:
return True
return False
def allows(self, target_label):
"""Returns True if a modifier is able to modify a target type.
A modifier may not be allowed if either self.allowed_types is not None and
target_label is not in it, or if self.excluded_types is not None and
target_label is in it.
"""
if self.allowed_types is not None:
if target_label not in self.allowed_types:
return False
return True
if self.excluded_types is not None:
if target_label not in self.excluded_types:
return True
return False
return True
def on_modifies(self, target):
"""If the ConTextRule used to define a ConTextModifier has an on_modifies callback function,
evaluate and return either True or False.
If on_modifies is None, return True.
"""
if self.rule.on_modifies is None:
return True
# Find the span in between the target and modifier
start = min(target.end, self.span.end)
end = max(target.start, self.span.start)
span_between = target.doc[start:end]
rslt = self.rule.on_modifies(target, self.span, span_between)
if rslt not in (True, False):
raise ValueError(
"The on_modifies function must return either True or False indicating "
"whether a modify modifies a target. Actual value: {0}".format(rslt)
)
return rslt
def modify(self, target):
"""Add target to the list of self._targets and increment self._num_targets."""
self._targets.append(target)
self._num_targets += 1
def reduce_targets(self):
"""If self.max_targets is not None, reduce the targets which are modified
so that only the n closest targets are left. Distance is measured as
the distance to either the start or end of a target (whichever is closer).
"""
if self.max_targets is None or self.num_targets <= self.max_targets:
return
target_dists = []
for target in self._targets:
dist = min(abs(self.start - target.end), abs(target.start - self.end))
target_dists.append((target, dist))
srtd_targets, _ = zip(*sorted(target_dists, key=lambda x: x[1]))
self._targets = srtd_targets[: self.max_targets]
self._num_targets = len(self._targets)
def overlaps(self, other):
""" Returns whether the object overlaps with another span
        other (ConTextModifier): the other object to check for overlaps
RETURNS: true if there is overlap, false otherwise.
"""
return (
self.span[0] in other.span
or self.span[-1] in other.span
or other.span[0] in self.span
or other.span[-1] in self.span
)
def overlaps_target(self, target):
"""Returns True if self overlaps with a spaCy span."""
return self.span[0] in target or self.span[-1] in target or target[0] in self.span or target[-1] in self.span
def __gt__(self, other):
return self.span > other.span
def __ge__(self, other):
return self.span >= other.span
def __lt__(self, other):
return self.span < other.span
def __le__(self, other):
return self.span <= other.span
def __len__(self):
return len(self.span)
def __repr__(self):
return f"<ConTextModifier> [{self.span}, {self.category}]"
| 38.467105
| 125
| 0.61322
|
40a9b257512025fbbc657784b51c037ce03aacdc
| 149
|
py
|
Python
|
tests/djsonb_fields/models.py
|
andres-nieves-endava/djsonb
|
5fc1ef3a10cb313af7e1c04c25acac81e81c7096
|
[
"BSD-3-Clause"
] | 3
|
2016-03-08T20:55:54.000Z
|
2019-06-13T14:31:50.000Z
|
tests/djsonb_fields/models.py
|
andres-nieves-endava/djsonb
|
5fc1ef3a10cb313af7e1c04c25acac81e81c7096
|
[
"BSD-3-Clause"
] | 13
|
2015-10-07T18:21:37.000Z
|
2018-07-30T12:51:40.000Z
|
tests/djsonb_fields/models.py
|
andres-nieves-endava/djsonb
|
5fc1ef3a10cb313af7e1c04c25acac81e81c7096
|
[
"BSD-3-Clause"
] | 4
|
2016-03-14T18:12:33.000Z
|
2020-07-08T15:41:50.000Z
|
# -*- coding: utf-8 -*-
from django.db import models
from djsonb.fields import JsonBField
class JsonBModel(models.Model):
data = JsonBField()
| 16.555556
| 36
| 0.711409
|
fc2d3bb7726051683830c3199321bc41a33f5e0a
| 7,343
|
py
|
Python
|
models/rnn_baseline/model_trainer.py
|
kdmarshall/Two_Sigma_Financial_Modeling_Challenge
|
e7e4b6280c1fd85983d3786afb630010f807d700
|
[
"MIT"
] | 3
|
2017-11-12T07:58:10.000Z
|
2021-03-21T19:10:30.000Z
|
models/rnn_baseline/model_trainer.py
|
kdmarshall/Two_Sigma_Financial_Modeling_Challenge
|
e7e4b6280c1fd85983d3786afb630010f807d700
|
[
"MIT"
] | null | null | null |
models/rnn_baseline/model_trainer.py
|
kdmarshall/Two_Sigma_Financial_Modeling_Challenge
|
e7e4b6280c1fd85983d3786afb630010f807d700
|
[
"MIT"
] | 1
|
2020-10-24T22:51:59.000Z
|
2020-10-24T22:51:59.000Z
|
import tensorflow as tf
import numpy as np
import multiprocessing
import random
import os
from random import shuffle
import pandas as pd
from utils.data_utils import DataSet
from utils.mock_gym import r_score
# Training options
SAVE_ANALYTICS = False
OUTDIR = '/Users/Peace/Desktop/outputs'
RANDOM_SEED = 8888
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
if SAVE_ANALYTICS:
import matplotlib.pyplot as plt # Required for saving out analytics
import matplotlib as mpl
mpl.use('TkAgg') # Backend for OSX -- change accordingly
def save_analytics(true, pred, r_values, step):
plt.plot(range(len(true)), true)
plt.plot(range(len(pred)), pred)
plt.plot(range(len(pred)), r_values)
plt.savefig(OUTDIR + '/example{0}.png'.format(step))
plt.clf()
def relu(x, alpha=0., max_value=None):
'''
Note: when alpha != 0 this corresponds to leaky relu
alpha: slope of negative section.
'''
negative_part = tf.nn.relu(-x)
x = tf.nn.relu(x)
if max_value is not None:
x = tf.clip_by_value(x, tf.cast(0., dtype=tf.float32),
tf.cast(max_value, dtype=tf.float32))
x -= tf.constant(alpha, dtype=tf.float32) * negative_part
return x
print('initializing...')
# Hyperparameters
max_seq_len = 30
num_features = 108 # TODO: examples.shape[-1]
rnn_size = 128
p_l1_size = 100
batch_size = 128
learning_rate = 1e-3
num_steps = 100000
valid_steps = 100
# Initialize TF variables
rnn_cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
embedding_weights = tf.get_variable('emb_w', [num_features, rnn_size], initializer=tf.contrib.layers.xavier_initializer())
p_l1_weights = tf.get_variable('pred_l1_w', [rnn_size, p_l1_size], initializer=tf.contrib.layers.xavier_initializer())
p_l1_bias = tf.get_variable('pred_l1_b', initializer=tf.constant(0., shape=[p_l1_size]))
prediction_weights = tf.get_variable('pred_w', [p_l1_size, 1], initializer=tf.contrib.layers.xavier_initializer())
prediction_bias = tf.get_variable('pred_b', initializer=tf.constant(0.))
# Input nodes into the graph
observation_placeholder = tf.placeholder("float32", [batch_size, max_seq_len, num_features])
targets_placeholder = tf.placeholder("float32", [batch_size, max_seq_len])
weights_placeholder = tf.placeholder("float32", [batch_size, max_seq_len])
#rewards_placeholder = tf.placeholder("float32", [batch_size, 1])
keep_prob_placeholder = tf.placeholder(tf.float32)
def get_graph():
inputs = tf.transpose(observation_placeholder, [1, 0, 2])
embedded = []
for input in tf.unpack(inputs, axis=0):
act = tf.nn.dropout(tf.matmul(input, embedding_weights), keep_prob_placeholder)
embedded.append(act)
outputs, _ = tf.nn.dynamic_rnn(rnn_cell, tf.pack(embedded), time_major=True, scope='lstm', dtype=tf.float32)
logits = []
for timestep in tf.split(0, max_seq_len, outputs):
pre_act_l1 = tf.matmul(tf.squeeze(timestep), p_l1_weights) + p_l1_bias
act_l1 = tf.nn.dropout(relu(pre_act_l1, 0.3), keep_prob_placeholder)
pre_act_l2 = tf.matmul(act_l1, prediction_weights) + prediction_bias
logit = tf.tanh(pre_act_l2)
logits.append(logit)
logits = tf.squeeze(tf.pack(logits))
logits = tf.transpose(logits, [1, 0])
# R is differentiable, so we can optimize the evaluation function directly
y_true = targets_placeholder*10. # Scale to take adv of full tanh range
diffs = tf.square(y_true - logits) * weights_placeholder
y_true_mean = tf.reduce_sum(y_true * weights_placeholder)/tf.reduce_sum(weights_placeholder)
denom = tf.reduce_sum(tf.square(y_true - y_true_mean) * weights_placeholder)
R2 = 1 - tf.reduce_sum(diffs) / (denom + 1e-17)
loss = -1 * tf.sign(R2) * tf.sqrt(tf.abs(R2)) # -1 to maximize R
# SSE loss
#loss = tf.reduce_sum(tf.square(tf.sub(logits, targets_placeholder*10.)) * weights_placeholder )#/ tf.reduce_sum(weights_placeholder))
return logits, loss
logits, loss = get_graph()
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
tf.global_variables_initializer().run()
# Useful for testing overfit model
#batch_input = np.random.normal(size=(batch_size, max_seq_len, 109))
#batch_targets = np.random.normal(size=(batch_size, max_seq_len)) / 20.
#batch_weights = np.ones((batch_size, max_seq_len))
dataset = DataSet()
data = dataset.get_numpy_data()
trainset, train_validset, validset = dataset.split_valid(*data, 0.25)
print('Train dataset shape: {0}'.format(trainset[0].shape))
# trainset stats:
# shape: 712, 1520 when split 0.5
# Epoch about every ~8000 steps (not true epoch due to shifted seq)
if SAVE_ANALYTICS:
with open(os.path.join(OUTDIR, 'log.csv'), 'w') as outfile:
outfile.write('Step,Train R,Valid R\n')
print('training...')
print('Format: Train R -- Valid R')
avg = []
for step in range(num_steps):
input, targets, weights = dataset.get_numpy_batch(trainset,
batch_size, max_seq_len)
# Allow for burn-in
        weights[:, :5] = 0  # zero out the first few timesteps of every sequence
l, _, logs = sess.run([loss, optimizer, logits],
feed_dict={
observation_placeholder: input,
targets_placeholder: targets,
weights_placeholder: weights,
keep_prob_placeholder: 0.5})
avg.append(-l)
if step % 200 == 0:
vavg = []
for vstep in range(valid_steps):
input, targets, weights = dataset.get_numpy_batch(validset,
batch_size,
max_seq_len)
                weights[:, :5] = 0  # same burn-in masking as in training
l, logs = sess.run([loss, logits],
feed_dict={
observation_placeholder: input,
targets_placeholder: targets,
weights_placeholder: weights,
keep_prob_placeholder: 1.0})
vavg.append(-l)
print('Step {0}: {1:.4f} {2:.4f}'.format(step, np.mean(avg), np.mean(vavg)))
if SAVE_ANALYTICS:
with open(os.path.join(OUTDIR, 'log.csv'), 'a') as outfile:
outfile.write('{0},{1:.4f},{2:.4f}\n'.format(step, np.mean(avg), np.mean(vavg)))
avg = []
if SAVE_ANALYTICS:
r_values = []
for v1, v2 in zip(np.rollaxis(targets, 1, 0), np.rollaxis(logs/10., 1, 0)):
r_values.append(r_score(np.array(v1), np.array(v2)))
save_analytics(targets[0]*10, logs[0], np.squeeze(np.array(r_values)), step)
if SAVE_ANALYTICS:
saver.save(sess, OUTDIR+'/models/model.ckp')
# Rudimentary early stopping for now (TODO: Learning rate decay;
# conditional model saving)
if np.mean(vavg) > 0.018:
print('Training complete.')
break
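# --- Illustrative sketch (editor's addition, not part of the original script) ---
# NumPy restatement of the metric optimized above: a weighted R^2 between targets
# and predictions, signed-square-rooted exactly as in the graph
# (sign(R2) * sqrt(|R2|)); the training loss is its negation. Note that the graph
# applies this to targets scaled by 10. The toy call in the comment uses made-up values.
def weighted_r(y_true, y_pred, weights):
    y_true = np.asarray(y_true, dtype=np.float64)
    y_pred = np.asarray(y_pred, dtype=np.float64)
    weights = np.asarray(weights, dtype=np.float64)
    sse = np.sum(np.square(y_true - y_pred) * weights)
    y_true_mean = np.sum(y_true * weights) / np.sum(weights)
    denom = np.sum(np.square(y_true - y_true_mean) * weights)
    r2 = 1 - sse / (denom + 1e-17)
    return np.sign(r2) * np.sqrt(np.abs(r2))
# Example: weighted_r([0.1, -0.2, 0.3], [0.1, -0.2, 0.3], [1.0, 1.0, 1.0]) == 1.0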
| 36.715
| 138
| 0.618548
|
06b66879889c7f827f18c8c0aa4ab4f30c295f1c
| 3,256
|
py
|
Python
|
py_elemental/settings.py
|
FranchuFranchu/py-elemental
|
7679815c8f8b8df5095c0c26c610179aa05d4ea0
|
[
"MIT"
] | null | null | null |
py_elemental/settings.py
|
FranchuFranchu/py-elemental
|
7679815c8f8b8df5095c0c26c610179aa05d4ea0
|
[
"MIT"
] | null | null | null |
py_elemental/settings.py
|
FranchuFranchu/py-elemental
|
7679815c8f8b8df5095c0c26c610179aa05d4ea0
|
[
"MIT"
] | null | null | null |
"""
Django settings for py_elemental project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm5&-p@vk(v^gb=(k*3l0zyrmjl63))cc_%$=rprxh#a)e^31jm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'dwebsocket',
'bot',
'element',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'py_elemental.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ["templates/"],
'APP_DIRS': False,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'py_elemental.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
| 25.24031
| 91
| 0.691032
|
a1a3b7e5c64b0c82cf6e2b9ef6d405e42f8e61ac
| 1,345
|
py
|
Python
|
partridge/utilities.py
|
adiwaz/partridge
|
f39889ddc7142b14473c5c0c3235d6276c76b5c0
|
[
"MIT"
] | 1
|
2019-10-22T13:34:07.000Z
|
2019-10-22T13:34:07.000Z
|
partridge/utilities.py
|
adiwaz/partridge
|
f39889ddc7142b14473c5c0c3235d6276c76b5c0
|
[
"MIT"
] | 2
|
2018-02-25T08:00:17.000Z
|
2019-04-01T14:15:20.000Z
|
partridge/utilities.py
|
adiwaz/partridge
|
f39889ddc7142b14473c5c0c3235d6276c76b5c0
|
[
"MIT"
] | 2
|
2018-02-24T17:10:27.000Z
|
2018-06-18T16:03:30.000Z
|
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
from chardet import UniversalDetector
import numpy as np
import pandas as pd
from pandas.core.common import flatten
__all__ = [
'detect_encoding',
'empty_df',
'lru_cache',
'remove_node_attributes',
'setwrap',
]
def empty_df(columns=None):
columns = [] if columns is None else columns
empty = {col: [] for col in columns}
return pd.DataFrame(empty, columns=columns, dtype=np.unicode)
def setwrap(value):
"""
Returns a flattened and stringified set from the given object or iterable.
For use in public functions which accept argmuents or kwargs that can be
one object or a list of objects.
"""
return set(map(np.unicode, set(flatten([value]))))
def remove_node_attributes(G, attributes):
"""
Return a copy of the graph with the given attributes
deleted from all nodes.
"""
G = G.copy()
for _, data in G.nodes(data=True):
for attribute in setwrap(attributes):
if attribute in data:
del data[attribute]
return G
def detect_encoding(f):
u = UniversalDetector()
for line in f:
line = bytearray(line)
u.feed(line)
if u.done:
break
u.close()
return u.result['encoding']
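# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Small demonstration of the helpers above; the column names and the nested
# input to setwrap are arbitrary example values.
if __name__ == '__main__':
    print(empty_df(['trip_id', 'stop_id']).columns.tolist())   # ['trip_id', 'stop_id']
    print(setwrap('route_1'))                                   # {'route_1'}
    print(setwrap(['route_1', ['route_2', 'route_3']]))         # set of three route ids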
| 22.79661
| 78
| 0.655019
|
1687a529385c2f4610d125cc6ca0b789d8e698a5
| 73
|
py
|
Python
|
synology_api/__init__.py
|
kidburglar/synology-api
|
1b5d0e6d121646713bf9b120218e811f96bb4288
|
[
"MIT"
] | null | null | null |
synology_api/__init__.py
|
kidburglar/synology-api
|
1b5d0e6d121646713bf9b120218e811f96bb4288
|
[
"MIT"
] | null | null | null |
synology_api/__init__.py
|
kidburglar/synology-api
|
1b5d0e6d121646713bf9b120218e811f96bb4288
|
[
"MIT"
] | 1
|
2020-06-12T03:35:03.000Z
|
2020-06-12T03:35:03.000Z
|
from . import auth, filestation, downloadstation, audiostation, sys_info
| 36.5
| 72
| 0.821918
|
219b66d5bfe26c9a1145bd48e3c706e73a0fcfc6
| 1,171
|
py
|
Python
|
pyperf/tests/test_cli.py
|
Tada-Project/pyperf
|
0a0b68582b3eb1ba221cd15eabe5e965e7f28338
|
[
"MIT"
] | null | null | null |
pyperf/tests/test_cli.py
|
Tada-Project/pyperf
|
0a0b68582b3eb1ba221cd15eabe5e965e7f28338
|
[
"MIT"
] | null | null | null |
pyperf/tests/test_cli.py
|
Tada-Project/pyperf
|
0a0b68582b3eb1ba221cd15eabe5e965e7f28338
|
[
"MIT"
] | 1
|
2019-11-21T12:23:41.000Z
|
2019-11-21T12:23:41.000Z
|
import pyperf
from pyperf import _cli as cli
from pyperf.tests import unittest
class CLITests(unittest.TestCase):
def test_format_result(self):
run = pyperf.Run([1.0, 1.5, 2.0],
warmups=[(1, 3.0)],
metadata={'name': 'mybench'},
collect_metadata=False)
bench = pyperf.Benchmark([run])
self.assertEqual(cli.format_result_value(bench),
'1.50 sec +- 0.50 sec')
self.assertEqual(cli.format_result(bench),
'Mean +- std dev: 1.50 sec +- 0.50 sec')
def test_format_result_calibration(self):
run = pyperf.Run([], warmups=[(100, 1.0)],
metadata={'name': 'bench', 'loops': 100},
collect_metadata=False)
bench = pyperf.Benchmark([run])
self.assertEqual(cli.format_result_value(bench),
'<calibration: 100 loops>')
self.assertEqual(cli.format_result(bench),
'Calibration: 100 loops')
self.assertRaises(ValueError, bench.median)
if __name__ == "__main__":
unittest.main()
| 36.59375
| 66
| 0.543126
|