blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c374f75d167d32f720369055aac7da3cfd415292 | d5af5459d0a68d8934219cdd516a23d73c7c52fb | /examples/08 functions/search-with-fns.py | 39a2639219179d057192c5701011f4efaf2e778a | [] | no_license | flathunt/pylearn | 1e5b147924dca792eb1cddbcbee1b8da0fc3d055 | 006f37d67343a0288e7efda359ed9454939ec25e | refs/heads/master | 2022-11-23T23:31:32.016146 | 2022-11-17T08:20:57 | 2022-11-17T08:20:57 | 146,803,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | #!/usr/local/bin/python
import re
import sys
UNDERLINE = '='
def get_arguments():
    """Validate the command line and return (terms_file, data_file).

    Prints a usage line to stderr and exits with status 1 unless exactly
    two positional arguments were supplied.
    """
    # check for correct number of arguments...
    if len(sys.argv) != 3:
        # Fix: the usage line previously named "review2-search.py", which does
        # not match this script (documented as search-with-fns.py below).
        print("Usage: search-with-fns.py search_terms_file data_file", file=sys.stderr)
        sys.exit(1)
    # unpack the two file names and return them...
    terms_file, data_file = sys.argv[1:3]
    return terms_file, data_file
def build_pattern(terms_file):
    """Read one search term per line from *terms_file* and return a regex
    alternation pattern joining them with '|' (e.g. "term1|term2").

    Trailing whitespace (including the newline) is stripped from each term.
    """
    with open(terms_file) as handle:
        terms = [line.rstrip() for line in handle]
    # print('Pattern:', '|'.join(terms))  # uncomment for debugging
    return '|'.join(terms)
def search_file(pattern, data_file):
    """Scan *data_file* line by line and print every line matching *pattern*.

    Each hit is printed as a 4-digit line number plus the line itself,
    followed by a second line that underlines the matched span (the 5-space
    offset accounts for the "NNNN " prefix).  UNDERLINE is a module-level
    constant.
    """
    with open(data_file) as data:
        for number, text in enumerate(data, start=1):
            hit = re.search(pattern, text)
            if not hit:
                continue
            print("{:04d} {:s}".format(number, text), end='')
            print(' ' * (5 + hit.start()) + UNDERLINE * (hit.end() - hit.start()))
# =====================================================================
# main processing: Search a file for terms provided by another file.
# Usage: search-with-fns.py search_terms_file file_to_search
# =====================================================================
def main():
    """Parse the command line, build the search pattern, and run the scan."""
    terms_file, data_file = get_arguments()
    pattern = build_pattern(terms_file)
    search_file(pattern, data_file)


if __name__ == "__main__":
    # Guard so that importing this module (e.g. from tests) does not
    # immediately validate sys.argv and call sys.exit().
    main()
| [
"porkpie@gmail.com"
] | porkpie@gmail.com |
3b3b79f20e26193d04e460d59f4bf45b4d13b244 | 5453dee97da45be8e316150a65d3308c408dd3c7 | /backend/satchel_wallet_24918/urls.py | 8241ee333f54e43e7463188a70a915a993844224 | [] | no_license | crowdbotics-apps/satchel-wallet-24918 | 22d4599b199e99297a3ccf2236917b951db9fe38 | 524e2432f97334bc25760aa3c18e464f972998a0 | refs/heads/master | 2023-03-13T07:56:43.770159 | 2021-03-09T01:11:51 | 2021-03-09T01:11:51 | 345,825,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | """satchel_wallet_24918 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL routes for the project.  Order matters: Django uses the first match,
# so the SPA catch-all must remain last.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "Satchel Wallet"
admin.site.site_title = "Satchel Wallet Admin Portal"
admin.site.index_title = "Satchel Wallet Admin"

# swagger
# drf-yasg OpenAPI schema; visible only to authenticated users.
api_info = openapi.Info(
    title="Satchel Wallet API",
    default_version="v1",
    description="API documentation for Satchel Wallet App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]

# Serve the single-page app shell at the site root.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
# Catch-all: any unmatched path falls through to the SPA (client-side routing).
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
80d41ea8cf356fba8438c715a87477396443a76b | 9d81d0484cd0954abb1a83079904e65c850f88e6 | /plugins/tmp/client.py | 1d84a120b5721fe9d0e227077192d3c262789c02 | [
"MIT"
] | permissive | GPrathap/OpenBCIPython | 802db7e1591769e7f3e3ca1f347bf78083d7579f | 0f5be167fb09d31c15885003eeafec8cdc08dbfa | refs/heads/master | 2021-09-04T01:12:04.106419 | 2018-01-13T21:52:49 | 2018-01-13T21:52:49 | 82,319,921 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | import json
import cPickle as pickle
import socket
import sys
# Create a UDP socket
# NOTE(review): this script uses Python 2 print syntax (`print >>sys.stderr`)
# and will not run under Python 3 without modification.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Destination for the datagram; a server must be listening on this port.
server_address = ('localhost', 5678)
message = [45,67,89]

try:
    # Serialize the payload as JSON text before sending.
    data_string = json.dumps(message)

    # Send data
    print >>sys.stderr, 'sending "%s"' % data_string
    sent = sock.sendto(data_string, server_address)

    # # Receive response (disabled; note sock.recvfrom(4096), not recv,
    # # is what returns a (data, address) pair)
    # print >>sys.stderr, 'waiting to receive'
    # data, server = sock.recv(4096)
    # print >>sys.stderr, 'received "%s"' % data
finally:
    # Always release the socket, even if sending fails.
    print >>sys.stderr, 'closing socket'
    sock.close()
"ggeesara@gmail.com"
] | ggeesara@gmail.com |
1a9bb4a9ec638420d1a783e974812f7852f907a5 | 9b96c37db1f61065094d42bc5c8ad6eb3925961b | /level1/touching_keypad.py | 2df7bd27291cf9f98825fbcaf987970825dc0d85 | [] | no_license | Taeheon-Lee/Programmers | a97589498c866c498c1aa9192fdf8eec9f8e31f4 | c38b1c7dc4114c99191b77e5d19af432eaf6177e | refs/heads/master | 2023-07-09T21:10:25.064947 | 2021-08-30T05:17:49 | 2021-08-30T05:17:49 | 394,327,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,851 | py | "카패드 누르기"
# Problem link: "https://programmers.co.kr/learn/courses/30/lessons/67256"
dic_loc = {"left": [1, 4, 7], "right": [3, 6, 9], "middle": [2, 5, 8, 0]}  # keypad column membership
dic_dis_2 = {1: 1, 2: 0, 3: 1, 4: 2, 5: 1, 6: 2, 7: 3, 8: 2, 9: 3, 0: 3, '*': 4, '#': 4}  # travel cost from every key to button 2
dic_dis_5 = {1: 2, 2: 1, 3: 2, 4: 1, 5: 0, 6: 1, 7: 2, 8: 1, 9: 2, 0: 2, '*': 3, '#': 3}  # travel cost from every key to button 5
dic_dis_8 = {1: 3, 2: 2, 3: 3, 4: 2, 5: 1, 6: 2, 7: 1, 8: 0, 9: 1, 0: 1, '*': 2, '#': 2}  # travel cost from every key to button 8
dic_dis_0 = {1: 4, 2: 3, 3: 4, 4: 3, 5: 2, 6: 3, 7: 2, 8: 1, 9: 2, 0: 0, '*': 1, '#': 1}  # travel cost from every key to button 0


def solution(numbers, hand):
    """Return the sequence of thumbs ("L"/"R") used to press *numbers*.

    Left-column keys (1/4/7) always use the left thumb and right-column keys
    (3/6/9) the right thumb.  Middle-column keys (2/5/8/0) use whichever
    thumb is currently closer; ties are broken by the dominant *hand*
    ("left" or "right").  Thumbs start on '*' (left) and '#' (right).
    """
    distance_for = {2: dic_dis_2, 5: dic_dis_5, 8: dic_dis_8, 0: dic_dis_0}
    thumb_at = {"L": '*', "R": '#'}  # key currently under each thumb
    presses = []
    for key in numbers:
        if key in dic_loc["left"]:
            chosen = "L"
        elif key in dic_loc["right"]:
            chosen = "R"
        else:
            # Middle column: compare each thumb's distance to the target key.
            table = distance_for[key]
            left_cost = table[thumb_at["L"]]
            right_cost = table[thumb_at["R"]]
            if left_cost != right_cost:
                chosen = "L" if left_cost < right_cost else "R"
            else:
                chosen = "R" if hand == "right" else "L"
        thumb_at[chosen] = key
        presses.append(chosen)
    return ''.join(presses)
"taeheon714@gmail.com"
] | taeheon714@gmail.com |
f743dc53ac00f73d6d56256c088046122238cc43 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v6_0_2f/rbridge_id/ip/rtm_config/route/static_route_oif/route_attributes/__init__.py | 8f304b1edde328b78f463809396152955cb1bf14 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,705 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class route_attributes(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-rbridge - based on the path /rbridge-id/ip/rtm-config/route/static-route-oif/route-attributes. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  NOTE(review): generated (Python 2) pyangbind code. Do not hand-edit the
  logic; regenerate from the YANG model instead. Only comments were added.
  """
  # __slots__ fixes the attribute set (no per-instance __dict__); the
  # double-underscore names are the name-mangled leaf value holders.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__metric','__distance','__tag',)

  _yang_name = 'route-attributes'
  _rest_name = ''

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: explicit kwarg wins, then inherit from the
    # parent node, defaulting to False (no path registry).
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Same resolution order for the extension-method map.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Leaf holders wrapped in YANGDynClass, which enforces the uint32 ranges
    # declared in the YANG model (distance 1..254, metric 1..16, tag 0..2^32-1).
    self.__distance = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)
    self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)
    self.__tag = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)

    load = kwargs.pop("load", None)
    # A single positional argument is treated as a template object: it must
    # expose every element of this container, and changed values are copied in.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG data path: delegate to the parent when attached to a tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'rbridge-id', u'ip', u'rtm-config', u'route', u'static-route-oif', u'route-attributes']

  def _rest_path(self):
    # REST path: this container has an empty _rest_name, so when attached it
    # contributes nothing to the parent's path.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'rbridge-id', u'ip', u'route', u'static-route-oif']

  def _get_metric(self):
    """
    Getter method for metric, mapped from YANG variable /rbridge_id/ip/rtm_config/route/static_route_oif/route_attributes/metric (uint32)
    """
    return self.__metric

  def _set_metric(self, v, load=False):
    """
    Setter method for metric, mapped from YANG variable /rbridge_id/ip/rtm_config/route/static_route_oif/route_attributes/metric (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_metric is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_metric() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Coerce v into the range-restricted uint32 wrapper; out-of-range or
    # wrongly-typed values surface as a descriptive ValueError.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """metric must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)""",
        })

    self.__metric = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_metric(self):
    # Reset the leaf to a fresh (unset) wrapper.
    self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)


  def _get_distance(self):
    """
    Getter method for distance, mapped from YANG variable /rbridge_id/ip/rtm_config/route/static_route_oif/route_attributes/distance (uint32)
    """
    return self.__distance

  def _set_distance(self, v, load=False):
    """
    Setter method for distance, mapped from YANG variable /rbridge_id/ip/rtm_config/route/static_route_oif/route_attributes/distance (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_distance is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_distance() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Same coercion pattern as _set_metric, with the 1..254 distance range.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """distance must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)""",
        })

    self.__distance = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_distance(self):
    # Reset the leaf to a fresh (unset) wrapper.
    self.__distance = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)


  def _get_tag(self):
    """
    Getter method for tag, mapped from YANG variable /rbridge_id/ip/rtm_config/route/static_route_oif/route_attributes/tag (uint32)

    YANG Description: Tag can be configured to filter the static routes
    for route redistribution.
    Default value is 0, indicating no tag.
    """
    return self.__tag

  def _set_tag(self, v, load=False):
    """
    Setter method for tag, mapped from YANG variable /rbridge_id/ip/rtm_config/route/static_route_oif/route_attributes/tag (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tag is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tag() directly.

    YANG Description: Tag can be configured to filter the static routes
    for route redistribution.
    Default value is 0, indicating no tag.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Same coercion pattern; tag accepts the full uint32 range.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tag must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)""",
        })

    self.__tag = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_tag(self):
    # Reset the leaf to a fresh (unset) wrapper.
    self.__tag = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='uint32', is_config=True)

  # Public attribute API: plain properties over the private getters/setters.
  metric = __builtin__.property(_get_metric, _set_metric)
  distance = __builtin__.property(_get_distance, _set_distance)
  tag = __builtin__.property(_get_tag, _set_tag)


  _pyangbind_elements = {'metric': metric, 'distance': distance, 'tag': tag, }
"badaniya@brocade.com"
] | badaniya@brocade.com |
9fc68de1b0bedb730f5d41555ddc9324c38376a3 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /sagemaker_write_2/code-repository_create.py | 5da5051aff1f842a5568c68ab8c4d651925c6d51 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/create-code-repository.html
if __name__ == '__main__':
    # Related sub-commands (no-op string literal kept for reference).
    """
    delete-code-repository : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/delete-code-repository.html
    describe-code-repository : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/describe-code-repository.html
    list-code-repositories : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/list-code-repositories.html
    update-code-repository : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/update-code-repository.html
    """
    # Help text shown to the user for the two required CLI parameters.
    parameter_display_string = """
    # code-repository-name : The name of the Git repository. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).
    # git-config :
    """
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegate to the shared helper that prompts for both parameter values and
    # invokes `aws sagemaker create-code-repository`.
    write_two_parameter("sagemaker", "create-code-repository", "code-repository-name", "git-config", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
a4d8767d31d8277c1bad4b172829568c721c2d11 | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/official/modeling/optimization/configs/learning_rate_config.py | 520a0b96141526382f8e29e733505d62273d5a1f | [
"Apache-2.0",
"MIT"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 7,705 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataclasses for learning rate schedule config."""
from typing import List, Optional
import dataclasses
from official.modeling.hyperparams import base_config
@dataclasses.dataclass
class ConstantLrConfig(base_config.Config):
  """Configuration for a constant learning rate.

  This class is a container for the constant learning rate decay configs.

  Attributes:
    name: The name of the learning rate schedule. Defaults to Constant.
    learning_rate: A float. The learning rate. Defaults to 0.1.
  """
  name: str = 'Constant'
  learning_rate: float = 0.1
@dataclasses.dataclass
class StepwiseLrConfig(base_config.Config):
  """Configuration for stepwise learning rate decay.

  This class is a container for the piecewise constant learning rate
  scheduling configs. It will configure an instance of PiecewiseConstantDecay
  keras learning rate schedule.

  An example (from keras docs): use a learning rate that's 1.0 for the first
  100001 steps, 0.5 for the next 10000 steps, and 0.1 for any additional steps.

  ```python
  boundaries: [100000, 110000]
  values: [1.0, 0.5, 0.1]
  ```

  Attributes:
    name: The name of the learning rate schedule. Defaults to PiecewiseConstant.
    boundaries: A list of ints of strictly increasing entries. Defaults to None.
    values: A list of floats that specifies the values for the intervals defined
      by `boundaries`. It should have one more element than `boundaries`.
      The learning rate is computed as follows:
        [0, boundaries[0]] -> values[0]
        [boundaries[0], boundaries[1]] -> values[1]
        [boundaries[n-1], boundaries[n]] -> values[n]
        [boundaries[n], end] -> values[n+1]
      Defaults to None.
  """
  name: str = 'PiecewiseConstantDecay'
  boundaries: Optional[List[int]] = None
  values: Optional[List[float]] = None
@dataclasses.dataclass
class ExponentialLrConfig(base_config.Config):
  """Configuration for exponential learning rate decay.

  This class is a container for the exponential learning rate decay configs.

  Attributes:
    name: The name of the learning rate schedule. Defaults to ExponentialDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    decay_steps: A positive integer that is used for decay computation. Defaults
      to None.
    decay_rate: A float. Defaults to None.
    staircase: A boolean, if true, learning rate is decreased at discrete
      intervals. Defaults to None (treated as False by the schedule).
  """
  name: str = 'ExponentialDecay'
  initial_learning_rate: Optional[float] = None
  decay_steps: Optional[int] = None
  decay_rate: Optional[float] = None
  staircase: Optional[bool] = None
@dataclasses.dataclass
class PolynomialLrConfig(base_config.Config):
  """Configuration for polynomial learning rate decay.

  This class is a container for the polynomial learning rate decay configs.

  Attributes:
    name: The name of the learning rate schedule. Defaults to PolynomialDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    decay_steps: A positive integer that is used for decay computation. Defaults
      to None.
    end_learning_rate: A float. The minimal end learning rate.
    power: A float. The power of the polynomial. Defaults to linear, 1.0.
    cycle: A boolean, whether or not it should cycle beyond decay_steps.
      Defaults to False.
  """
  name: str = 'PolynomialDecay'
  initial_learning_rate: Optional[float] = None
  decay_steps: Optional[int] = None
  end_learning_rate: float = 0.0001
  power: float = 1.0
  cycle: bool = False
@dataclasses.dataclass
class CosineLrConfig(base_config.Config):
  """Configuration for cosine learning rate decay.

  This class is a container for the cosine learning rate decay configs,
  tf.keras.experimental.CosineDecay.

  Attributes:
    name: The name of the learning rate schedule. Defaults to CosineDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    decay_steps: A positive integer that is used for decay computation. Defaults
      to None.
    alpha: A float. Minimum learning rate value as a fraction of
      initial_learning_rate.
  """
  name: str = 'CosineDecay'
  initial_learning_rate: Optional[float] = None
  decay_steps: Optional[int] = None
  alpha: float = 0.0
@dataclasses.dataclass
class DirectPowerLrConfig(base_config.Config):
  """Configuration for DirectPower learning rate decay.

  This class configures a schedule that follows lr * (step)^power.

  Attributes:
    name: The name of the learning rate schedule. Defaults to DirectPowerDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    power: A float. Defaults to -0.5, for sqrt decay.
  """
  name: str = 'DirectPowerDecay'
  initial_learning_rate: Optional[float] = None
  power: float = -0.5
@dataclasses.dataclass
class PowerAndLinearDecayLrConfig(base_config.Config):
  """Configuration for power-and-linear learning rate decay.

  This class configures a schedule that follows lr * (step)^power for the
  first total_decay_steps * (1 - linear_decay_fraction) steps, and follows
  lr * (step)^power * (total_decay_steps - step) / (total_decay_steps *
  linear_decay_fraction) for the rest of the steps.

  Attributes:
    name: The name of the learning rate schedule. Defaults to
      PowerAndLinearDecay.
    initial_learning_rate: A float. The initial learning rate. Defaults to None.
    total_decay_steps: A positive integer. Total number of steps over which the
      schedule is defined. Defaults to None.
    power: A float. Defaults to -0.5, for sqrt decay.
    linear_decay_fraction: A float in [0, 1]. Fraction of total_decay_steps
      over which the additional linear decay is applied. Defaults to 0.1.
  """
  name: str = 'PowerAndLinearDecay'
  initial_learning_rate: Optional[float] = None
  total_decay_steps: Optional[int] = None
  power: float = -0.5
  linear_decay_fraction: float = 0.1
@dataclasses.dataclass
class LinearWarmupConfig(base_config.Config):
  """Configuration for linear warmup schedule config.

  This class is a container for the linear warmup schedule configs.
  Warmup_learning_rate is the initial learning rate, the final learning rate of
  the warmup period is the learning_rate of the optimizer in use. The learning
  rate at each step linearly increased according to the following formula:
    warmup_learning_rate = warmup_learning_rate +
        step / warmup_steps * (final_learning_rate - warmup_learning_rate).
  Using warmup overrides the learning rate schedule by the number of warmup
  steps.

  Attributes:
    name: The name of warmup schedule. Defaults to linear.
    warmup_learning_rate: Initial learning rate for the warmup. Defaults to 0.
    warmup_steps: Warmup steps. Defaults to None.
  """
  name: str = 'linear'
  warmup_learning_rate: float = 0
  warmup_steps: Optional[int] = None
@dataclasses.dataclass
class PolynomialWarmupConfig(base_config.Config):
  """Configuration for polynomial warmup schedule config.

  This class is a container for the polynomial warmup schedule configs.

  Attributes:
    name: The name of warmup schedule. Defaults to polynomial.
    power: Polynomial power. Defaults to 1.
    warmup_steps: Warmup steps. Defaults to None.
  """
  name: str = 'polynomial'
  power: float = 1
  warmup_steps: Optional[int] = None
| [
"andreas.boerzel@gmx.de"
] | andreas.boerzel@gmx.de |
29b46b647ba8b530d56af61de659f245e6fe215b | 4aa22abc42eb478269f3e3a2c030d2cd09845cfe | /test/grid/runDijetDataAnalyzer_data_cfg.py | 17511204c0cc7813613f6f91544fc3db93c1d163 | [] | no_license | hhendrik/2l2v_fwk | 1593b431577e47f36e382b5e048c01ad4813382b | 3d87152f58f00e39600b0ff8d0531e1030c252f1 | refs/heads/master | 2021-01-18T11:19:21.478111 | 2014-04-04T13:25:38 | 2014-04-04T13:26:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | ../../test/top/runDijetDataAnalyzer_data_cfg.py | [
"psilva@cern.ch"
] | psilva@cern.ch |
03b84161a9f04f4dc964e751a7e9211dfacfe61b | 9fa8c280571c099c5264960ab2e93255d20b3186 | /algorithm/mobo/solver/parego/parego.py | 490b74696692c48610a556675780573a6df09a4e | [
"MIT"
] | permissive | thuchula6792/AutoOED | 8dc97191a758200dbd39cd850309b0250ac77cdb | 272d88be7ab617a58d3f241d10f4f9fd17b91cbc | refs/heads/master | 2023-07-23T16:06:13.820272 | 2021-09-08T14:22:18 | 2021-09-08T14:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | import numpy as np
from ..base import Solver
from pymoo.optimize import minimize
from pymoo.algorithms.so_cmaes import CMAES
from pymoo.decomposition.tchebicheff import Tchebicheff
from .utils import ScalarizedEvaluator
from multiprocess import Process, Queue
def optimization(problem, x, weights, queue):
    '''
    Parallel worker for single-objective CMA-ES optimization.

    Parameters
    ----------
    problem
        The (surrogate) multi-objective problem to optimize.
    x: np.array
        Starting point handed to CMA-ES.
    weights: np.array
        Weight vector for the Tchebicheff scalarization of the objectives.
    queue
        Result queue; the best point found and its scalarized objective value
        are put here as ``[x_best, f_best]`` for the parent process to collect.
    '''
    # Scalarize the multi-objective problem into a single objective via
    # Tchebicheff decomposition with the given weights.
    evaluator = ScalarizedEvaluator(decomposition=Tchebicheff(), weights=weights)
    res = minimize(problem, CMAES(x), evaluator=evaluator)
    # res.X / res.F hold the optimum found; forward the first entry.
    queue.put([res.X[0], res.F[0]])
class ParEGOSolver(Solver):
    '''
    Solver based on ParEGO.
    '''
    def __init__(self, *args, **kwargs):
        # pop_size: number of scalarized subproblems solved per call.
        # n_process: cap on concurrently running worker processes (popped so
        # it is not forwarded to the base Solver).
        self.pop_size = kwargs['pop_size']
        self.n_process = kwargs.pop('n_process')
        super().__init__(*args, algo=CMAES, **kwargs)

    def solve(self, problem, X, Y):
        '''
        Solve the multi-objective problem by multiple scalarized single-objective solvers.

        Parameters
        ----------
        problem: mobo.surrogate_problem.SurrogateProblem
            The surrogate problem to be solved.
        X: np.array
            Current design variables.
        Y: np.array
            Current performance values.

        Returns
        -------
        solution: dict
            A dictionary containing information of the solution.\n
            - solution['x']: Proposed design samples.
            - solution['y']: Performance of proposed design samples.
        '''
        # initialize population
        sampling = self._get_sampling(X, Y)
        if not isinstance(sampling, np.ndarray):
            # sampling is an operator rather than concrete points; materialize
            # pop_size starting points from it
            sampling = sampling.do(problem, self.pop_size)
        # generate scalarization weights: one random weight vector per starting
        # point, normalized so each row sums to 1
        weights = np.random.random((self.pop_size, Y.shape[1]))
        weights /= np.expand_dims(np.sum(weights, axis=1), 1)
        # optimization: fan out one worker per starting point, never keeping
        # more than n_process workers alive at once
        xs, ys = [], []
        queue = Queue()
        n_active_process = 0
        for i, x0 in enumerate(sampling):
            Process(target=optimization, args=(problem, x0, weights[i], queue)).start()
            n_active_process += 1
            if n_active_process >= self.n_process:
                # block until one worker finishes before spawning the next
                x, y = queue.get()
                xs.append(x)
                ys.append(y)
                n_active_process -= 1
        # gather result from the workers still running
        for _ in range(n_active_process):
            x, y = queue.get()
            xs.append(x)
            ys.append(y)
        # construct solution
        self.solution = {'x': np.array(xs), 'y': np.array(ys)}
return self.solution | [
"yunsheng@mit.edu"
] | yunsheng@mit.edu |
0225168ab002e62d63fd9c39139e0f7e6054f49b | 56d921f97a8ad43c52c5d66be517a4f37ec8a64f | /jiecheng3.py | 94c68c074ebad4eed9aee6a1a37c57d6588c84d1 | [] | no_license | Oscer2016/python | 5ad27360bb6a47ec562696392906ba0f1daa8f99 | ff776e95327db4163a34b780ace21e561675ce62 | refs/heads/master | 2021-06-09T23:37:13.156650 | 2017-01-17T12:23:09 | 2017-01-17T12:23:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | #jicheng.py
def f(n):
    """Return the factorial of n (n!), computed recursively.

    The base case accepts any n <= 1 and returns 1, so f(0) == 0! == 1 and
    inputs below 1 no longer recurse until the stack overflows (the original
    base case only stopped at n == 1).
    """
    if n > 1:
        return f(n - 1) * n
    else:
        return 1
n=input()
print f(n)
| [
"hepansos@126.com"
] | hepansos@126.com |
78a6f5a2d7d2673a30b6fdfc1170f493c07e7a3d | 87eb72edb890c22cc230a3e5511a4d745f2e6f72 | /evaluation/workflow/utils.py | b1a72074d62716c97d62f632372232bdb527a091 | [
"MIT"
] | permissive | QuantumMisaka/GLUE | a16c84ec9978daa75117f607b06c1d52259c5d13 | e84cb6483971dcb1e2485080f812899baaf31b5b | refs/heads/master | 2023-07-27T17:53:51.720504 | 2021-09-14T08:21:45 | 2021-09-14T08:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | r"""
Utility functions for snakemake files
"""
# pylint: disable=missing-function-docstring, redefined-outer-name
from functools import reduce
from operator import add
from pathlib import Path
def conf_expand_pattern(conf, placeholder="null"):
    """Build a ``key:{key}`` wildcard pattern from *conf*'s keys, joined by
    ``-``; an empty conf yields *placeholder* instead."""
    if not conf:
        return placeholder
    parts = []
    for key in conf:
        parts.append(key + ":{" + key + "}")
    return "-".join(parts)
def expand(pattern, **wildcards):
    """snakemake ``expand`` with support for default/choices wildcards.

    A wildcard value may be a dict ``{"default": d, "choices": [...]}``. In
    that case the pattern is expanded once per dict-valued wildcard, varying
    that wildcard over its choices while pinning every other dict-valued
    wildcard to its default (plain wildcards are passed through unchanged).
    Without any dict-valued wildcard this behaves exactly like snakemake's
    ``expand``.
    """
    from snakemake.io import expand

    has_default_choices = False
    for val in wildcards.values():  # Sanity check
        if isinstance(val, dict):
            if "default" not in val or "choices" not in val:
                print(val)
                raise ValueError("Invalid default choices!")
            has_default_choices = True

    if not has_default_choices:
        return expand(pattern, **wildcards)

    expand_set = set()
    for key, val in wildcards.items():
        if isinstance(val, dict):
            # Vary this wildcard over its choices, pin all other dict-valued
            # wildcards to their defaults.
            wildcards_use = {key: val["choices"]}
            for other_key, other_val in wildcards.items():
                if other_key == key:
                    continue
                if isinstance(other_val, dict):
                    wildcards_use[other_key] = other_val["default"]
                else:
                    wildcards_use[other_key] = other_val
            # Accumulate into a set so overlapping expansions are deduplicated.
            expand_set = expand_set.union(expand(pattern, **wildcards_use))
    return list(expand_set)
def seed2range(config):
    """In place, replace every non-zero value whose key ends in ``seed`` with
    ``range(value)``, recursing into nested dict sections (a seed count thus
    becomes the list of seeds to run)."""
    for key in config:
        value = config[key]
        if isinstance(value, dict):
            seed2range(value)
        elif key.endswith("seed") and value != 0:
            config[key] = range(value)
def target_directories(config):
    """Build the list of ``results/raw/...`` directory paths implied by *config*.

    Crosses dataset x subsample x prior x method x hyperparameters x seed,
    honoring the per-method special cases noted inline.
    """
    seed2range(config)
    dataset = config["dataset"].keys()
    subsample_conf = config["subsample"] or {}
    subsample_conf = expand(
        conf_expand_pattern(subsample_conf, placeholder="original"),
        **subsample_conf
    )

    def per_method(method):
        # One directory list per method, crossing the remaining config axes.
        prior_conf = config["prior"] or {}
        prior_conf = {} if method in ("UnionCom", "iNMF_FiG", "LIGER_FiG") else prior_conf  # Methods that do not use prior feature matching
        prior_conf = expand(
            conf_expand_pattern(prior_conf, placeholder="null"),
            **prior_conf
        )
        hyperparam_conf = config["method"][method] or {}
        hyperparam_conf = expand(
            conf_expand_pattern(hyperparam_conf, placeholder="default"),
            **hyperparam_conf
        )
        seed = 0 if method in ("bindSC", ) else config["seed"]  # Methods that are deterministic
        return expand(
            "results/raw/{dataset}/{subsample_conf}/{prior_conf}/{method}/{hyperparam_conf}/seed:{seed}",
            dataset=dataset,
            subsample_conf=subsample_conf,
            prior_conf=prior_conf,
            method=method,
            hyperparam_conf=hyperparam_conf,
            seed=seed
        )

    return reduce(add, map(per_method, config["method"]))
def target_files(directories):
    """Expand each result directory into its three expected output files.

    A directory containing a ``.blacklist`` marker contributes no files.
    As in the original, an empty *directories* raises TypeError (``reduce``
    receives no initial value).
    """
    per_directory_lists = []
    for entry in directories:
        directory = Path(entry)
        if (directory / ".blacklist").exists():
            per_directory_lists.append([])
        else:
            per_directory_lists.append([
                directory / "metrics.yaml",
                directory / "cell_type.pdf",
                directory / "domain.pdf",
            ])
    return reduce(add, per_directory_lists)
| [
"caozj@mail.cbi.pku.edu.cn"
] | caozj@mail.cbi.pku.edu.cn |
14d21a66649a6e28c56c87d90ce3b96c05dd1d2a | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467578/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_26/run_cfg.py | ffa0dc5d13e7c3bc73a80094616038f9b937bdbf | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | import FWCore.ParameterSet.Config as cms
import os,sys
# Make the per-job directory importable so base_cfg below can be found.
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467578/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
# Auto-generated per-grid-job override: replaces the input source of the base
# configuration with the subset of cmgTuple files assigned to this job.
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_79_1_AMD.root',
        '/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_7_1_GXL.root',
        '/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_80_1_xsD.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
4414de7c5cbc56534a03fc689dfd90f5e8a113b2 | 7952f66758b685f4bf045c7eb28efa3a22412a89 | /HackerRank/sol5-BigSorting.py | 88a72735750e92995e64feed6799a635fc038d8a | [] | no_license | PingPingE/Algorithm | b418fa13528c27840bb220e305933800c5b4c00a | 89a55309c44320f01d2d6fe5480181a4c5816fd2 | refs/heads/master | 2023-08-31T01:43:09.690729 | 2023-08-27T13:12:22 | 2023-08-27T13:12:22 | 172,465,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | #!/bin/python3
import math
import os
import random
import re
import sys
def sol(arr):
    """Sort a list of non-negative integer strings into ascending numeric order.

    Comparing by (length, lexicographic value) orders decimal digit strings
    numerically without converting arbitrarily large numbers to int
    (the original comment: "add each number's length information").
    """
    # sorted() already returns a list; the original wrapped it in a redundant
    # identity list comprehension.
    return sorted(arr, key=lambda s: (len(s), s))
if __name__ == '__main__':
    # First input line is the count; the numbers are kept as strings so that
    # arbitrarily long values can be sorted without integer conversion.
    unsorted = [ input() for _ in range(int(input()))]
    for s in sol(unsorted):
        print(s)
#드디어 성공!!
#하지만 discussions를 참고했다. 그래도 다양한 사람들의 의견, 코드 등을 보면서 많이 배웠다.
#컴터가 더 연산을 쉽고 빠르게 할 수 있도록 더 많은 정보를 주자!
| [
"ds03023@gmail.com"
] | ds03023@gmail.com |
860c310e893e8bb4727c39195f129cd71807aabb | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/str_cat-135.py | d0cec0551a65ecaa83019d37eaa5fd104c69b8f3 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | a:str = "Hello"
b:str = "World"
c:str = "ChocoPy"
def cat2(a:str, b:str) -> str:
    result:str = ""
    result = a + b
    return result
def cat3(a:str, b:str, c:str) -> str:
    front:str = ""
    front = a + b
    return front + c
print(cat2(a, b))
print(cat2("", c))
print($ID(a, " ", c))
print(len(a))
print(len(cat2(a,a)))
print(len(cat2("","")))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
3eb38bf3600a44172b0241a3218341a0d711cdea | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03732/s795942587.py | 80267c847271bc1bc6a9c984f1299245239a189a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | # https://atcoder.jp/contests/abc060/tasks/arc073_b
# 典型的なナップサック。だけど配列が大きいので素直に実装するとTLEになる
# 成約により、w1以上は必ず前のjを見ることに注意するとテーブルのサイズがぐっと減ることに気がつくがこれを実装するのはなかなかめんどくさそう。
# defaltdictを利用した再帰メモ化なら比較的実装可能では?
import sys
sys.setrecursionlimit(1 << 25)
read = sys.stdin.readline
def read_ints():
    """Parse one whitespace-separated line of integers from stdin (via the
    module-level ``read``)."""
    return [int(token) for token in read().split()]
def read_col(H, n_cols):
    '''
    Read H lines of n_cols whitespace-separated integers and return them
    column-wise (one list per column).

    H is number of rows
    n_cols is number of cols
    Used when the input gives columns A, B, ... one row per line
    (original note: "A列、B列が与えられるようなとき").
    '''
    ret = [[] for _ in range(n_cols)]
    for _ in range(H):
        tmp = list(map(int, read().split()))
        for col in range(n_cols):
            # Distribute this row's values across the per-column lists.
            ret[col].append(tmp[col])
    return ret
N, W = read_ints()
w, v = read_col(N, 2)
from collections import defaultdict
dp = defaultdict(lambda: -1)
def f(i, j):
    # Maximum total value achievable from items 0..i within weight budget j
    # (top-down memoized 0/1 knapsack; original comment in Japanese said the
    # same). dp defaults to -1, meaning "not computed yet"; w, v, dp are
    # module-level.
    if dp[i, j] != -1:
        return dp[i, j]
    if i == -1:
        # No items left to consider.
        return 0
    if j - w[i] < 0:
        # Item i does not fit in the remaining budget; skip it.
        return f(i - 1, j)
    # Either take item i (paying its weight, gaining its value) or skip it.
    ret = max(f(i - 1, j - w[i]) + v[i], f(i - 1, j))
    dp[i, j] = ret
    return ret
print(f(N - 1, W))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9046a3da2df23840c71d7b015bc6bd1ebb645ffe | 070ede418be70e376da2fd1ed18a567098c951c9 | /junk/src/libs/alphavantage/windowed_dataset.py | 6599e7882ed075cb2d742f3861492b2d5ba1fcee | [] | no_license | jmnel/neuralsort | b647f745c7c7e33f4d79400493fb974aeb818426 | 9efbeac8c8c98895f2bf930e33d45ebfeffb54c7 | refs/heads/master | 2020-12-30T03:13:18.135533 | 2020-09-21T02:51:40 | 2020-09-21T02:51:40 | 245,709,197 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | import sys
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[1]))
from random import randint
from pathlib import Path
import torch
from torch.utils.data import Dataset
import numpy as np
from pprint import pprint
from db_connectors import SQLite3Connector
class WindowedDataset(Dataset):
    """Dataset of windowed stock-return series loaded from a SQLite database.

    NOTE(review): construction is unfinished — __init__ only loads the data
    and prints fold sizes; train_size, test_size, prediction_window and
    is_train are accepted but not yet used.
    """

    def __init__(self,
                 data_path: Path,
                 train_size,
                 test_size,
                 prediction_window,
                 num_stocks,
                 is_train,
                 transform=None):
        super().__init__()
        self._transform = transform
        db = SQLite3Connector.connect(data_path / 'clean.db')
        table = 'adj_returns_clean'
        # Get list of symbols by picking first (n=num_stocks) column names.
        # schema[1:] skips the first column — presumably a date/index column;
        # TODO confirm against the database schema.
        schema = db.get_schema(table)
        symbols = [s['name'] for s in schema[1:]][0:num_stocks]
        # Get actual price time series.
        raw = db.select(table, symbols)
        db.close()
        k_folds = 4
        fold_len = len(raw) // k_folds
        print(len(raw))
        print(fold_len)
        # print(fold_len *
data_path = Path(__file__).absolute().parents[3] / 'data'
print(data_path)
foo = WindowedDataset(data_path,
train_size=600,
test_size=200,
prediction_window=10,
num_stocks=5,
is_train=True)
| [
"jmnel92@gmail.com"
] | jmnel92@gmail.com |
6429729d36074089835ef04f458ea4cf6e124765 | 5f4aab3f1aef88e57bf1676af6ee4d7fd0ec4f08 | /src/SConscript | bc3d26df08583e242278f4869e8687651f95b506 | [
"BSD-3-Clause"
] | permissive | chunkified/kl-iostream | 38167841c781c0052c08c1a5342da31592b6ba81 | b9f4c90b09e0b353971a35d8adc779822e186f03 | refs/heads/master | 2021-01-20T09:41:30.729656 | 2014-05-07T08:03:41 | 2014-05-07T08:03:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | #
# Copyright 2010-2014 Fabric Software Inc. All rights reserved.
#
# SCons build for the kliostream Fabric Engine extension: runs KL2EDK code
# generation over the KL/extension descriptions, compiles the shared library,
# and stages the extension files into #stage.
Import('parentEnv', 'kl2edk', 'kl', 'extSuffix')
extName = 'kliostream'
env = parentEnv.Clone()
env.Append(CPPPATH = [env.Dir('.').abspath])
# Extension description files plus all KL sources drive the code generation.
sources = [
    env.File('kliostream.fpm.json'),
    env.File('kliostream.codegen.json')
]
sources += env.Glob('*.kl')
cppFiles = [
    env.File('extension.cpp'),
    env.File('IFStream_functions.cpp'),
    env.File('OFStream_functions.cpp')
]
extensionFiles = env.Install(env.Dir('#stage'), [env.File(extName+'.fpm.json')] + env.Glob('*.kl'))
kl2edkResults = env.RunKL2EDK(cppFiles, sources)
extLibFileName = env.File(extName + '-' + extSuffix)
libraryFiles = Flatten([env.SharedLibrary(extLibFileName, cppFiles)])
# Headers generated by KL2EDK must exist before the library compiles.
env.Depends(libraryFiles, kl2edkResults)
extensionFiles += env.Install(env.Dir('#stage'), libraryFiles[0])
Return('extensionFiles')
| [
"helge.mathee@fabricengine.com"
] | helge.mathee@fabricengine.com | |
e33fa54f4a66204c553c8ba94a758e368c1d509b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03606/s347687358.py | de597f3f689f815591d9348faf868bf8955f2a95 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from fractions import gcd
# One-line fast-I/O helpers (standard competitive-programming boilerplate).
def input(): return sys.stdin.readline().strip()  # deliberately shadows builtins.input for speed
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
n = INT()
L = []  # unused leftover
ans = 0
for i in range(n):
    a, b = LIST()
    # The inclusive interval [a, b] contains b - a + 1 integers.
    ans += b - a + 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
843ed043d892d76779ec0a0ceb2832bd406da3c6 | 6fa0c051f742c3f9c99ee2800cd132db5ffb28c7 | /src/Collective/forms.py | ff624db36622f3fa0e9d6b70808263ea96555afe | [] | no_license | MCN10/NXTLVL | 9c37bf5782bfd8f24d0fb0431cb5885c585369b0 | 76d8818b7961e4f0362e0d5f41f48f53ce1bfdc5 | refs/heads/main | 2023-06-02T13:51:34.432668 | 2021-06-02T14:19:21 | 2021-06-02T14:19:21 | 328,625,042 | 1 | 0 | null | 2021-06-16T10:16:17 | 2021-01-11T10:19:44 | Python | UTF-8 | Python | false | false | 664 | py | from django.forms import ModelForm
from .models import *
class CollectiveOrderForm(ModelForm):
    """Form over CollectiveOrder. customer and transaction_id are excluded —
    presumably assigned server-side; confirm against the views."""
    class Meta:
        model = CollectiveOrder
        fields = '__all__'
        exclude = ['customer', 'transaction_id']
class CollectiveOrderItemsForm(ModelForm):
    """Form exposing every field of CollectiveOrderItem."""
    class Meta:
        model = CollectiveOrderItem
        fields = '__all__'
class CollectiveShippingDetailsForm(ModelForm):
    """Form exposing every field of CollectiveShippingAddress (note the
    form/model name mismatch: Details vs Address)."""
    class Meta:
        model = CollectiveShippingAddress
        fields = '__all__'
class CollectiveProductsForm(ModelForm):
    """Form exposing every field of CollectiveProduct."""
    class Meta:
        model = CollectiveProduct
        fields = '__all__'
class CollectiveCategoriesForm(ModelForm):
    """Form over CollectiveCategory. slug is excluded — presumably generated
    automatically from another field; confirm in the model's save()."""
    class Meta:
        model = CollectiveCategory
        fields = '__all__'
        exclude = ['slug']
| [
"mcn10.foxx@gmail.com"
] | mcn10.foxx@gmail.com |
2454d230d571ade8339803b76c3950c86b824968 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.9.70/2/1569574502.py | e1c6a7512ff955c929417dcb142233ae751ca36e | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,040 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
a = 123
b = list(str(a))
print (b)
def is_palindromic(n):
    """
    Check whether a non-negative integer reads the same forwards and
    backwards in its decimal representation.

    args:
        n: int (n >= 0)
    return:
        True/False for a non-negative int; the string "Nanana" for any other
        input (kept for behavioural compatibility with the original).
    """
    # type() (not isinstance) is kept deliberately so that bools are rejected
    # exactly as before.
    if type(n) != int or n < 0:
        return "Nanana"
    digits = str(n)
    return digits == digits[::-1]
######################################################################
## Lösung Teil 2. (Tests)
def test_is_palindromic():
    """Pytest cases: non-palindrome, even/odd-length palindromes, zero, and
    non-int input (rejected with the "Nanana" sentinel)."""
    a = 123
    b = 123321
    c = 45654
    d = 0
    e = 9.09
    assert is_palindromic(a) == False
    assert is_palindromic(b) == True
    assert is_palindromic(c) == True
    assert is_palindromic(d) == True
    assert is_palindromic(e) == "Nanana"
######################################################################
## Lösung Teil 3.
## Lösung Teil 4.
######################################################################
## test code
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
    """Grading checks: the required functions exist and take a parameter
    named 'n'. NOTE(review): gen_palindromic and represent are not defined in
    this file, so those checks will raise NameError."""
    def test_is_palindromic(self):
        assert is_palindromic
        assert 'n' in getfullargspec(is_palindromic).args
    def test_gen_palindromic(self):
        assert gen_palindromic
        assert 'n' in getfullargspec(gen_palindromic).args
    def test_represent(self):
        assert represent
        assert 'n' in getfullargspec(represent).args
class TestGrades:
    """Instructor grading harness with reference ("oracle") implementations.

    NOTE(review): it references gen_palindromic, represent, coverage and
    self.check_divisors, none of which are defined in this file, and reads
    is_palindromic.__hints__, which plain functions do not have — several of
    these tests will error rather than fail cleanly.
    """
    def test_docstring_present(self):
        # Every required function must carry a docstring.
        assert is_palindromic.__doc__ is not None
        assert gen_palindromic.__doc__ is not None
        assert represent.__doc__ is not None
    def test_typing_present(self):
        # Type hints must match the oracle signatures.
        assert is_palindromic.__hints__ == typing.get_type_hints(self.is_palindromic_oracle)
        assert typing.get_type_hints (gen_palindromic) == typing.get_type_hints (self.gen_palindromic_oracle)
        assert typing.get_type_hints (represent) == typing.get_type_hints (self.represent_oracle)
    def test_coverage(self):
        assert coverage("achieved") == coverage("required")
    def is_palindromic_oracle(self, n:int)->list:
        # Reference check: repeatedly compare and strip the outer digit pair.
        s = str(n)
        while len (s) > 1:
            if s[0] != s[-1]:
                return False
            s = s[1:-1]
        return True
    def gen_palindromic_oracle (self, n:int):
        # Lazy generator of palindromes, counting down from n + 1 to 1.
        return (j for j in range (n + 1, 0, -1) if self.is_palindromic_oracle (j))
    def represent_oracle (self, n:int) -> list:
        # Greedy search for n as a sum of at most three palindromes.
        for n1 in self.gen_palindromic_oracle (n):
            if n1 == n:
                return [n1]
            for n2 in self.gen_palindromic_oracle (n - n1):
                if n2 == n - n1:
                    return [n1, n2]
                for n3 in self.gen_palindromic_oracle (n - n1 - n2):
                    if n3 == n - n1 - n2:
                        return [n1, n2, n3]
        # failed to find a representation
        return []
    def test_is_palindromic(self):
        ## fill in
        # NOTE(review): check_divisors is not defined on this class.
        for i in range (100):
            self.check_divisors (i)
        n = random.randrange (10000)
        self.check_divisors (n)
    def test_gen_palindromic(self):
        ## fill in
        pass
    def test_represent (self):
        def check(n, r):
            # Every summand must be palindromic and they must sum to n.
            for v in r:
                assert self.is_palindromic_oracle (v)
            assert n == sum (r)
        for n in range (1,100):
            r = represent (n)
            check (n, r)
        for i in range (100):
            n = random.randrange (10000)
            r = represent (n)
            check (n, r)
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
6950bd92117c53aac7dea84e5af24b34e63e4288 | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res/scripts/client/gui/wgnc/actions.py | d0262b6d2850bb671b76223b5c7361d4da1ffa7e | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,812 | py | # 2015.11.18 11:57:06 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/wgnc/actions.py
import BigWorld
from adisp import process
from debug_utils import LOG_CURRENT_EXCEPTION, LOG_ERROR, LOG_WARNING, LOG_DEBUG
from gui.game_control import getBrowserCtrl
from gui.shared.utils.decorators import ReprInjector
from gui.wgnc.events import g_wgncEvents
from gui.wgnc.settings import WGNC_GUI_TYPE
@ReprInjector.simple(('_name', 'name'))
class _Action(object):
    """Base class for WGNC notification actions, identified by name."""
    __slots__ = ('_name',)

    def __init__(self, name):
        super(_Action, self).__init__()
        self._name = name

    def getName(self):
        return self._name

    def validate(self, itemsHolder):
        # Base actions are always valid; subclasses override to check that
        # their target exists in the items holder.
        return True

    def invoke(self, notID, actor = None):
        raise NotImplementedError
@ReprInjector.withParent(('_purge', 'purge'), ('_isInvoked', 'isInvoked'))
class Callback(_Action):
    """Action that replies to the server notification; with purge=True it is
    invoked at most once per instance."""
    __slots__ = ('_purge', '_isInvoked')

    def __init__(self, name, purge = True):
        super(Callback, self).__init__(name)
        self._purge = purge
        self._isInvoked = False

    def doPurge(self):
        return self._purge

    def invoke(self, notID, actor = None):
        if self._purge and self._isInvoked:
            # Purging callbacks are one-shot: further invocations are ignored.
            LOG_DEBUG('Callback with purge=true has been invoked, it is skipped', self._name)
            return
        self._isInvoked = True
        try:
            BigWorld.player().sendNotificationReply(notID, self._purge, self._name)
        except (AttributeError, TypeError):
            # The player entity may be unavailable; log instead of crashing.
            LOG_CURRENT_EXCEPTION()
@ReprInjector.withParent(('_url', 'url'))
class _OpenBrowser(_Action):
    """Base for actions that open a URL; concrete subclasses choose which
    browser (in-game or system) is used."""
    __slots__ = ('_url',)

    def __init__(self, name, url):
        super(_OpenBrowser, self).__init__(name)
        self._url = url

    def getURL(self):
        return self._url
@ReprInjector.withParent()
class OpenInternalBrowser(_OpenBrowser):
    """Opens the URL in the in-game browser, reusing the same browser window
    on repeated invocations via the remembered _browserID."""
    __slots__ = ('_browserID',)

    def __init__(self, name, url):
        super(OpenInternalBrowser, self).__init__(name, url)
        self._browserID = None
        return

    def invoke(self, notID, actor = None):
        ctrl = getBrowserCtrl()
        if ctrl:
            if actor:
                # Use the notification's topic as the browser window title.
                title = actor.getTopic()
            else:
                title = None
            self.__doInvoke(ctrl, title)
        else:
            LOG_ERROR('Browser controller is not found')
        return

    @process
    def __doInvoke(self, ctrl, title):
        # adisp coroutine: load() yields the (possibly newly created)
        # browser ID, which is kept for reuse on the next invocation.
        self._browserID = yield ctrl.load(self._url, browserID=self._browserID, title=title)
@ReprInjector.withParent()
class OpenExternalBrowser(_OpenBrowser):
    """Opens the URL in the player's system web browser."""

    def invoke(self, notID, actor = None):
        try:
            BigWorld.wg_openWebBrowser(self._url)
        except (AttributeError, TypeError):
            LOG_CURRENT_EXCEPTION()
@ReprInjector.withParent(('_target', 'target'))
class OpenWindow(_Action):
    """Shows the GUI item named *target*; only valid while that item exists
    in the items holder."""
    __slots__ = ('_target',)

    def __init__(self, name, target):
        super(OpenWindow, self).__init__(name)
        self._target = target

    def validate(self, itemsHolder):
        return itemsHolder.getItemByName(self._target) is not None

    def getTarget(self):
        return self._target

    def invoke(self, notID, actor = None):
        g_wgncEvents.onItemShowByAction(notID, self._target)
@ReprInjector.withParent(('_text', 'text'))
class ReplaceButtons(_Action):
    """Pop-up-only action: hides the pop-up's buttons and shows *text* as a
    note instead, then notifies listeners of the update."""
    __slots__ = ('_text',)

    def __init__(self, name, text):
        super(ReplaceButtons, self).__init__(name)
        self._text = text

    def getTextToReplace(self):
        return self._text

    def invoke(self, notID, actor = None):
        if not actor:
            LOG_ERROR('GUI item is not found', self)
            return
        if actor.getType() != WGNC_GUI_TYPE.POP_UP:
            # (typo "pup up" preserved: it is a runtime log string)
            LOG_WARNING('Hiding buttons is allowed in pup up only', actor, self)
            return
        actor.hideButtons()
        actor.setNote(self._text)
        g_wgncEvents.onItemUpdatedByAction(notID, actor)
def _getActions4String(value):
seq = value.split(',')
for name in seq:
yield name.strip()
@ReprInjector.simple(('__actions', 'actions'))
class ActionsHolder(object):
    """Registry of actions keyed by name; supports lookup, validation against
    the GUI items holder, and invocation by comma-separated name lists."""
    __slots__ = ('__actions',)

    def __init__(self, items):
        super(ActionsHolder, self).__init__()
        self.__actions = {item.getName():item for item in items}

    def clear(self):
        self.__actions.clear()

    def hasAction(self, name):
        return name in self.__actions

    def hasAllActions(self, names):
        # *names* is a comma-separated string of action names.
        for name in _getActions4String(names):
            if not self.hasAction(name):
                return False
        return True

    def getAction(self, name):
        action = None
        if self.hasAction(name):
            action = self.__actions[name]
        return action

    def validate(self, itemsHolder):
        # Drop every action whose own validate() rejects the items holder.
        # (Collected first, removed after, to avoid mutating while iterating.)
        exclude = set()
        for name, action in self.__actions.iteritems():
            if not action.validate(itemsHolder):
                LOG_WARNING('Action is invalid', action)
                exclude.add(name)
        for name in exclude:
            self.__actions.pop(name, None)
        return

    def invoke(self, notID, names, actor = None):
        # Invoke each named action for notification notID; returns True if at
        # least one action was found and invoked.
        result = False
        if not notID:
            LOG_ERROR('ID of notification is not defined', notID)
            return result
        for name in _getActions4String(names):
            if self.hasAction(name):
                action = self.__actions[name]
                action.invoke(notID, actor)
                result = True
            else:
                LOG_ERROR('Action is not found', name)
        return result
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\wgnc\actions.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:57:06 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
e5aec4b02d12cbe033e4c663271b013101e6589c | 57c64723003e8228338b4d2314cb12c011c0f169 | /deprecated/levelset.py | 7f6b54b83ce38e09ccd85e165b0b22027acc04d8 | [] | no_license | gmaher/tcl_code | d02fa0cafb9aa491f1d5d6197cd94fd9d7dbd37c | 13c18dcdbe265490b3a47916cb22d904d79da54f | refs/heads/master | 2020-04-03T22:03:36.024349 | 2017-05-12T21:35:58 | 2017-05-12T21:35:58 | 56,552,391 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,287 | py | import SimpleITK as sitk
# Geodesic-active-contour level-set segmentation demo on a vessel image patch
# using SimpleITK. (Python 2 script: uses print statements.)
from utility import *
import plotly as py

###########################
# Set some input parameters
###########################
sliceid = 50
impath = '/home/marsdenlab/Dropbox/vascular_data/OSMSC0006/OSMSC0006-cm.mha'
xstart = 200
ystart = 10
dim = 64        # patch is dim x dim pixels
sigma = 0.1     # Gaussian sigma for the gradient-magnitude filter
seedx = dim/2   # contour is seeded at the patch center
seedy = dim/2

############################
# Load image and get patch
############################
reader = sitk.ImageFileReader()
reader.SetFileName(impath)
img = reader.Execute()
print img.GetSize()
patch = img[xstart:xstart+dim, ystart:ystart+dim,sliceid]
print patch
print type(patch)
np_patch = sitk.GetArrayFromImage(patch)
#heatmap(np_patch, fn='./plots/patch.html', title='image')

##########################
# Compute feature image
##########################
# Gradient magnitude rescaled to [0, 1] serves as the feature image for the
# level-set filter below.
gradMagFilter = sitk.GradientMagnitudeRecursiveGaussianImageFilter()
gradMagFilter.SetSigma(sigma)
filt_patch = gradMagFilter.Execute(patch)
rescaleFilter = sitk.RescaleIntensityImageFilter()
filt_patch = rescaleFilter.Execute(filt_patch, 0, 1)
np_patch = sitk.GetArrayFromImage(filt_patch)
heatmap(np_patch, fn='./plots/blur.html', title='gradmag')

###############################
# Create initialization image
###############################
# Signed distance from a single seed pixel, thresholded, gives the initial
# level set (a small region around the seed).
seed_img = sitk.Image(dim,dim,sitk.sitkUInt8)
seed_img.SetSpacing(patch.GetSpacing())
seed_img.SetOrigin(patch.GetOrigin())
seed_img.SetDirection(patch.GetDirection())
seed_img[seedx,seedy] = 1
distance = sitk.SignedMaurerDistanceMapImageFilter()
distance.InsideIsPositiveOff()
distance.UseImageSpacingOn()
dis_img = distance.Execute(seed_img)
np_patch = sitk.GetArrayFromImage(dis_img)
#heatmap(np_patch, fn='./plots/distance.html')
init_img = sitk.BinaryThreshold(dis_img, -1000, 10)
# Map {0,1} to {+0.5,-0.5} so the zero level sits on the region boundary.
init_img = sitk.Cast(init_img, filt_patch.GetPixelIDValue())*-1+0.5
np_patch = sitk.GetArrayFromImage(init_img)
heatmap(np_patch, fn='./plots/init.html')

#####################################
# Run GeodesicActiveContour level set
#####################################
gdac = sitk.GeodesicActiveContourLevelSetImageFilter()
gdac_img = gdac.Execute(init_img, filt_patch, 0.002, -2.0, 1.0, 1.0, 1000, False)
print gdac.GetElapsedIterations()
print gdac.GetRMSChange()
gdac_patch = sitk.GetArrayFromImage(gdac_img)
heatmap(gdac_patch, fn='./plots/gdac.html', title='levelset')
| [
"gmaher2@hotmail.com"
] | gmaher2@hotmail.com |
f5b03bd3ee32d9828c0d98b5d4816615fc75d3ec | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_21190.py | 3a5c164eb892a8ba8703cc71a1a8a76d07736d16 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # Beautiful Soup conversion of Unicode characters to HTML entities
from bs4 import BeautifulSoup
# NOTE(review): html_doc must be defined before this snippet runs; the
# prettify(formatter="html") call below renders non-ASCII characters as
# HTML entities.
soup = BeautifulSoup(html_doc)
print(soup.prettify(formatter="html"))
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
0f1c436fd0791db79ceda5db8d972086d91150a4 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/odncas001/question3.py | 9053fb666e0308f4a3ab336b667468b87749a04c | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | first_name = input("Enter first name:\n")
last_name = input("Enter last name:\n")
# SECURITY(review): eval() on raw user input executes arbitrary code;
# int() or float() would be the safe way to parse the amount.
money = eval(input("Enter sum of money in USD:\n"))
country = input("Enter country name:\n")
money30 = money*30/100  # the promised 30% share mentioned in the letter
print("\nDearest ",first_name,"\nIt is with a heavy heart that I inform you of the death of my father,","\nGeneral Fayk ",last_name,", your long lost relative from Mapsfostol.","\nMy father left the sum of ",money,"USD for us, your distant cousins.","\nUnfortunately, we cannot access the money as it is in a bank in ",country,".","\nI desperately need your assistance to access this money.","\nI will even pay you generously, 30% of the amount - ",money30,"USD,","\nfor your help. Please get in touch with me at this email address asap.","\nYours sincerely","\nFrank ",last_name, sep="") | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
59fd1d2c4e96308cb0779dd99f018e612155737c | 94724578994ab1438dcefb51b7ef4d8570da5d4c | /z42/z42/lib/heartbeat.py | 0abfd51795aeb2616810c5976bd73069d5e46a41 | [] | no_license | PegasusWang/collection_python | 6648d83203634abf44fd42c0b37b0bf7cc406d8f | 9ef019a737a0817860d3184924c67a0833bd1252 | refs/heads/master | 2023-09-01T23:15:39.813635 | 2023-08-24T06:46:12 | 2023-08-24T06:46:12 | 43,693,872 | 130 | 90 | null | 2021-04-26T15:12:55 | 2015-10-05T15:28:15 | JavaScript | UTF-8 | Python | false | false | 1,422 | py |
#coding:utf-8
import os
from threading import Timer
import socket
import sys
import requests
from datetime import datetime
def sendmail(to, subject, html):
    """Send an HTML mail through the SendCloud web API.

    Returns the response body when it contains 'error'; otherwise None.
    """
    url = 'https://sendcloud.sohu.com/webapi/mail.send.xml'
    params = {
        # SECURITY(review): API credentials are hard-coded in source; they
        # should be moved to configuration / environment variables.
        'api_user': 'postmaster@42.sendcloud.org',
        'api_key' : 'kMCzqBPv',
        'to' : to,
        'from' : 'alert@42.sendcloud.org',
        'fromname' : '42btc',
        'subject' : subject,
        'html': html
    }
    r = requests.post(url, data=params)
    # Crude error detection: the XML response is only returned on failure.
    if r.text.find('error') != -1:
        return r.text
class Heartbeat(object):
    """Watchdog: repeatedly runs a liveness check; when it fails, runs an
    optional quit hook, prepares an alert, and SIGKILLs this process."""

    def __init__(self, interval=60):
        self._quit = None
        self._interval = interval

    def quit(self, func):
        # Decorator registering a cleanup hook to run before self-kill.
        self._quit = func
        return func

    def _sendmail(self):
        # Build the alert text; the actual mail call is currently disabled.
        title = '%s : %s %s'%(
            socket.gethostname(),
            ' '.join(sys.argv),
            datetime.now(),
        )
        html = """
        %s
        """%title
        # (disabled; the Chinese subject '进程自杀' means "process self-kill")
        #sendmail('42btc-alert@googlegroups.com', '进程自杀 : %s' % title, html)

    def is_alive(self, func):
        # Decorator: schedules repeated liveness checks of *func*. On a
        # failed check the quit hook runs, the alert is prepared, and the
        # process kills itself with SIGKILL; on success the next check is
        # rescheduled after `interval` seconds.
        def _():
            if not func():
                if self._quit is not None:
                    self._quit()
                self._sendmail()
                os.kill(os.getpid(), 9)
            else:
                Timer(self._interval, _).start()
        # The first check is delayed by an extra 60 seconds.
        Timer(self._interval+60, _).start()
        return _
heartbeat = Heartbeat(5)
| [
"tianma201211@gmail.com"
] | tianma201211@gmail.com |
b86130502764734456319cc9163ee400ecd16c61 | 99ca151c59afd9c0e7091b6919768448e40f88a2 | /numpy_ex1.py | 88f8860666a2f9c6e91be892b051a4713d8161c4 | [] | no_license | zainabnazari/Python_note | 1b6a454f6e7b3aca998d87a201823a600ec28815 | 3beb52beb3a0ebe17a6ac8c5695670e9dde59269 | refs/heads/main | 2023-02-10T22:32:33.160428 | 2021-01-12T18:36:54 | 2021-01-12T18:36:54 | 304,724,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | # file name: numpy_ex1.py
# Lists: + concatenates, and list*list is undefined (raises TypeError).
list1=[1,2,3,4]
list2=[1,2,3,4]
list3=[[1,2,3,4],[1,2,3,4]]
#print("list1*list2= ",list1*list2) # this will give error, the operation of multiplication on lists is not defined!
print("list1+list2= ",list1+list2)
print("list3+list1= ",list3+list1)
import numpy as np
# numpy arrays: + and * act elementwise; the 2-D array broadcasts against
# the 1-D one (see the expected output transcript below).
numpyarray1=np.array([1,2,3,4])
numpyarray2=np.array([1,2,3,4])
numpyarray3=np.array([[1,2,3,4],[1,2,3,4]])
print("numpyarray1*numpyarray2= ", numpyarray1*numpyarray2)
print("numpyarray1+numpyarray2= ", numpyarray1+numpyarray2)
print("numpyarray3+numpyarray1= ", numpyarray3+numpyarray1)
print("numpyarray3*numpyarray1= ", numpyarray3*numpyarray1)
'''
output:
list1+list2= [1, 2, 3, 4, 1, 2, 3, 4]
list3+list1= [[1, 2, 3, 4], [1, 2, 3, 4], 1, 2, 3, 4]
numpyarray1*numpyarray2= [ 1 4 9 16]
numpyarray1+numpyarray2= [2 4 6 8]
numpyarray3+numpyarray1= [[2 4 6 8]
[2 4 6 8]]
numpyarray3*numpyarray1= [[ 1 4 9 16]
[ 1 4 9 16]]
'''
| [
"nazari.zainab@gmail.com"
] | nazari.zainab@gmail.com |
3bdb764fcca8a052da1946ee71d5ca3a8d849cd5 | eca0530054fcae936bf6b4b9aaf2fa5201d45588 | /final/login.py | a59d31d84d2a2881987fa8bd2c10e8450e96de21 | [] | no_license | benaka-tech/sringeri | d2a0e628485c9c221f753de345c4cb31e03c0f3e | 99b334e8b84c00a6160749dc7964a3741021c10d | refs/heads/main | 2023-03-15T13:57:14.780184 | 2021-03-12T10:52:49 | 2021-03-12T10:52:49 | 347,124,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,138 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'login.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector as mc
from main_screen import Ui_MainWindow1
from datetime import datetime
class Ui_MainWindow(object):
    """Login window: collects username/password and, on success, hides the
    login window and opens the main screen (Ui_MainWindow1).

    Most of setupUi/retranslateUi is pyuic5-generated code from 'login.ui';
    keep manual edits minimal so it can be diffed against regenerated code.
    """

    def setupUi(self, MainWindow):
        """Build all widgets on MainWindow and wire up signal handlers."""
        # Keep a reference to the window so login() does not depend on a
        # module-level `MainWindow` global, which only exists when this
        # file is run as a script.
        self.main_window = MainWindow
        MainWindow.setObjectName("MainWindow")
        MainWindow.setFixedSize(876, 391)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(210, 10, 311, 111))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap(":/newPrefix/logo_colour.png"))
        self.label.setScaledContents(True)
        self.label.setObjectName("label")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(20, -10, 161, 141))
        self.label_3.setText("")
        self.label_3.setPixmap(QtGui.QPixmap(":/newPrefix/QDkO7nK6-removebg-preview.png"))
        self.label_3.setScaledContents(True)
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(540, 10, 171, 111))
        self.label_4.setText("")
        self.label_4.setPixmap(QtGui.QPixmap(":/newPrefix/download__2_-removebg-preview.png"))
        self.label_4.setScaledContents(True)
        self.label_4.setObjectName("label_4")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(730, 10, 121, 121))
        self.label_2.setText("")
        self.label_2.setPixmap(QtGui.QPixmap(":/newPrefix/aic-jitf-logo (1).png"))
        self.label_2.setScaledContents(True)
        self.label_2.setObjectName("label_2")
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setGeometry(QtCore.QRect(80, 130, 661, 171))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.groupBox.setFont(font)
        self.groupBox.setObjectName("groupBox")
        self.formLayoutWidget = QtWidgets.QWidget(self.groupBox)
        self.formLayoutWidget.setGeometry(QtCore.QRect(39, 40, 591, 81))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.formLayout.setVerticalSpacing(25)
        self.formLayout.setObjectName("formLayout")
        self.label_5 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_5.setObjectName("label_5")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_5)
        self.lineEdit = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.lineEdit.setObjectName("lineEdit")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit)
        self.label_6 = QtWidgets.QLabel(self.formLayoutWidget)
        self.label_6.setObjectName("label_6")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_6)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.formLayoutWidget)
        self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
        self.pushButton = QtWidgets.QPushButton(self.groupBox)
        self.pushButton.setGeometry(QtCore.QRect(300, 130, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.pushButton.clicked.connect(self.login)
        self.label_7 = QtWidgets.QLabel(self.centralwidget)
        self.label_7.setGeometry(QtCore.QRect(80, 320, 671, 41))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_7.setFont(font)
        self.label_7.setText("")
        self.label_7.setObjectName("label_7")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def login(self):
        """Validate the entered credentials against the `user` table and,
        on success, open the main screen.
        """
        try:
            username = self.lineEdit.text()
            password = self.lineEdit_2.text()
            mydb = mc.connect(
                host="localhost",
                user="root",
                password="",
                database="project"
            )
            mycursor = mydb.cursor()
            # Use a parameterized query: the previous string-concatenated
            # SQL (with LIKE) was vulnerable to SQL injection through the
            # login fields.
            mycursor.execute(
                "SELECT username, password FROM user WHERE username = %s AND password = %s",
                (username, password))
            result = mycursor.fetchone()
            # Release the connection as soon as the lookup is done.
            mycursor.close()
            mydb.close()
            if result is None:
                self.label_7.setText("Incorrect Email & Password")
            else:
                self.label_7.setText("You are logged in")
                self.window = QtWidgets.QMainWindow()
                self.ui = Ui_MainWindow1()
                self.ui.setupUi(self.window)
                self.main_window.hide()
                self.window.show()
        except mc.Error as e:
            print(e)
            # Report the failure on the status label (label_7); label_5 is
            # the "USERNAME" caption and must not be overwritten.
            self.label_7.setText("Error")

    def retranslateUi(self, MainWindow):
        """Apply translatable UI strings (pyuic5-generated)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Login Screen"))
        self.groupBox.setTitle(_translate("MainWindow", "LOGIN"))
        self.label_5.setText(_translate("MainWindow", "USERNAME"))
        self.label_6.setText(_translate("MainWindow", "PASSWORD"))
        self.pushButton.setText(_translate("MainWindow", "LOGIN"))
import img_rc
if __name__ == "__main__":
    import sys
    # Standard PyQt5 bootstrap: create the application, show the login
    # window, and hand control to the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # Exit the interpreter with Qt's event-loop return code.
    sys.exit(app.exec_())
| [
"cjayanth35@gmail.com"
] | cjayanth35@gmail.com |
a7e045ed5e51609b50acf24a6e689a58e64dd02e | 635bac115b708864707bbc9a684ce274e88d33a7 | /Tools/Scripts/libraries/webkitscmpy/webkitscmpy/program/canonicalize/__init__.py | 1687704b4b97f78d586f0b29853dc7ff904e5baf | [] | no_license | iglunix/WebKit | 131807b5c24f1644d8a5d2ffece440bf1b1ed707 | 92e63de4a92736360ecfd491a3e0e3b28f753b75 | refs/heads/main | 2023-07-03T08:30:16.089008 | 2021-03-30T17:34:53 | 2021-03-30T17:34:53 | 353,087,887 | 1 | 0 | null | 2021-03-30T17:36:18 | 2021-03-30T17:36:17 | null | UTF-8 | Python | false | false | 6,750 | py | # Copyright (C) 2020, 2021 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import tempfile
import subprocess
import sys
from webkitcorepy import arguments, run, string_utils
from webkitscmpy import log
from ..command import Command
class Canonicalize(Command):
    """Rewrite the not-yet-pushed commits on the current branch so that
    committer identities match the repository's contributor mapping and
    (optionally) commit messages gain an identifier line.

    Implemented with `git filter-branch`, driven by helper scripts
    (committer.py / message.py) that live next to this module.
    """
    name = 'canonicalize'
    help = 'Take the set of commits which have not yet been pushed and edit history to normalize the ' +\
        'committers with existing contributor mapping and add identifiers to commit messages'

    @classmethod
    def parser(cls, parser, loggers=None):
        """Register this command's arguments on the shared argparse parser."""
        output_args = arguments.LoggingGroup(
            parser,
            loggers=loggers,
            help='{} amount of logging and `git rebase` information displayed'
        )
        output_args.add_argument(
            '--identifier', '--no-identifier',
            help='Add in the identifier to commit messages, true by default',
            action=arguments.NoAction,
            dest='identifier',
            default=True,
        )
        output_args.add_argument(
            '--remote',
            help='Compare against a different remote',
            dest='remote',
            default='origin',
        )
        output_args.add_argument(
            '--number', '-n', type=int,
            help='Number of commits to be canonicalized, regardless of the state of the remote',
            dest='number',
            default=None,
        )

    @classmethod
    def main(cls, args, repository, identifier_template=None, **kwargs):
        """Canonicalize local commits; returns a process exit code."""
        # History rewriting only makes sense on a local Git checkout.
        if not repository.path:
            sys.stderr.write('Cannot canonicalize commits on a remote repository\n')
            return 1
        if not repository.is_git:
            sys.stderr.write('Commits can only be canonicalized on a Git repository\n')
            return 1

        branch = repository.branch
        if not branch:
            sys.stderr.write('Failed to determine current branch\n')
            return -1

        # Unless --number was given, count the commits the remote does not
        # have yet (merge commits excluded).
        num_commits_to_canonicalize = args.number
        if not num_commits_to_canonicalize:
            result = run([
                repository.executable(), 'rev-list',
                '--count', '--no-merges',
                '{remote}/{branch}..{branch}'.format(remote=args.remote, branch=branch),
            ], capture_output=True, cwd=repository.root_path)
            if result.returncode:
                sys.stderr.write('Failed to find local commits\n')
                return -1
            num_commits_to_canonicalize = int(result.stdout.rstrip())
        if num_commits_to_canonicalize <= 0:
            print('No local commits to be edited')
            return 0

        log.warning('{} to be editted...'.format(string_utils.pluralize(num_commits_to_canonicalize, 'commit')))
        # The commit just before the first one we will rewrite.
        base = repository.find('{}~{}'.format(branch, num_commits_to_canonicalize))
        log.info('Base commit is {} (ref {})'.format(base, base.hash))

        log.debug('Saving contributors to temp file to be picked up by child processes')
        contributors = os.path.join(tempfile.gettempdir(), '{}-contributors.json'.format(os.getpid()))
        try:
            with open(contributors, 'w') as file:
                repository.contributors.save(file)

            # Only rewrite commit messages when --identifier is in effect.
            message_filter = [
                '--msg-filter',
                "{} {} '{}'".format(
                    sys.executable,
                    os.path.join(os.path.dirname(__file__), 'message.py'),
                    identifier_template or 'Identifier: {}',
                ),
            ] if args.identifier else []

            # The env-filter shell script below parses committer.py's
            # "GIT_* value" output and exports each variable so git
            # filter-branch rewrites author/committer identities.
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call([
                    repository.executable(), 'filter-branch', '-f',
                    '--env-filter', '''{overwrite_message}
committerOutput=$({python} {committer_py} {contributor_json})
KEY=''
VALUE=''
for word in $committerOutput; do
    if [[ $word == GIT_* ]] ; then
        if [[ $KEY == GIT_* ]] ; then
            {setting_message}
            printf -v $KEY "${{VALUE::$((${{#VALUE}} - 1))}}"
            KEY=''
            VALUE=''
        fi
    fi
    if [[ "$KEY" == "" ]] ; then
        KEY="$word"
    else
        VALUE="$VALUE$word "
    fi
done
if [[ $KEY == GIT_* ]] ; then
    {setting_message}
    printf -v $KEY "${{VALUE::$((${{#VALUE}} - 1))}}"
fi'''.format(
                        overwrite_message='' if log.level > logging.INFO else 'echo "Overwriting $GIT_COMMIT"',
                        python=sys.executable,
                        committer_py=os.path.join(os.path.dirname(__file__), 'committer.py'),
                        contributor_json=contributors,
                        setting_message='' if log.level > logging.DEBUG else 'echo " $KEY=$VALUE"',
                    ),
                ] + message_filter + ['{}...{}'.format(branch, base.hash)],
                    cwd=repository.root_path,
                    env={'FILTER_BRANCH_SQUELCH_WARNING': '1', 'PYTHONPATH': ':'.join(sys.path)},
                    stdout=devnull if log.level > logging.WARNING else None,
                    stderr=devnull if log.level > logging.WARNING else None,
                )
        except subprocess.CalledProcessError:
            sys.stderr.write('Failed to modify local commit messages\n')
            return -1
        finally:
            # Always clean up the temporary contributors file.
            os.remove(contributors)

        print('{} successfully canonicalized!'.format(string_utils.pluralize(num_commits_to_canonicalize, 'commit')))
        return 0
| [
"jbedard@apple.com"
] | jbedard@apple.com |
1d109b1af75897cb08716609c414a9f1459b485f | 7394e97e563138b58e25383de06aa26002e35eb4 | /research/carls/candidate_sampling_ops.py | b1937d422c056d38d5435e0bd0ff7ce17e6e2f9d | [
"Apache-2.0"
] | permissive | otiliastr/neural-structured-learning | ff944411d3d48c6b7fccf6f48f39fe1c3ca29bc2 | 4a574b84c0a02e08ed3ef58e60284555e7e7c7e2 | refs/heads/master | 2022-04-03T21:22:36.023018 | 2021-04-17T01:00:24 | 2021-04-17T01:00:58 | 205,723,792 | 0 | 0 | Apache-2.0 | 2019-09-01T19:38:53 | 2019-09-01T19:38:53 | null | UTF-8 | Python | false | false | 11,135 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Candidate sampling related ops."""
import typing
from research.carls import context
from research.carls import dynamic_embedding_config_pb2 as de_config_pb2
from research.carls.kernels import gen_dynamic_embedding_ops as de_ops
from research.carls.kernels import gen_sampled_logits_ops
from research.carls.kernels import gen_topk_ops as gen_topk_op
import tensorflow as tf
def top_k(inputs: tf.Tensor,
          k: int,
          de_config: de_config_pb2.DynamicEmbeddingConfig,
          var_name: typing.Text,
          service_address: typing.Text = "",
          timeout_ms: int = -1):
  """Computes logits for the top k closest embeddings to the inputs.

  Args:
    inputs: A float `Tensor` of shape `[batch_size, dim]` representing the
      forward activations of the input network.
    k: An `int` denoting the number of returned keys.
    de_config: A DynamicEmbeddingConfig for configuring the dynamic embedding.
    var_name: A unique name for the operation.
    service_address: The address of a dynamic embedding service. If empty, the
      value passed from --kbs_address flag will be used instead.
    timeout_ms: Timeout in milliseconds for the connection. If negative, never
      times out.

  Returns:
    keys: A string `Tensor` of shape `[batch_size, k]` representing the top k
      keys relative to the input.
    logits: A float `Tensor` of shape `[batch_size, k]` representing the logits
      for the returned keys.

  Raises:
    ValueError: if var_name is empty or k is not greater than zero.

  Note: The (keys, logits) pair returned here should not be used for training as
    they only represent biased sampling. Instead, use sampled_softmax_loss()
    for training.
  """
  if not var_name:
    raise ValueError("Must specify a valid var_name.")
  if k <= 0:
    raise ValueError("k must be greater than zero, got %d" % k)

  # Register the config under var_name so exported models know about it.
  context.add_to_collection(var_name, de_config)
  resource = de_ops.dynamic_embedding_manager_resource(
      de_config.SerializeToString(), var_name, service_address, timeout_ms)
  return gen_topk_op.topk_lookup(inputs, k, resource)
def sampled_softmax_loss(positive_keys: tf.Tensor,
                         inputs: tf.Tensor,
                         num_samples: int,
                         de_config: de_config_pb2.DynamicEmbeddingConfig,
                         var_name: typing.Text,
                         service_address: typing.Text = "",
                         timeout_ms: int = -1):
  """Computes the sampled softmax loss from given input activations.

  Args:
    positive_keys: A string `Tensor` of shape `[batch_size, None]` holding
      the positive keys for each example.
    inputs: A float `Tensor` of shape `[batch_size, dim]` with the forward
      activations of the input network.
    num_samples: An int, the number of positive and negative samples drawn
      per example.
    de_config: A DynamicEmbeddingConfig for configuring the dynamic embedding.
    var_name: A unique name for the operation.
    service_address: The address of a dynamic embedding service. If empty,
      the value passed from the --kbs_address flag is used instead.
    timeout_ms: Timeout in milliseconds for the connection. If negative,
      never times out.

  Returns:
    A scalar float `Tensor` with the sampled softmax loss.
  """
  logits, labels, _, mask, _ = compute_sampled_logits(positive_keys, inputs,
                                                      num_samples, de_config,
                                                      var_name, service_address,
                                                      timeout_ms)
  # Turn the {0, 1} indicator labels into a per-example distribution by
  # dividing each row by its sum (clamped to at least 1 to avoid 0/0).
  num_cols = labels.get_shape()[-1]
  row_totals = tf.maximum(tf.reduce_sum(labels, -1, keepdims=True), 1)
  labels = labels / tf.tile(row_totals, [1, num_cols])
  per_example_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
      labels=labels, logits=logits)
  # Average over the examples that actually contributed valid keys.
  return tf.reduce_sum(per_example_loss) / tf.reduce_sum(mask)
def sampled_sigmoid_loss(positive_keys: tf.Tensor,
                         inputs: tf.Tensor,
                         num_samples: int,
                         de_config: de_config_pb2.DynamicEmbeddingConfig,
                         var_name: typing.Text,
                         service_address: typing.Text = "",
                         timeout_ms: int = -1):
  """Computes the sampled sigmoid loss from given input activations.

  Args:
    positive_keys: A string `Tensor` of shape `[batch_size, None]` holding
      the positive keys for each example.
    inputs: A float `Tensor` of shape `[batch_size, dim]` with the forward
      activations of the input network.
    num_samples: An int, the number of positive and negative samples drawn
      per example.
    de_config: A DynamicEmbeddingConfig for configuring the dynamic embedding.
    var_name: A unique name for the operation.
    service_address: The address of a dynamic embedding service. If empty,
      the value passed from the --kbs_address flag is used instead.
    timeout_ms: Timeout in milliseconds for the connection. If negative,
      never times out.

  Returns:
    A scalar float `Tensor` with the sampled sigmoid loss.
  """
  logits, labels, _, mask, _ = compute_sampled_logits(positive_keys, inputs,
                                                      num_samples, de_config,
                                                      var_name, service_address,
                                                      timeout_ms)
  # Normalize each example's 0/1 label row so it sums to one (row sums are
  # clamped to at least 1 to avoid dividing by zero).
  num_cols = labels.get_shape()[-1]
  row_totals = tf.maximum(tf.reduce_sum(labels, -1, keepdims=True), 1)
  labels = labels / tf.tile(row_totals, [1, num_cols])
  per_example_loss = tf.nn.sigmoid_cross_entropy_with_logits(
      labels=labels, logits=logits)
  normalized = tf.reduce_sum(per_example_loss) / tf.reduce_sum(mask)
  # Additionally average across the samples dimension.
  return normalized / num_samples
def compute_sampled_logits(positive_keys,
                           inputs,
                           num_samples: int,
                           de_config: de_config_pb2.DynamicEmbeddingConfig,
                           var_name: typing.Text,
                           service_address: typing.Text = "",
                           timeout_ms: int = -1):
  """Computes sampled logits from given positive labels.

  Args:
    positive_keys: A string `Tensor` of shape `[batch_size, None]` representing
      input positive keys.
    inputs: A float `Tensor` of shape `[batch_size, dim]` representing the
      forward activations of the input network.
    num_samples: An int denoting the returned positive and negative samples.
    de_config: A DynamicEmbeddingConfig for configuring the dynamic embedding.
    var_name: A unique name for the operation.
    service_address: The address of a dynamic embedding service. If empty, the
      value passed from --kbs_address flag will be used instead.
    timeout_ms: Timeout in milliseconds for the connection. If negative, never
      times out.

  Returns:
    logits: A float `Tensor` of shape `[batch_size, num_samples]` representing
      the logits for sampled labels.
    labels: A float `Tensor` of shape `[batch_size, num_samples]` with values
      in {0, 1} indicating if the sample is positive or negative.
    keys: A string `Tensor` of shape `[batch_size, num_samples]` representing
      the keys for each sample.
    mask: A float `Tensor` of shape `[batch_size]` representing the 0/1 mask
      of each batch. For example, if all keys in positive_keys[i] are empty,
      mask[i] = 0; otherwise mask[i] = 1.
    weights: A float `Tensor` representing the embeddings of the sampled keys.

  Raises:
    ValueError: If var_name is not specified, or if num_samples < 1.
  """
  if not var_name:
    raise ValueError("Must specify a valid name, got %s" % var_name)
  if num_samples < 1:
    raise ValueError("Invalid num_samples: %d" % num_samples)

  # Register the config under var_name so exported models know about it.
  context.add_to_collection(var_name, de_config)
  resource = de_ops.dynamic_embedding_manager_resource(
      de_config.SerializeToString(), var_name, service_address, timeout_ms)

  # Create a dummy variable so that the gradients can be passed in.
  grad_placeholder = tf.Variable(0.0)

  keys, labels, expected_counts, mask, weights = (
      gen_sampled_logits_ops.sampled_logits_lookup(positive_keys, inputs,
                                                   num_samples,
                                                   grad_placeholder, resource))

  # Compute sampled logits.
  # Shape of weights: [d1, d2, dn-1, num_samples, embed_dim]
  # Shape of inputs: [d1, d2, dn-1, embed_dim]
  # Shape of output logits: [d1, d2, dn-1, num_samples]

  # [d1, d2, dn-1, embed_dim] -> [d1, d2, dn-1, 1, embed_dim]
  tiled_inputs = tf.expand_dims(inputs, axis=-2)
  # [d1, d2, dn-1, embed_dim] -> [d1, d2, dn-1, num_samples, embed_dim]
  multiples = [1] * (inputs.ndim + 1)
  multiples[-2] = num_samples
  tiled_inputs = tf.tile(tiled_inputs, multiples)
  # [d1, d2, dn-1, num_samples, embed_dim] -> [d1, d2, dn-1, num_samples]
  logits = tf.reduce_sum(weights * tiled_inputs, -1)
  # Sampled logits: subtract log expected counts so training approximates
  # the full softmax despite sampling.
  logits -= tf.math.log(expected_counts)
  return logits, labels, keys, mask, weights
@tf.RegisterGradient("SampledLogitsLookup")
def _sampled_logits_lookup_grad(op, keys_grad, labels_grad,
                                expected_counts_grad, mask_grad, weights_grad):
  """Computes the gradients for SampledLogitsLookup.

  We uses the gradients w.r.t. the weights output of sampled_logits_lookup() to
  update the embeddings/weights of the sampled keys.
  The gradients for the inputs of sampled_logits_lookup should be provided, but
  none of them needs to be back-propagated. So we set all of them to be zeros.

  Args:
    op: The DynamicEmbeddingLookup op.
    keys_grad: The tensor representing the gradient w.r.t. the keys output.
    labels_grad: The tensor representing the gradient w.r.t. the labels output.
    expected_counts_grad: The tensor representing the gradient w.r.t. the
      expected_counts output.
    mask_grad: The tensor representing the gradient w.r.t. the mask output.
    weights_grad: The tensor representing the gradient w.r.t. the weights
      output.

  Returns:
    The gradients w.r.t. the input.
  """
  del keys_grad, labels_grad, expected_counts_grad, mask_grad  # Unused.
  # Only weights_grad is consumed: the backward kernel applies it to the
  # embeddings of the keys that were sampled in the forward pass.
  pos_keys_grad, num_samples_grad, dummy_variable_grad, resource_grad = (
      gen_sampled_logits_ops.sampled_logits_lookup_grad(
          keys=op.outputs[0],
          weight_gradients=weights_grad,
          handle=op.inputs[4]))
  # Gradient for the input activation.
  inputs_grad = tf.zeros_like(op.inputs[1])
  # The returned tuple must line up with the op's five inputs:
  # (positive_keys, inputs, num_samples, grad_placeholder, resource).
  return (pos_keys_grad, inputs_grad, num_samples_grad, dummy_variable_grad,
          resource_grad)
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
c7339fef2a47d86a6fbcf65ffa3761ad4a3d38bd | 0e8dd5901b1f98934c44a85b133eb7ca6f44b4b9 | /osr2mp4/ImageProcess/PrepareFrames/RankingScreens/ModIcons.py | c87a50b7cd88872d95e0d5011ce4159e07f419f2 | [] | no_license | Hazuki-san/osr2mp4-core | dbd2f4d44a3d0e90974214c97b434dcbb2eedd18 | 83dc5c47bc73dcb0b4d4b6a5ae1924771c13c623 | refs/heads/master | 2022-11-24T13:41:15.703261 | 2020-07-03T14:00:54 | 2020-07-03T14:00:54 | 279,099,127 | 1 | 0 | null | 2020-07-12T16:02:35 | 2020-07-12T16:02:34 | null | UTF-8 | Python | false | false | 676 | py | from osrparse.enums import Mod
from ...PrepareFrames.YImage import YImage
selectionmod = "selection-mod-"
def prepare_modicons(scale, settings):
	"""Load the skin's "selection-mod-*" icon for every supported mod.

	Returns a dict mapping each osrparse Mod flag to its loaded image.
	"""
	suffixes = {
		Mod.Perfect: "perfect",
		Mod.Autopilot: "pilot",
		Mod.Relax: "relax",
		Mod.SpunOut: "spunout",
		Mod.Flashlight: "flashlight",
		Mod.Hidden: "hidden",
		Mod.Nightcore: "nightcore",
		Mod.DoubleTime: "doubletime",
		Mod.SuddenDeath: "suddendeath",
		Mod.HardRock: "hardrock",
		Mod.HalfTime: "halftime",
		Mod.NoFail: "nofail",
		Mod.Easy: "easy",
	}
	# Each icon file is named "selection-mod-" + suffix.
	return {
		mod: YImage(selectionmod + suffix, settings, scale).img
		for mod, suffix in suffixes.items()
	}
| [
"snkraishin87@gmail.com"
] | snkraishin87@gmail.com |
1074bb30ddb6ffd71876e31fdc25fe977ac16661 | 1a04e02811c844ecf53cc041b104667e5c987a09 | /vgrabber/model/grade.py | 4e1a3f467bfe686db281ef1013fcefb6f3d90834 | [] | no_license | janjanech/vzdelavanieGui | dff17add6e6946063597d4c1eba5d6d76b6f5374 | b2015f41f7cb1be1ecccf1c4778a91f43f8fba12 | refs/heads/master | 2021-10-24T16:21:24.911817 | 2019-01-15T17:03:49 | 2019-01-15T17:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | from enum import Enum, auto
from .files import FileList
from .finalexam import FinalExam
class Grade(Enum):
A = auto()
B = auto()
C = auto()
D = auto()
E = auto()
FX = auto()
class StudentGrade:
final_exam: FinalExam
grade: Grade
points: float
files: FileList
def __init__(self, subject, student, final_exam, grade):
self.__subject = subject
self.final_exam = final_exam
self.grade = grade
self.points = None
self.files = FileList()
self.student = student
def __str__(self):
return "<Grade {0} for final exam at {1}>".format(self.grade.name, self.final_exam.date_time.isoformat())
def clear_files(self):
self.files.clear()
| [
"janik@janik.ws"
] | janik@janik.ws |
0a466df321d2357b667e78d7b6f0c6b7799c7321 | 8c57a6e0f607fc5b0a1d601e4fa5d8e621d73dcc | /Sorting_algorithms/benchmark_sorting.py | 6d248cbcf52daef95addfe19a1415d699e8c6193 | [] | no_license | anoubhav/Data-Structures-and-Algorithms | eb3b0edd7df64e809bfadf41a86f3bf177965cae | d99bac42a86601570255bae85590fc2e485960fc | refs/heads/master | 2021-07-15T07:05:42.034648 | 2020-05-27T15:33:43 | 2020-05-27T15:33:43 | 144,583,921 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | from selection_sort import selection_sort
from insertion_sort_swapping import insertion_sort_swap
from insertion_sort_assignment import insertion_sort_assignment
from bubble_sort import bubble_sort
from merge_sort import merge_sort
from quicksort3 import quicksort3
from time import clock
import random
def create_array(size = 2000, max_num = 1000):
    """Build a list of `size` random integers, each drawn uniformly
    from the inclusive range [0, max_num].

    (int, int) -> (list)
    """
    values = []
    for _ in range(size):
        values.append(random.randint(0, max_num))
    return values
def benchmark(n=(10, 100, 1000, 5000, 10000)):
    """Benchmark the 6 sorting algorithms and print a timing table.

    Args:
        n: iterable of array sizes to benchmark (default replaces the
           previous mutable-list default with an immutable tuple).
    """
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for benchmarking elapsed time.
    # (NOTE: the module-level `from time import clock` import should also
    # be updated when running on Python >= 3.8.)
    from time import perf_counter

    def _time_sort(sort_fn, size):
        # Time one sort on a fresh random array of `size` elements.
        a = create_array(size=size, max_num=10 * size)
        t0 = perf_counter()
        sort_fn(a)
        return perf_counter() - t0

    # One entry per algorithm; quicksort3 needs explicit bounds.
    sorters = {
        'bubble': bubble_sort,
        'selection': selection_sort,
        'merge': merge_sort,
        'quicksort3': lambda a: quicksort3(a, 0, len(a)),
        'insertion_swap': insertion_sort_swap,
        'insertion_ass': insertion_sort_assignment,
    }
    times = {name: [] for name in sorters}
    for size in n:
        for name, sort_fn in sorters.items():
            times[name].append(_time_sort(sort_fn, size))

    print(98*'_')
    print("n\tBubble\t Insertion(s)\t\tInsertion(a)\t Merge\tQuicksort3\tSelection")
    print(98*'_')
    for i, size in enumerate(n):
        print("%d\t%5.4f\t %5.4f\t\t %5.4f\t %5.4f\t %5.4f\t %5.4f"%(
            size, times['bubble'][i], times['insertion_swap'][i],
            times['insertion_ass'][i], times['merge'][i],
            times['quicksort3'][i], times['selection'][i]))
benchmark(n = [10, 100])
| [
"anoubhav.agarwaal@gmail.com"
] | anoubhav.agarwaal@gmail.com |
a184a13a43f1725ecba70739affc5a1f2e1640e3 | e58c6f5ae956fe409c475e2745526c4c4451e509 | /TestCode/Spiders/scrapytest/logo/logo/settings.py | d7465747e8870ed7cb1f27e7cb0f825f369d7fee | [] | no_license | pangxie1987/uiautomator2 | 6d67dd3beeaba5ab3efa85bf6b8eabcad70b17b8 | 9a818e3b9a68ba4006ec393d5ec095ee2d10572d | refs/heads/master | 2022-11-22T17:05:00.580781 | 2021-03-31T05:17:06 | 2021-03-31T05:17:06 | 216,848,204 | 2 | 2 | null | 2022-11-22T03:17:30 | 2019-10-22T15:31:04 | Python | UTF-8 | Python | false | false | 3,404 | py | # -*- coding: utf-8 -*-
# Scrapy settings for logo project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# BOT_NAME = 'logo'
# SPIDER_MODULES = ['logo.spiders']
# NEWSPIDER_MODULE = 'logo.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'logo (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True  # Scrapy fetches and respects each site's robots.txt before crawling
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'logo.middlewares.LogoSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'logo.middlewares.LogoDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'logo.pipelines.LogoPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
import os
BOT_NAME = 'logo'
SPIDER_MODULES = ['logo.spiders']
NEWSPIDER_MODULE = 'logo.spiders'
# Pipelines run in ascending priority order: images are downloaded first
# (priority 1), then item metadata is written out as JSON (priority 2).
ITEM_PIPELINES={
    # 'sucai.pipelines.SucaiPipeline':1
    'logo.pipelines.JsonWithEncodingPipeline':2,
    'logo.pipelines.DownloadImagesPipeline':1
}
# Store downloaded images in a 'picture' directory two levels above this
# settings module (i.e. inside the outer project directory).
path = os.path.dirname(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(path, 'picture')
"lpb.waln@outlook.com"
] | lpb.waln@outlook.com |
6b086af83c2477052676f8a6f31b94fa6ff34d25 | 5d0b6d45c23337d5d074c62081445e9963b92ba8 | /src/component_parser/ranges.py | c4ea461f747c57c8eb0212e7df4e8c81ae0fb1c3 | [
"MIT"
] | permissive | ghedwards/sublimetext-cfml | f944fb8f8c35b6acca0c4d0fdc8cec4e726442fd | 6b0ef8a325a21f0392b79346a5dd47b7c0d58f30 | refs/heads/master | 2021-08-26T07:06:57.033755 | 2017-11-13T17:49:47 | 2017-11-13T17:49:47 | 111,625,801 | 0 | 0 | null | 2017-11-22T02:21:35 | 2017-11-22T02:21:34 | null | UTF-8 | Python | false | false | 5,671 | py | import re
import collections
# A range type: 'start'/'end' are regex fragments for its delimiters,
# 'child_ranges' lists the range types allowed to nest inside it, and
# 'pop' says whether the terminator is tried before ('first') or after
# ('last') the child openers in the combined end-regex below.
RangeDefinition = collections.namedtuple('RangeDefinition', ['start', 'end', 'child_ranges', 'pop'])
# Range types that may appear inside ordinary script code.
BASE_RANGES = [
    'comma',
    'semicolon',
    'curly_brackets',
    'line_comment',
    'multiline_comment',
    'parentheses',
    'square_brackets',
    'string_double',
    'string_single',
    'tag_comment'
]
# Range types recognized outside script blocks (comments and strings only).
NON_SCRIPT_RANGES = [
    'line_comment',
    'multiline_comment',
    'string_double',
    'string_single',
    'tag_comment'
]
# Every known range type.  r'(?=.)' is a zero-width "match wherever a
# character exists" pattern, used where a range has no real delimiter.
RangeDefinitions = {
    'attributes': RangeDefinition(r'(?=.)', r'\{', BASE_RANGES, 'first'),
    'cfscript': RangeDefinition(r'(?=.)', r'\Z', BASE_RANGES, 'first'),
    'comma': RangeDefinition(r',', r'(?=.)', [], 'first'),
    'curly_brackets':RangeDefinition( r'\{', r'\}', BASE_RANGES, 'first'),
    'escaped_double_quote': RangeDefinition(r'""', r'(?=.)', [], 'first'),
    'escaped_hash': RangeDefinition(r'##', r'(?=.)', [], 'first'),
    'escaped_single_quote': RangeDefinition(r"''", r'(?=.)', [], 'first'),
    'hash': RangeDefinition(r'#', r'#', BASE_RANGES, 'first'),
    'line_comment': RangeDefinition(r'//', r'\n', [], 'first'),
    'multiline_comment': RangeDefinition(r'/\*', r'\*/', [], 'first'),
    'non_script': RangeDefinition(r'(?=.)', r'\Z', NON_SCRIPT_RANGES, 'first'),
    'parentheses': RangeDefinition(r'\(', r'\)', BASE_RANGES, 'first'),
    'semicolon': RangeDefinition(r';', r'(?=.)', [], 'first'),
    'square_brackets': RangeDefinition(r'\[', r'\]', BASE_RANGES, 'first'),
    'string_double': RangeDefinition(r'"', r'"', ['escaped_hash', 'hash', 'escaped_double_quote'], 'last'),
    'string_single': RangeDefinition(r"'", r"'", ['escaped_hash', 'hash', 'escaped_single_quote'], 'last'),
    'tag_comment': RangeDefinition(r'<!---', r'--->', [], 'first'),
}
# Compiled regexes per range type.  'start' matches the opener; 'end' is a
# single alternation of named groups: the range's own terminator (group
# 'pop') plus each allowed child's opener.  Placing 'pop' first or last
# decides which wins when both could match at the same position (e.g. the
# escaped '""' in a double-quoted string must be tried before the closing '"').
RangeRegex = {}
for name, rd in RangeDefinitions.items():
    RangeRegex[name] = {
        'start': re.compile(rd.start, re.S)
    }
    patterns = []
    for cr in rd.child_ranges:
        crd = RangeDefinitions[cr]
        patterns.append((cr, crd.start))
    if rd.pop == 'first':
        patterns.insert(0, ('pop', rd.end))
    else:
        patterns.append(('pop', rd.end))
    RangeRegex[name]['end'] = re.compile('|'.join('(?P<{}>{})'.format(*p) for p in patterns), re.S)
class Range:
    """A named span of source text, organized as a tree.

    ``start``/``end`` are offsets into the source; ``children`` holds the
    sub-ranges nested inside this one, in the order they were added.
    """

    def __init__(self, name, start=None, end=None):
        self.name = name
        self.start = start
        self.end = end
        self.parent = None
        self.children = []

    def add_child(self, child_range):
        """Attach *child_range* under this range and record its parent."""
        child_range.parent = self
        self.children.append(child_range)

    def depth(self):
        """Number of ancestors above this range (a root has depth 0)."""
        levels = 0
        ancestor = self.parent
        while ancestor is not None:
            levels += 1
            ancestor = ancestor.parent
        return levels

    def is_in_range(self, pt, names=None):
        """True when *pt* lies inside this range or any descendant whose
        name is in *names* (defaults to this range type's child names)."""
        if names is None:
            names = RangeDefinitions[self.name].child_ranges
        if self.name in names and self.start <= pt < self.end:
            return True
        return any(child.is_in_range(pt, names) for child in self.children)

    def range_at_pt(self, pt):
        """Return the first range (self, then descendants) that starts
        exactly at *pt*, or None."""
        if pt < self.start or pt > self.end:
            return None
        if pt == self.start:
            return self
        for child in self.children:
            hit = child.range_at_pt(pt)
            if hit is not None:
                return hit
        return None

    def deepest_range(self, pt):
        """Most deeply nested range containing *pt* (self if no child does)."""
        if pt < self.start or pt > self.end:
            return None
        for child in self.children:
            deeper = child.deepest_range(pt)
            if deeper is not None:
                return deeper
        return self

    def next_child_range(self, pt, names=None):
        """First child starting at or after *pt*, optionally restricted to
        names in *names*; None when *pt* is outside this range or nothing
        qualifies."""
        if pt < self.start or pt > self.end:
            return None
        for child in self.children:
            if child.start >= pt and (names is None or child.name in names):
                return child
        return None

    def __repr__(self):
        text = '(' + self.name + ': start=' + str(self.start) + ', end=' + str(self.end)
        if self.children:
            text += ', children=['
            for child in self.children:
                # Indent each child's (possibly multi-line) repr by 4 spaces.
                text += '\n    ' + repr(child).replace('\n', '\n    ')
            text += '\n]'
        return text + ')'
class RangeWalker():
    """Builds a Range tree for a source string by repeatedly applying the
    combined start/end regexes from the module-level RangeRegex table."""
    def __init__(self, src_txt, pos=0, name='cfscript'):
        # src_txt: full source text to scan; pos: offset to start from;
        # name: key into RangeRegex / RangeDefinitions for the root range type.
        self.src_txt = src_txt
        self.pos = pos
        self.name = name
    def walk(self):
        """Parse from self.pos and return the root Range, or None when the
        root type's start pattern does not match at self.pos.  Ranges left
        unterminated are capped at the end of the source."""
        opening_match = RangeRegex[self.name]['start'].match(self.src_txt, self.pos)
        if opening_match is None:
            return None
        range_to_walk = Range(self.name, self.pos)
        pos = opening_match.end()
        current_range = range_to_walk
        while current_range:
            # The 'end' regex matches either this range's terminator (named
            # group 'pop') or the opener of one of its allowed child ranges.
            next_match = RangeRegex[current_range.name]['end'].search(self.src_txt, pos)
            if next_match is None:
                # Ran off the end of the source: close this range and every
                # open ancestor at the final offset.
                current_range.end = len(self.src_txt)
                while current_range.parent:
                    current_range.parent.end = len(self.src_txt)
                    current_range = current_range.parent
                break
            name = next_match.lastgroup
            pos = next_match.end()
            if name == 'pop':
                # Terminator: close the current range and resume scanning in
                # its parent (None for the root, which ends the loop).
                current_range.end = pos
                current_range = current_range.parent
                continue
            # Child opener: descend into the newly opened child range.
            child_range = Range(name, next_match.start(), next_match.end())
            current_range.add_child(child_range)
            current_range = child_range
        return range_to_walk
| [
"jcberquist@outlook.com"
] | jcberquist@outlook.com |
cc5dca56154fe17edb6689970d5221ff59f86751 | 7ef5bb39938e669b5571a097f01d96ee53458ad6 | /maximal_rectangle/solution.py | d7dbfe832161fdefa7ae2748e9dfb64f82dc6ddc | [
"BSD-2-Clause"
] | permissive | mahimadubey/leetcode-python | 61cd135515b26644197b4736a92a53bb1a5870a6 | 38acc65fa4315f86acb62874ca488620c5d77e17 | refs/heads/master | 2020-08-29T09:27:45.232412 | 2019-10-28T08:06:52 | 2019-10-28T08:06:52 | 217,993,547 | 0 | 0 | BSD-2-Clause | 2019-10-28T07:55:38 | 2019-10-28T07:55:38 | null | UTF-8 | Python | false | false | 1,258 | py | class Solution:
# @param matrix, a list of lists of 1 length string
# @return an integer
def maximalRectangle(self, matrix):
# Make a list of heights
if not matrix:
return 0
n = len(matrix)
if not matrix[0]:
return 0
m = len(matrix[0])
hist = [[0 for j in range(m)] for i in range(n)]
for i in range(n):
for j in range(m):
if i == 0:
hist[i][j] = int(matrix[i][j])
else:
if matrix[i][j] == '1':
hist[i][j] = 1 + hist[i - 1][j]
res = 0
for row in hist:
res = max(res, self.max_hist_rect(row))
return res
def max_hist_rect(self, heights):
if not heights:
return 0
n = len(heights)
max_area = heights[0]
stack = []
for i in range(n + 1):
while stack and (i == n or heights[stack[-1]] > heights[i]):
h = heights[stack.pop()]
if stack:
w = i - stack[-1] - 1
else:
w = i
max_area = max(max_area, h * w)
stack.append(i)
return max_area
| [
"shichao.an@nyu.edu"
] | shichao.an@nyu.edu |
9c4c802cf858874d37d665db3ace105775e64f83 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/INTERVIEW-PREP-COMPLETE/Leetcode/215.py | 9ea3591e57858d86cf92438005c6cef00ab4ab09 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 976 | py | import heapq
class Solution:
    """Kth largest element via in-place quickselect (average O(n))."""

    def findKthLargest(self, nums: List[int], k: int) -> int:
        def select(lo, hi, rank):
            # Lomuto partition around nums[hi]: everything <= pivot moves
            # left of `boundary`, then the pivot lands at `boundary`.
            boundary = lo
            for scan in range(lo, hi):
                if nums[scan] <= nums[hi]:
                    nums[boundary], nums[scan] = nums[scan], nums[boundary]
                    boundary += 1
            nums[boundary], nums[hi] = nums[hi], nums[boundary]
            # Elements from `boundary` through `hi` are the largest ones.
            larger = hi - boundary + 1
            if larger == rank:
                return nums[boundary]
            if larger > rank:
                return select(boundary + 1, hi, rank)
            return select(lo, boundary - 1, rank - larger)
        return select(0, len(nums) - 1, k)
# Time complexity: O(nlogn)
class Solution:
    """Kth largest element using a max-heap of negated values."""

    def findKthLargest(self, nums: List[int], k: int) -> int:
        # Negate so Python's min-heap pops the largest value first; the
        # original index makes every entry distinct.
        heap = [(-value, pos) for pos, value in enumerate(nums)]
        heapq.heapify(heap)
        top = None
        for _ in range(k):
            top = -heapq.heappop(heap)[0]
        return top
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
44ed1910e8ed13934e5fb218eb574fad3f2b8649 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_23210.py | a16a569ba67dd79006f8b14bda2a070aed189c29 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,840 | py | import _surface
import chimera
try:
    import chimera.runCommand
except:  # generated fallback: runCommand is optional in some Chimera builds
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:  # older VolumePath API: create sets through the dialog instead
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}

# Machine-generated COG-complex marker placements, rewritten as a data table
# instead of ~270 copy-pasted if-blocks.  Each row is
# (marker set name, (x, y, z) position, (r, g, b) color, radius).
# Rows are placed in the original order, one marker per named set.
# GFPN/GFPC/Anch rows use radius 18.4716; numbered beads use 17.1475.
_MARKER_ROWS = [
    ('Cog2_GFPN', (494.074, 587.143, 473.944), (0.89, 0.1, 0.1), 18.4716),
    ('Cog2_0', (520.768, 635.189, 435.19), (0.89, 0.1, 0.1), 17.1475),
    ('Cog2_1', (559.439, 699.249, 400.081), (0.89, 0.1, 0.1), 17.1475),
    ('Cog2_GFPC', (601.61, 655.031, 526.484), (0.89, 0.1, 0.1), 18.4716),
    ('Cog2_Anch', (614.495, 849.024, 288.228), (0.89, 0.1, 0.1), 18.4716),
    ('Cog3_GFPN', (505.64, 627.483, 454.189), (1, 1, 0), 18.4716),
    ('Cog3_0', (504.355, 627.257, 455.204), (1, 1, 0.2), 17.1475),
    ('Cog3_1', (487.942, 649.778, 457.772), (1, 1, 0.2), 17.1475),
    ('Cog3_2', (466.697, 635.34, 468.961), (1, 1, 0.2), 17.1475),
    ('Cog3_3', (443.69, 635.716, 485.128), (1, 1, 0.2), 17.1475),
    ('Cog3_4', (439.723, 645.764, 511.214), (1, 1, 0.2), 17.1475),
    ('Cog3_5', (446.609, 672.956, 510.075), (1, 1, 0.2), 17.1475),
    ('Cog3_GFPC', (498.067, 600.2, 450.838), (1, 1, 0.4), 18.4716),
    ('Cog3_Anch', (389.767, 746.45, 561.895), (1, 1, 0.4), 18.4716),
    ('Cog4_GFPN', (473.496, 848.516, 408.846), (0, 0, 0.8), 18.4716),
    ('Cog4_0', (473.496, 848.516, 408.846), (0, 0, 0.8), 17.1475),
    ('Cog4_1', (476.303, 820.977, 401.712), (0, 0, 0.8), 17.1475),
    ('Cog4_2', (471.046, 799.668, 419.433), (0, 0, 0.8), 17.1475),
    ('Cog4_3', (473.652, 771.12, 419.059), (0, 0, 0.8), 17.1475),
    ('Cog4_4', (480.566, 743.334, 416.034), (0, 0, 0.8), 17.1475),
    ('Cog4_5', (488.226, 715.624, 414.506), (0, 0, 0.8), 17.1475),
    ('Cog4_6', (494.405, 687.42, 418.447), (0, 0, 0.8), 17.1475),
    ('Cog4_GFPC', (430.982, 899.803, 550.233), (0, 0, 0.8), 18.4716),
    ('Cog4_Anch', (558.133, 463.968, 301.166), (0, 0, 0.8), 18.4716),
    ('Cog5_GFPN', (514.475, 692.397, 386.393), (0.3, 0.3, 0.3), 18.4716),
    ('Cog5_0', (514.475, 692.397, 386.393), (0.3, 0.3, 0.3), 17.1475),
    ('Cog5_1', (527.653, 701.805, 410.638), (0.3, 0.3, 0.3), 17.1475),
    ('Cog5_2', (548.772, 703.428, 430.847), (0.3, 0.3, 0.3), 17.1475),
    ('Cog5_3', (576.147, 693.45, 429.018), (0.3, 0.3, 0.3), 17.1475),
    ('Cog5_GFPC', (549.099, 589.966, 493.31), (0.3, 0.3, 0.3), 18.4716),
    ('Cog5_Anch', (612.322, 793.903, 363.818), (0.3, 0.3, 0.3), 18.4716),
    ('Cog6_GFPN', (537.563, 627.898, 457.337), (0.21, 0.49, 0.72), 18.4716),
    ('Cog6_0', (537.588, 627.887, 457.357), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_1', (527.263, 605.323, 470.563), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_2', (516.317, 625.007, 487.434), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_3', (493.026, 639.039, 494.745), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_4', (477.973, 662.12, 488.658), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_5', (456.735, 677.052, 477.905), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_6', (430.424, 667.216, 481.639), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_GFPC', (454.193, 642.986, 402.632), (0.21, 0.49, 0.72), 18.4716),
    ('Cog6_Anch', (408.801, 689.743, 563.018), (0.21, 0.49, 0.72), 18.4716),
    ('Cog7_GFPN', (493.53, 632.211, 380.981), (0.7, 0.7, 0.7), 18.4716),
    ('Cog7_0', (513.823, 640.452, 395.715), (0.7, 0.7, 0.7), 17.1475),
    ('Cog7_1', (557.825, 659.171, 428.115), (0.7, 0.7, 0.7), 17.1475),
    ('Cog7_2', (601.95, 677.429, 460.552), (0.7, 0.7, 0.7), 17.1475),
    ('Cog7_GFPC', (610.469, 598.13, 475.902), (0.7, 0.7, 0.7), 18.4716),
    ('Cog7_Anch', (654.598, 761.449, 491.752), (0.7, 0.7, 0.7), 18.4716),
    ('Cog8_0', (529.4, 569.958, 443.135), (1, 0.5, 0), 17.1475),
    ('Cog8_1', (540.862, 593.437, 432.384), (1, 0.5, 0), 17.1475),
    ('Cog8_2', (550.178, 619.082, 424.436), (1, 0.5, 0), 17.1475),
    ('Cog8_3', (562.208, 635.216, 403.532), (1, 0.5, 0), 17.1475),
    ('Cog8_4', (574.004, 655.851, 386.746), (1, 0.5, 0), 17.1475),
    ('Cog8_5', (585.306, 676.54, 369.563), (1, 0.5, 0), 17.1475),
    ('Cog8_GFPC', (533.353, 633.828, 411.86), (1, 0.6, 0.1), 18.4716),
    ('Cog8_Anch', (638.717, 721.544, 324.315), (1, 0.6, 0.1), 18.4716),
]

for _name, _pos, _color, _radius in _MARKER_ROWS:
    # Same guard as the generated code: create each named set only once.
    if _name not in marker_sets:
        s = new_marker_set(_name)
        marker_sets[_name] = s
    s = marker_sets[_name]
    mark = s.place_marker(_pos, _color, _radius)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
"batxes@gmail.com"
] | batxes@gmail.com |
91935e9f77a4d8bc3c373d76ca627484057b389c | 53c3462ff265b6273f4a4fa17f6d59688f69def0 | /剑指offer/41_FindContinuousSequence.py | d3586055cdb08c6798a50f5d7375e5ac92d8c85a | [] | no_license | 17764591637/jianzhi_offer | b76e69a3ecb2174676da2c8d8d3372a3fc27b5c4 | 27e420ee302d5ab6512ecfdb8d469b043fb7102d | refs/heads/master | 2023-08-03T01:32:51.588472 | 2019-10-13T07:56:21 | 2019-10-13T07:56:21 | 197,692,548 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | '''
他在想究竟有多少种连续的正数序列的和为100(至少包括两个数)。
没多久,他就得到另一组连续正数和为100的序列:18,19,20,21,22。
现在把问题交给你,你能不能也很快的找出所有和为S的连续正数序列?
'''
class Solution:
def FindContinuousSequence(self, tsum):
# write code here
res = []
for i in range(1,int(tsum/2)+1):
for j in range(i,int(tsum/2)+2):
sum_ = (j+i)*(j-i+1)/2
if sum_>tsum:
break
elif sum_ == tsum:
res.append(list(range(i,j+1)))
return res
s = Solution()
res = s.FindContinuousSequence(100)
print(res) | [
"17764591637@163.com"
] | 17764591637@163.com |
4c55379b54e9cc451df5d9f8c31bbba8c65872df | e72265a8f523cd76e75ac3832e3236917746c96a | /dawp2020/hy-data-analysis-with-python-2020/part01-e06_triple_square/src/triple_square.py | 3e16029c04371d878c0a48f86024b73b5e491f6b | [
"MIT"
] | permissive | ored95/data-analysis-course | 9bde67f489a16b94f376427331a24efc330877ed | f61a953769b8e7c502f2bec28158ec1bd344f72a | refs/heads/main | 2023-04-07T05:19:22.044343 | 2021-03-30T10:25:52 | 2021-03-30T10:25:52 | 346,290,289 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | #!/usr/bin/env python3
def triple(x):
    """Return three times *x*."""
    return x * 3
def square(x):
    """Return *x* raised to the second power."""
    return x * x
def main():
    """Print triple(x) and square(x) for x = 1..3.

    The loop is written over range(1, 11) but bails out as soon as x
    exceeds 3, so exactly three lines are printed.
    """
    for x in range(1, 11):
        if x > 3:
            break
        print("triple({0})=={1} square({0})=={2}".format(x, triple(x), square(x)))
# Entry-point guard: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"stepup.ored@gmail.com"
] | stepup.ored@gmail.com |
f65290fa42db6280e9a931af321b0809650af036 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/j1n.py | 9780a79a04275f9679bf88692305c144decde612 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the payload of a quoted print directive (Python 2).

    lineRemaining is a list of whitespace-split tokens.  When the first and
    last tokens are each a lone double-quote character, the tokens between
    them are printed joined by single spaces; a bare '" "' pair (only the
    two quote tokens) prints an empty line.  Anything else is silently
    ignored.
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Strip the surrounding quote tokens; the rest is the text.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    """Interpret *fileName* line by line (Python 2).

    Every line must begin with the opcode 'j1N'; its remaining tokens are
    handed to printFunction.  The first line that does not start with
    'j1N' prints 'ERROR' and stops processing.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            # NOTE(review): a blank line splits to [] and would raise
            # IndexError here -- confirm inputs never contain blank lines.
            if data[0] == 'j1N':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
# Entry point: the single CLI argument is the program file to interpret.
if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
7572a60f3a8fa50ee798286f14595c2f7f470535 | 99d7765da35926279c4a4fd7313d55908786f4b8 | /1/3/13458/13458.py | 6ec1bfd94fbbe36f57727f20730dcf70cbc1c8e3 | [
"MIT"
] | permissive | chr0m3/boj-codes | b8294c5d4d10a5af25b5276427bccd74d0866ef5 | d71d0a22d0a3ae62c225f382442461275f56fe8f | refs/heads/master | 2021-08-16T15:24:57.733088 | 2021-03-22T13:13:10 | 2021-03-22T13:13:10 | 91,523,558 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | room = int(input())
# BOJ 13458: each room gets one chief supervisor who covers up to `a`
# students; the remaining students are covered by assistant supervisors,
# `b` students each (rounded up).
people = list(input().split())
a, b = map(int, input().split())
total = 0  # renamed from `sum`, which shadowed the builtin
for count in people:
    total += 1  # the mandatory chief supervisor for this room
    remaining = int(count) - a
    if remaining > 0:
        # Integer ceiling division.  The original used int(x / b) with
        # float true-division, which loses precision for large counts.
        total += (remaining + b - 1) // b
print(total)
| [
"chr0m3@users.noreply.github.com"
] | chr0m3@users.noreply.github.com |
e9bbd4b05764ae81360f13740612ea89bb4390d5 | 11e484590b27585facf758f0432eeebe66bf790a | /fal_order_revised/__init__.py | d6e5c1ea819561be5b4d0f4b58d6469d3df06971 | [] | no_license | jeanabreu/falinwa_branch | 51b38ee5a3373d42417b84a0431bad9f7295f373 | be96a209479259cd5b47dec73694938848a2db6c | refs/heads/master | 2021-01-18T10:25:49.866747 | 2015-08-25T10:05:05 | 2015-08-25T10:05:05 | 41,369,368 | 0 | 1 | null | 2015-08-25T14:51:50 | 2015-08-25T14:51:50 | null | UTF-8 | Python | false | false | 65 | py | # -*- coding: utf-8 -*-
import wizard
import sale
import purchase | [
"hans.yonathan@falinwa.com"
] | hans.yonathan@falinwa.com |
572aa51b501d575b1c8037bc1a705e474cd31df5 | e4f8b14cead542586a96bcaa75993b0a29b3c3d0 | /pyNastran/op2/tables/oee_energy/onr.py | 81b96e0a5ce317b9f02eb03330e509248c1f4092 | [] | no_license | afcarl/cyNastran | f1d1ef5f1f7cb05f435eac53b05ff6a0cc95c19b | 356ee55dd08fdc9880c5ffba47265125cba855c4 | refs/heads/master | 2020-03-26T02:09:00.350237 | 2014-08-07T00:00:29 | 2014-08-07T00:00:29 | 144,398,645 | 1 | 0 | null | 2018-08-11T15:56:50 | 2018-08-11T15:56:50 | null | UTF-8 | Python | false | false | 9,060 | py | #pylint: disable=C0326,C0301,C0103
from struct import Struct, unpack
from pyNastran.op2.tables.oee_energy.oee_objects import StrainEnergyObject
from pyNastran.op2.op2_common import OP2Common
class ONR(OP2Common):
    """Reader for the ONRGY1 (element strain energy) OP2 result table."""

    def __init__(self):
        OP2Common.__init__(self)
        #: per-word labels of the table-3 header (filled in _read_onr1_3)
        self.words = None
        #: number of words per entry in the data record
        self.num_wide = None

    def _read_onr1_3(self, data):
        """
        reads ONRGY1 subtable 3 (the header record): decodes the analysis
        code, element name and the scalar parameters for this subcase.
        """
        # Labels for the 28 header words; '???' marks undocumented slots.
        # Fix: a missing comma after 'load_set' previously fused it with
        # 'format_code' into one string via implicit literal concatenation.
        self.words = [
            'aCode', 'tCode', 'eTotal', 'isubcase',
            '???', '???', '???', 'load_set',
            'format_code', 'num_wide', 'cvalres', '???',
            'setID', '???', '???', '???',
            '???', '???', '???', '???',
            '???', '???', '???', '???',
            '???', 'Title', 'subtitle', 'label']
        #aCode = self.get_block_int_entry(data, 1)
        ## total energy of all elements in isubcase/mode
        self.eTotal = self.parse_approach_code(data)
        # Words 7-8 of the header hold the 8-character element name.
        element_name, = unpack(b'8s', data[24:32])
        #print("element_name = %s" %(element_name))
        try:
            element_name = element_name.decode('utf-8').strip()  # element name
        except UnicodeDecodeError:
            print("element_name = ", str(element_name))
            raise
        #print("element_name = %s" %(element_name))
        # Only record the name when it decoded to something alphabetic.
        if element_name.isalpha():
            self.data_code['element_name'] = element_name
        #: Load set or zero
        self.load_set = self.add_data_parameter(data, 'load_set', 'i', 8, False)
        #: format code
        self.format_code = self.add_data_parameter(data, 'format_code', 'i', 9, False)
        #: number of words per entry in record
        #: .. note:: is this needed for this table ???
        self.num_wide = self.add_data_parameter(data, 'num_wide', 'i', 10, False)
        ## C
        self.cvalres = self.add_data_parameter(data, 'cvalres', 'i', 11, False)
        #: Set identification number Number
        self.setID = self.add_data_parameter(data, 'setID', 'i', 13, False)
        #: Natural eigenvalue - real part
        self.eigenReal = self.add_data_parameter(data, 'eigenReal', 'i', 14, False)
        #: Natural eigenvalue - imaginary part
        self.eigenImag = self.add_data_parameter(data, 'eigenImag', 'i', 15, False)
        self.add_data_parameter(data, 'freq', 'f', 16, False)  ## Natural frequency
        #: Total positive energy
        self.etotpos = self.add_data_parameter(data, 'etotpos', 'f', 18)
        #: Total negative energy
        self.etotneg = self.add_data_parameter(data, 'etotneg', 'f', 19, False)
        if not self.is_sort1():
            raise NotImplementedError('sort2...')
        #self.print_block(data) # on
        # Word 5's meaning depends on the analysis (approach) code.
        if self.analysis_code == 1:  # statics / displacement / heat flux
            #del self.data_code['nonlinear_factor']
            self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', 'i', 5, False)
            self.dataNames = self.apply_data_code_value('dataNames', ['lsdvmn'])
            self.setNullNonlinearFactor()
        elif self.analysis_code == 2:  # real eigenvalues
            self.mode = self.add_data_parameter(data, 'mode', 'i', 5)  ## mode number
            self.dataNames = self.apply_data_code_value('dataNames', ['mode'])
            #print "mode(5)=%s eigr(6)=%s mode_cycle(7)=%s" %(self.mode,self.eigr,self.mode_cycle)
        #elif self.analysis_code==3: # differential stiffness
            #self.lsdvmn = self.get_values(data,'i',5) ## load set number
            #self.data_code['lsdvmn'] = self.lsdvmn
        #elif self.analysis_code==4: # differential stiffness
            #self.lsdvmn = self.get_values(data,'i',5) ## load set number
        elif self.analysis_code == 5:  # frequency
            self.freq2 = self.add_data_parameter(data, 'freq2', 'f', 5)  ## frequency
            self.dataNames = self.apply_data_code_value('dataNames', ['freq2'])
        elif self.analysis_code == 6:  # transient
            self.time = self.add_data_parameter(data, 'time', 'f', 5)  ## time step
            self.dataNames = self.apply_data_code_value('dataNames', ['time'])
        #elif self.analysis_code==7: # pre-buckling
            #self.dataNames = self.apply_data_code_value('dataNames',['lsdvmn'])
        elif self.analysis_code == 8:  # post-buckling
            self.mode = self.add_data_parameter(data, 'mode', 'i', 5)  ## mode number
            self.dataNames = self.apply_data_code_value('dataNames', ['mode'])
        elif self.analysis_code == 9:  # complex eigenvalues
            self.mode = self.add_data_parameter(data, 'mode', 'i', 5)  ## mode number
            self.dataNames = self.apply_data_code_value('dataNames', ['mode'])
        elif self.analysis_code == 10:  # nonlinear statics
            self.loadFactor = self.add_data_parameter(data, 'loadFactor', 'f', 5)  ## load factor
            self.dataNames = self.apply_data_code_value('dataNames', ['loadFactor'])
        #elif self.analysis_code==11: # old geometric nonlinear statics
            #self.dataNames = self.apply_data_code_value('dataNames',['lsdvmn'])
        elif self.analysis_code == 12:  # contran ? (may appear as aCode=6) --> straight from DMAP...grrr...
            self.time = self.add_data_parameter(data, 'time', 'f', 5)  ## time step
            self.dataNames = self.apply_data_code_value('dataNames', ['time'])
        else:
            raise RuntimeError('invalid analysis_code...analysis_code=%s' %
                               self.analysis_code)
        if self.debug:
            self.binary_debug.write(' approach_code = %r\n' % self.approach_code)
            self.binary_debug.write(' tCode = %r\n' % self.tCode)
            self.binary_debug.write(' isubcase = %r\n' % self.isubcase)
        self._read_title(data)
        self._write_debug_bits()

    def _read_onr1_4(self, data):
        """
        reads ONRGY1 subtable 4 (the data record); returns the number of
        bytes consumed.
        """
        # First pass only sizes the record; the data is parsed on pass 2.
        if self.read_mode == 1:
            return len(data)
        if self.table_code == 18:  # element strain energy
            assert self.table_name in ['ONRGY1'], 'table_name=%s table_code=%s' % (self.table_name, self.table_code)
            n = self._read_element_strain_energy(data)
        else:
            raise NotImplementedError(self.table_code)
        return n

    def _read_element_strain_energy(self, data):
        """
        table_code = 19

        Parses (eid/word, energy, percent, density) entries; the entry
        layout depends on num_wide (4, 5 or 6 words per entry).
        """
        dt = self.nonlinear_factor
        n = 0
        result_name = 'strainEnergy'  # currently unused
        if self.read_mode == 1:
            return len(data)
        if self.num_wide == 4:
            # 4 words: element id + 3 floats.
            self.create_transient_object(self.strainEnergy, StrainEnergyObject)
            s = Struct(b'i3f')
            ntotal = 16
            nnodes = len(data) // ntotal
            for i in xrange(nnodes):
                edata = data[n:n+ntotal]
                out = s.unpack(edata)
                (eid_device, energy, percent, density) = out
                # Strip the device code to recover the raw element id.
                eid = (eid_device - self.device_code) // 10
                #print "eType=%s" % (eType)
                data_in = [eid, energy, percent, density]
                #print "%s" % (self.get_element_type(self.element_type)), data_in
                self.obj.add(dt, data_in)
                n += ntotal
        elif self.num_wide == 5:
            # 5 words: 8-char name + 3 floats.
            self.create_transient_object(self.strainEnergy, StrainEnergyObject)  # why is this not different?
            ntotal = 20
            s = Struct(b'8s3f')
            nnodes = len(data) // ntotal
            for i in xrange(nnodes):
                edata = data[n:n+20]
                out = s.unpack(edata)
                (word, energy, percent, density) = out
                #print "out = ",out
                word = word.strip()
                #print "eType=%s" % (eType)
                data_in = [word, energy, percent, density]
                #print "%s" %(self.get_element_type(self.element_type)), data_in
                #eid = self.obj.add_new_eid(out)
                self.obj.add(dt, data_in)
                n += ntotal
        elif self.num_wide == 6:  ## TODO: figure this out...
            self.create_transient_object(self.strainEnergy, StrainEnergyObject)  # TODO: why is this not different?
            ntotal = 24
            s = Struct(b'i8s3f')
            nnodes = len(data) // ntotal
            for i in xrange(nnodes):
                edata = data[n:n+24]
                out = s.unpack(edata)
                # NOTE(review): Struct(b'i8s3f') yields 5 fields but only 4
                # names are bound below, so this branch would raise
                # ValueError if ever hit -- confirm the intended layout.
                (word, energy, percent, density) = out  # TODO: this has to be wrong...
                #print "out = ",out
                word = word.strip()
                #print "eType=%s" % (eType)
                data_in = [word, energy, percent, density]
                #print "%s" %(self.get_element_type(self.element_type)), data_in
                #eid = self.obj.add_new_eid(out)
                self.obj.add(dt, data_in)
                n += ntotal
        else:
            raise NotImplementedError('num_wide = %s' % self.num_wide)
        return n
| [
"mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b"
] | mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b |
bbd400842a93d924ddbd60b272e0bebefb7c0e98 | 12972f4d9e7de2c38e79ae911f2e7b125965cac9 | /virtual/lib/python3.6/site-packages/pip/_internal/commands/show.py | 7d714f74c91ba4f1811ca8d37ca6b73ce58d95b4 | [
"MIT"
] | permissive | Michellemukami/pitch | b33d0de81cc2a0dfe70ddc1e91affc88af63ff2b | aebb7736d18766343a5a295de0782aa175245c35 | refs/heads/master | 2022-10-22T03:55:33.364628 | 2019-08-07T10:15:10 | 2019-08-07T10:15:10 | 200,673,234 | 0 | 0 | null | 2022-09-16T18:07:53 | 2019-08-05T14:38:26 | Python | UTF-8 | Python | false | false | 6,261 | py | from __future__ import absolute_import
import logging
import os
from email.parser import FeedParser
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
logger = logging.getLogger(__name__)
class ShowCommand(Command):
    """
    Show information about one or more installed packages.
    The output is in RFC-compliant mail header format.
    """
    # Registration metadata consumed by pip's command framework.
    name = 'show'
    usage = """
      %prog [options] <package> ..."""
    summary = 'Show information about installed packages.'
    # Skip the require-virtualenv enforcement for this read-only command.
    ignore_require_venv = True
    def __init__(self, *args, **kw):
        super(ShowCommand, self).__init__(*args, **kw)
        # -f/--files: also list every installed file for each package.
        self.cmd_opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')
        self.parser.insert_option_group(0, self.cmd_opts)
    def run(self, options, args):
        # args holds the package names to look up; refuse an empty query.
        if not args:
            logger.warning('ERROR: Please provide a package name or names.')
            return ERROR
        query = args
        results = search_packages_info(query)
        # print_results reports whether anything matched the query.
        if not print_results(
                results, list_files=options.files, verbose=options.verbose):
            return ERROR
        return SUCCESS
def search_packages_info(query):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.

    Yields one dict per queried package that is actually installed;
    requested names not present in the working set are silently skipped.
    """
    installed = {}
    for p in pkg_resources.working_set:
        installed[canonicalize_name(p.project_name)] = p

    # Canonicalize the queried names too, so lookups are insensitive to
    # case and dash/underscore differences (PEP 503 normal form).
    query_names = [canonicalize_name(name) for name in query]

    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
        package = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        file_list = None
        metadata = None
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadatas
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                # First CSV column of each RECORD line is the file path.
                paths = [l.split(',')[0] for l in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use pip's log for .egg-info's
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        if dist.has_metadata('INSTALLER'):
            # INSTALLER holds a single non-blank line naming the tool that
            # installed the package (e.g. "pip").
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    package['installer'] = line.strip()
                    break

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        # NOTE(review): `metadata` can still be None here if neither
        # METADATA nor PKG-INFO existed; FeedParser.feed would then raise.
        feed_parser = FeedParser()
        feed_parser.feed(metadata)
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary',
                    'home-page', 'user', 'user-email', 'license'):
            package[key] = pkg_info_dict.get(key)

        # It looks like FeedParser cannot deal with repeated headers
        classifiers = []
        for line in metadata.splitlines():
            if line.startswith('Classifier: '):
                classifiers.append(line[len('Classifier: '):])
        package['classifiers'] = classifiers

        if file_list:
            package['files'] = sorted(file_list)
        yield package
def print_results(distributions, list_files=False, verbose=False):
    """
    Print the informations from installed distributions found.

    :param distributions: iterable of dicts as produced by
        ``search_packages_info``.
    :param list_files: when True, also print each package's file listing.
    :param verbose: when True, print extra metadata (installer,
        classifiers, entry points).
    :return: True if at least one distribution was printed.
    """
    results_printed = False
    for i, dist in enumerate(distributions):
        results_printed = True
        if i > 0:
            # Separator between consecutive package reports.
            logger.info("---")

        name = dist.get('name', '')
        # Reverse dependencies: every installed project that requires `name`.
        required_by = [
            pkg.project_name for pkg in pkg_resources.working_set
            if name in [required.name for required in pkg.requires()]
        ]

        logger.info("Name: %s", name)
        logger.info("Version: %s", dist.get('version', ''))
        logger.info("Summary: %s", dist.get('summary', ''))
        logger.info("Home-page: %s", dist.get('home-page', ''))
        logger.info("user: %s", dist.get('user', ''))
        logger.info("user-email: %s", dist.get('user-email', ''))
        logger.info("License: %s", dist.get('license', ''))
        logger.info("Location: %s", dist.get('location', ''))
        logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
        logger.info("Required-by: %s", ', '.join(required_by))

        if verbose:
            logger.info("Metadata-Version: %s",
                        dist.get('metadata-version', ''))
            logger.info("Installer: %s", dist.get('installer', ''))
            logger.info("Classifiers:")
            for classifier in dist.get('classifiers', []):
                logger.info("  %s", classifier)
            logger.info("Entry-points:")
            for entry in dist.get('entry_points', []):
                logger.info("  %s", entry.strip())
        if list_files:
            logger.info("Files:")
            for line in dist.get('files', []):
                logger.info("  %s", line.strip())
            # 'files' is absent when no file listing could be found at all.
            if "files" not in dist:
                logger.info("Cannot locate installed-files.txt")
    return results_printed
| [
"you@example.com"
] | you@example.com |
b7bbd0cff4f44ec86ea0f1751469f76ffbf8a50f | 24c84c5b93cd816976d370a99982f45e0d18a184 | /BitManipulation/XRaiseToPowerN.py | 59807df6c254cb11dc951ae81be87470bc2be99a | [] | no_license | purushottamkaushik/DataStructuresUsingPython | 4ef1cf33f1af3fd25105a45be4f179069e327628 | e016fe052c5600dcfbfcede986d173b401ed23fc | refs/heads/master | 2023-03-12T13:25:18.186446 | 2021-02-28T18:21:37 | 2021-02-28T18:21:37 | 343,180,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | class Solution1:
    def myPow(self, x,n):
        # Delegate to Python's built-in exponentiation operator, which
        # already handles zero and negative exponents.
        return x ** (n)
# Time limit Exceeded Solution
#
# class Solution:
# def myPow(self, x: float, n: int) -> float:
#
# if n < 0:
# n = -n
# x = 1 / x
#
# val = 1
# for j in range(1, n + 1):
# val *= x
# return val
class Solution:
    """Compute x**n with fast (binary) exponentiation in O(log n) multiplications."""

    def fastPow(self, x, n):
        """Return x**n for a non-negative integer exponent n.

        Uses the recurrence x**n == (x**(n//2))**2, with one extra factor
        of x when n is odd.
        """
        if n == 0:
            return 1.0
        # Bug fix: use floor division.  With Python 3's true division
        # (`n / 2`) the exponent became a float and never reached exactly
        # 0, so the recursion blew past the recursion limit.
        half = self.fastPow(x, n // 2)
        if n % 2 == 0:
            return half * half
        else:
            return half * half * x

    def myPow(self, x: float, n: int) -> float:
        """Return x raised to the integer power n (n may be negative)."""
        if n < 0:
            # x**(-n) == (1/x)**n
            x = 1 / x
            n = -n
        return self.fastPow(x, n)
s = Solution()
print(s.myPow(2,10)) | [
"purushottamkaushik96@gmail.com"
] | purushottamkaushik96@gmail.com |
3f2ebca3033b0e9b6e0594a4e024b17269235a58 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Zendesk/Users/CreateManyUsers.py | 7f4fbc8aa21e3fc4e42b3c6633ea77278bf1c34c | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# CreateManyUsers
# Creates many new users at one time.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateManyUsers(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CreateManyUsers Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Zendesk/Users/CreateManyUsers')
def new_input_set(self):
return CreateManyUsersInputSet()
def _make_result_set(self, result, path):
return CreateManyUsersResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CreateManyUsersChoreographyExecution(session, exec_id, path)
class CreateManyUsersInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CreateManyUsers
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.)
"""
InputSet._set_input(self, 'Email', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.)
"""
InputSet._set_input(self, 'Password', value)
def set_Server(self, value):
"""
Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).)
"""
InputSet._set_input(self, 'Server', value)
def set_Users(self, value):
"""
Set the value of the Users input for this Choreo. ((required, json) A JSON-formatted string containing an array of user properties you wish to set.)
"""
InputSet._set_input(self, 'Users', value)
class CreateManyUsersResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CreateManyUsers Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) )
"""
return self._output.get('Response', None)
class CreateManyUsersChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CreateManyUsersResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
c9d1642bef0822ea1d0d53275ff38258fec6c343 | a7cb6aa4605e6cb8387858e270e051b5cb5c95b6 | /nagaram/anagrams.py | 04c657f2aba07d717599c1bea0f9488d5486f23e | [
"BSD-3-Clause"
] | permissive | a-tal/nagaram | 6a41322762d5746b14c13e46b116b6b5f6fdd2e9 | 2edcb0ef8cb569ebd1c398be826472b4831d6110 | refs/heads/master | 2020-06-08T05:00:45.766688 | 2018-03-10T18:18:12 | 2018-03-10T18:18:12 | 8,365,790 | 1 | 2 | BSD-3-Clause | 2018-03-10T18:18:31 | 2013-02-22T20:59:43 | Python | UTF-8 | Python | false | false | 1,632 | py | """Anagram finding functions."""
from nagaram.scrabble import blank_tiles, word_list, word_score
def _letter_map(word):
"""Creates a map of letter use in a word.
Args:
word: a string to create a letter map from
Returns:
a dictionary of {letter: integer count of letter in word}
"""
lmap = {}
for letter in word:
try:
lmap[letter] += 1
except KeyError:
lmap[letter] = 1
return lmap
def anagrams_in_word(word, sowpods=False, start="", end=""):
    """Finds anagrams in word.

    Args:
        word: the string to base our search off of
        sowpods: boolean to declare TWL or SOWPODS words file
        start: a string of starting characters to find anagrams based on
        end: a string of ending characters to find anagrams based on

    Yields:
        a tuple of (word, score) that can be made with the input_word
    """
    # Assumes blank_tiles splits the rack into concrete letters plus counts
    # of blank and "?" tiles -- TODO confirm against nagaram.scrabble.
    input_letters, blanks, questions = blank_tiles(word)

    # Letters fixed by the start/end constraints are added to the pool of
    # usable tiles.
    for tile in start + end:
        input_letters.append(tile)

    for word in word_list(sowpods, start, end):
        # Fresh tally of available letters for each candidate word.
        lmap = _letter_map(input_letters)
        used_blanks = 0
        for letter in word:
            if letter in lmap:
                lmap[letter] -= 1
                if lmap[letter] < 0:
                    # Ran out of this letter; spend a blank/wildcard instead.
                    used_blanks += 1
                    if used_blanks > (blanks + questions):
                        break
            else:
                used_blanks += 1
                if used_blanks > (blanks + questions):
                    break
        else:
            # for/else: reached only when the loop did NOT break, i.e. every
            # letter of the candidate could be covered by the rack.
            yield (word, word_score(word, input_letters, questions))
| [
"github@talsma.ca"
] | github@talsma.ca |
1ac752b254b2f46452b83ae611dadec6e8478300 | 9e0105505f4746d5872090885df6064ad772b3e5 | /utils/modules.py | 12888a8c8e2c06fb3dc18dd68c7e1eeed327cf99 | [] | no_license | wwwbq/PyTorch_YOLOv1 | 568afa4f8f508dabd9bcb35404a634985bf4a8ae | 2e86c64577e24193b117582c07a4941c2eeba8cc | refs/heads/main | 2023-08-08T00:08:42.466820 | 2021-09-13T06:25:09 | 2021-09-13T06:25:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Conv(nn.Module):
    """Conv2d -> BatchNorm2d -> optional LeakyReLU(0.1) building block."""

    def __init__(self, c1, c2, k, s=1, p=0, d=1, g=1, act=True):
        super(Conv, self).__init__()
        # Assemble the layers first, then wrap them in a single Sequential.
        layers = [
            nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g),
            nn.BatchNorm2d(c2),
        ]
        # Identity keeps the module graph uniform when no activation is wanted.
        layers.append(nn.LeakyReLU(0.1, inplace=True) if act else nn.Identity())
        self.convs = nn.Sequential(*layers)

    def forward(self, x):
        return self.convs(x)
class SPP(nn.Module):
    """
    Spatial Pyramid Pooling: concatenate the input with max-pooled copies
    of itself (kernel sizes 5, 9, 13; stride 1; padding chosen so the
    spatial size is preserved), quadrupling the channel count.
    """

    def __init__(self):
        super(SPP, self).__init__()

    def forward(self, x):
        # padding = k // 2 reproduces the original 5/2, 9/4, 13/6 pairs.
        pooled = [
            F.max_pool2d(x, k, stride=1, padding=k // 2) for k in (5, 9, 13)
        ]
        return torch.cat([x] + pooled, dim=1)
| [
"1394571815@qq.com"
] | 1394571815@qq.com |
51c776801f20b5d316a03b48a5de11268a41752e | 8bbfb5b937772066ea965058eb29e9f6362847c2 | /utils/tags/Q6_6_0Beta3/Binary Model Conversion/Python2.4 QuArK Model Importer and test files/MY QuArK Python Model Import-Export files/Prev Work Files/Copy (3) of MYmd2_import.py | 05576fc812ed79cbbc62035c3e4fea468bfd4d78 | [] | no_license | QuakeEngines/QuArK_quake_editor-clone | e1aeeb38e7ec8287835d643c3a0bfe5612f0b7f3 | 412bf28a14d4e369479bf38408bd93e6a2612f87 | refs/heads/master | 2021-02-15T16:11:17.332239 | 2020-03-04T14:28:50 | 2020-03-04T14:28:50 | 244,911,440 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,919 | py | """
__author__ = 'Bob Holcomb'
__version__ = '0.15'
__url__ = ["Bob's site, http://bane.servebeer.com",
"Support forum, http://scourage.servebeer.com/phpbb/", "blender", "elysiun"]
__email__ = ["Bob Holcomb, bob_holcomb:hotmail*com", "scripts"]
This script imports a Quake 2 file (MD2), textures,
and animations into blender for editing. Loader is based on MD2 loader from www.gametutorials.com-Thanks DigiBen! and the md3 blender loader by PhaethonH <phaethon@linux.ucla.edu><br>
Additional help from: Shadwolf, Skandal, Rojo, Cambo<br>
Thanks Guys!
"""
import struct, string, sys, os
from types import *
######################################################
# Main Body
######################################################
#returns the string from a null terminated string
def asciiz (s):
    """Return the prefix of s up to (not including) the first NUL.

    Fixed-width fields unpacked with struct are NUL-padded; this strips
    that padding.  Works on both text and byte strings.  If s contains no
    NUL the whole string is returned (the original scanned with ord() and
    raised IndexError in that case).
    """
    terminator = "\x00" if isinstance(s, str) else b"\x00"
    end = s.find(terminator)
    return s if end == -1 else s[:end]
######################################################
# MD2 Model Constants
######################################################
MD2_MAX_TRIANGLES=4096
MD2_MAX_VERTICES=2048
MD2_MAX_TEXCOORDS=2048
MD2_MAX_FRAMES=512
MD2_MAX_SKINS=32
MD2_MAX_FRAMESIZE=(MD2_MAX_VERTICES * 4 + 128)
######################################################
# MD2 data structures
######################################################
class md2_alias_triangle:
    # One compressed frame vertex: three unsigned-byte coordinates (to be
    # scaled/translated by the owning frame) plus an index into Quake 2's
    # precomputed light-normal table.
    vertices=[]          # class-level default; rebound per instance in __init__
    lightnormalindex=0

    binary_format="<3BB" #little-endian (<), 3 Unsigned char

    def __init__(self):
        self.vertices=[0]*3
        self.lightnormalindex=0

    def load(self, file):
        # Read one packed record from the current offset of the open file
        # and return self so calls can be chained.
        temp_data = file.read(struct.calcsize(self.binary_format))
        data = struct.unpack(self.binary_format, temp_data)
        self.vertices[0]=data[0]
        self.vertices[1]=data[1]
        self.vertices[2]=data[2]
        self.lightnormalindex=data[3]
        return self

    def dump(self):
        # Debug helper (Python 2 print statements).
        print "MD2 Alias_Triangle Structure"
        print "vertex: ", self.vertices[0]
        print "vertex: ", self.vertices[1]
        print "vertex: ", self.vertices[2]
        print "lightnormalindex: ",self.lightnormalindex
        print ""
class md2_face:
    # One triangle: three indices into the frame vertex array and three
    # indices into the texture-coordinate array.
    vertex_index=[]      # class-level defaults; rebound per instance in __init__
    texture_index=[]

    binary_format="<3h3h" #little-endian (<), 3 short, 3 short

    def __init__(self):
        self.vertex_index = [ 0, 0, 0 ]
        self.texture_index = [ 0, 0, 0]

    def load (self, file):
        # Read one packed triangle record from the current file offset.
        temp_data=file.read(struct.calcsize(self.binary_format))
        data=struct.unpack(self.binary_format, temp_data)
        self.vertex_index[0]=data[0]
        self.vertex_index[1]=data[1]
        self.vertex_index[2]=data[2]
        self.texture_index[0]=data[3]
        self.texture_index[1]=data[4]
        self.texture_index[2]=data[5]
        return self

    def dump (self):
        # Debug helper (Python 2 print statements).
        print "MD2 Face Structure"
        print "vertex index: ", self.vertex_index[0]
        print "vertex index: ", self.vertex_index[1]
        print "vertex index: ", self.vertex_index[2]
        print "texture index: ", self.texture_index[0]
        print "texture index: ", self.texture_index[1]
        print "texture index: ", self.texture_index[2]
        print ""
class md2_tex_coord:
    # One UV pair stored as absolute pixel values; the importer divides by
    # skin_width/skin_height to get 0..1 texture coordinates.
    u=0
    v=0

    # NOTE(review): "h" is a *signed* short despite the original comment
    # saying unsigned.
    binary_format="<2h" #little-endian (<), 2 unsigned short

    def __init__(self):
        self.u=0
        self.v=0

    def load (self, file):
        # Read one packed UV record from the current file offset.
        temp_data=file.read(struct.calcsize(self.binary_format))
        data=struct.unpack(self.binary_format, temp_data)
        self.u=data[0]
        self.v=data[1]
        return self

    def dump (self):
        # Debug helper (Python 2 print statements).
        print "MD2 Texture Coordinate Structure"
        print "texture coordinate u: ",self.u
        print "texture coordinate v: ",self.v
        print ""
class md2_skin:
    # Path of one skin texture, stored in the file as a fixed 64-byte
    # NUL-padded field.
    name=""

    binary_format="<64s" #little-endian (<), char[64]

    def __init__(self):
        self.name=""

    def load (self, file):
        temp_data=file.read(struct.calcsize(self.binary_format))
        data=struct.unpack(self.binary_format, temp_data)
        self.name=asciiz(data[0])  # strip the NUL padding
        return self

    def dump (self):
        # Debug helper (Python 2 print statements).
        print "MD2 Skin"
        print "skin name: ",self.name
        print ""
class md2_alias_frame:
    # One key frame: the per-frame decompression scale/translate vectors,
    # a 16-byte frame name, and (filled in by the caller) the list of
    # compressed md2_alias_triangle vertices.
    scale=[]
    translate=[]
    name=[]
    vertices=[]

    binary_format="<3f3f16s" #little-endian (<), 3 float, 3 float char[16]
    #did not add the "3bb" to the end of the binary format
    #because the alias_vertices will be read in through
    #thier own loader

    def __init__(self):
        self.scale=[0.0]*3
        self.translate=[0.0]*3
        self.name=""
        self.vertices=[]

    def load (self, file):
        # Reads only the frame header; the per-vertex records that follow
        # it in the file are loaded separately by md2_obj.load.
        temp_data=file.read(struct.calcsize(self.binary_format))
        data=struct.unpack(self.binary_format, temp_data)
        self.scale[0]=data[0]
        self.scale[1]=data[1]
        self.scale[2]=data[2]
        self.translate[0]=data[3]
        self.translate[1]=data[4]
        self.translate[2]=data[5]
        self.name=asciiz(data[6])
        return self

    def dump (self):
        # Debug helper (Python 2 print statements).
        print "MD2 Alias Frame"
        print "scale x: ",self.scale[0]
        print "scale y: ",self.scale[1]
        print "scale z: ",self.scale[2]
        print "translate x: ",self.translate[0]
        print "translate y: ",self.translate[1]
        print "translate z: ",self.translate[2]
        print "name: ",self.name
        print ""
class md2_obj:
    # Whole-model container: the 17-int MD2 header plus the parsed skin,
    # texture-coordinate, face and frame tables.
    #Header Structure
    ident=0              #int 0   This is used to identify the file
    version=0            #int 1   The version number of the file (Must be 8)
    skin_width=0         #int 2   The skin width in pixels
    skin_height=0        #int 3   The skin height in pixels
    frame_size=0         #int 4   The size in bytes the frames are
    num_skins=0          #int 5   The number of skins associated with the model
    num_vertices=0       #int 6   The number of vertices (constant for each frame)
    num_tex_coords=0     #int 7   The number of texture coordinates
    num_faces=0          #int 8   The number of faces (polygons)
    num_GL_commands=0    #int 9   The number of gl commands
    num_frames=0         #int 10  The number of animation frames
    offset_skins=0       #int 11  The offset in the file for the skin data
    offset_tex_coords=0  #int 12  The offset in the file for the texture data
    offset_faces=0       #int 13  The offset in the file for the face data
    offset_frames=0      #int 14  The offset in the file for the frames data
    offset_GL_commands=0 #int 15  The offset in the file for the gl commands data
    offset_end=0         #int 16  The end of the file offset

    binary_format="<17i" #little-endian (<), 17 integers (17i)

    #md2 data objects
    tex_coords=[]
    faces=[]
    frames=[]
    skins=[]

    def __init__ (self):
        self.tex_coords=[]
        self.faces=[]
        self.frames=[]
        self.skins=[]

    def load (self, file):
        # Read and validate the 68-byte header (17 * 4 bytes), allocate the
        # data objects, then seek to each section's offset and load it.
        temp_data = file.read(struct.calcsize(self.binary_format))
        data = struct.unpack(self.binary_format, temp_data)
        self.ident=data[0]
        self.version=data[1]
        if (self.ident!=844121161 or self.version!=8):
            # 844121161 is the magic "IDP2" read as a little-endian int.
            print "Not a valid MD2 file"
            Exit()
        self.skin_width=data[2]
        self.skin_height=data[3]
        self.frame_size=data[4]
        #make the # of skin objects for model
        self.num_skins=data[5]
        for i in xrange(0,self.num_skins):
            self.skins.append(md2_skin())
        self.num_vertices=data[6]
        #make the # of texture coordinates for model
        self.num_tex_coords=data[7]
        for i in xrange(0,self.num_tex_coords):
            self.tex_coords.append(md2_tex_coord())
        #make the # of triangle faces for model
        self.num_faces=data[8]
        for i in xrange(0,self.num_faces):
            self.faces.append(md2_face())
        self.num_GL_commands=data[9]
        #make the # of frames for the model
        self.num_frames=data[10]
        for i in xrange(0,self.num_frames):
            self.frames.append(md2_alias_frame())
            #make the # of vertices for each frame
            for j in xrange(0,self.num_vertices):
                self.frames[i].vertices.append(md2_alias_triangle())
        self.offset_skins=data[11]
        self.offset_tex_coords=data[12]
        self.offset_faces=data[13]
        self.offset_frames=data[14]
        self.offset_GL_commands=data[15]
        #load the skin info
        file.seek(self.offset_skins,0)
        for i in xrange(0, self.num_skins):
            self.skins[i].load(file)
            #self.skins[i].dump()
        #load the texture coordinates
        file.seek(self.offset_tex_coords,0)
        for i in xrange(0, self.num_tex_coords):
            self.tex_coords[i].load(file)
            #self.tex_coords[i].dump()
        #load the face info
        file.seek(self.offset_faces,0)
        for i in xrange(0, self.num_faces):
            self.faces[i].load(file)
            #self.faces[i].dump()
        #load the frames
        file.seek(self.offset_frames,0)
        for i in xrange(0, self.num_frames):
            # Each frame is its header followed immediately by
            # num_vertices packed vertex records.
            self.frames[i].load(file)
            #self.frames[i].dump()
            for j in xrange(0,self.num_vertices):
                self.frames[i].vertices[j].load(file)
                #self.frames[i].vertices[j].dump()
        return self

    def dump (self):
        # Debug helper: print the parsed header (Python 2 print statements).
        print "Header Information"
        print "ident: ", self.ident
        print "version: ", self.version
        print "skin width: ", self.skin_width
        print "skin height: ", self.skin_height
        print "frame size: ", self.frame_size
        print "number of skins: ", self.num_skins
        print "number of texture coordinates: ", self.num_tex_coords
        print "number of faces: ", self.num_faces
        print "number of frames: ", self.num_frames
        print "number of vertices: ", self.num_vertices
        print "offset skins: ", self.offset_skins
        print "offset texture coordinates: ", self.offset_tex_coords
        print "offset faces: ", self.offset_faces
        print "offset frames: ",self.offset_frames
        print ""
######################################################
# Import functions
######################################################
def load_textures(md2, texture_filename):
    """Print the model's embedded skin names and return -1.

    In this stripped-down (non-Blender) port both branches return -1, i.e.
    no image is ever loaded; the original Blender loading code is kept
    commented out below for reference.  `texture_filename` is unused here.
    """
    #does the model have textures specified with it?
    if int(md2.num_skins) > 0:
        for i in xrange(0,md2.num_skins):
            md2.skins[i].dump() # Comment out later, just prints to the console what the skin(s) are.
        return -1
        ### Blenders way of loading the skin texture.
        # if (Blender.sys.exists(md2.skins[i].name)):
        #     mesh_image=Blender.Image.Load(md2.skins[i].name)
        # else:
        #     result=Blender.Draw.PupMenu("Cannot find texture: "+md2.skins[i].name+"-Continue?%t|OK")
        #     if(result==1):
        #         return -1
        # return mesh_image
    else:
        return -1
def load_md2 (md2_filename, texture_filename):
    """Parse an MD2 file, dump its header to a report file, and build the
    vertex/UV/face data.

    NOTE(review): this is a partial port of a Blender importer.  `mesh` is
    a plain list here, so the later calls to mesh.hasFaceUV / mesh.verts /
    mesh.faces and the NMesh.* calls are Blender leftovers that will raise
    at runtime -- only the header-report and vertex-list stages can
    actually complete.
    """
    # Open our text file to wright the data to.
    temp = open("c:\\Python24\\temp.txt", "w")
    o = open("c:\\Python24\\Md2_Model_Import_Data.txt", "w")
    #read the file in
    file=open(md2_filename,"rb")
    md2=md2_obj()
    md2.load(file)
    md2.dump() # Comment out later, just to print the file Header to the console.
    ### Lines below changes the system output causing the 'dump' to be writen to the 'temp' file
    ### which is read back in for the variable 'Header' to use and write to another file.
    sys.stdout = temp
    md2.dump()
    sys.stdout = sys.__stdout__
    temp.close()
    # Re-open the capture file and copy it line by line into the report.
    temp = open("c:\\Python24\\temp.txt")
    Header = "None"
    while Header != "":
        Header = temp.readline()
        o.write(Header)
    temp.close()
    os.remove("c:\\Python24\\temp.txt") # Deletes this temp file.
    file.close()
    ######### Creates a new mesh
    # mesh = NMesh.New()
    mesh = []
    uv_coord=[]
    uv_list=[]
    #load the textures to use later
    #-1 if there is no texture to load
    mesh_image=load_textures(md2, texture_filename)
    ######### Make the verts
    print "Loading Vertex Data = " + str(xrange(0,md2.num_vertices)) + " md2.num_vertices\n"
    for i in xrange(0,md2.num_vertices):
        #use the first frame for the mesh vertices
        # Decompress byte coordinates via the frame's scale/translate, then
        # apply the global g_scale factor.
        x=(md2.frames[0].scale[0]*md2.frames[0].vertices[i].vertices[0]+md2.frames[0].translate[0])*g_scale
        y=(md2.frames[0].scale[1]*md2.frames[0].vertices[i].vertices[1]+md2.frames[0].translate[1])*g_scale
        z=(md2.frames[0].scale[2]*md2.frames[0].vertices[i].vertices[2]+md2.frames[0].translate[2])*g_scale
        # vertex=NMesh.Vert(y,-x,z)
        # Axis swap (y, -x, z) converts from the MD2 coordinate system.
        vertex=(y,-x,z)
        # mesh.verts.append(vertex)
        mesh.append(vertex)
    o.write("\n\nMesh Vertex Data = " + str(xrange(0,md2.num_vertices)) + " md2.num_vertices\n" + str(mesh) + "\n\n")
    ######## Make the UV list
    print "Loading UV Data"
    # NOTE(review): Blender leftover -- a list has no hasFaceUV; raises here.
    mesh.hasFaceUV(1) #turn on face UV coordinates for this mesh
    for i in xrange(0, md2.num_tex_coords):
        # Normalize pixel UVs to the 0..1 range.
        u=(float(md2.tex_coords[i].u)/float(md2.skin_width))
        v=(float(md2.tex_coords[i].v)/float(md2.skin_height))
        #for some reason quake2 texture maps are upside down, flip that
        uv_coord=(u,1-v)
        uv_list.append(uv_coord)
    ######### Make the faces
    print "Loading Face Data"
    for i in xrange(0,md2.num_faces):
        # NOTE(review): NMesh is undefined in this port.
        face = NMesh.Face()
        #draw the triangles in reverse order so they show up
        face.v.append(mesh.verts[md2.faces[i].vertex_index[0]])
        face.v.append(mesh.verts[md2.faces[i].vertex_index[2]])
        face.v.append(mesh.verts[md2.faces[i].vertex_index[1]])
        #append the list of UV
        #ditto in reverse order with the texture verts
        face.uv.append(uv_list[md2.faces[i].texture_index[0]])
        face.uv.append(uv_list[md2.faces[i].texture_index[2]])
        face.uv.append(uv_list[md2.faces[i].texture_index[1]])
        #set the texture that this face uses if it has one
        if (mesh_image!=-1):
            face.image=mesh_image
        #add the face
        mesh.faces.append(face)
    mesh_obj=NMesh.PutRaw(mesh)
    # NOTE(review): animate_md2 is not defined anywhere in this file.
    animate_md2(md2, mesh_obj)
    print "Loading Animation Data"
#***********************************************
# MAIN
#***********************************************
"""
# Import globals
g_md2_filename=Create("model")
g_texture_filename=Create("texture")
g_filename_search=Create("model")
g_texture_search=Create("texture")
"""
#Globals
# g_scale=Create(1.0)
g_scale = 1.0
# Events
EVENT_NOEVENT=1
EVENT_LOAD_MD2=2
EVENT_CHOOSE_FILENAME=3
EVENT_CHOOSE_TEXTURE=4
EVENT_SAVE_MD2=5
EVENT_EXIT=100
######################################################
# Callbacks for Window functions
######################################################
def filename_callback(input_filename):
    # GUI callback: store the chosen model path into the string-button
    # value.  NOTE(review): g_md2_filename is only defined inside the
    # commented-out "Import globals" section above, so calling this as-is
    # would raise NameError -- confirm before wiring it to a UI.
    global g_md2_filename
    g_md2_filename.val=input_filename
def texture_callback(input_texture):
    # GUI callback: store the chosen texture path.  NOTE(review): like
    # g_md2_filename, g_texture_filename is only defined in the
    # commented-out section above -- would raise NameError if called.
    global g_texture_filename
    g_texture_filename.val=input_texture
########################
# To run this file
########################
md2_filename = "c:\\Python24\\models\\alien\\tris.md2"
texture_filename = "c:\\Python24\\models\\alien\\rust.pcx"
load_md2 (md2_filename, texture_filename)
| [
"nobody@5419a3ea-6bc3-475c-b79b-167d7c3fbd5f"
] | nobody@5419a3ea-6bc3-475c-b79b-167d7c3fbd5f |
cf1bd1863d3f53e5ecd17372f8353719846f99e0 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/M/michiko3/my_scraper_-_michiko.py | 051a46dfb17a94145b5b97a6695f80da14f2f70c | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,276 | py | import scraperwiki
from bs4 import BeautifulSoup
html = scraperwiki.scrape("http://usatoday30.usatoday.com/money/economy/housing/2009-02-11-decline-housing-foreclosure_N.htm")
soup = BeautifulSoup(html)
print soup.prettify()
print soup.find_all("table")
print len(soup.find_all("table"))
tables = soup.find_all("table", {"border" : "0", "cellspacing": "1", "cellpadding": "2"})
print len(tables)
print tables
for table in tables:
for row in table.find_all('tr'):
for cell in row.find_all("td"):
print cell.get_text().strip()
rows = tables[1].find_all('tr')
for row in rows:
for cell in row.find_all("td"):
print cell.get_text().strip()
for row in rows:
cells = row.find_all("td")
print "there are", len(cells), "in this row"
print "zero", cells[0]
for row in rows:
cells = row.find_all("td")
print "there are", len(cells), "cells in this row"
if len(cells) > 5:
print "rank", cells[0].get_text().strip()
print "state", cells[1].get_text().strip()
print 'total_filings', cells[2].get_text().strip()
print '1_per_x' , cells[3].get_text().strip()
for row in rows:
cells = row.find_all("td")
if len(cells) > 5:
data = {
'rank' : cells[0].get_text().strip(),
'state' : cells[1].get_text().strip(),
'total_filings' : cells[2].get_text().strip(),
'1_per_x' : cells[3].get_text().strip(),
'change_dec_jan' : cells[4].get_text().strip(),
'change_jan08' : cells[5].get_text().strip()
}
scraperwiki.sqlite.save(unique_keys=['state'],data=data)import scraperwiki
from bs4 import BeautifulSoup
html = scraperwiki.scrape("http://usatoday30.usatoday.com/money/economy/housing/2009-02-11-decline-housing-foreclosure_N.htm")
soup = BeautifulSoup(html)
print soup.prettify()
print soup.find_all("table")
print len(soup.find_all("table"))
tables = soup.find_all("table", {"border" : "0", "cellspacing": "1", "cellpadding": "2"})
print len(tables)
print tables
for table in tables:
for row in table.find_all('tr'):
for cell in row.find_all("td"):
print cell.get_text().strip()
rows = tables[1].find_all('tr')
for row in rows:
for cell in row.find_all("td"):
print cell.get_text().strip()
for row in rows:
cells = row.find_all("td")
print "there are", len(cells), "in this row"
print "zero", cells[0]
for row in rows:
cells = row.find_all("td")
print "there are", len(cells), "cells in this row"
if len(cells) > 5:
print "rank", cells[0].get_text().strip()
print "state", cells[1].get_text().strip()
print 'total_filings', cells[2].get_text().strip()
print '1_per_x' , cells[3].get_text().strip()
for row in rows:
cells = row.find_all("td")
if len(cells) > 5:
data = {
'rank' : cells[0].get_text().strip(),
'state' : cells[1].get_text().strip(),
'total_filings' : cells[2].get_text().strip(),
'1_per_x' : cells[3].get_text().strip(),
'change_dec_jan' : cells[4].get_text().strip(),
'change_jan08' : cells[5].get_text().strip()
}
scraperwiki.sqlite.save(unique_keys=['state'],data=data) | [
"pallih@kaninka.net"
] | pallih@kaninka.net |
83d80e4116c4fa57e5fd02e264e49d708e6d5710 | 09ae3f372d1000f118ad80874870ae420a4be66f | /scikit-learn-master/sklearn/semi_supervised/label_propagation.py | 11b3344a74dbc8d09f555fd96ebecdfee76abdab | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | lqkweb/learnMLflow | 998f80c3828879b8d542125bc95c6345b8e9b29a | 13c5decaebba95b1b90f92021be35e343b4764af | refs/heads/master | 2022-10-18T06:17:23.584172 | 2019-01-18T09:51:38 | 2019-01-18T09:51:38 | 166,145,472 | 2 | 0 | Apache-2.0 | 2022-09-30T18:26:17 | 2019-01-17T02:22:29 | Python | UTF-8 | Python | false | false | 18,484 | py | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset given
label assignments over an initial subset. In one variant, the algorithm does
not allow for any errors in the initial assignment (hard-clamping) while
in another variant, the algorithm allows for some wiggle room for the initial
assignments, allowing them to change by a fraction alpha in each iteration
(soft-clamping).
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Utkarsh Upadhyay <mail@musicallyut.in>
# License: BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..exceptions import ConvergenceWarning
class BaseLabelPropagation(BaseEstimator, ClassifierMixin, metaclass=ABCMeta):
    """Base class for label propagation module.
    Parameters
    ----------
    kernel : {'knn', 'rbf', callable}
        String identifier for kernel function to use or the kernel function
        itself. Only 'rbf' and 'knn' strings are valid inputs. The function
        passed should take two inputs, each of shape [n_samples, n_features],
        and return a [n_samples, n_samples] shaped weight matrix
    gamma : float
        Parameter for rbf kernel
    n_neighbors : integer > 0
        Parameter for knn kernel
    alpha : float
        Clamping factor
    max_iter : integer
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    """
    # Concrete subclasses must provide a `_variant` class attribute
    # ('propagation' or 'spreading') -- fit() branches on it -- and an
    # implementation of `_build_graph`.
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3, n_jobs=None):
        self.max_iter = max_iter
        self.tol = tol
        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors
        # clamping factor
        self.alpha = alpha
        self.n_jobs = n_jobs
    def _get_kernel(self, X, y=None):
        # Pairwise weights between X and y (or X and itself when y is None).
        # Note the asymmetry for 'knn': with y=None a sparse connectivity
        # graph over X is returned, but with y given only the *indices* of
        # y's nearest neighbors in X are returned (predict_proba relies on
        # exactly that).
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            # nn_fit is reset to None by the subclasses' _build_graph before
            # fitting, then cached here and reused at prediction time.
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors,
                                               n_jobs=self.n_jobs).fit(X)
            if y is None:
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        elif callable(self.kernel):
            if y is None:
                return self.kernel(X, X)
            else:
                return self.kernel(X, y)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " or an explicit function "
                             " are supported at this time." % self.kernel)
    @abstractmethod
    def _build_graph(self):
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")
    def predict(self, X):
        """Performs inductive inference across the model.
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        # Predicted label = argmax of the (normalized) class probabilities.
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()
    def predict_proba(self, X):
        """Predict probability for each possible outcome.
        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')
        X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
                                             'bsr', 'lil', 'dia'])
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # For 'knn', weight_matrices holds each test point's neighbor
            # indices into the training set; sum the training label
            # distributions of those neighbors to score the test point.
            probabilities = np.array([
                np.sum(self.label_distributions_[weight_matrix], axis=0)
                for weight_matrix in weight_matrices])
        else:
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        # Normalize each row into a proper categorical distribution.
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities
    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based
        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this
        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        check_classification_targets(y)
        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()
        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])
        self.classes_ = classes
        n_samples, n_classes = len(y), len(classes)
        alpha = self.alpha
        # Only the 'spreading' (soft-clamp) variant uses alpha; it must lie
        # strictly inside (0, 1).
        if self._variant == 'spreading' and \
                (alpha is None or alpha <= 0.0 or alpha >= 1.0):
            raise ValueError('alpha=%s is invalid: it must be inside '
                             'the open interval (0, 1)' % alpha)
        y = np.asarray(y)
        unlabeled = y == -1
        # initialize distributions
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            # One-hot encode the labeled points.
            self.label_distributions_[y == label, classes == label] = 1
        y_static = np.copy(self.label_distributions_)
        if self._variant == 'propagation':
            # LabelPropagation
            y_static[unlabeled] = 0
        else:
            # LabelSpreading
            y_static *= 1 - alpha
        l_previous = np.zeros((self.X_.shape[0], n_classes))
        unlabeled = unlabeled[:, np.newaxis]
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        # Propagate until the distributions stop changing (or max_iter is
        # reached).  Note the loop index is deliberately stored on the
        # estimator itself as n_iter_.
        for self.n_iter_ in range(self.max_iter):
            if np.abs(self.label_distributions_ - l_previous).sum() < self.tol:
                break
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            if self._variant == 'propagation':
                normalizer = np.sum(
                    self.label_distributions_, axis=1)[:, np.newaxis]
                self.label_distributions_ /= normalizer
                # Hard clamp: labeled points are reset to their given labels.
                self.label_distributions_ = np.where(unlabeled,
                                                     self.label_distributions_,
                                                     y_static)
            else:
                # clamp
                self.label_distributions_ = np.multiply(
                    alpha, self.label_distributions_) + y_static
        else:
            # for/else: only reached when the loop exhausted max_iter without
            # hitting the convergence break above.
            warnings.warn(
                'max_iter=%d was reached without convergence.' % self.max_iter,
                category=ConvergenceWarning
            )
            self.n_iter_ += 1
        # Final row normalization into proper categorical distributions.
        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier
    Read more in the :ref:`User Guide <label_propagation>`.
    Parameters
    ----------
    kernel : {'knn', 'rbf', callable}
        String identifier for kernel function to use or the kernel function
        itself. Only 'rbf' and 'knn' strings are valid inputs. The function
        passed should take two inputs, each of shape [n_samples, n_features],
        and return a [n_samples, n_samples] shaped weight matrix.
    gamma : float
        Parameter for rbf kernel
    n_neighbors : integer > 0
        Parameter for knn kernel
    max_iter : integer
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> rng = np.random.RandomState(42)
    >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)
    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """
    # Hard-clamping variant: after every propagation step the labeled points
    # are reset to their original labels (see BaseLabelPropagation.fit).
    _variant = 'propagation'
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 max_iter=1000, tol=1e-3, n_jobs=None):
        # alpha is forced to None: hard clamping has no soft-clamp factor.
        super().__init__(kernel=kernel, gamma=gamma,
                         n_neighbors=n_neighbors, max_iter=max_iter,
                         tol=tol, n_jobs=n_jobs, alpha=None)
    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample
        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            # Force _get_kernel to (re)fit the nearest-neighbors index.
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        # Column sums, used to row-normalize the affinity matrix below.
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # NOTE(review): for a sparse matrix, `normalizer` is a 1 x n
            # np.matrix, so np.diag(np.array(normalizer)) extracts a
            # length-1 diagonal and every stored value is divided by one
            # scalar instead of its own column sum.  This looks incorrect
            # (cf. scikit-learn issue #9292) -- confirm against upstream
            # before relying on kernel='knn' with this class.
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
    def fit(self, X, y):
        # Trivial override: delegates entirely to the shared base-class fit.
        return super().fit(X, y)
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.
    Read more in the :ref:`User Guide <label_propagation>`.
    Parameters
    ----------
    kernel : {'knn', 'rbf', callable}
        String identifier for kernel function to use or the kernel function
        itself. Only 'rbf' and 'knn' strings are valid inputs. The function
        passed should take two inputs, each of shape [n_samples, n_features],
        and return a [n_samples, n_samples] shaped weight matrix
    gamma : float
        parameter for rbf kernel
    n_neighbors : integer > 0
        parameter for knn kernel
    alpha : float
        Clamping factor. A value in [0, 1] that specifies the relative amount
        that an instance should adopt the information from its neighbors as
        opposed to its initial label.
        alpha=0 means keeping the initial label information; alpha=1 means
        replacing all initial information.
    max_iter : integer
        maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> rng = np.random.RandomState(42)
    >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)
    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """
    # Soft-clamping variant: each step mixes the propagated distribution
    # with the initial labels, weighted by alpha (see the base fit()).
    _variant = 'spreading'
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3, n_jobs=None):
        # this one has different base parameters
        super().__init__(kernel=kernel, gamma=gamma,
                         n_neighbors=n_neighbors, alpha=alpha,
                         max_iter=max_iter, tol=tol, n_jobs=n_jobs)
    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            # Force _get_kernel to (re)fit the nearest-neighbors index.
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = csgraph.laplacian(affinity_matrix, normed=True)
        # Negate: the normalized laplacian is I - S (S the symmetrically
        # normalized affinity); with the diagonal zeroed out below, what
        # remains is the off-diagonal part of S used by the iteration.
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            # NOTE(review): assumes csgraph.laplacian returns a COO matrix
            # for sparse input (.row/.col attributes) -- confirm for the
            # scipy version in use.
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| [
"leiqk@dxy.cn"
] | leiqk@dxy.cn |
e8e579a506bd6a374ce3f70bba4fc4fa825cd638 | e6448149ad47f0362720e3eb30fa330d7e040ed0 | /lib/__init__.py | 08bcd67f7f71dd609bc8735deb6af6c4861fd902 | [] | no_license | ziming-liu/still-image-action-recognition | 9936270b344e60bbb1e1619c89173c7ff3cecdfe | 79ada680de7d97bf1ffebbbc7f02799bd2434c46 | refs/heads/master | 2020-08-03T02:49:19.553019 | 2019-09-29T05:15:10 | 2019-09-29T05:15:10 | 211,602,449 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | from .mycollate import collate
| [
"zimingemail@163.com"
] | zimingemail@163.com |
6074ebb5ed259550f2ca5a4e5800871b45257c7e | 87f94560837a93963b1610457f27cd9adc34e538 | /2018/3/main.py | 4aa7d0bcf7fb488e7c4caffb3bb5c404e64271d0 | [] | no_license | Coul33t/AdventOfCode | 3f511a71f48fcff3738a6e905e711a68f32fad62 | f1443888cde4043f21d286449f2660f1c722e571 | refs/heads/master | 2020-04-09T09:38:05.351225 | 2018-12-04T15:09:09 | 2018-12-04T15:09:09 | 160,240,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | import pdb
import re
def rect_overlaps(rect1, rect2):
    """Return True if two claims cover at least one common square inch.

    Each claim is a list ``[id, left, top, width, height]``; it occupies
    the half-open ranges ``[left, left + width)`` horizontally and
    ``[top, top + height)`` vertically (so ``left + width`` is one past
    the last covered column).

    Fix: the original used strict comparisons (``<`` / ``>``), which
    wrongly reported two claims that merely touch edge-to-edge as
    overlapping -- a claim ending exactly where another begins shares no
    square inch with it.  Since the right/bottom bounds are exclusive,
    the disjointness tests must allow equality.
    """
    # The claims are disjoint iff one lies entirely to the left of, right
    # of, above, or below the other; overlap is the negation of that.
    return not (rect1[1] + rect1[3] <= rect2[1] or
                rect1[1] >= rect2[1] + rect2[3] or
                rect1[2] >= rect2[2] + rect2[4] or
                rect1[2] + rect1[4] <= rect2[2])
def overlapping2(data):
    """Return the ID of the single claim that overlaps no other claim.

    Implicitly returns None if every claim collides with at least one
    other claim.
    """
    for candidate in data:
        collides = False
        for other in data:
            if other is not candidate and rect_overlaps(candidate, other):
                collides = True
                break
        if not collides:
            # Element 0 of a parsed claim is its ID.
            return candidate[0]
def overlapping(data):
    """Return the number of square inches covered by two or more claims.

    Each claim in ``data`` is a list ``[id, left, top, width, height]``.
    """
    claimed = {}
    for claim in data:
        left, top, width, height = claim[1], claim[2], claim[3], claim[4]
        for row in range(top, top + height):
            for col in range(left, left + width):
                cell = (row, col)
                claimed[cell] = claimed.get(cell, 0) + 1
    return sum(1 for hits in claimed.values() if hits > 1)
if __name__ == '__main__':
    # Read the puzzle input: one claim per line, e.g. "#1 @ 1,3: 4x4".
    with open ('input.txt', 'r') as input_file:
        data = input_file.read().split('\n')
    # Parse each claim into five ints: [id, left, top, width, height].
    # NOTE(review): a trailing newline in input.txt yields an empty final
    # entry ([]), which would break the indexing in the helpers below --
    # confirm the input file has no trailing blank line.
    for i in range(len(data)):
        data[i] = [int(x) for x in re.findall(r'(\d+)', data[i])]
    print(overlapping(data))   # part 1: square inches claimed 2+ times
    print(overlapping2(data))  # part 2: ID of the claim with no overlaps
| [
"Coulis1990@gmail.com"
] | Coulis1990@gmail.com |
ab9e2100dc7cda13e45967def14d034a68096bb4 | 9680ba23fd13b4bc0fc3ce0c9f02bb88c6da73e4 | /Bernd Klein (520) ile Python/p_20708x.py | 4d2b0d69e9c9930d9ace4529711b5a71b43f8e75 | [] | no_license | mnihatyavas/Python-uygulamalar | 694091545a24f50a40a2ef63a3d96354a57c8859 | 688e0dbde24b5605e045c8ec2a9c772ab5f0f244 | refs/heads/master | 2020-08-23T19:12:42.897039 | 2020-04-24T22:45:22 | 2020-04-24T22:45:22 | 216,670,169 | 0 | 0 | null | null | null | null | ISO-8859-9 | Python | false | false | 7,413 | py | # coding:iso-8859-9 Türkçe
# p_20708x.py: Sub-example of the original Graph class and its methods.
class Graph(object):
    """An undirected graph backed by an adjacency dictionary.

    The dictionary maps every vertex to the list of its adjacent
    vertices; a vertex that appears in its own adjacency list is a loop
    (counted twice towards that vertex's degree).
    """

    def __init__(self, graph_dict=None):
        """Initialize a graph object.

        If no dictionary or None is given, an empty dictionary is used.
        """
        # Fix: use an identity check instead of `== None`.
        if graph_dict is None:
            graph_dict = {}
        self.__graph_dict = graph_dict

    def vertices(self):
        """Return the vertices of the graph as a list."""
        return list(self.__graph_dict.keys())

    def edges(self):
        """Return the edges of the graph (a list of vertex sets)."""
        return self.__generate_edges()

    def add_vertex(self, vertex):
        """Add "vertex" with an empty adjacency list if it is not
        already in the graph; otherwise do nothing.
        """
        if vertex not in self.__graph_dict:
            self.__graph_dict[vertex] = []

    def add_edge(self, edge):
        """Add an edge given as a set, tuple or list of vertices.

        A one-element edge is a loop.  Multiple edges between the same
        two vertices are allowed.
        """
        edge = set(edge)
        vertex1 = edge.pop()
        # A one-element set means both endpoints coincide (a loop).
        vertex2 = edge.pop() if edge else vertex1
        if vertex1 in self.__graph_dict:
            self.__graph_dict[vertex1].append(vertex2)
        else:
            self.__graph_dict[vertex1] = [vertex2]

    def __generate_edges(self):
        """Generate the edges of the graph.

        Edges are represented as sets with one (a loop back to the
        vertex) or two vertices.
        """
        edges = []
        for vertex in self.__graph_dict:
            for neighbour in self.__graph_dict[vertex]:
                if {neighbour, vertex} not in edges:
                    edges.append({vertex, neighbour})
        return edges

    def __str__(self):
        res = "vertices: "
        for k in self.__graph_dict:
            res += str(k) + " "
        res += "\nedges: "
        for edge in self.__generate_edges():
            res += str(edge) + " "
        return res

    def find_isolated_vertices(self):
        """Return a list of vertices with an empty adjacency list."""
        # Fix: removed the debug print that ran on every iteration.
        graph = self.__graph_dict
        return [vertex for vertex in graph if not graph[vertex]]

    def find_path(self, start_vertex, end_vertex, path=None):
        """Find one path from start_vertex to end_vertex.

        Returns the path as a list of vertices, or None if no path
        exists (or start_vertex is not in the graph).
        """
        # Fix: avoid a mutable default argument.
        if path is None:
            path = []
        graph = self.__graph_dict
        path = path + [start_vertex]
        if start_vertex == end_vertex:
            return path
        if start_vertex not in graph:
            return None
        for vertex in graph[start_vertex]:
            if vertex not in path:
                extended_path = self.find_path(vertex, end_vertex, path)
                if extended_path:
                    return extended_path
        return None

    def find_all_paths(self, start_vertex, end_vertex, path=None):
        """Find all loop-free paths from start_vertex to end_vertex."""
        # Fix: avoid a mutable default argument.
        if path is None:
            path = []
        graph = self.__graph_dict
        path = path + [start_vertex]
        if start_vertex == end_vertex:
            return [path]
        if start_vertex not in graph:
            return []
        paths = []
        for vertex in graph[start_vertex]:
            if vertex not in path:
                for p in self.find_all_paths(vertex, end_vertex, path):
                    paths.append(p)
        return paths

    def is_connected(self, vertices_encountered=None, start_vertex=None):
        """Determine whether the graph is connected (recursive DFS)."""
        if vertices_encountered is None:
            vertices_encountered = set()
        gdict = self.__graph_dict
        vertices = list(gdict.keys())  # "list" necessary in Python 3
        if not start_vertex:
            # choose a vertex from graph as a starting point
            start_vertex = vertices[0]
        vertices_encountered.add(start_vertex)
        if len(vertices_encountered) != len(vertices):
            for vertex in gdict[start_vertex]:
                if vertex not in vertices_encountered:
                    if self.is_connected(vertices_encountered, vertex):
                        return True
        else:
            return True
        return False

    def vertex_degree(self, vertex):
        """Return the degree of "vertex": the number of incident edges,
        with loops counted twice.
        """
        adj_vertices = self.__graph_dict[vertex]
        return len(adj_vertices) + adj_vertices.count(vertex)

    def degree_sequence(self):
        """Return the degree sequence as a non-increasing tuple."""
        seq = [self.vertex_degree(vertex) for vertex in self.__graph_dict]
        seq.sort(reverse=True)
        return tuple(seq)

    @staticmethod
    def is_degree_sequence(sequence):
        """Return True if "sequence" is non-increasing, i.e. a valid
        degree sequence; otherwise False.
        """
        return all(x >= y for x, y in zip(sequence, sequence[1:]))

    def delta(self):
        """Return the minimum degree of the vertices.

        Raises ValueError for an empty graph.
        """
        # Fix: compute directly instead of comparing against an arbitrary
        # large sentinel constant (the original returned 100000000 for an
        # empty graph).
        return min(self.vertex_degree(vertex) for vertex in self.__graph_dict)

    def Delta(self):
        """Return the maximum degree of the vertices.

        Raises ValueError for an empty graph.
        """
        return max(self.vertex_degree(vertex) for vertex in self.__graph_dict)

    def density(self):
        """Return the density 2*E / (V*(V-1)) of the graph."""
        g = self.__graph_dict
        V = len(g)
        E = len(self.edges())
        return 2.0 * E / (V * (V - 1))

    def diameter(self):
        """Return the diameter of the graph: the number of edges on the
        longest of all shortest vertex-to-vertex paths.

        The graph must be connected, otherwise some pair has no path.
        """
        v = self.vertices()
        # Fix: the original generated pairs with range(i+1, len(v)-1),
        # silently skipping every pair involving the last vertex.
        pairs = [(v[i], v[j])
                 for i in range(len(v)) for j in range(i + 1, len(v))]
        smallest_paths = []
        for (s, e) in pairs:
            paths = self.find_all_paths(s, e)
            smallest = sorted(paths, key=len)[0]
            smallest_paths.append(smallest)
        smallest_paths.sort(key=len)
        # The longest of the shortest paths sits at the end of the list;
        # its edge count (vertex count minus one) is the diameter.
        return len(smallest_paths[-1]) - 1

    @staticmethod
    def erdoes_gallai(dsequence):
        """Check whether the Erdoes-Gallai condition holds for "dsequence".

        A non-increasing sequence of non-negative integers is graphical
        iff its sum is even and the Erdoes-Gallai inequality holds for
        every prefix length k.
        """
        if sum(dsequence) % 2:
            # sum of sequence is odd
            return False
        if not Graph.is_degree_sequence(dsequence):
            # sequence is increasing somewhere, so not a degree sequence
            return False
        for k in range(1, len(dsequence) + 1):
            left = sum(dsequence[:k])
            right = k * (k - 1) + sum(min(x, k) for x in dsequence[k:])
            if left > right:
                return False
        return True
| [
"noreply@github.com"
] | mnihatyavas.noreply@github.com |
0e45c65d4c67c931afc4e85b7410cb8b62d4c595 | 9adc45c39030d1109849c211afd294b4d33f660c | /example/scripts/upload_video_to_s3_and_sync.py | 321e2e91ad4f767a025163b98ac0c20532904674 | [
"MIT"
] | permissive | jf-parent/brome | f12e728b984061e3c329f30d59f9d615ffe0a1f2 | 784f45d96b83b703dd2181cb59ca8ea777c2510e | refs/heads/release | 2020-12-28T14:45:53.428910 | 2017-05-05T12:32:44 | 2017-05-05T12:32:44 | 38,472,866 | 3 | 0 | null | 2017-05-05T12:32:45 | 2015-07-03T05:02:09 | Python | UTF-8 | Python | false | false | 2,713 | py | #! /usr/bin/env python
import os
import boto3
import yaml
from brome.core.utils import DbSessionContext
from brome.model.testinstance import Testinstance
from brome.model.testcrash import Testcrash
from brome.model.testresult import Testresult
# Migrate locally-stored test videos to S3: upload each local capture,
# rewrite the stored paths on the test instance and its related crash /
# result documents to the S3 URL, then delete the local file.
HERE = os.path.abspath(os.path.dirname(__file__))
ROOT = os.path.join(HERE, '..')
s3 = boto3.resource('s3')
brome_config_path = os.path.join(ROOT, "config", "brome.yml")
with open(brome_config_path) as fd:
    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary YAML tags; yaml.safe_load would be preferable for a plain
    # config file.
    config = yaml.load(fd)
DB_NAME = config['database']['mongo_database_name']
BUCKET_NAME = config['database']['s3_bucket_name']
ROOT_TB_RESULTS = config['project']['test_batch_result_path']
with DbSessionContext(DB_NAME) as session:
    # Fetch every test instance whose video is still stored locally.
    test_instance_list = session.query(Testinstance)\
        .filter(Testinstance.video_location == 'local')\
        .filter(Testinstance.video_capture_path != '')\
        .all()
    for test_instance in test_instance_list:
        file_path = os.path.join(
            ROOT_TB_RESULTS,
            test_instance.video_capture_path
        )
        try:
            data = open(file_path, 'rb')
        except FileNotFoundError:
            # Best-effort: skip records whose local file is already gone.
            print('{file_path} not found'.format(file_path=file_path))
            continue
        print('[*]Uploading {file_path} to s3...'.format(file_path=file_path))
        # Fix: close the file handle once the upload finishes (the
        # original leaked one open file per uploaded video).
        with data:
            s3.Bucket(BUCKET_NAME).put_object(
                Key=test_instance.video_capture_path,
                Body=data
            )
        remote_file_name = \
            'https://s3-us-west-2.amazonaws.com/{bucket}/{path}' \
            .format(
                bucket=BUCKET_NAME,
                path=test_instance.video_capture_path
            )
        # Point the instance at the uploaded copy.
        test_instance.video_location = 's3'
        test_instance.video_capture_path = remote_file_name
        session.save(test_instance, safe=True)
        # Propagate the new location to the related crash documents...
        test_crash_list = session.query(Testcrash)\
            .filter(Testcrash.test_instance_id == test_instance.mongo_id)\
            .all()
        for test_crash in test_crash_list:
            test_crash.video_capture_path = remote_file_name
            test_crash.video_location = 's3'
            session.save(test_crash, safe=True)
        # ...and to the related result documents.
        test_result_list = session.query(Testresult)\
            .filter(Testresult.test_instance_id == test_instance.mongo_id)\
            .all()
        for test_result in test_result_list:
            test_result.video_capture_path = remote_file_name
            test_result.video_location = 's3'
            session.save(test_result, safe=True)
        # The remote copy is authoritative now; drop the local file.
        os.remove(file_path)
print('Done')
| [
"parent.j.f@gmail.com"
] | parent.j.f@gmail.com |
ea52af8e78e10745bd9ade47d2ab923070e1488d | 41ad3529ff9b6357d57468641b21d4ac02a6c0b5 | /DJI_wxPython.py | 446cff5dda74c28d7be7f1d217ff7ced444f789c | [] | no_license | gitpNser/learndata | 9153a3c305cf07904d5006059489c72c2388cee0 | a5059ae3346a17b833132e9295a0e73482b4a386 | refs/heads/master | 2020-06-13T11:25:56.676610 | 2019-11-28T14:47:43 | 2019-11-28T14:47:43 | 194,638,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,694 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 14 13:51:41 2019
wxPython plot
@author: pNser copy Dazhuang @NJU
"""
import datetime as dt
import myfinance as finance
import matplotlib.pyplot as plt
import pandas as pd
import _thread as thread
import wx
# Custom wx ID for the "Refresh" menu item/event below; the arbitrary
# value just needs to avoid clashing with wx's predefined IDs.
ID_EVENT_REFRESH = 9999
# Define a subclass of wx.Frame.
class StockFrame(wx.Frame):
    """Main window: a list of Dow Jones components with per-field plotting."""
    # Initial checked/unchecked state of the option checkboxes.
    option_list = {'open':True,'close':True,'high':False,'low':False,'volume':False}
    def __init__(self,title):
        wx.Frame.__init__(self,None,title=title,size=(430,600))
        # Create the status bar.
        self.CreateStatusBar()
        # Create the menu bar and its "File" menu.
        menuBar = wx.MenuBar()
        filemenu = wx.Menu()
        menuBar.Append(filemenu,"&File")
        # Add the "Refresh" item (with status-bar help text) and bind its handler.
        menuRefresh = filemenu.Append(ID_EVENT_REFRESH,"&Refresh","Refresh the price")
        self.Bind(wx.EVT_MENU,self.OnRefresh,menuRefresh)
        # Add the "Quit" item (with status-bar help text) and bind its handler.
        menuQuit = filemenu.Append(wx.ID_EXIT,"&Quit","Terminate the program")
        self.Bind(wx.EVT_MENU,self.OnQuit,menuQuit)
        # Install the menu bar once all its items are set up.
        self.SetMenuBar(menuBar)
        # Panel that hosts all the controls.
        panel = wx.Panel(self)
        # Horizontal sizer for the stock-code entry row.
        codeSizer = wx.BoxSizer(wx.HORIZONTAL)
        # Static text label, bottom-aligned inside codeSizer.
        labelText = wx.StaticText(panel,label="Stock Code:")
        codeSizer.Add(labelText,0,wx.ALIGN_BOTTOM)
        #TODO: need a better way to put a spacer here than this:
        #codeSizer.Add((10,10))
        # Text field with initial value "BA"; fires an event on Enter.
        codeText = wx.TextCtrl(panel,value="BA",style=wx.TE_PROCESS_ENTER)
        # Bind the Enter-key handler to that text field.
        self.Bind(wx.EVT_TEXT_ENTER,self.OnTextSubmitted,codeText)
        # Add the text field to codeSizer.
        codeSizer.Add(codeText)
        # Horizontal sizer for the option checkboxes.
        optionSizer = wx.BoxSizer(wx.HORIZONTAL)
        # One checkbox per option; bind the check handler and add each box.
        for key, value in self.option_list.items():
            checkBox = wx.CheckBox(panel,label=key.title())
            checkBox.SetValue(value)
            self.Bind(wx.EVT_CHECKBOX,self.OnChecked)
            optionSizer.Add(checkBox)
        # List control in report (multi-column) mode.
        self.list = wx.ListCtrl(panel,wx.NewId(),style=wx.LC_REPORT)
        # Build the column headers.
        self.createHeader()
        # Placeholder row shown while loading; bind double-click on items.
        pos = self.list.InsertItem(0,"__")
        self.list.SetItem(pos,1,"loading...")
        self.list.SetItem(pos,2,"__")
        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.OnDoubleClick,self.list)
        # Sizer for the bottom row of control buttons.
        ctrlSizer = wx.BoxSizer(wx.HORIZONTAL)
        ctrlSizer.Add((10,10))
        # Quit and Refresh buttons with their handlers and placement.
        buttonQuit = wx.Button(panel,-1,"Quit")
        self.Bind(wx.EVT_BUTTON,self.OnQuit,buttonQuit)
        ctrlSizer.Add(buttonQuit,1)
        buttonRefresh = wx.Button(panel,-1,"Refresh")
        self.Bind(wx.EVT_BUTTON,self.OnRefresh,buttonRefresh)
        ctrlSizer.Add(buttonRefresh,1,wx.LEFT|wx.BOTTOM)
        # Top-level vertical sizer holding all the sub-sizers.
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(codeSizer,0,wx.ALL,5)
        sizer.Add(optionSizer,0,wx.ALL,5)
        sizer.Add(self.list,-1,wx.ALL | wx.EXPAND,5)
        sizer.Add(ctrlSizer,0,wx.ALIGN_BOTTOM)
        # Final layout pass.
        panel.SetSizerAndFit(sizer)
        self.Center()
        #start to load data right after the window comes up
        self.OnRefresh(None)
    # Build the list's column headers.
    def createHeader(self):
        self.list.InsertColumn(0,"Symbol")
        self.list.InsertColumn(1,"Name")
        self.list.InsertColumn(2,"Last Trade")
    # Fill the list control with quote rows.
    def setData(self,data):
        """Rebuild the list from rows of dicts with 'code'/'name'/'price'."""
        self.list.ClearAll()
        self.createHeader()
        pos = 0
        for row in data:
            pos = self.list.InsertItem(pos+1,row['code'])
            self.list.SetItem(pos,1,row['name'])
            self.list.SetColumnWidth(1,-1)
            self.list.SetItem(pos,2,str(row['price']))
            if pos%2 == 0:
                #set new look and feel for odd lines
                self.list.SetItemBackgroundColour(pos,(134,225,249))
    # Plot the historical quotes for one stock code.
    def PlotData(self,code):
        """Fetch historical quotes for `code` and plot the checked fields."""
        quotes = finance.get_quotes_historical(code)
        fields = ['date','open','close','high','low','volume']
        dates = []
        # Convert the UNIX timestamps into 'YYYY-MM-DD' index labels.
        for i in range(0,len(quotes)):
            x = dt.datetime.utcfromtimestamp(int(quotes[i]['date']))
            y = dt.datetime.strftime(x,'%Y-%m-%d')
            dates.append(y)
        quotesdf = pd.DataFrame(quotes,index=dates,columns=fields)
        #remove unchecked fields
        fileds_to_drop = ['date']
        for key, value in self.option_list.items():
            if not value:
                fileds_to_drop.append(key)
        quotesdf = quotesdf.drop(fileds_to_drop,axis=1)
        quotesdf.plot()
        plt.show()
    # Handler: double-click on a list row plots that row's symbol.
    def OnDoubleClick(self,event):
        self.PlotData(event.GetText())
    # Handler: Enter pressed in the stock-code text field.
    def OnTextSubmitted(self,event):
        self.PlotData(event.GetString())
    # Handler: keep option_list in sync with the checkbox states.
    def OnChecked(self,event):
        checkBox = event.GetEventObject()
        text = checkBox.GetLabel().lower()
        self.option_list[text] = checkBox.GetValue()
    # Handler: Quit button or menu item.
    def OnQuit(self,event):
        self.Close()
        self.Destroy()
    # Handler: Refresh button or menu item -- reload quotes off-thread.
    def OnRefresh(self,event):
        thread.start_new_thread(self.retrieve_quotes,())
    # Fetch the DJI component quotes and populate the list.
    def retrieve_quotes(self):
        # NOTE(review): this runs on a worker thread (see OnRefresh) yet
        # setData/wx.MessageBox touch the UI directly; wxPython normally
        # requires marshalling such calls via wx.CallAfter -- confirm.
        data = finance.get_dji_list()
        if data:
            self.setData(data)
        else:
            wx.MessageBox('Download failed.','Message', wx.OK | wx.ICON_INFORMATION)
if __name__ == '__main__':
    app = wx.App(False)
    # Create the StockFrame instance titled "Dow Jons Industrial Average(^DJI)".
    # NOTE(review): "Jons" looks like a typo for "Jones" in the window title.
    top = StockFrame("Dow Jons Industrial Average(^DJI)")
    top.Show(True)
    app.MainLoop()
| [
"gitknt@outlook.com"
] | gitknt@outlook.com |
faea3e50e4e9b59dd76834845389c8df6f3fdee6 | 597b82737635e845fd5360e191f323669af1b2ae | /08_full_django/products/apps/products/views.py | d46ee65e2d2a815b3a101e18a5a2d6fa8c0eec5e | [] | no_license | twknab/learning-python | 1bd10497fbbe181a26f2070c147cb2fed6955178 | 75b76b2a607439aa2d8db675738adf8d3b8644df | refs/heads/master | 2021-08-08T08:50:04.337490 | 2017-11-10T00:28:45 | 2017-11-10T00:28:45 | 89,213,845 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | from django.shortcuts import render
from models import Product
# Create 3 Different Products:
"""
Note: You can see our object creation has been placed outside of our `index()`
method. This is because if we placed these creation events inside of our index method,
we'd have the same product created on every refresh.
"""
# NOTE(review): module-level statements still execute once per Python
# process (every import/server restart), inserting three new rows each
# time -- fine for this exercise, but verify before reusing the pattern.
Product.objects.create(name="Kool Aid",description="A powdered sugary drink.",price="1.00",cost="0.50",category="Beverage")
Product.objects.create(name="Lentil Burger",description="Lentils, onions and shallots pressed into a patty.",price="8.00",cost="3.50",category="Food")
Product.objects.create(name="French Fries",description="Organic potatos fried to a crisp and seasoned to perfection.",price="2.00",cost="1.00",category="Food")
def index(request):
"""Loads homepage."""
print "Loading homepage..."
print "Building instance off of `Product` model..."
# Stores all products:
products = Product.objects.all()
# Loop through products and print information:
print "///////////// P R O D U C T S /////////////"
for product in products:
print "- {} | {} | ${} (consumer cost)/ea | ${}/ea (business cost)".format(product.name, product.description, product.price, product.cost)
print "/////////////////////////////////////////////////"
return render(request, "products/index.html")
| [
"natureminded@users.noreply.github.com"
] | natureminded@users.noreply.github.com |
571a517e58b1431ce8c60ae03fc91e324600403c | 78c3082e9082b5b50435805723ae00a58ca88e30 | /03.AI알고리즘 소스코드/venv/Lib/site-packages/caffe2/python/operator_test/jsd_ops_test.py | 4a2b327ddeba63f3df666180047c9e0f4b3ed672 | [] | no_license | jinStar-kimmy/algorithm | 26c1bc456d5319578110f3d56f8bd19122356603 | 59ae8afd8d133f59a6b8d8cee76790fd9dfe1ff7 | refs/heads/master | 2023-08-28T13:16:45.690232 | 2021-10-20T08:23:46 | 2021-10-20T08:23:46 | 419,217,105 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py |
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
def entropy(p):
    """Bernoulli (binary) entropy of success probability ``p``, in nats."""
    failure = 1. - p
    return -p * np.log(p) - failure * np.log(failure)
def jsd(p, q):
    """Jensen-Shannon divergence between two Bernoulli distributions.

    Returned as a single-element list to match the operator's output
    signature.
    """
    mixture = p / 2. + q / 2.
    return [entropy(mixture) - entropy(p) / 2. - entropy(q) / 2.]
def jsd_grad(go, o, pq_list):
    """Gradient of the Bernoulli JSD w.r.t. ``p`` (scaled by ``go``).

    The second input ``q`` gets no gradient, hence the ``None`` entry.
    """
    p, q = pq_list
    mixture = (p + q) / 2.
    ratio = p * (1 - mixture) / (1 - p) / mixture
    return [np.log(ratio) / 2. * go, None]
class TestJSDOps(serial.SerializedTestCase):
    """Serialized hypothesis test for the Caffe2 ``BernoulliJSD`` operator.

    Verifies both the forward output and the gradient against the pure
    NumPy references ``jsd`` and ``jsd_grad`` defined above.
    """
    @serial.given(n=st.integers(10, 100), **hu.gcs_cpu_only)
    def test_bernoulli_jsd(self, n, gc, dc):
        # Random Bernoulli parameter vectors in (0, 1), float32 like the op.
        p = np.random.rand(n).astype(np.float32)
        q = np.random.rand(n).astype(np.float32)
        op = core.CreateOperator("BernoulliJSD", ["p", "q"], ["l"])
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[p, q],
            reference=jsd,
            output_to_grad='l',
            grad_reference=jsd_grad,
        )
| [
"gudwls3126@gmail.com"
] | gudwls3126@gmail.com |
2f97ef1ba97ac1029b7fbfb48d23a450ad5975a0 | 242086b8c6a39cbc7af3bd7f2fd9b78a66567024 | /python/PP4E-Examples-1.4/Examples/PP4E/Preview/tkinter102.py | 9ef018e04625b9629f7cbf85be5ab4f1d125109d | [] | no_license | chuzui/algorithm | 7537d0aa051ac4cbe9f6a7ca9a3037204803a650 | c3006b24c4896c1242d3ceab43ace995c94f10c8 | refs/heads/master | 2021-01-10T13:05:30.902020 | 2015-09-27T14:39:02 | 2015-09-27T14:39:02 | 8,404,397 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from tkinter import *
from tkinter.messagebox import showinfo
class MyGui(Frame):
    """A one-button Frame; pressing the button pops up an info dialog."""
    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        press_button = Button(self, text='press', command=self.reply)
        press_button.pack()
    def reply(self):
        # Button callback: show a modal message box.
        showinfo(title='popup', message='Button pressed!')
# Script entry point: build the widget, attach it to the default Tk root,
# and hand control to the event loop.
if __name__ == '__main__':
    window = MyGui()
    window.pack()
    window.mainloop()
| [
"zui"
] | zui |
40edf933fd8e4c11dab1a3ba5aba8f74c4db9100 | b8275e3c6f20a6e6638536a4333d2d1767ef868d | /coarsegraining-v8-new.py | 16e40b6eabec7be1dbaabc84c2e71a5ff67a8765 | [] | no_license | czang/Modules | 81bd991f8b57b2b99b59fe4408f95587cbf1a2b0 | 04fdbfcd46df0c8615d01d1c9d7bdb267f8a14c4 | refs/heads/master | 2022-06-10T04:21:47.045380 | 2019-03-20T01:36:02 | 2019-03-20T01:36:02 | 176,625,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,616 | py | #!/usr/bin/env python
# Copyright (c) 2011 The George Washington University
# Authors: Chongzhi Zang, Weiqun Peng
#
# This software is distributable under the terms of the GNU General
# Public License (GPL) v2, the text of which can be found at
# http://www.gnu.org/copyleft/gpl.html. Installing, importing or
# otherwise using this module constitutes acceptance of the terms of
# this License.
#
# Disclaimer
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Comments and/or additions are welcome (send e-mail to:
# wpeng@gwu.edu).
import re, os, sys, shutil
from math import *
from string import *
from optparse import OptionParser
import operator
from numpy import *
import scipy.stats
import bisect
import BED
import GenomeData
import get_total_tag_counts
import SeparateByChrom
'''version 8: 3-phase coarse graining, take the phase that has most 1 to next step. '''
def is_list_sorted(List):
    """
    Check if sorted in ascending order.
    input is a list of pure numbers.
    output: sorted = 1 or 0
    """
    # Early-exit pairwise scan.  The original kept scanning the whole list
    # after the first out-of-order pair and shadowed the builtin `sorted`.
    for index in range(len(List) - 1):
        if List[index] > List[index + 1]:
            return 0
    return 1
def start_list_correlation_r_rev(List, win, r, chrom_length):
    '''Autocorrelation of window occupancy at lag r (bp). List must be sorted.

    Bins the start coordinates into windows of size win, builds a 0/1
    occupancy vector, and returns E[a_i * a_{i+d}] - mean(a)^2 for lag
    d = r // win (0.0 when the lag does not fit in the chromosome).
    '''
    assert is_list_sorted(List) == 1
    x = List[0] % win
    d = int(r / win)
    n = int((chrom_length - x) / win)
    if n - d > 0:
        a = [0] * n
        for item in List:
            # Floor division keeps the index integral; the original "/"
            # produces a float under Python 3 true division and breaks a[i].
            i = int(item - x) // int(win)
            if 0 <= i < n:
                a[i] = 1
        # Renamed from `sum` to avoid shadowing the builtin used below.
        overlap = 0
        for i in range(n - d):
            overlap += a[i] * a[i + d]
        # scipy.stats.mean() was removed from SciPy; the plain arithmetic
        # mean is identical for this 0/1 occupancy vector.
        mean_a = float(sum(a)) / float(n)
        return float(overlap) / float(n - d) - mean_a * mean_a
    else:
        return 0.0
def start_list_correlation_function(List, win, chrom_length, name):
    '''Sample the start-position autocorrelation at lags 0..2 windows.

    Returns (xlist, ylist): the lag indices and the correlation value at
    each lag.  `name` is retained only for the disabled debug-file output.
    '''
    lags = []
    values = []
    n_points = min(3, int(chrom_length / win))
    for lag in range(n_points):
        lags.append(lag)
        values.append(start_list_correlation_r_rev(List, win, lag * win, chrom_length))
    return (lags, values)
def correlation_length_fit(xlist, ylist):
    '''Fit log(y) ~ a*x over points 1..end and return the decay length -1/a.

    y values are clamped to a tiny positive floor before taking the log so
    zeros/negatives do not blow up.
    '''
    assert len(xlist) == len(ylist)
    floor = 0.000000000001
    loglist = [log(max(y, floor)) for y in ylist]
    (a, b, r, stderr, x) = scipy.stats.linregress(xlist[1:], loglist[1:])
    return -1.0 / a
def graining(List, win, step, score):
    '''
    One coarse-graining pass, phase considered.  List must be sorted!
    List (list): sorted start coordinates of positive windows;
    win (int): current window size, the resolution this pass starts from;
    step (int): number of windows per graining unit;
    score (int): minimum positives in a unit for the unit to qualify.
    Every phase offset in [0, step) is tried and the phase producing the
    most qualified units wins; its unit start coordinates are returned.
    '''
    best = []
    last_start = List[-1]
    for phase in range(step):
        candidate = []
        unit_start = List[0] - phase * win
        idx = 0
        while unit_start <= last_start and idx < len(List):
            unit_end = unit_start + step * win
            scan = idx
            while scan <= len(List) - 1 and List[scan] < unit_end:
                scan += 1
            if scan - idx >= score:
                candidate.append(unit_start)
            idx = scan
            unit_start = unit_end
        if len(candidate) > len(best):
            best = candidate
    return best
def coarsegraining(List, win_min, step, score, genome_length):
    '''Repeatedly coarse-grain the start list until no window survives.

    Returns (Length_list, result_list): the surviving window count at each
    level, and the surviving start lists themselves (finest level first).
    NOTE: Python 2 print statement below; genome_length is only used by the
    commented-out correlation diagnostics.
    '''
    if (is_list_sorted(List) != 1):
        List.sort()
    Length_list = []
    Length_list.append(len(List))
    result_list = []
    result_list.append(List)
    win = win_min
    while len(List) > 0:
        #(xlist, ylist) = start_list_correlation_function(List, win, genome_length)
        print len(Length_list)-1, len(List)#, correlation_length_fit(xlist, ylist)
        List = graining(List, win, step, score)
        Length_list.append(len(List))
        if len(List) > 0:
            result_list.append(List)
        # Each level's graining unit becomes the next level's window size.
        win = win * step
    return Length_list, result_list
def union_islands_to_list(islandlist, win):
    '''Merge islands whose gap is at most win + 1; input and output are
    lists of BED island objects.

    NOTE: sorts islandlist in place by start and mutates the .end of
    islands that absorb a neighbour, exactly like the original code.
    '''
    islandlist.sort(key=operator.attrgetter('start'))
    merged = []
    current = islandlist[0]
    for nxt in islandlist[1:]:
        assert current.start <= nxt.start
        if nxt.start > current.end + 1 + win:
            merged.append(current)
            current = nxt
        else:
            current.end = max(current.end, nxt.end)
    merged.append(current)
    return merged
def write_islandlist(List, win):
    '''Turn each start coordinate into a BED3 island of uniform width:
    object.start = start, object.end = start + win - 1.  Returns the
    islands sorted by start.'''
    islands = [BED.BED3('', start, start + win - 1) for start in List]
    islands.sort(key=operator.attrgetter('start'))
    return islands
def backstep(islandlist, List, win):
    '''One step of trace-back: refine each island boundary by +/- one window
    (in place) according to which neighbouring starts appear in List, then
    union the islands with the finer-resolution windows built from List.'''
    finer_islands = write_islandlist(List, win)
    for island in islandlist:
        head_left = (island.start - win) in List
        head_right = island.start in List
        if head_left and head_right:
            island.start = island.start - win
        elif not (head_left or head_right):
            island.start = island.start + win
        tail_left = (island.end + 1 - win) in List
        tail_right = (island.end + 1) in List
        if tail_left and tail_right:
            island.end = island.end + win
        elif not (tail_left or tail_right):
            island.end = island.end - win
        assert island.start < island.end
    return union_islands_to_list(islandlist + finer_islands, win)
def traceback(List, win_min, step, level, genome_length, name):
    '''
    Input is a list of lists.
    Walks back down the coarse-graining levels (coarsest first).  While the
    correlation length keeps growing (and exceeds one window) the current
    islands are refined one step with backstep(); otherwise islands are
    rebuilt directly from the finer level.  Returns the final island list.
    NOTE: Python 2 print statements; if len(List) == 1 the loop body is
    never entered, so correlation_length_next being unset is harmless.
    '''
    win = win_min * pow(step, len(List)-1)
    islandlist = write_islandlist(List[-1], win)
    backlist = List[-1]
    (xlist, ylist) = start_list_correlation_function(backlist, win, genome_length, name)
    correlation_length = correlation_length_fit(xlist, ylist)
    print len(backlist), correlation_length
    if len(List) > 1:
        (xlist, ylist) = start_list_correlation_function(List[-2], win/step, genome_length, name)
        correlation_length_next = correlation_length_fit(xlist, ylist)
        print len(List[-2]), correlation_length_next
    i = 1
    while i < len(List)-level:
        backlist = List[-i-1]
        win = win/step
        if correlation_length > 1.0 and correlation_length_next >= correlation_length:
            print len(islandlist)
            islands = islandlist
            islandlist = backstep(islands, backlist, win)
            #if len(List) > i+1:
                #(xlist, ylist) = start_list_correlation_function(List[-i-2], win/step, genome_length, name)
                #print len(islandlist), correlation_length_fit(xlist, ylist)
        else:
            # Correlation shrank: discard refinement, restart from this level.
            islandlist = write_islandlist(backlist, win)
        correlation_length = correlation_length_next
        if len(List) > i+1:
            (xlist, ylist) = start_list_correlation_function(List[-i-2], win/step, genome_length, name)
            correlation_length_next = correlation_length_fit(xlist, ylist)
            print len(List[-i-2]), correlation_length_next
        else:
            # Sentinel: no finer level left, force the refine branch next pass.
            correlation_length_next = 10000
        i += 1
    return islandlist
def main(argv):
    '''
    Coarse graining test chr1, input must only have chr1
    '''
    parser = OptionParser()
    parser.add_option("-s", "--species", action="store", type="string", dest="species", help="mm8, hg18, background, etc", metavar="<str>")
    parser.add_option("-b", "--summarygraph", action="store",type="string", dest="summarygraph", help="summarygraph", metavar="<file>")
    parser.add_option("-w", "--window_size(bp)", action="store", type="int", dest="window_size", help="window_size(in bps)", metavar="<int>")
    parser.add_option("-g", "--graining_size", action="store", type="int", dest="step", help="graining unit size (>0)", metavar="<int>")
    parser.add_option("-e", "--score", action="store", type="int", dest="score", help="graining criterion, 0<score<=graining_size", metavar="<int>")
    parser.add_option("-t", "--mappable_faction_of_genome_size", action="store", type="float", dest="fraction", help="mapable fraction of genome size", metavar="<float>")
    parser.add_option("-f", "--output_file", action="store", type="string", dest="out_file", help="output file name", metavar="<file>")
    (opt, args) = parser.parse_args(argv)
    # 7 options, each taking a value = 14 argv tokens required.
    if len(argv) < 14:
        parser.print_help()
        sys.exit(1)
    print "Coarse-graining approach to identify broad enrichment islands from ChIP-Seq:"
    if opt.species in GenomeData.species_chroms.keys():
        print "Species: ", opt.species;
        print "Window_size: ", opt.window_size;
        print "Coarse graining step: ", opt.step;
        print "Coarse graining score:", opt.score;
        chroms = GenomeData.species_chroms[opt.species]
        total_read_count = get_total_tag_counts.get_total_tag_counts_bed_graph(opt.summarygraph);
        print "Total read count:", total_read_count
        genome_length = sum (GenomeData.species_chrom_lengths[opt.species].values());
        genome_length = int(opt.fraction * genome_length);
        # Expected read count per window under a uniform background model.
        average = float(total_read_count) * opt.window_size/genome_length;
        print "Effective genome length: ", genome_length;
        print "window average:", average;
        min_tags_in_window = int(average) + 1
        print "Minimum read count in a qualified window: ", min_tags_in_window
        print "Generate preprocessed data list";
        #read in the summary graph file
        bed_val = BED.BED(opt.species, opt.summarygraph, "BED_GRAPH");
        #generate the probscore summary graph file, only care about enrichment
        for chrom in chroms:
            if chrom in bed_val.keys() and len(bed_val[chrom]) > 0:
                chrom_length = GenomeData.species_chrom_lengths[opt.species][chrom]
                # Keep only windows whose read count beats the background.
                eligible_start_list = []
                for index in xrange(len(bed_val[chrom])):
                    read_count = bed_val[chrom][index].value;
                    if read_count >= min_tags_in_window:
                        eligible_start_list.append(bed_val[chrom][index].start)
                print "Coarse graining:";
                (result_list, island_list) = coarsegraining(eligible_start_list, opt.window_size, opt.step, opt.score, chrom_length)
                print "Trace back...", len(island_list)
                islands = traceback(island_list, opt.window_size, opt.step, 0, chrom_length, chrom)
                print len(islands), "islands found in", chrom
                # One temporary bedGraph per chromosome, merged below.
                f = open(chrom + ".islandstemp", 'w')
                for i in range(0, len(islands)):
                    f.write(chrom + '\t' + str(int(islands[i].start)) + '\t' + str(int(islands[i].end)) + '\t1\n')
                f.close()
        o = open(opt.out_file, 'w')
        o.write('track type=bedGraph name=' + opt.out_file + '\n')
        o.close()
        SeparateByChrom.combineAllGraphFiles(chroms, ".islandstemp", opt.out_file)
        SeparateByChrom.cleanup(chroms, ".islandstemp")
        #else:
            #print "input data error!"
    else:
        print "This species is not in my list!";
if __name__ == "__main__":
main(sys.argv) | [
"noreply@github.com"
] | czang.noreply@github.com |
f314aa92ae0309c5fc33503ef23d53144f73e8d1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03618/s585911965.py | 3cb657424c1ff0a4d384f5df252781e21eb2b757 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | import sys
sys.setrecursionlimit(10**7)
#input = sys.stdin.readline
from collections import Counter
def main():
    """Read a word from stdin and print C(n, 2) minus the number of
    equal-character position pairs, plus one."""
    word = input()
    length = len(word)
    pair_total = length * (length - 1) // 2
    counts = Counter(word)
    equal_pairs = sum(c * (c - 1) // 2 for c in counts.values())
    print(pair_total - equal_pairs + 1)
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
70e4f58dfd4f4803a9edb317dac94031b38f62ac | ac4eca2377cafd58b3da6c7bdae8e3eea9606e52 | /101-200/129.Sum Root to Leaf Numbers.py | 3fcb3828eb0d11e950cc8329f5bf86e95237e258 | [] | no_license | iscas-ljc/leetcode-medium | bf71a8d9a93af07d6863c67b10c824f6855c520c | 21b962825b1a386c60b319cbf94c0aecfa546008 | refs/heads/master | 2021-09-05T09:42:01.100811 | 2018-01-26T06:14:46 | 2018-01-26T06:14:46 | 111,070,794 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | class Solution(object):
    def sumNumbers(self, root):
        """LeetCode 129: return the sum of all root-to-leaf numbers.

        Each root-to-leaf path spells a decimal number whose digits are the
        node values; the DFS helper accumulates their total in self.result.
        """
        self.result=0
        self.sum(root,0)
        return self.result
    def sum(self,root,tile_now):
        # DFS helper: tile_now is the number spelled by the path so far.
        # Leaves (no children) contribute their completed number.
        if root:
            self.sum(root.left,tile_now*10+root.val)
            self.sum(root.right,tile_now*10+root.val)
            if not root.left and not root.right:
                self.result+=tile_now*10+root.val | [
"861218470@qq.com"
] | 861218470@qq.com |
add4acbdeed7d4915810b535fde6d55f196c1352 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/MT5_ID4146_for_PyTorch/transformers/tests/blenderbot/test_modeling_tf_blenderbot.py | 7bb25c5c6352028cbf31e6f66d8b5e4653697098 | [
"GPL-1.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 13,486 | py | # coding=utf-8
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_tf, require_tokenizers, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    """Builds tiny Blenderbot configs and inputs for the shared TF model
    tests; `parent` is the unittest case whose assert helpers are used."""
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for a randomly-populated tiny model;
        every encoder sequence is forced to end with the EOS token."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify that decoding with a past key/value cache matches a full
        forward pass over the concatenated sequence (on one sample)."""
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks and bundle the model inputs."""
    if attention_mask is None:
        # Attend everywhere except padding positions.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # First decoder position is always visible; later pad tokens are masked.
        first_step = tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8)
        later_steps = tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8)
        decoder_attention_mask = tf.concat([first_step, later_steps], axis=-1)
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return dict(
        input_ids=input_ids,
        decoder_input_ids=decoder_input_ids,
        attention_mask=attention_mask,
        decoder_attention_mask=decoder_attention_mask,
        head_mask=head_mask,
        decoder_head_mask=decoder_head_mask,
        cross_attn_head_mask=cross_attn_head_mask,
    )
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, unittest.TestCase):
    """Runs the shared TF model-test suite against the Blenderbot classes,
    plus Blenderbot-specific cache/embedding-resize checks."""
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_common_attributes(self):
        # Generative models must expose output embeddings and a bias dict;
        # plain models must expose neither.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in self.all_generative_model_classes:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_saved_model_creation(self):
        # This test is too long (>30sec) and makes fail the CI
        pass
    def test_resize_token_embeddings(self):
        # Shrinking, growing, and no-op resizes must keep the overlapping
        # embedding rows and the final-logits bias unchanged.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model(model.dummy_inputs)
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                old_final_logits_bias = model.get_bias()
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                new_final_logits_bias = model.get_bias()
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
                if old_final_logits_bias is not None and new_final_logits_bias is not None:
                    old_final_logits_bias = old_final_logits_bias["final_logits_bias"]
                    new_final_logits_bias = new_final_logits_bias["final_logits_bias"]
                    self.assertEqual(new_final_logits_bias.shape[0], 1)
                    self.assertEqual(new_final_logits_bias.shape[1], assert_size)
                    models_equal = True
                    for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()):
                        for p1, p2 in zip(old, new):
                            if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                                models_equal = False
                    self.assertTrue(models_equal)
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
    """If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
    if a is None and b is None:
        return True
    try:
        # tf.debugging.assert_near raises if the tensors differ and returns
        # None in eager mode on success.  The previous
        # `if assert_near(...): return True` followed by a bare `raise` made
        # the *success* path fall into `raise` with no active exception
        # (RuntimeError), which the except clause turned into a spurious
        # AssertionError.
        tf.debugging.assert_near(a, b, atol=atol)
        return True
    except Exception:
        if len(prefix) > 0:
            prefix = f"{prefix}: "
        raise AssertionError(f"{prefix}{a} != {b}")
def _long_tensor(tok_lst):
    """Wrap a Python list of token ids in an int32 tf.constant."""
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    """Slow end-to-end generation check against the pretrained 400M
    distilled Blenderbot checkpoint (downloads the model from the hub)."""
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_generation_from_long_input(self):
        # Pins the exact decoded reply for the fixed prompt above.
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
09c4d515d0e34930231eb3884a4c97df7a64efc9 | 3cef23043a4bf3bc2a37d952e51b1a9faeb76d0b | /setup.py | 0d6ff2da2b231576e669a8f68795badbaf0a0b64 | [
"MIT"
] | permissive | hiroaki-yamamoto/django-nghelp | 794bc103ecf5bb652363e3a1df530afa971ac46a | e15dc408a4a9205d23f9d68b6d10d7b9648dbd2e | refs/heads/master | 2020-07-29T21:41:23.972244 | 2018-01-15T04:30:49 | 2018-01-15T04:30:49 | 73,657,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | #!/usr/bin/env python
# coding=utf-8
"""Setup script."""
import sys
from setuptools import setup, find_packages
# Package metadata for django-nghelp.
dependencies = ("django>=1.11", "jinja2")
name = "django-nghelp"
desc = "AngularJS Frontend Helper for Django"
license = "MIT"
url = "https://github.com/hiroaki-yamamoto/django-nghelp.git"
keywords = "django AngularJS"
# Placeholder substituted by the release tooling.
version = "[VERSION]"
author = "Hiroaki Yamamoto"
author_email = "hiroaki@hysoftware.net"
if sys.version_info < (2, 7):
    # Fixed message typo: "earlier then" -> "earlier than".
    raise RuntimeError("Not supported on earlier than python 2.7.")
try:
    with open('README.rst') as readme:
        long_desc = readme.read()
except Exception:
    # Best effort: a missing/unreadable README only drops the long description.
    long_desc = None
setup(
    name=name,
    version=version,
    description=desc,
    long_description=long_desc,
    packages=find_packages(exclude=["tests"]),
    include_package_data=True,
    install_requires=dependencies,
    zip_safe=False,
    author=author,
    author_email=author_email,
    license=license,
    keywords=keywords,
    url=url,
    classifiers=[
        "Development Status :: 7 - Inactive",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5"
    ]
)
| [
"hiroaki@hysoftware.net"
] | hiroaki@hysoftware.net |
cfc948a7d33be8ee11ef65a155425af6c2ca5b4b | 9b5597492e57313712c0a842ef887940f92636cd | /judge/sessions/2018Individual/jillzhoujinjing@gmail.com/PB_02.py | c4f369a67b553603b661bba7006a5bf67a02439f | [] | no_license | onionhoney/codesprint | ae02be9e3c2354bb921dc0721ad3819539a580fa | fcece4daf908aec41de7bba94c07b44c2aa98c67 | refs/heads/master | 2020-03-11T11:29:57.052236 | 2019-05-02T22:04:53 | 2019-05-02T22:04:53 | 129,971,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | import sys
def find_one(data, min_p, max_p):
    """Binary search for the index of the single 1 in `data` within
    [min_p, max_p], where a 0 means the 1 lies to the right of that
    position and a 2 means it lies to the left."""
    span = max_p - min_p
    if span == 0:
        return max_p
    if span == 1:
        if data[max_p] == 1:
            return max_p
        if data[min_p] == 1:
            return min_p
    probe = (min_p + max_p) // 2
    if data[probe] == 2:
        return find_one(data, min_p, probe)
    if data[probe] == 0:
        return find_one(data, probe, max_p)
    return probe
# --- stdin driver ----------------------------------------------------------
# Skips the first input line (presumably the case count -- TODO confirm),
# then groups the remaining lines three per case; each line is a row of
# space-separated ints indexed 0..99.  For every complete case it prints
# the positions of the 1 in the three rows on one line.
first_line = 1
cur_case = 0
cur_case_line = 0
all_data = {}
for line in sys.stdin:
    if first_line:
        # Header line: skipped.
        first_line = 0
    else:
        cur_case_line +=1
        if cur_case not in all_data:
            all_data[cur_case] = [list(map(int,line.strip('\n').split(' ')))]
        else:
            all_data[cur_case].append(list(map(int,line.strip('\n').split(' '))))
        if cur_case_line == 3:
            # Three rows collected: emit this case's answer.
            result = ""
            for i in range(3):
                data = all_data[cur_case][i]
                if i < 2:
                    result += str(find_one(data,0,99)) + ' '
                else:
                    result += str(find_one(data,0,99))
            print(result)
            cur_case += 1
            cur_case_line = 0
| [
"root@codesprintla.com"
] | root@codesprintla.com |
fc7543ea66d355520c01da7ccccdf80ef36ac870 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/vz/rttoremotetaboodef.py | 8da1f704e9c779723ea236e386fb9080f4b59445 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,535 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtToRemoteTabooDef(Mo):
"""
A target relation to a remote taboo definition.
"""
meta = TargetRelationMeta("cobra.model.vz.RtToRemoteTabooDef", "cobra.model.fv.RemotePolHolder")
meta.moClassName = "vzRtToRemoteTabooDef"
meta.rnFormat = "rtfvToRemoteTabooDef-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "None"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x2001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.vz.TabooDef")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtfvToRemoteTabooDef-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 12459, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2085
prop.defaultValueStr = "fvRemotePolHolder"
prop._addConstant("fvRemotePolHolder", None, 2085)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 12458, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
    """Construct the managed object under *parentMoOrDn*.

    tDn is the single naming property (declared via PropMeta above) and is
    forwarded positionally to the base Mo constructor; any extra keyword
    arguments become initial property values.
    """
    namingVals = [tDn]
    Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
4da5b58f9a136d6c3af1a22f37d1afcfd4426cfa | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/addons/presets/pov/lamp/05_(4000K)_100W_Metal_Halide.py | 2a0ba8c12164925c6a8d0a9b11741a6f94a1b5f1 | [
"Unlicense",
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause"
] | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 603 | py | #After 1962
#Common uses: outdoor lighting where good color rendering is needed, television/film lighting, sports fields, car headlights, flood lights, heavy flashlights, green house applications
import bpy
# Configure the active object's lamp as a spot light approximating a
# 100W metal-halide source (~4000K).
bpy.context.object.data.type = 'SPOT'
lampdata = bpy.context.object.data

lampdata.show_cone = True  # visualize the spot cone in the viewport
lampdata.spot_size = 0.6
lampdata.spot_blend = 0.9
# Slightly blueish white; presumably matched to a 4000K spectrum — TODO confirm
lampdata.color = (0.9490196108818054, 0.9882352948188782, 1.0)
lampdata.energy = 20.98293#9000lm/21.446(=lux)*0.004*6.25(distance) *2 for distance is the point of half strength
lampdata.distance = 0.025
lampdata.falloff_type = 'INVERSE_SQUARE'
| [
"admin@irradiate.net"
] | admin@irradiate.net |
a05114189934ab4ce9304762f6164d71185d9253 | b08437d7346ace7b8282e0fd7f292c6eb64b0b0d | /columbus/people.py | 67aeeba949705935adf341a8e3fd70edd696bd3c | [] | no_license | opengovernment/municipal-scrapers-us | d3b3df303d8999882d3a05f1bec831ba705e8e2b | 54c28c0fa1319d712674c559e3df3d79a975768f | refs/heads/master | 2021-01-14T13:17:07.931879 | 2014-01-10T00:55:48 | 2014-01-10T00:55:48 | 15,881,694 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | # Copyright (c) Sunlight Labs, 2013, under the terms of the BSD-3 clause
# license.
#
# Contributors:
#
# - Paul Tagliamonte <paultag@sunlightfoundation.com>
from pupa.scrape import Scraper, Legislator, Committee
from collections import defaultdict
import lxml.html
HOMEPAGE = "http://council.columbus.gov/"
class ColumbusPersonScraper(Scraper):
    """Pupa scraper for Columbus city council members.

    Walks the council homepage, follows each member link, and builds a
    Legislator record from the member's profile page.
    """

    def lxmlize(self, url):
        # Fetch *url* and return an lxml tree with links made absolute.
        entry = self.urlopen(url)
        page = lxml.html.fromstring(entry)
        page.make_links_absolute(url)
        return page

    def get_people(self):
        # NOTE(review): this yields the generator object itself rather than
        # the individual Legislator records — presumably the framework
        # flattens iterables; confirm against pupa's Scraper contract.
        yield self.cbus_scrape_people()

    def scrape_homepage(self, folk):
        # *folk* is an <a> element pointing at one member's profile page.
        url = folk.attrib['href']
        page = self.lxmlize(url)
        # Portrait image served from the council's member-upload folder.
        image = page.xpath(
            "//img[contains(@src, 'uploadedImages/City_Council/Members/')]"
        )[0].attrib['src']
        name = page.xpath("//div[@id='ctl00_ctl00_Body_body_cntCommon']/h3")
        name, = name  # exactly one <h3> expected; unpacking enforces that
        # Biography: every paragraph of the main content division.
        bio = "\n\n".join([x.text_content() for x in page.xpath(
            "//div[@id='ctl00_ctl00_Body_body_cntCommon']/p"
        )])
        leg = Legislator(name=name.text,
                         post_id='member',
                         biography=bio,
                         image=image)
        leg.add_source(url)
        return leg

    def cbus_scrape_people(self):
        # Member links live in the first gutter list of the left column.
        page = self.lxmlize(HOMEPAGE)
        folks = page.xpath("//div[@class='col-left']/div[2]//"
                           "div[@class='gutter_text'][1]//"
                           "ul[@class='gutterlist']/li//a")
        for folk in folks:
            yield self.scrape_homepage(folk)
| [
"tag@pault.ag"
] | tag@pault.ag |
def merge_the_tools(string, k):
    """Split *string* into len(string)/k chunks of length k and print each
    chunk with duplicate characters removed (first occurrence kept).

    Python 2 source (xrange / print statement / raw_input below).
    """
    n = len(string)
    for i in xrange(0, n, k):
        u = []  # characters already seen in this chunk, in order
        for j in string[i:i+k]:
            if not j in u:
                u.append(j)
        print ''.join(u)

if __name__ == '__main__':
    string, k = raw_input(), int(raw_input())
    merge_the_tools(string, k)
| [
"e-mail@charles.art.br"
] | e-mail@charles.art.br |
2811d0bfb278246fdc5e3e4c58809850f9b47c87 | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/py2/otp/speedchat/SCElement.pyc.py | 8c609d912b2c1b4d4c2d607f57a05e84e233b5b4 | [] | no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,083 | py | # 2013.08.22 22:15:47 Pacific Daylight Time
# Embedded file name: otp.speedchat.SCElement
from pandac.PandaModules import *
from direct.gui.DirectGui import *
from direct.task import Task
from SCConstants import *
from SCObject import SCObject
from direct.showbase.PythonUtil import boolEqual
from otp.otpbase import OTPGlobals
class SCElement(SCObject, NodePath):
    """Base class for a single entry in a SpeedChat menu.

    Decompiled Python 2 source; behaviour preserved verbatim.  An element
    owns a DirectButton that is (re)built lazily by finalize(), tracks
    whether it is 'active' and 'viewable', and reports mouse focus changes
    to its parent menu.
    """
    __module__ = __name__
    font = OTPGlobals.getInterfaceFont()
    SerialNum = 0  # class-wide counter giving every element a unique name

    def __init__(self, parentMenu = None):
        SCObject.__init__(self)
        self.SerialNum = SCElement.SerialNum
        SCElement.SerialNum += 1
        node = hidden.attachNewNode('SCElement%s' % self.SerialNum)
        NodePath.__init__(self, node)
        self.FinalizeTaskName = 'SCElement%s_Finalize' % self.SerialNum
        self.parentMenu = parentMenu
        self.__active = 0
        self.__viewable = 1
        # dimensions for which the button was last actually built
        self.lastWidth = 0
        self.lastHeight = 0
        self.setDimensions(0, 0)
        # padding around the label text, in text-space units
        self.padX = 0.25
        self.padZ = 0.1

    def destroy(self):
        # Tear down in reverse order: state, base, button, scene graph node.
        if self.isActive():
            self.exitActive()
        SCObject.destroy(self)
        if hasattr(self, 'button'):
            self.button.destroy()
            del self.button
        self.parentMenu = None
        self.detachNode()
        return

    def setParentMenu(self, parentMenu):
        self.parentMenu = parentMenu

    def getParentMenu(self):
        return self.parentMenu

    def getDisplayText(self):
        # Subclasses must supply the label shown on the button.
        self.notify.error('getDisplayText is pure virtual, derived class must override')

    def onMouseEnter(self, event):
        if self.parentMenu is not None:
            self.parentMenu.memberGainedInputFocus(self)
        return

    def onMouseLeave(self, event):
        if self.parentMenu is not None:
            self.parentMenu.memberLostInputFocus(self)
        return

    def onMouseClick(self, event):
        pass

    def enterActive(self):
        self.__active = 1

    def exitActive(self):
        self.__active = 0

    def isActive(self):
        return self.__active

    def hasStickyFocus(self):
        return 0

    def setViewable(self, viewable):
        # Only notify the parent when viewability actually flips.
        if not boolEqual(self.__viewable, viewable):
            self.__viewable = viewable
            if self.parentMenu is not None:
                self.parentMenu.memberViewabilityChanged(self)
        return

    def isViewable(self):
        return self.__viewable

    def getMinDimensions(self):
        # Measure the label with a throwaway TextNode and add the padding.
        text = TextNode('SCTemp')
        text.setFont(SCElement.font)
        dText = self.getDisplayText()
        text.setText(dText)
        bounds = text.getCardActual()
        width = abs(bounds[1] - bounds[0]) + self.padX
        height = abs(bounds[3] - bounds[2]) + 2.0 * self.padZ
        return (width, height)

    def setDimensions(self, width, height):
        self.width = float(width)
        self.height = float(height)
        if (self.lastWidth, self.lastHeight) != (self.width, self.height):
            self.invalidate()

    def invalidate(self):
        # Mark dirty and propagate upward, unless the parent is mid-finalize.
        SCObject.invalidate(self)
        parentMenu = self.getParentMenu()
        if parentMenu is not None:
            if not parentMenu.isFinalizing():
                parentMenu.invalidate()
        return

    def enterVisible(self):
        SCObject.enterVisible(self)
        self.privScheduleFinalize()

    def exitVisible(self):
        SCObject.exitVisible(self)
        self.privCancelFinalize()

    def privScheduleFinalize(self):
        # Defer (re)building the button to a one-shot task.

        def finalizeElement(task, self = self):
            if self.parentMenu is not None:
                if self.parentMenu.isDirty():
                    # parent will finalize us as part of its own pass
                    return Task.done
            self.finalize()
            return Task.done

        taskMgr.remove(self.FinalizeTaskName)
        taskMgr.add(finalizeElement, self.FinalizeTaskName, priority=SCElementFinalizePriority)

    def privCancelFinalize(self):
        taskMgr.remove(self.FinalizeTaskName)

    def finalize(self, dbArgs = {}):
        """(Re)build the DirectButton for this element when dirty.

        *dbArgs* lets subclasses override DirectButton keyword arguments;
        'rolloverColor'/'pressedColor' are extracted and applied to the
        button's frame states.  NOTE: mutable default and dict.has_key are
        retained from the decompiled Python 2 source.
        """
        if not self.isDirty():
            return
        SCObject.finalize(self)
        if hasattr(self, 'button'):
            self.button.destroy()
            del self.button
        halfHeight = self.height / 2.0
        textX = 0
        if dbArgs.has_key('text_align'):
            if dbArgs['text_align'] == TextNode.ACenter:
                textX = self.width / 2.0
        args = {'text': self.getDisplayText(),
         'frameColor': (0, 0, 0, 0),
         'rolloverColor': self.getColorScheme().getRolloverColor() + (1,),
         'pressedColor': self.getColorScheme().getPressedColor() + (1,),
         'text_font': OTPGlobals.getInterfaceFont(),
         'text_align': TextNode.ALeft,
         'text_fg': self.getColorScheme().getTextColor() + (1,),
         'text_pos': (textX, -0.25 - halfHeight, 0),
         'relief': DGG.FLAT,
         'pressEffect': 0}
        args.update(dbArgs)
        # rollover/pressed colors are not DirectButton kwargs: pull them out
        # and set the frame-state colors directly below.
        rolloverColor = args['rolloverColor']
        pressedColor = args['pressedColor']
        del args['rolloverColor']
        del args['pressedColor']
        btn = DirectButton(parent=self, frameSize=(0,
         self.width,
         -self.height,
         0), **args)
        btn.frameStyle[DGG.BUTTON_ROLLOVER_STATE].setColor(*rolloverColor)
        btn.frameStyle[DGG.BUTTON_DEPRESSED_STATE].setColor(*pressedColor)
        btn.updateFrameStyle()
        btn.bind(DGG.ENTER, self.onMouseEnter)
        btn.bind(DGG.EXIT, self.onMouseLeave)
        btn.bind(DGG.B1PRESS, self.onMouseClick)
        self.button = btn
        self.lastWidth = self.width
        self.lastHeight = self.height
        self.validate()

    def __str__(self):
        return '%s: %s' % (self.__class__.__name__, self.getDisplayText())
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\otp\speedchat\SCElement.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:15:47 Pacific Daylight Time
| [
"sweep14@gmail.com"
] | sweep14@gmail.com |
b5d4d113d19205f7d6853527b3a819ef30f6852e | e52f3d13abccbfdfc037ae447137759d02fef75f | /python_work/ch10/practice/10-9 silent cAd.py | 6c63722dd8156520a7af6d091fb279957d89495d | [] | no_license | WeiS49/Python-Crash-Course | 7b8bc10dff58a06b0ef42dfa686ff72d4020547d | 2a3c11c28429e49fa48018b252dacb4129587950 | refs/heads/master | 2023-01-06T19:23:39.577634 | 2020-11-06T06:12:03 | 2020-11-06T06:12:03 | 302,330,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py |
# Exercise 10-9 ("silent cats and dogs"): print each file's contents,
# failing silently when a file is missing.
filename1 = 'dogs.txt'
filename2 = 'catss.txt'

try:
    with open(filename1) as f1:
        dogs = f1.read()
except FileNotFoundError:
    # deliberately silent: the exercise asks for no error message
    pass
else:
    print(dogs)

try:
    with open(filename2) as f2:
        cats = f2.read()
except FileNotFoundError:
    pass
else:
    print(cats)
| [
"swh_1C3@outlook.com"
] | swh_1C3@outlook.com |
9babe1cec243911c031cb699ad3568337d98a49f | 3ac84fa46db498e914f6e1aaf2eff490a63807a1 | /devstack/python-glanceclient/tests/v1/test_client.py | e6f8de9ac73bd482707ab0f64ee408614a6f4cec | [
"Apache-2.0"
] | permissive | onsoku/horizon_review | 3c5f1a8f863142f3f724f59771ad39604bca4c20 | 80cca0badc61b4754ef2c10f23a0ee48cd227445 | refs/heads/master | 2020-05-20T11:11:25.625186 | 2015-01-21T01:01:59 | 2015-01-21T01:01:59 | 29,002,325 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | # Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from glanceclient.v1 import client
class ClientTest(testtools.TestCase):
def setUp(self):
super(ClientTest, self).setUp()
def test_endpoint(self):
gc = client.Client("http://example.com")
self.assertEqual("http://example.com", gc.http_client.endpoint)
def test_versioned_endpoint(self):
gc = client.Client("http://example.com/v1")
self.assertEqual("http://example.com", gc.http_client.endpoint)
def test_versioned_endpoint_with_minor_revision(self):
gc = client.Client("http://example.com/v1.1")
self.assertEqual("http://example.com", gc.http_client.endpoint)
| [
"onsoku@onsoku.sakura.ne.j"
] | onsoku@onsoku.sakura.ne.j |
def linear_search(array, left, right, item):
    """Scan array[left:right] for *item*.

    Returns the 1-based position (index + 1) of the first occurrence at or
    after *left*, or -1 when *item* does not appear — the same contract as
    the original recursive version.

    Rewritten iteratively: the recursion used O(n) call-stack depth and
    raised RecursionError for arrays longer than Python's recursion limit.
    """
    for position in range(left, right):
        if array[position] == item:
            return position + 1
    return -1
if __name__ == '__main__':
    # Demo: 22 sits at index 3, so the 1-based result printed is 4.
    array = [1, 3, 9, 22, 5, 0, 3, 3, 4, 90]
    item = 22
    right = len(array)
    print(linear_search(array, 0, right, item))
| [
"itssvinayak@gmail.com"
] | itssvinayak@gmail.com |
cead79d631ff3bbb21c62d18f19d5f993f9bef46 | ea89417f702100a5541263e7342f3e9ad76c5423 | /hmc/algorithm/utils/lib_utils_system.py | c2202116205fc6cb1bb27a984a42f5894f328225 | [
"MIT"
] | permissive | c-hydro/fp-hmc | 130b887a27072b7c967fca27f0568d68a3c06143 | 30bd5d0a3e4921410990968bb3a4eaf601b5b804 | refs/heads/main | 2023-06-11T12:22:07.480759 | 2022-12-07T12:20:51 | 2022-12-07T12:20:51 | 216,776,851 | 0 | 0 | MIT | 2022-12-02T12:10:25 | 2019-10-22T09:39:05 | Python | UTF-8 | Python | false | false | 2,819 | py | """
Library Features:
Name: lib_utils_system
Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Library
import logging
import os
import pandas as pd
import shutil
from os.path import exists
from hmc.algorithm.default.lib_default_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to split full path in root and filename
def split_path(file_path):
    """Return (directory, filename) for *file_path* via os.path.split."""
    return os.path.split(file_path)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create folder (and check if folder exists)
def create_folder(path_name=None, path_delimiter=None):
    """Create a folder (with parents) when *path_name* is set.

    When *path_delimiter* is given, only the portion of *path_name* before
    the first delimiter is created (used to strip template suffixes).

    Uses makedirs(exist_ok=True) instead of the original exists()-then-
    makedirs sequence, which raced when two processes created the same
    folder concurrently.
    """
    if not path_name:
        return
    if path_delimiter:
        target = path_name.split(path_delimiter)[0]
    else:
        target = path_name
    os.makedirs(target, exist_ok=True)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to delete folder (and check if folder exists)
def delete_folder(path_name):
    """Remove the directory tree at *path_name*, ignoring removal errors."""
    if not os.path.exists(path_name):
        return
    # ignore_errors also copes with read-only entries inside the tree
    shutil.rmtree(path_name, ignore_errors=True)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to delete file
def delete_file(file_path, file_delete=True):
    """Remove *file_path* when *file_delete* is truthy and it is a regular file."""
    if not file_delete:
        return
    if os.path.isfile(file_path):
        os.remove(file_path)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to copy file from source to destination
def copy_file(file_path_src, file_path_dest):
    """Copy source over destination (replacing it); warn when the source is missing."""
    if not os.path.exists(file_path_src):
        log_stream.warning(' ===> Copy file failed! Source not available!')
        return
    if file_path_src == file_path_dest:
        return  # copying onto itself is a no-op
    if os.path.exists(file_path_dest):
        os.remove(file_path_dest)
    shutil.copy2(file_path_src, file_path_dest)
# -------------------------------------------------------------------------------------
| [
"fabio.delogu@cimafoundation.org"
] | fabio.delogu@cimafoundation.org |
f1e5599f0ea8c38d38ebc0353bab439d1b5250a3 | e6f1578ae2eff1b14a0734d8ebf9cbfaf13a7338 | /steem/tests/test_steem.py | 101488980cd4acf91af38304bdd16248a7e9894d | [
"MIT"
] | permissive | clayop/python-steem | d672be71f5662658bad141a835c2155af1e1ffb5 | ad1298d81a8116d000fe21cf1528cfa444c0e38d | refs/heads/master | 2021-01-11T21:09:15.789961 | 2017-02-16T01:51:19 | 2017-02-16T01:51:19 | 79,258,712 | 0 | 0 | null | 2017-01-17T18:36:08 | 2017-01-17T18:36:08 | null | UTF-8 | Python | false | false | 3,508 | py | import unittest
from steem.steem import (
Steem,
MissingKeyError,
InsufficientAuthorityError
)
from steem.post import (
Post,
VotingInvalidOnArchivedPost
)
identifier = "@xeroc/piston"
testaccount = "xeroc"
wif = "5KkUHuJEFhN1RCS3GLV7UMeQ5P1k5Vu31jRgivJei8dBtAcXYMV"
steem = Steem(nobroadcast=True, wif=wif)
class Testcases(unittest.TestCase):
    """Smoke tests for the high-level Steem client wrapper.

    The module-level client is constructed with nobroadcast=True, so no
    transaction ever reaches the network.  Authority/key errors are
    expected for the throwaway test account and are deliberately ignored.
    """

    def __init__(self, *args, **kwargs):
        super(Testcases, self).__init__(*args, **kwargs)
        self.post = Post(steem, identifier)

    def test_getOpeningPost(self):
        self.post._getOpeningPost()

    def test_reply(self):
        try:
            self.post.reply(body="foobar", title="", author=testaccount, meta=None)
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_upvote(self):
        try:
            self.post.upvote(voter=testaccount)
        except VotingInvalidOnArchivedPost:
            pass
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_downvote(self, weight=-100, voter=testaccount):
        # NOTE(review): unittest never supplies these extra parameters and
        # the body ignores them; they look vestigial.
        try:
            self.post.downvote(voter=testaccount)
        except VotingInvalidOnArchivedPost:
            pass
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_edit(self):
        try:
            steem.edit(identifier, "Foobar")
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_post(self):
        try:
            steem.post("title", "body", meta={"foo": "bar"}, author=testaccount)
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_create_account(self):
        try:
            steem.create_account("xeroc-create",
                                 creator=testaccount,
                                 password="foobar foo bar hello world",
                                 storekeys=False
                                 )
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_transfer(self):
        try:
            steem.transfer("fabian", 10, "STEEM", account=testaccount)
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_withdraw_vesting(self):
        try:
            steem.withdraw_vesting(10, account=testaccount)
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_transfer_to_vesting(self):
        try:
            steem.transfer_to_vesting(10, to=testaccount, account=testaccount)
        except InsufficientAuthorityError:
            pass
        except MissingKeyError:
            pass

    def test_get_replies(self):
        steem.get_replies(author=testaccount)

    def test_get_posts(self):
        steem.get_posts()

    def test_get_categories(self):
        steem.get_categories(sort="trending")

    def test_get_balances(self):
        steem.get_balances(testaccount)

    def test_getPost(self):
        # Both an identifier string and an author/permlink dict resolve to
        # the same canonical URL.
        self.assertEqual(Post(steem, "@xeroc/piston").url,
                         "/piston/@xeroc/piston")
        self.assertEqual(Post(steem, {"author": "@xeroc", "permlink": "piston"}).url,
                         "/piston/@xeroc/piston")


if __name__ == '__main__':
    unittest.main()
| [
"mail@xeroc.org"
] | mail@xeroc.org |
64958beb389d7d629f311e4dd7a2be1ee78700b3 | 8a029afcaee3080728be4648c96865d5847d3247 | /dnnseg/bin/csv_to_zs.py | aa845946c363e52e15e0ba430e5d9e548ee97eb0 | [] | no_license | coryshain/dnnseg | 623d8c3583a996e496e77123a3296c8731f40613 | 30eed4b031adb3fcef80f98c6f037fd993aa36ca | refs/heads/master | 2021-06-11T00:47:25.975714 | 2021-02-22T14:18:51 | 2021-02-22T14:18:51 | 143,957,434 | 10 | 7 | null | 2020-12-12T01:34:11 | 2018-08-08T03:41:41 | Python | UTF-8 | Python | false | false | 1,351 | py | import pandas as pd
import sys
import os
import argparse
from dnnseg.data import segment_table_to_csv
def check_path(path):
    """True iff *path* is a CSV whose basename starts with 'embeddings_pred'."""
    filename = os.path.basename(path)
    return filename.startswith('embeddings_pred') and path.endswith('.csv')
if __name__ == '__main__':
    argparser = argparse.ArgumentParser('''
Convert CSV segment tables into a format acceptable for the Zerospeech 2015 TDE eval.
''')
    argparser.add_argument('paths', nargs='+', help='Paths to CSV files or directories to recursively search for CSV segment table files.')
    argparser.add_argument('-v', '--verbose', action='store_true', help='Write progress report to standard error.')
    args = argparser.parse_args()

    # Collect candidates: explicit files plus a recursive walk of any
    # directories, keeping only embeddings_pred*.csv segment tables.
    csvs = set()
    for path in args.paths:
        if check_path(path):
            csvs.add(path)
        else:
            for root, _, files in os.walk(path):
                for f in files:
                    p = os.path.join(root, f)
                    if check_path(p):
                        csvs.add(p)
    csvs = sorted(list(csvs))

    for csv in csvs:
        if args.verbose:
            sys.stderr.write('Converting file %s...\n' % csv)
        # Segment tables are space-separated despite the .csv extension.
        df = pd.read_csv(csv, sep=' ')
        out = segment_table_to_csv(df, verbose=args.verbose)
        # Write next to the source file, swapping .csv for .classes.
        with open(csv[:-4] + '.classes', 'w') as f:
            f.write(out)
| [
"cory.shain@gmail.com"
] | cory.shain@gmail.com |
6654efa5ed917fd3dad0f2bd8dc8c8d4adb389aa | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /22_专题/邻位交换(adjacent swap)/minAdjacentSwap.py | 5fd74ce9c63a814cda310f86986f1290e369323b | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | from typing import MutableSequence
from collections import defaultdict, deque
from sortedcontainers import SortedList
def minAdjacentSwap1(nums1: MutableSequence[int], nums2: MutableSequence[int]) -> int:
    """Minimum number of adjacent swaps needed to make the two arrays equal.

    Maps each element of nums1 to the position of its leftmost unused
    counterpart in nums2, then counts inversion pairs of the mapped
    sequence — O(n log n) overall.  Returns -1 when the multisets differ.
    Note: nums1 is overwritten in place with the mapped indices.
    """

    def countInversionPair(nums: MutableSequence[int]) -> int:
        """Count inversion pairs in O(n log n) using a sorted list."""
        res = 0
        sl = SortedList()
        for num in reversed(nums):
            pos = sl.bisect_left(num)
            res += pos
            sl.add(num)
        return res

    # Duplicate-aware mapping: e.g. target nums2 = [1, 3, 2, 1, 4] stands
    # for sorted positions [0, 1, 2, 3, 4]; then nums1 = [1, 1, 3, 4, 2]
    # maps to [0, 3, 1, 4, 2].
    mapping = defaultdict(deque)
    for index, num in enumerate(nums2):
        mapping[num].append(index)

    for index, num in enumerate(nums1):
        if not mapping[num]:
            return -1
        mapped = mapping[num].popleft()
        nums1[index] = mapped

    res = countInversionPair(nums1)
    return res
def minAdjacentSwap2(nums1: MutableSequence[int], nums2: MutableSequence[int]) -> int:
    """Minimum adjacent swaps to make the two arrays equal (greedy, O(n^2)).

    For each element of nums1, the leftmost matching element still in nums2
    is located; its index equals the number of adjacent swaps needed to
    bubble it to the front of the remaining suffix.  Returns -1 when an
    element has no counterpart.  Note: nums2 is consumed (elements popped).

    Bug fix: list.index raises ValueError for a missing element — it never
    returns -1 — so the original `if index == -1` branch was dead and a
    mismatched pair crashed instead of returning -1.
    """
    res = 0
    for num in nums1:
        try:
            index = nums2.index(num)  # leftmost remaining occurrence
        except ValueError:
            return -1
        res += index
        nums2.pop(index)  # element is now "in place"; drop it
    return res
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
a6b76bd42fb9759cfcf2ab24708ea976b1f32dea | 876de904572c611b8cbad21f50877cdc812f2946 | /校招/程序员面试金典_08.11. 硬币_动态规划_数学推导.py | 4f0da7d68805a5a34aa80b20e883a371d0147ad4 | [
"MIT"
] | permissive | QDylan/Learning- | 66a33de0e15f26672fb63c0b393866721def27ae | f09e0aa3de081883b4a7ebfe4d31b5f86f24b64f | refs/heads/master | 2023-02-08T02:34:26.616116 | 2020-12-25T05:02:32 | 2020-12-25T05:02:32 | 263,805,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | # -*- coding: utf-8 -*-
"""
@Time : 2020/5/19 11:52
@Author : QDY
@FileName: 程序员面试金典_08.11. 硬币_动态规划_数学推导.py
硬币。给定数量不限的硬币,币值为25分、10分、5分和1分,编写代码计算n分有几种表示法。(结果可能会很大,你需要将结果模上1000000007)
示例1:
输入: n = 5
输出:2
解释: 有两种方式可以凑成总金额:
5=5
5=1+1+1+1+1
示例2:
输入: n = 10
输出:4
解释: 有四种方式可以凑成总金额:
10=10
10=5+5
10=5+1+1+1+1+1
10=1+1+1+1+1+1+1+1+1+1
"""
class Solution:
    def waysToChange(self, n: int) -> int:
        """Count the representations of n cents with coins {1, 5, 10, 25},
        modulo 1000000007.

        Closed-form counting: fix the number of quarters; for the remainder
        r = 10*r1 + 5*r2 + b (b < 5, r2 < 2), summing the dime/nickel
        splits over all dime counts gives (r1 + 1) * (r1 + r2 + 1).
        """
        total = 0
        for quarters in range(n // 25 + 1):
            remainder = n - 25 * quarters
            tens, leftover = divmod(remainder, 10)
            fives = leftover // 5
            total += (tens + 1) * (tens + fives + 1)
        return total % 1000000007
| [
"qdy960411@outlook.com"
] | qdy960411@outlook.com |
fea9164721f5b30483782a20953571c70a3e5445 | 5456502f97627278cbd6e16d002d50f1de3da7bb | /chrome/test/chromeos/autotest/files/client/deps/page_cycler_dep/common.py | 6e20002004cd79d7c23eb14889b0cbb24282c71a | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/Chromium_7C66 | 72d108a413909eb3bd36c73a6c2f98de1573b6e5 | c8649ab2a0f5a747369ed50351209a42f59672ee | refs/heads/master | 2023-03-16T12:51:40.231959 | 2017-12-20T10:38:26 | 2017-12-20T10:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os, sys

# Locate the autotest client directory two levels above this file and make
# it importable just long enough to pull in setup_modules.
dirname = os.path.dirname(sys.modules[__name__].__file__)
client_dir = os.path.abspath(os.path.join(dirname, "../../"))
sys.path.insert(0, client_dir)
import setup_modules
sys.path.pop(0)  # keep sys.path clean once setup_modules is loaded
setup_modules.setup(base_path=client_dir,
                    root_module_name="autotest_lib.client")
| [
"lixiaodonglove7@aliyun.com"
] | lixiaodonglove7@aliyun.com |
f4234d8bcd1e3006a4816853a9195259c292e1ad | 487fdbff5f51c67f401d108691291a64acc16f94 | /day05.py | ea905b22ccf49f9e7e7f3d0242f81878304af93e | [
"MIT"
] | permissive | Yalfoosh/Advent-of-Code-2019 | 66f8f5d897cd67eaa561789332b033bb8aaa608e | d09be66a14f05b1ae086cdefd22dc414bc45d562 | refs/heads/master | 2020-09-26T23:01:54.199679 | 2019-12-11T20:07:33 | 2019-12-11T20:07:33 | 226,362,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | from copy import deepcopy
import re
# Splits the comma-separated intcode program, tolerating whitespace.
input_splitter = re.compile(r",\s*")

# Number of parameters consumed by each intcode opcode.
parameter_count_dict =\
    {
        1: 3, 2: 3,   # add / multiply
        3: 1, 4: 1,   # input / output
        5: 2, 6: 2,   # jump-if-true / jump-if-false
        7: 3, 8: 3,   # less-than / equals
        99: 1         # halt
    }
def load(path: str = "input/05.txt"):
    """Read a comma-separated intcode program from *path* as a list of ints."""
    with open(path) as source:
        program_text = source.read().strip()
    return [int(token) for token in re.split(r",\s*", program_text)]
def int_to_flag_vector(value, max_size=3):
    """Decompose *value* into decimal digits, least significant first,
    zero-padded to *max_size* entries (intcode parameter-mode flags)."""
    digits = []
    for _ in range(max_size):
        value, digit = divmod(value, 10)
        digits.append(digit)
    return digits
def execute(memory, position: int, program_input, **kwargs):
    """Execute one intcode instruction at *position* and return the next
    instruction pointer; len(memory) signals program halt.

    *program_input* feeds opcode 3; opcode 4 prints, optionally prefixed
    with kwargs['prefix'].  Parameter-mode flags: 0 = position (address),
    1 = immediate value.
    """
    command_code = memory[position]
    flags = [0, 0, 0]

    # Opcodes above 99 carry parameter-mode flags in the higher digits.
    if command_code > 99:
        flags = int_to_flag_vector(command_code // 100)
        command_code = command_code % 100

    if command_code == 99:
        return len(memory)

    parameters = memory[position + 1: position + 1 + parameter_count_dict[command_code]]

    # Dereference position-mode parameters.  Write targets stay as raw
    # addresses, hence the min(2, ...) cap and the opcode-3 exemption.
    for pi in range(min(2, len(parameters))):
        if flags[pi] == 0 and command_code != 3:
            parameters[pi] = memory[parameters[pi]]

    if command_code == 1:    # add
        memory[parameters[2]] = parameters[0] + parameters[1]
    elif command_code == 2:  # multiply
        memory[parameters[2]] = parameters[0] * parameters[1]
    elif command_code == 3:  # read input
        memory[parameters[0]] = program_input
    elif command_code == 4:  # write output
        prefix = kwargs.get("prefix", None)
        prefix = "" if prefix is None else "[{}]\t".format(str(prefix))
        print(f"{prefix}{parameters[0]}")
    elif command_code == 5:  # jump-if-true
        if parameters[0] != 0:
            return parameters[1]
    elif command_code == 6:  # jump-if-false
        if parameters[0] == 0:
            return parameters[1]
    elif command_code == 7:  # less-than
        memory[parameters[2]] = 1 if (parameters[0] < parameters[1]) else 0
    elif command_code == 8:  # equals
        memory[parameters[2]] = 1 if (parameters[0] == parameters[1]) else 0
    else:
        # Unknown opcode: treat as halt.
        return len(memory)

    return position + len(parameters) + 1
# Prep: parse the program once; each part runs on a fresh copy because the
# interpreter mutates memory in place.
original_instructions = load()

# First part: run the program with input 1.
first_input = 1
instructions = deepcopy(original_instructions)

i = 0
while i < len(instructions):
    i = execute(instructions, i, first_input, prefix=1)

# Second part: run the program with input 5.
second_input = 5
instructions = deepcopy(original_instructions)

i = 0
while i < len(instructions):
    i = execute(instructions, i, second_input, prefix=2)
| [
"suflajmob@gmail.com"
] | suflajmob@gmail.com |
2a782607fc5405942c8b407da66dac1b362a86ee | a689a72d3699883d7b58bd4ee3103373270bd0d5 | /2019/1907/190729/03.py | 850dc8a393dc5a820c51f124f881c5182b0d0006 | [] | no_license | Oizys18/Algo | 4670748c850dc9472b6cfb9f828a3ccad9c18981 | 45caafe22a8a8c9134e4ff3b227f5f0be94eefe7 | refs/heads/master | 2022-05-11T08:35:06.812539 | 2022-05-07T01:30:41 | 2022-05-07T01:30:41 | 202,690,024 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from base64 import b64decode as b64
# For each of T test cases, base64-decode a line and print it;
# str(bytes)[2:-1] strips the b'...' wrapper from the bytes repr.
for t in range(int(input())):
    print(f"#{t+1} {str(b64(input()))[2:-1]}")
| [
"oizys18@gmail.com"
] | oizys18@gmail.com |
7f93a40d800f6f3ebe0d7340f2b8d405f789ae15 | 7b54edc142d01a7385f22f9e127f4790bd88f92b | /info/utils/common.py | 8c5b49ee0121b2afd02e40193b0bca6cad980950 | [] | no_license | amourbrus/newsWebFlask | 20b73b39da3739133ea235b92b88e09639fbcfd8 | 359ec394ce2eacd3dde330d83f490efc0f354b5d | refs/heads/master | 2020-03-21T10:16:19.084405 | 2018-07-12T15:04:54 | 2018-07-12T15:04:54 | 138,442,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | import functools
from flask import g
from flask import session
from info.models import User
def index_class(index):
    """Map a ranking index (0/1/2) to its CSS class name; '' for anything else."""
    for position, class_name in ((0, "first"), (1, "second"), (2, "third")):
        if index == position:
            return class_name
    return ""
def user_login_data(f):
    """Decorator: resolve the logged-in user from the session and expose it
    as flask.g.user (None when nobody is logged in) before calling *f*."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        user_id = session.get("user_id")
        # Default when the session carries no user id
        user = None
        if user_id:
            # Look up the current user by primary key
            user = User.query.get(user_id)
        g.user = user
        return f(*args, **kwargs)
    return wrapper
| [
"2338336776@qq.com"
] | 2338336776@qq.com |
8819e864d5603b9e54a6f258e6a7c04e9483aff8 | 71d4fafdf7261a7da96404f294feed13f6c771a0 | /mainwebsiteenv/lib/python2.7/site-packages/phonenumbers/data/region_TC.py | 45cf24186cb81296740b4d9674973cb859470f7f | [] | no_license | avravikiran/mainwebsite | 53f80108caf6fb536ba598967d417395aa2d9604 | 65bb5e85618aed89bfc1ee2719bd86d0ba0c8acd | refs/heads/master | 2021-09-17T02:26:09.689217 | 2018-06-26T16:09:57 | 2018-06-26T16:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | """Auto-generated file, do not edit by hand. TC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TC = PhoneMetadata(id='TC', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='[5689]\\d{9}', possible_length=(10,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='649(?:712|9(?:4\\d|50))\\d{4}', example_number='6497121234', possible_length=(10,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='649(?:2(?:3[129]|4[1-7])|3(?:3[1-389]|4[1-8])|4[34][1-3])\\d{4}', example_number='6492311234', possible_length=(10,), possible_length_local_only=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002345678', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002345678', possible_length=(10,)),
personal_number=PhoneNumberDesc(national_number_pattern='5(?:00|22|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
voip=PhoneNumberDesc(national_number_pattern='64971[01]\\d{4}', example_number='6497101234', possible_length=(10,), possible_length_local_only=(7,)),
national_prefix='1',
national_prefix_for_parsing='1',
leading_digits='649')
| [
"me15btech11039@iith.ac.in.com"
] | me15btech11039@iith.ac.in.com |
20b6947db74c68d8d54fdb41a06eb308501cfc49 | f88fc26caeb21c42f7f630892a53b8b3906a1f6c | /exp_kitti_raft_fixation/train/train_deepv2d_fixation.py | dae5a7dcce0acc8808d46d13900e1b9f157df501 | [] | no_license | TWJianNuo/RAFT_epp | 53bb2e39f5e248e35dbe0979c00ffe9597b0bed7 | 8a8bb850e8f25626100d21006c35ff4e5b058ab1 | refs/heads/main | 2023-08-05T11:39:01.915642 | 2021-09-21T23:12:32 | 2021-09-21T23:12:32 | 334,811,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,007 | py | from __future__ import print_function, division
import os, sys
project_rootdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.insert(0, project_rootdir)
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import time
from torch.utils.data import DataLoader
from exp_kitti_eigen_fixation.dataset_kitti_eigen_fixation import KITTI_eigen
from exp_kitti_eigen_fixation.eppflowenet.EppFlowNet import EppFlowNet
from torch.utils.tensorboard import SummaryWriter
import torch.utils.data as data
from PIL import Image, ImageDraw
from core.utils.flow_viz import flow_to_image
from core.utils.utils import InputPadder, forward_interpolate, tensor2disp, tensor2rgb, vls_ins
from posenet import Posenet
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.autograd import Variable
from tqdm import tqdm
try:
    from torch.cuda.amp import GradScaler
# BUG FIX: the original used a bare `except:`, which also swallows
# SystemExit/KeyboardInterrupt and unrelated failures inside torch.
# Only an ImportError means "PyTorch < 1.6" and should trigger the fallback.
except ImportError:
    # dummy GradScaler for PyTorch < 1.6: keeps the training loop unchanged
    # while making mixed-precision loss scaling a no-op.
    class GradScaler:
        """No-op stand-in mirroring the torch.cuda.amp.GradScaler interface."""
        def __init__(self):
            pass
        def scale(self, loss):
            # Nothing to scale without AMP; return the loss untouched.
            return loss
        def unscale_(self, optimizer):
            pass
        def step(self, optimizer):
            optimizer.step()
        def update(self):
            pass
# exclude extremely large displacements
MAX_FLOW = 400  # upper bound (pixels) on flow magnitude treated as valid
SUM_FREQ = 100  # console/scalar-summary logging period (steps)
VAL_FREQ = 5000  # default validation period; NOTE: train() re-binds a local VAL_FREQ
class SSIM(nn.Module):
    """Layer to compute the SSIM loss between a pair of images.

    forward(x, y) returns the per-pixel dissimilarity (1 - SSIM) / 2,
    clamped to [0, 1], using 3x3 local windows with reflection padding.
    """
    def __init__(self):
        super(SSIM, self).__init__()
        # 3x3 average pools realize the local-window mean/variance statistics.
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        # Reflection padding keeps the output the same spatial size as the input.
        self.refl = nn.ReflectionPad2d(1)
        # Standard SSIM stabilization constants (K1=0.01, K2=0.03, L=1).
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2
    def forward(self, x, y):
        padded_x = self.refl(x)
        padded_y = self.refl(y)
        mean_x = self.mu_x_pool(padded_x)
        mean_y = self.mu_y_pool(padded_y)
        # E[x^2] - E[x]^2 and the analogous covariance term.
        var_x = self.sig_x_pool(padded_x ** 2) - mean_x ** 2
        var_y = self.sig_y_pool(padded_y ** 2) - mean_y ** 2
        cov_xy = self.sig_xy_pool(padded_x * padded_y) - mean_x * mean_y
        numerator = (2 * mean_x * mean_y + self.C1) * (2 * cov_xy + self.C2)
        denominator = (mean_x ** 2 + mean_y ** 2 + self.C1) * (var_x + var_y + self.C2)
        dissimilarity = (1 - numerator / denominator) / 2
        return torch.clamp(dissimilarity, 0, 1)
def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def fetch_optimizer(args, model):
    """Create the AdamW optimizer and its one-cycle learning-rate scheduler.

    Reads lr / wdecay / epsilon / num_steps from *args* and returns the
    (optimizer, scheduler) pair.
    """
    adamw = optim.AdamW(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.wdecay,
        eps=args.epsilon,
    )
    # +100 steps of head-room so the schedule outlives the training loop.
    one_cycle = optim.lr_scheduler.OneCycleLR(
        adamw,
        args.lr,
        args.num_steps + 100,
        pct_start=0.05,
        cycle_momentum=False,
        anneal_strategy='linear',
    )
    return adamw, one_cycle
class Logger:
    """TensorBoard logging helper: scalar curves plus composite image panels."""
    def __init__(self, logpath):
        # Writer is created lazily (create_summarywriter) so only rank 0
        # ever opens an event file.
        self.logpath = logpath
        self.writer = None
    def create_summarywriter(self):
        """Instantiate the SummaryWriter once; later calls are no-ops."""
        if self.writer is None:
            self.writer = SummaryWriter(self.logpath)
    def write_vls(self, data_blob, outputs, flowselector, reprojselector, step):
        """Compose a training visualization grid and write it to TensorBoard."""
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()
        figmask_flow = tensor2disp(flowselector, vmax=1, viewind=0)
        figmask_reprojection = tensor2disp(reprojselector, vmax=1, viewind=0)
        insvls = vls_ins(img1, insmap)
        # Disparity-style (1/depth) renderings for prediction and ground truth.
        depthpredvls = tensor2disp(1 / outputs[('depth', 2)], vmax=0.15, viewind=0)
        depthgtvls = tensor2disp(1 / data_blob['depthmap'], vmax=0.15, viewind=0)
        flowvls = flow_to_image(outputs[('flowpred', 2)][0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=10)
        imgrecon = tensor2rgb(outputs[('reconImg', 2)], viewind=0)
        # Four stacked rows, two panels each.
        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid1 = np.concatenate([np.array(figmask_flow), np.array(figmask_reprojection)], axis=1)
        img_val_mid2 = np.concatenate([np.array(depthpredvls), np.array(depthgtvls)], axis=1)
        img_val_mid3 = np.concatenate([np.array(imgrecon), np.array(flowvls)], axis=1)
        img_val = np.concatenate([np.array(img_val_up), np.array(img_val_mid1), np.array(img_val_mid2), np.array(img_val_mid3)], axis=0)
        self.writer.add_image('predvls', (torch.from_numpy(img_val).float() / 255).permute([2, 0, 1]), step)
        X = self.vls_sampling(np.array(insvls), img2, data_blob['depthvls'], data_blob['flowmap'], data_blob['insmap'], outputs)
        self.writer.add_image('X', (torch.from_numpy(X).float() / 255).permute([2, 0, 1]), step)
    def vls_sampling(self, img1, img2, depthgt, flowmap, insmap, outputs):
        """Render a 2x2 matplotlib figure comparing original/fixed flow and
        the epipolar sampling locations; returns it as an RGBA ndarray."""
        depthgtnp = depthgt[0].squeeze().cpu().numpy()
        insmapnp = insmap[0].squeeze().cpu().numpy()
        flowmapnp = flowmap[0].cpu().numpy()
        h, w, _ = img1.shape
        xx, yy = np.meshgrid(range(w), range(h), indexing='xy')
        # Only pixels with ground-truth depth get scattered.
        selector = (depthgtnp > 0)
        flowx = outputs[('flowpred', 2)][0, 0].detach().cpu().numpy()
        flowy = outputs[('flowpred', 2)][0, 1].detach().cpu().numpy()
        flowxf = flowx[selector]
        flowyf = flowy[selector]
        floworgx = outputs['org_flow'][0, 0].detach().cpu().numpy()
        floworgy = outputs['org_flow'][0, 1].detach().cpu().numpy()
        floworgxf = floworgx[selector]
        floworgyf = floworgy[selector]
        xxf = xx[selector]
        yyf = yy[selector]
        df = depthgtnp[selector]
        # Pick one random foreground (insmap > 0) pixel on the stride-4 grid.
        slRange_sel = (np.mod(xx, 4) == 0) * (np.mod(yy, 4) == 0) * selector * (insmapnp > 0)
        dsratio = 4
        if np.sum(slRange_sel) > 0:
            xxfsl = xx[slRange_sel]
            yyfsl = yy[slRange_sel]
            rndidx = np.random.randint(0, xxfsl.shape[0], 1).item()
            xxfsl_sel = xxfsl[rndidx]
            yyfsl_sel = yyfsl[rndidx]
            # sample_pts are in [-1, 1] grid_sample coordinates; map to pixels.
            slvlsxx_fg = (outputs['sample_pts'][0, :, int(yyfsl_sel / dsratio), int(xxfsl_sel / dsratio), 0].detach().cpu().numpy() + 1) / 2 * w
            slvlsyy_fg = (outputs['sample_pts'][0, :, int(yyfsl_sel / dsratio), int(xxfsl_sel / dsratio), 1].detach().cpu().numpy() + 1) / 2 * h
        else:
            slvlsxx_fg = None
            slvlsyy_fg = None
        # Same again for a random background (insmap == 0) pixel.
        slRange_sel = (np.mod(xx, 4) == 0) * (np.mod(yy, 4) == 0) * selector * (insmapnp == 0)
        if np.sum(slRange_sel) > 0:
            xxfsl = xx[slRange_sel]
            yyfsl = yy[slRange_sel]
            rndidx = np.random.randint(0, xxfsl.shape[0], 1).item()
            xxfsl_sel = xxfsl[rndidx]
            yyfsl_sel = yyfsl[rndidx]
            slvlsxx_bg = (outputs['sample_pts'][0, :, int(yyfsl_sel / dsratio), int(xxfsl_sel / dsratio), 0].detach().cpu().numpy() + 1) / 2 * w
            slvlsyy_bg = (outputs['sample_pts'][0, :, int(yyfsl_sel / dsratio), int(xxfsl_sel / dsratio), 1].detach().cpu().numpy() + 1) / 2 * h
            gtposx = xxfsl_sel + flowmapnp[0, yyfsl_sel, xxfsl_sel]
            gtposy = yyfsl_sel + flowmapnp[1, yyfsl_sel, xxfsl_sel]
        else:
            slvlsxx_bg = None
            slvlsyy_bg = None
        cm = plt.get_cmap('magma')
        rndcolor = cm(1 / df / 0.15)[:, 0:3]
        fig = plt.figure(figsize=(16, 9))
        canvas = FigureCanvasAgg(fig)
        fig.add_subplot(2, 2, 1)
        plt.scatter(xxf, yyf, 3, rndcolor)
        plt.imshow(img1)
        plt.title("Input")
        fig.add_subplot(2, 2, 2)
        plt.scatter(xxf + floworgxf, yyf + floworgyf, 3, rndcolor)
        plt.imshow(img2)
        plt.title("Original Prediction")
        fig.add_subplot(2, 2, 3)
        plt.scatter(xxf + flowxf, yyf + flowyf, 3, rndcolor)
        plt.imshow(img2)
        plt.title("Fixed Prediction")
        fig.add_subplot(2, 2, 4)
        if slvlsxx_fg is not None and slvlsyy_fg is not None:
            plt.scatter(slvlsxx_fg, slvlsyy_fg, 3, 'b')
            plt.scatter(slvlsxx_fg[16], slvlsyy_fg[16], 3, 'g')
        # NOTE(review): this guard re-checks the *_fg variables but plots the
        # *_bg ones — if fg exists while bg is None this raises, and if only
        # bg exists it is silently skipped. Likely a copy-paste slip; confirm
        # and change the condition to slvlsxx_bg/slvlsyy_bg.
        if slvlsxx_fg is not None and slvlsyy_fg is not None:
            plt.scatter(slvlsxx_bg, slvlsyy_bg, 3, 'b')
            plt.scatter(slvlsxx_bg[16], slvlsyy_bg[16], 3, 'g')
            plt.scatter(gtposx, gtposy, 3, 'r')
        plt.imshow(img2)
        # "Arae" is a typo for "Area" in the rendered title (runtime string,
        # left unchanged here).
        plt.title("Sampling Arae")
        fig.tight_layout()  # Or equivalently,  "plt.tight_layout()"
        canvas.draw()
        buf = canvas.buffer_rgba()
        plt.close()
        X = np.asarray(buf)
        # Image.fromarray(X).show()
        return X
    def write_vls_eval(self, data_blob, outputs, tagname, step):
        """Same panel as write_vls but tagged per-sample for evaluation runs."""
        img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
        insmap = data_blob['insmap'][0].squeeze().numpy()
        insvls = vls_ins(img1, insmap)
        depthpredvls = tensor2disp(1 / outputs[('depth', 2)], vmax=0.15, viewind=0)
        depthgtvls = tensor2disp(1 / data_blob['depthmap'], vmax=0.15, viewind=0)
        flowvls = flow_to_image(outputs[('flowpred', 2)][0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=10)
        imgrecon = tensor2rgb(outputs[('reconImg', 2)], viewind=0)
        img_val_up = np.concatenate([np.array(insvls), np.array(img2)], axis=1)
        img_val_mid2 = np.concatenate([np.array(depthpredvls), np.array(depthgtvls)], axis=1)
        img_val_mid3 = np.concatenate([np.array(imgrecon), np.array(flowvls)], axis=1)
        img_val = np.concatenate([np.array(img_val_up), np.array(img_val_mid2), np.array(img_val_mid3)], axis=0)
        self.writer.add_image('{}_predvls'.format(tagname), (torch.from_numpy(img_val).float() / 255).permute([2, 0, 1]), step)
        X = self.vls_sampling(np.array(insvls), img2, data_blob['depthvls'], data_blob['flowmap'], data_blob['insmap'], outputs)
        self.writer.add_image('{}_X'.format(tagname), (torch.from_numpy(X).float() / 255).permute([2, 0, 1]), step)
    def write_dict(self, results, step):
        """Write every entry of *results* as a scalar at *step*."""
        for key in results:
            self.writer.add_scalar(key, results[key], step)
    def close(self):
        self.writer.close()
@torch.no_grad()
def validate_kitti(model, args, eval_loader, logger, group, total_steps, isdeepv2dpred=False):
    """Perform validation using the KITTI-2015 (train) split.

    Accumulates the mean absolute error of the predicted relative depth
    (log-depth normalized by the ego-translation magnitude) over background
    pixels that carry ground truth, all-reducing across ranks when
    distributed.  Returns {'reld': value} on rank 0, otherwise None.

    (Fixes the original's duplicated stray docstring line and the
    "Peform" typo.)
    """
    model.eval()
    gpu = args.gpu
    # eval_reld[0] accumulates the summed loss, eval_reld[1] the sample count.
    eval_reld = torch.zeros(2).cuda(device=gpu)
    for val_id, data_blob in enumerate(tqdm(eval_loader)):
        image1 = data_blob['img1'].cuda(gpu) / 255.0
        image2 = data_blob['img2'].cuda(gpu) / 255.0
        intrinsic = data_blob['intrinsic'].cuda(gpu)
        insmap = data_blob['insmap'].cuda(gpu)
        depthpred = data_blob['depthpred'].cuda(gpu)
        posepred = data_blob['posepred'].cuda(gpu)
        selfpose_gt = data_blob['rel_pose'].cuda(gpu)
        depthgt = data_blob['depthmap'].cuda(gpu)
        # Relative depth target: log(depth) - log(|t|), i.e. depth expressed
        # in units of the ground-truth translation magnitude.
        reldepth_gt = torch.log(depthgt + 1e-10) - torch.log(torch.sqrt(torch.sum(selfpose_gt[:, 0:3, 3] ** 2, dim=1, keepdim=True))).unsqueeze(-1).unsqueeze(-1).expand([-1, -1, args.evalheight, args.evalwidth])
        outputs = model(image1, image2, depthpred, intrinsic, posepred, insmap)
        if isdeepv2dpred:
            predreld = outputs[('relativedepth', 2)]
        else:
            predreld = outputs[('org_relativedepth', 2)]
        # Score only static (background, insmap == 0) pixels with ground truth.
        selector = ((depthgt > 0) * (insmap == 0)).float()
        depthloss = torch.sum(torch.abs(predreld - reldepth_gt) * selector) / (torch.sum(selector) + 1)
        eval_reld[0] += depthloss
        eval_reld[1] += 1
        # Periodically dump visualization panels (rank 0 / deepv2d branch only).
        if not(logger is None) and np.mod(val_id, 20) == 0 and isdeepv2dpred:
            seq, frmidx = data_blob['tag'][0].split(' ')
            tag = "{}_{}".format(seq.split('/')[-1], frmidx)
            logger.write_vls_eval(data_blob, outputs, tag, total_steps)
    if args.distributed:
        dist.all_reduce(tensor=eval_reld, op=dist.ReduceOp.SUM, group=group)
    if args.gpu == 0:
        eval_reld[0] = eval_reld[0] / eval_reld[1]
        print("in {} eval samples: Absolute Relative Depth Loss: {:7.3f}".format(eval_reld[1].item(), eval_reld[0].item()))
        return {'reld': float(eval_reld[0].item())}
    else:
        return None
def read_splits():
    """Read the train/test split entry lists from the shared splits folder.

    Returns (train_entries, evaluation_entries) with trailing newlines
    stripped.  Uses context managers so the split files are closed — the
    original opened them without ever closing the handles.
    """
    split_root = os.path.join(project_rootdir, 'exp_pose_mdepth_kitti_eigen/splits')
    with open(os.path.join(split_root, 'train_files.txt'), 'r') as f:
        train_entries = [x.rstrip('\n') for x in f]
    with open(os.path.join(split_root, 'test_files.txt'), 'r') as f:
        evaluation_entries = [x.rstrip('\n') for x in f]
    return train_entries, evaluation_entries
def get_reprojection_loss(img1, insmap, outputs, ssim):
    """Photometric reconstruction loss over dynamic-object pixels.

    Averages an 0.85*SSIM + 0.15*L1 term across the two reconstruction
    scales, masked to pixels where the final reconstruction is non-empty
    and the instance map marks an object (insmap > 0).
    Returns (loss, selector).
    """
    final_recon = outputs[('reconImg', 2)]
    # Valid pixels: reconstruction produced something AND object instance.
    valid = ((final_recon.sum(dim=1, keepdim=True) != 0) * (insmap > 0)).float()
    denom = valid.sum() + 1
    total = 0
    for scale in (1, 2):
        recon = outputs[('reconImg', scale)]
        sim_term = ssim(recon, img1).mean(dim=1, keepdim=True)
        abs_term = torch.abs(recon - img1).mean(dim=1, keepdim=True)
        per_pixel = 0.85 * sim_term + 0.15 * abs_term
        total = total + (per_pixel * valid).sum() / denom
    # Average over the two scales.
    return total / 2, valid
def get_rdepth_loss(reldepth_gt, depthgt, outputs, insmap):
    """L1 relative-depth loss over static pixels with ground truth.

    Averages |pred - gt| across scales 1 and 2, masked to background
    (insmap == 0) pixels where depthgt > 0.  Returns (loss, selector).
    """
    mask = ((depthgt > 0) * (insmap == 0)).float()
    norm = torch.sum(mask) + 1
    per_scale = [
        torch.sum(torch.abs(outputs[('relativedepth', scale)] - reldepth_gt) * mask) / norm
        for scale in (1, 2)
    ]
    return sum(per_scale) / 2, mask
def train(gpu, ngpus_per_node, args):
    """Per-process training entry point (spawned once per GPU by mp.spawn).

    Builds the model (DDP when args.distributed), datasets/loaders, optimizer
    and schedulers, then runs the step loop with periodic logging, validation
    and checkpointing on rank 0.  Returns the path of the final checkpoint.
    """
    print("Using GPU %d for training" % gpu)
    args.gpu = gpu
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=ngpus_per_node, rank=args.gpu)
    model = EppFlowNet(args=args)
    if args.distributed:
        torch.cuda.set_device(args.gpu)
        # Split the global batch across processes.
        args.batch_size = int(args.batch_size / ngpus_per_node)
        model = nn.SyncBatchNorm.convert_sync_batchnorm(module=model)
        model = model.to(f'cuda:{args.gpu}')
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True, output_device=args.gpu)
    else:
        model = torch.nn.DataParallel(model)
        model.cuda()
    ssim = SSIM()
    logroot = os.path.join(args.logroot, args.name)
    print("Parameter Count: %d, saving location: %s" % (count_parameters(model), logroot))
    if args.restore_ckpt is not None:
        print("=> loading checkpoint '{}'".format(args.restore_ckpt))
        loc = 'cuda:{}'.format(args.gpu)
        checkpoint = torch.load(args.restore_ckpt, map_location=loc)
        model.load_state_dict(checkpoint, strict=False)
    model.train()
    train_entries, evaluation_entries = read_splits()
    train_dataset = KITTI_eigen(root=args.dataset_root, inheight=args.inheight, inwidth=args.inwidth, entries=train_entries, maxinsnum=args.maxinsnum,
                                depth_root=args.depth_root, depthvls_root=args.depthvlsgt_root, prediction_root=args.prediction_root, ins_root=args.ins_root,
                                istrain=True, muteaug=False, banremovedup=False)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None
    train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, pin_memory=True, num_workers=int(args.num_workers / ngpus_per_node), drop_last=True, sampler=train_sampler)
    eval_dataset = KITTI_eigen(root=args.dataset_root, inheight=args.evalheight, inwidth=args.evalwidth, entries=evaluation_entries, maxinsnum=args.maxinsnum,
                               depth_root=args.depth_root, depthvls_root=args.depthvlsgt_root, prediction_root=args.prediction_root, ins_root=args.ins_root, istrain=False)
    eval_sampler = torch.utils.data.distributed.DistributedSampler(eval_dataset) if args.distributed else None
    eval_loader = data.DataLoader(eval_dataset, batch_size=args.batch_size, pin_memory=True, num_workers=3, drop_last=True, sampler=eval_sampler)
    print("Training splits contain %d images while test splits contain %d images" % (train_dataset.__len__(), eval_dataset.__len__()))
    if args.distributed:
        group = dist.new_group([i for i in range(ngpus_per_node)])
    optimizer, scheduler = fetch_optimizer(args, model)
    total_steps = 0
    # Only rank 0 writes TensorBoard logs.
    if args.gpu == 0:
        logger = Logger(logroot)
        logger_evaluation = Logger(os.path.join(args.logroot, 'evaluation_eigen_background', args.name))
        logger_evaluation_org = Logger(os.path.join(args.logroot, 'evaluation_eigen_background', "{}_org".format(args.name)))
        logger.create_summarywriter()
        logger_evaluation.create_summarywriter()
        logger_evaluation_org.create_summarywriter()
    # NOTE: this local re-binding shadows the module-level VAL_FREQ constant.
    VAL_FREQ = 5000
    epoch = 0
    minreld = 100
    st = time.time()
    should_keep_training = True
    while should_keep_training:
        # NOTE(review): train_sampler is None in the non-distributed path, so
        # set_epoch would raise AttributeError there — confirm this script is
        # only ever run with args.distributed=True.
        train_sampler.set_epoch(epoch)
        for i_batch, data_blob in enumerate(train_loader):
            optimizer.zero_grad()
            image1 = data_blob['img1'].cuda(gpu) / 255.0
            image2 = data_blob['img2'].cuda(gpu) / 255.0
            intrinsic = data_blob['intrinsic'].cuda(gpu)
            insmap = data_blob['insmap'].cuda(gpu)
            depthgt = data_blob['depthmap'].cuda(gpu)
            depthpred = data_blob['depthpred'].cuda(gpu)
            posepred = data_blob['posepred'].cuda(gpu)
            selfpose_gt = data_blob['rel_pose'].cuda(gpu)
            # Relative-depth target: log(depth) - log(|ego translation|).
            reldepth_gt = torch.log(depthgt + 1e-10) - torch.log(torch.sqrt(torch.sum(selfpose_gt[:, 0:3, 3] ** 2, dim=1, keepdim=True))).unsqueeze(-1).unsqueeze(-1).expand([-1, -1, args.inheight, args.inwidth])
            outputs = model(image1, image2, depthpred, intrinsic, posepred, insmap)
            depthloss, depthselector = get_rdepth_loss(reldepth_gt=reldepth_gt, depthgt=depthgt, outputs=outputs, insmap=insmap)
            ssimloss, reprojselector = get_reprojection_loss(image1, insmap, outputs, ssim)
            metrics = dict()
            metrics['depthloss'] = depthloss.item()
            metrics['ssimloss'] = ssimloss.item()
            # ssimloss is multiplied by 0: the reprojection term is monitored
            # but deliberately excluded from the optimized loss — presumably
            # an ablation/debug setting; confirm before relying on it.
            loss = depthloss + ssimloss * 0
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            scheduler.step()
            if args.gpu == 0:
                logger.write_dict(metrics, step=total_steps)
                if total_steps % SUM_FREQ == 0:
                    dr = time.time() - st
                    # Remaining wall-clock estimate in hours.
                    resths = (args.num_steps - total_steps) * dr / (total_steps + 1) / 60 / 60
                    print("Step: %d, rest hour: %f, depthloss: %f, ssimloss: %f" % (total_steps, resths, depthloss.item(), ssimloss.item()))
                    logger.write_vls(data_blob, outputs, depthselector, reprojselector, total_steps)
            # Validation every VAL_FREQ steps (== 1 so it also runs early on).
            if total_steps % VAL_FREQ == 1:
                if args.gpu == 0:
                    results = validate_kitti(model.module, args, eval_loader, logger, group, total_steps, isdeepv2dpred=True)
                else:
                    results = validate_kitti(model.module, args, eval_loader, None, group, None, isdeepv2dpred=True)
                if args.gpu == 0:
                    logger_evaluation.write_dict(results, total_steps)
                    # Checkpoint whenever the relative-depth metric improves.
                    if minreld > results['reld']:
                        minreld = results['reld']
                        PATH = os.path.join(logroot, 'minreld.pth')
                        torch.save(model.state_dict(), PATH)
                        print("model saved to %s" % PATH)
                # Second pass scores the unrefined ('org') predictions.
                if args.gpu == 0:
                    results = validate_kitti(model.module, args, eval_loader, logger, group, total_steps, isdeepv2dpred=False)
                else:
                    results = validate_kitti(model.module, args, eval_loader, None, group, None, isdeepv2dpred=False)
                if args.gpu == 0:
                    logger_evaluation_org.write_dict(results, total_steps)
                model.train()
            total_steps += 1
            if total_steps > args.num_steps:
                should_keep_training = False
                break
        epoch = epoch + 1
    if args.gpu == 0:
        logger.close()
    PATH = os.path.join(logroot, 'final.pth')
    torch.save(model.state_dict(), PATH)
    return PATH
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', default='raft', help="name your experiment")
    parser.add_argument('--stage', help="determines which dataset to use for training")
    parser.add_argument('--restore_ckpt', help="restore checkpoint")
    parser.add_argument('--lr', type=float, default=0.00002)
    parser.add_argument('--num_steps', type=int, default=100000)
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
    # Train/eval crop geometry (pixels).
    parser.add_argument('--inheight', type=int, default=320)
    parser.add_argument('--inwidth', type=int, default=960)
    parser.add_argument('--evalheight', type=int, default=320)
    parser.add_argument('--evalwidth', type=int, default=1216)
    parser.add_argument('--maxinsnum', type=int, default=50)
    parser.add_argument('--min_depth_pred', type=float, default=1)
    parser.add_argument('--max_depth_pred', type=float, default=85)
    parser.add_argument('--min_depth_eval', type=float, default=1e-3)
    parser.add_argument('--max_depth_eval', type=float, default=80)
    parser.add_argument('--tscale_range', type=float, default=3)
    parser.add_argument('--objtscale_range', type=float, default=10)
    parser.add_argument('--angx_range', type=float, default=0.03)
    parser.add_argument('--angy_range', type=float, default=0.06)
    parser.add_argument('--angz_range', type=float, default=0.01)
    parser.add_argument('--num_layers', type=int, default=50)
    parser.add_argument('--num_deges', type=int, default=32)
    parser.add_argument('--maxlogscale', type=float, default=1.5)
    parser.add_argument('--wdecay', type=float, default=.00005)
    parser.add_argument('--epsilon', type=float, default=1e-8)
    parser.add_argument('--clip', type=float, default=1.0)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--add_noise', action='store_true')
    # Dataset / prediction roots (cluster paths supplied on the command line).
    parser.add_argument('--dataset_root', type=str)
    parser.add_argument('--semantics_root', type=str)
    parser.add_argument('--depth_root', type=str)
    parser.add_argument('--depthvlsgt_root', type=str)
    parser.add_argument('--prediction_root', type=str)
    parser.add_argument('--ins_root', type=str)
    parser.add_argument('--logroot', type=str)
    parser.add_argument('--num_workers', type=int, default=12)
    # NOTE(review): argparse `type=bool` makes any non-empty string truthy
    # (e.g. "--distributed False" is still True); an action='store_true'/
    # 'store_false' flag would behave as intended.
    parser.add_argument('--distributed', default=True, type=bool)
    parser.add_argument('--dist_url', type=str, help='url used to set up distributed training', default='tcp://127.0.0.1:1235')
    parser.add_argument('--dist_backend', type=str, help='distributed backend', default='nccl')
    args = parser.parse_args()
    # Fixed seeds for reproducibility of augmentation/sampling.
    torch.manual_seed(1234)
    np.random.seed(1234)
    if not os.path.isdir(os.path.join(args.logroot, args.name)):
        os.makedirs(os.path.join(args.logroot, args.name), exist_ok=True)
        os.makedirs(os.path.join(args.logroot, 'evaluation', args.name), exist_ok=True)
    torch.cuda.empty_cache()
    ngpus_per_node = torch.cuda.device_count()
    # NOTE(review): the non-distributed branch calls train(args.gpu, ...) but
    # no --gpu argument is ever defined, so that path would raise
    # AttributeError — confirm the script is always run distributed.
    if args.distributed:
        args.world_size = ngpus_per_node
        mp.spawn(train, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
train(args.gpu, ngpus_per_node, args) | [
"twjiannuo@gmail.com"
] | twjiannuo@gmail.com |
a4b2d7963db02f8c672477e5f95f24874ba73fdb | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1605+123/sdB_pg_1605+123_lc.py | 0bb502fdc840c769372947da5add7de5f4ec3f0b | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from gPhoton.gAperture import gAperture
def main():
    """Run a single gPhoton gAperture extraction for sdB_pg_1605+123.

    NUV light curve at the given sky position (degrees), 30 s bins; radius
    and annulus values are in degrees (0.00555556 deg ≈ 20 arcsec) —
    presumably chosen to match the survey PSF; confirm against gPhoton docs.
    """
    gAperture(band="NUV", skypos=[242.076542,12.200892], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1605+123/sdB_pg_1605+123_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
fbaa8321206abbc95d0c0afcb5d6add91cba179d | 30d02ec6dd309dced011d266ca40bace293fb23e | /20210125/min_cost_climbing_stairs.py | 364d94212e31ff42f3ac84aab24719ebdbb696b9 | [] | no_license | jyeoniii/algorithm | b72f5e9f7fe63098c251bcc1585787ba39ca750c | 7d80e27aec8fbac936911ee78a92c47b00daa3ba | refs/heads/master | 2023-04-15T01:39:41.149528 | 2021-04-22T13:55:58 | 2021-04-22T13:55:58 | 316,533,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # https://leetcode.com/problems/min-cost-climbing-stairs/
from typing import List
class Solution:
    def minCostClimbingStairs(self, cost: List[int]) -> int:
        """Minimum total cost to reach the top; you may start at step 0 or 1."""
        # dp[i]: cheapest total cost to *stand on* step i (its own cost paid).
        dp = [0] * len(cost)
        dp[0], dp[1] = cost[0], cost[1]
        # Each step is reached from one or two steps below.
        for i in range(2, len(cost)):
            dp[i] = min(dp[i-2], dp[i-1]) + cost[i]
return min(dp[-1], dp[-2]) | [
"jaykim9438@gmail.com"
] | jaykim9438@gmail.com |
10063675a5f60fefde1348935aca932ecf817962 | 1ba58b17f33122abf4236e9e430a51d375e0eb53 | /km72/Lesiuk_Andrew/3/task7.py | 5193e638cd6293de20a4c88f3e2230d7f4a8f580 | [] | no_license | igortereshchenko/amis_python | c4f8d86b88ab036d08ff0ce35c9b42ebeabecc42 | c6f0f2a70c82d5f269b3078eb296f82271b5bb10 | refs/heads/master | 2021-10-22T16:21:19.990650 | 2017-11-01T07:26:54 | 2017-11-01T07:26:54 | 104,785,028 | 0 | 139 | null | 2020-04-21T21:27:09 | 2017-09-25T18:11:42 | Python | UTF-8 | Python | false | false | 466 | py | print('Програма визначає скільки показуватиме годинник')
# Read a non-negative number of minutes elapsed since midnight.
while True:
    N=int(input('Введіть скільки часу пройшло після півночі\n'))
    if N<0:
        print('Час маэ бути додатнім')
    else:
        break
# BUG FIX: the original derived hours/minutes from the day count
# (t = N // 1440, then printed t // 60 and t % 60) — the clock reading
# must come from the remainder of N within the current day.
days = N // 1440          # 1440 minutes per day
hours = (N % 1440) // 60  # full hours shown on the clock
minutes = N % 60          # remaining minutes
print('Кількість днів:', days)
print('Кількість годин:', hours)
print('Кількість хвилин:', minutes)
| [
"noreply@github.com"
] | igortereshchenko.noreply@github.com |
a664baac4f20445be3d45180f262f6795c2cb852 | 41b4702e359e3352116eeecf2bdf59cb13c71cf2 | /full_model_walker_param/rand_param_envs/rand_param_envs/gym/error.py | 3cbf07f1aebc146761fed6c16430dfa2594c62c8 | [] | no_license | CaralHsi/Multi-Task-Batch-RL | b0aad53291c1713fd2d89fa4fff4a85c98427d4d | 69d29164ab7d82ec5e06a929ed3b96462db21853 | refs/heads/master | 2022-12-22T19:23:45.341092 | 2020-10-01T00:05:36 | 2020-10-01T00:05:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | import sys
class Error(Exception):
    """Base class for gym-specific exceptions."""
    pass
# Local errors
# NOTE: the registry/seed/env errors below derive from the shared Error base,
# while the monitor/action errors further down derive from Exception directly —
# an inconsistency inherited from upstream gym, kept as-is.
class Unregistered(Error):
    """Raised when the user requests an item from the registry that does
    not actually exist.
    """
    pass
class UnregisteredEnv(Unregistered):
    """Raised when the user requests an env from the registry that does
    not actually exist.
    """
    pass
class UnregisteredBenchmark(Unregistered):
    """Raised when the user requests an env from the registry that does
    not actually exist.
    """
    pass
class DeprecatedEnv(Error):
    """Raised when the user requests an env from the registry with an
    older version number than the latest env with the same name.
    """
    pass
class UnseedableEnv(Error):
    """Raised when the user tries to seed an env that does not support
    seeding.
    """
    pass
class DependencyNotInstalled(Error):
    pass
class UnsupportedMode(Exception):
    """Raised when the user requests a rendering mode not supported by the
    environment.
    """
    pass
class ResetNeeded(Exception):
    """When the monitor is active, raised when the user tries to step an
    environment that's already done.
    """
    pass
class ResetNotAllowed(Exception):
    """When the monitor is active, raised when the user tries to step an
    environment that's not yet done.
    """
    pass
class InvalidAction(Exception):
    """Raised when the user performs an action not contained within the
    action space
    """
    pass
# API errors
class APIError(Error):
    """Base error for API responses: captures body, status, headers and the
    server-assigned request id (from the 'request-id' header) for debugging."""
    def __init__(self, message=None, http_body=None, http_status=None,
                 json_body=None, headers=None):
        super(APIError, self).__init__(message)
        # Best-effort decode of a bytes body to text.
        if http_body and hasattr(http_body, 'decode'):
            try:
                http_body = http_body.decode('utf-8')
            # BUG FIX: was a bare `except:` which also swallows
            # SystemExit/KeyboardInterrupt; any decoding failure is still
            # handled by substituting the placeholder below.
            except Exception:
                http_body = ('<Could not decode body as utf-8. '
                             'Please report to gym@openai.com>')
        self._message = message
        self.http_body = http_body
        self.http_status = http_status
        self.json_body = json_body
        self.headers = headers or {}
        self.request_id = self.headers.get('request-id', None)
    def __unicode__(self):
        # Prefix the request id when the server supplied one.
        if self.request_id is not None:
            msg = self._message or "<empty message>"
            return u"Request {0}: {1}".format(self.request_id, msg)
        else:
            return self._message
    # Python 2/3 compatible string conversion.
    if sys.version_info > (3, 0):
        def __str__(self):
            return self.__unicode__()
    else:
        def __str__(self):
            return unicode(self).encode('utf-8')
class APIConnectionError(APIError):
    """Network-level failure while talking to the API."""
    pass
class InvalidRequestError(APIError):
    """API rejected the request; *param* names the offending parameter."""
    def __init__(self, message, param, http_body=None,
                 http_status=None, json_body=None, headers=None):
        super(InvalidRequestError, self).__init__(
            message, http_body, http_status, json_body,
            headers)
        self.param = param
class AuthenticationError(APIError):
    pass
class RateLimitError(APIError):
    pass
# Video errors
class VideoRecorderError(Error):
    pass
class InvalidFrame(Error):
    pass
# Wrapper errors
class DoubleWrapperError(Error):
    pass
class WrapAfterConfigureError(Error):
    pass
| [
"jil021@eng.ucsd.edu"
] | jil021@eng.ucsd.edu |
58e7e2647dbe0a4e0415a8e2ee79e7efd343560e | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/01_netCDF_extraction/erafive902TG/606-tideGauge.py | 2cf848516a1ff591f435d0ff9083a5bb228a8912 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,595 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 01 10:00:00 2020
ERA5 netCDF extraction script
@author: Michael Tadesse
"""
import time as tt
import os
import pandas as pd
from d_define_grid import Coordinate, findPixels, findindx
from c_read_netcdf import readnetcdf
from f_era5_subsetV2 import subsetter
def extract_data(delta= 1):
    """
    This is the master function that calls subsequent functions
    to extract uwnd, vwnd, slp for the specified
    tide gauges
    delta: distance (in degrees) from the tide gauge
    """
    print('Delta = {}'.format(delta), '\n')
    #defining the folders for predictors
    nc_path = {'slp' : "/lustre/fs0/home/mtadesse/era_five/slp",\
        "wnd_u": "/lustre/fs0/home/mtadesse/era_five/wnd_u",\
        'wnd_v' : "/lustre/fs0/home/mtadesse/era_five/wnd_v"}
    surge_path = "/lustre/fs0/home/mtadesse/obs_surge"
    csv_path = "/lustre/fs0/home/mtadesse/erafive_localized"
    #cd to the obs_surge dir to get TG information
    os.chdir(surge_path)
    tg_list = os.listdir()
    #################################
    #looping through the predictor folders
    #################################
    for pf in nc_path.keys():
        print(pf, '\n')
        os.chdir(nc_path[pf])
        ####################################
        #looping through the years of the chosen predictor
        ####################################
        for py in os.listdir():
            os.chdir(nc_path[pf]) #back to the predictor folder
            print(py, '\n')
            #get netcdf components - give predicor name and predictor file
            nc_file = readnetcdf(pf, py)
            lon, lat, time, pred = nc_file[0], nc_file[1], nc_file[2], \
                nc_file[3]
            # NOTE(review): x/y hard-code a single tide gauge (index 606) —
            # presumably this script is one shard of a per-gauge job array;
            # the filename "606-tideGauge.py" supports that reading.
            x = 606
            y = 607
            #looping through individual tide gauges
            for t in range(x, y):
                #the name of the tide gauge - for saving purposes
                # tg = tg_list[t].split('.mat.mat.csv')[0]
                tg = tg_list[t]
                #extract lon and lat data from surge csv file
                print("tide gauge", tg, '\n')
                os.chdir(surge_path)
                # os.stat uses a bare filename, so it relies on the chdir
                # to surge_path on the line above.
                if os.stat(tg).st_size == 0:
                    print('\n', "This tide gauge has no surge data!", '\n')
                    continue
                surge = pd.read_csv(tg, header = None)
                #surge_with_date = add_date(surge)
                #define tide gauge coordinate(lon, lat)
                tg_cord = Coordinate(float(surge.iloc[1,4]), float(surge.iloc[1,5]))
                print(tg_cord)
                #find closest grid points and their indices
                close_grids = findPixels(tg_cord, delta, lon, lat)
                ind_grids = findindx(close_grids, lon, lat)
                ind_grids.columns = ['lon', 'lat']
                #loop through preds#
                #subset predictor on selected grid size
                print("subsetting \n")
                pred_new = subsetter(pred, ind_grids, time)
                #create directories to save pred_new
                os.chdir(csv_path)
                #tide gauge directory
                tg_name = tg.split('.csv')[0]
                try:
                    os.makedirs(tg_name)
                    os.chdir(tg_name) #cd to it after creating it
                except FileExistsError:
                    #directory already exists
                    os.chdir(tg_name)
                #predictor directory
                pred_name = pf
                try:
                    os.makedirs(pred_name)
                    os.chdir(pred_name) #cd to it after creating it
                except FileExistsError:
                    #directory already exists
                    os.chdir(pred_name)
                #time for saving file
                print("saving as csv")
                yr_name = py.split('_')[-1]
                save_name = '_'.join([tg_name, pred_name, yr_name])\
                    + ".csv"
                pred_new.to_csv(save_name)
                #return to the predictor directory
                os.chdir(nc_path[pf])
#run script
extract_data(delta= 1) | [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
59a4a14b9d124e2ebce3cf2bee85efcd5d00fa6b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03722/s588134158.py | 2ca94082b4c983d18464cd5ca89025e819443681 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | from collections import defaultdict, deque, Counter
from heapq import heappush, heappop, heapify
import math
import bisect
import random
from itertools import permutations, accumulate, combinations, product
import sys
from copy import deepcopy
import string
from bisect import bisect_left, bisect_right
from math import factorial, ceil, floor
from operator import mul
from functools import reduce
sys.setrecursionlimit(2147483647)
INF = 10 ** 20  # effectively-infinite sentinel used for distances below
# Compact stdin readers (competitive-programming boilerplate):
def LI(): return list(map(int, sys.stdin.buffer.readline().split()))  # one line -> list of ints
def I(): return int(sys.stdin.buffer.readline())  # one line -> single int
def LS(): return sys.stdin.buffer.readline().rstrip().decode('utf-8').split()  # line -> tokens
def S(): return sys.stdin.buffer.readline().rstrip().decode('utf-8')  # whole line as str
def IR(n): return [I() for i in range(n)]  # n lines of single ints
def LIR(n): return [LI() for i in range(n)]  # n lines of int lists
def SR(n): return [S() for i in range(n)]  # n raw lines
def LSR(n): return [LS() for i in range(n)]  # n tokenized lines
def SRL(n): return [list(S()) for i in range(n)]  # n lines as char lists
mod = 1000000007
n, m = LI()
# Build a 0-indexed adjacency list from 1-indexed weighted edges (a -> b, cost c).
G = [[] for _ in range(n)]
for a, b, c in LIR(m):
    G[a - 1] += [(b - 1, c)]
# 負の閉路がなかったら高々n-1回で更新は終わるはずn回目に更新が起こったとしたら負の閉路がある。
def bellman_ford(G, s=0):
    """Longest-path variant of Bellman-Ford on adjacency list ``G`` from source ``s``.

    Returns the maximum total edge weight from ``s`` to node ``n-1``, or the
    string ``"inf"`` when a positive-gain cycle can reach node ``n-1`` (the
    score is then unbounded).  Unreachable nodes keep distance ``-INF``.
    """
    n = len(G)
    dist = [-INF] * n
    dist[s] = 0
    # Only nodes whose distance changed in the previous round can trigger
    # further updates, so relaxation is restricted to this frontier set.
    v_nows = {s}
    for _ in range(n):
        v_changeds = set()
        for u in v_nows:
            for v, c in G[u]:
                if dist[u] + c > dist[v]:
                    dist[v] = dist[u] + c
                    v_changeds.add(v)
        v_nows = v_changeds
        if not v_changeds:
            # Converged within n rounds: no improving cycle affects the answer.
            return dist[n - 1]
    # An update still happened on the n-th round, so every node in v_nows lies
    # on (or behind) an improving cycle.  Mark them INF and flood forward: any
    # node reachable from them also has an unbounded score.
    for i in v_nows:
        dist[i] = INF
    dq = deque(v_nows)
    while dq:
        u = dq.popleft()
        if u == n - 1:
            # The target is reachable from an improving cycle.
            return "inf"
        for v, c in G[u]:
            if dist[v] != INF:
                dist[v] = INF
                dq += [v]
    return dist[n - 1]
# Print the answer for node n-1 (node n in the 1-indexed input).
print(bellman_ford(G))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a44dda549816cfa2bb70f000b27b7c4ae8270f22 | 049584769071069cd4ebb83a4a519093ba57eb87 | /src/billing/migrations/0023_auto_20150620_1546.py | 173e44e8784fa6c344130f8c12801a8d85ca02f4 | [] | no_license | kij8323/mysite_server_version | 4c1c29b0d0b6eb5d91907d44105a347a0ff58a54 | d58ddd79626772d8b71e539f00cdf45763ab2a00 | refs/heads/master | 2016-09-05T23:34:14.584462 | 2015-12-21T12:45:39 | 2015-12-21T12:45:39 | 38,934,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations.  The datetime defaults below
    # were captured at generation time (2015-06-20) — they are fixed constants,
    # not "now()" evaluated when the migration runs.

    dependencies = [
        ('billing', '0022_auto_20150620_1545'),
    ]

    operations = [
        migrations.AlterField(
            model_name='membership',
            name='date_end',
            field=models.DateTimeField(default=datetime.datetime(2015, 6, 20, 15, 46, 54, 250929, tzinfo=utc), verbose_name=b'End Date'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='membership',
            name='date_start',
            field=models.DateTimeField(default=datetime.datetime(2015, 6, 20, 15, 46, 54, 250955, tzinfo=utc), verbose_name=b'Start Date'),
            preserve_default=True,
        ),
    ]
| [
"shenanping2008@Hotmail.com"
] | shenanping2008@Hotmail.com |
21696ee9099e81ed55415a70fe21f018e45ad988 | 3be1ddf42236a1b33ec74ed3bfdd0f8918513733 | /coding-challenges/week05/day3/apartment/class_bed.py | 808418561319a7ae9f5d0a65e2ca972b02df1795 | [] | no_license | aabhishek-chaurasia-au17/MyCoding_Challenge | 84ef926b550b3f511f1c642fe35f4303c8abb949 | 419d02ad8740a2c00403fd30c661074266d2ba8f | refs/heads/main | 2023-08-29T09:52:36.796504 | 2021-11-07T07:32:09 | 2021-11-07T07:32:09 | 359,842,173 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | """
1. The Bed Object has the following attributes:
length: length of the bed in feet
breadth: breadth of the bed in feet
year_made: Year in which the bed was made
has_headboard: True or False depending on whether the bed has a headboard or not
has_posts: True or False depending on whether the bed has sideposts or not
material: material is wood, steel, plywood and so on.
2. The Bed object does not define any methods beyond the constructor.
"""
class Bed():
    """A bed described by its dimensions, age, features and material.

    Every attribute holds a ready-to-print descriptive sentence rather than
    the raw value that was passed in.
    """

    def __init__(self, length, breadth, year_made, has_headboard, has_posts, material):
        self.length = "The length of the bed in feet is : {} feet ".format(length)
        self.breadth = "The breadth of the bed in feet is {} feet ".format(breadth)
        self.year_made = "The bed is made in the year of : {} ".format(year_made)
        self.has_headboard = "This bed has headboard : {} ".format(has_headboard)
        self.has_posts = "This bed has posts : {} ".format(has_posts)
        self.material = "The materials that use to make the bed is : {}".format(material)
# Sample bed used by the smoke test below.
my_bed = Bed(10,5,2015,True,False,'Wood')
# Quick smoke test: print every descriptive attribute.
print(my_bed.length)
print(my_bed.breadth)
print(my_bed.year_made)
print(my_bed.has_headboard)
print(my_bed.has_posts)
print(my_bed.material)
| [
"abhishekc838@gmail.com"
] | abhishekc838@gmail.com |
7635fb88cccde37ae72c60d1cc32540fc8b41ed6 | 4c434fd8883a087f0baef4398e98b7cdf82606f5 | /deploy.py | 87d8b7e330bbe77d7534b18a38ca9968b10c7e22 | [] | no_license | Vignesh2208/emane-TimeKeeper | 6b8a779ba0a3b325819920f8df7e32285db353ca | dea6d62a31467de7293666729846bc34a375468b | refs/heads/master | 2021-01-10T14:34:34.148864 | 2017-04-06T21:46:55 | 2017-04-06T21:46:55 | 49,746,354 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31,522 | py | #
# File : deploy.py
#
# Brief : Starts an emane-deployment with TimeKeeper enabled or disabled
#
# authors : Vignesh Babu
#
import sys
import os
import re
import shutil
from timekeeper_functions import *
import time
from emanesh.events import EventService
from emanesh.events import LocationEvent
from emanesh.events import PathlossEvent
import datetime
from datetime import datetime
import subprocess
import signal
from util_functions import *
## DEFAULT VALUES
ENABLE_TIMEKEEPER = 1                      # 1 = run LXCs under TimeKeeper time dilation, 0 = real time
platformendpoint_base = 8201               # platform endpoint port for NEM i is base + i
transportendpoint_base = 8301              # transport endpoint port for NEM i is base + i
transport_base_address ="10.100.0.0"       # NEM i gets transport address base + i
cwd = os.getcwd()                          # repository root; script is expected to be launched from it
lxc_files_dir = "/tmp/emane/lxc"           # per-node LXC scratch directories live here
experiment_dir = cwd + "/conf/experiment"  # generated per-experiment XML files and scripts
conf_file = cwd + "/conf/emane.conf"       # default global config (may be overridden by argv in main())
node_conf_file = cwd + "/conf/node.conf"   # default per-node config (may be overridden by argv in main())
script_interrupted = 0                     # guards against re-entrant SIGINT handling
max_tdf = -1                               # largest time-dilation factor seen across all nodes
topo_size = 0                              # number of nodes; set in main() from emane.conf
def IP2Int(ip):
    """Convert a dotted-quad IPv4 string (e.g. "10.100.0.0") to its 32-bit integer value.

    Inverse of Int2IP().
    """
    # list() is required under Python 3, where map() returns a lazy iterator
    # that does not support indexing; under Python 2 it is a harmless copy.
    o = list(map(int, ip.split('.')))
    return (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3]
def Int2IP(ipnum):
    """Render a non-negative 32-bit integer as a dotted-quad IPv4 string.

    Inverse of IP2Int().
    """
    # Extract the four octets most-significant first via bit shifts.
    octets = [str((ipnum >> shift) % 256) for shift in (24, 16, 8, 0)]
    return '.'.join(octets)
def generate_ARP_table(n_nodes):
    """Write experiment_dir/arp_table.txt mapping each node's transport IP to its MAC.

    Node i gets IP transport_base_address + i and MAC 02:02:00:00:hh:hh, where
    hhhh is the NEM id as four zero-padded hex digits.
    """
    base = IP2Int(transport_base_address)
    entries = []
    for nem_id in range(1, n_nodes + 1):
        # Zero-padded 4-digit hex id supplies the last two MAC octets.
        hex_id = format(nem_id, '04x')
        mac = "02:02:00:00:" + hex_id[0:2] + ":" + hex_id[2:]
        entries.append(Int2IP(base + nem_id) + " " + mac + "\n")
    with open(experiment_dir + "/arp_table.txt", "w") as table_file:
        table_file.write("".join(entries))
def generate_platformxml(nem_id,otamanagerdevice,otamanagergroup,otamanagerttl,otamanagerloopback,eventmanagerdevice,eventmanagergroup,eventmanagerttl,transportdef,macdef,phydef) :
    """Return the EMANE platform XML document (as a string) for one NEM.

    The document references expnem.xml and the given transport definition, and
    assigns the NEM the transport address transport_base_address + nem_id.
    Whitespace inside the triple-quoted fragments is part of the emitted XML.
    Note: otamanagerttl and otamanagerloopback are currently unused (the
    corresponding fragments are commented out below).
    """
    platformxmlheader = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE platform SYSTEM "file:///usr/share/emane/dtd/platform.dtd">"""
    platformxml = platformxmlheader
    platformxml = platformxml + \
    """
    <platform> """
    platformxml += \
    """
    <param name="otamanagerchannelenable" value="on"/>
    <param name="otamanagerdevice" value=""" + "\"" + otamanagerdevice + "\"/>"
    platformxml += \
    """
    <param name="otamanagergroup" value=""" + "\"" + otamanagergroup + "\"/>"
    #platformxml += \
    #"""
    #<param name="otamanagerttl" value=""" + "\"" + otamanagerttl + "\"/>"
    #platformxml += \
    #"""
    #<param name="otamanagerloopback" value=""" + "\"" + otamanagerloopback + "\"/>"
    platformxml += \
    """
    <param name="eventservicegroup" value=""" + "\"" + eventmanagergroup + "\"/>"
    platformxml += \
    """
    <param name="eventservicedevice" value=""" + "\"" + eventmanagerdevice + "\"/>"
    platformxml += \
    """
    <param name="controlportendpoint" value="0.0.0.0:47000"/>"""
    #platformxml += \
    #"""
    #<nem id=\"""" + str(nem_id) + "\" definition=\"expnem.xml\" transport=\"external\" >"
    platformxml += \
    """
    <nem id=\"""" + str(nem_id) + "\" definition=\"expnem.xml\">"
    #platformxml += \
    #"""
    #	<param name="platformendpoint" value=""" + "\"localhost:" + str(platformendpoint_base + nem_id) + "\"/>"
    #platformxml += \
    #"""
    #	<param name="transportendpoint" value=""" + "\"localhost:" + str(transportendpoint_base + nem_id) + "\"/>"
    platformxml += \
    """
    <transport definition=""" + "\"" + transportdef + ".xml\">"
    platformxml += \
    """
        <param name="address" value=""" + "\"" + str(Int2IP(IP2Int(transport_base_address) + nem_id)) + "\"/>"
    # was 255.255.0.0 before
    platformxml += \
    """
        <param name="mask" value=""" + "\"255.255.0.0\"/>"
    platformxml += \
    """
    </transport>
    </nem>
    </platform>
    """
    return platformxml
def generate_transportdaemonxml(nem_id,transportdef) :
    """Return the transport-daemon XML document (as a string) for one NEM.

    The daemon instance is wired to localhost platform/transport endpoints at
    ports platformendpoint_base + nem_id and transportendpoint_base + nem_id,
    using the same transport address scheme as generate_platformxml().
    """
    transportdaemonheader = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE transportdaemon SYSTEM "file:///usr/share/emane/dtd/transportdaemon.dtd">"""
    transportdaemonxml = transportdaemonheader
    transportdaemonxml += \
    """
    <transportdaemon>
      <instance nemid=""" + "\"" + str(nem_id) + "\">"
    transportdaemonxml += \
    """
    <param name="platformendpoint" value=""" + "\"localhost:" + str(platformendpoint_base + nem_id) + "\"/>"
    transportdaemonxml += \
    """
    <param name="transportendpoint" value=""" + "\"localhost:" + str(transportendpoint_base + nem_id) + "\"/>"
    transportdaemonxml += \
    """
    <transport definition=""" + "\"" + transportdef + ".xml\">"
    transportdaemonxml += \
    """
    <param name="address" value=""" + "\"" + str(Int2IP(IP2Int(transport_base_address) + nem_id)) + "\"/>"
    # was 255.255.0.0 before
    transportdaemonxml += \
    """
    <param name="mask" value=""" + "\"255.255.0.0\"/>"
    transportdaemonxml += \
    """
    </transport>
    </instance>
    </transportdaemon>
    """
    return transportdaemonxml
def generate_expnemxml(transportdef,macdef,phydef) :
    """Return the expnem.xml document (as a string) binding a NEM to its
    transport, MAC and PHY layer definitions (file names without .xml)."""
    expnemxmlheader = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE nem SYSTEM "file:///usr/share/emane/dtd/nem.dtd">"""
    expnemxml = expnemxmlheader
    expnemxml += \
    """
    <nem name="EXP NEM">
      <transport definition=""" + "\"" + transportdef + ".xml\"/>"
    expnemxml += \
    """
      <mac definition=""" + "\""+ macdef + ".xml\"/>"
    expnemxml += \
    """
      <phy definition=""" + "\""+ phydef + ".xml\"/>"
    expnemxml += \
    """
    </nem>
    """
    return expnemxml
def generate_deploymentxml(n_nodes) :
    """Return a deployment XML document (as a string) declaring one platform
    per NEM for ids 1..n_nodes.  Used by the event generators."""
    deploymentxmlheader = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE deployment SYSTEM "file:///usr/share/emane/dtd/deployment.dtd">\n
    """
    deploymentxml = deploymentxmlheader
    deploymentxml += "<deployment>"
    nem_id = 1
    while nem_id <= n_nodes :
        deploymentxml += \
        """
        <platform id=""" + "\"" + str(nem_id) + "\">"
        deploymentxml += \
        """
            <nem id=""" + "\"" + str(nem_id) + "\"/>"
        deploymentxml += \
        """
        </platform>
        """
        nem_id += 1
    deploymentxml += "</deployment>"
    return deploymentxml
def generate_gpsdlocationxml(nemid) :
    """Return the gpsd location event-agent XML (as a string) for one NEM.

    The agent's pseudo-terminal file lives inside the node's LXC scratch
    directory under /tmp/emane/lxc/<nemid>/var/lib/gps.pty.
    """
    gpsdlocationxmlheader = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE eventagent SYSTEM "file:///usr/share/emane/dtd/eventagent.dtd">"""
    gpsdlocationxml = gpsdlocationxmlheader
    gpsdlocationxml += \
    """
    <eventagent name="gpsdlocationagent" library="gpsdlocationagent">
        <param name="gpsdconnectionenabled" value="no"/>
        <param name="pseudoterminalfile" value="/tmp/emane/lxc/""" + str(nemid) + """/var/lib/gps.pty\"/>
    </eventagent>
    """
    return gpsdlocationxml
def generate_eventdaemonxml(nemid, eventmanagergroup, eventmanagerdevice) :
    """Return the event-daemon XML (as a string) for one NEM, referencing the
    per-NEM gpsdlocationagent<nemid>.xml agent definition."""
    eventdaemonxmlheader = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE eventdaemon SYSTEM "file:///usr/share/emane/dtd/eventdaemon.dtd">
    """
    eventdaemonxml = eventdaemonxmlheader
    eventdaemonxml += \
    """
    <eventdaemon name="EMANE Event Daemon """ + str(nemid) + """\" nemid = \"""" + str(nemid) + """\">"""
    eventdaemonxml += \
    """
    <param name="eventservicegroup" value=\"""" + str(eventmanagergroup) + """\"/>"""
    eventdaemonxml +=\
    """
    <param name="eventservicedevice" value=\"""" + str(eventmanagerdevice) + """\"/>"""
    eventdaemonxml += \
    """
    <agent definition="gpsdlocationagent""" + str(nemid) + """.xml\"/>"""
    eventdaemonxml += \
    """
    </eventdaemon>
    """
    return eventdaemonxml
def generate_emulationscriptgeneratorxml(experiment_dir) :
    """Return the emulation-script event-generator XML (as a string), reading
    node positions from <experiment_dir>/location.xml with no repeats."""
    emulationscriptgeneratorxml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE eventgenerator SYSTEM "file:///usr/share/emane/dtd/eventgenerator.dtd">
<eventgenerator library="emulationscriptgenerator">"""
    emulationscriptgeneratorxml +=\
    """
    <param name="inputfile" value=\"""" + experiment_dir + """/location.xml\"/>"""
    emulationscriptgeneratorxml += \
    """
    <param name="repeatcount" value="0"/>
    <param name="schemalocation" value="file:///usr/share/doc/emane-gen-emulationscript/EmulationScriptSchema.xsd"/>
    </eventgenerator>"""
    # there was a 0.8.1 here
    return emulationscriptgeneratorxml
def generate_eventservicexml(eventservicegroup) :
    """Return the event-service XML (as a string) bound to device br0 and the
    given multicast group, referencing emulationscriptgenerator.xml."""
    eventservicexml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE eventservice SYSTEM "file:///usr/share/emane/dtd/eventservice.dtd">
<eventservice>"""
    eventservicexml += \
    """
    <param name="eventservicegroup" value=\"""" + eventservicegroup + """\"/>
    <param name="eventservicedevice" value="br0"/>
    <generator name="Emulation Script Generator" definition="emulationscriptgenerator.xml"/>
    </eventservice>"""
    return eventservicexml
def write_files(nemid, dest_dir, platformxml, transportdaemonxml, eventdaemonxml, gpsdlocationxml):
    """Persist the four per-NEM configuration documents into *dest_dir*.

    Each document is written to <dest_dir>/<kind><nemid>.xml.
    """
    documents = (
        ("platform", platformxml),
        ("transportdaemon", transportdaemonxml),
        ("eventdaemon", eventdaemonxml),
        ("gpsdlocationagent", gpsdlocationxml),
    )
    for prefix, body in documents:
        with open(dest_dir + "/" + prefix + str(nemid) + ".xml", "w+") as out:
            out.write(body)
def ERROR(msg, log=False):
    """Print *msg* and abort the whole script with exit status -1.

    Parameters:
        msg: human-readable description of the fatal configuration problem.
        log: reserved for writing the message to a log file; currently a no-op.
    """
    # Single-argument print(msg) behaves identically under Python 2 and is
    # valid Python 3, unlike the original "print msg" statement syntax.
    print(msg)
    if log == True:
        pass  # TODO: persist the message to a log file
    sys.exit(-1)
def validate_params(otamanagerdevice,otamanagergroup,otamanagerttl,otamanagerloopback,eventmanagerdevice,eventmanagergroup,eventmanagerttl,transportdef,macdef,phydef):
    """Validate the textual format of every emane.conf parameter.

    Aborts the script via ERROR() on the first parameter whose value does not
    match its expected pattern; returns None when everything is well-formed.
    The ten originally copy-pasted search/ERROR blocks are collapsed into one
    data-driven loop; patterns, messages and check order are unchanged.
    """
    # IPv4 dotted quad followed by a 5-digit port, e.g. "224.1.2.4:45702".
    endpoint_re = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):[0-9]{5}$'
    checks = [
        (r'[a-z]+[0-9]*$', otamanagerdevice, "Improper format Otamanager device %s"),
        (endpoint_re, otamanagergroup, "Improper format Otamanager Group %s"),
        (r'[1-9]+$', otamanagerttl, "Improper format Otamanager ttl %s"),
        (r'[Tt][Rr][Uu][Ee]|[Ff][Aa][Ll][Ss][Ee]$', otamanagerloopback, "Improper format Otamanager loopback %s"),
        (r'[a-z]+[0-9]*$', eventmanagerdevice, "Improper format Eventmanager device %s"),
        (endpoint_re, eventmanagergroup, "Improper format Eventmanager group %s"),
        (r'[1-9]+$', eventmanagerttl, "Improper format Eventmanager ttl %s"),
        (r'\w+$', transportdef, "Improper format Transport Definition %s"),
        (r'\w+$', macdef, "Improper format Mac Definition %s"),
        (r'\w+$', phydef, "Improper format Phy Definition %s"),
    ]
    for pattern, value, message in checks:
        # re.search (not match) keeps the original loose, end-anchored matching.
        if re.search(pattern, value) is None:
            ERROR(message % value)
def configure() :
    """Parse emane.conf and node.conf, then generate every per-node artifact.

    Side effects: wipes and repopulates conf/experiment, /tmp/emane/lxc and
    experiment-data; writes per-NEM XML, routing confs, LXC configs/scripts
    and the ARP table.  Returns (Node, run_time, n_nodes, eventmanagergroup,
    timeslice) where Node maps node_id -> its node.conf settings.
    """
    global conf_file
    global node_conf_file
    global ENABLE_TIMEKEEPER
    # dictionary containing each node's configuration read from node.conf
    Node= {}
    with open(conf_file) as f :
        content = f.readlines()
    # --- parse "name = value" lines of emane.conf, applying defaults ---
    for line in content :
        param_list = line.split("=")
        param_name = param_list[0].strip(' \t\n\r')
        if len(param_list) == 1 :
            param_value = None
        else :
            param_value = param_list[1].strip(' \t\n\r')
            if len(param_value) == 0 :
                param_value = None
        """
        Valid params
        otamanagerdevice : <NONE>
        otamanagergroup : <NONE>
        otamanagerttl : 1
        otamanagerloopback : FALSE
        eventmanagerdevice : <NONE>
        eventmanagergroup : <REQUIRED>
        eventmanagerttl : 1
        antennaprofilemanifesturi : <NONE>
        transportdef : <NONE>
        macdef : <NONE>
        phydef : <NONE>
        bandwidth : 1000000
        min_pkt_size : 1024
        """
        if param_name == "otamanagerdevice" :
            if not param_value == None :
                otamanagerdevice = param_value
            else :
                otamanagerdevice = "eth0"
        elif param_name == "otamanagergroup" :
            if not param_value == None :
                otamanagergroup = param_value
            else :
                otamanagergroup = "224.1.2.4:45702"
        elif param_name == "otamanagerttl" :
            if not param_value == None :
                otamanagerttl = param_value
            else :
                otamanagerttl = "1"
        elif param_name == "otamanagerloopback" :
            if not param_value == None :
                otamanagerloopback = param_value
            else :
                otamanagerloopback = "false"
        elif param_name == "eventmanagerdevice" :
            if not param_value == None :
                eventmanagerdevice = param_value
            else :
                eventmanagerdevice = "eth0"
        elif param_name == "eventmanagergroup" :
            if not param_value == None :
                eventmanagergroup = param_value
            else :
                eventmanagergroup = "224.1.2.4:45703"
        elif param_name == "eventmanagerttl" :
            if not param_value == None :
                eventmanagerttl = param_value
            else :
                # NOTE(review): this else-branch assigns otamanagerttl, leaving
                # eventmanagerttl undefined when the param has no value —
                # looks like a copy-paste bug; confirm intended behavior.
                otamanagerttl = "1"
        elif param_name == "antennaprofilemanifesturi" :
            if not param_value == None :
                antennaprofilemanifesturi = param_value
            else :
                antennaprofilemanifesturi = None
        elif param_name == "transportdef" :
            if not param_value == None :
                transportdef = param_value
            else :
                transportdef = "transvirtual"
        elif param_name == "macdef" :
            if not param_value == None :
                macdef = param_value
            else :
                macdef = "rfpipe"
        elif param_name == "phydef" :
            if not param_value == None :
                phydef = param_value
            else :
                phydef = "universalphy"
        elif param_name == "n_nodes":
            if not param_value == None :
                n_nodes = int(param_value)
            else :
                n_nodes = 10
        elif param_name == "run_time" :
            if not param_value == None:
                run_time = float(param_value)
            else :
                run_time = 1.0 # 1 secs
        elif param_name == "bandwidth" :
            if not param_value == None:
                bandwidth = float(param_value)
            else :
                bandwidth = 1000000.0
        elif param_name == "min_pkt_size" :
            if not param_value == None:
                min_pkt_size = int(param_value)
            else :
                min_pkt_size = 1024
        else :
            print "Unrecognized parameter: ", param_name
            sys.exit(-1)
    # Timeslice (ns) = transmission time of one minimum-size packet.
    timeslice = int((min_pkt_size*8/bandwidth)*1000000000)
    print "Timeslice value = ", timeslice
    if timeslice < 10000000 :
        print "Warning. Computed Timeslice value < 10ms. Force setting it to 10ms. Could increase propagation delay error"
        timeslice = 10000000
    validate_params(otamanagerdevice,otamanagergroup,otamanagerttl,otamanagerloopback,eventmanagerdevice,eventmanagergroup,eventmanagerttl,transportdef,macdef,phydef)
    transport_base_address_int = IP2Int(transport_base_address)
    # Clean up the experiment-conf directory
    for the_file in os.listdir(cwd + "/conf/experiment"):
        file_path = os.path.join(cwd + "/conf/experiment", the_file)
        try:
            if os.path.isfile(file_path) and ".keep" not in str(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception, e:
            print e
    # Clean up /tmp/emane/lxc directory
    if os.path.isdir(lxc_files_dir) == True :
        for the_file in os.listdir(lxc_files_dir):
            file_path = os.path.join(lxc_files_dir, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception, e:
                print e
    # Clean up experiment-data directory
    if os.path.isdir(cwd + "/experiment-data") == True :
        for the_file in os.listdir(cwd + "/experiment-data"):
            file_path = os.path.join(cwd + "/experiment-data", the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    pass
            except Exception, e:
                print e
    # --- copy the selected model definitions into the experiment directory ---
    transportdef_file = cwd + "/conf/models/"+ transportdef + ".xml"
    macdef_file = cwd + "/conf/models/"+ macdef + ".xml"
    phydef_file = cwd + "/conf/models/" + phydef + ".xml"
    if not os.path.isfile(transportdef_file) :
        ERROR("Transport definition file does not exist")
    else :
        shutil.copy(transportdef_file, cwd + "/conf/experiment")
    if not os.path.isfile(macdef_file) :
        ERROR("MAC definition file does not exist")
    else :
        shutil.copy(macdef_file, cwd + "/conf/experiment")
    if not os.path.isfile(phydef_file) :
        ERROR("Phyisical layer definition file does not exist")
    else :
        shutil.copy(phydef_file, cwd + "/conf/experiment")
    # Generate deploymentxml
    deploymentxml = generate_deploymentxml(n_nodes) # For use by event generators.
    # Generate expnemxml
    expnemxml = generate_expnemxml(transportdef,macdef,phydef)
    # Generate emulationscriptgeneratorxml
    emulationscriptgeneratorxml = generate_emulationscriptgeneratorxml(experiment_dir)
    # Generate eventservicexml
    eventservicexml = generate_eventservicexml(eventmanagergroup)
    # write deploymentxml and expnemxml into experiment directory
    #with open(experiment_dir + "/deployment.xml","w+") as f :
    #	f.write(deploymentxml)
    with open(experiment_dir + "/expnem.xml","w+") as f :
        f.write(expnemxml)
    #with open(experiment_dir + "/emulationscriptgenerator.xml","w+") as f :
    #	f.write(emulationscriptgeneratorxml)
    #with open(experiment_dir + "/eventservice.xml","w+") as f :
    #	f.write(eventservicexml)
    # --- per-NEM platform/transport/event/gps XML documents ---
    nem_id = 1
    while nem_id <= n_nodes :
        # Generate platform.xml
        platformxml = generate_platformxml(nem_id,otamanagerdevice,otamanagergroup,otamanagerttl,otamanagerloopback,eventmanagerdevice,eventmanagergroup,eventmanagerttl,transportdef,macdef,phydef)
        # Generate transportdaemonxml
        transportdaemonxml = generate_transportdaemonxml(nem_id,transportdef)
        # Generate evendaemonxml
        eventdaemonxml = generate_eventdaemonxml(nem_id, eventmanagergroup, eventmanagerdevice)
        # Generate gpsdlocationxml
        gpsdlocationxml = generate_gpsdlocationxml(nem_id)
        write_files(nem_id,experiment_dir,platformxml,transportdaemonxml,eventdaemonxml,gpsdlocationxml)
        nem_id += 1
    # Node configurations
    try :
        lines = [line.rstrip('\n') for line in open(node_conf_file)]
    except IOError :
        ERROR("Could not open node.conf file")
    locationxml = \
    """<?xml version="1.0" encoding="UTF-8"?>
    <EmulationScript xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="file:///usr/share/doc/emane-gen-emulationscript-0.5.3/EmulationScriptSchema.xsd">
    <Event>
        <time>0</time>
    """
    # there was a 0.5.3 here
    line_no = 0
    # node.conf lines are: node_id,lattitude,longitude,altitude,tdf,cmd
    for line in lines :
        line_no += 1
        if line.startswith("#") :
            continue
        line = line.replace('\t','')
        if len(line) <= 1 :
            continue
        params = line.split(",")
        if len(params) != 6 :
            ERROR("Node conf parser: Invalid number of configurations. Line_no %s" %line_no)
        try:
            node_id = int(params[0])
            if node_id <= 0 or node_id > n_nodes :
                ERROR("Node conf parser: node_id out of bounds. Line_no %s" %line_no)
            lattitude = float(params[1])
            longitude = float(params[2])
            altitude = float(params[3])
            tdf = int(params[4])
            if tdf < 1 :
                ERROR("Node conf parser: tdf must be >= 1. Line_no %s" %line_no)
            cmd = params[5]
            Node[node_id] = {}
            Node[node_id]["lattitude"] = lattitude
            Node[node_id]["longitude"] = longitude
            Node[node_id]["altitude"] = altitude
            Node[node_id]["tdf"] = tdf
            Node[node_id]["cmd"] = cmd
            locationxml += \
            """
            <Node id=\"""" + str(node_id) + """\">"""
            locationxml += \
            """
                <location>""" + str(lattitude) + "," + str(longitude) + "," + str(altitude) + "</location>"
            locationxml += \
            """
            </Node>
            """
        except (RuntimeError, TypeError, NameError) as e:
            print e
            ERROR("Node conf parser: Error at Line_no %s" %line_no)
    locationxml += \
    """	</Event>
    </EmulationScript>
    """
    # write locationxml in to the experiment directory
    #with open(experiment_dir + "/location.xml","w+") as f :
    #	f.write(locationxml)
    # Generate routing confs for the olsr routing protocol <experimental>
    routing_template_file = cwd + "/conf/templates/routing.conf.template"
    lxc_node_start_template_file = cwd + "/conf/templates/lxc-node-start.sh.template"
    lxc_node_stop_template_file = cwd + "/conf/templates/lxc-node-stop.sh.template"
    lxc_init_template_file = cwd + "/conf/templates/lxc-init.sh.template"
    if ENABLE_TIMEKEEPER == 1 :
        lxc_config_template_file = cwd + "/conf/templates/lxc-config.template.timekeeper"
    else :
        lxc_config_template_file = cwd + "/conf/templates/lxc-config.template"
    exp_start_file = cwd + "/conf/templates/exp-start.sh.template"
    exp_stop_file = cwd + "/conf/templates/exp-stop.sh.template"
    PATH_TO_READER = cwd + "/lxc-command/reader " + experiment_dir
    ROUTING_COMMAND = "olsrd -f "
    with open(routing_template_file) as f :
        routing_template = f.read()
    with open(lxc_node_start_template_file) as f :
        lxc_node_start_template = f.read()
    with open(lxc_init_template_file) as f :
        lxc_init_template = f.read()
    with open(lxc_config_template_file) as f:
        lxc_config_template = f.read()
    with open(lxc_node_stop_template_file) as f :
        lxc_node_stop_template = f.read()
    with open(exp_start_file) as f :
        exp_start_template = f.read()
    with open(exp_stop_file) as f :
        exp_stop_template = f.read()
    # --- instantiate the templates (replace @PLACEHOLDERS@) for every node ---
    nemid = 1
    while nemid <= n_nodes :
        temp = routing_template
        temp = temp.replace("@NODEID@",str(nemid))
        with open(experiment_dir + "/routing" + str(nemid) + ".conf","w+") as f :
            f.write(temp)
        # create lxc directories
        os.system("mkdir -p " + lxc_files_dir + "/" + str(nemid))
        os.system("mkdir -p " + lxc_files_dir + "/" + str(nemid) + "/var/lib")
        os.system("mkdir -p " + lxc_files_dir + "/" + str(nemid) + "/var/log")
        os.system("mkdir -p " + lxc_files_dir + "/" + str(nemid) + "/var/run")
        temp = lxc_node_start_template
        temp = temp.replace("@NODEID@",str(nemid))
        temp = temp.replace("@LXCNODEROOT@",lxc_files_dir + "/" + str(nemid))
        with open(lxc_files_dir + "/"+ str(nemid) + "/lxc-node-start.sh","w+") as f :
            f.write(temp)
        temp = lxc_init_template
        temp = temp.replace("@EMANEEXPROOT@",experiment_dir)
        temp = temp.replace("@NODEID@",str(nemid))
        temp = temp.replace("@LXCNODEROOT@",lxc_files_dir + "/" + str(nemid))
        temp = temp.replace("@ROUTINGCOMMAND@",ROUTING_COMMAND)
        if len(Node[nemid].keys()) != 0 :
            #temp = temp.replace("@LXC_COMMAND@",Node[nemid]["cmd"] + " " + str(nemid)) # pass nemid as last argument
            temp = temp.replace("@LXC_COMMAND@",PATH_TO_READER)
        else:
            temp = temp.replace("@LXC_COMMAND@","")
        with open(lxc_files_dir + "/"+ str(nemid) + "/init.sh","w+") as f :
            f.write(temp)
        temp = lxc_node_stop_template
        temp = temp.replace("@NODEID@",str(nemid))
        with open(lxc_files_dir + "/" + str(nemid) + "/lxc-node-stop.sh","w+") as f :
            f.write(temp)
        temp = lxc_config_template
        temp = temp.replace("@NODEIDIP@",str(Int2IP(IP2Int("10.99.0.0") + nemid)))
        temp = temp.replace("@NODEID@",str(nemid))
        if ENABLE_TIMEKEEPER == 0 :
            # Without TimeKeeper, pin alternate nodes to CPU pairs (0,1)/(2,3).
            if nemid % 2 == 0 :
                temp = temp.replace("@CPU1@",str(0))
                temp = temp.replace("@CPU2@",str(1))
            else :
                temp = temp.replace("@CPU1@",str(2))
                temp = temp.replace("@CPU2@",str(3))
        nemid_hex = str(hex(nemid))
        nemid_hex = nemid_hex[2:]
        while len(nemid_hex) < 4 :
            nemid_hex = "0" + nemid_hex
        nemid_hex = nemid_hex[0:2] + ":" + nemid_hex[2:]
        temp = temp.replace("@NODEIDHEX@",nemid_hex)
        temp = temp.replace("@OTAMANAGERDEVICE@",otamanagerdevice)
        with open(lxc_files_dir + "/" + str(nemid) + "/config","w+") as f :
            f.write(temp)
        temp = exp_start_template
        temp = temp.replace("@EXPERIMENT_DIR@", experiment_dir)
        with open(experiment_dir + "/exp-start.sh","w+") as f :
            f.write(temp)
        temp = exp_stop_template
        temp = temp.replace("@EXPERIMENT_DIR@",experiment_dir)
        with open(experiment_dir + "/exp-stop.sh","w+") as f :
            f.write(temp)
        nemid += 1
    generate_ARP_table(n_nodes)
    os.system("chmod -R 777 " + experiment_dir)
    os.system("chmod -R 777 " + lxc_files_dir)
    return Node,run_time,n_nodes,eventmanagergroup,timeslice
def send_command_to_node(node_name, cmd):
    """Hand *cmd* to a node by dropping it into /tmp/<node_name>."""
    target = "/tmp/" + node_name
    with open(target, "w+") as command_file:
        command_file.write(cmd)
# call exp_start_script here
def start_LXCs() :
    """(Re)load the TimeKeeper kernel module when enabled, then launch all LXCs
    via the generated exp-start.sh script."""
    if ENABLE_TIMEKEEPER == 1 :
        print "Removing Timekeeper module"
        # Remove a possibly stale module instance before inserting a fresh one.
        os.system("rmmod " + cwd + "/dilation-code/build/TimeKeeper.ko")
        time.sleep(1)
        print"Inserting Timekeeper module"
        os.system("insmod " + cwd + "/dilation-code/build/TimeKeeper.ko")
        time.sleep(1)
    print "Starting LXCs"
    script_path = "sudo " + experiment_dir + "/exp-start.sh"
    os.system(script_path)
    print"LXC's Started"
# call exp_stop_script here
def stop_LXCs(max_tdf = None) :
    """Tear down all LXCs via exp-stop.sh and archive the run's logs.

    Log files (plus the two config files) are moved into a timestamped
    subdirectory of experiment-data/ whose path encodes topology size and,
    when TimeKeeper was enabled, the maximum TDF used.
    """
    global node_conf_file
    global conf_file
    global topo_size
    print "Stopping LXCs"
    script_path = experiment_dir + "/exp-stop.sh"
    os.system(script_path)
    time.sleep(2)
    print "LXCs stopped"
    print "Storing Experiment Logs ... "
    dt = datetime.now()
    exp_name = str(dt)
    if ENABLE_TIMEKEEPER == 1 and max_tdf != None:
        exp_name = "TimeKeeper_Enabled/" + "Topo_Size_" + str(topo_size) + "/" + "TDF_" + str(max_tdf) + "/" + "Timestamp_" + exp_name
        #exp_name = "TimeKeeper_Enabled/E_TDF_" + str(max_tdf) + "_Timestamp_" + exp_name
    else :
        exp_name = "TimeKeeper_Disabled/" + "Topo_Size_" + str(topo_size) + "/" + "Timestamp_" + exp_name
        #exp_name = "TimeKeeper_Disabled/D_Timestamp_" + exp_name
    dest = cwd + "/experiment-data/" + exp_name
    if not os.path.exists(dest):
        os.makedirs(dest)
    # NOTE(review): the check below duplicates the one above and can never
    # trigger a second makedirs — harmless, but redundant.
    if not os.path.exists(dest):
        os.makedirs(dest)
    # Move every top-level log file (except .keep markers) into the archive.
    for the_file in os.listdir(cwd + "/experiment-data"):
        file_path = os.path.join(cwd + "/experiment-data", the_file)
        try:
            if os.path.isfile(file_path) and ".keep" not in str(file_path):
                shutil.copy(file_path, dest)
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                pass
        except Exception, e:
            print e
    # Archive the node.conf used for this run.
    file_path = os.path.join(node_conf_file)
    try:
        if os.path.isfile(file_path):
            shutil.copy(file_path, dest)
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            pass
    except Exception, e:
        print e
    # Archive the emane.conf used for this run.
    file_path = os.path.join(conf_file)
    try:
        if os.path.isfile(file_path):
            shutil.copy(file_path, dest)
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            pass
    except Exception, e:
        print e
    os.system("chmod -R 777 " + cwd + "/experiment-data")
def main():
    """Drive a full experiment: parse argv, configure, start LXCs, publish
    initial EMANE location events, apply TimeKeeper dilation, dispatch the
    per-node commands, wait for run_time (virtual or real seconds), then stop.

    Command line: deploy.py [conf_dir] [-D]; -D disables TimeKeeper.
    Helpers such as is_root/getpidfromname/dilate_all come from the star
    imports of timekeeper_functions/util_functions.
    """
    global conf_file
    global node_conf_file
    global ENABLE_TIMEKEEPER
    global max_tdf
    global topo_size
    os.system("sudo chmod -R 777 /tmp")
    os.system("sudo rm -rf /tmp/emane")
    if is_root() == 0 :
        print "Must be run as root"
        sys.exit(-1)
    arg_list = sys.argv
    if len(arg_list) == 1 :
        conf_file = cwd + "/conf/emane.conf"
        node_conf_file = cwd + "/conf/node.conf"
    else :
        i = 1
        while i < len(arg_list) :
            if arg_list[i] == "-D" :
                ENABLE_TIMEKEEPER = 0
            else :
                ENABLE_TIMEKEEPER = 1
                # NOTE(review): always reads arg_list[1] here, so the conf
                # directory must be the first positional argument.
                conf_files_dir = arg_list[1]
                if os.path.isdir(conf_files_dir) == True :
                    conf_file = conf_files_dir + "/emane.conf"
                    node_conf_file = conf_files_dir + "/node.conf"
                    if os.path.exists(conf_file) == False or os.path.exists(node_conf_file) == False :
                        print "Config files do not exist"
                        sys.exit(-1)
                else :
                    print "Config directory specified is incorrect"
                    sys.exit(-1)
            i = i + 1
    Node,run_time,n_nodes,eventmanagergroup,timeslice = configure()
    topo_size = n_nodes
    # create experiment-data directory
    with open(cwd + "/experiment-data/exp-info.txt","w") as f :
        f.write("Conf file path : " + conf_file + "\n")
        f.write("Node Conf file : " + node_conf_file + "\n")
        f.write("Run time : " + str(run_time) + "\n")
        f.write("N_nodes : " + str(n_nodes) + "\n")
    # copy node_config file and emane_conf file
    os.system("mkdir -p " + cwd + "/experiment-data")
    start_LXCs()
    print "Timeslice = ", timeslice
    print "Setting initial location values to all lxcs ..."
    nemid = 1
    # eventmanagergroup is "address:port"; publish over bridge device br0.
    temp_list = eventmanagergroup.split(":")
    eventmanagergroupaddress = temp_list[0]
    eventmanagergroupport = int(temp_list[1])
    service = EventService((eventmanagergroupaddress,eventmanagergroupport,'br0'))
    event = LocationEvent()
    i = 1
    # Build (but note: never publish) a symmetric 90 dB pathloss event.
    while i <= n_nodes:
        pathlossevt = PathlossEvent()
        j = 1
        while j <= n_nodes:
            if i != j:
                pathlossevt.append(j,forward=90,reverse=90)
            j = j + 1
        i = i + 1
    while nemid <= n_nodes :
        event.append(nemid,latitude=Node[nemid]["lattitude"],longitude=Node[nemid]["longitude"],altitude=Node[nemid]["altitude"])
        nemid = nemid + 1
    service.publish(0,event)
    time.sleep(2)
    # NOTE(review): the message says 30 sec but the sleep below is 50 sec.
    print "Location events published. All nodes set to initial positions. Waiting for 30 sec for routing updates to stabilize"
    time.sleep(50)
    # Timekeeper portion
    freeze_quantum = 1000000 # in nano seconds
    nemid = 1
    # Dilate each node's LXC process tree by its configured TDF.
    while nemid <= n_nodes :
        pid = int(getpidfromname("node-" + str(nemid)))
        print "PID of node ",nemid, " = ", pid, " TDF = ", Node[nemid]["tdf"]
        if pid != -1 and ENABLE_TIMEKEEPER == 1:
            dilate_all(pid,Node[nemid]["tdf"])
            addToExp(pid)
            if max_tdf < Node[nemid]["tdf"] :
                max_tdf = Node[nemid]["tdf"]
        nemid += 1
    # node-1's pid is used below to read the experiment's virtual clock.
    lxc_pid = int(getpidfromname("node-1"))
    if os.path.exists(cwd + "/exp_finished.txt") :
        os.unlink(cwd + "/exp_finished.txt")
    # send commands to execute to each LXC
    nemid = 1
    while nemid <= n_nodes :
        if nemid % 2 == 0 :
            process = subprocess.Popen(["python","lxc_command_dispatcher.py",str(0),str(nemid), Node[nemid]["cmd"]])
        else :
            process = subprocess.Popen(["python","lxc_command_dispatcher.py",str(1),str(nemid), Node[nemid]["cmd"]])
        nemid += 1
    print "Set freeze_quantum = ", freeze_quantum*max_tdf
    if ENABLE_TIMEKEEPER == 1 and max_tdf >= 1 :
        set_cpu_affinity(int(os.getpid()))
        set_cbe_experiment_timeslice(freeze_quantum*max_tdf)
        print "Timekeeper synchronizing ..."
        synchronizeAndFreeze()
        startExp()
        print "Synchronized CBE experiment started ..."
        start_time = int(get_current_virtual_time_pid(int(lxc_pid)))
        prev_time = start_time
        print "Experiment start time", start_time, " local Time = " + str(datetime.now())
        sys.stdout.flush()
    else :
        print "Experiment Started with TimeKeeper disabled - Ignoring TDF settings"
    # Wait for run_time seconds of virtual (TimeKeeper) or real time.
    try :
        k = 0
        while True :
            if ENABLE_TIMEKEEPER == 1 :
                curr_time = int(get_current_virtual_time_pid(int(lxc_pid)))
                if curr_time - start_time >= run_time :
                    break;
                else :
                    if curr_time - prev_time >= 1 :
                        k = k + (curr_time - prev_time)
                        print k," secs of virtual time elapsed"
                        prev_time = curr_time
            else :
                if k >= run_time :
                    break
                k= k + 1
                print k," secs of real time elapsed"
            # sleep until runtime expires
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    # stop Exp
    print "Stopping Synchronized experiment, local time = " + str(datetime.now())
    if ENABLE_TIMEKEEPER == 1 :
        stopExp()
        time.sleep(10)
    stop_LXCs(max_tdf)
def interrupt_handler(signum, frame):
global script_interrupted
global max_tdf
global ENABLE_TIMEKEEPER
if script_interrupted == 0 :
script_interrupted = 1
print "Interrupted. Stopping Experiment"
if ENABLE_TIMEKEEPER == 1 :
stopExp()
time.sleep(10)
stop_LXCs(max_tdf)
sys.exit(0)
if __name__ == "__main__" :
signal.signal(signal.SIGINT, interrupt_handler)
main()
| [
"vig2208@gmail.com"
] | vig2208@gmail.com |
ce80e27fa4f113d3fe3cbe6211c1770bc5d3cf5a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_054/ch117_2020_10_05_13_13_34_650168.py | 9a002163f43160a3ffe5a528f9801990fccc6592 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | import math
def snell_descartes (n1, n2 , tetta1):
tetta2 = math.asin(n1*math.sin(math.radians(tetta1))/n2)
return | [
"you@example.com"
] | you@example.com |
1295ed4099b439863d64290c637f977703fbdfa8 | 59c746c28bff4afcc99a13b4ddd9aa42365f3348 | /dashboard/forms.py | fc8e64f55aa24a9c972d5f06f17d354cc0fa2fd2 | [] | no_license | lucassimon/django-sendgrid | c228fe6f5bc871181c82c18c20837080fe6bb47f | 4e34fc0f7072ebcf06ee91764e220f0aa94904e6 | refs/heads/master | 2021-01-20T08:01:04.948950 | 2017-07-07T13:33:12 | 2017-07-07T13:33:12 | 90,080,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
# Stdlib imports
# Core Django imports
from django import forms
from django.utils.translation import ugettext as _
# Third-party app imports
# Realative imports of the 'app-name' package
from .models import Scheduling
class DashboardCustomerServiceForm(forms.Form):
start_date = forms.DateField()
end_date = forms.DateField()
class Meta:
fields = '__all__'
help_texts = {
'start_date': _(
_(u'Data Inicial.')
),
'end_date': _(
_(u'Data Final.')
),
}
widgets = {
'start_date': forms.DateInput(
attrs={
'class': 'form-control',
},
),
'end_date': forms.DateInput(
attrs={
'class': 'form-control',
},
),
} | [
"lucassrod@gmail.com"
] | lucassrod@gmail.com |
2f1c8c27aa6df64d188f2e053ca56184acd42928 | 952243fed6885563cb9631e3bea6f367cb19a30c | /calendars/views.py | 21013fbaf8c3647d2d467a3eccd72cdc131acdd1 | [] | no_license | Kang-kyunghun/batch_calendar | 8380d8ccad958341e9e0050f7a4b710ab0daa973 | 76570bfd3816001c3be7714554100cf7d57948c9 | refs/heads/main | 2023-08-01T11:51:27.045435 | 2021-04-25T07:48:48 | 2021-04-25T07:48:48 | 406,577,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,743 | py | import json
import requests
import progressbar
from time import sleep
from pprint import pprint
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from django.http import HttpResponse, JsonResponse
from django.views import View
from django.shortcuts import render, get_object_or_404
from .models import Batch, Calendar
token = "ya29.a0AfH6SMBtPkM8F-eKJD4liR4GwxJwL_IiBya-Z7vpkmdtXQ8dY3x3gOBSDSh3xAHM-Dr2u5gjkAF8vpHuVz61U3s0ky-vOH5CBSVpGkGbguy96P9LEL_Q8d_d1JX5qIeGDhjpyTitY7_puCahTJXq3QLr_uxaYStYZsFU9XVpGtw"
class BatchesView(View):
def get(self, request):
batch_list = Batch.objects.all()
output = ', '.join([batch.name for batch in batch_list])
context = {'batch_list' : batch_list}
return render(request, 'batches/index.html', context)
class CalendarsView(View):
def get(self, request):
calendar_list = Calendar.objects.all()
output = ', '.join([calendar.name for calendar in calendar_list])
context = {'calendar_list' : calendar_list}
return render(request, 'calendars/index.html', context)
class CalendarView(View):
def get(self, request, calendar_id):
calendar = get_object_or_404(Calendar, pk = calendar_id)
return render(request, 'calendars/detail.html', {'calendar' : calendar})
class GoogleCalendarsView(View):
def get(self, request):
google_calendar_list = requests.get('https://www.googleapis.com/calendar/v3/users/me/calendarList?access_token=' + token)
calendars = google_calendar_list.json()['items']
batch_calendars = [calendar for calendar in calendars]
output = ', '.join([calendar['summary'] for calendar in batch_calendars])
return JsonResponse({'result':batch_calendars}, status=200)
class GoogleCalendarEventsView(View):
def get(self, request, calendar_id):
event_list = requests.get(f'https://www.googleapis.com/calendar/v3/calendars/{calendar_id}/events?showDeleted=False&singleEvents=True&access_token=' + token)
print("CURRENT_CALENDAR : ", event_list.json()['summary'])
events = [
{
'id' : event['id'],
'name' : event.get('summary', None),
'start_time' : event['start']['dateTime'] if 'start' in event else None,
'end_time' : event['end']['dateTime'] if 'end' in event else None
} for event in event_list.json()['items']
]
return JsonResponse({'events' : events, 'number_of_events' : len(events)}, status=200)
def post(self, request, calendar_id):
payload = json.loads(request.body)
referenced_calendar_id = payload['referenced_calendar_id']
week_added = payload['week_added']
referenced_event_list = requests.get(f'https://www.googleapis.com/calendar/v3/calendars/{referenced_calendar_id}/events?showDeleted=False&singleEvents=True&access_token=' + token)
print('CURRENT_CALENDAR : ', referenced_event_list.json()['summary'])
events = referenced_event_list.json()['items']
for event in events:
print('CURRENT_EVENT: ', event['summary'])
print('DATE_TIME: ', event['start']['dateTime'][:10])
if datetime.strptime(event['start']['dateTime'], '%Y-%m-%dT%H:%M:%SZ') < datetime(2021, 2, 8):
body = {
'summary' : event['summary'].replace('[16기]',''),
'start' : { 'dateTime' : (datetime.strptime(event['start']['dateTime'],'%Y-%m-%dT%H:%M:%SZ') +
relativedelta(weeks=week_added)).strftime('%Y-%m-%dT%H:%M:%SZ') },
'end' : { 'dateTime' : (datetime.strptime(event['end']['dateTime'],'%Y-%m-%dT%H:%M:%SZ') + relativedelta(weeks=week_added)).strftime('%Y-%m-%dT%H:%M:%SZ') },
}
else:
body = {
'summary' : event['summary'].replace('[16기]',''),
'start' : { 'dateTime' : (datetime.strptime(event['start']['dateTime'],'%Y-%m-%dT%H:%M:%SZ') + relativedelta(weeks=week_added-1)).strftime('%Y-%m-%dT%H:%M:%SZ') },
'end' : { 'dateTime' : (datetime.strptime(event['end']['dateTime'],'%Y-%m-%dT%H:%M:%SZ') + relativedelta(weeks=week_added-1)).strftime('%Y-%m-%dT%H:%M:%SZ') },
}
# if '[16기]' in event['summary']:
# body['summary'] = event['summary'].replace('[16기]', '')
#
# if '[Back]' in event['summary']:
# body['summary'] = event['summary'].replace('[Back]', 'Session - Back |')
#
# if '[Front]' in event['summary']:
# body['summary'] = event['summary'].replace('[Front]', 'Session - Front |')
#
# if 'Code Kata' in event['summary']:
# body['summary'] = event['summary'].replace(event['summary'][-3]+'주차', 'week'+event['summary'][-3])
#
# if '1:1 면담' in event['summary']:
# continue
a = requests.post(f'https://www.googleapis.com/calendar/v3/calendars/{calendar_id}/events?access_token=' + token, json=body)
return JsonResponse({'result' : 'ok'}, status=200)
def delete(self, request, calendar_id):
event_list = requests.get(f'https://www.googleapis.com/calendar/v3/calendars/{calendar_id}/events?showDeleted=False&singleEvents=True&access_token=' + token).json()['items']
for event in event_list:
print(event['summary'])
event_id = event['id']
a = requests.delete(f"https://www.googleapis.com/calendar/v3/calendars/{calendar_id}/events/{event_id}?access_token=" + token)
return JsonResponse({'messae': 'ok'})
| [
"lsheon93@gmail.com"
] | lsheon93@gmail.com |
11f5989b5de10bec420830d71d06778129715373 | b68c92fe89b701297f76054b0f284df5466eb698 | /Other/Daily/InsertIntoSortedCircularList.py | ce812f2208b6741a286fbcf4ddf906fefa62ae38 | [] | no_license | makrandp/python-practice | 32381a8c589f9b499ab6bde8184a847b066112f8 | 60218fd79248bf8138158811e6e1b03261fb38fa | refs/heads/master | 2023-03-27T18:11:56.066535 | 2021-03-28T04:02:00 | 2021-03-28T04:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,278 | py | '''
Given a node from a Circular Linked List which is sorted in ascending order, write a function to insert a value insertVal into the list such that it remains a sorted circular list. The given node can be a reference to any single node in the list, and may not be necessarily the smallest value in the circular list.
If there are multiple suitable places for insertion, you may choose any place to insert the new value. After the insertion, the circular list should remain sorted.
If the list is empty (i.e., given node is null), you should create a new single circular list and return the reference to that single node. Otherwise, you should return the original given node.
'''
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, next=None):
self.val = val
self.next = next
"""
from typing import List
class Solution:
def insert(self, head: 'Node', insertVal: int) -> 'Node':
# Handling null
if head == None:
n = Node(insertVal)
n.next = n
return n
# Handling a size one
if head.next == head:
n = Node(insertVal, head)
head.next = n
return n
n = head
while True:
if n.next.val < n.val:
# We've reached the end
# Time to decide if we are going to insert here or just after
if n.val <= insertVal:
# Our insert val is greater than or equal to the maximum value
# We will insert here
break
elif insertVal <= n.next.val:
# We will insert at the bottom
break
if n.val <= insertVal and n.next.val >= insertVal:
break
n = n.next
# If we've ever reached the head, again, we have a circular array with all the same numbers
if n == head:
break
# Inserting
print(n.val)
pointNext = n.next
node = Node(insertVal, pointNext)
n.next = node
return head
| [
"awalexweber99@gmail.com"
] | awalexweber99@gmail.com |
2e810e00ffe4dad728bcd1f47ef0855f39af6474 | bffd93e3ba15915c5b929ac75303d2e124db6a24 | /app/api_v1_2/domain/app_infos.py | 7c5e8d4ec0d38e006312737af67c1cf3271c5fca | [] | no_license | themycode/MobileToolPlatform | fe7140ede1069495fd077364e7b932f3e7e8299d | 1569f06dcd9f3b9a4a699e47cf6724d90f8a84c8 | refs/heads/master | 2021-10-26T15:05:44.859610 | 2019-04-13T10:38:27 | 2019-04-13T10:38:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: v1.0
@author: jayzhen
@license: Apache Licence
@email: jayzhen_testing@163.com
@software: PyCharm
"""
class App(object):
def __init__(self):
self.uid = None
self.pid = None
self.cpu = None
self.mem = None
self.gfx = None
self.net = None
self.bat = None
self.fps = None | [
"jayzhen_testing@163.com"
] | jayzhen_testing@163.com |
925e3430251624099ef13779755194137a4bab3d | e489172f6e49e1239db56c047a78a29a6ffc0b36 | /via_cash_advance/__init__.py | f245fbd7937d61ea074a5a3fb9640e1fda5e7033 | [] | no_license | eksotama/prln-via-custom-addons | f05d0059353ae1de89ccc8d1625a896c0215cfc7 | f2b44a8af0e7bee87d52d258fca012bf44ca876f | refs/heads/master | 2020-03-25T19:49:08.117628 | 2015-12-01T07:29:43 | 2015-12-01T07:29:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | ##############################################################################
#
# Vikasa Infinity Anugrah, PT
# Copyright (c) 2011 - 2012 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import cash_advance_journal_selection
import cash_advance_establishment
import cash_advance_establishment_line
| [
"aero@aero.(none)"
] | aero@aero.(none) |
74292cb0826476d9a3ecacb7cec1ac1c8b7d879b | ac245e448cdf791f24ee71d2b89e5f13d5fb1fbb | /Betsy/attic/test_case.py | 1b1cfb33e35f0567ba4de2c8cf5114f31e8d7454 | [
"MIT"
] | permissive | jefftc/changlab | 86420c8ce0f3e11a9b1b00d49f17c6af87439f32 | d9688709cd1ce5185996637c57f001a543b5bb1d | refs/heads/master | 2023-05-24T18:59:25.875112 | 2023-05-11T20:15:26 | 2023-05-11T20:15:26 | 67,619,148 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 50,062 | py | from Betsy import rulebase
from Betsy import bie3
def run_case01():
in_data = rulebase.GEOSeries
out_data = rulebase.SignalFile.output(preprocess="rma",
format="tdf", logged="yes",gene_center='mean',#annotate='yes',
quantile_norm='yes',#contents="class0,class1"
)
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
print "INPUT:"
print in_data
print
print "OUTPUT:"
print out_data
print
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case02():
in_data = rulebase.GEOSeries
# Will generate network back to illumina preprocessing if
# SignalFile2 is given. Problem is that SignalFile cannot be
# shiftscale normalized.
out_data = rulebase.SignalFile.output(
preprocess="illumina",
format="tdf", logged="yes",
shiftscale_norm='yes'
)
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.complete_network(network)
network = bie3.optimize_network(network)
print "INPUT:"
print in_data
print
print "OUTPUT:"
print out_data
print
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case03():
in_data = rulebase.GEOSeries
out_data = rulebase.SignalFile.output(preprocess="illumina",
format="tdf", logged="yes",
)
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
print "INPUT:"
print in_data
print
print "OUTPUT:"
print out_data
print
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case04():
in_data = rulebase.GEOSeries
# The SignalFile2 should be created by the reorder_genes module.
# However, it can not create it because gene_center="no", by
# default. reorder_genes produces a SignalFile2 with
# gene_center="unknown", which conflicts. SignalFile2 has no way
# to check_gene_center.
#
# Work around is to make gene_center="unknown" and
# gene_normalize="unknown". Better solution is to rethink how the
# SignalFiles work.
out_data = rulebase.SignalFile.output(
preprocess="illumina",
format="tdf", logged="yes",
gene_order='t_test_p',
#gene_center="unknown", gene_normalize="unknown",
)
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.complete_network(network)
network = bie3.optimize_network(network)
print "INPUT:"
print in_data
print
print "OUTPUT:"
print out_data
print
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case05():
""" for each module,the attributes not mentioned will
be set to its default input value."""
in_data = rulebase.GEOSeries
out_data = rulebase.SignalFile.output(
preprocess="agilent", format="tdf", quantile_norm='yes')
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
print "INPUT:"
print in_data
print
print "OUTPUT:"
print out_data
print
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case06():
network = bie3.backchain(
rulebase.all_modules, rulebase.ActbPlot,
bie3.Attribute(rulebase.SignalFile, "preprocess", "rma"),
)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case07():
network = bie3.backchain(
rulebase.all_modules, rulebase.SignalFile,
bie3.Attribute(rulebase.SignalFile,"contents","class0,class1"),
bie3.Attribute(rulebase.SignalFile,"preprocess","rma"),
bie3.Attribute(rulebase.SignalFile,"quantile_norm","yes")
)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case08():
#test ClusterFile
# Heatmap requires SignalFile to be logged. Explicitly
# specificying logged=yes changes the network, even though they
# should in principle be the same.
in_data = rulebase.GEOSeries
network = bie3.backchain(
rulebase.all_modules, rulebase.Heatmap,
###specify this attribute or not make the network different
bie3.Attribute(rulebase.SignalFile, "logged", "yes"),
)
network = bie3.optimize_network(network)
print "INPUT:"
print in_data
print
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case09():
# command1 (command 1 and command 2 suppose to have the same
# result, but they are not)
# command 1
out_data = rulebase.SignalFile.output(
preprocess="rma",quantile_norm='yes',
gene_center='mean',gene_normalize='variance')
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
bie3.print_network(network, open("out1.log", 'w'))
bie3.plot_network_gv("out1.png", network)
# command 2
network = bie3.backchain(
rulebase.all_modules, rulebase.SignalFile,
bie3.Attribute(rulebase.SignalFile,"preprocess","rma"),
bie3.Attribute(rulebase.SignalFile,"quantile_norm","yes"),
bie3.Attribute(rulebase.SignalFile,'gene_center',"mean"),
bie3.Attribute(rulebase.SignalFile,'gene_normalize',"variance"))
network = bie3.optimize_network(network)
bie3.print_network(network, open("out2.log", 'w'))
bie3.plot_network_gv("out2.png", network)
def run_case10():
# the SignalFile has several preprocess not only 'mas5'
out_data = rulebase.SignalFile.output(
preprocess='mas5', contents="class0,class1")
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case11():
# New version of bie3 (2/20/14) runs too closly and generates
# "network too large" error. Older version finishes quickly.
if 0:
# No problems.
out_data = rulebase.SignalFile.output()
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
else:
# network too large.
out_data = rulebase.SignalFile.output()
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case12():
# the branches to merge module has only one GeoSeries, it supposed
# to have two, one is contents=class0, one is contents=class1
#out_data = rulebase.SignalFile.output(
# contents='class0,class1',preprocess='mas5')
out_data = rulebase.SignalFile.output(
bfrm_norm='no', combat_norm='no', contents='class1',
dwd_norm='no', filter='no', format='tdf', gene_center='no',
gene_normalize='no', logged='yes', missing_algorithm='zero_fill',
predataset='no', preprocess='mas5',
quantile_norm='no', shiftscale_norm='no')
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case13():
'''test cluster report,
ClusterFile cluster_alg should be pca, but it shows
four different cluster algorithms'''
network = bie3.backchain(
rulebase.all_modules, rulebase.ReportFile,
bie3.Attribute(rulebase.SignalFile, "preprocess", "mas5"),
bie3.Attribute(rulebase.ReportFile, "report_type", "cluster"),
bie3.Attribute(rulebase.SignalFile, "quantile_norm", "yes"),
bie3.Attribute(rulebase.ClusterFile, "cluster_alg", "pca"),
bie3.Attribute(rulebase.Heatmap, "cluster_alg", "pca"),
)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case14():
'''test normalize report,
requires PSF preprocess=unknown and contents=test,
but preprocess can be any of ['rma', 'mas5', 'agilent',
'loess', 'unknown'] and contents can any of
['train0', 'train1', 'test', 'class0,class1,test',
'class0', 'class1', 'class0,class1','unspecified']'''
network = bie3.backchain(
rulebase.all_modules, rulebase.ReportFile,
bie3.Attribute(rulebase.ReportFile,"report_type","normalize_file"),
bie3.Attribute(rulebase.SignalFile,"preprocess","unknown"),
bie3.Attribute(rulebase.SignalFile,"contents","test"),
bie3.Attribute(rulebase.SignalFile,"quantile_norm","yes")
)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case15():
# want PSF preprocess=illumina, but PSF that goes into
# rank_genes_by_class_neighbors has preprocess unknown.
#
# Problem: Why does no PSF with preprocess=illumina point to
# rank_genes_by_class_neighbors?
# Answer: rank_genes_by_class_neighbors takes PrettySignalFile.
# In default PrettySignalFile, output preprocess=unknown.
#out_data = rulebase.PrettySignalFile.output(
# gene_order='class_neighbors', preprocess='illumina')
#network = bie3.backchain(rulebase.all_modules, out_data)
#network = bie3.optimize_network(network)
#bie3.write_network("test.network", network)
#network = bie3.read_network("test.network")
#bie3.complete_network(network)
#network = bie3.optimize_network(network)
out_data = rulebase.SignalFile.output(
gene_order='class_neighbors', preprocess='illumina')
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.complete_network(network)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case16():
"""the difference between node 59 and node 191 is the
sf_processing_step, if we have an input SignalFile as node 66,
the pipeline will go to node 59 but no way to go module 6."""
# 59. SignalFile gene_normalize="no"
# sf_processing_step="processed"
# 66. SignalFile gene_normalize="unknown"
# sf_processing_step="normalize"
# 191. SignalFile gene_normalize="no"
# sf_processing_step="merge"
# Problem: Input file with gene_normalize="unknown" cannot be used
# to normalize_samples_with_dwd.
# SignalFile [59] should be acceptable as input for
# normalize_samples_with_dwd.
# 66 -> check_gene_normalize -> 59 -> convert_label_to_cls ->
# 21 -> normalize_samples_with_dwd [6]
# 191 -> normalize_samples_with_dwd [6]
#
# normalize_samples_with_dwd requires sf_processing_step to be
# "merge".
#
# Is this a problem? Node 66 should not be an input. Inputs
# should have an earlier processing step (e.g. "postprocess"). In
# the network, nodes higher up do go into
# normalize_samples_with_dwd [6].
# Processing steps:
# postprocess -> impute -> merge -> normalize -> processed
out_data = rulebase.SignalFile.output(dwd_norm='yes')
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case17():
'''test the 'network too large error',
I have changed the MAX_NETWORK_SIZE to 10024, the
out network is about 768 nodes and does not pop
'network too large' error'''
network = bie3.backchain(
rulebase.all_modules, rulebase.ClassifyFile,
bie3.Attribute(rulebase.ClassifyFile,"classify_alg","weighted_voting"),
bie3.Attribute(rulebase.SignalFile,"quantile_norm","yes")
)
network = bie3.complete_network(network)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case18():
"""Result that generates:
A network with 232 nodes. Node 2 and Node 3 are following:
2. Data(ClusterFile, cluster_alg='pca', contents='unspecified',
distance=['correlation', 'euclidean'])
3. Data(Heatmap, cluster_alg='pca', color='red_green',
contents='unspecified', distance=['correlation', 'euclidean'],
hm_height='yes', hm_width='yes')
Result I expected:
distance in Node 2 and Node 3 should be set to default because we
did not specify it.
That is: distance='correlation'.
JC: The distance is specified in the make_cluster_report Module:
Constraint("distance", CAN_BE_ANY_OF, ['correlation','euclidean'], 0),
Defaults are used only if no other information is available.
"""
network = bie3.backchain(
rulebase.all_modules, rulebase.ReportFile,
bie3.Attribute(rulebase.SignalFile, "preprocess", "mas5"),
bie3.Attribute(rulebase.ReportFile, "report_type", "cluster"),
bie3.Attribute(rulebase.SignalFile, "quantile_norm", "yes"),
bie3.Attribute(rulebase.ClusterFile, "cluster_alg", "pca"),
bie3.Attribute(rulebase.Heatmap, "cluster_alg", "pca"),
)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case19():
"""Result that generates:
A network with 127 nodes. Node 2 and Node 3 are following:
2. Data(ClusterFile, cluster_alg=['som', 'pca', 'kmeans', 'hierarchica
l'], contents='unspecified', distance=['correlation', 'euclidean'])
3. Data(Heatmap, cluster_alg=['som', 'pca', 'kmeans', 'hierarchical'],
color='red_green', contents='unspecified', distance=['correlatio
n', 'euclidean'], hm_height='yes', hm_width='yes')
Result I expected:
distance and cluster_alg in Node 2 and Node 3 should be set
to default because we did not specify it.
That is: distance='correlation', cluster_alg = 'kmeans'.
JC: The distance and cluster_alg is specified in the
make_cluster_report Module:
Constraint("cluster_alg",CAN_BE_ANY_OF,['som','pca','kmeans',
'hierarchical'],0),
Constraint("distance", CAN_BE_ANY_OF, ['correlation','euclidean'], 0),
Defaults are used only if no other information is available.
"""
network = bie3.backchain(
rulebase.all_modules, rulebase.ReportFile,
bie3.Attribute(rulebase.SignalFile, "preprocess", "mas5"),
bie3.Attribute(rulebase.ReportFile, "report_type", "cluster"),
)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case20():
"""Result that generates:
A network with 368 nodes.
Problem: The network is different from the result using old
bie3.py. The output of Module 49 goes to multiple different
SignalFile. It should only go to one SignalFile. In that
SignalFile, the attributes that are not specified in the
get_illumina_signal module are set to default.
JC: I believe this is the correct behavior because each one of the
output files can be generated by the get_illumina_signal Module,
as the Module has been described.
The output Data objects of get_illumina_signal have varying values
for attributes predataset, quantile_normalize, gene_normalize,
etc. The get_illumina_signal needs more consequences to describe
the values of these parameters. E.g. There should be a
Consequence that sets gene_normalize to "no".
Made some changes to address case22, and now get_illumina_signal
is not generated. Not sure what is the issue. Will look again
after implementation of new Signal files (and removal of
processing_step attribute).
"""
#out_data = rulebase.PrettySignalFile.output(
# preprocess='illumina', missing_algorithm="zero_fill",
# missing_values='no', logged='yes', quantile_norm="yes",
# predataset='yes')
out_data = rulebase.SignalFile.output(
preprocess='illumina',
format="gct",
logged="yes",
)
# #missing_values="no",
# #missing_algorithm="zero_fill",
# #quantile_norm="yes")
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.complete_network(network)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case21():
"""AssertionError: Module make_normalize_report requires a
PrettySignalFile with preprocess=unknown, but user requests it to
be mas5.
Problem: I have added the constraint of preprocess for PcaPlot to
be SAME_AS PrettySignalFile. But for other attributes, it still
get the error. Do we need to constraint all the attributes in
PrettySignalFile and PcaPlot?
JC: Fixed. Will accept user constraints now.
"""
network = bie3.backchain(
rulebase.all_modules, rulebase.ReportFile,
bie3.Attribute(rulebase.ReportFile,"report_type","normalize_file"),
bie3.Attribute(rulebase.SignalFile,"preprocess","mas5"),
bie3.Attribute(rulebase.SignalFile,"contents","test"),
bie3.Attribute(rulebase.SignalFile,'gene_center',"median"),
)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case22():
"""Result to get: only the node 0 in the network.
Need to change the priority of the attributes value:
1. constraint for priority
2. get from output
3. user input
4. default
JC: I'm not sure this problem will be fixed with a change in the
priority. I thought there was another case where PrettySignalFile
was an internal node?
This is only generating 1 node in the network because if
PrettySignalFile gene_order=t_test_p, then transfer will no longer
be able to generate it. It requires gene_order=no.
"""
network = bie3.backchain(
rulebase.all_modules, rulebase.SignalFile,
bie3.Attribute(rulebase.SignalFile,"gene_order","t_test_p"),
)
network = bie3.complete_network(network)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case23():
"""cannot trace back to GeoSeries to generate the
ExpressionFile and preprocess with illumina.
Expected a network with the nodes:
DATA MODULE
GEOSeries -> download_geo ->
ExpressionFiles -> extract_illumina_idat_files ->
IDATFiles -> preprocess_illumina ->
ILLUFolder -> get_illumina_signal ->
SignalFile_Postprocess -> convert_signal_to_tdf ->
SignalFile_Postprocess
However, we currently only get a network:
DATA MODULE
SignalFile_Postprocess -> check_for_log ->
SignalFile_Postprocess
"""
out_data = rulebase.SignalFile.output(
preprocess="illumina",
format="tdf",
#logged="no",
logged="yes",
)
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case24():
"""Generate a network start from SignalFile_order, cannot trace back
to SignalFile_Postprocess.
Expected a network with the nodes:
DATA MODULE
SignalFile_Postprocess -> convert_signal_to_tdf ->
SignalFile_Postprocess -> check_for_log ->
SignalFile_Postprocess -> log_signal ->
SignalFile_Postprocess -> convert_postprocess_impute ->
SignalFile_Impute -> fill_missing_with_zeros ->
SignalFile_Impute -> convert_impute_merge ->
SignalFile_Merge -> convert_merge_normalize ->
SignalFile_Normalize -> check_gene_center ->
SignalFile_Normalize -> check_gene_normalize ->
SignalFile_Normalize -> convert_normalize_order ->
SignalFile_Order,ClassLableFile-> rank_genes_by_sample_ttest ->
GeneListFile,SignalFile_Order-> reorder_genes ->
SignalFile_Order -> convert_order_annotate ->
SignalFile_Annotate -> convert_annotate_filter ->
SignalFile
However, we currently get a network:
DATA MODULE
SignalFile_Order -> convert_order_annotate ->
SignalFile_Annotate -> convert_annotate_filter ->
SignalFile
"""
out_data = rulebase.SignalFile.output(
format="tdf", logged="yes",
gene_order='t_test_p',
)
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case25():
"""
cannot trace back to GEOSeries and SignalFile_Postprocess to
generate SignalFile_Merge with preprocess=aiglent.
Expected a network generate from GeoSeries or SignalFile_Postprocess
The Data path in the network is like:
GEOSeries -> SignalFile_Postprocess ->
SignalFile_Impute -> SignalFile_Merge -> (plot_actb_line) ->
ActPlot
However, we currently get a network with only one node
Data(ActbPlot, contents='unspecified')
"""
network = bie3.backchain(
rulebase.all_modules, rulebase.ActbPlot,
bie3.Attribute(rulebase.ActbPlot, "preprocess", "agilent"),
)
network = bie3.complete_network(network)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
def run_case26():
    """Normalize report: expected a single SignalFile_Postprocess source.

    Expected a network rooted at SignalFile_Postprocess with
    contents='test' and preprocess='unknown', where make_normalize_report
    takes five inputs (SignalFile, IntensityPlot, ControlPlot, PcaPlot,
    ActbPlot).  SignalFile goes through the usual
    Postprocess -> Impute -> Merge -> Normalize -> Order -> Annotate
    chain; the plots derive from SignalFile (ActbPlot from
    SignalFile_Merge).

    Observed instead: three SignalFile_Postprocess nodes with different
    "contents" values, plus ExpressionFiles/AgilentFiles/GPRFiles sources
    that give SignalFile mixed "contents"/"preprocess" values, and the
    plot nodes are not generated from any other Data node.
    """
    net = bie3.backchain(
        rulebase.all_modules, rulebase.ReportFile,
        bie3.Attribute(rulebase.ReportFile, "report_type", "normalize_file"),
        bie3.Attribute(rulebase.SignalFile, "preprocess", "unknown"),
        bie3.Attribute(rulebase.SignalFile_Merge, "preprocess", "unknown"),
        bie3.Attribute(rulebase.SignalFile, "contents", "test"),
        #bie3.Attribute(rulebase.SignalFile, "quantile_norm", "yes")
    )
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case27():
    """Cluster report: three observed problems.

    1. Cannot trace back to GEOSeries for preprocess mas5.
    2. The HeatMap node is not generated from ClusterFile.
    3. cluster_alg is constrained to pca but other algorithms appear.

    Expected path:
        GEOSeries -> download_geo -> ExpressionFile -> ... -> SignalFile ->
        cluster_genes_by_pca -> ClusterFile -> plot_heatmap -> HeatMap;
        ClusterFile + HeatMap -> make_cluster_report -> ReportFile
    Observed: the network starts at ExpressionFile rather than GEOSeries,
    allows every cluster_alg, and HeatMap is isolated from ClusterFile.
    """
    net = bie3.backchain(
        rulebase.all_modules, rulebase.ReportFile,
        bie3.Attribute(rulebase.SignalFile, "preprocess", "mas5"),
        bie3.Attribute(rulebase.ReportFile, "report_type", "cluster"),
        bie3.Attribute(rulebase.SignalFile, "quantile_norm", "yes"),
        bie3.Attribute(rulebase.ClusterFile, "cluster_alg", "pca"),
        bie3.Attribute(rulebase.Heatmap, "cluster_alg", "pca"),
    )
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case28():
    """Reproduces a crash inside bie3._can_module_produce_data:

        File ".../bie3.py", line 2533, in _can_module_produce_data
          if x.name == conseq2.name and x.input_index == const2.arg1]
        NameError: global name 'conseq2' is not defined

    triggered from bie3.backchain via _backchain_to_modules.
    """
    goal = rulebase.SignalFile.output(group_fc='yes')
    net = bie3.backchain(rulebase.all_modules, goal)
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case29():
    """Expected a network rooted at GEOSeries where a single
    SignalFile_Merge node feeds both the main pipeline and
    convert_label_cls.

    Expected data paths:
        GEOSeries -> ... -> SignalFile_Merge -> ... -> SignalFile_Order
            -> SignalFile
        SignalFile_Merge + ClassLabelFile -> (convert_label_cls) ->
            ClassLabelFile
        SignalFile_Order + ClassLabelFile -> (rank_genes_sample_ttest) ->
            GeneListFile
        SignalFile_Order + GeneListFile -> (reorder_genes) ->
            SignalFile_Order
    Observed: the input to convert_label_cls is generated from a
    SignalFile_Postprocess with preprocess=unknown instead of GEOSeries;
    node 17 (SignalFile_Merge, preprocess="illumina") and node 54
    (SignalFile_Merge, preprocess="unknown") were expected to be one node.
    JC: nodes 17 and 54 have different preprocess, and in principle a cls
    could come from SignalFiles with different preprocessing; the real
    issue is that node 17 should point to node 73 (convert_label_to_cls).
    Before optimization, ClassLabelFile (118) + SignalFile_Merge (25)
    should go into convert_label_to_cls (116, 117).
    """
    goal = rulebase.SignalFile.output(
        preprocess="illumina", format="tdf", logged="yes",
        gene_order='t_test_p')
    net = bie3.backchain(rulebase.all_modules, goal)
    net = bie3.complete_network(net)
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case30():
    """Normalize report: compare pre- and post-normalization PcaPlots.

    Expected a network from GEOSeries where make_normalize_report takes
    six inputs: SignalFile, IntensityPlot, ControlPlot, ActbPlot and two
    PcaPlots.  The first PcaPlot must share every normalization-related
    attribute (quantile_norm, combat_norm, shiftscale_norm, bfrm_norm,
    dwd_norm, gene_center, gene_normalize, unique_genes, platform,
    group_fc, num_features, duplicate_probes) with SignalFile,
    defaulting where the user left it unspecified; the second PcaPlot
    must have all of them set to 'no'.  The two plots then show the
    signal before and after normalization.

    Observed: attributes not specified by the user may take any value
    (e.g. bfrm_norm=['yes', 'no'], unique_genes=['average_genes',
    'high_var', 'first_gene'], ...), so the paths from node 27 to node 2
    explode combinatorially, and nodes expected to be merged (64 and 58,
    86 and 84) stay separate.  JC traced this to analyze_samples_pca /
    plot_sample_pca_wo_label carrying CAN_BE_ANY_OF constraints on
    bfrm_norm, so PcaPlot [5] inherits ["yes", "no"] instead of the
    output default; setting unspecified attributes to the output default
    is the proposed (tentative) fix.
    """
    net = bie3.backchain(
        rulebase.all_modules, rulebase.ReportFile,
        bie3.Attribute(rulebase.ReportFile, "report_type", "normalize_file"),
        bie3.Attribute(rulebase.SignalFile, "preprocess", "illumina"),
        bie3.Attribute(rulebase.SignalFile, "contents", "test"),
        bie3.Attribute(rulebase.SignalFile, "quantile_norm", "yes"),
        bie3.Attribute(rulebase.SignalFile, 'gene_center', "median"),
    )
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case31():
    """Batch-effect removal: needs a function to choose the order in
    which the different normalization methods are applied.

    A previous revision enforced the ordering with a series of
    bie3.remove_data_node calls (quantile before dwd/shiftscale/bfrm/
    combat, and bfrm before dwd/shiftscale/combat); those are disabled
    here — see the VCS history.
    """
    goal = rulebase.SignalFile.output(
        preprocess='illumina',
        missing_algorithm="zero_fill",
        format='gct',
        logged='no',
        filter='yes',
        quantile_norm="yes",
        dwd_norm='yes',
    )
    net = bie3.backchain(rulebase.all_modules, goal)
    net = bie3.complete_network(net)
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case32():
    """Test multiple batch-effect-removal methods.

    Expected:
        SignalFile_Postprocess -> ... -> SignalFile_Impute ->
        (convert_impute_merge) -> SignalFile_Merge [18]
        SignalFile_Merge [18] + ClassLabelFile [17] ->
        (normalize_samples_with_dwd [40, 16]) -> SignalFile_Merge [39] ->
        (normalize_samples_with_quantile [15]) -> SignalFile_Merge [14]
    Observed: normalize_samples_with_dwd [40] has only SignalFile_Merge
    [18] as input, and [16] has only ClassLabelFile [17]; nodes 40 and 16
    should be one node.
    """
    out_data = rulebase.SignalFile.output(
        quantile_norm="yes",
        dwd_norm='yes',
    )
    network = bie3.backchain(rulebase.all_modules, out_data)
    # Make sure dwd occurs before quantile_norm.
    bie3.plot_network_gv("out_before.png", network)
    # Use context managers so the log files are flushed and closed
    # deterministically (the original passed open() results inline and
    # leaked the file handles).
    with open("out_before.log", 'w') as handle:
        bie3.print_network(network, handle)
    network = bie3.remove_data_node(
        network,
        bie3.Attribute(rulebase.SignalFile_Merge, "dwd_norm", "no"),
        bie3.Attribute(rulebase.SignalFile_Merge, "quantile_norm", "yes"),
    )
    bie3.plot_network_gv("out_after.png", network)
    with open("out_after.log", 'w') as handle:
        bie3.print_network(network, handle)
    network = bie3.complete_network(network)
    network = bie3.optimize_network(network)
    bie3.print_network(network)
    bie3.plot_network_gv("out.png", network)
def run_case33():
    """Test three batch-effect-removal methods.

    Expected:
        SignalFile_Postprocess -> ... -> SignalFile_Impute ->
        (convert_impute_merge) -> SignalFile_Merge [19] ->
        (normalize_samples_with_quantile) -> SignalFile_Merge [17]
        SignalFile_Merge [17] + ClassLabelFile [16] ->
        (normalize_samples_with_shiftscale) -> SignalFile_Merge
        SignalFile_Merge + ClassLabelFile [16] ->
        (normalize_samples_with_dwd) -> SignalFile_Merge
    Observed: normalize_samples_with_shiftscale is missing;
    SignalFile_Merge [19] and [17] both go to convert_label_to_cls
    instead of normalize_samples_with_shiftscale.
    """
    out_data = rulebase.SignalFile.output(
        quantile_norm="yes",
        dwd_norm='yes',
        shiftscale_norm="yes"
    )
    network = bie3.backchain(rulebase.all_modules, out_data)
    # Make sure quantile occurs before dwd and shiftscale.
    bie3.plot_network_gv("out_before.png", network)
    # Context managers close the log files promptly (the original leaked
    # the handles returned by the inline open() calls).
    with open("out_before.log", 'w') as handle:
        bie3.print_network(network, handle)
    network = bie3.remove_data_node(
        network,
        bie3.Attribute(rulebase.SignalFile_Merge, "dwd_norm", "yes"),
        bie3.Attribute(rulebase.SignalFile_Merge, "quantile_norm", "no"),
    )
    network = bie3.remove_data_node(
        network,
        bie3.Attribute(rulebase.SignalFile_Merge, "shiftscale_norm", "yes"),
        bie3.Attribute(rulebase.SignalFile_Merge, "quantile_norm", "no"),
    )
    # Make sure shiftscale occurs before dwd.
    network = bie3.remove_data_node(
        network,
        bie3.Attribute(rulebase.SignalFile_Merge, "shiftscale_norm", "no"),
        bie3.Attribute(rulebase.SignalFile_Merge, "dwd_norm", "yes"),
    )
    bie3.plot_network_gv("out_after.png", network)
    with open("out_after.log", 'w') as handle:
        bie3.print_network(network, handle)
    network = bie3.complete_network(network)
    network = bie3.optimize_network(network)
    bie3.print_network(network)
    bie3.plot_network_gv("out.png", network)
def run_case34():
    """Batch-effect-removal report takes a long time to build.

    With 8 files in the report it runs quickly, but with 10 or 12 files
    it takes a few hours.  backchain alone is ~1.6s; the time is spent in
    backchain + complete_network.
    """
    goal = rulebase.ReportFile.output(report_type="batch_effect_remove")
    net = bie3.backchain(rulebase.all_modules, goal)
    net = bie3.complete_network(net)
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case35():
"""Testing code for finding input nodes."""
out_data = rulebase.GenesetAnalysis
network = bie3.backchain(rulebase.all_modules, out_data)
network = bie3.complete_network(network)
network = bie3.optimize_network(network)
bie3.print_network(network)
bie3.plot_network_gv("out.png", network)
print
print "Possible Inputs"
inputs = bie3.get_inputs(network)
dt2inputs = bie3.group_inputs_by_datatype(network, inputs)
for i, dt in enumerate(sorted(dt2inputs)):
x = [x.name for x in dt]
print "%d. %s" % (i+1, ", ".join(x))
for j, inputs in enumerate(dt2inputs[dt]):
for k, inp in enumerate(inputs):
node = network.nodes[inp]
assert isinstance(node, bie3.Data)
print node.datatype.name
for name in sorted(node.attributes):
print "%s%s=%s" % (" "*5, name, node.attributes[name])
print
print
def run_case36():
    """Reproduces an error raised from inside bie3.py:

        NameError: global name 'is_subset' is not defined
    """
    net = bie3.backchain(
        rulebase.all_modules, rulebase.SignalFile_Order,
        bie3.Attribute(rulebase.SignalFile_Order, "gene_order", "diff_sam"),
    )
    net = bie3.complete_network(net)
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case37():
    """Differential-expression analysis.

    Expected:
        SignalFile_Postprocess -> ... -> SignalFile ->
        (cal_diffexp_with_ttest) -> DiffExprFile
    Observed: DiffExprFile appears only as an input node, with no
    SignalFile path generating it.  Several alternative backchain
    targets (SignalFile_Filter and GeneListFile variants) were tried in
    earlier revisions — see the VCS history; one of them produced a
    truncated network for an unknown reason.
    """
    net = bie3.backchain(
        rulebase.all_modules, rulebase.SignalFile,
        bie3.Attribute(rulebase.SignalFile, "gene_order", "diff_ttest"),
        bie3.Attribute(rulebase.GeneListFile, "contents", "diff_unspecified"),
    )
    net = bie3.complete_network(net)
    net = bie3.optimize_network(net)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case38():
    """make_normalize_report dispatch by preprocess.

    With preprocess=illumina we expect ReportFile to be produced only by
    make_normalize_report_illumina, but it is also produced by
    make_normalize_report and make_normalize_report_rma; the expected
    network is just the illumina branch of what is generated.
    140914 JTC: does not appear to be a bug in the inference engine —
    the ReportFile data type needs to be split, with some way in the
    rules to specify the desired preprocessing.
    """
    user_attributes = [
        bie3.Attribute(rulebase.SignalFile, "preprocess", "illumina"),
        bie3.Attribute(rulebase.SignalFile, "quantile_norm", "yes"),
    ]
    net = bie3.backchain(
        rulebase.all_modules, rulebase.ReportFile, user_attributes)
    net = bie3.complete_network(net, user_attributes)
    net = bie3.optimize_network(net, user_attributes)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case39():
    """make_normalize_report with no normalization requested.

    Asking for a ReportFile with no normalization (no changes between
    SignalFile_Merge and SignalFile) yields a 1-node network.
    make_normalize_report takes two PcaPlots: one constrained to "no
    normalization", the other free.  With no normalization requested the
    two PcaPlots become identical, which appears to create the conflict.
    Requesting e.g. quantile_norm=yes (as in case38) produces a full
    network:
        make_normalize_report [1] <- SignalFile [2],
        PcaPlot [5] quantile_norm="yes", PcaPlot [7] quantile_norm="no",
        ActbPlot [6], IntensityPlot [3], ControlPlot [4]
    Without it, only one PcaPlot (quantile_norm="no") feeds the report.
    To reproduce: turn off optimize_network and comment out
    make_normalize_report_rma and make_normalize_report_illumina.
    """
    user_attributes = [
        bie3.Attribute(rulebase.SignalFile, "preprocess", "illumina"),
        #bie3.Attribute(rulebase.SignalFile, "quantile_norm", "yes"),
    ]
    net = bie3.backchain(
        rulebase.all_modules, rulebase.ReportFile, user_attributes)
    # Debugging helpers, kept disabled:
    #prev_ids = bie3._backchain_to_ids(net, 1)
    #x = bie3._get_valid_input_combinations(
    #    net, 1, prev_ids, user_attributes)
    #print x
    #net = bie3.optimize_network(net, user_attributes)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case41():
"""The start node of this network is GEOSeries[41]. We want the
select_start_node() function can generate a network which from
ExpressionFiles[39] and below.
140914 JTC Implemented now.
"""
user_attributes = [
bie3.Attribute(rulebase.SignalFile, "preprocess", "illumina"),
bie3.Attribute(rulebase.SignalFile, "quantile_norm","yes"),
]
network = bie3.backchain(
rulebase.all_modules, rulebase.SignalFile, user_attributes)
network = bie3.complete_network(network, user_attributes)
network = bie3.optimize_network(network, user_attributes)
bie3.print_network(network)
bie3.plot_network_gv("out_before.png", network)
fn = getattr(rulebase, 'ExpressionFiles')
in_data = fn.input()
start_node = bie3._find_start_nodes(network, in_data)
print 'start_node',start_node
# Here is the function to generate new network. We expect it is
# from Node[39] and all the nodes below. Get rid of the Node [41]
# and Node[40].
network = bie3.select_start_node(network, in_data)
bie3.print_network(network)
bie3.plot_network_gv("out_after.png", network)
def run_case42():
    """Exercise the DEFAULT_INPUT_ATTRIBUTE_IS_ALL_VALUES flag."""
    user_attributes = [
        #bie3.Attribute(rulebase.SignalFile, "gene_center", "mean")
    ]
    net = bie3.backchain(
        rulebase.all_modules, rulebase.Heatmap, user_attributes)
    net = bie3.complete_network(net, user_attributes)
    net = bie3.optimize_network(net, user_attributes)
    bie3.print_network(net)
    bie3.plot_network_gv("out.png", net)
def run_case43():
"""
Generates a network about 2 minutes. For optimization.
"""
import time
user_attributes=[
bie3.Attribute(rulebase.SignalFile, "gene_center", "mean"),
bie3.Attribute(rulebase.SignalFile, "gene_normalize", "variance"),
bie3.Attribute(rulebase.SignalFile,"predataset",'yes'),
bie3.Attribute(rulebase.SignalFile,"gene_order",'class_neighbors'),
bie3.Attribute(rulebase.SignalFile,"predataset",'yes'),
bie3.Attribute(rulebase.SignalFile,"annotate",'yes'),
bie3.Attribute(rulebase.SignalFile,"rename_sample",'yes'),
]
start = time.strftime("%H:%M:%S")
print start
network = bie3.backchain(rulebase.all_modules,
rulebase.NetworkFile_Test,user_attributes)
network = bie3.complete_network(network,user_attributes)
network = bie3.optimize_network(network,user_attributes)
stop = time.strftime("%H:%M:%S")
print stop
bie3.print_network(network)
#bie3.plot_network_gv("out.png", network)
def run_case44():
"""
Test the bie3.get_inputs() function. It runs too slow.
"""
user_attributes = []
network = bie3.backchain(
rulebase.all_modules, rulebase.DiffReportFile, user_attributes)
network = bie3.complete_network(network, user_attributes)
network = bie3.optimize_network(network, user_attributes)
bie3.plot_network_gv("out.png", network)
print 'generate network'
inputs = bie3.get_inputs(network, user_attributes)
print "Num inputs %d" % len(inputs)
#dt2inputs = bie3.group_inputs_by_datatype(network, inputs)
print 'done'
def main():
    # Each run_caseNN function above reproduces one inference-engine
    # issue.  Enable the case under investigation here; the others
    # (run_case01 .. run_case43) are defined earlier in this file.
    run_case44()
# Script entry point.  Swap main() for the commented cProfile line to
# profile the active test case.
if __name__ == '__main__':
    main()
    #import cProfile; cProfile.run("main()")
| [
"jeffrey.t.chang@uth.tmc.edu"
] | jeffrey.t.chang@uth.tmc.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.