| column | type | observed values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable (⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
360ba0b1b762d53dadf4c5dd5863880fcc1a54a8
|
e1c9acaa95b467a08483ff1d3cb1d4b145b68967
|
/main.py
|
ef8c063cac066699603ede049d23a071b88b264f
|
[] |
no_license
|
FredericStraub/CalendarApp
|
de7e172dc0bddd80619cc51ee2496f64afd44553
|
9936762042f99508af39be36744501ea1e04c010
|
refs/heads/main
| 2023-02-27T05:25:24.422060
| 2021-02-04T21:21:23
| 2021-02-04T21:21:23
| 336,084,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,078
|
py
|
from kivymd.app import MDApp
from kivy.lang import Builder
from kivymd.theming import ThemeManager
from kivy.uix.boxlayout import BoxLayout
from kivymd.uix.picker import MDDatePicker, MDThemePicker, MDTimePicker
from kivymd.uix.expansionpanel import MDExpansionPanel, MDExpansionPanelThreeLine
import datetime
from kivymd.uix.label import MDLabel
from kivy.uix.textinput import TextInput
from kivy.uix.popup import Popup
from kivy.uix.floatlayout import FloatLayout
class Inputtext(FloatLayout):
pass
class Content(BoxLayout):
pass
class MainApp(MDApp):
#define builder
def build(self):
self.theme_cls.theme_style = "Dark"
self.meetingname = ""
self.dates = {}
self.temp_date_storage = None
self.popup = None
#sets meeting name and picks time
def set_meeting_name(self):
self.meetingname = self.popup.content.ids.input.text
self.popup.dismiss()
self.show_timepicker()
#pick date
def show_datepicker(self):
picker = MDDatePicker(callback = self.got_date)
picker.open()
#saves date in dict (database still in progress) and opens popup for description
def got_date(self, the_date):
print(the_date)
if the_date in self.dates.keys():
pass
else:
self.dates[the_date] = 0
self.temp_date_storage = the_date
self.popup = Popup(title="Name of Meeting",content=Inputtext(), size_hint=(None,None),size=(300, 300))
self.popup.open()
#sets time
def show_timepicker(self):
picker = MDTimePicker()
picker.bind(time = self.got_time)
picker.open()
#saves time in dict (database still in progress)
def got_time(self, picker_widget,the_time):
print(the_time)
p=Inputtext()
if self.dates.get(self.temp_date_storage) == 0:
self.dates[self.temp_date_storage] = [the_time]
else:
self.dates.get(self.temp_date_storage).append(the_time)
dropdown = Content()
dropdown.ids.drawout.text = self.popup.content.ids.input2.text
self.root.ids.box.add_widget(MDExpansionPanel(
content=dropdown,
panel_cls=MDExpansionPanelThreeLine(
text=self.meetingname,
secondary_text=str(the_time),
tertiary_text=str(self.temp_date_storage)
)))
self.temp_date_storage = None
#define themepicker
def show_themepicker(self):
picker = MDThemePicker()
picker.open()
#define bar of current weekdays (will be connected later accrodingly with the dates)
def show_getday(self,dayint):
l = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
x = datetime.datetime.today().weekday() + dayint
if x >= 7:
x -= 7
return l[x]
MainApp().run()
|
[
"noreply@github.com"
] |
FredericStraub.noreply@github.com
|
f0d3b443e3d1229ac55830a7ca6100f493115d2a
|
1b10d96758c45e1746d8ca69b3222fc90aae012e
|
/document_validation/models/models.py
|
48a7d794822bf31d1b290260068d3872d2e7d86f
|
[] |
no_license
|
hassanfadl/livedhalts
|
1565e3b9dec1b470c1700b90c927724cf02a99ae
|
dab87ade79a97b616fda3b52c38b2cea695ee4d5
|
refs/heads/main
| 2023-06-29T04:29:34.679643
| 2021-07-15T11:11:43
| 2021-07-15T11:11:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api,_
from odoo.exceptions import ValidationError, UserError
class DocumentAttachmentCustomValidate(models.Model):
_inherit = "ir.attachment"
@api.model_create_multi
def create(self, vals_list):
# Block the upload if a document with the same name already exists;
# otherwise fall through to the standard create so the attachment is still saved.
if len(vals_list) > 0 and 'name' in vals_list[0]:
prev_files = self.env['documents.document'].search([]).mapped('name')
if vals_list[0]['name'] in prev_files:
raise UserError(_('file already exists'))
return super(DocumentAttachmentCustomValidate, self).create(vals_list)
|
[
"noreply@github.com"
] |
hassanfadl.noreply@github.com
|
dc0e48787640ae04b76fedf2b7227688743ab729
|
05bcad1eee910ef13272ffbda397066ac09a6078
|
/books/data/urls.py
|
c618829e59bff176ff323b0e2df348873df3284b
|
[] |
no_license
|
Erlan1998/python_group_7_exam_6_Erlan_Kurbanaliev
|
9aa0f5c36a7101ffb949d40f0b996fb0400fc52c
|
aaa88e7a1aae29485512bfd6980e1e6b5242e1f0
|
refs/heads/master
| 2023-03-13T20:28:19.395508
| 2021-03-06T12:38:42
| 2021-03-06T12:38:42
| 345,087,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
"""data URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from webapp.views import (
index_view,
guest_add_view,
guest_update_view,
guest_delete_view, search_view,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('', index_view, name='index_all'),
path('guest/add/', guest_add_view, name='guest_add'),
path('guest/<int:id>/update', guest_update_view, name='guest_update'),
path('guest/<int:id>/delete', guest_delete_view, name='guest_delete'),
path('search/', search_view, name='search')
]
|
[
"kurbanalieverlan@gmail.com"
] |
kurbanalieverlan@gmail.com
|
aaf627500a44341044bdd665ff30927d5f3938fe
|
ef9f3550249b961fbe43f1bc7e581cbb80a50063
|
/dict_and_list_random_access/run.py
|
0ad00ed631dadf32ef8e01ddcf0a6bc4cef87d9a
|
[] |
no_license
|
fatowl-dev/python_performance_test
|
08efe5fa77bf0bbb5ffa9f9306bc228a39b16703
|
53c6d340be11e51d5100214e077ad02b13f6af0c
|
refs/heads/master
| 2020-12-14T13:36:53.806073
| 2020-01-18T18:06:49
| 2020-01-18T18:06:49
| 234,760,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
# Random-access speed comparison: list vs. dict
import random
import string
import gc
import time
N = 100 # number of trials
REF_COUNT = 100000 # number of lookups per trial
ITEM_COUNT = 100 # number of items
KEY_LENGTH = 32 # length of the dict keys
SLEEP_TIME = 0.1 # wait before each trial (the numbers come out wrong without it, for some reason)
list_results = []
dict_results = []
# Build the dictionary keys
keys = []
for i in range(ITEM_COUNT):
while True:
randlist = [random.choice(string.ascii_letters + string.digits) for i in range(KEY_LENGTH)]
key = ''.join(randlist)
if key not in keys:
keys.append(key)
break
# Build the list
item_list = [ i for i in range(ITEM_COUNT)]
# Build the dict
item_dict = {keys[i]: i for i in range(ITEM_COUNT)}
print('--------------------------------------')
print('テスト開始')
print('--------------------------------------')
print(f'試行回数: {N}回')
print(f'参照回数: {REF_COUNT}回')
print(f'辞書のキーの長さ: {KEY_LENGTH}文字')
print(f'要素数: {ITEM_COUNT}個')
for count in range(N):
print('\n--------------------------------------')
print(f'{count+1}回目')
print('--------------------------------------')
# Collect garbage before timing
gc.collect()
# For some reason the results come out wrong without this sleep
time.sleep(SLEEP_TIME)
# Build the list of random-access indexes
rand_indexes = [random.randrange(0, ITEM_COUNT) for i in range(REF_COUNT)]
# Build the list of random-access keys
rand_keys = [keys[n] for n in rand_indexes]
# list
start_time = time.time()
for i in rand_indexes:
x = item_list[i]
end_time = time.time()
elapsed_time = end_time - start_time
list_results.append(elapsed_time)
print(f'list: {elapsed_time}')
# dict
start_time = time.time()
for key in rand_keys:
x = item_dict[key]
end_time = time.time()
elapsed_time = end_time - start_time
dict_results.append(elapsed_time)
print(f'dict: {elapsed_time}')
print('\n--------------------------------------')
print('条件')
print('--------------------------------------')
print(f'試行回数: {N}回')
print(f'参照回数: {REF_COUNT}回')
print(f'辞書のキーの長さ: {KEY_LENGTH}文字')
print(f'要素数: {ITEM_COUNT}個')
print('\n--------------------------------------')
print('結果')
print('--------------------------------------')
print(f'list average: {sum(list_results) / len(list_results)}')
print(f'dict average: {sum(dict_results) / len(dict_results)}')
|
[
"kingyo.summer.japan@gmail.com"
] |
kingyo.summer.japan@gmail.com
|
921817ab1c99956af4f567eedbe80116edbdde30
|
4464aa9d3c42a20caf7b769d39015d439d856c48
|
/pypm/utils/evaluate.py
|
145f0b5145f8febefcff15b8808f19c9e017a470
|
[
"MIT"
] |
permissive
|
Yansf677/PyPM
|
070e49d6cd69f5d5b51ead1cdb45c847e9532007
|
3012416aa04bb10f88689c2745702c71858bbe24
|
refs/heads/master
| 2022-10-25T01:22:21.835663
| 2020-06-16T14:54:00
| 2020-06-16T14:54:00
| 267,231,636
| 2
| 2
|
MIT
| 2020-05-28T07:54:14
| 2020-05-27T05:45:30
|
Python
|
UTF-8
|
Python
| false
| false
| 613
|
py
|
# -*- coding: utf-8 -*-
def detective_rate(statistic, threshold):
"""
function to calculate detective rate
parameters
----------
statistic:
statistics of testing data
threshold:
threshold by the offline data
return
------
fault detective rate or false alarm
"""
n_sample = statistic.shape[0]
detective_rate = 0
for i in range(n_sample):
if statistic[i] > threshold:
detective_rate += 1/n_sample
return detective_rate
def accuracy():
"""
f1_score...
"""
|
[
"13122282826@163.com"
] |
13122282826@163.com
|
42e4e64761ede6a73a14b630e8b5611c15e53445
|
58c5ea755a8e37b90534432593b12e7039ad06b4
|
/puppet_gui/src/puppet_gui/actions/ActionSet.py
|
64b9ada092326831300a39b0f8b91732f5f9863b
|
[] |
no_license
|
ros-visualization/puppet
|
d40f5e3b15781f8aacf8444035edff770ab3d8f2
|
621342735092f86a205826a75d0d27ffdf07f168
|
refs/heads/master
| 2023-08-30T14:01:07.039995
| 2013-07-17T16:02:33
| 2013-07-17T16:02:33
| 7,615,088
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,158
|
py
|
from Action import Action
from Pr2SpeakAction import Pr2SpeakAction
# Andreas: added dependency on Pr2SpeakAction to hack in speech running in parallel to motion.
class ActionSet(Action):
def __init__(self):
super(ActionSet, self).__init__()
self._actions = []
self._executing = 0
def add_action(self, action):
self._actions.append(action)
def remove_all_actions(self):
self._actions = []
def get_duration(self):
duration = 0.0
for action in self._actions:
duration = max(duration, action.get_duration())
return duration
def set_duration(self, duration):
for action in self._actions:
action.set_duration(duration)
def get_labels(self):
labels = []
for action in self._actions:
try:
labels += action.get_labels()
except AttributeError:
# action does not support get_labels
pass
return labels
def get_value(self, label):
for action in self._actions:
try:
value = action.get_value(label)
return value
except AttributeError:
# action does not support get_value
pass
except KeyError:
# action does not have a joint with that label
pass
raise KeyError('joint with label "%s" not found' % label)
def get_joint_info(self, label):
for action in self._actions:
try:
indexes = [i for i, data in enumerate(action._joints) if data['label'] == label]
if len(indexes) == 1:
return action._joints[indexes[0]]
except AttributeError:
# action has no _joints attribute
pass
raise KeyError('joint with label "%s" not found' % label)
def update_value(self, label, value):
for action in self._actions:
try:
action.update_value(label, value)
return
except:
pass
raise KeyError('joint with label "%s" not found' % label)
def to_string(self):
data = []
for action in self._actions:
data.append(action.to_string())
return ';'.join(data)
def deepcopy(self):
set = super(ActionSet, self).deepcopy()
for action in self._actions:
set.add_action(action.deepcopy())
return set
def serialize(self, stream):
stream.serialize_data(len(self._actions))
for action in self._actions:
stream.serialize_instance(action)
def deserialize(self, stream):
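# Rebuild the action list from the stream: read the count first, then deserialize that many action instances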
self.remove_all_actions()
count = stream.deserialize_data()
for i in range(count):
action = stream.deserialize_instance()
self.add_action(action)
def execute(self):
if self._executing > 0:
print('ActionSet.execute() skipped because previous execute has not yet finished')
return
#print('ActionSet.execute() %d' % len(self._actions))
self._executing = len(self._actions)
for action in self._actions:
action.execute_finished_signal.connect(self._action_finished)
action.execute()
def _action_finished(self):
assert(self._executing > 0)
self._executing -= 1
if self._executing == 0:
#print('ActionSet.execute() finished\n')
self._stop()
self._execute_finished()
def stop(self, reason=None):
print('ActionSet.stop()\n')
self._stop(reason)
self._executing = 0
def _stop(self, reason=None):
for action in self._actions:
action.execute_finished_signal.disconnect(self._action_finished)
action.stop(reason)
# Speech utterances can run in parallel to motions.
# So there might be no action registered here, but
# yet speech actions are running in the background:
if reason is not None:
Pr2SpeakAction.stopAllSpeech()
|
[
"paepcke@cs.stanford.edu"
] |
paepcke@cs.stanford.edu
|
17d148604b81fa22333cc519233fa00399b10bd3
|
2ab391bfaadf0743da8ffee084896b999e88482d
|
/base.py
|
45620e4642ea2d812e1babc45e4a2fd9ff569477
|
[] |
no_license
|
wean/coupon-windows
|
552a59637ea45539bdfa70c6d1bd04626f0fdbd0
|
9565b23c7f44594f182d7a268d4ed45bdeaf8dd3
|
refs/heads/master
| 2020-04-05T07:11:43.024665
| 2017-11-24T08:23:50
| 2017-11-24T08:23:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,675
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import math
import os
import pathlib
import time
from datetime import tzinfo, timedelta, datetime
from functools import total_ordering
from imgkit import ImageKit
from infor import getSlogan, getComments
from network import Network
from operator import attrgetter
from urlutils import UrlUtils
from utils import atoi, chmod, seconds2Datetime, datetime2Seconds, hexlifyUtf8, unhexlifyUtf8, OutputPath
from validation import Validation
class BaseDict:
def __init__(self, data):
self.data = data
def insert(self, db, tableName):
if db is None or tableName is None:
return
keys = self.getAlterKeys()
for key in keys:
if key not in self.data.keys():
self.data[key] = None
db.insert(tableName, self.data, keys)
def getAlterKeys(self):
return None
def __repr__(self):
return json.dumps(self.data, ensure_ascii=False, indent=4, sort_keys=True)
class SkuBase(BaseDict):
def __init__(self, data):
BaseDict.__init__(self, data)
def equals(self, skuid):
return skuid == self.data['skuid']
class Sku(SkuBase):
def __init__(self, data):
SkuBase.__init__(self, data)
self.data['skuid'] = int(self.data.pop('skuid'))
self.data['price'] = float(self.data.pop('price'))
self.data['comRate'] = float(self.data.pop('comRate'))
# TODO: Commission price can be calculated from price and comRate
self.data['commissionprice'] = float(self.data.pop('commissionprice'))
self.data['goodCom'] = atoi(self.data.pop('goodCom'))
self.data['salecount'] = int(self.data.pop('salecount'))
def getAlterKeys(self):
return ['skuid', 'title']
class Coupon(SkuBase):
def __init__(self, data):
SkuBase.__init__(self, data)
# Set as the same name
self.data['skuid'] = int(self.data.pop('skuId'))
# XXX: Sometimes quota is NOT the same as the price in the SKU, because the coupon
# may already be invalid by then. So don't worry about that.
self.data['quota'] = float(self.data.pop('quota'))
self.data['denomination'] = float(self.data.pop('denomination'))
self.data['usedNum'] = int(self.data.pop('usedNum'))
self.data['couponNum'] = int(self.data.pop('couponNum'))
self.data['cutPrice'] = self.data['quota'] - self.data['denomination']
self.data['validBeginTime'] = int(self.data.pop('validBeginTime'))
self.data['validEndTime'] = int(self.data.pop('validEndTime'))
self.data['couponValid'] = int(self.data.pop('couponValid'))
def getAlterKeys(self):
return ['skuid', 'validBeginTime', 'validEndTime']
class Discount(SkuBase):
def __init__(self, data):
SkuBase.__init__(self, data)
self.data['skuid'] = int(self.data.pop('skuid'))
self.data['cutPrice'] = float(self.data.pop('promoPrice'))
def getAlterKeys(self):
return ['skuid']
class MatchesItem(BaseDict):
def __init__(self, data):
BaseDict.__init__(self, data)
class Seckill(BaseDict):
def __init__(self, data):
BaseDict.__init__(self, data)
self.data['skuid'] = int(self.data.pop('wareId'))
self.data['title'] = self.data.pop('wname')
self.data['cutPrice'] = float(self.data.pop('miaoShaPrice'))
self.data['jdPrice'] = float(self.data.pop('jdPrice'))
def setPeriod(self, startTime, endTime):
self.data['startTime'] = startTime
self.data['endTime'] = endTime
def getAlterKeys(self):
return ['skuid', 'startTimeMills', 'rate', 'title', 'tagText', 'cName', 'adword', 'mTips', 'tips']
class PromotionHistory(BaseDict):
def __init__(self, data):
BaseDict.__init__(self, data)
class PriceHistory:
def __init__(self, **kwargs):
self.set(**kwargs)
def set(self, **kwargs):
for keyword in ['price', 'time']:
setattr(self, keyword, kwargs[keyword])
def __repr__(self):
fields = [' {}={!r}'.format(k, v)
for k, v in self.__dict__.items() if not k.startswith('_')]
return ' {}:\n{}'.format(self.__class__.__name__, '\n'.join(fields))
class Price:
def __init__(self, price, days, ratio):
self.price = price
self.days = days
self.ratio = ratio
def __repr__(self):
fields = [' {}={!r}'.format(k, v)
for k, v in self.__dict__.items() if not k.startswith('_')]
return ' {}:\n{}'.format(self.__class__.__name__, '\n'.join(fields))
class PriceHistoryData(BaseDict):
def __init__(self, data):
BaseDict.__init__(self, data)
self.data['skuid'] = int(self.data.pop('skuid'))
def getAlterKeys(self):
return ['skuid']
def updatePromotion(self, promotionHistoryList):
# TODO: Update history price by the promotion history
pass
def __repr__(self):
fields = [' {}={}'.format(k, v)
for k, v in self.__dict__.items()
if not k.startswith('_') and 'data' != k]
str = BaseDict.__repr__(self)
return '{}:\n{}\n{}'.format(self.__class__.__name__, '\n'.join(fields), str)
class SkuInformation(BaseDict):
def __init__(self, data, version, clock=None):
BaseDict.__init__(self, data)
self.data['skuid'] = int(self.data.pop('skuid'))
self.data['version'] = version
self.data['historyList'] = json.loads(self.data.pop('historyList'))
keys = ['startTime', 'endTime']
for key in keys:
if key not in self.data.keys():
self.data[key] = None
if 'validBeginTime' in self.data.keys():
self.data['startTime'] = seconds2Datetime(self.data.pop('validBeginTime') / 1000L)
if 'validEndTime' in self.data.keys():
self.data['endTime'] = seconds2Datetime(self.data.pop('validEndTime') / 1000L)
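# Choose an output time: random within the coupon window when a clock is supplied,
# otherwise fall back to "now" (no start time) or to the window start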
if 'outputTime' not in self.data.keys():
if clock is not None:
self.data['outputTime'] = clock.randomTime(self.data['startTime'],
self.data['endTime'])
elif self.data['startTime'] is None:
self.data['outputTime'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
else:
self.data['outputTime'] = self.data['startTime']
def setSlogan(self, slogan):
self.data['slogan'] = slogan
def setComments(self, data):
self.data.update(data)
self.data.pop('code')
self.data['allCnt'] = int(self.data.pop('allCnt'))
self.data['goodCnt'] = int(self.data.pop('goodCnt'))
self.data['badCnt'] = int(self.data.pop('badCnt'))
self.data['normalCnt'] = int(self.data.pop('normalCnt'))
self.data['pictureCnt'] = int(self.data.pop('pictureCnt'))
self.data['showPicCnt'] = int(self.data.pop('showPicCnt'))
self.data['consultationCount'] = int(self.data.pop('consultationCount'))
self.data['percentOfGoodComments'] = self.data.pop('goods')
commentInfoList = list()
for info in self.data.pop('commentInfoList'):
commentInfo = dict()
commentInfo['commentShareUrl'] = info['commentShareUrl']
commentInfo['userNickName'] = info['userNickName']
commentInfo['commentData'] = hexlifyUtf8(info['commentData'])
commentInfo['commentScore'] = int(info['commentScore'])
commentInfoList.append(commentInfo)
self.data['commentList'] = json.dumps(commentInfoList, ensure_ascii=False,
indent=4, sort_keys=True)
def getAlterKeys(self):
return ['skuid', 'slogan', 'commentList']
def updatePrices(self):
cutPrice = self.data['cutPrice']
# Correct cut-price if not right
if cutPrice > self.data['price']:
self.priceCorrected = True
cutPrice = self.data['price']
else:
self.priceCorrected = False
self.prices = list()
self.histories = list()
self.histories.append(PriceHistory(price=float(cutPrice), time=datetime.now().strftime('%Y-%m-%d')))
for history in self.data.pop('historyList'):
self.histories.append(PriceHistory(price=float(history['price']), time=history['time']))
# Sort histories
self.histories.sort(key=attrgetter('time'))
# Calculate prices ratios
prices = []
totalDays = 0
size = len(self.histories)
for i in range(0, size):
history = self.histories[i]
days = 1
if i < size - 1:
thisDate = datetime.strptime(history.time, '%Y-%m-%d')
nextDate = datetime.strptime(self.histories[i+1].time, '%Y-%m-%d')
days = (nextDate - thisDate).days
totalDays += days
prices.append((history.price, days))
prices.sort()
pos = -1
for price in prices:
if pos >= 0 and self.prices[pos].price == price[0]:
self.prices[pos].days += price[1]
self.prices[pos].ratio = float(self.prices[pos].days) / float(totalDays)
else:
self.prices.append(Price(price[0], price[1], float(price[1]) / float(totalDays)))
pos += 1
# Calculate prices and discounts
lowestPrice = float(int(100 * self.prices[0].price)) / 100
avgPrice = 0.0
for price in self.prices:
avgPrice += float(price.price) * price.ratio
avgPrice = float(int(100 * avgPrice)) / 100
# Calculate discounts
discount = int(100 * float(cutPrice) / float(avgPrice))
if 0 == discount:
discount = 1
lowestRatio = int(100 * float(lowestPrice) / float(avgPrice))
# Calculate weights
'''
Weight should be measured by factors as following:
1, discount relative to lowest prices
2, discount relative to average prices
3, off amount
4, days
'''
lowestDiscount = float(cutPrice) / float(lowestPrice)
lg = math.log(totalDays)
if 0 == lg: lg = 0.1 # Log(1) is 0.0
weight = lowestDiscount / lg
self.data['totalDays'] = totalDays
self.data['weight'] = weight
self.data['lowestPrice'] = lowestPrice
self.data['avgPrice'] = avgPrice
self.data['discount'] = discount
self.data['lowestRatio'] = lowestRatio
def updateSlogan(self):
slogan = getSlogan(self.data['skuid'])
self.setSlogan(slogan)
def updateComments(self):
comments = getComments(self.data['skuid'])
if comments is not None:
self.setComments(comments)
def updateCouponLink(self):
couponLink = None
if 'couponLink' in self.data.keys():
url = self.data.pop('couponLink')
if url is not None:
pos = url.find('?')
if pos > 0:
url = 'http://coupon.m.jd.com/coupons/show.action{}'.format(url[pos:])
couponLink = UrlUtils.toShortUrl(url)
self.data['couponLink'] = couponLink
def update(self):
self.updatePrices()
self.updateSlogan()
self.updateComments()
self.updateCouponLink()
@total_ordering
class Special(SkuBase):
def __init__(self, data):
SkuBase.__init__(self, data)
if 'commentList' in self.data.keys():
comments = json.loads(self.data.pop('commentList'))
self.data['commentList'] = list()
for comment in comments:
comment['commentData'] = unhexlifyUtf8(comment.pop('commentData'))
if Validation.isCommentBad(comment['commentData']):
continue
self.data['commentList'].append(comment)
def update(self, db=None, tableName=None):
if db is None or tableName is None:
return
if 'outputTime' not in self.data.keys():
return
data = dict()
data['id'] = self.data['id']
# XXX: Postpone and set outputTime to that after next three days
THREE_DAYS_TICKS = 3 * 24 * 3600
now = time.time()
outputTs = datetime2Seconds(self.data['outputTime'])
diff = now - outputTs
if diff < 0:
diff = 1.0 # Set as positive
outputTs += math.ceil(diff / THREE_DAYS_TICKS) * THREE_DAYS_TICKS
data['outputTime'] = seconds2Datetime(outputTs)
print 'Update', '{}'.format(self.data['skuid']).ljust(16), ':',
print self.data['outputTime'], '-->', data['outputTime'],
print 'in (', self.data['startTime'], ',', self.data['endTime'], ')'
db.update(tableName, data, ['id'])
def __lt__(self, other):
return (self.data['weight'] < other.data['weight'])
def __gt__(self, other):
return (self.data['weight'] > other.data['weight'])
class SpecialFormatter:
def __init__(self, data):
self.data = data
@staticmethod
def create(data):
formatter = SpecialFormatter(data)
formatter.prepare()
return formatter
def prepare(self):
self.skuid = self.data['skuid']
self.title = self.data['title']
self.slogan = self.data['slogan']
if self.slogan is None:
self.slogan = ''
self.skuimgurl = self.data['skuimgurl']
self.price = self.data['price']
self.lowestPrice = self.data['lowestPrice']
self.avgPrice = self.data['avgPrice']
self.cutPrice = self.data['cutPrice']
self.totalDays = self.data['totalDays']
self.percentOfGoodComments = self.data['percentOfGoodComments']
self.startTime = self.data['startTime']
self.endTime = self.data['endTime']
self.comments = self.data['commentList']
self.couponLink = self.data['couponLink']
def preparePlate(self, qwd):
if self.avgPrice < self.price:
self.plateAvgPrice = '历史平均价:¥{}'.format(self.avgPrice)
else:
self.plateAvgPrice = ''
if self.totalDays < 30:
self.plateTotalDays = '{}天'.format(self.totalDays)
elif self.totalDays < 360:
self.plateTotalDays = '{}个月'.format(self.totalDays/30)
else:
self.plateTotalDays = '超过1年'
if self.startTime is not None and self.endTime is not None:
self.platePeriod = u'特价时间:{}到{}'.format(self.startTime, self.endTime)
else:
self.platePeriod = ''
self.plateComments = ''
for comment in self.comments:
commentData = comment['commentData'].replace('\n', '')
self.plateComments += u'{}:{}\n'.format(comment['userNickName'], commentData)
if self.couponLink is not None:
self.plateCouponLink = u'领券:{}'.format(self.couponLink)
else:
self.plateCouponLink = ''
self.plateShareUrl = qwd.getShareUrl(self.skuid)
def getPlate(self, qwd):
self.preparePlate(qwd)
with open('plate/special.txt') as fp:
content = fp.read().format(self)
return content.replace('\n\n', '\n')
def prepareHtml(self):
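# Pick the badge icon by deal quality: coupon available > at the all-time low > below list price > plain good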
if self.couponLink is not None:
icon = 'coupon.png'
elif self.cutPrice <= self.lowestPrice:
icon = 'lowest.png'
elif self.cutPrice < self.price:
icon = 'discount.png'
else:
icon = 'good.png'
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, 'images')
path = os.path.join(path, icon)
self.icon = pathlib.Path(path).as_uri()
self.discount = int(self.cutPrice * 100 / self.price)
self.lowestRatio = int(self.lowestPrice * 100 / self.price)
self.avgRatio = int(self.avgPrice * 100 / self.price)
self.curRatio = 100
maxRatio = 76
self.discountDisplay = int(self.cutPrice * maxRatio / self.price)
self.lowestRatioDisplay = int(self.lowestPrice * maxRatio / self.price)
self.avgRatioDisplay = int(self.avgPrice * maxRatio / self.price)
self.curRatioDisplay = maxRatio
# Colors
if self.totalDays < 30:
self.totalDaysColor = 'rgb(255, 57, 31)'
elif self.totalDays < 60:
self.totalDaysColor = 'rgb(255, 169, 33)'
elif self.totalDays < 90:
self.totalDaysColor = 'rgb(5, 157, 127)'
else:
self.totalDaysColor = '#666'
def getHtml(self):
self.prepareHtml()
with open('html/special.html') as fp:
content = fp.read().format(self)
return content
def getPlateImage(self):
if self.skuimgurl is None:
return None
path = OutputPath.getDataPath('sku-{}-plate'.format(self.skuid), 'jpeg')
ret = Network.saveGetUrl(path, self.skuimgurl)
if ret < 0:
return None
return path
def getComplexImage(self):
content = self.getHtml()
path = OutputPath.getDataPath('sku-{}-complex'.format(self.skuid), 'html')
with open(path, 'w') as fp:
fp.write(content)
chmod(path)
return ImageKit.fromHtml(path, pageSize=(80, 150))
def getImage(self, imageType):
if imageType == 0: # 0, Plate
return self.getPlateImage()
# 1, Complex
return self.getComplexImage()
|
[
"974644081@qq.com"
] |
974644081@qq.com
|
16c0fa0f1493a48823e1692fafd6ad8476c2b198
|
bfbf4c7df621a8190f00f58d981b6ccfb4dc3382
|
/21_naive_baysian_test/home_word.py
|
7196f82fd385ba4786bc892ed0ac718a397b6ce7
|
[] |
no_license
|
zjtprince/DataAnalysisTraining
|
5786725431c7f47041826bc08cad0109c9902c77
|
4c2167e0edbddbeb74621d2a02d3ee56f85586a2
|
refs/heads/master
| 2023-01-28T16:42:05.457291
| 2020-12-13T06:38:04
| 2020-12-13T06:38:04
| 320,966,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,000
|
py
|
import jieba
from sklearn.naive_bayes import MultinomialNB
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
BASE_DIR='/home/zjtprince/Documents/text_classification/text classification/'
def cut_text(filepath):
text = open(filepath,'r',encoding='gb18030').read()
words = jieba.cut(text)
return ' '.join(words)
def load_features_and_labels(dir , label):
features = []
labels = []
files = os.listdir(dir)
for file in files:
features.append(cut_text(dir + os.sep + file))
labels.append(label)
return features , labels
def build_word_list_and_label_list(type_name):
train_features1, labels1 = load_features_and_labels(BASE_DIR+type_name+'/女性', '女性')
train_features2, labels2 = load_features_and_labels(BASE_DIR+type_name+'/文学', '文学')
train_features3, labels3 = load_features_and_labels(BASE_DIR+type_name+'/校园', '校园')
train_features4, labels4 = load_features_and_labels(BASE_DIR+type_name+'/体育', '体育')
train_list = train_features1 + train_features2 + train_features3 + train_features4
label_list = labels1 + labels2 + labels3 + labels4
return train_list, label_list
def load_stop_words():
stop_words = open(BASE_DIR+"stop/stopword.txt", 'r',encoding='utf-8').read()
stop_words = stop_words.encode('utf-8').decode('utf-8-sig')
return stop_words.split('\n')
if __name__ == '__main__':
stop_words = load_stop_words()
train_list, label_list = build_word_list_and_label_list('train')
test_list, test_labels = build_word_list_and_label_list('test')
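# Fit TF-IDF on the training corpus only, then reuse the same vocabulary to transform the test set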
vec = TfidfVectorizer(stop_words=stop_words)
vec.fit(train_list)
train_data = vec.transform(train_list)
test_data = vec.transform(test_list)
bayes = MultinomialNB(alpha=0.001)
ctf = bayes.fit(train_data, label_list)
predict = ctf.predict(test_data)
accur = accuracy_score(predict,test_labels)
print("准确率为:%f" , accur)
|
[
"zjtprince@qq.com"
] |
zjtprince@qq.com
|
7c1c4d76de80e3cfdd0bfce9ad4f252d6ef21326
|
f1fe88da8f05ae54b17eeb328bc6c4e1bf9ebd0b
|
/test2/booktest/urls.py
|
70b13a0f42d35ef0aa55d5ec40c617a600dcf8dd
|
[] |
no_license
|
itachaaa/pytest
|
8a8af6f1e834c9841a21986881b0dbe62d653d56
|
907d181121ef83c53982446d7c7cdc034762f55a
|
refs/heads/master
| 2021-08-29T23:20:54.544775
| 2017-12-15T08:03:51
| 2017-12-15T08:03:51
| 114,343,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
from django.conf.urls import url
from booktest import views
urlpatterns = [
url(r'index', views.index),
url(r'^booklist$', views.booklist),
url(r'^area$', views.area),
]
|
[
"LC@LCdeMacBook-Air.local"
] |
LC@LCdeMacBook-Air.local
|
9b1b453afef1fcb92f4f6772ca25369709d8c1cd
|
1fc2b8709668c2b7f735b7694e67e8c7d57dca04
|
/crawler/AppUI.py
|
6e2f6378a31ddbec8b5ebbec6d74116e8b3bac35
|
[] |
no_license
|
arno06/SimpleCrawler
|
1cce2d28fa87f5b31537e40ebbbab4e8681df963
|
310059815b8ac0aaaac42174431b62902918de5a
|
refs/heads/master
| 2021-01-23T11:55:10.736206
| 2015-06-26T08:09:59
| 2015-06-26T08:09:59
| 38,098,701
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,322
|
py
|
from tkinter import *
from tkinter import ttk
class Main():
def __init__(self):
self.running = False
self.toggle_handler = None
self.window = Tk()
self.window.columnconfigure(0, weight=1)
self.window.rowconfigure(0, weight=1)
self.main_panel = ttk.Frame(self.window, padding=(5, 5, 5, 5))
self.main_panel.grid(column=0, row=0, sticky=(N, S, E, W))
self.main_panel.columnconfigure(0, weight=1)
self.main_panel.rowconfigure(1, weight=1)
self.nav_panel = ttk.Frame(self.main_panel)
self.nav_panel.grid(column=0, row=0, sticky=(N, E, W))
self.nav_panel.columnconfigure(0, weight=1)
self.base_url = StringVar()
self.base_url.set("http://")
self.entry = ttk.Entry(self.nav_panel, textvariable=self.base_url, width=100)
self.toggle_button = ttk.Button(self.nav_panel, text="Go", command=self.click_handler)
self.entry.grid(column=0, row=0, sticky=(N, W, E, S))
self.toggle_button.grid(column=1, row=0, sticky=(N, W, E, S))
self.content = ttk.Frame(self.main_panel, padding=(0, 5, 0, 0))
self.content.grid(column=0, row=1, sticky=(N, W, E, S))
self.content.columnconfigure(0, weight=1)
self.content.rowconfigure(0, weight=1)
self.left_content = ttk.Frame(self.content)
self.left_content.grid(column=0, row=0, sticky=(N, W, E, S))
self.left_content.columnconfigure(0, weight=1)
self.left_content.rowconfigure(1, weight=1)
self.content.columnconfigure(0, weight=1)
self.todo_count = ttk.Label(self.left_content, text=" ")
self.todo_count.grid(column=0, row=0, sticky=(N, W, E, S))
scrollbar = ttk.Scrollbar(self.left_content)
self.todo_label = Text(self.left_content, yscrollcommand=scrollbar.set)
scrollbar.config(command=self.todo_label.yview)
self.todo_label.grid(column=0, row=1, sticky=(N, W, E, S))
scrollbar.grid(column=1, row=1, sticky=(N, W, E, S))
self.right_content = ttk.Frame(self.content)
self.right_content.grid(column=1, row=0, sticky=(N, W, E, S))
self.content.columnconfigure(1, weight=1)
self.content.rowconfigure(0, weight=1)
self.right_content.columnconfigure(0, weight=1)
self.right_content.rowconfigure(1, weight=1)
scrollbar = ttk.Scrollbar(self.right_content)
self.main_label = Text(self.right_content, yscrollcommand=scrollbar.set)
scrollbar.config(command=self.main_label.yview)
self.main_label.grid(column=0, row=1, sticky=(N, W, E, S))
scrollbar.grid(column=1, row=1, sticky=(N, W, E, S))
self.done_count = ttk.Label(self.right_content, text=" ")
self.done_count.grid(column=0, row=0, sticky=(N, W, E, S))
def mainloop(self):
self.window.mainloop()
def update(self, _tbd, _crawled, _global_time, _remaining):
self.todo_count['text'] = str(len(_tbd))+" urls restantes ("+self.format_time(_remaining)+")"
self.todo_label.delete(1.0, END)
self.todo_label.insert(END, "\n".join(_tbd))
self.done_count['text'] = str(len(_crawled))+" urls parcourues ("+self.format_time(_global_time)+")"
self.main_label.delete(1.0, END)
self.main_label.insert(END, "\n".join(_crawled))
def click_handler(self):
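# Toggle crawling: ignore the click unless a toggle handler and a non-default URL are set,
# then flip the running state, update the widgets and notify the handler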
if self.toggle_handler is None or callable(self.toggle_handler) is False or self.base_url.get() == "http://":
return
self.running = False if self.running is True else True
if self.running is True:
self.entry.config(state="disable")
self.toggle_button.config(text='Stop')
else:
self.entry.config(state="normal")
self.toggle_button.config(text='Go')
self.toggle_handler(self.running, self.base_url.get())
def reset(self):
if self.running is True:
self.click_handler()
@staticmethod
def format_time(_time):
if _time < 60:
return str(round(_time, 2))+" sec"
_time /= 60
if _time < 60:
return str(round(_time, 2))+" min"
_time /= 60
if _time < 24:
return str(round(_time, 2))+" h"
_time /= 24
return str(round(_time, 2))+" j"
|
[
"arno06@gmail.com"
] |
arno06@gmail.com
|
3301c1cfa4fc89a77fbb5449ec4d0a37e120dd1d
|
6449afd5e812de0ece5d5932dc567dddfbd1ef51
|
/testing.py
|
ca90e77df96f41d85b8073a6fc269a3c8672032d
|
[
"BSD-3-Clause"
] |
permissive
|
AgPipeline/template-rgb-plot
|
1dabeaec29c6af5569972105e0720e04357fec86
|
34f6eab3edbeaf995d7e888a2e8004963ec9ba4d
|
refs/heads/main
| 2021-12-26T11:54:57.604066
| 2021-09-13T20:13:55
| 2021-09-13T20:13:55
| 223,216,077
| 2
| 3
|
BSD-3-Clause
| 2021-09-13T20:13:56
| 2019-11-21T16:23:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,064
|
py
|
#!/usr/bin/env python3
"""Test script for algorithm_rgb code
"""
import os
import sys
import numpy as np
from PIL import Image
import algorithm_rgb
def _get_variables_header_fields() -> str:
"""Returns a string representing the variable header fields
Return:
Returns a string representing the variables' header fields
"""
variables = algorithm_rgb.VARIABLE_NAMES.split(',')
labels = algorithm_rgb.VARIABLE_LABELS.split(',')
labels_len = len(labels)
units = algorithm_rgb.VARIABLE_UNITS.split(',')
units_len = len(units)
if labels_len != len(variables):
sys.stderr.write("The number of defined labels doesn't match the number of defined variables")
sys.stderr.write(" continuing processing")
if units_len != len(variables):
sys.stderr.write("The number of defined units doesn't match the number of defined variables")
sys.stderr.write(" continuing processing")
headers = ''
for idx, variable_name in enumerate(variables):
variable_header = variable_name
if idx < labels_len:
variable_header += ' - %s' % labels[idx]
if idx < units_len:
variable_header += ' (%s)' % units[idx]
headers += variable_header + ','
return headers
def print_usage():
"""Displays information on how to use this script
"""
argc = len(sys.argv)
if argc:
our_name = os.path.basename(sys.argv[0])
else:
our_name = os.path.basename(__file__)
print(our_name + " <folder>|<filename> ...")
print(" folder: path to folder containing images to process")
print(" filename: path to an image file to process")
print("")
print(" One or more folders and/or filenames can be used")
print(" Only files at the top level of a folder are processed")
def check_arguments():
"""Checks that we have script argument parameters that appear valid
"""
argc = len(sys.argv)
if argc < 2:
sys.stderr.write("One or more paths to images need to be specified on the command line\n")
print_usage()
return False
# Check that the paths exist.
have_errors = False
for idx in range(1, argc):
if not os.path.exists(sys.argv[idx]):
print("The following path doesn't exist: " + sys.argv[idx])
have_errors = True
if have_errors:
sys.stderr.write("Please correct any problems and try again\n")
return not have_errors
def check_configuration():
"""Checks if the configuration is setup properly for testing
"""
if not hasattr(algorithm_rgb, 'VARIABLE_NAMES') or not algorithm_rgb.VARIABLE_NAMES:
sys.stderr.write("Variable names configuration variable is not defined yet. Please define and try again")
sys.stderr.write(" Update configuration.py and set VALUE_NAMES variable with your variable names")
return False
return True
def run_test(filename):
"""Runs the extractor code using pixels from the file
Args:
filename(str): Path to image file
Return:
The result of calling the extractor's calculate() method
Notes:
Assumes the path passed in is valid. An error is reported if
the file is not an image file.
"""
try:
open_file = Image.open(filename)
if open_file:
# Get the pixels and call the calculation (np.array on a PIL image already yields height x width x channels)
pix = np.array(open_file)
calc_val = algorithm_rgb.calculate(pix)
# Check for unsupported types
if isinstance(calc_val, set):
raise RuntimeError("A 'set' type of data was returned and isn't supported. Please use a list or a tuple instead")
# Perform any type conversions to a printable string
if isinstance(calc_val, str):
print_val = calc_val
else:
# Check if the return is iterable and comma separate the values if it is
try:
_ = iter(calc_val)
print_val = ",".join(map(str, calc_val))
except Exception:
print_val = str(calc_val)
print(filename + "," + print_val)
except Exception as ex:
sys.stderr.write("Exception caught: " + str(ex) + "\n")
sys.stderr.write(" File: " + filename + "\n")
def process_files():
"""Processes the command line file/folder arguments
"""
argc = len(sys.argv)
if argc:
print("Filename," + _get_variables_header_fields())
for idx in range(1, argc):
cur_path = sys.argv[idx]
if not os.path.isdir(cur_path):
run_test(cur_path)
else:
allfiles = [os.path.join(cur_path, fn) for fn in os.listdir(cur_path) if os.path.isfile(os.path.join(cur_path, fn))]
for one_file in allfiles:
run_test(one_file)
if __name__ == "__main__":
if check_arguments() and check_configuration():
process_files()
|
[
"schnaufer@email.arizona.edu"
] |
schnaufer@email.arizona.edu
|
a98631e0b1a78861f698b938b9ac1138db85b93e
|
8bd51cc155b88b251115b7542065dc45431125d4
|
/distribution_utils.py
|
1a8b93d11552ab9f403870ac1ed81d6b131125b4
|
[] |
no_license
|
sunke123/tf-hrnet-imagenet-tmp
|
e7ec84fc4fbc20dd5feb2c4dddd955344b95a8d8
|
05c381cb35949317a10b70a70c909be737135612
|
refs/heads/master
| 2020-09-23T02:48:30.396786
| 2019-10-04T07:12:21
| 2019-10-04T07:12:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,954
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for running models in a distributed setting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tensorflow as tf
def _collective_communication(all_reduce_alg):
"""Return a CollectiveCommunication based on all_reduce_alg.
Args:
all_reduce_alg: a string specifying which collective communication to pick,
or None.
Returns:
tf.distribute.experimental.CollectiveCommunication object
Raises:
ValueError: if `all_reduce_alg` not in [None, 'ring', 'nccl']
"""
collective_communication_options = {
None: tf.distribute.experimental.CollectiveCommunication.AUTO,
"ring": tf.distribute.experimental.CollectiveCommunication.RING,
"nccl": tf.distribute.experimental.CollectiveCommunication.NCCL
}
if all_reduce_alg not in collective_communication_options:
raise ValueError(
"When used with `multi_worker_mirrored`, valid values for "
"all_reduce_alg are ['ring', 'nccl']. Supplied value: {}".format(
all_reduce_alg))
return collective_communication_options[all_reduce_alg]
def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
"""Return a CrossDeviceOps based on all_reduce_alg and num_packs.
Args:
all_reduce_alg: a string specifying which cross device op to pick, or None.
num_packs: an integer specifying number of packs for the cross device op.
Returns:
tf.distribute.CrossDeviceOps object or None.
Raises:
ValueError: if `all_reduce_alg` not in [None, 'nccl', 'hierarchical_copy'].
"""
if all_reduce_alg is None:
return None
mirrored_all_reduce_options = {
"nccl": tf.distribute.NcclAllReduce,
"hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce
}
if all_reduce_alg not in mirrored_all_reduce_options:
raise ValueError(
"When used with `mirrored`, valid values for all_reduce_alg are "
"['nccl', 'hierarchical_copy']. Supplied value: {}".format(
all_reduce_alg))
cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]
return cross_device_ops_class(num_packs=num_packs)
# NOTE
def get_distribution_strategy(distribution_strategy="default",
num_gpus=0,
num_workers=1,
all_reduce_alg=None,
num_packs=1):
"""Return a DistributionStrategy for running the model.
Args:
distribution_strategy: a string specifying which distribution strategy to
use. Accepted values are 'off', 'default', 'one_device', 'mirrored',
'parameter_server', and 'multi_worker_mirrored' -- case insensitive.
'off' means not to use Distribution Strategy; 'default' means to choose from
`MirroredStrategy`, `MultiWorkerMirroredStrategy`, or `OneDeviceStrategy`
according to the number of GPUs and number of workers.
num_gpus: Number of GPUs to run this model.
num_workers: Number of workers to run this model.
all_reduce_alg: Optional. Specifies which algorithm to use when performing
all-reduce. For `MirroredStrategy`, valid values are "nccl" and
"hierarchical_copy". For `MultiWorkerMirroredStrategy`, valid values are
"ring" and "nccl". If None, DistributionStrategy will choose based on
device topology.
num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce`
or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`.
Returns:
tf.distribute.DistributionStrategy object.
Raises:
ValueError: if `distribution_strategy` is 'off' or 'one_device' and
`num_gpus` is larger than 1; or `num_gpus` is negative.
"""
if num_gpus < 0:
raise ValueError("`num_gpus` can not be negative.")
distribution_strategy = distribution_strategy.lower()
if distribution_strategy == "off":
if num_gpus > 1:
raise ValueError(
"When {} GPUs and {} workers are specified, distribution_strategy "
"flag cannot be set to 'off'.".format(num_gpus, num_workers))
return None
if distribution_strategy == "multi_worker_mirrored":
return tf.distribute.experimental.MultiWorkerMirroredStrategy(
communication=_collective_communication(all_reduce_alg))
if (distribution_strategy == "one_device" or
(distribution_strategy == "default" and num_gpus <= 1)):
if num_gpus == 0:
return tf.distribute.OneDeviceStrategy("device:CPU:0")
else:
if num_gpus > 1:
raise ValueError("`OneDeviceStrategy` can not be used for more than "
"one device.")
return tf.distribute.OneDeviceStrategy("device:GPU:0")
if distribution_strategy in ("mirrored", "default"):
if num_gpus == 0:
assert distribution_strategy == "mirrored"
devices = ["device:CPU:0"]
else:
devices = ["device:GPU:%d" % i for i in range(num_gpus)]
return tf.distribute.MirroredStrategy(
devices=devices,
cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs))
if distribution_strategy == "parameter_server":
return tf.distribute.experimental.ParameterServerStrategy()
raise ValueError(
"Unrecognized Distribution Strategy: %r" % distribution_strategy)
def per_replica_batch_size(batch_size, num_gpus):
"""For multi-gpu, batch-size must be a multiple of the number of GPUs.
Note that distribution strategy handles this automatically when used with
Keras. For using with Estimator, we need to get per GPU batch.
Args:
batch_size: Global batch size to be divided among devices. This should be
equal to num_gpus times the single-GPU batch_size for multi-gpu training.
num_gpus: How many GPUs are used with DistributionStrategies.
Returns:
Batch size per device.
Raises:
ValueError: if batch_size is not divisible by number of devices
"""
if num_gpus <= 1:
return batch_size
remainder = batch_size % num_gpus
if remainder:
err = ('When running with multiple GPUs, batch size '
'must be a multiple of the number of available GPUs. Found {} '
'GPUs with a batch size of {}; try --batch_size={} instead.'
).format(num_gpus, batch_size, batch_size - remainder)
raise ValueError(err)
return int(batch_size / num_gpus)
def configure_cluster(worker_hosts=None, task_index=-1):
"""Set multi-worker cluster spec in TF_CONFIG environment variable.
Args:
worker_hosts: comma-separated list of worker ip:port pairs.
Returns:
Number of workers in the cluster.
"""
tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
if tf_config:
num_workers = (len(tf_config['cluster'].get('chief', [])) +
len(tf_config['cluster'].get('worker', [])))
elif worker_hosts:
workers = worker_hosts.split(',')
num_workers = len(workers)
if num_workers > 1 and task_index < 0:
raise ValueError('Must specify task_index when number of workers > 1')
task_index = 0 if num_workers == 1 else task_index
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': workers
},
'task': {'type': 'worker', 'index': task_index}
})
else:
num_workers = 1
return num_workers
|
[
"c.r.deng2012@gmail.com"
] |
c.r.deng2012@gmail.com
|
db2ed20cd3e6ba74bf7c30085f6f1b7f1526181c
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/invariant_slot_attention/modules/__init__.py
|
4de3565ef241189eac33dea9d2f901f5305be01f
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,255
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module library."""
# pylint: disable=g-multiple-import
# pylint: disable=g-bad-import-order
# Re-export commonly used modules and functions
from .attention import (GeneralizedDotProductAttention,
InvertedDotProductAttention, SlotAttention,
TransformerBlock, Transformer)
from .convolution import (SimpleCNN, CNN)
from .decoders import (SpatialBroadcastDecoder, SiameseSpatialBroadcastDecoder)
from .initializers import (GaussianStateInit, ParamStateInit,
SegmentationEncoderStateInit,
CoordinateEncoderStateInit)
from .misc import (Dense, GRU, Identity, MLP, PositionEmbedding, Readout,
RelativePositionEmbedding)
from .video import (CorrectorPredictorTuple, FrameEncoder, Processor, SAVi)
from .resnet import (ResNet18, ResNet34, ResNet50, ResNet101, ResNet152,
ResNet200)
from .invariant_attention import (InvertedDotProductAttentionKeyPerQuery,
SlotAttentionExplicitStats,
SlotAttentionPosKeysValues,
SlotAttentionTranslEquiv,
SlotAttentionTranslScaleEquiv,
SlotAttentionTranslRotScaleEquiv)
from .invariant_initializers import (
ParamStateInitRandomPositions,
ParamStateInitRandomPositionsScales,
ParamStateInitRandomPositionsRotationsScales,
ParamStateInitLearnablePositions,
ParamStateInitLearnablePositionsScales,
ParamStateInitLearnablePositionsRotationsScales)
# pylint: enable=g-multiple-import
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
0a597db3d461f09e56059cf629f3e82694d6008a
|
61b8d32a16ca2e5dc0162b65ac2beef8cb60bc05
|
/test/show_data.py
|
d22cbdfe4e006c73019d0020e596499f48ecd79c
|
[] |
no_license
|
antique493/2021_huawei_model
|
1f9d6dbf6fa8ef245bbafb69a085f5cf509b34d9
|
6793fd4ca9640cd34e2d89091183d3ccb88c1c10
|
refs/heads/main
| 2023-08-20T10:09:47.062840
| 2021-10-16T02:39:03
| 2021-10-16T02:39:03
| 416,988,399
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
import numpy as np
import glob
import matplotlib.pyplot as plt
def read_tags():
tags = []
with open("./数据集/Tag坐标信息.txt", encoding='UTF-8') as f:
lines = f.readlines()
for line in lines:
results = line.split()
tag = []
for i in range(1,len(results)):
a = int(results[i])
tag.append(a)
tags.append(tag)
# [N, 3]
return np.array(tags)
def read_files():
# estimate_results [files, 4, N]
estimate_results = []
files = []
for i in range(1,325):
files.append("数据集/正常数据清洗/"+str(i)+".正常.txt")
for file in files:
with open(file,"r") as f:
lines = f.readlines()[1:]
file_results = [[],[],[],[]]
for line in lines:
res = line.split(":")
for i in range(len(res)):
file_results[i].append(int(res[i]))
estimate_results.append(file_results)
# print(file)
# print(len(file_results[0]))
# print(len(file_results[1]))
# print(len(file_results[2]))
# print(len(file_results[3]))
# [file, 4, N]
return estimate_results
if __name__ == '__main__':
input_data = np.array([[0,0,1300],[5000,0,1700],[0,5000,1700],[5000,5000,1300]])
tags = read_tags()
distances = read_files()
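# For each of the four anchors, plot measured distance (x) against its error relative to the geometric ground truth (y)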
for which in range(4):
x = []
y = []
for i in range(tags.shape[0]):
tag = tags[i]
distance_gt = ((tag - input_data/10)**2).sum(axis=1)**0.5*10
pose = distances[i]
x += pose[which]
y += list((pose[which]-distance_gt[which]))
with open(str(which)+".txt", "w") as f:
for x_r in x:
f.write(str(x_r)+" ")
f.write("\n")
for x_r in y:
f.write(str(x_r)+" ")
plt.scatter(x, y)
plt.show()
|
[
"tianyuxin@buaa.edu.cn"
] |
tianyuxin@buaa.edu.cn
|
244da2e90a574f3a7c00e6e17460aa91ecec4cd7
|
0b456a4506670be25f95f7790f78f9f91d81530b
|
/script/githubregister/get_confirm_url.py
|
c3d7e254096ce4556e8f41205e3f15f6a3d5a99c
|
[
"MIT"
] |
permissive
|
atupalykl/github-crawler
|
e862b18e43d18baad2f5af3f73a97b0e248ab750
|
8298a7ab2980447d65522fd966afe39dae8d8955
|
refs/heads/master
| 2020-05-29T11:07:39.271991
| 2015-11-29T11:52:43
| 2015-11-29T11:52:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
#!/usr/bin/python
#coding:utf8
import poplib
import re
def get_mail(user, password):
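# Log in to the Hotmail POP3 mailbox and scan every message for a GitHub confirmation link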
M = poplib.POP3_SSL('pop3.live.com')
M.user(user)
M.pass_(password)
num_messages = len(M.list()[1])
for i in range(num_messages):
for j in M.retr(i+1)[1]:
github_confirm_url = re.findall("https://github.com/users/.*", j)
if github_confirm_url:
return github_confirm_url[0]
return None
def main():
print get_mail("cardiganpater@hotmail.com", "EmerY444443")
if __name__ == '__main__':
main()
|
[
"binhe22@gmail.com"
] |
binhe22@gmail.com
|
8c4ce3ae200b52e3a8b57d1c0a960df0d3dbc5f1
|
d7a1478256fea3c5a5d94c4ddfd8ffc4f2d87ab2
|
/radix_r_sort.py
|
588b8a62e438c47b578feb4bdf59ca60c55f5556
|
[] |
no_license
|
Awarua-/Cosc262_Assignment_Part1
|
0db96927f79065799a1f43ca14feae127fe92746
|
7e3e05c59ff8a24323c06055ff7769e1961a3dd7
|
refs/heads/master
| 2021-01-14T11:53:06.553242
| 2015-03-17T08:43:30
| 2015-03-17T08:43:30
| 18,115,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
__author__ = 'Dion'
def main(data, base, max_len):
"""
Runs the algorithm
:param data: the data array
:param base: the base of the data in a
:param max_len: the maximum number of places in the largest data entry in a
:return:
"""
# Create an array of buckets according to the base
buckets = []
for _ in range(0, base):
buckets.append([])
i = 0
while i < max_len:
# sort current place into buckets
for k in data[1:]:
x = (k // base ** i) % base
buckets[x].append(k)
# reset the data array
# copy the results out of the buckets back into the data array
# and empty the bucket array
data = data[:1]
for j in buckets:
r, j[:] = j[:], []
data.extend(r)
i += 1
return data
|
[
"woolley.dion@gmail.com"
] |
woolley.dion@gmail.com
|
c1a60a1a5803673add99db26807bf8e620335a83
|
17c08826eac8129c0c83ecaa368ac305f9b03a11
|
/Code/S_add_sym_on_pareto.py
|
75bb613a4da3b71168ed5c2e1238855fdfc0ea51
|
[
"MIT"
] |
permissive
|
abdalazizrashid/AI-Feynman
|
f445062b831cf835c2f53111e85fd615ecb7d38d
|
3121276389a2cb6749592206268ac8f31f6c1bbc
|
refs/heads/master
| 2022-12-06T01:18:47.214162
| 2020-08-05T18:04:39
| 2020-08-05T18:04:39
| 279,799,700
| 1
| 2
|
MIT
| 2020-07-15T07:37:34
| 2020-07-15T07:37:33
| null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
# Combines 2 Pareto frontiers obtained from the separability test into a new one.
from get_pareto import Point, ParetoSet
from sympy.parsing.sympy_parser import parse_expr
import numpy as np
import matplotlib.pyplot as plt
import os
from os import path
from sympy import Symbol, lambdify, N
from get_pareto import Point, ParetoSet
def add_sym_on_pareto(pathdir,filename,PA1,idx1,idx2,PA,sym_typ):
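# Shift every variable index at or above idx2 up by one, then substitute the combined
# variable "(x_idx1 <sym_typ> x_idx2)" into each expression before adding the points to PA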
possible_vars = ["x%s" %i for i in np.arange(0,30,1)]
PA1 = np.array(PA1.get_pareto_points()).astype('str')
for i in range(len(PA1)):
exp1 = PA1[i][2]
for j in range(len(possible_vars)-2,idx2-1,-1):
exp1 = exp1.replace(possible_vars[j],possible_vars[j+1])
exp1 = exp1.replace(possible_vars[idx1],"(" + possible_vars[idx1] + sym_typ + possible_vars[idx2] + ")")
PA.add(Point(x=float(PA1[i][0]),y=float(PA1[i][1]),data=str(exp1)))
return PA
|
[
"noreply@github.com"
] |
abdalazizrashid.noreply@github.com
|
143d796996a3fdd288046d70f7fd9193c1bf3abd
|
aee291b86d7fabb4c6b7d484712c6a0d5d1753d1
|
/config.py
|
330ff01c3a29f97add90a7477cf7d615271efc2c
|
[
"MIT"
] |
permissive
|
liubing1545/flask-scan-ftp-directory
|
2e76c88df580bba3ec85dd479dfff84b6d7c56f9
|
bbf0096b8590da8e2fe30c5520fcfd47ec293904
|
refs/heads/master
| 2021-04-15T04:54:39.685586
| 2018-04-01T04:59:24
| 2018-04-01T04:59:24
| 126,834,103
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
import os
from app.main import timer
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SSL_DISABLE = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_RECORD_QUERIES = True
CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
FLASKY_SLOW_DB_QUERY_TIME=0.5
JOBS = [
{
'id': 'job1',
'func': timer.cron_scan,
'trigger': 'interval',
'seconds': 5
}
]
# SCHEDULER_JOBSTORES = {
# 'default': SQLAlchemyJobStore(url='sqlite:///flask_context.db')
# }
SCHEDULER_API_ENABLED = True
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
class UnixConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to syslog
import logging
from logging.handlers import SysLogHandler
syslog_handler = SysLogHandler()
syslog_handler.setLevel(logging.WARNING)
app.logger.addHandler(syslog_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'unix': UnixConfig,
'default': DevelopmentConfig
}
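# Usage sketch (assumption: the selection normally happens in the application factory,
# and FLASK_CONFIG is an assumed environment variable name, not defined in this repo):
# from flask import Flask
# app = Flask(__name__)
# cfg = config[os.environ.get('FLASK_CONFIG', 'default')]
# app.config.from_object(cfg)
# cfg.init_app(app)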
|
[
"lbingg@hotmail.com"
] |
lbingg@hotmail.com
|
c629cbdd90dab58a61b3447a0fd008ed272ee17d
|
9ec46412be4b97c29b57feacd99d3142b80871f8
|
/unit6-4.py
|
3a0729c75e3647c0b011484155334b1f0255386c
|
[] |
no_license
|
klim1286/PythonBasics
|
6e788544b06b8e5017d7bfaae66646359f67a420
|
5b8a23a27c6308505e3d10fd1b650c11c2bebe7f
|
refs/heads/master
| 2023-02-16T01:39:48.519366
| 2021-01-11T21:43:01
| 2021-01-11T21:43:01
| 311,781,511
| 0
| 0
| null | 2021-01-11T21:43:02
| 2020-11-10T20:50:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
from random import choice
class Car:
def __init__(self, speed, color, name, is_police):
self.speed = speed
self.color = color
self.name = name
self.is_police = is_police
def go(self):
        print(f'{self.name} started moving')
def stop(self):
        print(f'{self.name} stopped')
def turn(self):
        print(f'{self.name} turned {choice(["forward", "backward", "left", "right"])}')
def show_speed(self):
        print(f'{self.name} is moving at a speed of {self.speed}')
class TownCar(Car):
def show_speed(self):
super().show_speed()
if self.speed >= 60:
            print(f'{self.name} exceeded the speed limit!')
class SportCar(Car):
pass
class WorkCar(Car):
def show_speed(self):
super().show_speed()
if self.speed >= 40:
            print(f'{self.name} exceeded the speed limit!')
class PoliceCar(Car):
pass
mazeratty = SportCar(100, 'blue', 'mazeratty', False)
cruma = TownCar(100, 'black', 'cruma', False)
lada = WorkCar(60, 'yellow', '2107', False)
crown = PoliceCar(100, 'white', 'victoriya', True)
mazeratty.go()
mazeratty.turn()
mazeratty.show_speed()
mazeratty.stop()
cruma.go()
cruma.turn()
cruma.show_speed()
cruma.stop()
lada.go()
lada.turn()
lada.show_speed()
lada.stop()
crown.go()
crown.turn()
crown.show_speed()
crown.stop()
|
[
"klim1286@gmail.com"
] |
klim1286@gmail.com
|
1f3c07e46d45ac1e84121a5bb87edf9005f788f0
|
b170fb19f25507533dd31006c9776aa2df387ca1
|
/utils/utils.py
|
cd1a1749936cfb8cc0a7d1a1d996492db20a9237
|
[
"MIT"
] |
permissive
|
LCFractal/YOLOV4_MCMOT
|
cf58603386a68824509120c1068523aeb7f258fb
|
0caf0c2feb788c2c3428b4917ace46e437e71b2f
|
refs/heads/master
| 2023-02-26T21:10:31.474650
| 2020-11-06T07:47:10
| 2020-11-06T07:47:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62,728
|
py
|
import glob
import math
import os
import random
import shutil
import subprocess
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from tqdm import tqdm
from . import torch_utils # , google_utils
# Set printoptions
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})
# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
torch_utils.init_seeds(seed=seed)
def check_git_status():
# Suggest 'git pull' if repo is out of date
s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
if 'Your branch is behind' in s:
print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
def load_classes(path):
# Loads *.names file at 'path'
with open(path, 'r') as f:
names = f.read().split('\n')
return list(filter(None, names)) # filter removes empty strings (such as last line)
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(np.int) # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class
    # Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class mAPs
n = len(labels)
class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco_class_weights(): # frequency of each class in coco train2014
n = [187437, 4955, 30920, 6033, 3838, 4332, 3160, 7051, 7677, 9167, 1316, 1372, 833, 6757, 7355, 3302, 3776, 4671,
6769, 5706, 3908, 903, 3686, 3596, 6200, 7920, 8779, 4505, 4272, 1862, 4698, 1962, 4403, 6659, 2402, 2689,
4012, 4175, 3411, 17048, 5637, 14553, 3923, 5539, 4289, 10084, 7018, 4314, 3099, 4638, 4939, 5543, 2038, 4004,
5053, 4578, 27292, 4113, 5931, 2905, 11174, 2873, 4036, 3415, 1517, 4122, 1980, 4464, 1190, 2302, 156, 3933,
1877, 17630, 4337, 4624, 1075, 3468, 135, 1380]
weights = 1 / torch.Tensor(n)
weights /= weights.sum()
# with open('data/coco.names', 'r') as f:
# for k, v in zip(f.read().splitlines(), n):
# print('%20s: %g' % (k, v))
return weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Transform box coordinates from [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right) to [x, y, w, h]
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Transform box coordinates from [x, y, w, h] to [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right)
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
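# Worked example (hypothetical values): a box with center (3, 4), width 2, height 2
# maps to corners [2, 3, 4, 5] via xywh2xyxy, and xyxy2xywh inverts that mapping.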
# def xywh2xyxy(box):
# # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2]
# if isinstance(box, torch.Tensor):
# x, y, w, h = box.t()
# return torch.stack((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).t()
# else: # numpy
# x, y, w, h = box.T
# return np.stack((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).T
#
#
# def xyxy2xywh(box):
# # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h]
# if isinstance(box, torch.Tensor):
# x1, y1, x2, y2 = box.t()
# return torch.stack(((x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1)).t()
# else: # numpy
# x1, y1, x2, y2 = box.T
# return np.stack(((x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1)).T
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = max(img1_shape) / max(img0_shape) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain # scale back to img0's scale
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, img_shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
boxes[:, 0].clamp_(0, img_shape[1]) # x1
boxes[:, 1].clamp_(0, img_shape[0]) # y1
boxes[:, 2].clamp_(0, img_shape[1]) # x2
boxes[:, 3].clamp_(0, img_shape[0]) # y2
def ap_per_class(tp, conf, pred_cls, target_cls):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (nparray, nx1 or nx10).
conf: Objectness value from 0-1 (nparray).
pred_cls: Predicted object classes (nparray).
target_cls: True object classes (nparray).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(target_cls)
# Create Precision-Recall curve and compute AP for each class
pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
s = [len(unique_classes), tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_gt = (target_cls == c).sum() # Number of ground truth objects
n_p = i.sum() # Number of predicted objects
if n_p == 0 or n_gt == 0:
continue
else:
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum(0)
tpc = tp[i].cumsum(0)
# Recall
recall = tpc / (n_gt + 1e-16) # recall curve
r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j] = compute_ap(recall[:, j], precision[:, j])
# Plot
# fig, ax = plt.subplots(1, 1, figsize=(5, 5))
# ax.plot(recall, precision)
# ax.set_xlabel('Recall')
# ax.set_ylabel('Precision')
# ax.set_xlim(0, 1.01)
# ax.set_ylim(0, 1.01)
# fig.tight_layout()
# fig.savefig('PR_curve.png', dpi=300)
# Compute F1 score (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
return p, r, ap, f1, unique_classes.astype('int32')
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Append sentinel values to beginning and end
mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
mpre = np.concatenate(([0.], precision, [0.]))
# Compute the precision envelope
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
# Integrate area under curve
method = 'interp' # methods: 'continuous', 'interp'
if method == 'interp':
x = np.linspace(0, 1, 101) # 101-point interp (COCO)
ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
else: # 'continuous'
i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
return ap
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.t()
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
union = (w1 * h1 + 1e-16) + w2 * h2 - inter
iou = inter / union # iou
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + 1e-16 # convex area
return iou - (c_area - union) / c_area # GIoU
if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
# convex diagonal squared
c2 = cw ** 2 + ch ** 2 + 1e-16
# centerpoint distance squared
rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (1 - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou
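# Worked example (hypothetical boxes): for [0, 0, 1, 1] and [0.5, 0, 1.5, 1] in xyxy form,
# the intersection is 0.5 and the union is 1.5, so plain IoU is about 0.333; the smallest
# enclosing box has the same area as the union here, so GIoU returns the same value.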
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.t())
area2 = box_area(box2.t())
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
def wh_iou(wh1, wh2):
"""
Using tensor's broadcasting mechanism
:param wh1:
:param wh2:
:return: N×M matrix for each N×M's iou
"""
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N, 1, 2]
wh2 = wh2[None] # [1, M, 2]
min_wh = torch.min(wh1, wh2) # min w and min h for N and M box: N×M×2
inter = min_wh.prod(dim=2) # min_w × min_h for [N, M]
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
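# Quick example (hypothetical anchors/targets): wh_iou(torch.tensor([[2., 2.]]),
# torch.tensor([[2., 4.], [1., 1.]])) -> tensor([[0.5000, 0.2500]]), since the
# top-left-aligned intersections are 4 and 1, against unions of 8 and 4.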
class FocalLoss(nn.Module):
# Wraps focal loss_funcs around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(FocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss_funcs)
# loss_funcs *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
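# e.g. smooth_BCE(eps=0.1) -> (0.95, 0.05): positive targets become 0.95 and
# negative targets 0.05 instead of hard 1/0 labels.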
def compute_loss_no_upsample(preds, reid_feat_out, targets, track_ids, model):
"""
:param preds:
:param reid_feat_out:
:param targets:
:param track_ids:
:param model:
:return:
"""
ft = torch.cuda.FloatTensor if preds[0].is_cuda else torch.Tensor
l_cls, l_box, l_obj, l_reid = ft([0]), ft([0]), ft([0]), ft([0])
# build targets for loss_funcs computation
t_cls, t_box, indices, anchor_vec, t_track_ids = build_targets_with_ids(preds, targets, track_ids, model)
h = model.hyp # hyper parameters
red = 'mean' # Loss reduction (sum or mean)
# Define criteria
BCE_cls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
BCE_obj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)
CE_reid = nn.CrossEntropyLoss()
# class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
# focal loss_funcs
g = h['fl_gamma'] # focal loss_funcs gamma
if g > 0:
BCE_cls, BCE_obj = FocalLoss(BCE_cls, g), FocalLoss(BCE_obj, g)
np, ng = 0, 0 # number grid points, targets(GT)
# Compute losses for each YOLO layer
for i, pred_i in enumerate(preds): # layer index, layer predictions
id_map_w, id_map_h = reid_feat_out[i].shape[3], reid_feat_out[i].shape[2] # 3 feature map layers
ny, nx = pred_i.shape[2], pred_i.shape[3]
b, a, gy, gx = indices[i] # image, anchor, grid_y, grid_x
tr_ids = t_track_ids[i] # track ids
cls_ids = t_cls[i]
t_obj = torch.zeros_like(pred_i[..., 0]) # target obj(confidence score), e.g. 5×3×96×96
np += t_obj.numel() # total number of elements
# Compute losses
nb = len(b) # number of targets(GT boxes)
if nb: # if exist GT box
ng += nb
# prediction subset corresponding to targets
# specified item_i_in_batch, anchor_i, grid_y, grid_x
pred_s = pred_i[b, a, gy, gx] # nb × 10
# pred_s[:, 2:4] = torch.sigmoid(pred_s[:, 2:4]) # wh power loss_funcs (uncomment)
# GIoU
pxy = torch.sigmoid(pred_s[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
pwh = torch.exp(pred_s[:, 2:4]).clamp(max=1E3) * anchor_vec[i]
p_box = torch.cat((pxy, pwh), 1) # predicted bounding box
g_iou = bbox_iou(p_box.t(), t_box[i], x1y1x2y2=False, GIoU=True) # g_iou computation: in YOLO layer's scale
l_box += (1.0 - g_iou).sum() if red == 'sum' else (1.0 - g_iou).mean() # g_iou loss_funcs
t_obj[b, a, gy, gx] = (1.0 - model.gr) + model.gr * g_iou.detach().clamp(0).type(
t_obj.dtype) # g_iou ratio taken into account
if model.nc > 1: # cls loss_funcs (only if multiple classes)
t = torch.full_like(pred_s[:, 5:], cn) # targets: nb × num_classes
t[range(nb), cls_ids] = cp
l_cls += BCE_cls(pred_s[:, 5:], t) # BCE loss for each object class
# l_cls += CE(pred_s[:, 5:], cls_ids) # CE
# ----- compute reid loss_funcs for each GT box
# get center point coordinates for all GT
center_x = gx + pred_s[:, 0]
center_y = gy + pred_s[:, 1]
# convert to reid_feature map's scale
center_x *= float(id_map_w) / float(nx)
center_y *= float(id_map_h) / float(ny)
# convert to int64 for indexing
center_x += 0.5
center_y += 0.5
center_x = center_x.long()
center_y = center_y.long()
# avoid exceed reid feature map's range
center_x.clamp_(0, id_map_w - 1)
center_y.clamp_(0, id_map_h - 1)
# get reid feature vector for GT boxes
t_reid_feat_vects = reid_feat_out[i][b, :, center_y, center_x] # nb × 128
# ----- compute each object class's reid loss_funcs
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
if multi_gpu:
for cls_id, id_num in model.module.max_id_dict.items():
inds = torch.where(cls_ids == cls_id)
if inds[0].shape[0] == 0:
# print('skip class id', cls_id)
continue
id_vects = t_reid_feat_vects[inds]
id_vects = F.normalize(id_vects, dim=1) # L2 normalize the feature vector
fc_preds = model.module.id_classifiers[cls_id].forward(id_vects).contiguous()
l_reid += CE_reid(fc_preds, tr_ids[inds])
else:
for cls_id, id_num in model.max_id_dict.items():
inds = torch.where(cls_ids == cls_id)
if inds[0].shape[0] == 0:
# print('skip class id', cls_id)
continue
id_vects = t_reid_feat_vects[inds]
# L2 normalize the feature vector
id_vects = F.normalize(id_vects, dim=1)
fc_preds = model.id_classifiers[cls_id].forward(id_vects).contiguous()
l_reid += CE_reid(fc_preds, tr_ids[inds])
# Append targets to text file
# with open('targets.txt', 'a') as file:
# [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
l_obj += BCE_obj(pred_i[..., 4], t_obj) # obj loss_funcs(confidence score loss_funcs)
l_box *= h['giou']
l_obj *= h['obj']
l_cls *= h['cls']
# l_reid *= h['reid']
l_reid /= float(nb) # reid loss_funcs normalize by number of GT objects
if red == 'sum':
bs = t_obj.shape[0] # batch size
l_obj *= 3 / (6300 * bs) * 2 # 3 / np * 2
if ng:
l_cls *= 3 / ng / model.nc
l_box *= 3 / ng
loss = l_box + l_obj + l_cls + l_reid
return loss, torch.cat((l_box, l_obj, l_cls, l_reid, loss)).detach()
def compute_loss_with_ids(preds, reid_feat_map, targets, track_ids, model):
"""
:param preds:
:param reid_feat_map:
:param targets:
:param track_ids:
:param model:
:return:
"""
ft = torch.cuda.FloatTensor if preds[0].is_cuda else torch.Tensor
l_cls, l_box, l_obj, l_reid = ft([0]), ft([0]), ft([0]), ft([0])
# build targets for loss_funcs computation
t_cls, t_box, indices, anchor_vec, t_track_ids = build_targets_with_ids(preds, targets, track_ids, model)
h = model.hyp # hyper parameters
red = 'mean' # Loss reduction (sum or mean)
# Define criteria
BCE_cls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
BCE_obj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)
CE_reid = nn.CrossEntropyLoss()
# class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
# focal loss_funcs
g = h['fl_gamma'] # focal loss_funcs gamma
if g > 0:
BCE_cls, BCE_obj = FocalLoss(BCE_cls, g), FocalLoss(BCE_obj, g)
id_map_w, id_map_h = reid_feat_map.shape[3], reid_feat_map.shape[2]
np, ng = 0, 0 # number grid points, targets(GT)
# Compute losses for each YOLO layer
for i, pred_i in enumerate(preds): # layer index, layer predictions
ny, nx = pred_i.shape[2], pred_i.shape[3]
b, a, gy, gx = indices[i] # image, anchor, grid_y, grid_x
tr_ids = t_track_ids[i] # track ids
cls_ids = t_cls[i]
t_obj = torch.zeros_like(pred_i[..., 0]) # target obj(confidence score), e.g. 5×3×96×96
np += t_obj.numel() # total number of elements
# Compute losses
nb = len(b) # number of targets(GT boxes)
if nb: # if exist GT box
ng += nb
# prediction subset corresponding to targets
# specified item_i_in_batch, anchor_i, grid_y, grid_x
pred_s = pred_i[b, a, gy, gx] # nb × 10
# pred_s[:, 2:4] = torch.sigmoid(pred_s[:, 2:4]) # wh power loss_funcs (uncomment)
# GIoU
pxy = torch.sigmoid(pred_s[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
pwh = torch.exp(pred_s[:, 2:4]).clamp(max=1E3) * anchor_vec[i]
p_box = torch.cat((pxy, pwh), 1) # predicted bounding box
g_iou = bbox_iou(p_box.t(), t_box[i], x1y1x2y2=False, GIoU=True) # g_iou computation: in YOLO layer's scale
l_box += (1.0 - g_iou).sum() if red == 'sum' else (1.0 - g_iou).mean() # g_iou loss_funcs
t_obj[b, a, gy, gx] = (1.0 - model.gr) + model.gr * g_iou.detach().clamp(0).type(
t_obj.dtype) # g_iou ratio taken into account
if model.nc > 1: # cls loss_funcs (only if multiple classes)
t = torch.full_like(pred_s[:, 5:], cn) # targets: nb × num_classes
t[range(nb), cls_ids] = cp
l_cls += BCE_cls(pred_s[:, 5:], t) # BCE loss for each object class
# l_cls += CE(pred_s[:, 5:], cls_ids) # CE
# ----- compute reid loss_funcs for each GT box
# get center point coordinates for all GT
center_x = gx + pred_s[:, 0]
center_y = gy + pred_s[:, 1]
# convert to reid_feature map's scale
center_x *= float(id_map_w) / float(nx)
center_y *= float(id_map_h) / float(ny)
# convert to int64 for indexing
center_x += 0.5
center_y += 0.5
center_x = center_x.long()
center_y = center_y.long()
# avoid exceed reid feature map's range
center_x.clamp_(0, id_map_w - 1)
center_y.clamp_(0, id_map_h - 1)
# get reid feature vector for GT boxes
t_reid_feat_vects = reid_feat_map[b, :, center_y, center_x] # nb × 128
# ----- compute each object class's reid loss_funcs
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
if multi_gpu:
for cls_id, id_num in model.module.max_id_dict.items():
inds = torch.where(cls_ids == cls_id)
if inds[0].shape[0] == 0:
# print('skip class id', cls_id)
continue
id_vects = t_reid_feat_vects[inds]
id_vects = F.normalize(id_vects, dim=1) # L2 normalize the feature vector
fc_preds = model.module.id_classifiers[cls_id].forward(id_vects).contiguous()
l_reid += CE_reid(fc_preds, tr_ids[inds])
else:
for cls_id, id_num in model.max_id_dict.items():
inds = torch.where(cls_ids == cls_id)
if inds[0].shape[0] == 0:
# print('skip class id', cls_id)
continue
id_vects = t_reid_feat_vects[inds]
id_vects = F.normalize(id_vects, dim=1) # L2 normalize the feature vector
fc_preds = model.id_classifiers[cls_id].forward(id_vects).contiguous()
l_reid += CE_reid(fc_preds, tr_ids[inds])
# Append targets to text file
# with open('targets.txt', 'a') as file:
# [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
l_obj += BCE_obj(pred_i[..., 4], t_obj) # obj loss_funcs(confidence score loss_funcs)
l_box *= h['giou']
l_obj *= h['obj']
l_cls *= h['cls']
# l_reid *= h['reid']
l_reid /= float(nb) # reid loss_funcs normalize by number of GT objects
if red == 'sum':
bs = t_obj.shape[0] # batch size
l_obj *= 3 / (6300 * bs) * 2 # 3 / np * 2
if ng:
l_cls *= 3 / ng / model.nc
l_box *= 3 / ng
loss = l_box + l_obj + l_cls + l_reid
return loss, torch.cat((l_box, l_obj, l_cls, l_reid, loss)).detach()
def compute_loss(preds, targets, model): # predictions, targets, model
ft = torch.cuda.FloatTensor if preds[0].is_cuda else torch.Tensor
l_cls, l_box, l_obj = ft([0]), ft([0]), ft([0])
t_cls, t_box, indices, anchor_vec = build_targets(preds, targets, model)
h = model.hyp # hyper parameters
red = 'mean' # Loss reduction (sum or mean)
# Define criteria
BCE_cls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
BCE_obj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)
# class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
# focal loss_funcs
g = h['fl_gamma'] # focal loss_funcs gamma
if g > 0:
BCE_cls, BCE_obj = FocalLoss(BCE_cls, g), FocalLoss(BCE_obj, g)
# Compute losses
np, ng = 0, 0 # number grid points, targets(GT)
for i, pred_i in enumerate(preds): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, grid_y, grid_x
t_obj = torch.zeros_like(pred_i[..., 0]) # target obj(confidence score), e.g. 5×3×96×96
np += t_obj.numel() # total number of elements
# Compute losses
nb = len(b) # number of targets(GT boxes)
if nb: # if exist GT box
ng += nb
# prediction subset corresponding to targets
# specified item_i_in_batch, anchor_i, grid_y, grid_x
pred_s = pred_i[b, a, gj, gi] # nb × 10
# pred_s[:, 2:4] = torch.sigmoid(pred_s[:, 2:4]) # wh power loss_funcs (uncomment)
# GIoU
pxy = torch.sigmoid(pred_s[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
pwh = torch.exp(pred_s[:, 2:4]).clamp(max=1E3) * anchor_vec[i]
p_box = torch.cat((pxy, pwh), 1) # predicted bounding box
g_iou = bbox_iou(p_box.t(), t_box[i], x1y1x2y2=False, GIoU=True) # g_iou computation
l_box += (1.0 - g_iou).sum() if red == 'sum' else (1.0 - g_iou).mean() # g_iou loss_funcs
t_obj[b, a, gj, gi] = (1.0 - model.gr) \
+ model.gr * g_iou.detach().clamp(0).type(
t_obj.dtype) # g_iou ratio taken into account
if model.nc > 1: # cls loss_funcs (only if multiple classes)
t = torch.full_like(pred_s[:, 5:], cn) # targets: nb × num_classes
t[range(nb), t_cls[i]] = cp
l_cls += BCE_cls(pred_s[:, 5:], t) # BCE
# l_cls += CE(pred_s[:, 5:], t_cls[i]) # CE
# Append targets to text file
# with open('targets.txt', 'a') as file:
# [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
l_obj += BCE_obj(pred_i[..., 4], t_obj) # obj loss_funcs(confidence score loss_funcs)
l_box *= h['giou']
l_obj *= h['obj']
l_cls *= h['cls']
if red == 'sum':
bs = t_obj.shape[0] # batch size
l_obj *= 3 / (6300 * bs) * 2 # 3 / np * 2
if ng:
l_cls *= 3 / ng / model.nc
l_box *= 3 / ng
loss = l_box + l_obj + l_cls
return loss, torch.cat((l_box, l_obj, l_cls, loss)).detach()
def build_targets_with_ids(preds, targets, track_ids, model):
"""
:param preds:
:param targets:
:param track_ids:
:param model:
:return:
"""
# targets = [image, class, x, y, w, h]
nt = targets.shape[0]
t_cls, t_box, indices, av, t_track_ids = [], [], [], [], []
reject, use_all_anchors = True, True
gain = torch.ones(6, device=targets.device) # normalized to grid space gain
# m = list(model.modules())[-1]
# for i in range(m.nl):
# anchors = m.anchors[i]
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
# build each YOLO layer of corresponding scale
for i, idx in enumerate(model.yolo_layer_inds):
# get number of grid points and anchor vec for this YOLO layer:
# anchors in YOLO layer(feature map)'s scale
anchors = model.module.module_list[idx].anchor_vec if multi_gpu else model.module_list[idx].anchor_vec
# iou of targets-anchors
gain[2:] = torch.tensor(preds[i].shape)[[3, 2, 3, 2]] # xyxy gain
t, a = targets * gain, []
gwh = t[:, 4:6] # targets(GT): bbox_w, bbox_h in yolo layer(feature map)'s scale
if nt:
iou = wh_iou(anchors, gwh) # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
if use_all_anchors:
na = anchors.shape[0] # number of anchors
a = torch.arange(na).view(-1, 1).repeat(1, nt).view(
                    -1)  # anchor index, N_a × N_gt_box: e.g. 56 zeros, 56 ones, 56 twos
t = t.repeat(na, 1) # 56 × 6 -> (56×3) × 6
tr_ids = track_ids.repeat(na) # 56 -> 56×3
else: # use best anchor only
iou, a = iou.max(0) # best iou and anchor
# reject anchors below iou_thres (OPTIONAL, increases P, lowers R)
if reject:
# get index whose anchor and gt box's iou exceeds the iou threshold,
# defined as positive sample
idx = iou.view(-1) > model.hyp['iou_t'] # iou threshold hyper parameter
t, a = t[idx], a[idx]
# GT track ids: for reid classification training
tr_ids = tr_ids[idx]
# Indices
b, c = t[:, :2].long().t() # target image index in the batch, class id
gxy = t[:, 2:4] # grid x, y (GT center)
gwh = t[:, 4:6] # grid w, h
gi, gj = gxy.long().t() # grid x, y indices(int64), .t(): transpose a matrix
indices.append((b, a, gj, gi))
# Box
gxy -= gxy.floor() # GT box center xy 's fractional part
t_box.append(torch.cat((gxy, gwh), 1)) # xywh (grids)
av.append(anchors[a]) # anchor vectors of corresponding GT boxes
# GT track ids
t_track_ids.append(tr_ids)
# GT Class ids
t_cls.append(c)
if c.shape[0]: # if any targets
assert c.max() < model.nc, \
'Model accepts %g classes labeled from 0-%g, however you labelled a class %g. ' \
'See https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data' % (
model.nc, model.nc - 1, c.max())
return t_cls, t_box, indices, av, t_track_ids
def build_targets(preds, targets, model):
# targets = [image, class, x, y, w, h]
nt = targets.shape[0]
t_cls, t_box, indices, av = [], [], [], []
reject, use_all_anchors = True, True
gain = torch.ones(6, device=targets.device) # normalized to grid space gain
# m = list(model.modules())[-1]
# for i in range(m.nl):
# anchors = m.anchors[i]
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
for i, idx in enumerate(model.yolo_layer_inds): # each YOLO layer of corresponding scale
# get number of grid points and anchor vec for this YOLO layer:
# anchors in YOLO layer(feature map)'s scale
anchors = model.module.module_list[idx].anchor_vec if multi_gpu else model.module_list[idx].anchor_vec
# iou of targets-anchors
gain[2:] = torch.tensor(preds[i].shape)[[3, 2, 3, 2]] # xyxy gain
t, a = targets * gain, []
gwh = t[:, 4:6] # targets(GT): bbox_w, bbox_h in yolo layer(feature map)'s scale
if nt:
iou = wh_iou(anchors, gwh) # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
if use_all_anchors:
na = anchors.shape[0] # number of anchors
a = torch.arange(na).view(-1, 1).repeat(1, nt).view(
                    -1)  # anchor index, N_a × N_gt_box: e.g. 56 zeros, 56 ones, 56 twos
t = t.repeat(na, 1) # 56 × 6 -> (56×3) × 6
else: # use best anchor only
iou, a = iou.max(0) # best iou and anchor
# reject anchors below iou_thres (OPTIONAL, increases P, lowers R)
if reject:
# get index whose anchor and gt box's iou exceeds the iou threshold,
# defined as positive sample
idx = iou.view(-1) > model.hyp['iou_t'] # iou threshold hyper parameter
t, a = t[idx], a[idx]
# Indices
b, c = t[:, :2].long().t() # target image index in the batch, class id
gxy = t[:, 2:4] # grid x, y (GT center)
gwh = t[:, 4:6] # grid w, h
gi, gj = gxy.long().t() # grid x, y indices(int64), .t(): transpose a matrix
indices.append((b, a, gj, gi))
# Box
gxy -= gxy.floor() # GT box center xy 's fractional part
t_box.append(torch.cat((gxy, gwh), 1)) # xywh (grids)
av.append(anchors[a]) # anchor vectors of corresponding GT boxes
# GT Class ids
t_cls.append(c)
if c.shape[0]: # if any targets
assert c.max() < model.nc, \
'Model accepts %g classes labeled from 0-%g, however you labelled a class %g. ' \
'See https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data' % (
model.nc, model.nc - 1, c.max())
return t_cls, t_box, indices, av
def non_max_suppression_with_yolo_inds(predictions,
yolo_inds,
conf_thres=0.1,
iou_thres=0.6,
merge=False,
classes=None,
agnostic=False):
"""Performs Non-Maximum Suppression (NMS) on inference results
Returns:
detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
"""
if predictions.dtype is torch.float16:
predictions = predictions.float() # to FP32
nc = predictions[0].shape[1] - 5 # number of classes
xc = predictions[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
# t = time.time()
output = [None] * predictions.shape[0]
output_yolo_inds = [None] * predictions.shape[0]
for xi, x in enumerate(predictions): # xi: image index in the batch, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
yolo_inds = yolo_inds[xi][xc[xi]]
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf (objectness probability * class probability)
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero().t()
boxes = box[i]
cls_scores = x[i, j + 5, None]
cls_inds = j[:, None].float()
yolo_inds = yolo_inds[i]
# x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
x = torch.cat((boxes, cls_scores, cls_inds), 1) # box(4), cls_score(1), cls_id(1): n×6
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# If none remain process next image
n = x.shape[0] # number of boxes
if not n:
continue
# Sort by confidence
# x = x[x[:, 4].argsort(descending=True)]
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
except: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
print(x, i, x.shape, i.shape)
pass
output[xi] = x[i]
# if (time.time() - t) > time_limit:
# break # time limit exceeded
output_yolo_inds[xi] = yolo_inds[i]
return output, output_yolo_inds
def non_max_suppression(predictions,
conf_thres=0.1,
iou_thres=0.6,
merge=False,
classes=None,
agnostic=False):
"""Performs Non-Maximum Suppression (NMS) on inference results
Returns:
detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
"""
if predictions.dtype is torch.float16:
predictions = predictions.float() # to FP32
nc = predictions[0].shape[1] - 5 # number of classes
xc = predictions[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
# t = time.time()
output = [None] * predictions.shape[0]
for xi, x in enumerate(predictions): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero().t()
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# If none remain process next image
n = x.shape[0] # number of boxes
if not n:
continue
# Sort by confidence
# x = x[x[:, 4].argsort(descending=True)]
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
except: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
print(x, i, x.shape, i.shape)
pass
output[xi] = x[i]
# if (time.time() - t) > time_limit:
# break # time limit exceeded
return output
def get_yolo_layers(model):
bool_vec = [x['type'] == 'yolo' for x in model.module_defs]
return [i for i, x in enumerate(bool_vec) if x] # [82, 94, 106] for yolov3
def print_model_biases(model):
# prints the bias neurons preceding each yolo layer
print('\nModel Bias Summary: %8s%18s%18s%18s' % ('layer', 'regression', 'objectness', 'classification'))
try:
multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
for l in model.yolo_layer_inds: # print pretrained biases
if multi_gpu:
na = model.module.module_list[l].na # number of anchors
b = model.module.module_list[l - 1][0].bias.view(na, -1) # bias 3x85
else:
na = model.module_list[l].na
b = model.module_list[l - 1][0].bias.view(na, -1) # bias 3x85
print(' ' * 20 + '%8g %18s%18s%18s' % (l, '%5.2f+/-%-5.2f' % (b[:, :4].mean(), b[:, :4].std()),
'%5.2f+/-%-5.2f' % (b[:, 4].mean(), b[:, 4].std()),
'%5.2f+/-%-5.2f' % (b[:, 5:].mean(), b[:, 5:].std())))
except:
pass
def strip_optimizer(f='weights/last.pt'): # from utils.utils import *; strip_optimizer()
# Strip optimizer from *.pt files for lighter files (reduced by 2/3 size)
x = torch.load(f, map_location=torch.device('cpu'))
x['optimizer'] = None
torch.save(x, f)
def create_backbone(f='weights/last.pt'): # from utils.utils import *; create_backbone()
# create a backbone from a *.pt file
x = torch.load(f, map_location=torch.device('cpu'))
x['optimizer'] = None
x['training_results'] = None
x['epoch'] = -1
for p in x['model'].values():
try:
p.requires_grad = True
except:
pass
torch.save(x, 'weights/backbone.pt')
def coco_class_count(path='../coco/labels/train2014/'):
# Histogram of occurrences per class
nc = 80 # number classes
x = np.zeros(nc, dtype='int32')
files = sorted(glob.glob('%s/*.*' % path))
for i, file in enumerate(files):
labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
print(i, len(files))
def coco_only_people(path='../coco/labels/train2017/'): # from utils.utils import *; coco_only_people()
# Find images with only people
files = sorted(glob.glob('%s/*.*' % path))
for i, file in enumerate(files):
labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
if all(labels[:, 0] == 0):
print(labels.shape[0], file)
def select_best_evolve(path='evolve*.txt'): # from utils.utils import *; select_best_evolve()
# Find best evolved mutation
for file in sorted(glob.glob(path)):
x = np.loadtxt(file, dtype=np.float32, ndmin=2)
print(file, x[fitness(x).argmax()])
def crop_images_random(path='../images/', scale=0.50): # from utils.utils import *; crop_images_random()
# crops images into random squares up to scale fraction
# WARNING: overwrites images!
for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
img = cv2.imread(file) # BGR
if img is not None:
h, w = img.shape[:2]
# create random mask
a = 30 # minimum size (pixels)
mask_h = random.randint(a, int(max(a, h * scale))) # mask height
mask_w = mask_h # mask width
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
# Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
if os.path.exists('new/'):
shutil.rmtree('new/') # delete output folder
os.makedirs('new/') # make new output folder
os.makedirs('new/labels/')
os.makedirs('new/images/')
for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
with open(file, 'r') as f:
labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
i = labels[:, 0] == label_class
if any(i):
img_file = file.replace('labels', 'images').replace('txt', 'jpg')
labels[:, 0] = 0 # reset class to 0
with open('new/images.txt', 'a') as f: # add image to dataset list
f.write(img_file + '\n')
with open('new/labels/' + Path(file).name, 'a') as f: # write label
for l in labels[i]:
f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
def kmean_anchors(path='../coco/train2017.txt', n=12, img_size=(320, 1024), thr=0.10, gen=1000):
# Creates kmeans anchors for use in *.cfg files: from utils.utils import *; _ = kmean_anchors()
# n: number of anchors
# img_size: (min, max) image size used for multi-scale training (can be same values)
# thr: IoU threshold hyperparameter used for training (0.0 - 1.0)
# gen: generations to evolve anchors using genetic algorithm
from utils.datasets import LoadImagesAndLabels
def print_results(k):
k = k[np.argsort(k.prod(1))] # sort small to large
iou = wh_iou(wh, torch.Tensor(k))
max_iou = iou.max(1)[0]
bpr, aat = (max_iou > thr).float().mean(), (iou > thr).float().mean() * n # best possible recall, anch > thr
print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))
print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %
(n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')
for i, x in enumerate(k):
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
return k
def fitness(k): # mutation fitness
iou = wh_iou(wh, torch.Tensor(k)) # iou
max_iou = iou.max(1)[0]
return (max_iou * (max_iou > thr).float()).mean() # product
# Get label wh
wh = []
dataset = LoadImagesAndLabels(path, augment=True, rect=True, cache_labels=True)
nr = 1 if img_size[0] == img_size[1] else 10 # number augmentation repetitions
for s, l in zip(dataset.shapes, dataset.labels):
wh.append(l[:, 3:5] * (s / s.max())) # image normalized to letterbox normalized wh
wh = np.concatenate(wh, 0).repeat(nr, axis=0) # augment 10x
wh *= np.random.uniform(img_size[0], img_size[1], size=(wh.shape[0], 1)) # normalized to pixels (multi-scale)
wh = wh[(wh > 2.0).all(1)] # remove below threshold boxes (< 2 pixels wh)
# Darknet yolov3.cfg anchors
use_darknet = False
if use_darknet and n == 9:
k = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]])
else:
# Kmeans calculation
from scipy.cluster.vq import kmeans
print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
s = wh.std(0) # sigmas for whitening
k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
k *= s
wh = torch.Tensor(wh)
k = print_results(k)
# # Plot
# k, d = [None] * 20, [None] * 20
# for i in tqdm(range(1, 21)):
# k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
# fig, ax = plt.subplots(1, 2, figsize=(14, 7))
# ax = ax.ravel()
# ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
# fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
# ax[0].hist(wh[wh[:, 0]<100, 0],400)
# ax[1].hist(wh[wh[:, 1]<100, 1],400)
# fig.tight_layout()
# fig.savefig('wh.png', dpi=200)
# Evolve
npr = np.random
f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma
for _ in tqdm(range(gen), desc='Evolving anchors'):
v = np.ones(sh)
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) # 98.6, 61.6
kg = (k.copy() * v).clip(min=2.0)
fg = fitness(kg)
if fg > f:
f, k = fg, kg.copy()
print_results(k)
k = print_results(k)
return k
def print_mutation(hyp, results, bucket=''):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
c = '%10.4g' * len(results) % results # results (P, R, mAP, F1, test_loss)
print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
if bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % bucket) # download evolve.txt
with open('evolve.txt', 'a') as f: # append result
f.write(c + b + '\n')
x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g') # save sort by fitness
if bucket:
os.system('gsutil cp evolve.txt gs://%s' % bucket) # upload evolve.txt
def apply_classifier(x, model, img, im0):
# applies a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('test%i.jpg' % j, cutout)
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def fitness(x):
# Returns fitness (for use with results.txt or evolve.txt)
w = [0.0, 0.01, 0.99, 0.00] # weights for [P, R, mAP, F1]@0.5 or [P, R, mAP@0.5, mAP@0.5:0.95]
return (x[:, :4] * w).sum(1)
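# e.g. for a single hypothetical results row [P, R, mAP, F1] = [0.60, 0.50, 0.40, 0.45],
# fitness returns 0.50*0.01 + 0.40*0.99 = 0.401.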
# Plotting functions ---------------------------------------------------------------------------------------------------
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def plot_wh_methods(): # from utils.utils import *; plot_wh_methods()
# Compares the two methods for width-height anchor multiplication
# https://github.com/ultralytics/yolov3/issues/168
x = np.arange(-4.0, 4.0, .1)
ya = np.exp(x)
yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
fig = plt.figure(figsize=(6, 3), dpi=150)
plt.plot(x, ya, '.-', label='yolo method')
plt.plot(x, yb ** 2, '.-', label='^2 power method')
plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
plt.xlim(left=-4, right=4)
plt.ylim(bottom=0, top=6)
plt.xlabel('input')
plt.ylabel('output')
plt.legend()
fig.tight_layout()
fig.savefig('comparison.png', dpi=200)
def plot_images(imgs, targets, paths=None, fname='images.png'):
# Plots training images overlaid with targets
imgs = imgs.cpu().numpy()
targets = targets.cpu().numpy()
# targets = targets[targets[:, 1] == 21] # plot only one class
fig = plt.figure(figsize=(10, 10))
bs, _, h, w = imgs.shape # batch size, _, height, width
bs = min(bs, 16) # limit plot to 16 images
ns = np.ceil(bs ** 0.5) # number of subplots
for i in range(bs):
boxes = xywh2xyxy(targets[targets[:, 0] == i, 2:6]).T
boxes[[0, 2]] *= w
boxes[[1, 3]] *= h
plt.subplot(int(ns), int(ns), int(i + 1)).imshow(imgs[i].transpose(1, 2, 0))
plt.plot(boxes[[0, 2, 2, 0, 0]], boxes[[1, 1, 3, 3, 1]], '.-')
plt.axis('off')
if paths is not None:
s = Path(paths[i]).name
plt.title(s[:min(len(s), 40)], fontdict={'size': 8}) # limit to 40 characters
fig.tight_layout()
fig.savefig(fname, dpi=200)
plt.close()
def plot_test_txt(): # from utils.utils import *; plot_test()
# Plot test.txt histograms
x = np.loadtxt('test.txt', dtype=np.float32)
box = xyxy2xywh(x[:, :4])
cx, cy = box[:, 0], box[:, 1]
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
ax.set_aspect('equal')
fig.tight_layout()
plt.savefig('hist2d.png', dpi=300)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].hist(cx, bins=600)
ax[1].hist(cy, bins=600)
fig.tight_layout()
plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt(): # from utils.utils import *; plot_targets_txt()
# Plot targets.txt histograms
x = np.loadtxt('targets.txt', dtype=np.float32).T
s = ['x targets', 'y targets', 'width targets', 'height targets']
fig, ax = plt.subplots(2, 2, figsize=(8, 8))
ax = ax.ravel()
for i in range(4):
ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
ax[i].legend()
ax[i].set_title(s[i])
fig.tight_layout()
plt.savefig('targets.jpg', dpi=200)
def plot_evolution_results(hyp): # from utils.utils import *; plot_evolution_results(hyp)
# Plot hyperparameter evolution results in evolve.txt
x = np.loadtxt('evolve.txt', ndmin=2)
f = fitness(x)
weights = (f - f.min()) ** 2 # for weighted results
fig = plt.figure(figsize=(12, 10))
matplotlib.rc('font', **{'size': 8})
for i, (k, v) in enumerate(hyp.items()):
y = x[:, i + 7]
# mu = (y * weights).sum() / weights.sum() # best weighted result
mu = y[f.argmax()] # best single result
plt.subplot(4, 5, i + 1)
plt.plot(mu, f.max(), 'o', markersize=10)
plt.plot(y, f, '.')
plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
print('%15s: %.3g' % (k, mu))
fig.tight_layout()
plt.savefig('evolve.png', dpi=200)
def plot_results_overlay(start=0, stop=0): # from utils.utils import *; plot_results_overlay()
# Plot training results files 'results*.txt', overlaying train and val losses
s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'F1'] # legends
t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
fig, ax = plt.subplots(1, 5, figsize=(14, 3.5))
ax = ax.ravel()
for i in range(5):
for j in [i, i + 5]:
y = results[j, x]
if i in [0, 1, 2]:
                    y[y == 0] = np.nan  # don't show zero loss values
ax[i].plot(x, y, marker='.', label=s[j])
ax[i].set_title(t[i])
ax[i].legend()
ax[i].set_ylabel(f) if i == 0 else None # add filename
fig.tight_layout()
fig.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_results(start=0, stop=0, bucket='', id=()): # from utils.utils import *; plot_results()
# Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov3#training
fig, ax = plt.subplots(2, 5, figsize=(12, 6))
ax = ax.ravel()
s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'F1']
if bucket:
os.system('rm -rf storage.googleapis.com')
files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
else:
files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
for f in sorted(files):
try:
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
for i in range(10):
y = results[i, x]
if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
# y /= y[0] # normalize
ax[i].plot(x, y, marker='.', label=Path(f).stem, linewidth=2, markersize=8)
ax[i].set_title(s[i])
if i in [5, 6, 7]: # share train and val loss_funcs y axes
ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
except:
print('Warning: Plotting error for %s, skipping file' % f)
fig.tight_layout()
ax[1].legend()
fig.savefig('results.png', dpi=200)
|
[
"765305261@qq.com"
] |
765305261@qq.com
|
c22f4a16a3db433255b5d0f0612f123d1d394bce
|
3aada9baaac59b10d63ab1af6841a1d8d8fcbf7b
|
/confer/wsgi.py
|
cada8d65d116841dad5f96f5a926a1894c7d1940
|
[] |
no_license
|
lesage20/confer
|
576840454d2204a67cca086cf4e64b6cb2a87b2d
|
f512262cc102a81907dda5ad41f935131c629f51
|
refs/heads/master
| 2022-11-03T02:48:00.409562
| 2020-06-11T04:44:07
| 2020-06-11T04:44:07
| 271,451,315
| 0
| 0
| null | 2020-06-11T04:30:28
| 2020-06-11T04:29:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for confer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'confer.settings')
application = get_wsgi_application()
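# Example (editor's note): a WSGI server such as gunicorn can serve this module with
#   gunicorn confer.wsgi:application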
|
[
"angezanou00@gmail.com"
] |
angezanou00@gmail.com
|
acbf78ac1d2e46d4f344a6c734e372bd1e2986d6
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/problems/N1959_Minimum_Total_Space_Wasted_With_K_Resizing_Operations.py
|
bbd38bedade264e75ad702da913ea3aee2e9f0cd
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230
| 2023-08-31T00:49:31
| 2023-08-31T00:49:31
| 143,770,000
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
import sys
from typing import List
class Solution:
def minSpaceWastedKResizing(self, nums: List[int], k: int) -> int:
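        # dp[i][j] (editor's note): minimum total wasted space covering nums[0..i]
        # using exactly j resizes, i.e. j + 1 constant-size segments; the initial
        # sizing does not count as a resize.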
length = len(nums)
dp = [[sys.maxsize] * (k + 1) for _ in range(length)]
total, mx = 0, 0
for i in range(length):
mx = max(mx, nums[i])
total += nums[i]
dp[i][0] = mx * (i + 1) - total
for i in range(1, length):
for j in range(1, min(i, k) + 1):
cur, mx = 0, 0
for p in range(i, max(j-1,1)-1, -1):
mx = max(mx, nums[p])
cur += nums[p]
dp[i][j] = min(dp[i][j], dp[p-1][j-1] + mx * (i - p + 1) - cur)
res = sys.maxsize
for i in range(k + 1):
res = min(res, dp[-1][i])
return res
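# Example (editor's check): with nums = [10, 20, 30] and k = 1, the best split is
# [10, 20] | [30], wasting (20 - 10) + 0 + 0 = 10.
# print(Solution().minSpaceWastedKResizing([10, 20, 30], 1))  # -> 10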
|
[
"rarry1988528@126.com"
] |
rarry1988528@126.com
|
a439070bbb0e642be64dd9784c376bc0b3f450d8
|
c577eaf5cde0f1313b6ea7103f8e595f929dd8e6
|
/ml/k.txt
|
15e9555a16b57d749990763e8c19a4b4677b49c7
|
[] |
no_license
|
rutvij26/sem7
|
856e281a5022450eaf7eb7f422d46a9f4256f29a
|
a745dc3aa28e8728b712ef13fc58b89303044232
|
refs/heads/master
| 2020-08-26T13:36:22.242421
| 2019-10-23T14:46:07
| 2019-10-23T14:46:07
| 217,027,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,771
|
txt
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
# Data set is given below
df = pd.DataFrame({
'x': [12, 20, 28, 18, 29, 33, 24, 45, 45, 52, 51, 52, 55, 53, 55, 61, 64, 69, 72],
'y': [39, 36, 30, 52, 54, 46, 55, 59, 63, 70, 66, 63, 58, 23, 14, 8, 19, 7, 24]
})
# generate random numbers
np.random.seed(200)
k = 3
# centroids[i] = [x, y]
# Return random integers
centroids = {
i+1: [np.random.randint(0, 80), np.random.randint(0, 80)] #Dictionary comprehension
for i in range(k)
}
print("Centroid=",centroids)
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color='k') # k is for BLACK
colmap = {1: 'r', 2: 'g', 3: 'b'}
for i in centroids.keys(): # represent color centroid ...
plt.scatter(*centroids[i], color=colmap[i]) # .keys() returns a view object that displays a list of all the keys.
plt.xlim(0, 80)
plt.ylim(0, 80)
plt.show()
# In[3]:
##. Assignment Stage
def assignment(df, centroids):
    for i in centroids.keys():  # keys run from 1 to 3 each time
        # sqrt((x1 - x2)^2 + (y1 - y2)^2)
df['distance_from_{}'.format(i)] = (
np.sqrt(
(df['x'] - centroids[i][0]) ** 2
+ (df['y'] - centroids[i][1]) ** 2
)
)
centroid_distance_cols = ['distance_from_{}'.format(i) for i in centroids.keys()]
df['closest'] = df.loc[:, centroid_distance_cols].idxmin(axis=1)
df['closest'] = df['closest'].map(lambda x: int(x.lstrip('distance_from_')))
df['color'] = df['closest'].map(lambda x: colmap[x])
return df
df = assignment(df, centroids)
print(df)
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 80)
plt.ylim(0, 80)
plt.show()
# In[4]:
## Update Stage
import copy
old_centroids = copy.deepcopy(centroids)  # deep copy so the old centroid positions are kept for drawing the movement arrows below
def update(k):
for i in centroids.keys():
centroids[i][0] = np.mean(df[df['closest'] == i]['x'])
centroids[i][1] = np.mean(df[df['closest'] == i]['y'])
return k
centroids = update(centroids)
print("Updated centroids",centroids)
fig = plt.figure(figsize=(5, 5))
ax = plt.axes()
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k') #alpha value for intensity
for i in centroids.keys():
plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 80)
plt.ylim(0, 80)
for i in old_centroids.keys():
old_x = old_centroids[i][0]
old_y = old_centroids[i][1]
dx = (centroids[i][0] - old_centroids[i][0]) * 0.75
dy = (centroids[i][1] - old_centroids[i][1]) * 0.75
ax.arrow(old_x, old_y, dx, dy, head_width=2, head_length=3, fc=colmap[i], ec=colmap[i])
plt.show()
# In[5]:
## Repeat Assigment Stage
df = assignment(df, centroids)
# Plot results
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 80)
plt.ylim(0, 80)
plt.show()
# In[6]:
while True:
closest_centroids = df['closest'].copy(deep=True)
centroids = update(centroids)
df = assignment(df, centroids)
if closest_centroids.equals(df['closest']):
break
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 80)
plt.ylim(0, 80)
plt.show()
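# For comparison (editor's sketch, assuming scikit-learn is installed): the same clustering
# can be obtained with sklearn.cluster.KMeans, which handles initialization and convergence itself.
# from sklearn.cluster import KMeans
# km = KMeans(n_clusters=3, n_init=10, random_state=200).fit(df[['x', 'y']])
# print(km.cluster_centers_)   # final centroids
# print(km.labels_)            # cluster index assigned to each point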
|
[
"noreply@github.com"
] |
rutvij26.noreply@github.com
|
fb0be32e19d129913ec1f4e1c6d8ae956fdecfb8
|
51935327470f48ac055d9511a969b679b8637697
|
/admin_ser/apps.py
|
315c1b2b4548c4c0b81cbe9e6d980719a6815939
|
[] |
no_license
|
AngeLShuang/minon
|
06e614e4684e7b7c73ee1e378a894ca59ca58dc8
|
9ef62d227b52c5533f5e9994e25a3b9aa0826e4d
|
refs/heads/master
| 2020-04-06T08:22:07.579222
| 2018-11-13T01:36:00
| 2018-11-13T01:36:00
| 157,302,327
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from django.apps import AppConfig
class AdminConfig(AppConfig):
name = 'admin_ser'
|
[
"1418727910@qq.com"
] |
1418727910@qq.com
|
4dba18ca0f90ca2fa410d98db21e6342d97ac10f
|
987fcdd9e4cf41f3f0341fe04ff60472e727dc15
|
/cans/cans2/data/sim_data/gen_sim_data.py
|
0fb84ad2275acfc6620544cd4085bd700c659454
|
[] |
no_license
|
lwlss/CANS
|
c7734a980259136d03a5a94f1d36785c9ce16bba
|
9db811f25bf22d13f898466af3bf232e0d4571f9
|
refs/heads/master
| 2021-01-09T07:59:25.671370
| 2016-10-15T17:48:48
| 2016-10-15T17:48:48
| 39,069,739
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
"""Simulate plate amounts and save data as json."""
import numpy as np
import json
from cans2.plate import Plate
from cans2.model import CompModel
from cans2.cans_funcs import dict_to_json
mean = 5
var = 3
rows = 5
cols = 5
file_name = '{0}x{1}_comp_model/mean_5_var_3.json'.format(rows, cols)
custom_params = {
'C_0': 0.0001,
'N_0': 1.0,
'kn': 0.1,
}
custom_params['kn'] = 0.2
model = CompModel()
times = np.linspace(0, 5, 21)
# Generate sets of simulated amounts
for i in range(1):
plate1 = Plate(rows, cols)
plate1.times = times
plate1.set_sim_data(model, r_mean=mean, r_var=var,
custom_params=custom_params)
data = {
'sim_params': plate1.sim_params,
'sim_amounts': plate1.sim_amounts,
'c_meas': plate1.c_meas,
'times': plate1.times,
'r_mean': mean,
'r_var': var,
'rows': plate1.rows,
'cols': plate1.cols,
'model': model.name,
'model_params': model.params,
}
data = dict_to_json(data)
with open(file_name, 'w') as f:
json.dump(data, f, sort_keys=True, indent=4)
|
[
"daniel.boocock@protonmail.ch"
] |
daniel.boocock@protonmail.ch
|
8828cbd790210db9499074eb33cea8aec7a3dbb7
|
3fcb5f11cd7a666d9f938db90877ad1bbce57a1b
|
/datasets/generate_datalist_pixelshift.py
|
b499306fd5fc4567561b3e01d487450adb18aba5
|
[
"MIT"
] |
permissive
|
guochengqian/TENet
|
362f498078895ec1e7718483f35ef8fc9fef0fab
|
098262e7e0ba979e303bea643b0b366efc50dcd3
|
refs/heads/master
| 2023-07-11T00:50:41.733358
| 2023-07-02T23:19:26
| 2023-07-02T23:19:26
| 186,419,827
| 261
| 37
| null | 2019-05-28T12:47:34
| 2019-05-13T12:56:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
import os
import random
import argparse
parser = argparse.ArgumentParser(description='A multi-thread tool to crop sub images')
parser.add_argument('--train_path', type=str, default='../data/pixelshift200/train/train_rggb_512')
parser.add_argument('--test_path', type=str, default='../data/pixelshift200/test/test_rggb_1024')
args = parser.parse_args()
train_data_path = args.train_path
test_data_path = args.test_path
train_dst_path = 'train_pixelshift.txt'
val_dst_path = 'val_pixelshift.txt'
# val_datasets_num = 200
# type_list = ['png', 'PNG', 'tiff', 'tif', 'TIFF', 'JPG', 'jgp']
type_list = ['mat', 'MAT']
# remove old
if os.path.exists(train_dst_path):
os.system('rm '+train_dst_path)
if os.path.exists(val_dst_path):
os.system('rm '+val_dst_path)
# change to absolute path
if train_data_path[0] != '/':
train_data_path = os.path.join(os.getcwd(), train_data_path)
if test_data_path[0] != '/':
test_data_path = os.path.join(os.getcwd(), test_data_path)
# image list
lines = []
for file in os.listdir(train_data_path):
if file.split('.')[-1] in type_list:
lines.append(os.path.join(train_data_path, file)+'\n')
random.shuffle(lines)
train_lines = lines
lines = []
for file in os.listdir(test_data_path):
if file.split('.')[-1] in type_list:
lines.append(os.path.join(test_data_path,file)+'\n')
random.shuffle(lines)
val_lines = lines
# write datalist
with open(train_dst_path, 'a') as train_files:
for line in train_lines:
train_files.write(line)
with open(val_dst_path, 'w') as val_files:
for line in val_lines:
val_files.write(line)
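# Example invocation (editor's note), using the default paths from the arguments above:
#   python generate_datalist_pixelshift.py \
#       --train_path ../data/pixelshift200/train/train_rggb_512 \
#       --test_path ../data/pixelshift200/test/test_rggb_1024
# This writes one absolute .mat path per line into train_pixelshift.txt and val_pixelshift.txt.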
|
[
"guocheng.qian@outlook.com"
] |
guocheng.qian@outlook.com
|
504b50f13d44873d865dc30cdf90fd690c5aa3b3
|
762de1c66746267e05d53184d7854934616416ee
|
/tools/MolSurfGenService/MolSurfaceGen32/chimera/share/Bld2VRML/bld2vrml.py
|
66ea92d6eb6e5dc3eca472a5e20da974259f17f1
|
[] |
no_license
|
project-renard-survey/semanticscience
|
6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677
|
024890dba56c3e82ea2cf8c773965117f8cda339
|
refs/heads/master
| 2021-07-07T21:47:17.767414
| 2017-10-04T12:13:50
| 2017-10-04T12:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,660
|
py
|
import sys
from string import *
from math import *
# Utility functions
def getDistance(p1, p2):
return ( (p2[0] - p1[0])**2 + (p2[1] - p1[1])**2 + (p2[2] - p1[2])**2 ) ** 0.5
def interp(t, a, b):
c = list(a)
for i in range(3):
c[i] += t*(float(b[i]) - float(a[i]))
return c
def getMidpoint(p1, p2):
return tuple(interp(0.5, p1, p2))
def getRGBcolor(nCol):
if nCol == 0: return (1, 1, 1)
if nCol < 9: return interp( (nCol - 1)/8.0, (0, 1, 0), (0, 1, 1))
if nCol < 17: return interp( (nCol - 8)/8.0, (0, 1, 1), (0, 0, 1))
if nCol < 25: return interp( (nCol - 16)/8.0, (0, 0, 1), (1, 0, 1))
if nCol < 33: return interp( (nCol - 24)/8.0, (1, 0, 1), (1, 0, 0))
if nCol < 49: return interp( (nCol - 32)/16.0, (1, 0, 0), (1, 1, 0))
if nCol < 65: return interp( (nCol - 48)/16.0, (1, 1, 0), (0, 0, 0))
if nCol == 65: return (0.7, 0.7, 0.7)
return None
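# For example (editor's note): getRGBcolor(0) -> white, getRGBcolor(1) -> green,
# getRGBcolor(16) -> blue, getRGBcolor(32) -> red, getRGBcolor(65) -> light grey;
# indices above 65 fall through and return None.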
def getRGBcolor_byName(sCol):
from chimera import colorTable
try:
c = colorTable.getColorByName(sCol)
return c.rgba()[:3]
except KeyError:
return None
#######################
# Generic object class; used to hold environment data
class Object:
def __init__(self, **pw):
for k in pw.keys():
setattr(self, k, pw[k])
# Generic output object; used to hold output data
class Output:
def __init__(self):
self.indentLength = 0
self.textList = []
def __lt__(self, line):
if len(line) < 1:
self.textList.append('\n')
else:
if line[0] == "]" or line[0] == "}":
self.indentLength -= 1
self.textList.append(("\t" * self.indentLength) + line)
if line[-1] == "[" or line[-1] == "{":
self.indentLength += 1
def __str__(self):
return join(self.textList, '\n')
class Environment:
def __init__(self):
self.transformStack = []
self.curLines = None
self.lineObjs = []
self.geomObjs = []
self.polyObjs = []
self.color = None
self.transparency = 0
self.tCoords = (0, 0, 0)
self.fontSize = 5
self.fontStyle = "PLAIN"
self.fontFamily = "TYPEWRITER"
def handleLine(self, line):
if len(line) < 2: return
if line[0] != '.':
self.handleLine(".text " + line)
return
line = split(line[1:])
if line[0] == "arrow":
p1 = [float(line[1]), float(line[2]), float(line[3])]
p2 = [float(line[4]), float(line[5]), float(line[6])]
if len(line) > 7: r1 = float(line[7])
else: r1 = 0.1
if len(line) > 8: r2 = float(line[8])
else: r2 = 4.0 * r1
if len(line) > 9: rho = float(line[9])
else: rho = 0.75
pJunct = list(interp(rho, p1, p2))
self.handleLine(".cylinder %f %f %f %f %f %f %f" % tuple(p1 + pJunct + [r1]))
self.handleLine(".cone %f %f %f %f %f %f %f" % tuple(pJunct + p2 + [r2]))
elif line[0] == "cmov":
self.tCoords = (float(line[1]), float(line[2]), float(line[3]))
elif line[0] == "color":
if len(line) == 2:
try:
color = getRGBcolor(int(line[1]))
except:
if line[1].lower() == "none":
self.color = color = None
else:
color = getRGBcolor_byName(
line[1])
else:
try:
color = (float(line[1]), float(line[2]), float(line[3]))
except:
color = getRGBcolor_byName(" ".join(line[1:]))
if color:
color = tuple(map(float, color))
self.color = color
elif line[0] == "transparency":
try:
transparency = float(line[1])
except:
transparency = 0
self.transparency = transparency
elif line[0] == "box":
obj = Object(shape = "box")
p1 = (float(line[1]), float(line[2]), float(line[3]))
p2 = (float(line[4]), float(line[5]), float(line[6]))
pCentre = getMidpoint(p1, p2)
obj.width = p2[0] - p1[0]
obj.height = p2[1] - p1[1]
obj.depth = p2[2] - p1[2]
obj.color = self.color
obj.transparency = self.transparency
obj.transforms = self.transformStack[:]
obj.transforms.append(Object(to=pCentre, form='translate'))
self.geomObjs.append(obj)
elif line[0] == "cone":
obj = Object(shape = "cone")
p1 = (float(line[1]), float(line[2]), float(line[3]))
p2 = (float(line[4]), float(line[5]), float(line[6]))
obj.radius = float(line[7])
obj.height = getDistance(p1, p2)
pCentre = getMidpoint(p1, p2)
if len(line) < 9: obj.closed = True
elif lower(line[8]) == "open": obj.closed = False
obj.color = self.color
obj.transparency = self.transparency
obj.transforms = self.transformStack[:]
fTheta = asin( (p2[2] - p1[2]) / obj.height )
try:
fPhi = atan2(p2[0] - p1[0], p2[1] - p1[1])
except ValueError:
fPhi = 0.0
obj.transforms.append(Object(to=pCentre, form='translate'))
obj.transforms.append(Object(angle=-fPhi, axis='z', form='rotate'))
obj.transforms.append(Object(angle=fTheta, axis='x', form='rotate'))
self.geomObjs.append(obj)
elif line[0] == "cylinder":
obj = Object(shape = "cylinder")
p1 = (float(line[1]), float(line[2]), float(line[3]))
p2 = (float(line[4]), float(line[5]), float(line[6]))
obj.radius = float(line[7])
obj.height = getDistance(p1, p2)
pCentre = getMidpoint(p1, p2)
if len(line) < 9: obj.closed = True
elif lower(line[8]) == "open": obj.closed = False
obj.color = self.color
obj.transparency = self.transparency
obj.transforms = self.transformStack[:]
fTheta = asin( (p2[2] - p1[2]) / obj.height )
try:
fPhi = atan2(p2[0] - p1[0], p2[1] - p1[1])
except ValueError:
fPhi = 0.0
obj.transforms.append(Object(to=pCentre, form='translate'))
obj.transforms.append(Object(angle=-fPhi, axis='z', form='rotate'))
obj.transforms.append(Object(angle=fTheta, axis='x', form='rotate'))
self.geomObjs.append(obj)
elif line[0] in ("d", "draw"):
if self.curLines:
x, y, z = (float(line[1]), float(line[2]), float(line[3]))
self.curPosition = (x, y, z)
self.curLines.vertices.append(self.curPosition)
self.curLines.vertex_colors.append(self.color)
else:
self.handleLine(".move " + join(line[1:]))
elif line[0] in ("dot", "dotat"):
self.handleLine(".move " + join(line[1:]))
self.handleLine(".sphere " + join(line[1:]) + " 1")
elif line[0] in ("dr", "drawrel"):
dx, dy, dz = (float(line[1]), float(line[2]), float(line[3]))
x, y, z = self.curPosition
self.handleLine(".draw " + join(map(str, (x+dx, y+dy, z+dz))))
elif line[0] == "font":
family, size = lower(line[1]), int(line[2])
if len(line) > 3: style = lower(line[3])
else: style = "plain"
self.fontSize = size
if family[:5] == "times": self.fontFamily = "SERIF"
elif family == "helvetica": self.fontFamily = "SANS"
elif family == "courier": self.fontFamily = "TYPEWRITER"
elif family == "serif": self.fontFamily = "SERIF"
elif family[:4] == "sans": self.fontFamily = "SANS"
elif family == "typewriter": self.fontFamily = "TYPEWRITER"
elif family == "tt": self.fontFamily = "TYPEWRITER"
else: self.fontFamily = "TYPEWRITER"
if style == "plain": self.fontStyle = "PLAIN"
elif style == "bold": self.fontStyle = "BOLD"
elif style == "italic": self.fontStyle = "ITALIC"
elif style == "bold italic": self.fontStyle = "BOLD ITALIC"
else: self.fontStyle = "PLAIN"
elif line[0] in ("m", "move"):
self.flush()
x, y, z = (float(line[1]), float(line[2]), float(line[3]))
self.curPosition = (x, y, z)
self.curLines = Object()
self.curLines.vertices = [self.curPosition]
self.curLines.vertex_colors = [self.color]
elif line[0] == "marker":
self.handleLine(".move " + join(line[1:]))
x, y, z = float(line[1]), float(line[2]), float(line[3])
self.handleLine(".box %f %f %f %f %f %f" % (x-0.5, y-0.5, z-0.5, x+0.5, y+0.5, z+0.5))
elif line[0] == "mr" or line[0] == "moverel":
dx, dy, dz = (float(line[1]), float(line[2]), float(line[3]))
x, y, z = self.curPosition
self.handleLine(".move " + join(map(str, (x+dx, y+dy, z+dz))))
elif line[0] == "polygon":
obj = Object(shape = "polygon")
obj.vertices = []
for i in range(1, len(line), 3):
x = float(line[i])
y = float(line[i + 1])
z = float(line[i + 2])
obj.vertices.append( (x, y, z) )
obj.color = self.color
obj.transparency = self.transparency
obj.transforms = self.transformStack[:]
self.polyObjs.append(obj)
elif line[0] == "pop":
self.flush()
self.transformStack.pop()
elif line[0] in ("rot", "rotate"):
self.flush()
if line[2][0] in 'xyzXYZ':
obj = Object(form='rotate', axis=lower(line[2][0]), angle=radians(float(line[1])))
else:
obj = Object(form='rotate', axis=(float(line[2]), float(line[3]), float(line[4])), angle=radians(float(line[1])))
self.transformStack.append(obj)
elif line[0] == "scale":
self.flush()
obj = Object(form = 'scale')
xscale = float(line[1])
if len(line) > 2:
yscale = float(line[2])
zscale = float(line[3])
else:
yscale = zscale = xscale
obj.xscale, obj.yscale, obj.zscale = xscale, yscale, zscale
self.transformStack.append(obj)
elif line[0] == "sphere":
obj = Object(shape = "sphere")
pCentre = (float(line[1]), float(line[2]), float(line[3]))
obj.radius = float(line[4])
obj.color = self.color
obj.transparency = self.transparency
obj.transforms = self.transformStack[:]
obj.transforms.append(Object(to=pCentre, form='translate'))
self.geomObjs.append(obj)
elif line[0] == "text":
obj = Object(shape = "text")
obj.color = self.color
obj.transparency = self.transparency
obj.string = join(line[1:])
obj.fontSize = self.fontSize
obj.fontStyle = self.fontStyle
obj.fontFamily = self.fontFamily
obj.transforms = self.transformStack[:]
obj.transforms.append(Object(to=self.tCoords, form='translate'))
self.geomObjs.append(obj)
elif line[0] in ("tran", "translate"):
self.flush()
tCoords = (float(line[1]), float(line[2]), float(line[3]))
self.transformStack.append(Object(form='translate', to=tCoords))
elif line[0] == "v" or line[0] == "vector":
self.handleLine(".m " + join(line[1:4]))
self.handleLine(".d " + join(line[4:7]))
elif line[0] in ("c", "comment"):
pass
else:
raise "Unrecognized Command"
def flush(self):
if self.curLines:
self.curLines.transforms = self.transformStack[:]
self.lineObjs.append(self.curLines)
self.curLines = None
# Finish generating the environment object; called after the last handleLine().
def finish(self):
self.flush()
# Convert from the "environment" object to a VRML output
def buildVRML(self):
o = Output()
o < "#VRML V2.0 utf8"
for obj in self.lineObjs:
post = handleTransform(o, obj.transforms)
o < ""
o < "Transform {"
o < "children ["
o < "Shape {"
o < "geometry IndexedLineSet {"
o < "coord Coordinate {"
o < "point ["
for vertex in obj.vertices:
o < "%f %f %f," % vertex
o < "0 0 0"
o < "]"
o < "}"
o < "coordIndex ["
o < join(map(str, range(len(obj.vertices))))
o < "]"
if obj.vertex_colors[0] is not None:
o < "color Color {"
o < "color ["
for vcolor in obj.vertex_colors:
o < "%f %f %f," % vcolor
o < "0 0 0"
o < "]"
o < "}"
o < "colorIndex ["
o < join(map(str, range(len(obj.vertex_colors))))
o < "]"
o < "colorPerVertex TRUE"
o < "}"
o < "}"
o < "]"
o < "}"
for s in post: o < s
for obj in self.polyObjs:
post = handleTransform(o, obj.transforms)
o < ""
o < "Transform {"
o < "children ["
o < "Shape {"
o < "appearance Appearance {"
if obj.color is None:
o < 'namedColor "__model__"'
else:
o < "material Material {"
o < "diffuseColor %f %f %f" % obj.color
o < "transparency %f" % obj.transparency
o < "}"
o < "}"
o < "geometry IndexedFaceSet {"
o < "coord Coordinate {"
o < "point ["
for vertex in obj.vertices:
o < "%f %f %f," % vertex
o < "0 0 0"
o < "]"
o < "}"
o < "coordIndex ["
o < join(map(str, range(len(obj.vertices))))
o < "]"
o < "color Color {"
o < "color ["
for vertex in obj.vertices:
o < "%f %f %f," % obj.color
o < "0 0 0"
o < "]"
o < "}"
o < "colorIndex ["
o < join(map(str, range(len(obj.vertices))))
o < "]"
o < "solid FALSE"
o < "colorPerVertex TRUE"
o < "}"
o < "}"
o < "]"
o < "}"
for s in post: o < s
for obj in self.geomObjs:
post = handleTransform(o, obj.transforms)
o < ""
o < "Shape {"
o < "appearance Appearance {"
if obj.color is None:
o < 'namedColor "__model__"'
else:
o < "material Material {"
o < "diffuseColor %f %f %f" % obj.color
o < "transparency %f" % obj.transparency
o < "}"
o < "}"
if obj.shape == "sphere":
o < "geometry Sphere {"
o < "radius %f" % obj.radius
o < "}"
elif obj.shape == "cylinder":
o < "geometry Cylinder {"
o < "radius %f" % obj.radius
o < "height %f" % obj.height
if not obj.closed:
o < "top FALSE"
o < "bottom FALSE"
o < "}"
elif obj.shape == "cone":
o < "geometry Cone {"
o < "bottomRadius %f" % obj.radius
o < "height %f" % obj.height
if not obj.closed:
o < "bottom FALSE"
o < "}"
elif obj.shape == "box":
o < "geometry Box {"
o < "size %f %f %f" % (obj.width, obj.height, obj.depth)
o < "}"
elif obj.shape == "text":
o < "geometry Text {"
o < "string [ \"%s\" ]" % obj.string
o < "fontStyle FontStyle {"
o < "size %d" % obj.fontSize
o < "family \"%s\"" % obj.fontFamily
o < "style \"%s\"" % obj.fontStyle
o < "}"
o < "}"
o < "}"
for s in post: o < s
return str(o)
# Appropriately generate the enclosing Transform blocks from the environment transform stack
# Return the lines that go at the end
def handleTransform(o, tStack):
if not tStack:
return []
else:
post = handleTransform(o, tStack[:-1])
trans = tStack[-1]
o < "Transform {"
if trans.form == "translate":
o < "translation %f %f %f" % trans.to
elif trans.form == "rotate" and isinstance(trans.axis, tuple):
o < "rotation %f %f %f %f" % (trans.axis + (trans.angle,))
elif trans.form == "rotate" and trans.axis == 'x':
o < "rotation 1 0 0 %f" % trans.angle
elif trans.form == "rotate" and trans.axis == 'y':
o < "rotation 0 1 0 %f" % trans.angle
elif trans.form == "rotate" and trans.axis == 'z':
o < "rotation 0 0 1 %f" % trans.angle
elif trans.form == "scale":
o < "scale %f %f %f" % (trans.xscale, trans.yscale, trans.zscale)
o < "children ["
post.append(']')
post.append('}')
return post
def main():
env = Environment()
code = [".color white", ".dot 0 0 0"]
code += [".color red", ".vector 0 0 0 20 0 0", ".dot 20 0 0"]
code += [".color green", ".vector 0 0 0 0 20 0", ".dot 0 20 0"]
code += [".color blue", ".vector 0 0 0 0 0 20", ".dot 0 0 20"]
code += [".color 44", ".arrow 1 1 1 5 5 5", ".arrow 1 1 2 8 6 9"]
code += [".arrow 1 2 1 10 10 4", ".arrow 2 1 1 -2 7 10"]
code += [".color 22", ".polygon 20 0 0 0 20 0 0 0 20"]
code += [".color 5", ".marker 20 20 20", ".dr -20 0 0"]
code += [".v 20 20 20 20 0 20", ".dr 0 20 -20"]
code += [".v 20 20 20 20 20 0", ".dr -20 0 20", ".dr 20 -20 0"]
code += [".color 12", ".sphere 10 10 10 3"]
code += [".color 10", ".arrow 19 19 19 12 12 12 0.2 1.0"]
code += [".color 53", ".cylinder 14 -5 14 14 0 14 2 open"]
code += [".sphere 14 -5 14 2", ".sphere 14 0 14 2"]
for line in code:
try:
env.handleLine(line)
except:
sys.stderr.write("ERROR: \"%s\" raised an %s:\n\t\"%s\"\n" % (line, str(sys.exc_type), str(sys.exc_value)))
env.finish()
print env.buildVRML()
if __name__ == "__main__":
main()
|
[
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] |
alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5
|
f93ebfa2f9e08428f3cbfe27e0ea99e6437e4a2f
|
5b77cd52b82ac205cc4dabe8715e419ec0bd14d6
|
/python_xpath_Process_duoyexiaoshuo.py
|
20046a2382679929b30121b625bbdff5caa13b33
|
[
"Apache-2.0"
] |
permissive
|
17790793549/tongjier
|
c9ab29e4737e8f0c5406847907a4f9a5ab92c3e9
|
79d1657f2b99595604ec65992f37d09e5da6215c
|
refs/heads/master
| 2020-03-19T13:21:16.991108
| 2020-01-16T08:45:13
| 2020-01-16T08:45:13
| 136,575,007
| 10
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,932
|
py
|
"""
http://www.quanben.io/n/guantiwanzhengban/list.html
1. Start two processes: master and worker1
2. master crawls all the chapter links on that page
3. worker1 fetches each chapter page and saves it locally (a process lock could be considered)
"""
"""
Use multiple processes...
Search the 2209 files for the keywords "美丽,帅哥" (beautiful, handsome).
"""
import multiprocessing
import requests
from lxml import etree
def dict_1():
url = 'http://www.quanben.io/n/guantiwanzhengban/list.html'
dict_ = {}
response = requests.get(url)
response.encoding='gbk'
wb=response.text
html=etree.HTML(wb)
html_data = html.xpath('/html/body/div[3]/ul/li/a/@href')
for i in html_data:
q = i.split('/')[-1]
w = q.split('.')[0]
dict_[w]=i
return dict_
def download_task(num1,num2,dict_):
for i in range(num1,num2+1):
i_ = dict_[str(i)]
url = 'http://www.quanben.io{}'.format(i_)
response = requests.get(url)
response.encoding = 'utf8'
wb_data = response.text
html = etree.HTML(wb_data)
html_data = html.xpath('//*[@id="content"]/p/text()')
path = 'D://PYTHON//{}.txt'.format(str(i))
with open(path,'w',encoding='utf8',errors='ignore') as f:
for ii in html_data:
f.write('\n'+ii)
print('完成')
def main():
dict_ = dict_1()
download_task(1,5,dict_)
    # start two worker processes: pass the target function and its arguments; note that args must be a tuple
p1 = multiprocessing.Process(target=download_task, args=(1,20,dict_))
p2 = multiprocessing.Process(target=download_task, args=(21,40,dict_))
    # get the process ids
    # start the processes
p1.start()
p2.start()
##############
    # block until both processes have finished
p1.join()
p2.join()
##############
if __name__ == '__main__':
main()
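# Alternative sketch (editor's note): inside main() the same fan-out could be written with a
# process pool, avoiding manual Process management; the chapter ranges are illustrative.
# with multiprocessing.Pool(processes=2) as pool:
#     pool.starmap(download_task, [(1, 20, dict_), (21, 40, dict_)])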
|
[
"noreply@github.com"
] |
17790793549.noreply@github.com
|
6157904590133276d22da7f720c8e9ba2659740c
|
a1119965e2e3bdc40126fd92f4b4b8ee7016dfca
|
/trunk/network_semantics_tests/tests/sendmess/sendmess_bad_host.py
|
e953c972d2377e0f14234eb4c8d78f1737c344da
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SeattleTestbed/attic
|
0e33211ddf39efdbcf5573d4fc7fa5201aa7310d
|
f618a962ce2fd3c4838564e8c62c10924f5df45f
|
refs/heads/master
| 2021-06-10T23:10:47.792847
| 2017-05-15T12:05:43
| 2017-05-15T12:05:43
| 20,154,061
| 0
| 1
| null | 2014-10-16T17:21:06
| 2014-05-25T12:34:00
|
Python
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
# if a host name that cannot be resolved is used an exception occurs.
if callfunc == 'initialize':
port = 12345
try:
sendmess('notactuallyahost',port,'ping')
except:
pass
else:
print "using 'notactuallyahost' did not cause exception"
|
[
"USER@DOMAIN"
] |
USER@DOMAIN
|
f252c21a4d16a3088e67a2cec3db1c7097ef3c80
|
4929b54ac86e5d0064913b097da89abfb079fd48
|
/KNN/kNN.py
|
deb9ba15653c05271e039bf45405b7e6bc632bfd
|
[] |
no_license
|
haiboowang/meachine-learning-in-action-in-python3-
|
5914135cc1b95a3058a1e52680f1f1f4343bfcc8
|
aa9f916add6a973592b5493b368be5600f7e4f8d
|
refs/heads/master
| 2021-07-08T22:48:01.412645
| 2017-10-06T12:14:36
| 2017-10-06T12:14:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,125
|
py
|
from numpy import *
import operator
def createDataSet():
group=array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
labels=['A','A','B','B']
return group,labels
# 2-1 the k-nearest-neighbor algorithm
def classify0(inX,dataSet,labels,k):
    # build a matrix of the same shape as dataSet from inX and compute the distances
dataSetSize=dataSet.shape[0]
diffMat=tile(inX,(dataSetSize,1))-dataSet
sqDiffMat=diffMat**2
sqDistances=sqDiffMat.sum(axis=1)
distances=sqDistances**0.5
    # sort by distance; argsort() returns the indices of the sorted values (ascending by default)
sortedDistIndicies=distances.argsort()
classCount={}
    # tally the labels of the k nearest points
for i in range(k):
voteIlabel=labels[sortedDistIndicies[i]]
classCount[voteIlabel]=classCount.get(voteIlabel,0)+1
sortedClassCount=sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)
    # return the most frequent label
return sortedClassCount[0][0]
# 2-2 parse the text records into NumPy arrays
def file2matrix(filename):
fr=open(filename)
arraylines=fr.readlines()
numberOflines=len(arraylines)
returnMat=zeros((numberOflines,3))
classLabelVector=[]
index=0
    # read each line's features into returnMat and its label into classLabelVector
for line in arraylines:
line=line.strip()
        listFormLine=line.split('\t')  # split the fields on tab characters
returnMat[index ,:]=listFormLine[0:3]
classLabelVector.append(int(listFormLine[-1]))
index+=1
return returnMat,classLabelVector
# 2-3 normalize the feature values
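# Normalization maps every feature into [0, 1]: newValue = (oldValue - min) / (max - min),
# computed per column with min(0)/max(0) and broadcast over all rows via tile() below.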
def autoNorm(dataSet):
minVals=dataSet.min(0)
maxVals=dataSet.max(0)
ranges=maxVals-minVals
normDataSet=zeros(shape(dataSet))
m=dataSet.shape[0]
normDataSet=dataSet-tile(minVals,(m,1))
normDataSet=normDataSet/tile(ranges,(m,1))
return normDataSet,ranges,minVals
# 2-4 test harness for the dating-site classifier
def datingClassTest():
hoRatio=0.50
datingDataMat,datingLabels=file2matrix('datingTestSet2.txt')
normMat,ranges,minVals=autoNorm(datingDataMat)
m=normMat.shape[0]
numTestVecs=int(m*hoRatio)
errorCount=0.0
for i in range(numTestVecs):
classifierResult=classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
print("the classifier came back with:%d,the real answer is:%d"%(classifierResult,datingLabels[i]))
if(classifierResult!=datingLabels[i]):errorCount+=1.0
print("the total error rate is :%f" %(errorCount/float(numTestVecs)))
print(errorCount)
# 2-5 dating-site prediction function
def classifyPerson():
resultList=['不喜欢','有点喜欢','很喜欢']
percentTats=float(input("每天玩游戏所占百分比?"))
ffMiles=float(input("每年飞行的里程?"))
iceCream=float(input("一年吃的冰淇淋的数量(升)?"))
datingDataMat,datingLabels=file2matrix('datingTestSet2.txt')
normMat,ranges,minVals=autoNorm(datingDataMat)
inArr=array([ffMiles,percentTats,iceCream])
inArr=array((inArr - minVals) / ranges)
classifierResult=classify0(inArr,normMat,datingLabels,3)
print("you will probably like this person:",resultList[classifierResult-1])
# example of kNN using scikit-learn
def sklearnKNN():
resultList=['不喜欢','有点喜欢','很喜欢']
percentTats=float(input("每天玩游戏所占百分比?"))
ffMiles=float(input("每年飞行的里程?"))
iceCream=float(input("一年吃的冰淇淋的数量(升)?"))
datingDataMat,datingLabels=file2matrix('datingTestSet2.txt')
normMat,ranges,minVals=autoNorm(datingDataMat)
inArr=[ffMiles,percentTats,iceCream]
inArr = array([(inArr - minVals) / ranges])
X_test=[[37777,5.99911,1.58877]]
from sklearn.neighbors import KNeighborsClassifier
neigh=KNeighborsClassifier(n_neighbors=3)
neigh.fit(normMat,datingLabels)
Y_pred=neigh.predict(inArr)
print(Y_pred)
print("you will probably like this person:", resultList[Y_pred[0] - 1])
if __name__=='__main__':
#group,labels=createDataSet()
#print(classify0([0,0],group,labels,3))
# db,dl=file2matrix('datingTestSet2.txt')
# print(db)
# normMat,ranges,minVals=autoNorm(db)
# print(normMat)
# datingClassTest()
# classifyPerson()
sklearnKNN()
|
[
"noreply@github.com"
] |
haiboowang.noreply@github.com
|
724f4a0ba505bb08a9c2d1a3642182e18d2d45dd
|
929c9fc294df777ea3f795e0ff6c96edcfc307c0
|
/gnn.py
|
cc31f03a1206341987998bb100fd1af09e620195
|
[] |
no_license
|
charudatta10/gnn
|
61d0062a4a7039dc57a914ac1f65f0d5bdca7847
|
020e1b12047d3bca1b7661f8adb5e650d506d7c8
|
refs/heads/master
| 2022-01-07T18:51:48.322545
| 2019-08-25T12:53:22
| 2019-08-25T12:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,680
|
py
|
import tensorflow as tf
class GCNLayer(tf.keras.layers.Layer):
def __init__(self, output_dim, activation=None, **kwargs):
self.output_dim = output_dim
self.activation = activation
super().__init__(**kwargs)
def build(self, input_shape): # [batch_size, num_vertices, num_vertices], [batch_size, num_vertices, num_features]
A_shape, H_shape = input_shape
self.num_vertices = A_shape[1].value
self.W = self.add_weight( # [num_features, output_dim]
name='W',
shape=[H_shape[2].value, self.output_dim]
)
super().build(input_shape)
def call(self, inputs, **kwargs):
"""
:param inputs: A for adjacent matrix [batch_size, num_vertices, num_vertices] (should be normalized in advance)
H for features [batch_size, num_vertices, num_features]
"""
A, H = inputs[0], inputs[1]
# A * H * W [batch_size, num_vertices, num_vertices] * [batch_size, num_vertices, num_features] * [num_features, output_dim]
# see https://www.tensorflow.org/api_docs/python/tf/tensordot and https://www.machenxiao.com/blog/tensordot
# for tf.tensordot()
H_next = tf.tensordot(tf.matmul(A, H), self.W, axes=[2, 0])
if self.activation is not None:
H_next = self.activation(H_next)
return H_next
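# Usage sketch (editor's note; shapes follow the comments in build()/call() above, and the
# adjacency matrix A is assumed to be normalized in advance):
# A: [batch_size, num_vertices, num_vertices], H: [batch_size, num_vertices, num_features]
# gcn = GCNLayer(output_dim=16, activation=tf.nn.relu)
# H1 = gcn([A, H])   # -> [batch_size, num_vertices, 16]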
class GATLayer(tf.keras.layers.Layer):
# reference: https://github.com/danielegrattarola/keras-gat/blob/master/keras_gat/graph_attention_layer.py
def __init__(self, output_dim, activation=None, **kwargs):
self.output_dim = output_dim
self.activation = activation
super().__init__(**kwargs)
def build(self, input_shape):
A_shape, H_shape = input_shape
        self.W = self.add_weight( # [num_features, output_dim]
name='W',
shape=[H_shape[2].value, self.output_dim]
)
# a = [a_1, a_2]
self.a_1 = self.add_weight(name='a_1', shape=[self.output_dim, 1])
self.a_2 = self.add_weight(name='a_2', shape=[self.output_dim, 1])
def call(self, inputs, **kwargs):
A, H = inputs[0], inputs[1]
# [batch_size, num_vertices, num_features] * [num_features, output_dim]
H_ = tf.tensordot(H, self.W, axes=[2, 0]) # [batch_size, num_vertices, output_dim]
e = tf.nn.leaky_relu(
tf.tensordot(H_, self.a_1, axes=[2, 0]) + tf.transpose(tf.tensordot(H_, self.a_2, axes=[2, 0]), perm=[0, 2, 1]),
alpha=0.2
) # [batch_size, num_vertices, num_vertices]
A = tf.cast(tf.math.greater(A, 0.0), dtype=tf.float32)
alpha = tf.nn.softmax(e * A)
H_next = tf.matmul(alpha, H_)
if self.activation is not None:
return self.activation(H_next)
else:
return H_next
class MultiHeadGATLayer(tf.keras.layers.Layer):
def __init__(self, output_dim, num_heads, activation, aggregation, **kwargs):
"""
:param aggregation: 'concat' or 'average'
"""
self.output_dim = output_dim
self.num_heads = num_heads
self.activation = activation
self.aggregation = aggregation
self.layers = [GATLayer(output_dim, activation=None) for _ in range(num_heads)]
super().__init__(**kwargs)
def call(self, inputs, **kwargs):
A, H = inputs[0], inputs[1]
        H_next_list = [self.layers[i]([A, H]) for i in range(self.num_heads)]
if self.aggregation == 'concat':
return self.activation(tf.concat(H_next_list, axis=-1))
else:
return self.activation(tf.reduce_mean(tf.stack(H_next_list, axis=-1), axis=-1))
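# Usage sketch (editor's note): two attention heads whose outputs are concatenated, giving
# [batch_size, num_vertices, 2 * output_dim] under the 'concat' aggregation above.
# gat = MultiHeadGATLayer(output_dim=8, num_heads=2, activation=tf.nn.elu, aggregation='concat')
# H1 = gat([A, H])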
|
[
"xihanli316@gmail.com"
] |
xihanli316@gmail.com
|
253260a9a8189faaf006e479b65e1a1ce4a1c6e1
|
3daae8bdcc94142cd22fdb3d25580960e84e59b8
|
/moose_nerp/cells/proto154_84362/param_chan.py
|
e1ee455a38ab2d4781f14aec784d733343632d6d
|
[] |
no_license
|
neurord/moose_nerp
|
a2cc543fe0e02fc5bd473a1b991d599211b36398
|
74bc20685c6e68cbc7a786271c55a6387f437965
|
refs/heads/master
| 2022-10-11T15:22:14.251028
| 2022-08-24T21:25:31
| 2022-08-24T21:25:31
| 27,131,330
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,319
|
py
|
# Generated from npzfile: fitgp-proto-cmaes_proto154_84362_24.npz of fit number: 6398
from moose_nerp.prototypes.util import NamedDict
from moose_nerp.prototypes.chan_proto import (
AlphaBetaChannelParams,
StandardMooseTauInfChannelParams,
TauInfMinChannelParams,
ZChannelParams,
BKChannelParams,
ChannelSettings,
TypicalOneD,
TwoD)
#units for membrane potential: volts
krev=-90e-3
narev=50e-3
carev=130e-3
hcnrev=-30e-3
ZpowCDI=2
VMIN = -120e-3
VMAX = 50e-3
VDIVS = 3401 #0.5 mV steps
#units for calcium concentration: mM
CAMIN = 0.01e-3 #10 nM
CAMAX = 60e-3 #60 uM, might want to go up to 100 uM with spines
CADIVS = 5999 #10 nM steps
#contains all gating parameters and reversal potentials
# Gate equations have the form:
# AlphaBetaChannelParams (specify forward and backward transition rates):
# alpha(v) or beta(v) = (rate + B * v) / (C + exp((v + vhalf) / vslope))
# OR
# StandardMooseTauInfChannelParams (specify steady state and time constants):
# tau(v) or inf(v) = (rate + B * v) / (C + exp((v + vhalf) / vslope))
# OR
# TauInfMinChannelParams (specify steady state and time constants with non-zero minimum - useful for tau):
# inf(v) = min + max / (1 + exp((v + vhalf) / vslope))
# tau(v) = taumin + tauVdep / (1 + exp((v + tauVhalf) / tauVslope))
# or if tau_power=2: tau(v) = taumin + tauVdep / (1 + exp((v + tauVhalf) / tauVslope))* 1 / (1 + exp((v + tauVhalf) / -tauVslope))
#
# where v is membrane potential in volts, vhalf and vslope have units of volts
# C, min and max are dimensionless; and C should be either +1, 0 or -1
# Rate has units of per sec, and B has units of per sec per volt
# taumin and tauVdep have units of per sec
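# Worked example (editor's note): with the AlphaBetaChannelParams form above, the forward
# rate of Na_m_params (defined below) at v = -60 mV is
#   alpha(-0.060) = 28854.3 / (1 + exp((-0.060 + 0.0329) / -0.005)) ≈ 128 per sec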
qfactNaF = 1.0
#These values were too fast - change rate from 35e3 to 16e3
Na_m_params = AlphaBetaChannelParams(A_rate=28854.29710252535,
A_B=0.0,
A_C=1,
A_vhalf=0.032944893267078876,
A_vslope=-0.005,
B_rate=28854.29710252535,
B_B=0.0,
B_C=1,
B_vhalf=0.032944893267078876,
B_vslope=0.005)
Na_h_params = AlphaBetaChannelParams(A_rate=7358.084358004004,
A_B=0.0,
A_C=1,
A_vhalf=0.07381216143041182,
A_vslope=0.009,
B_rate=7358.084358004004,
B_B=0.0,
B_C=1,
B_vhalf=0.03381216143041183,
B_vslope=-0.005)
'''
Na_s_params= AlphaBetaChannelParams(A_rate = 100,
A_B = 0,
A_C = 1,
A_vhalf = 84e-3,
A_vslope = 10.0e-3,
B_rate = 80,
B_B = 0.0,
B_C = 1,
B_vhalf = -26e-3,
B_vslope = -14e-3)
'''
Na_s_params = TauInfMinChannelParams(T_min=0.01,
T_vdep=2.2,
T_vhalf=-0.032,
T_vslope=0.012,
SS_min=0.15,
SS_vdep=0.85,
SS_vhalf=-0.045,
SS_vslope=0.0054,
T_power=2)
NaFparam = ChannelSettings(Xpow=3, Ypow=1, Zpow=0, Erev=narev, name='NaF')
#persistent Na_channel. Slow down from Dieter Jaeger
'''
NaS_m_params = AlphaBetaChannelParams(A_rate=76e3,
A_B=0,
A_C=1,
A_vhalf=-55.4e-3,
A_vslope=-13.6e-3,
B_rate=70e3,
B_B=0.0,
B_C=1,
B_vhalf=135e-3,
B_vslope=13.5e-3)
'''
NaS_m_params = AlphaBetaChannelParams(A_rate=26240.667648781226,
A_B=0.0,
A_C=1,
A_vhalf=-0.0615232210785421,
A_vslope=-0.027,
B_rate=26240.667648781226,
B_B=0.0,
B_C=1,
B_vhalf=0.1288767789214579,
B_vslope=0.0262)
NaS_h_params = AlphaBetaChannelParams(A_rate=23.59121938827259,
A_B=0.0,
A_C=1,
A_vhalf=0.05911281386076481,
A_vslope=0.0045,
B_rate=22.116768176505555,
B_B=0.0,
B_C=1,
B_vhalf=0.058112813860764806,
B_vslope=-0.004)
NaS_s_params = AlphaBetaChannelParams(A_rate=-0.147,
A_B=-8.64,
A_C=1,
A_vhalf=0.017,
A_vslope=0.00463,
B_rate=1.341,
B_B=20.82,
B_C=1,
B_vhalf=0.0644,
B_vslope=-0.00263)
'''
NaS_s_params = TauInfMinChannelParams(SS_min = 0,
SS_vdep = 1,
SS_vhalf = -0.010,
SS_vslope = 0.0049,
T_min = 0.5,
T_vdep = 8,
T_vhalf = -0.066,
T_vslope = -0.016,
T_power=2)
'''
NaSparam = ChannelSettings(Xpow=3, Ypow=1, Zpow=0, Erev=narev, name='NaP')
#KDrparam -Kv2
KDrparam = ChannelSettings(Xpow=4, Ypow=1, Zpow=0, Erev=krev, name='KDr')
KDr_X_params = AlphaBetaChannelParams(A_rate=5400.0,
A_B=0,
A_C=1,
A_vhalf=-0.049,
A_vslope=-0.015,
B_rate=2300.0,
B_B=0.0,
B_C=1,
B_vhalf=0.12,
B_vslope=0.022)
KDr_Y_params = AlphaBetaChannelParams(A_rate=0.292,
A_B=0,
A_C=1,
A_vhalf=0.0,
A_vslope=0.015,
B_rate=0.292,
B_B=0.0,
B_C=1,
B_vhalf=0.0,
B_vslope=-0.015)
Kv3param = ChannelSettings(Xpow=4, Ypow=1, Zpow=0, Erev=krev, name='Kv3')
#qfactKrp=1
Kv3_X_params = AlphaBetaChannelParams(A_rate=7450.388612841055,
A_B=0.0,
A_C=1,
A_vhalf=-0.025848597988629853,
A_vslope=-0.015,
B_rate=10132.528513463834,
B_B=0.0,
B_C=1,
B_vhalf=0.08515140201137016,
B_vslope=0.015)
Kv3_Y_params = TauInfMinChannelParams(T_min=0.0055144867513592715,
T_vdep=0.020482379362191577,
T_vhalf=-0.0029495138630166806,
T_vslope=0.01,
SS_min=0.6,
SS_vdep=0.4,
SS_vhalf=-0.02294951386301668,
SS_vslope=0.01,
T_power=1)
KvFparam = ChannelSettings(Xpow=4, Ypow=1, Zpow=0, Erev=krev, name='KvF')
KvF_X_params = AlphaBetaChannelParams(A_rate=5368.493927236361,
A_B=0.0,
A_C=1,
A_vhalf=-0.036008449930908994,
A_vslope=-0.026,
B_rate=5368.493927236361,
B_B=0.0,
B_C=1,
B_vhalf=0.12599155006909102,
B_vslope=0.027)
KvF_Y_params = AlphaBetaChannelParams(A_rate=209.86251175212638,
A_B=0.0,
A_C=1,
A_vhalf=0.09433084562399177,
A_vslope=0.012,
B_rate=198.66984445867965,
B_B=0.0,
B_C=1,
B_vhalf=0.06933084562399178,
B_vslope=-0.012)
KvSparam = ChannelSettings(Xpow=2, Ypow=1, Zpow=0, Erev=krev, name='KvS')
KvS_X_params = AlphaBetaChannelParams(A_rate=6604.590651800115,
A_B=0.0,
A_C=1,
A_vhalf=-0.028709824768143793,
A_vslope=-0.022,
B_rate=2607.075257289519,
B_B=0.0,
B_C=1,
B_vhalf=0.10129017523185621,
B_vslope=0.02)
KvS_Y_params = AlphaBetaChannelParams(A_rate=12.845858592579612,
A_B=0.0,
A_C=1,
A_vhalf=0.08844775082089602,
A_vslope=0.01,
B_rate=13.521956413241696,
B_B=0.0,
B_C=1,
B_vhalf=0.07344775082089602,
B_vslope=-0.012)
KCNQparam = ChannelSettings(Xpow=4, Ypow=0, Zpow=0, Erev=krev, name='KCNQ')
KCNQ_X_params = AlphaBetaChannelParams(A_rate=195,
A_B=0,
A_C=1,
A_vhalf=-0.039,
A_vslope=-0.0295,
B_rate=120,
B_B=0,
B_C=1,
B_vhalf=0.128,
B_vslope=0.032)
KCNQ_Y_params = []
HCN1param = ChannelSettings(Xpow=1, Ypow=0, Zpow=0, Erev=hcnrev, name='HCN1')
HCN1_X_params = AlphaBetaChannelParams(A_rate=90.63953483779824,
A_B=0.0,
A_C=1,
A_vhalf=0.1312855411436227,
A_vslope=0.01,
B_rate=39.2771317630459,
B_B=0.0,
B_C=1,
B_vhalf=0.011285541143622714,
B_vslope=-0.012)
HCN1_Y_params=[]
HCN2param = ChannelSettings(Xpow=1, Ypow=0, Zpow=0, Erev=hcnrev, name='HCN2')
HCN2_X_params = AlphaBetaChannelParams(A_rate=13.842119385137138,
A_B=0.0,
A_C=1,
A_vhalf=0.14198712434702515,
A_vslope=0.01,
B_rate=9.228079590091426,
B_B=0.0,
B_C=1,
B_vhalf=0.02198712434702514,
B_vslope=-0.012)
HCN2_Y_params=[]
#Caparam - D.James Surmeier, ( tau and ss)
Caparam=ChannelSettings(Xpow=1 , Ypow=0 , Zpow=0, Erev=carev , name='Ca')
Ca_X_params = AlphaBetaChannelParams(A_rate=378823.64359600894,
A_B=-5389963.259331261,
A_C=-1,
A_vhalf=-0.07028315878409347,
A_vslope=-0.011,
B_rate=889.343936900314,
B_B=0.0,
B_C=1,
B_vhalf=0.008716841215906525,
B_vslope=0.013)
Ca_Y_params=[]
SKparam= ChannelSettings(Xpow=0, Ypow=0, Zpow=1, Erev=krev, name='SKCa')
SK_Z_params = ZChannelParams(Kd=0.00035,
power=4.6,
tau=0.002,
taumax=0.0037928,
tau_power=4.3,
cahalf=0.002703)
#BK channel
BKparam = ChannelSettings(Xpow=1, Ypow=0, Zpow=0, Erev=krev, name='BKCa')
BK_X_params=[BKChannelParams(alphabeta=480, K=0.18, delta=-0.84),
BKChannelParams(alphabeta=280, K=0.011, delta=-1.0)]
Channels = NamedDict(
'Channels',
KDr = TypicalOneD(KDrparam, KDr_X_params, KDr_Y_params),
Kv3 = TypicalOneD(Kv3param, Kv3_X_params, Kv3_Y_params),
KvF = TypicalOneD(KvFparam, KvF_X_params, KvF_Y_params),
KvS = TypicalOneD(KvSparam, KvS_X_params, KvS_Y_params),
HCN1 = TypicalOneD(HCN1param,HCN1_X_params, []),
HCN2 = TypicalOneD(HCN2param,HCN2_X_params, []),
KCNQ = TypicalOneD(KCNQparam,KCNQ_X_params, []),
NaF = TypicalOneD(NaFparam, Na_m_params, Na_h_params,Na_s_params),
NaS= TypicalOneD(NaSparam,NaS_m_params,NaS_h_params,NaS_s_params),
Ca = TypicalOneD(Caparam,Ca_X_params, [],[], calciumPermeable=True),
SKCa= TypicalOneD(SKparam, [], [], SK_Z_params , calciumDependent=True),
BKCa=TwoD(BKparam, BK_X_params, calciumDependent=True),
)
# have to add NaP and calcium channels
|
[
"avrama@gmu.edu"
] |
avrama@gmu.edu
|
1efde54ba66e1b49a043e16ee6709eb0c5fbc0d3
|
09a970bebcbcd1fd1e1e6703e1fccc3c05d6a813
|
/client/configuration/tests/configuration_test.py
|
1f133401c2949db35cea10e6c3a4f69f1ec50d7c
|
[
"MIT"
] |
permissive
|
joshkehn/pyre-check
|
ff401557de08a46d6ddb9d4c2aee34f3779b1747
|
922b62e78e4990d8d268bb2ebde5912ceea9e226
|
refs/heads/master
| 2022-06-06T10:36:06.142551
| 2022-05-20T19:27:06
| 2022-05-20T19:27:06
| 166,895,044
| 0
| 0
|
MIT
| 2019-01-21T23:32:17
| 2019-01-21T23:32:17
| null |
UTF-8
|
Python
| false
| false
| 52,365
|
py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import shutil
import site
import subprocess
import sys
import tempfile
import unittest
from pathlib import Path
import testslide
from ... import command_arguments, find_directories
from ...find_directories import BINARY_NAME
from ...tests.setup import (
ensure_directories_exists,
ensure_files_exist,
switch_environment,
switch_working_directory,
write_configuration_file,
)
from ..configuration import (
check_nested_local_configuration,
Configuration,
create_configuration,
ExtensionElement,
get_default_site_roots,
PartialConfiguration,
)
from ..exceptions import InvalidConfiguration
from ..ide_features import IdeFeatures
from ..platform_aware import PlatformAware
from ..python_version import PythonVersion
from ..search_path import (
SimpleElement,
SimpleRawElement,
SitePackageRawElement,
SubdirectoryRawElement,
)
from ..shared_memory import SharedMemory
from ..site_packages import SearchStrategy
from ..unwatched import UnwatchedDependency, UnwatchedFiles
class PartialConfigurationTest(unittest.TestCase):
def test_create_from_command_arguments(self) -> None:
configuration = PartialConfiguration.from_command_arguments(
command_arguments.CommandArguments(
local_configuration=None,
logger="logger",
targets=[],
source_directories=[],
search_path=["x", "y"],
binary="binary",
buck_mode="opt",
exclude=["excludes"],
typeshed="typeshed",
dot_pyre_directory=Path(".pyre"),
python_version="3.6.7",
shared_memory_heap_size=42,
number_of_workers=43,
use_buck2=True,
)
)
self.assertEqual(configuration.binary, "binary")
self.assertEqual(
configuration.buck_mode, PlatformAware.from_json("opt", "buck_mode")
)
self.assertEqual(configuration.dot_pyre_directory, Path(".pyre"))
self.assertListEqual(list(configuration.excludes), ["excludes"])
self.assertEqual(configuration.logger, "logger")
self.assertEqual(configuration.oncall, None)
self.assertListEqual(
list(configuration.search_path),
[SimpleRawElement("x"), SimpleRawElement("y")],
)
self.assertIsNone(configuration.source_directories)
self.assertEqual(configuration.strict, None)
self.assertIsNone(configuration.targets)
self.assertEqual(configuration.typeshed, "typeshed")
self.assertEqual(configuration.unwatched_dependency, None)
self.assertEqual(
configuration.python_version, PythonVersion(major=3, minor=6, micro=7)
)
self.assertEqual(configuration.shared_memory, SharedMemory(heap_size=42))
self.assertEqual(configuration.site_package_search_strategy, None)
self.assertEqual(configuration.site_roots, None)
self.assertEqual(configuration.number_of_workers, 43)
self.assertEqual(configuration.use_buck2, True)
def test_create_from_command_arguments__ide_features(self) -> None:
configuration = PartialConfiguration.from_command_arguments(
command_arguments.CommandArguments(
enable_hover=True,
enable_go_to_definition=True,
enable_find_symbols=True,
)
)
assert configuration.ide_features is not None
self.assertTrue(configuration.ide_features.is_hover_enabled())
self.assertTrue(configuration.ide_features.is_go_to_definition_enabled())
self.assertTrue(configuration.ide_features.is_find_symbols_enabled())
configuration = PartialConfiguration.from_command_arguments(
command_arguments.CommandArguments(
enable_hover=False,
enable_go_to_definition=False,
)
)
assert configuration.ide_features is not None
self.assertFalse(configuration.ide_features.is_hover_enabled())
self.assertFalse(configuration.ide_features.is_go_to_definition_enabled())
configuration = PartialConfiguration.from_command_arguments(
command_arguments.CommandArguments()
)
self.assertEqual(configuration.ide_features, None)
def test_create_from_string_success(self) -> None:
self.assertEqual(
PartialConfiguration.from_string(json.dumps({"binary": "foo"})).binary,
"foo",
)
for mode in [
"foo",
{"default": "foo"},
{"linux": "foo"},
{"default": "bar", "macos": "foo", "linux": "foo"},
]:
buck_mode = PartialConfiguration.from_string(
json.dumps({"buck_mode": mode})
).buck_mode
expected_value = PlatformAware.from_json("foo", "buck_mode")
self.assertIsNotNone(buck_mode)
self.assertIsNotNone(expected_value)
self.assertEqual(buck_mode.get(), expected_value.get())
for null_mode in [{}, None]:
self.assertIsNone(
PartialConfiguration.from_string(
json.dumps({"buck_mode": null_mode})
).buck_mode
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"do_not_ignore_errors_in": ["foo", "bar"]})
).do_not_ignore_errors_in
),
["foo", "bar"],
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"dot_pyre_directory": "foo"})
).dot_pyre_directory,
Path("foo"),
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"exclude": "foo"})
).excludes
),
["foo"],
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"exclude": ["foo", "bar"]})
).excludes
),
["foo", "bar"],
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"extensions": [".foo", ".bar"]})
).extensions
),
[ExtensionElement(".foo", False), ExtensionElement(".bar", False)],
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps(
{
"extensions": [
".foo",
{
"suffix": ".bar",
"include_suffix_in_module_qualifier": True,
},
{
"suffix": ".baz",
"include_suffix_in_module_qualifier": False,
},
]
}
)
).extensions
),
[
ExtensionElement(".foo", False),
ExtensionElement(".bar", True),
ExtensionElement(".baz", False),
],
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"ignore_all_errors": ["foo", "bar"]})
).ignore_all_errors
),
["foo", "bar"],
)
self.assertEqual(
PartialConfiguration.from_string(json.dumps({"logger": "foo"})).logger,
"foo",
)
self.assertEqual(
PartialConfiguration.from_string(json.dumps({"oncall": "foo"})).oncall,
"foo",
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"workers": 42})
).number_of_workers,
42,
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"critical_files": ["foo", "bar"]})
).other_critical_files
),
["foo", "bar"],
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"search_path": "foo"})
).search_path
),
[SimpleRawElement("foo")],
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps(
{"search_path": ["foo", {"root": "bar", "subdirectory": "baz"}]}
)
).search_path
),
[
SimpleRawElement("foo"),
SubdirectoryRawElement("bar", "baz"),
],
)
self.assertIsNone(
PartialConfiguration.from_string(
json.dumps({})
).site_package_search_strategy
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"site_package_search_strategy": "pep561"})
).site_package_search_strategy,
SearchStrategy.PEP561,
)
self.assertEqual(
PartialConfiguration.from_string(json.dumps({"strict": True})).strict, True
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"taint_models_path": "foo"})
).taint_models_path
),
["foo"],
)
self.assertListEqual(
list(
PartialConfiguration.from_string(
json.dumps({"taint_models_path": ["foo", "bar"]})
).taint_models_path
),
["foo", "bar"],
)
self.assertEqual(
PartialConfiguration.from_string(json.dumps({"typeshed": "foo"})).typeshed,
"foo",
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"version": "abc"})
).version_hash,
"abc",
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"pysa_version": "abc"})
).pysa_version_hash,
"abc",
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"python_version": "3"})
).python_version,
PythonVersion(major=3, minor=0, micro=0),
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"python_version": "3.6"})
).python_version,
PythonVersion(major=3, minor=6, micro=0),
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"python_version": "3.6.7"})
).python_version,
PythonVersion(major=3, minor=6, micro=7),
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"shared_memory": {"heap_size": 1}})
).shared_memory,
SharedMemory(heap_size=1),
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"shared_memory": {"dependency_table_power": 2}})
).shared_memory,
SharedMemory(dependency_table_power=2),
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"shared_memory": {"hash_table_power": 3}})
).shared_memory,
SharedMemory(hash_table_power=3),
)
self.assertEqual(
PartialConfiguration.from_string(
json.dumps({"use_buck2": False})
).use_buck2,
False,
)
self.assertIsNone(PartialConfiguration.from_string("{}").source_directories)
source_directories = PartialConfiguration.from_string(
json.dumps({"source_directories": ["foo", "bar"]})
).source_directories
self.assertIsNotNone(source_directories)
self.assertListEqual(
list(source_directories),
[SimpleRawElement("foo"), SimpleRawElement("bar")],
)
self.assertIsNone(PartialConfiguration.from_string(json.dumps({})).site_roots)
site_roots = PartialConfiguration.from_string(
json.dumps({"site_roots": ["foo", "bar"]})
).site_roots
self.assertIsNotNone(site_roots)
self.assertListEqual(
list(site_roots),
["foo", "bar"],
)
source_directories = PartialConfiguration.from_string(
json.dumps(
{
"source_directories": [
"foo",
{"root": "bar", "subdirectory": "baz"},
]
}
)
).source_directories
self.assertIsNotNone(source_directories)
self.assertListEqual(
list(source_directories),
[
SimpleRawElement("foo"),
SubdirectoryRawElement("bar", "baz"),
],
)
source_directories = PartialConfiguration.from_string(
json.dumps(
{
"source_directories": [
"foo",
{"import_root": "bar", "source": "baz"},
]
}
)
).source_directories
self.assertIsNotNone(source_directories)
self.assertListEqual(
list(source_directories),
[
SimpleRawElement("foo"),
SubdirectoryRawElement("bar", "baz"),
],
)
self.assertIsNone(PartialConfiguration.from_string("{}").targets)
targets = PartialConfiguration.from_string(
json.dumps({"targets": ["//foo", "//bar"]})
).targets
self.assertIsNotNone(targets)
self.assertListEqual(list(targets), ["//foo", "//bar"])
unwatched_dependency = PartialConfiguration.from_string(
json.dumps(
{
"unwatched_dependency": {
"change_indicator": "foo",
"files": {"root": "bar", "checksum_path": "baz"},
}
}
)
).unwatched_dependency
self.assertIsNotNone(unwatched_dependency)
self.assertEqual(
unwatched_dependency,
UnwatchedDependency(
change_indicator="foo",
files=UnwatchedFiles(root="bar", checksum_path="baz"),
),
)
def test_create_from_string_failure(self) -> None:
def assert_raises(content: str) -> None:
with self.assertRaises(InvalidConfiguration):
PartialConfiguration.from_string(content)
assert_raises("")
assert_raises("{")
assert_raises(json.dumps({"binary": True}))
assert_raises(json.dumps({"buck_mode": {"default": 5}}))
assert_raises(json.dumps({"buck_mode": {"bad-platform": "mode"}}))
assert_raises(
json.dumps(
{
"buck_mode": {
"win": "valid",
"bad": "valid-also",
}
}
)
)
assert_raises(json.dumps({"do_not_ignore_errors_in": "abc"}))
assert_raises(json.dumps({"dot_pyre_directory": {}}))
assert_raises(json.dumps({"exclude": 42}))
assert_raises(json.dumps({"extensions": 42}))
assert_raises(json.dumps({"ignore_all_errors": [1, 2, 3]}))
assert_raises(json.dumps({"logger": []}))
assert_raises(json.dumps({"oncall": []}))
assert_raises(json.dumps({"workers": "abc"}))
assert_raises(json.dumps({"critical_files": "abc"}))
assert_raises(json.dumps({"source_directories": "abc"}))
assert_raises(json.dumps({"strict": 42}))
assert_raises(json.dumps({"taint_models_path": True}))
assert_raises(json.dumps({"taint_models_path": ["foo", 42]}))
assert_raises(json.dumps({"targets": "abc"}))
assert_raises(json.dumps({"typeshed": ["abc"]}))
assert_raises(json.dumps({"version": 123}))
assert_raises(json.dumps({"pysa_version": 123}))
assert_raises(json.dumps({"python_version": "abc"}))
assert_raises(json.dumps({"python_version": 42}))
assert_raises(json.dumps({"shared_memory": "abc"}))
assert_raises(json.dumps({"shared_memory": {"heap_size": "abc"}}))
assert_raises(json.dumps({"site_package_search_strategy": False}))
assert_raises(json.dumps({"site_roots": 42}))
assert_raises(json.dumps({"unwatched_dependency": {"change_indicator": "abc"}}))
assert_raises(json.dumps({"use_buck2": {}}))
def test_expand_relative_paths(self) -> None:
self.assertEqual(
PartialConfiguration(binary="foo").expand_relative_paths("bar").binary,
"bar/foo",
)
self.assertEqual(
PartialConfiguration(binary="~/foo").expand_relative_paths("bar").binary,
str(Path.home() / "foo"),
)
self.assertEqual(
PartialConfiguration(do_not_ignore_errors_in=["foo", "bar"])
.expand_relative_paths("baz")
.do_not_ignore_errors_in,
["baz/foo", "baz/bar"],
)
self.assertEqual(
PartialConfiguration(ignore_all_errors=["foo", "bar"])
.expand_relative_paths("baz")
.ignore_all_errors,
["baz/foo", "baz/bar"],
)
self.assertEqual(
PartialConfiguration(logger="foo").expand_relative_paths("bar").logger,
"bar/foo",
)
self.assertEqual(
PartialConfiguration(other_critical_files=["foo", "bar"])
.expand_relative_paths("baz")
.other_critical_files,
["baz/foo", "baz/bar"],
)
self.assertEqual(
PartialConfiguration(
search_path=[
SimpleRawElement("foo"),
SubdirectoryRawElement("bar", "baz"),
SitePackageRawElement("package"),
]
)
.expand_relative_paths("root")
.search_path,
[
SimpleRawElement("root/foo"),
SubdirectoryRawElement("root/bar", "baz"),
SitePackageRawElement("package"),
],
)
self.assertEqual(
PartialConfiguration(
source_directories=[
SimpleRawElement("foo"),
SimpleRawElement("bar"),
]
)
.expand_relative_paths("baz")
.source_directories,
[
SimpleRawElement("baz/foo"),
SimpleRawElement("baz/bar"),
],
)
self.assertEqual(
PartialConfiguration(taint_models_path=["foo", "bar"])
.expand_relative_paths("baz")
.taint_models_path,
["baz/foo", "baz/bar"],
)
self.assertEqual(
PartialConfiguration(typeshed="foo").expand_relative_paths("bar").typeshed,
"bar/foo",
)
def assert_expanded_unwatched_root(
original: str, root: str, expected: str
) -> None:
actual = (
PartialConfiguration(
unwatched_dependency=UnwatchedDependency(
change_indicator="indicator",
files=UnwatchedFiles(root=original, checksum_path="checksum"),
)
)
.expand_relative_paths(root)
.unwatched_dependency
)
self.assertIsNotNone(actual)
self.assertEqual(actual.files.root, expected)
assert_expanded_unwatched_root(
original="foo",
root="bar",
expected="bar/foo",
)
class ConfigurationTest(testslide.TestCase):
def test_from_partial_configuration(self) -> None:
configuration = Configuration.from_partial_configuration(
project_root=Path("root"),
relative_local_root="local",
partial_configuration=PartialConfiguration(
binary="binary",
buck_mode=PlatformAware.from_json("opt", "buck_mode"),
do_not_ignore_errors_in=["foo"],
dot_pyre_directory=None,
excludes=["exclude"],
extensions=[ExtensionElement(".ext", False)],
ide_features=IdeFeatures(
hover_enabled=True,
go_to_definition_enabled=True,
find_symbols_enabled=True,
),
ignore_all_errors=["bar"],
logger="logger",
number_of_workers=3,
oncall="oncall",
other_critical_files=["critical"],
python_version=PythonVersion(major=3, minor=6, micro=7),
search_path=[SimpleRawElement("search_path")],
shared_memory=SharedMemory(heap_size=1024),
site_package_search_strategy=SearchStrategy.NONE,
site_roots=["site_root"],
source_directories=None,
strict=None,
taint_models_path=["taint"],
targets=None,
typeshed="typeshed",
unwatched_dependency=None,
use_buck2=None,
version_hash="abc",
),
)
self.assertEqual(configuration.project_root, "root")
self.assertEqual(configuration.relative_local_root, "local")
self.assertEqual(configuration.binary, "binary")
self.assertIsNotNone(configuration.buck_mode)
self.assertEqual(configuration.buck_mode.get(), "opt")
self.assertListEqual(list(configuration.do_not_ignore_errors_in), ["foo"])
self.assertEqual(configuration.dot_pyre_directory, Path("root/.pyre"))
self.assertListEqual(list(configuration.excludes), ["exclude"])
self.assertEqual(configuration.extensions, [ExtensionElement(".ext", False)])
self.assertEqual(
configuration.ide_features,
IdeFeatures(
hover_enabled=True,
go_to_definition_enabled=True,
find_symbols_enabled=True,
),
)
self.assertListEqual(list(configuration.ignore_all_errors), ["bar"])
self.assertEqual(configuration.logger, "logger")
self.assertEqual(configuration.number_of_workers, 3)
self.assertEqual(configuration.oncall, "oncall")
self.assertListEqual(list(configuration.other_critical_files), ["critical"])
self.assertListEqual(
list(configuration.search_path), [SimpleRawElement("search_path")]
)
self.assertEqual(
configuration.python_version, PythonVersion(major=3, minor=6, micro=7)
)
self.assertEqual(configuration.source_directories, None)
self.assertEqual(configuration.shared_memory, SharedMemory(heap_size=1024))
self.assertEqual(
configuration.site_package_search_strategy, SearchStrategy.NONE
)
self.assertEqual(configuration.site_roots, ["site_root"])
self.assertEqual(configuration.strict, False)
self.assertEqual(configuration.taint_models_path, ["taint"])
self.assertEqual(configuration.targets, None)
self.assertEqual(configuration.typeshed, "typeshed")
self.assertEqual(configuration.unwatched_dependency, None)
self.assertEqual(configuration.use_buck2, False)
self.assertEqual(configuration.version_hash, "abc")
def test_get_default_site_roots(self) -> None:
global_site_package = "/venv/lib/pythonX/site-packages"
user_site_package = "/user/lib/pythonX/site-packages"
self.mock_callable(site, "getsitepackages").to_return_value(
[global_site_package]
).and_assert_called_once()
self.mock_callable(site, "getusersitepackages").to_return_value(
user_site_package
).and_assert_called_once()
self.assertListEqual(
get_default_site_roots(), [user_site_package, global_site_package]
)
def test_derived_attributes(self) -> None:
self.assertIsNone(
Configuration(
project_root="foo", dot_pyre_directory=Path(".pyre")
).local_root
)
self.assertEqual(
Configuration(
project_root="foo",
dot_pyre_directory=Path(".pyre"),
relative_local_root="bar",
).local_root,
"foo/bar",
)
self.assertEqual(
Configuration(
project_root="foo",
dot_pyre_directory=Path(".pyre"),
relative_local_root="bar/baz",
).local_root,
"foo/bar/baz",
)
self.assertEqual(
Configuration(
project_root="foo", dot_pyre_directory=Path(".pyre")
).log_directory,
".pyre",
)
self.assertEqual(
Configuration(
project_root="foo",
dot_pyre_directory=Path(".pyre"),
relative_local_root="bar",
).log_directory,
".pyre/bar",
)
self.assertEqual(
Configuration(
project_root="foo",
dot_pyre_directory=Path(".pyre"),
relative_local_root="bar/baz",
).log_directory,
".pyre/bar/baz",
)
def test_existent_search_path_with_typeshed(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root)
ensure_directories_exists(root_path, ["a"])
ensure_directories_exists(
root_path, ["typeshed/stdlib", "typeshed/stubs/foo"]
)
self.assertListEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
search_path=[
SimpleRawElement(str(root_path / "a")),
],
typeshed=str(root_path / "typeshed"),
).expand_and_get_existent_search_paths(),
[
SimpleElement(str(root_path / "a")),
SimpleElement(str(root_path / "typeshed/stdlib")),
SimpleElement(str(root_path / "typeshed/stubs/foo")),
],
)
def test_existent_unwatched_dependency(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_files_exist(root_path, ["a/b"])
self.assertIsNotNone(
Configuration(
project_root=str(root_path),
dot_pyre_directory=Path(".pyre"),
unwatched_dependency=UnwatchedDependency(
change_indicator="indicator",
files=UnwatchedFiles(
root=str(root_path / "a"), checksum_path="b"
),
),
).get_existent_unwatched_dependency()
)
self.assertIsNone(
Configuration(
project_root=str(root_path),
dot_pyre_directory=Path(".pyre"),
unwatched_dependency=UnwatchedDependency(
change_indicator="indicator",
files=UnwatchedFiles(
root=str(root_path / "a"), checksum_path="c"
),
),
).get_existent_unwatched_dependency()
)
self.assertIsNone(
Configuration(
project_root=str(root_path),
dot_pyre_directory=Path(".pyre"),
unwatched_dependency=UnwatchedDependency(
change_indicator="indicator",
files=UnwatchedFiles(
root=str(root_path / "c"), checksum_path="b"
),
),
).get_existent_unwatched_dependency()
)
def test_existent_do_not_ignore_errors(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_directories_exists(root_path, ["a", "b/c"])
self.assertCountEqual(
Configuration(
project_root=str(root_path),
dot_pyre_directory=Path(".pyre"),
do_not_ignore_errors_in=[
str(root_path / "a"),
str(root_path / "x"),
"//b/c",
"//y/z",
],
).get_existent_do_not_ignore_errors_in_paths(),
[str(root_path / "a"), str(root_path / "b/c")],
)
def test_existent_ignore_all_errors(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_directories_exists(root_path, ["a", "b/c", "b/d"])
self.assertCountEqual(
Configuration(
project_root=str(root_path),
dot_pyre_directory=Path(".pyre"),
ignore_all_errors=[
str(root_path / "a"),
str(root_path / "x"),
"//b/c",
"//y/z",
f"{root_path}/b/*",
],
).get_existent_ignore_all_errors_paths(),
[
str(root_path / "a"),
str(root_path / "b/c"),
str(root_path / "b/c"),
str(root_path / "b/d"),
],
)
def test_get_binary_version_ok(self) -> None:
binary_path = "foo"
version = "facefacefaceb00"
self.mock_callable(subprocess, "run").to_return_value(
subprocess.CompletedProcess(
args=[binary_path, "-version"], returncode=0, stdout=f"{version}\n"
)
).and_assert_called_once()
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
binary=binary_path,
).get_binary_version(),
version,
)
def test_get_binary_version_fail(self) -> None:
binary_path = "foo"
self.mock_callable(subprocess, "run").to_return_value(
subprocess.CompletedProcess(
args=[binary_path, "-version"], returncode=1, stdout="derp"
)
).and_assert_called_once()
self.assertIsNone(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
binary=binary_path,
).get_binary_version()
)
def test_get_number_of_workers(self) -> None:
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
number_of_workers=42,
).get_number_of_workers(),
42,
)
# Whatever the default number is, it should be positive
self.assertGreater(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
number_of_workers=None,
).get_number_of_workers(),
0,
)
def test_get_python_versions(self) -> None:
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
python_version=PythonVersion(major=3, minor=6, micro=7),
).get_python_version(),
PythonVersion(major=3, minor=6, micro=7),
)
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
python_version=None,
).get_python_version(),
PythonVersion(
major=sys.version_info.major,
minor=sys.version_info.minor,
micro=sys.version_info.micro,
),
)
def test_get_binary_from_configuration(self) -> None:
with switch_environment({}):
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
binary="foo",
).get_binary_respecting_override(),
"foo",
)
def test_get_binary_auto_determined(self) -> None:
self.mock_callable(shutil, "which").for_call(BINARY_NAME).to_return_value(
"foo"
).and_assert_called_once()
with switch_environment({}):
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
binary=None,
).get_binary_respecting_override(),
"foo",
)
def test_get_binary_cannot_auto_determine(self) -> None:
self.mock_callable(shutil, "which").to_return_value(None).and_assert_called()
with switch_environment({}):
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
binary=None,
).get_binary_respecting_override(),
None,
)
def test_get_typeshed_from_configuration(self) -> None:
with switch_environment({}):
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
typeshed="foo",
).get_typeshed_respecting_override(),
"foo",
)
def test_get_typeshed_auto_determined(self) -> None:
self.mock_callable(
find_directories, "find_typeshed"
).for_call().to_return_value(Path("foo")).and_assert_called_once()
with switch_environment({}):
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
typeshed=None,
).get_typeshed_respecting_override(),
"foo",
)
def test_get_typeshed_cannot_auto_determine(self) -> None:
self.mock_callable(
find_directories, "find_typeshed"
).for_call().to_return_value(None).and_assert_called_once()
with switch_environment({}):
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
typeshed=None,
).get_typeshed_respecting_override(),
None,
)
def test_get_version_hash_from_configuration(self) -> None:
with switch_environment({}):
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
version_hash="abc",
).get_version_hash_respecting_override(),
"abc",
)
def test_get_version_hash_environment_override(self) -> None:
with switch_environment({"PYRE_VERSION_HASH": "abc"}):
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
version_hash=None,
).get_version_hash_respecting_override(),
"abc",
)
self.assertEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
version_hash="def",
).get_version_hash_respecting_override(),
"abc",
)
def test_get_valid_extension_suffixes(self) -> None:
self.assertListEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
extensions=[],
).get_valid_extension_suffixes(),
[],
)
self.assertListEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
extensions=[
ExtensionElement(".foo", False),
ExtensionElement(".bar", False),
],
).get_valid_extension_suffixes(),
[".foo", ".bar"],
)
self.assertListEqual(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
extensions=[
ExtensionElement("foo", False),
ExtensionElement(".bar", False),
ExtensionElement("baz", False),
],
).get_valid_extension_suffixes(),
[".bar"],
)
def test_is_hover_enabled(self) -> None:
self.assertFalse(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
).is_hover_enabled(),
)
self.assertTrue(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
ide_features=IdeFeatures(hover_enabled=True),
).is_hover_enabled(),
)
def test_is_go_to_definition_enabled(self) -> None:
self.assertFalse(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
).is_go_to_definition_enabled(),
)
self.assertTrue(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
ide_features=IdeFeatures(go_to_definition_enabled=True),
).is_go_to_definition_enabled(),
)
def test_is_find_symbols_enabled(self) -> None:
self.assertFalse(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
).is_find_symbols_enabled(),
)
self.assertTrue(
Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
ide_features=IdeFeatures(find_symbols_enabled=True),
).is_find_symbols_enabled(),
)
def test_create_from_command_arguments_only(self) -> None:
# We assume there does not exist a `.pyre_configuration` file that
# covers this temporary directory.
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
with switch_working_directory(root_path):
configuration = create_configuration(
command_arguments.CommandArguments(
source_directories=["."], dot_pyre_directory=None
),
base_directory=Path(root),
)
self.assertEqual(configuration.project_root, str(root_path))
self.assertEqual(configuration.relative_local_root, None)
self.assertEqual(configuration.dot_pyre_directory, root_path / ".pyre")
self.assertListEqual(
list(configuration.source_directories or []),
[SimpleRawElement(str(root_path))],
)
def test_create_from_global_configuration(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {"strict": False})
with switch_working_directory(root_path):
configuration = create_configuration(
command_arguments.CommandArguments(
strict=True, # override configuration file
source_directories=["."],
dot_pyre_directory=Path(".pyre"),
),
base_directory=Path(root),
)
self.assertEqual(configuration.project_root, str(root_path))
self.assertEqual(configuration.relative_local_root, None)
self.assertEqual(configuration.dot_pyre_directory, Path(".pyre"))
self.assertEqual(configuration.strict, True)
self.assertListEqual(
list(configuration.source_directories or []),
[SimpleRawElement(str(root_path))],
)
def test_create_from_local_configuration(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_directories_exists(root_path, ["foo", "bar", "baz"])
write_configuration_file(
root_path, {"strict": False, "search_path": ["foo"]}
)
write_configuration_file(
root_path,
{"strict": True, "search_path": ["//bar", "baz"]},
relative="local",
)
with switch_working_directory(root_path):
configuration = create_configuration(
command_arguments.CommandArguments(
local_configuration="local",
source_directories=["."],
dot_pyre_directory=Path(".pyre"),
),
base_directory=Path(root),
)
self.assertEqual(configuration.project_root, str(root_path))
self.assertEqual(configuration.relative_local_root, "local")
self.assertEqual(configuration.dot_pyre_directory, Path(".pyre"))
self.assertEqual(configuration.strict, True)
self.assertListEqual(
list(configuration.source_directories or []),
[SimpleRawElement(str(root_path))],
)
self.assertListEqual(
list(configuration.search_path),
[
SimpleRawElement(str(root_path / "bar")),
SimpleRawElement(str(root_path / "local/baz")),
SimpleRawElement(str(root_path / "foo")),
],
)
def test_check_nested_local_configuration_no_nesting(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {})
write_configuration_file(root_path, {}, relative="local")
try:
check_nested_local_configuration(
Configuration(
project_root=root,
dot_pyre_directory=Path(".pyre"),
relative_local_root="local",
)
)
except InvalidConfiguration:
self.fail("Nested local configuration check fails unexpectedly!")
def test_check_nested_local_configuration_not_excluded(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {})
write_configuration_file(root_path, {}, relative="nest")
write_configuration_file(root_path, {}, relative="nest/local")
with self.assertRaises(InvalidConfiguration):
check_nested_local_configuration(
Configuration(
project_root=root,
dot_pyre_directory=Path(".pyre"),
relative_local_root="nest/local",
)
)
def test_check_nested_local_configuration_excluded(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {})
write_configuration_file(
root_path,
{"ignore_all_errors": [str(root_path / "nest/local")]},
relative="nest",
)
write_configuration_file(root_path, {}, relative="nest/local")
try:
check_nested_local_configuration(
Configuration(
project_root=root,
dot_pyre_directory=Path(".pyre"),
relative_local_root="nest/local",
)
)
except InvalidConfiguration:
self.fail("Nested local configuration check fails unexpectedly!")
def test_check_nested_local_configuration_excluded_parent(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {})
write_configuration_file(
root_path,
{"ignore_all_errors": [str(root_path / "nest")]},
relative="nest",
)
write_configuration_file(root_path, {}, relative="nest/local")
try:
check_nested_local_configuration(
Configuration(
project_root=root,
dot_pyre_directory=Path(".pyre"),
relative_local_root="nest/local",
)
)
except InvalidConfiguration:
self.fail("Nested local configuration check fails unexpectedly!")
def test_check_nested_local_configuration_not_all_nesting_excluded(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {})
write_configuration_file(root_path, {}, relative="nest0")
write_configuration_file(
root_path,
{"ignore_all_errors": [str(root_path / "nest0/nest1/local")]},
relative="nest0/nest1",
)
write_configuration_file(root_path, {}, relative="nest0/nest1/local")
with self.assertRaises(InvalidConfiguration):
check_nested_local_configuration(
Configuration(
project_root=root,
dot_pyre_directory=Path(".pyre"),
relative_local_root="nest0/nest1/local",
)
)
def test_check_nested_local_configuration_all_nesting_excluded(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {})
write_configuration_file(
root_path,
{"ignore_all_errors": [str(root_path / "nest0/nest1/local")]},
relative="nest0",
)
write_configuration_file(
root_path,
{"ignore_all_errors": [str(root_path / "nest0/nest1/local")]},
relative="nest0/nest1",
)
write_configuration_file(root_path, {}, relative="nest0/nest1/local")
try:
check_nested_local_configuration(
Configuration(
project_root=root,
dot_pyre_directory=Path(".pyre"),
relative_local_root="nest0/nest1/local",
)
)
except InvalidConfiguration:
self.fail("Nested local configuration check fails unexpectedly!")
def test_check_nested_local_configuration_expand_global_root(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {})
write_configuration_file(
root_path,
{"ignore_all_errors": ["//nest0/nest1/local"]},
relative="nest0",
)
write_configuration_file(
root_path,
{"ignore_all_errors": [str(root_path / "nest0/**")]},
relative="nest0/nest1",
)
write_configuration_file(root_path, {}, relative="nest0/nest1/local")
try:
check_nested_local_configuration(
Configuration(
project_root=root,
dot_pyre_directory=Path(".pyre"),
relative_local_root="nest0/nest1/local",
)
)
except InvalidConfiguration:
self.fail("Nested local configuration check fails unexpectedly!")
def test_check_nested_local_configuration_expand_relative_root(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
write_configuration_file(root_path, {})
write_configuration_file(
root_path, {"ignore_all_errors": ["nest1/local"]}, relative="nest0"
)
write_configuration_file(
root_path, {"ignore_all_errors": ["*"]}, relative="nest0/nest1"
)
write_configuration_file(root_path, {}, relative="nest0/nest1/local")
try:
check_nested_local_configuration(
Configuration(
project_root=root,
dot_pyre_directory=Path(".pyre"),
relative_local_root="nest0/nest1/local",
)
)
except InvalidConfiguration:
self.fail("Nested local configuration check fails unexpectedly!")
def test_source_directories_glob(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root)
ensure_directories_exists(root_path, ["a1", "a2", "b", "c"])
source_directories = Configuration(
project_root="irrelevant",
dot_pyre_directory=Path(".pyre"),
source_directories=[
SimpleRawElement(str(root_path / "a*")),
SimpleRawElement(str(root_path / "b")),
],
).expand_and_get_existent_source_directories()
self.assertIsNotNone(source_directories)
self.assertListEqual(
list(source_directories),
[
SimpleElement(str(root_path / "a1")),
SimpleElement(str(root_path / "a2")),
SimpleElement(str(root_path / "b")),
],
)
authors: ["facebook-github-bot@users.noreply.github.com"] | author_id: facebook-github-bot@users.noreply.github.com
blob_id: 415b68c26b7d3402984137aa26281194e8b527b6 | directory_id: 54b46ac21712627097e2938b9508150f7f797b28 | path: /docs/conf.py | content_id: 9d24e85e760d9cc4eec4be8de3b3137d7f163d33
detected_licenses: ["MIT"] | license_type: permissive | repo_name: riskybacon/mnist_arma | snapshot_id: 69987e1cd7c8117976bc1af4e8791a22b7570a29 | revision_id: 4921686adf2382d7fb87d41d25d5e7e6342e6ec3 | branch_name: refs/heads/master
visit_date: 2021-08-26T09:25:49.401537 | revision_date: 2017-11-22T21:00:07 | committer_date: 2017-11-22T21:00:07 | github_id: 110,301,106 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,299 | extension: py
content:
# -*- coding: utf-8 -*-
#
# mnist_arma documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 14 14:53:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.imgmath',
'breathe']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mnist_arma'
copyright = u'2017, Jeffrey Bowles'
author = u'Jeffrey Bowles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mnist_armadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mnist_arma.tex', u'mnist\\_arma Documentation',
u'Jeffrey Bowles', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mnist_arma', u'mnist_arma Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mnist_arma', u'mnist_arma Documentation',
author, 'mnist_arma', 'One line description of project.',
'Miscellaneous'),
]
breathe_projects = {'mnist_arma': './doxyxml/'}
breathe_default_project = 'mnist_arma'
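# Illustrative build commands (not part of the original conf.py). They assume a
# Doxyfile that writes its XML output into ./doxyxml, which is what the
# breathe_projects setting above expects:
#
#     doxygen Doxyfile
#     sphinx-build -b html . _build/html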
authors: ["jbowles@newmexicoconsortium.org"] | author_id: jbowles@newmexicoconsortium.org
blob_id: 50bba1b031f6b26df64745a4488f051af8cc1198 | directory_id: 76f7cc243a743904c24dc328d057306720251e30 | path: /Week_02/429_n_ary_levelorder_traversal.py | content_id: eed438eb66bb3f8d6ed0d4b4cd369408fb509b50
detected_licenses: [] | license_type: no_license | repo_name: underseatravel/AlgorithmQIUZHAO | snapshot_id: e2340ad8f3e723fd1e25fe678bdd547bb76483e0 | revision_id: b41e4ddae5e31074992d0b96bd029fcb6291d2ed | branch_name: refs/heads/master
visit_date: 2022-11-29T05:25:00.338298 | revision_date: 2020-08-22T10:23:17 | committer_date: 2020-08-22T10:23:17 | github_id: 279,102,613 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-07-12T16:22:05 | gha_created_at: 2020-07-12T16:22:04 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 432 | extension: py
content:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/7/22 1:26
# @Author : weiyu
# @File : 429_n_ary_levelorder_traversal.py
class Solution:
def levelOrder(self, root):
if not root: return []
queue = [root]
res = []
while queue:
res.append([node.val for node in queue])
queue = [child for node in queue for child in node.children if child]
return res
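# Illustrative usage (not part of the original file), assuming the LeetCode-style
# Node class with `val` and `children` attributes:
#
#     class Node:
#         def __init__(self, val=None, children=None):
#             self.val = val
#             self.children = children or []
#
#     root = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
#     print(Solution().levelOrder(root))  # [[1], [3, 2, 4], [5, 6]]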
authors: ["1355472435@qq.com"] | author_id: 1355472435@qq.com
blob_id: e74b3048c30aaaa5c659ce91f95b5db92a2aaa1d | directory_id: 052275c2dd6d59a0d0fcfe85591b44106343662b | path: /env/lib/python3.8/site-packages/openpyxl/worksheet/worksheet.py | content_id: 95186be4844a6f205071fab8860f3c97f8c1014a
detected_licenses: [] | license_type: no_license | repo_name: nimadorostkar/Django-Real-Estate | snapshot_id: 93d104ad1847674103e525ae428af186fffa9e30 | revision_id: bf868e49bb4703e4081d8e7e9fd5e3ae23fc9af9 | branch_name: refs/heads/master
visit_date: 2023-08-10T17:07:29.829253 | revision_date: 2021-09-19T10:55:47 | committer_date: 2021-09-19T10:55:47 | github_id: 338,533,461 | star_events_count: 23 | fork_events_count: 9 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 27,473 | extension: py
content:
# Copyright (c) 2010-2020 openpyxl
"""Worksheet is the 2nd-level container in Excel."""
# Python stdlib imports
from itertools import chain
from operator import itemgetter
from inspect import isgenerator
from warnings import warn
# compatibility imports
from openpyxl.compat import (
deprecated,
)
# package imports
from openpyxl.utils import (
column_index_from_string,
get_column_letter,
range_boundaries,
coordinate_to_tuple,
absolute_coordinate,
)
from openpyxl.cell import Cell, MergedCell
from openpyxl.formatting.formatting import ConditionalFormattingList
from openpyxl.packaging.relationship import RelationshipList
from openpyxl.workbook.child import _WorkbookChild
from openpyxl.workbook.defined_name import COL_RANGE_RE, ROW_RANGE_RE
from openpyxl.formula.translate import Translator
from .datavalidation import DataValidationList
from .page import (
PrintPageSetup,
PageMargins,
PrintOptions,
)
from .dimensions import (
ColumnDimension,
RowDimension,
DimensionHolder,
SheetFormatProperties,
)
from .protection import SheetProtection
from .filters import AutoFilter
from .views import (
Pane,
Selection,
SheetViewList,
)
from .cell_range import MultiCellRange, CellRange
from .merge import MergedCellRange
from .properties import WorksheetProperties
from .pagebreak import RowBreak, ColBreak
from .scenario import ScenarioList
from .table import TableList
class Worksheet(_WorkbookChild):
"""Represents a worksheet.
Do not create worksheets yourself,
use :func:`openpyxl.workbook.Workbook.create_sheet` instead
"""
_rel_type = "worksheet"
_path = "/xl/worksheets/sheet{0}.xml"
mime_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"
BREAK_NONE = 0
BREAK_ROW = 1
BREAK_COLUMN = 2
SHEETSTATE_VISIBLE = 'visible'
SHEETSTATE_HIDDEN = 'hidden'
SHEETSTATE_VERYHIDDEN = 'veryHidden'
# Paper size
PAPERSIZE_LETTER = '1'
PAPERSIZE_LETTER_SMALL = '2'
PAPERSIZE_TABLOID = '3'
PAPERSIZE_LEDGER = '4'
PAPERSIZE_LEGAL = '5'
PAPERSIZE_STATEMENT = '6'
PAPERSIZE_EXECUTIVE = '7'
PAPERSIZE_A3 = '8'
PAPERSIZE_A4 = '9'
PAPERSIZE_A4_SMALL = '10'
PAPERSIZE_A5 = '11'
# Page orientation
ORIENTATION_PORTRAIT = 'portrait'
ORIENTATION_LANDSCAPE = 'landscape'
def __init__(self, parent, title=None):
_WorkbookChild.__init__(self, parent, title)
self._setup()
def _setup(self):
self.row_dimensions = DimensionHolder(worksheet=self,
default_factory=self._add_row)
self.column_dimensions = DimensionHolder(worksheet=self,
default_factory=self._add_column)
self.row_breaks = RowBreak()
self.col_breaks = ColBreak()
self._cells = {}
self._charts = []
self._images = []
self._rels = RelationshipList()
self._drawing = None
self._comments = []
self.merged_cells = MultiCellRange()
self._tables = TableList()
self._pivots = []
self.data_validations = DataValidationList()
self._hyperlinks = []
self.sheet_state = 'visible'
self.page_setup = PrintPageSetup(worksheet=self)
self.print_options = PrintOptions()
self._print_rows = None
self._print_cols = None
self._print_area = None
self.page_margins = PageMargins()
self.views = SheetViewList()
self.protection = SheetProtection()
self._current_row = 0
self.auto_filter = AutoFilter()
self.paper_size = None
self.formula_attributes = {}
self.orientation = None
self.conditional_formatting = ConditionalFormattingList()
self.legacy_drawing = None
self.sheet_properties = WorksheetProperties()
self.sheet_format = SheetFormatProperties()
self.scenarios = ScenarioList()
@property
def sheet_view(self):
return self.views.sheetView[0]
@property
def selected_cell(self):
return self.sheet_view.selection[0].sqref
@property
def active_cell(self):
return self.sheet_view.selection[0].activeCell
@property
def page_breaks(self):
return (self.row_breaks, self.col_breaks) # legacy, remove at some point
@property
def show_gridlines(self):
return self.sheet_view.showGridLines
""" To keep compatibility with previous versions"""
@property
def show_summary_below(self):
return self.sheet_properties.outlinePr.summaryBelow
@property
def show_summary_right(self):
return self.sheet_properties.outlinePr.summaryRight
@property
def freeze_panes(self):
if self.sheet_view.pane is not None:
return self.sheet_view.pane.topLeftCell
@freeze_panes.setter
def freeze_panes(self, topLeftCell=None):
if isinstance(topLeftCell, Cell):
topLeftCell = topLeftCell.coordinate
if topLeftCell == 'A1':
topLeftCell = None
if not topLeftCell:
self.sheet_view.pane = None
return
row, column = coordinate_to_tuple(topLeftCell)
view = self.sheet_view
view.pane = Pane(topLeftCell=topLeftCell,
activePane="topRight",
state="frozen")
view.selection[0].pane = "topRight"
if column > 1:
view.pane.xSplit = column - 1
if row > 1:
view.pane.ySplit = row - 1
view.pane.activePane = 'bottomLeft'
view.selection[0].pane = "bottomLeft"
if column > 1:
view.selection[0].pane = "bottomRight"
view.pane.activePane = 'bottomRight'
if row > 1 and column > 1:
sel = list(view.selection)
sel.insert(0, Selection(pane="topRight", activeCell=None, sqref=None))
sel.insert(1, Selection(pane="bottomLeft", activeCell=None, sqref=None))
view.selection = sel
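    # Illustrative usage of the setter above (comment added for clarity, not part of
    # the original source):
    #     ws.freeze_panes = "B2"   # keep row 1 and column A visible while scrolling
    #     ws.freeze_panes = None   # remove any frozen panes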
def cell(self, row, column, value=None):
"""
Returns a cell object based on the given coordinates.
Usage: cell(row=15, column=1, value=5)
Calling `cell` creates cells in memory when they
are first accessed.
:param row: row index of the cell (e.g. 4)
:type row: int
:param column: column index of the cell (e.g. 3)
:type column: int
:param value: value of the cell (e.g. 5)
:type value: numeric or time or string or bool or none
:rtype: openpyxl.cell.cell.Cell
"""
if row < 1 or column < 1:
raise ValueError("Row or column values must be at least 1")
cell = self._get_cell(row, column)
if value is not None:
cell.value = value
return cell
def _get_cell(self, row, column):
"""
Internal method for getting a cell from a worksheet.
Will create a new cell if one doesn't already exist.
"""
if not 0 < row < 1048577:
raise ValueError("Row numbers must be between 1 and 1048576")
coordinate = (row, column)
if not coordinate in self._cells:
cell = Cell(self, row=row, column=column)
self._add_cell(cell)
return self._cells[coordinate]
def _add_cell(self, cell):
"""
Internal method for adding cell objects.
"""
column = cell.col_idx
row = cell.row
self._current_row = max(row, self._current_row)
self._cells[(row, column)] = cell
def __getitem__(self, key):
"""Convenience access by Excel style coordinates
The key can be a single cell coordinate 'A1', a range of cells 'A1:D25',
individual rows or columns 'A', 4 or ranges of rows or columns 'A:D',
4:10.
Single cells will always be created if they do not exist.
Returns either a single cell or a tuple of rows or columns.
"""
if isinstance(key, slice):
if not all([key.start, key.stop]):
raise IndexError("{0} is not a valid coordinate or range".format(key))
key = "{0}:{1}".format(key.start, key.stop)
if isinstance(key, int):
            key = str(key)
min_col, min_row, max_col, max_row = range_boundaries(key)
if not any([min_col, min_row, max_col, max_row]):
raise IndexError("{0} is not a valid coordinate or range".format(key))
if min_row is None:
cols = tuple(self.iter_cols(min_col, max_col))
if min_col == max_col:
cols = cols[0]
return cols
if min_col is None:
rows = tuple(self.iter_rows(min_col=min_col, min_row=min_row,
max_col=self.max_column, max_row=max_row))
if min_row == max_row:
rows = rows[0]
return rows
if ":" not in key:
return self._get_cell(min_row, min_col)
return tuple(self.iter_rows(min_row=min_row, min_col=min_col,
max_row=max_row, max_col=max_col))
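    # Illustrative access forms handled by __getitem__ (comment added for clarity,
    # not part of the original source):
    #     ws["A1"]      -> a single Cell
    #     ws["A1:C3"]   -> tuple of row tuples
    #     ws[4]         -> tuple of cells in row 4
    #     ws["A:B"]     -> tuple of column tuples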
def __setitem__(self, key, value):
self[key].value = value
def __iter__(self):
return self.iter_rows()
def __delitem__(self, key):
row, column = coordinate_to_tuple(key)
if (row, column) in self._cells:
del self._cells[(row, column)]
@property
def min_row(self):
"""The minimium row index containing data (1-based)
:type: int
"""
min_row = 1
if self._cells:
rows = set(c[0] for c in self._cells)
min_row = min(rows)
return min_row
@property
def max_row(self):
"""The maximum row index containing data (1-based)
:type: int
"""
max_row = 1
if self._cells:
rows = set(c[0] for c in self._cells)
max_row = max(rows)
return max_row
@property
def min_column(self):
"""The minimum column index containing data (1-based)
:type: int
"""
min_col = 1
if self._cells:
cols = set(c[1] for c in self._cells)
min_col = min(cols)
return min_col
@property
def max_column(self):
"""The maximum column index containing data (1-based)
:type: int
"""
max_col = 1
if self._cells:
cols = set(c[1] for c in self._cells)
max_col = max(cols)
return max_col
def calculate_dimension(self):
"""Return the minimum bounding range for all cells containing data (ex. 'A1:M24')
:rtype: string
"""
if self._cells:
rows = set()
cols = set()
for row, col in self._cells:
rows.add(row)
cols.add(col)
max_row = max(rows)
max_col = max(cols)
min_col = min(cols)
min_row = min(rows)
else:
return "A1:A1"
return f"{get_column_letter(min_col)}{min_row}:{get_column_letter(max_col)}{max_row}"
@property
def dimensions(self):
"""Returns the result of :func:`calculate_dimension`"""
return self.calculate_dimension()
def iter_rows(self, min_row=None, max_row=None, min_col=None, max_col=None, values_only=False):
"""
Produces cells from the worksheet, by row. Specify the iteration range
using indices of rows and columns.
If no indices are specified the range starts at A1.
If no cells are in the worksheet an empty tuple will be returned.
:param min_col: smallest column index (1-based index)
:type min_col: int
:param min_row: smallest row index (1-based index)
:type min_row: int
:param max_col: largest column index (1-based index)
:type max_col: int
:param max_row: largest row index (1-based index)
:type max_row: int
:param values_only: whether only cell values should be returned
:type values_only: bool
:rtype: generator
"""
if self._current_row == 0 and not any([min_col, min_row, max_col, max_row ]):
return iter(())
min_col = min_col or 1
min_row = min_row or 1
max_col = max_col or self.max_column
max_row = max_row or self.max_row
return self._cells_by_row(min_col, min_row, max_col, max_row, values_only)
def _cells_by_row(self, min_col, min_row, max_col, max_row, values_only=False):
for row in range(min_row, max_row + 1):
cells = (self.cell(row=row, column=column) for column in range(min_col, max_col + 1))
if values_only:
yield tuple(cell.value for cell in cells)
else:
yield tuple(cells)
@property
def rows(self):
"""Produces all cells in the worksheet, by row (see :func:`iter_rows`)
:type: generator
"""
return self.iter_rows()
@property
def values(self):
"""Produces all cell values in the worksheet, by row
:type: generator
"""
for row in self.iter_rows(values_only=True):
yield row
def iter_cols(self, min_col=None, max_col=None, min_row=None, max_row=None, values_only=False):
"""
Produces cells from the worksheet, by column. Specify the iteration range
using indices of rows and columns.
If no indices are specified the range starts at A1.
If no cells are in the worksheet an empty tuple will be returned.
:param min_col: smallest column index (1-based index)
:type min_col: int
:param min_row: smallest row index (1-based index)
:type min_row: int
:param max_col: largest column index (1-based index)
:type max_col: int
:param max_row: largest row index (1-based index)
:type max_row: int
:param values_only: whether only cell values should be returned
:type values_only: bool
:rtype: generator
"""
if self._current_row == 0 and not any([min_col, min_row, max_col, max_row]):
return iter(())
min_col = min_col or 1
min_row = min_row or 1
max_col = max_col or self.max_column
max_row = max_row or self.max_row
return self._cells_by_col(min_col, min_row, max_col, max_row, values_only)
def _cells_by_col(self, min_col, min_row, max_col, max_row, values_only=False):
"""
Get cells by column
"""
for column in range(min_col, max_col+1):
cells = (self.cell(row=row, column=column)
for row in range(min_row, max_row+1))
if values_only:
yield tuple(cell.value for cell in cells)
else:
yield tuple(cells)
@property
def columns(self):
"""Produces all cells in the worksheet, by column (see :func:`iter_cols`)"""
return self.iter_cols()
def set_printer_settings(self, paper_size, orientation):
"""Set printer settings """
self.page_setup.paperSize = paper_size
self.page_setup.orientation = orientation
def add_data_validation(self, data_validation):
""" Add a data-validation object to the sheet. The data-validation
object defines the type of data-validation to be applied and the
cell or range of cells it should apply to.
"""
self.data_validations.append(data_validation)
def add_chart(self, chart, anchor=None):
"""
Add a chart to the sheet
Optionally provide a cell for the top-left anchor
"""
if anchor is not None:
chart.anchor = anchor
self._charts.append(chart)
def add_image(self, img, anchor=None):
"""
Add an image to the sheet.
Optionally provide a cell for the top-left anchor
"""
if anchor is not None:
img.anchor = anchor
self._images.append(img)
def add_table(self, table):
"""
Check for duplicate name in definedNames and other worksheet tables
before adding table.
"""
if self.parent._duplicate_name(table.name):
raise ValueError("Table with name {0} already exists".format(table.name))
if not hasattr(self, "_get_cell"):
warn("In write-only mode you must add table columns manually")
self._tables.add(table)
@property
def tables(self):
return self._tables
def add_pivot(self, pivot):
self._pivots.append(pivot)
def merge_cells(self, range_string=None, start_row=None, start_column=None, end_row=None, end_column=None):
""" Set merge on a cell range. Range is a cell range (e.g. A1:E1) """
if range_string is None:
cr = CellRange(range_string=range_string, min_col=start_column, min_row=start_row,
max_col=end_column, max_row=end_row)
range_string = cr.coord
mcr = MergedCellRange(self, range_string)
self.merged_cells.add(mcr)
self._clean_merge_range(mcr)
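    # Illustrative usage (comment added for clarity, not part of the original source):
    #     ws.merge_cells("A2:D2")
    #     ws.merge_cells(start_row=2, start_column=1, end_row=4, end_column=4)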
def _clean_merge_range(self, mcr):
"""
Remove all but the top left-cell from a range of merged cells
and recreate the lost border information.
Borders are then applied
"""
cells = mcr.cells
next(cells) # skip first cell
for row, col in cells:
self._cells[row, col] = MergedCell(self, row, col)
mcr.format()
@property
@deprecated("Use ws.merged_cells.ranges")
def merged_cell_ranges(self):
"""Return a copy of cell ranges"""
return self.merged_cells.ranges[:]
def unmerge_cells(self, range_string=None, start_row=None, start_column=None, end_row=None, end_column=None):
""" Remove merge on a cell range. Range is a cell range (e.g. A1:E1) """
cr = CellRange(range_string=range_string, min_col=start_column, min_row=start_row,
max_col=end_column, max_row=end_row)
if cr.coord not in self.merged_cells:
raise ValueError("Cell range {0} is not merged".format(cr.coord))
self.merged_cells.remove(cr)
cells = cr.cells
next(cells) # skip first cell
for row, col in cells:
del self._cells[(row, col)]
def append(self, iterable):
"""Appends a group of values at the bottom of the current sheet.
* If it's a list: all values are added in order, starting from the first column
* If it's a dict: values are assigned to the columns indicated by the keys (numbers or letters)
:param iterable: list, range or generator, or dict containing values to append
:type iterable: list|tuple|range|generator or dict
Usage:
* append(['This is A1', 'This is B1', 'This is C1'])
* **or** append({'A' : 'This is A1', 'C' : 'This is C1'})
* **or** append({1 : 'This is A1', 3 : 'This is C1'})
:raise: TypeError when iterable is neither a list/tuple nor a dict
"""
row_idx = self._current_row + 1
if (isinstance(iterable, (list, tuple, range))
or isgenerator(iterable)):
for col_idx, content in enumerate(iterable, 1):
if isinstance(content, Cell):
# compatible with write-only mode
cell = content
if cell.parent and cell.parent != self:
raise ValueError("Cells cannot be copied from other worksheets")
cell.parent = self
cell.column = col_idx
cell.row = row_idx
else:
cell = Cell(self, row=row_idx, column=col_idx, value=content)
self._cells[(row_idx, col_idx)] = cell
elif isinstance(iterable, dict):
for col_idx, content in iterable.items():
if isinstance(col_idx, str):
col_idx = column_index_from_string(col_idx)
cell = Cell(self, row=row_idx, column=col_idx, value=content)
self._cells[(row_idx, col_idx)] = cell
else:
self._invalid_row(iterable)
self._current_row = row_idx
def _move_cells(self, min_row=None, min_col=None, offset=0, row_or_col="row"):
"""
Move either rows or columns around by the offset
"""
reverse = offset > 0 # start at the end if inserting
row_offset = 0
col_offset = 0
# need to make affected ranges contiguous
if row_or_col == 'row':
cells = self.iter_rows(min_row=min_row)
row_offset = offset
key = 0
else:
cells = self.iter_cols(min_col=min_col)
col_offset = offset
key = 1
cells = list(cells)
for row, column in sorted(self._cells, key=itemgetter(key), reverse=reverse):
if min_row and row < min_row:
continue
elif min_col and column < min_col:
continue
self._move_cell(row, column, row_offset, col_offset)
def insert_rows(self, idx, amount=1):
"""
Insert row or rows before row==idx
"""
self._move_cells(min_row=idx, offset=amount, row_or_col="row")
self._current_row = self.max_row
def insert_cols(self, idx, amount=1):
"""
Insert column or columns before col==idx
"""
self._move_cells(min_col=idx, offset=amount, row_or_col="column")
def delete_rows(self, idx, amount=1):
"""
Delete row or rows from row==idx
"""
remainder = _gutter(idx, amount, self.max_row)
self._move_cells(min_row=idx+amount, offset=-amount, row_or_col="row")
# calculating min and max col is an expensive operation, do it only once
min_col = self.min_column
max_col = self.max_column + 1
for row in remainder:
for col in range(min_col, max_col):
if (row, col) in self._cells:
del self._cells[row, col]
self._current_row = self.max_row
if not self._cells:
self._current_row = 0
def delete_cols(self, idx, amount=1):
"""
Delete column or columns from col==idx
"""
remainder = _gutter(idx, amount, self.max_column)
self._move_cells(min_col=idx+amount, offset=-amount, row_or_col="column")
# calculating min and max row is an expensive operation, do it only once
min_row = self.min_row
max_row = self.max_row + 1
for col in remainder:
for row in range(min_row, max_row):
if (row, col) in self._cells:
del self._cells[row, col]
def move_range(self, cell_range, rows=0, cols=0, translate=False):
"""
Move a cell range by the number of rows and/or columns:
down if rows > 0 and up if rows < 0
right if cols > 0 and left if cols < 0
Existing cells will be overwritten.
Formulae and references will not be updated.
"""
if isinstance(cell_range, str):
cell_range = CellRange(cell_range)
if not isinstance(cell_range, CellRange):
raise ValueError("Only CellRange objects can be moved")
if not rows and not cols:
return
down = rows > 0
right = cols > 0
if rows:
cells = sorted(cell_range.rows, reverse=down)
else:
cells = sorted(cell_range.cols, reverse=right)
for row, col in chain.from_iterable(cells):
self._move_cell(row, col, rows, cols, translate)
# rebase moved range
cell_range.shift(row_shift=rows, col_shift=cols)
def _move_cell(self, row, column, row_offset, col_offset, translate=False):
"""
Move a cell from one place to another.
Delete at old index
Rebase coordinate
"""
cell = self._get_cell(row, column)
new_row = cell.row + row_offset
new_col = cell.column + col_offset
self._cells[new_row, new_col] = cell
del self._cells[(cell.row, cell.column)]
cell.row = new_row
cell.column = new_col
if translate and cell.data_type == "f":
t = Translator(cell.value, cell.coordinate)
cell.value = t.translate_formula(row_delta=row_offset, col_delta=col_offset)
def _invalid_row(self, iterable):
raise TypeError('Value must be a list, tuple, range or generator, or a dict. Supplied value is {0}'.format(
type(iterable))
)
def _add_column(self):
"""Dimension factory for column information"""
return ColumnDimension(self)
def _add_row(self):
"""Dimension factory for row information"""
return RowDimension(self)
@property
def print_title_rows(self):
"""Rows to be printed at the top of every page (ex: '1:3')"""
if self._print_rows:
return self._print_rows
@print_title_rows.setter
def print_title_rows(self, rows):
"""
Set rows to be printed on the top of every page
format `1:3`
"""
if rows is not None:
if not ROW_RANGE_RE.match(rows):
raise ValueError("Print title rows must be the form 1:3")
self._print_rows = rows
@property
def print_title_cols(self):
"""Columns to be printed at the left side of every page (ex: 'A:C')"""
if self._print_cols:
return self._print_cols
@print_title_cols.setter
def print_title_cols(self, cols):
"""
Set cols to be printed on the left of every page
        format ``A:C``
"""
if cols is not None:
if not COL_RANGE_RE.match(cols):
raise ValueError("Print title cols must be the form C:D")
self._print_cols = cols
@property
def print_titles(self):
if self.print_title_cols and self.print_title_rows:
return ",".join([self.print_title_rows, self.print_title_cols])
else:
return self.print_title_rows or self.print_title_cols
@property
def print_area(self):
"""
The print area for the worksheet, or None if not set. To set, supply a range
like 'A1:D4' or a list of ranges.
"""
return self._print_area
@print_area.setter
def print_area(self, value):
"""
Range of cells in the form A1:D4 or list of ranges
"""
if isinstance(value, str):
value = [value]
self._print_area = [absolute_coordinate(v) for v in value]
def _gutter(idx, offset, max_val):
"""
    When rows or columns are deleted we rely on overwriting.
This may not be the case for a large offset on small set of cells:
range(cells_to_delete) > range(cell_to_be_moved)
"""
gutter = range(max(max_val+1-offset, idx), min(idx+offset, max_val)+1)
return gutter
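# Minimal usage sketch (not part of the original module). Worksheets are normally
# obtained from a Workbook rather than constructed directly:
#
#     from openpyxl import Workbook
#     wb = Workbook()
#     ws = wb.active
#     ws["A1"] = 42
#     ws.append(["a", "b", "c"])
#     print(ws.calculate_dimension())  # "A1:C2"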
authors: ["nimadorostkar97@gmail.com"] | author_id: nimadorostkar97@gmail.com
blob_id: 34cb20d94952a36b1456472333a03be16addc0c7 | directory_id: 644163d6cafa83976fd7cb9216bbe6b1b2a25109 | path: /ex25.py | content_id: 0f988e02c4c86018c96da5732d93ef98517c3caa
detected_licenses: [] | license_type: no_license | repo_name: wen-t/Learn-Python | snapshot_id: 6f52773144b2c358aa931a22a529063a8b1ff648 | revision_id: ea8ccb7d1f62c73a5da9c16e12c8aa113b8a2cb7 | branch_name: refs/heads/master
visit_date: 2021-01-10T01:39:11.646276 | revision_date: 2015-11-30T14:02:17 | committer_date: 2015-11-30T14:02:17 | github_id: 45,468,678 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 981 | extension: py
content:
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the word."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after poppin it off."""
word = words.pop(0)
    print(word)
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
    print(word)
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
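# Illustrative usage (not part of the original exercise file):
#
#     sentence = "All good things come to those who wait."
#     words = break_words(sentence)
#     print(sort_words(words))
#     print_first_and_last_sorted(sentence)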
authors: ["mail.wen.t@gmail.com"] | author_id: mail.wen.t@gmail.com
blob_id: 76a76c143edcf3347857cf52bb1c494b0d17340f | directory_id: 57a0bbdacb39c652570fe4132cf6df77242480d3 | path: /serving_patterns/src/api_composition_proxy/routers/proxy.py | content_id: 011975457ef19238db90a225444a926ce465f903
detected_licenses: ["MIT"] | license_type: permissive | repo_name: GeHongpeng/ml-system-in-action | snapshot_id: cca7e722f9725ce3df7372125a91c6e2405f895d | revision_id: 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | branch_name: refs/heads/master
visit_date: 2023-04-16T19:47:08.367659 | revision_date: 2021-05-01T02:07:28 | committer_date: 2021-05-01T02:07:28 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,681 | extension: py
content:
from fastapi import APIRouter, Body, BackgroundTasks
import logging
import aiohttp
import asyncio
from typing import Dict, Any
from pydantic import BaseModel
import uuid
from src.api_composition_proxy.configurations import ServiceConfigurations
from src.api_composition_proxy import helpers
from src.jobs import store_data_job
from src.helper import get_job_id
logger = logging.getLogger(__name__)
router = APIRouter()
class Data(BaseModel):
data: Any = None
async def _get_redirect(session, url: str, alias: str) -> Dict[str, Any]:
async with session.get(url) as response:
response_json = await response.json()
resp = {alias: {"response": response_json, "status_code": response.status}}
logger.info(f"response: {resp}")
return resp
@router.get("/{redirect_path:path}")
async def get_redirect(redirect_path: str) -> Dict[str, Any]:
logger.info(f"GET redirect to: /{redirect_path}")
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=2)) as session:
tasks = [
asyncio.ensure_future(
_get_redirect(session, helpers.customized_redirect_builder(k, v, redirect_path, ServiceConfigurations.customized_redirect_map), k)
)
for k, v in ServiceConfigurations.urls.items()
]
responses = await asyncio.gather(*tasks)
logger.info(f"responses: {responses}")
return responses
async def _post_redirect(session, url: str, data: Dict[Any, Any], alias: str) -> Dict[str, Any]:
async with session.post(url, json=data) as response:
response_json = await response.json()
resp = {alias: {"response": response_json, "status_code": response.status}}
logger.info(f"response: {resp}")
return resp
@router.post("/{redirect_path:path}")
async def post_redirect(redirect_path: str, data: Data, background_tasks: BackgroundTasks) -> Dict[str, Any]:
data.data["job_id"] = get_job_id()
logger.info(f'POST redirect to: /{redirect_path} as {data.data["job_id"]}')
if ServiceConfigurations.enqueue:
store_data_job._save_data_job(data.data, data.data["job_id"], background_tasks, True)
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=2)) as session:
tasks = [
asyncio.ensure_future(
_post_redirect(session, helpers.customized_redirect_builder(k, v, redirect_path, ServiceConfigurations.customized_redirect_map), data.data, k)
)
for k, v in ServiceConfigurations.urls.items()
]
responses = await asyncio.gather(*tasks)
logger.info(f"responses: {responses}")
return responses
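# Illustrative wiring (not part of the original module). The router would typically
# be mounted on a FastAPI application elsewhere in the project, e.g.:
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(router)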
authors: ["shibuiyusuke@gmail.com"] | author_id: shibuiyusuke@gmail.com
blob_id: cddaacb4ec03f46b0039d3f454686cec9cd18af0 | directory_id: 31ce1d0fa2f527d52c19c52e50a90e2ca83279e7 | path: /Chapter_13/json_example2.py | content_id: 6a7a9b428cb9ed6014e0f595b696a0b5cae27770
detected_licenses: [] | license_type: no_license | repo_name: Gaborjan/Python_Coursera | snapshot_id: 4ef58cbf1f5bdef12cce8d1e05a4748793dbb66a | revision_id: 3f53b0a9da53410f38eb3df50b4f021ba0a28784 | branch_name: refs/heads/master
visit_date: 2022-09-11T06:28:24.309200 | revision_date: 2020-05-27T19:23:24 | committer_date: 2020-05-27T19:23:24 | github_id: 255,036,306 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 401 | extension: py
content:
import json
input='''[
{
"id":"001",
"x":"2",
"name":"Chuck"
},
{
"id":"009",
"x":"7",
"name":"Gabika"
}
]'''
info=json.loads(input)
print(type(info))
print('User count:',len(info))
for item in info:
print(type(item))
print('Name:',item["name"])
print('Id:',item["id"])
print('Attribute:',item["x"])
authors: ["janvari.gabor2@gmail.com"] | author_id: janvari.gabor2@gmail.com
blob_id: f9a07f1248b76978e6b8b1c52aeada5995d17cd4 | directory_id: ea35068bdeb270cfde9ef8cb5803c9510016a55f | path: /HomeFeed-Web App/HomeFeed/flaskblog/__init__.py | content_id: dd6333ffead67db53ec66f51a5405f8f4add1fe3
detected_licenses: [] | license_type: no_license | repo_name: PawanPatankar/Web_Apps | snapshot_id: 398f6c8430fb73c24e5a452504a9c45d9716bab9 | revision_id: 997e65350005fca690341a58233e72375b39eda9 | branch_name: refs/heads/master
visit_date: 2021-07-06T23:13:39.632789 | revision_date: 2020-10-03T07:57:08 | committer_date: 2020-10-03T07:57:08 | github_id: 187,206,307 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 721 | extension: py
content:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
app = Flask(__name__)
app.config['SECRET_KEY']='dbd1c16a8d17965dc1e39fe73b6c4586'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = 'login'
login_manager.login_message_category='info'
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = "wingloryold@gmail.com"
app.config['MAIL_PASSWORD'] = "25051998"
mail = Mail(app)
from flaskblog import routes
|
[
"patankarpawan03@gmail.com"
] |
patankarpawan03@gmail.com
|
0b5cf38e9adaf27a8761abf91e7c9e0a67abfe46
|
955c2caeb13ac0b493d0ef147c44a0d76b5ede28
|
/B1_course/week12/solution.py
|
ee5b2523d78dcca477df413fbad0f750fd2e6b41
|
[] |
no_license
|
ryanh153/Morsels
|
dcb079cafec63d895160a028fffdb799112cf364
|
087d8f4e548e31223acb39862b7491b819542320
|
refs/heads/master
| 2022-05-27T22:45:02.492518
| 2022-03-23T22:39:22
| 2022-03-23T22:39:22
| 214,299,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
from random import randint
from typing import List
class RandMemory:
"""Takes in a max/min value
Generates random numbers in that range (inclusive)
Stores the history of numbers generated"""
def __init__(self, lowest: int, highest: int) -> None:
self._lowest = lowest
self._highest = highest
self._result_log: List[int] = list()
@property
def lowest(self) -> int:
return self._lowest
@property
def highest(self) -> int:
return self._highest
@property
def get(self) -> int:
"""Get a new number and log it"""
self._result_log.append(randint(self.lowest, self.highest))
return self._result_log[-1]
def history(self) -> List[int]:
"""Return list of logged numbers"""
return self._result_log
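# --- Illustrative usage (editor's addition, not part of the original module) ---
if __name__ == "__main__":
    rng = RandMemory(1, 6)                # inclusive range, like a six-sided die
    rolls = [rng.get for _ in range(3)]   # note: `get` is a property, so no parentheses
    print(rolls)                          # e.g. [4, 1, 6]
    print(rng.history())                  # the same three values, in draw order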
|
[
"rhorton@scitec.com"
] |
rhorton@scitec.com
|
f2e63399077fa27918591fffb6acefe07330bce8
|
98ccf9473905b25fda057d441381c3b66f4ad9a7
|
/interfaces/azure/vcycleAzure.py
|
ec518b6a11b6c600038fca67ab57821ea01102db
|
[
"BSD-2-Clause"
] |
permissive
|
cjdcordeiro/vcycle
|
623cf6956738b614e3c2069e249252b310225f7e
|
26b291ff8e301def73086467a90aa6a7a4f924fb
|
refs/heads/master
| 2021-01-18T12:56:16.772671
| 2015-03-27T16:22:50
| 2015-03-27T16:22:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,196
|
py
|
from vcycleBase import vcycleBase
import os
import VCYCLE
import uuid
import time , calendar
import interfaces.azure.client
class vcycleAzure(vcycleBase):
def _create_client(self):
'''Creates a new Azure client'''
tenancy = self.tenancy
self.provider_name = tenancy['tenancy_name']
return interfaces.azure.client.Azure(tenancy['proxy'],
tenancy['tenancy_name'],
storage_account=tenancy['storage_account'],
account_key=tenancy['storage_key'])
def _servers_list(self):
'''Returns a list of all servers created and not deleted in the tenancy'''
return self.client.list_vms()
def _retrieve_properties(self, server, vmtypeName, servers):
'''Returns the server's properties'''
properties = {}
start_time = server.name[server.name.find("-",server.name.find('-')+1)+1:]
properties['startTime'] = int(start_time)
properties['ip'] = server.ip
try:
            properties['heartbeatTime'] = int(os.stat('/var/lib/vcycle/machines/' + server.name + '/machineoutputs/vm-heartbeat').st_ctime)
properties['heartbeatStr'] = str(int(time.time() - properties['heartbeatTime'])) + 's'
except:
properties['heartbeatTime'] = None
properties['heartbeatStr'] = '-'
try:
properties['fizzleTime'] = int(os.stat('/var/lib/vcycle/machines/' + server.name + '/machineoutputs/vm-start').st_ctime)
properties['fizzleStr'] = str(int(properties['fizzleTime']) - int(properties['startTime'])) + 's'
servers[server.name]['fizzle'] = int(properties['startTime']) - int(servers[server.name]['start_time'])
except Exception:
properties['fizzleTime'] = None
properties['fizzleStr'] = '-'
VCYCLE.logLine(self.tenancyName, server.name + ' ' +
(vmtypeName + ' ')[:16] +
(properties['ip'] + ' ')[:16] +
(server.state + ' ')[:8] +
properties['fizzleStr'] + " " +
properties['heartbeatStr'] + " " +
str(int(time.time()) - properties['startTime'] ) + "s"
)
return properties
def _update_properties(self, server, vmtypeName,runningPerVmtype, notPassedFizzleSeconds, properties, totalRunning):
'''Updates the server's properties'''
tenancy = self.tenancy
tenancyName = self.tenancyName
if server.state == 'Stopped' and (properties['startTime']) < tenancy['vmtypes'][vmtypeName]['fizzle_seconds']:
            VCYCLE.logLine(tenancyName, server.name + ' was a fizzle! ' + str(properties['startTime']) + ' seconds')
if server.state == 'Started':
# These ones are running properly
totalRunning += 1
if vmtypeName not in runningPerVmtype:
runningPerVmtype[vmtypeName] = 1
else:
runningPerVmtype[vmtypeName] += 1
# These ones are starting/running, but not yet passed tenancy['vmtypes'][vmtypeName]['fizzle_seconds']
if server.state in ['Starting','Started'] and \
(int(time.time() - properties['startTime']) < tenancy['vmtypes'][vmtypeName]['fizzle_seconds']):
if vmtypeName not in notPassedFizzleSeconds:
notPassedFizzleSeconds[vmtypeName] = 1
else:
notPassedFizzleSeconds[vmtypeName] += 1
return totalRunning
def _describe(self, server):
'''Returns the descripion of a server. This method is empty because when the server is created,
Azure returns directly all the vm description'''
pass
def _delete(self, server, vmtypeName, properties):
'''Deletes a server'''
tenancy = self.tenancy
if server.state == 'Starting':
return False
if server.state == 'Stopped' or \
(server.state == 'Started' and ((int(time.time()) - properties['startTime']) > tenancy['vmtypes'][vmtypeName]['max_wallclock_seconds'])) or \
(
# STARTED gets deleted if heartbeat defined in configuration but not updated by the VM
'heartbeat_file' in tenancy['vmtypes'][vmtypeName] and
'heartbeat_seconds' in tenancy['vmtypes'][vmtypeName] and
server.state == 'Started' and
((int(time.time()) - properties['startTime']) > tenancy['vmtypes'][vmtypeName]['heartbeat_seconds']) and
(
(properties['heartbeatTime'] is None) or
((int(time.time()) - properties['heartbeatTime']) > tenancy['vmtypes'][vmtypeName]['heartbeat_seconds'])
)
):
VCYCLE.logLine(self.tenancyName, 'Deleting ' + server.name)
try:
self.client.delete_vm(server.name)
return True
except Exception as e:
VCYCLE.logLine(self.tenancyName, 'Delete ' + server.name + ' fails with ' + str(e))
return False
def _server_name(self, name=None):
'''Returns the server name'''
return 'vcycle-' + name + '-' + str(int(time.time()))
def _create_machine(self, server_name, vmtypeName, proxy=False):
import base64
tenancy_name = self.tenancyName
user_data = open("/var/lib/vcycle/user_data/%s:%s" % (tenancy_name, vmtypeName), 'r').read()
return self.client.create_virtual_machine(server_name,
username=VCYCLE.tenancies[tenancy_name]['vmtypes'][vmtypeName]['username'],
password=VCYCLE.tenancies[tenancy_name]['vmtypes'][vmtypeName]['password'],
image_name=VCYCLE.tenancies[tenancy_name]['vmtypes'][vmtypeName]['image_name'],
flavor=VCYCLE.tenancies[tenancy_name]['vmtypes'][vmtypeName]['flavor_name'],
user_data="/var/lib/vcycle/user_data/%s:%s" % (tenancy_name, vmtypeName))
|
[
"luis.villazon.esteban@cern.ch"
] |
luis.villazon.esteban@cern.ch
|
d12d2181b7bdfe380b28d28879d1043d0486ef8b
|
5c96777507c87a293c34e03b5f6b768c8a6bddf2
|
/objeto.py
|
7689e82b3ca97b004818940bf4ec9e72b7fc3849
|
[] |
no_license
|
mtgsjr/Machine_Learning
|
63f3162f9d15dd3536399b3615fe6f79f23e8dc9
|
f7c886f9b11d5ea6f4b858117bd1a2228eb4ce83
|
refs/heads/master
| 2023-03-15T10:18:40.536845
| 2021-03-04T23:57:18
| 2021-03-04T23:57:18
| 344,641,687
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
import cv2
import numpy as np
cam=cv2.VideoCapture(0)
kernel=np.ones((5,5),np.uint8)
while (1):
ret,frame=cam.read()
rangomax=np.array([50,255,50])
rangomin=np.array([0,51,0])
mascara=cv2.inRange(frame,rangomin,rangomax)
opening=cv2.morphologyEx(mascara, cv2.MORPH_OPEN, kernel)
x,y,w,h=cv2.boundingRect(opening)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),3)
    cv2.circle(frame,(x+w//2,y+h//2),5,(0,0,255),-1)  # integer division keeps the centre coordinates as ints
cv2.imshow('Camera',frame)
k=cv2.waitKey(1) & 0xFF
if k==27:
break
cam.release()
cv2.destroyAllWindows()
|
[
"mtgsjr@hotmail.com"
] |
mtgsjr@hotmail.com
|
229e1fd8c5d5880338a604437236c845881625d6
|
11228a51cf7bfe3cef852efb0de393ae42c768f2
|
/rules/gatk3/gatk_genotype_gvcfs.smk
|
46b05bc905018529fb1c2267a13a418372eaec26
|
[
"MIT"
] |
permissive
|
orionzhou/snk
|
cd36c6ced8fb514101b1de7c70f322e76368e760
|
5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a
|
refs/heads/master
| 2021-07-13T12:08:53.168670
| 2020-06-01T05:19:46
| 2020-06-01T05:19:46
| 133,903,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
smk
|
def gatk_gg_cmd(wildcards):
java_mem = config['java']['mem']
if 'java_mem' in config['gatk']:
java_mem = config['gatk']['java_mem']
if 'java_mem' in config['gatk']['genotype_gvcfs']:
java_mem = config['gatk']['genotype_gvcfs']['java_mem']
java_tmpdir = config['tmpdir']
if 'tmpdir' in config['java']:
java_tmpdir = config['java']['tmpdir']
cmd = "%s -Xmx%s -Djava.io.tmpdir=%s" % (config['gatk']['cmd'], java_mem, java_tmpdir)
return cmd
rule gatk_genotype_gvcfs:
input:
expand(["%s/{sid}.g.vcf.gz" % config['gatk']['odir']], sid = config['t']['SampleID'])
output:
protected(config['gatk']['outfile'])
log:
"%s/gatk.log" % config['dirl']
params:
cmd = gatk_gg_cmd,
ref = config['gatk']['ref'],
gvcfs = lambda wildcards, input: ["-V %s" % x for x in input],
gvcf = "%s/all.g.vcf.gz" % config['gatk']['odir'],
extra = config['gatk']['genotype_gvcfs']['extra'],
threads:
config["gatk"]['genotype_gvcfs']["threads"]
shell:
"""
source activate gatk
{params.cmd} -T CombineGVCFs \
-R {params.ref} \
{params.extra} \
{params.gvcfs} \
-o {params.gvcf} 2>{log}
{params.cmd} -T GenotypeGVCFs \
-nt {threads} \
-R {params.ref} \
{params.extra} \
-V {params.gvcf} \
-o {output} 2>{log}
"""
|
[
"zhoupenggeni@gmail.com"
] |
zhoupenggeni@gmail.com
|
f12b661134198995402bfdfd54a3df7c0e327f53
|
89e141cf19b1bafc5b2ef186aeb2631a893d3378
|
/2601/main.py
|
2eea3a2f9c1fc3f121e4d6879b1bb5535f7f77f7
|
[] |
no_license
|
100ballovby/8v_3ch
|
7369c287b6f95cf3a7e54a142e8b22c8b7394183
|
e064d920ff12cdab65cb4dcb1132af00ae7a693c
|
refs/heads/master
| 2023-02-23T08:45:03.374521
| 2021-01-26T09:20:38
| 2021-01-26T09:20:38
| 329,604,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
'''
while boolean_expression:
    action 1
    ...
    action n
'''
# ask the user to type a number into the variable
n = input('Enter a number: ')
# loop until the variable n holds an integer (int)
while type(n) != int:
    # try to convert n to an integer
    try:
        n = int(n)
    # if that fails, catch the ValueError
    except ValueError:
        # report it and ask for a number again
        print('That was not an integer!')
        n = input('Enter a number: ')
"""As soon as the loop gets an integer, it stops"""
if n % 2 == 0:
    print('Even!')
else:
    print('Odd!')
|
[
"greatraksin@icloud.com"
] |
greatraksin@icloud.com
|
571ca496b3b9b4f0cc92ccbe72be970e9ffc1562
|
f7f54c3a496ed05a315919db452d0198488d7d3e
|
/utils.py
|
915f1f7d0c7cd0b52bc9db6f094269f073797d8a
|
[] |
no_license
|
hiyouthinker/tcp_fsm
|
b3867918a079f2b8f2471b620551de4d50b0d879
|
d2d0a5b96fbd28a2d5b2e5df5fffcba65bdf765f
|
refs/heads/master
| 2023-04-18T01:54:30.099968
| 2021-05-04T22:33:25
| 2021-05-04T22:33:25
| 363,361,315
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
#!/usr/bin/env python
#coding=utf-8
'''
BigBro @ 2021.04
'''
from scapy.all import *
import signal
import tcp_state
def send_tcp_pkts(type):
keys = tcp_state.sessions.keys()
for key in keys :
value = tcp_state.sessions.get(key)
state = value[0]
seq = value[2]
ack = value[1] + value[3]
flags = tcp_state.tcp_flags_rstack
target = "RST"
substate = tcp_state.TCP_SESSION_SUBSTATE_CLOSED | tcp_state.tcp_session_server_rst
if ((value[0] == tcp_state.TCP_FIN_WAIT) and (value[4] & 0x0f) == tcp_state.TCP_SESSION_SUBSTATE_CLOSED):
continue
if (type == 1) :
flags = tcp_state.tcp_flags_fin
target = "FIN"
substate = tcp_state.TCP_SESSION_SUBSTATE_FIN_WAIT1 | tcp_state.tcp_session_server_fin
print ("send %s to [%s:%d => %s:%d] session (state: %s, seq/ack: %d/%d, length: %d)"
% (target, key[2], key[3], key[0], key[1], tcp_state.tcp_session_states[state], seq, ack, value[3]))
value = (tcp_state.TCP_FIN_WAIT, value[1], value[2], value[3], substate)
tcp_state.sessions.update({key : value})
l3 = IP(src=key[2], dst=key[0])/TCP(sport=key[3], dport=key[1], flags=flags, seq=seq, ack=ack)
send(l3, verbose=False)
def show_tcp_all_sessions():
keys = tcp_state.sessions.keys()
print "\nsession table: %d item(s)" % len(keys)
for key in keys :
value = tcp_state.sessions.get(key)
state = value[0]
if (state == tcp_state.TCP_FIN_WAIT):
print ("\t[%s:%d => %s:%d], state: %s/%s (first %s)"
% (key[0], key[1], key[2], key[3],
tcp_state.tcp_session_states[state],
tcp_state.tcp_session_substates[value[4] & 0x0f],
tcp_state.tcp_session_destroy_first_pkt_dir[value[4] & 0xf0]))
else :
print ("\t[%s:%d => %s:%d], state: %s"
% (key[0], key[1], key[2], key[3], tcp_state.tcp_session_states[state]))
def signal_handler(signum, stack):
print 'Received: %d' % signum
if (signum == signal.SIGINT):
show_tcp_all_sessions()
send_tcp_pkts(0)
show_tcp_all_sessions()
exit(0)
elif (signum == signal.SIGUSR1):
show_tcp_all_sessions()
else :
show_tcp_all_sessions()
send_tcp_pkts(1)
show_tcp_all_sessions()
|
[
"hi.youthinker@gmail.com"
] |
hi.youthinker@gmail.com
|
514db84cd47d638f0cb747ea8fba0c4df5427e73
|
873bcf16ea9438d03c14bba89be548e4d566d6b7
|
/mini-projects/Library/microtest.py
|
7376990417f302e62cf964971bc1bbfe4bf6de08
|
[] |
no_license
|
snaik/codeskulptor-mini-projects
|
4af46b3d17bff396284b9f33a1271bb3c1090e77
|
6a12cdf44a6fecd929b468b7ced2827649cac832
|
refs/heads/master
| 2020-12-24T16:58:42.550286
| 2012-11-09T08:10:06
| 2012-11-09T08:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
import types, sys, traceback
class TestException(Exception): pass
def test(modulename, verbose=None, log=sys.stdout):
''' Execute all functions in the named module which have __test__
in their name and take no arguments.
modulename: name of the module to be tested.
verbose: If true, print test names as they are executed
Returns None on success, raises exception on failure.
'''
module = __import__(modulename)
total_tested = 0
total_failed = 0
for name in dir(module):
if '__test__' in name:
obj = getattr(module, name)
if (isinstance(obj, types.FunctionType) and
not obj.func_code.co_argcount):
if verbose:
print>>log, 'Testing %s' % name
try:
total_tested += 1
obj()
except Exception, e:
total_failed += 1
print>>sys.stderr, '%s.%s FAILED' % (modulename, name)
traceback.print_exc()
message = 'Module %s failed %s out of %s unittests.' % (
modulename, total_failed, total_tested)
if total_failed:
raise TestException(message)
if verbose:
print>>log, message
def __test__():
print 'in __test__'
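# --- Illustrative usage (editor's addition; "mymath" is a made-up module name) ---
# Assuming a sibling module mymath.py that defines zero-argument functions with
# "__test__" in their names (e.g. def add__test__(): assert 1 + 1 == 2), run:
#
#   import microtest
#   microtest.test('mymath', verbose=True)
#
# Each test name is printed as it runs; TestException is raised if any test fails.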
|
[
"sachin.u.naik@gmail.com"
] |
sachin.u.naik@gmail.com
|
dd75b7eb7bc4981c28a04e678eb2541210ddbf26
|
b5f7f7df4c6066508dfe3667018c74424c55c2f3
|
/teachers/apps.py
|
43c7c3112f47f758302b27fda48cf990c25f9b69
|
[] |
no_license
|
miha-pavel/hillel_students_tracker
|
539d8b13c8cd210e17d2f10232fae79a663e21c6
|
b510e7035faa2b5865491bf2fb167236fda9bf83
|
refs/heads/master
| 2022-12-12T04:56:52.921872
| 2020-02-12T05:35:29
| 2020-02-12T05:35:29
| 230,735,782
| 0
| 0
| null | 2022-12-08T03:27:12
| 2019-12-29T10:37:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 211
|
py
|
from django.apps import AppConfig
class TeachersConfig(AppConfig):
name = 'teachers'
def ready(self):
        # The signals import must only be done here (inside ready())
import teachers.signals
|
[
"pavlom@oneplanetops.com"
] |
pavlom@oneplanetops.com
|
38e32c8adec32252e5a5ee80d1c30ea914b8702f
|
3992bdce87f78028fd4f9f48d954d6ccbfcd5ad5
|
/Codes/ssort.py
|
20430a46b9f9a2ee525fe4719909f9e1de665170
|
[] |
no_license
|
IIInvokeII/1st_sem_python
|
b8d973da8349b0b0b148683dd16abe9a25d77b8d
|
508fe50a85f9fd056b7e53b05ea2ae1b0ae420d1
|
refs/heads/main
| 2023-07-10T22:46:58.220059
| 2021-08-11T11:27:20
| 2021-08-11T11:27:20
| 394,961,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
def ssortasc(a):
for i in range(len(a)):
mini=i
for j in range(i+1,len(a)):
if(a[mini]>a[j]):
mini=j
a[mini],a[i]=a[i],a[mini]
return a
def ssortdesc(a):
for i in range(len(a)):
mini=i
for j in range(i+1,len(a)):
if(a[mini]<a[j]):
mini=j
a[mini],a[i]=a[i],a[mini]
return a
a=[76,9,233,6564,254,6,7,4,2,6,8,4,2,5,1]
print(ssortasc(a))
print(ssortdesc(a))
print("--------------------")
import bsort as b
b.bsortasc(a)
print(a)
c=a[3:]
print(c)
print("Min :",min(c))
|
[
"noreply@github.com"
] |
IIInvokeII.noreply@github.com
|
edb8a2e13321ebe34626eb8f672c16db8392b8d2
|
0e1e64bd49cce6e94543ba473edc69fb2547905b
|
/deepneuro/pipelines/Skull_Stripping/cli.py
|
e3b07ea02050cedd058b5d11b0e60cbb65844278
|
[
"MIT"
] |
permissive
|
medical-projects/DeepRad
|
ab967577695c657d181adba933c2e1af81c26fab
|
96b16aac873089e0d766fea34ed428a59c9e9080
|
refs/heads/master
| 2021-09-22T23:09:36.208120
| 2018-09-12T08:53:42
| 2018-09-12T08:53:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,500
|
py
|
import argparse
import sys
import os
from deepneuro.docker.docker_cli import nvidia_docker_wrapper
class Skull_Stripping_cli(object):
def __init__(self):
parser = argparse.ArgumentParser(
description='A number of pre-packaged commands used by the Quantiative Tumor Imaging Lab at the Martinos Center',
usage='''skull_stripping <command> [<args>]
The following commands are available:
pipeline Run the entire segmentation pipeline, with options to leave certain pre-processing steps out.
docker_pipeline Run the previous command via a Docker container via nvidia-docker.
''')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Sorry, that\'s not one of the commands.')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def parse_args(self):
parser = argparse.ArgumentParser(
description='''skull_strip pipeline <T1post> <FLAIR> <output_folder> [-gpu_num <int> -niftis -nobias -preprocessed -keep_outputs]
Segment an image from DICOMs with all preprocessing steps included.
-output_folder: A filepath to your output folder. Two nifti files will be generated "enhancingtumor.nii.gz" and "wholetumor.nii.gz"
-T1POST, -FLAIR: Filepaths to input MR modalities. Inputs can be either nifti files or DICOM folders. Note that DICOM folders should only contain one volume each.
-mask_output: Name of output for your binary skull mask. Should not be a filepath, like '/home/user/enhancing.nii.gz', but just a name, like "enhancing"
-gpu_num: Which CUDA GPU ID # to use. Defaults to 0, i.e. the first gpu.
-debiased: If flagged, data is assumed to already have been N4 bias-corrected, and skips that preprocessing step.
-resampled: If flagged, data is assumed to already have been isotropically resampled, and skips that preprocessing step.
-registered: If flagged, data is assumed to already have been registered into the same space, and skips that preprocessing step.
-save_all_steps: If flagged, intermediate volumes in between preprocessing steps will be saved in output_folder.
-save_preprocessed: If flagged, the final volume after all preprocessing steps will be saved in output_folder
''')
parser.add_argument('-output_folder', type=str)
parser.add_argument('-T1POST', type=str)
parser.add_argument('-FLAIR', type=str)
parser.add_argument('-input_directory', type=str)
parser.add_argument('-mask_output', nargs='?', type=str, const='skullstrip_mask', default='skullstrip_mask.nii.gz')
parser.add_argument('-gpu_num', nargs='?', const='0', default='0', type=str)
parser.add_argument('-debiased', action='store_true')
parser.add_argument('-resampled', action='store_true')
parser.add_argument('-registered', action='store_true')
parser.add_argument('-normalized', action='store_true')
parser.add_argument('-save_preprocess', action='store_true')
parser.add_argument('-save_all_steps', action='store_true')
parser.add_argument('-output_probabilities', action='store_true')
args = parser.parse_args(sys.argv[2:])
return args
def pipeline(self):
args = self.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
from deepneuro.pipelines.Skull_Stripping.predict import skull_strip
skull_strip(output_folder=args.output_folder, T1POST=args.T1POST, FLAIR=args.FLAIR, ground_truth=None, input_directory=args.input_directory, bias_corrected=args.debiased, resampled=args.resampled, registered=args.registered, normalized=args.normalized, save_preprocess=args.save_preprocess, save_all_steps=args.save_all_steps, mask_output=args.mask_output)
def docker_pipeline(self):
args = self.parse_args()
nvidia_docker_wrapper(['skull_strip', 'pipeline'], vars(args), ['output_folder', 'T1POST', 'FLAIR', 'input_directory'], docker_container='qtimlab/deepneuro_skull_strip:latest')
def main():
Skull_Stripping_cli()
|
[
"andrew_beers@alumni.brown.edu"
] |
andrew_beers@alumni.brown.edu
|
92663081b0db9c1d5fd17c8e4658db632ac361e8
|
ca5201a99fa9271e7833e88c5926d48a11061519
|
/brynweb/userdb/migrations/0017_team_tenants_available.py
|
ffe40d9028c0caef9ce3b53d94b1c197de939c00
|
[] |
no_license
|
m-bull/bryn
|
c94ba0347f2e48e2f921393818150a072cd6d66a
|
68a42d45d1938f7219b4af1666d70cae0b34cf32
|
refs/heads/master
| 2020-03-06T19:10:25.268797
| 2018-08-31T12:45:45
| 2018-08-31T12:45:45
| 127,021,966
| 1
| 0
| null | 2018-03-27T17:41:04
| 2018-03-27T17:23:16
|
Python
|
UTF-8
|
Python
| false
| false
| 459
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-13 07:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userdb', '0016_auto_20160712_0722'),
]
operations = [
migrations.AddField(
model_name='team',
name='tenants_available',
field=models.BooleanField(default=False),
),
]
|
[
"n.j.loman@bham.ac.uk"
] |
n.j.loman@bham.ac.uk
|
c6df51600a30b2b426cae7397d74df668ae25de0
|
5092b7eab30ae9a1e0c17d6915b9da13ae91e663
|
/mit-6.00-ex/ps5/ps5_ghost.py
|
676e46fff504071c2180fa4abc6403ba54de1f8e
|
[
"Giftware"
] |
permissive
|
Hausdorffcode/mit-ex
|
acf2db5d645c887b09d021aacceeddcca5bea8b8
|
48d34a1de35a1fd789d1cb5278a5510be356da87
|
refs/heads/master
| 2021-01-12T10:47:15.249520
| 2016-11-03T02:04:45
| 2016-11-03T02:04:45
| 72,699,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,229
|
py
|
# Problem Set 5: Ghost
# Name:
# Collaborators:
# Time:
#
import random
import string
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# wordlist: list of strings
wordlist = []
for line in inFile:
wordlist.append(line.strip().lower())
print " ", len(wordlist), "words loaded."
return wordlist
def get_frequency_dict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
# (end of helper code)
# -----------------------------------
# Actually load the dictionary of words and point to it with
# the wordlist variable so that it can be accessed from anywhere
# in the program.
wordlist = load_words()
# TO DO: your code begins here!
def display_fragment(fragment):
print "Current word fragment: '" + fragment + "'"
def is_valid_letter(letter):
return letter in string.ascii_letters
def player_number(player):
if player:
return '2'
else:
return '1'
def find_str(fragment, wordlist):
isIn = False
for word in wordlist:
if fragment in word:
isIn = True
return isIn
def who_wins(fragment, player, wordlist):
if len(fragment) > 3 and fragment in wordlist:
print "Player " + player_number(player) + " loses because '" + fragment + "' is a word!"
print "Player " + player_number(not player) + " wins!"
return True
elif not find_str(fragment, wordlist):
print "Player " + player_number(player) + " loses because no word begins with '" + fragment + "'!"
print "Player " + player_number(not player) + " wins!"
return True
else:
return False
def input_letter(player, fragment):
letter = "1"
    while not is_valid_letter(letter):
if player:
letter = raw_input("Player 2 says letter: ")
else:
letter = raw_input("Player 1 says letter: ")
fragment += string.lower(letter)
return fragment
def whosTurn(player):
if player:
print "Player 2's turn."
else:
print "Player 1's turn."
def play_game(wordlist):
print "Welcome to Ghost!"
player = False
print "Player 1 goes first."
fragment = ''
display_fragment(fragment)
while True:
fragment = input_letter(player, fragment)
print
display_fragment(fragment)
if who_wins(fragment, player, wordlist):
break
else:
player = not player
whosTurn(player)
play_game(wordlist)
|
[
"m15950172843@163.com"
] |
m15950172843@163.com
|
fc9f30f416f389d049fb8ed919eb064c11d92309
|
4faee7abaf73112bc07ac9c32f3ed97b26d82d52
|
/pirpy/io/filenames.py
|
26c26f0f48e6f506c0445d9d10ee8b0fd9afce74
|
[] |
no_license
|
juliotux/pirpy
|
8eeca3cd3d03553264e9081f28aae7fff154d426
|
9b590d4d61fa0d8220114f6c38163c0773048942
|
refs/heads/master
| 2020-04-21T03:40:56.861887
| 2016-02-26T17:27:56
| 2016-02-26T17:27:56
| 38,067,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,194
|
py
|
'''
Functions to easily generate lists of FITS file names, following some standards.
'''
from os.path import join
import glob
__all__ = ['from_basename', 'from_file', 'ls']
def from_basename(basename, start_count, n_img, pad, sep='_', base_dir=''):
'''
This routine generates a file list, based on a basename, incrementing from
    a start_count by n_img times. The names follow the standard of the
    acquisition program from Pico dos Dias Observatory, and look like:
basename_0000.fits
basename_0001.fits
basename_0002.fits
As this kind of name is used for other observatories, with different
basename-number separator, you can set the sep variable to customize it.
Parameters:
basename : string
The basename of the file list.
start_count : int
The number of the first image.
n_img : int
The total number of images.
pad : int
The number of digits to increment, like 4 for 0000.fits
sep (optional) : string
The string that separates the basename and the number. Default: '_'
base_dir (optional) : string
The path to be appended before the name.
Returns:
list of string: A list with the filenames.
'''
return [join(base_dir,basename + sep + str(i+start_count).rjust(pad,'0') + '.fits')
for i in xrange(n_img)]
def from_file(filename):
'''
Generates a list of files from a file that contains the list. What???
Example:
$ ls *.fits >> mylist.txt
mylist.txt
----
file01.fits
file02.fits
file03.fits
----
This code will read this and return:
['file01.fits','file02.fits','file03.fits']
Parameters:
filename : string
The file that contains the list.
Returns:
list of string: The list of the filenames found inside the file.
'''
f1 = open(filename, 'rb').readlines()
f2 = []
for i in f1:
f2.append(i.strip('\n'))
return f2
def ls(pattern):
'''
Returns a list of filenames with a `ls` pattern.
'''
return glob.glob(str(pattern))
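# --- Illustrative usage (editor's addition; 'ngc1234' and 'raw' are made-up names) ---
# from_basename('ngc1234', start_count=1, n_img=3, pad=4, base_dir='raw')
# would return:
#   ['raw/ngc1234_0001.fits', 'raw/ngc1234_0002.fits', 'raw/ngc1234_0003.fits']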
|
[
"juliocampagnolo@gmail.com"
] |
juliocampagnolo@gmail.com
|
0abc1075d8f5d1b2f7f2d1a31f01c198def2afc2
|
9e397b7e5787826534d06972ce7bb3390aab3677
|
/manage.py
|
d7c015a88c210b2a2cabe4ce7772f3f1bb913329
|
[] |
no_license
|
KangSuzy/My_first_blog
|
88d6dca24026c00471f1b9fa0e0ca9f62b6a82e6
|
17b7591cde84f223a01c4265ca9f71e20f193741
|
refs/heads/master
| 2020-04-14T00:30:37.131183
| 2019-01-01T16:09:01
| 2019-01-01T16:09:01
| 163,535,213
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"astnwl321@gmail.com"
] |
astnwl321@gmail.com
|
14cd63d1eece035c530e8b448b2b946ff4fbb808
|
bbea674e2c8b377dc4ba98bb32c9945d161e5c79
|
/Single_pair_of_images_model/unetmodel.py
|
5715399cf3db34f0fef73bf1782c8150250fc6a9
|
[] |
no_license
|
AtilaSaraiva/Unet-LSRTM-filter
|
a9084382ed9e8dc2195d2c1b5d05a551b3eccec6
|
841bc14b57568fd07a051d140c7e7660730b6684
|
refs/heads/master
| 2023-07-02T14:16:55.790062
| 2021-08-11T22:28:44
| 2021-08-11T22:28:44
| 358,454,156
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,142
|
py
|
from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, concatenate, UpSampling2D
from keras.optimizers import Adam
from keras.models import *
from keras.utils import plot_model
def unet2(pretrained_weights = None,input_size = (512,512,1)):
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv3)
# pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
# conv4 = Conv2D(512, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(pool3)
# conv4 = Conv2D(512, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv4)
# drop4 = Dropout(0.5)(conv4)
# pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
# conv5 = Conv2D(1024, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(pool4)
# conv5 = Conv2D(1024, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv5)
# drop5 = Dropout(0.5)(conv2)
# up6 = Conv2D(512, 2, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
# merge6 = concatenate([drop4,up6], axis = 3)
# conv6 = Conv2D(512, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(merge6)
# conv6 = Conv2D(512, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv6)
# up7 = Conv2D(256, 2, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
# merge7 = concatenate([conv3,up7], axis = 3)
# conv7 = Conv2D(256, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(merge7)
# conv7 = Conv2D(256, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(128, 2, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv3))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(64, 2, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv10 = Conv2D(1, 1)(conv9)
model = Model(inputs = inputs, outputs = conv10)
# model.compile(optimizer = Adam(lr = 1e-4), loss = 'mean_squared_error', metrics = ['mean_squared_error'])
model.compile(optimizer = Adam(lr = 0.5e-4),
loss = 'mean_squared_error',
metrics = ['mean_squared_error'])
#model.summary()
plot_model(model, to_file='model.png')
if(pretrained_weights):
model.load_weights(pretrained_weights)
return model
def unet1(pretrained_weights = None,input_size = (512,512,1)):
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
    model = Model(inputs = inputs, outputs = conv10)
# model.compile(optimizer = Adam(lr = 1e-4), loss = 'mean_squared_error', metrics = ['mean_squared_error'])
    model.compile(optimizer = Adam(lr = 1e-4),
loss = 'cosine_similarity',
metrics = ['mean_squared_error'])
#model.summary()
if(pretrained_weights):
model.load_weights(pretrained_weights)
return model
def unet(pretrained_weights = None,input_size = (512,512,1),learningRate=0.5e-4):
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, 2, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(512, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(256, 2, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(256, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(128, 2, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(64, 2, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'tanh', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv10 = Conv2D(1, 1, activation = 'tanh')(conv9)
model = Model(inputs = inputs, outputs = conv10)
# model.compile(optimizer = Adam(lr = 1e-4), loss = 'mean_squared_error', metrics = ['mean_squared_error'])
model.compile(optimizer = Adam(learning_rate = learningRate),
loss = 'mean_squared_error',
metrics = ['mean_squared_error'])
#model.summary()
if(pretrained_weights):
model.load_weights(pretrained_weights)
return model
if __name__ == "__main__":
model = unet()
plot_model(model, to_file='model.png')
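# --- Illustrative training sketch (editor's addition; shapes and data are dummy assumptions) ---
#   import numpy as np
#   x = np.random.rand(4, 512, 512, 1).astype('float32')   # e.g. migrated image patches
#   y = np.random.rand(4, 512, 512, 1).astype('float32')   # e.g. target filtered patches
#   model = unet(input_size=(512, 512, 1), learningRate=0.5e-4)
#   model.fit(x, y, batch_size=1, epochs=1)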
|
[
"atilasaraiva@gmail.com"
] |
atilasaraiva@gmail.com
|
1c06cea3dc146b10c247aa89165127ff7527308d
|
4e891592463f5daa53573a89a8ed3eaa1ca20c30
|
/test/test_db_matches_ui.py
|
2fa2528d8dceae2b116d1aff18b82467a79d3f58
|
[
"Apache-2.0"
] |
permissive
|
Doc-fee/python_training
|
36940047e5cd8d21a44e2b51e5fbf82e68e5684c
|
67464bba88a6ddfc05eb93fde4866de6b0542b63
|
refs/heads/master
| 2021-04-14T05:21:02.346418
| 2020-05-25T17:35:22
| 2020-05-25T17:35:22
| 249,209,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
from model.group import Group
def test_group_list(app, db):
ui_list = app.group.get_group_list()
def clean(group):
return Group(id=group.id, name=group.name.strip())
db_list = map(clean, db.get_group_list())
assert sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max)
|
[
"bugsbunny.k38@gmail.ru"
] |
bugsbunny.k38@gmail.ru
|
f7757b2880b47dfefd72c18007828fbbe1155d51
|
4dfea1afb726814bb6ff8d4f1da8b1106afe39a7
|
/istype/internal/__init__.py
|
440c5a335bbba2b9587b71d6b06f6ac0829e2973
|
[
"MIT"
] |
permissive
|
Cologler/istype-python
|
ddd1b7341d2b940fccdf5f32d370842936fb7b95
|
b4dcea88468b1ee43ebb36413b099e3e8508b3ce
|
refs/heads/master
| 2021-06-05T18:40:54.585381
| 2020-06-29T03:43:44
| 2020-06-29T03:43:44
| 114,655,652
| 6
| 0
|
MIT
| 2020-06-29T03:43:00
| 2017-12-18T15:16:22
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
import os
import importlib
from .common import TypeMatcher
for name in os.listdir(os.path.dirname(__file__)):
if name.startswith('_') or not name.endswith('.py'):
continue
importlib.import_module('.' + name[:-3], __name__)
|
[
"skyoflw@gmail.com"
] |
skyoflw@gmail.com
|
f83352d6e3685adb02728b8444acd02c40e198b8
|
8a2aa2c4480c0c874275a8d8984d292b495bdd16
|
/tutorons/common/migrations/0002_auto_20160208_0000.py
|
a0b707c5149c3271fca3c98ba71d4e6a627797f3
|
[] |
no_license
|
andrewhead/tutorons-server
|
76318bf75420f962da55c4acbd1b3c9fa56dd7ce
|
8ea4cc3d7d1431de03aef1806d7d5f6e4a2fa3c0
|
refs/heads/master
| 2021-01-15T13:14:20.157931
| 2018-10-24T18:17:42
| 2018-10-24T18:17:42
| 26,420,128
| 7
| 2
| null | 2016-07-08T06:53:38
| 2014-11-10T03:53:42
|
Python
|
UTF-8
|
Python
| false
| false
| 569
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='region',
name='end_time',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='region',
name='start_time',
field=models.DateTimeField(null=True, blank=True),
),
]
|
[
"head.andrewm@gmail.com"
] |
head.andrewm@gmail.com
|
d1394ce8aee7d45bdd5c7b4ac0743949f87e6994
|
985013eb7f20a38b756ca1f000971df1cd0f0746
|
/LoadAllJsonFilesFromDir.py
|
d939bdecd551ce0bc0f121ffbb0e883f972c16c5
|
[] |
no_license
|
hasantuberlin/Impro-project
|
6fc3ee344cf840ee5f9fe89fb72769a7aa05cc6a
|
a52f6b03b584cce75004eecea5fa1edbc9f2a2ab
|
refs/heads/master
| 2023-01-05T03:27:12.558540
| 2020-11-05T12:37:01
| 2020-11-05T12:37:01
| 310,293,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
'''
loading data in elasticsearch
'''
import requests, json, os
from elasticsearch import Elasticsearch
#connecting to elastic search
res = requests.get('http://localhost:9200')
print (res.content)
es = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
directory = os.getcwd()
for filename in os.listdir(directory):
if filename.endswith(".json"):
f = open(filename)
print(filename)
#docket_content = f.read()
# Send the data into es
#es.index(index='myindex', ignore=400, doc_type='docket',
#id=i, body=json.loads(docket_content))
#i = i + 1
|
[
"hasan.1@campus.tu-berlin.de"
] |
hasan.1@campus.tu-berlin.de
|
ada4d7c93cd7370c5ea393351b6cc0a57da531b7
|
1643d6745b3806e7c52c6ea7e744ebee0eb518fd
|
/SE_Backend/ContestPlus/migrations/0006_auto_20201210_1617.py
|
988b5e354d1897ba1f24fd5b59fc80a42b57dce1
|
[] |
no_license
|
cobs14/SE_ContestServicePlatform
|
7eb3279a5f42bc34343eabaa303789d984e2834a
|
3a3d9d4fe7cbfa6a61110a7ea7da7eb5e7db4535
|
refs/heads/master
| 2023-02-27T19:58:32.348228
| 2021-01-31T13:19:53
| 2021-01-31T13:19:53
| 299,851,916
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# Generated by Django 3.1.3 on 2020-12-10 16:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ContestPlus', '0005_contest_publishresult'),
]
operations = [
migrations.AlterField(
model_name='user',
name='userType',
field=models.CharField(default='guest', max_length=16),
),
]
|
[
"52957393+LiXCloudland@users.noreply.github.com"
] |
52957393+LiXCloudland@users.noreply.github.com
|
e9f99dd72dc9800d274ca3b2ee210f847433f7ee
|
be4de24b9cca2fe00328c30c609e53b46e698d17
|
/quandl_to_db.py
|
140d170e7c6373def894c2590ae2fb11f8d7c165
|
[] |
no_license
|
carstenf/Security-Master
|
2544a8a18c293be5b8b38a4c80d07dbb5f110756
|
230a029c1d942a8be2af042e5ab4cb6503b02c57
|
refs/heads/master
| 2021-08-22T04:09:22.134263
| 2021-01-10T13:55:11
| 2021-01-10T13:55:11
| 242,366,399
| 15
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,363
|
py
|
import time
import math
import pandas as pd
from sqlalchemy import create_engine
from tqdm import tqdm
import quandl
import zipfile
import timeit
# create the engine for the database
engine = create_engine('mysql+mysqlconnector://root:root@localhost/securities_master')
# define path to data dump from data vendor
#path_dir = '/Users/carsten/opt/data/quandl/'
path_dir = '/Users/carsten/ziplinetools/data/quandl/'
#ext = '.zip' # for data downloaded with this method -> quandl.export_table('SHARADAR/TICKERS', filename=path)
#ext = '.csv' # download manually from Quandl
# enter your Quandl API key
quandl.ApiConfig.api_key = 'Quand Api key'
quandl.ApiConfig.api_version = '2015-04-09'
# if from_quandl == True, download the bundles from Quandl and store them on disk
# if from_quandl == False, read the bundles from disk
# from_quandl = False
# mode_db == 1: read everything from disk, using files downloaded manually from Quandl
# mode_db == 2: read everything directly from Quandl with the API key
mode_db = 1 # todo, define as input
if mode_db == 1:
from_quandl = False
ext = '.csv'
if mode_db == 2:
from_quandl = True
ext = '.zip'
def get_symbol_security_id_quandl(qtable):
## get ticker symbol and security_id relation
# query data_vendor_id from the data_vendor table
query = """SELECT id FROM data_vendor WHERE name = 'Quandl';"""
value = pd.read_sql_query(query, engine)
data_vendor_id = value.id[0].astype(int)
# query ticker symbols and the ticker_id from the security table
query_1 = """SELECT ticker, id FROM security WHERE """
# choose the Quandel table, ticker for SF1=fundamental , SEP=price or SFP=ETF,INDEX
if qtable == 'SF1':
query_2 = """ ttable = '{}' """.format( 'SF1')
if qtable == 'SEP':
query_2 = """ ttable = '{}' """.format( 'SEP')
if qtable == 'SFP':
query_2 = """ ttable = '{}' """.format( 'SFP')
query_3 = """ and data_vendor_id = {} """.format( data_vendor_id )
query = query_1 + query_2 + query_3
# query securities_master
result = pd.read_sql_query(query, engine)
return result
def get_name_exchange_id():
query = """SELECT id, name FROM exchange;"""
result = pd.read_sql_query(query, engine)
return result
def initalise_database():
############################## fill the 3 small table Data Vendor, Asset Class and Exchange Table ########
#Define Data Vendors and populate data_vendor table
    df_vendor=pd.DataFrame({'names': ['Quandl','yahoo'], 'website': ['www.quandl.com', 'www.yahoo.com']})
# Fill Data Vendor
# initial
insert_init = """insert into data_vendor (name, website_url) values """
# Add values for all days to the insert statement
vals = ",".join(["""('{}','{}')""".format(row.names,row.website)for items,row in df_vendor.iterrows()])
# Handle duplicates - Avoiding errors if you've already got some data
# in your table
insert_end = """ on duplicate key update name=values(name),website_url=values(website_url);"""
# Put the parts together
query = insert_init + vals + insert_end
# Fire insert statement
engine.execute(query)
Process=True
print('data_vendor table filled')
# Define Asset Classes and populate asset_class table
list_asset=['stocks','future']
# Fill Asset Class
# initial
insert_init = """insert into asset_class (asset_class) values """
# Add values for all days to the insert statement
vals = ",".join(["""('{}')""".format(items)for items in list_asset])
# Handle duplicates - Avoiding errors if you've already got some data
# in your table
insert_end = """ on duplicate key update asset_class=values(asset_class);"""
# Put the parts together
query = insert_init + vals + insert_end
# Fire insert statement
engine.execute(query)
Process=True
print('asset_class table filled')
# Define Exchanges and populate exchange table
df_exchange=pd.DataFrame({'exchange': ['NYSE','NASDAQ','NYSEMKT','OTC','NYSEARCA','BATS','INDEX','None'], 'currency': ['USD','USD','USD','USD','USD','USD','P','None']})
# Fill Exchange
# initial
insert_init = """insert into exchange (name, currency) values """
# Add values for all days to the insert statement
vals = ",".join(["""('{}','{}')""".format(row.exchange,row.currency)for items,row in df_exchange.iterrows()])
# Handle duplicates - Avoiding errors if you've already got some data
# in your table
insert_end = """ on duplicate key update name=values(name),currency=values(currency);"""
# Put the parts together
query = insert_init + vals + insert_end
# Fire insert statement
engine.execute(query)
Process=True
print('exchange table filled')
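# --- Illustrative note (editor's addition) -----------------------------------
# The loaders in this module all build the same MySQL "upsert" statement:
#   INSERT INTO <table> (col1, col2, ...) VALUES (...), (...)
#   ON DUPLICATE KEY UPDATE col1 = VALUES(col1), col2 = VALUES(col2), ...
# so re-running a loader updates existing rows instead of failing on
# duplicate-key errors.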
def fill_ticker():
############################################################## Fill Security / Ticker table
    ## next step: needs regular updating whenever new IPOs appear
    # Populate security table with ticker symbols
# defintion of the asset class
# need to be adjusted if other securities should be read into database
# query asset_class_id
query = """SELECT id FROM asset_class WHERE asset_class = 'stocks';"""
value = pd.read_sql_query(query, engine)
asset_class_id = value.id[0].astype(int)
# definition of the vendor
# need to be adjusted if other securities should be read into database
# query data_vendor_id
query = """SELECT id FROM data_vendor WHERE name = 'Quandl';"""
value = pd.read_sql_query(query, engine)
data_vendor_id = value.id[0].astype(int)
# read ticker data from file
file_name = 'SHARADAR_TICKERS' + ext
path = path_dir + file_name
data = pd.read_csv(path)
data = data.fillna(0)
# sending the ticker information to the security table
insert_init = """insert into security
(ticker, name, code, sector, isdelisted, ttable, category, exchange_id, asset_class_id, data_vendor_id)
values """
# get the exchange and exchange_id relation
name_ex_id=get_name_exchange_id()
for index, row in tqdm(data.iterrows(), total=data.shape[0]):
# check if empty
if name_ex_id[name_ex_id['name'] == row.exchange ].empty:
print ("""please add ("{}") to exchange list""".format(row.exchange) )
# find the exchange_id
exchange_id=name_ex_id[name_ex_id['name'] == row.exchange ].id.iloc[0]
if math.isnan(exchange_id):
print('error, exchange not in database')
print(row.exchange)
# Add values for all days to the insert statement
vals = """("{}","{}",{},"{}","{}","{}","{}",{},{},{})""".format(
row.ticker,
row['name'],
row.siccode,
row.sector,
row.isdelisted,
row.table,
row.category,
exchange_id,
asset_class_id,
data_vendor_id)
# write all the data into memory and dump them all to the database to improve speed
# not possible with price and fundamental table; gets overflow message from database
# if there is an error regarding overflow from database, remove this and change it accordingly
if index == 0:
all_vals=vals
else:
all_vals= ",".join([all_vals,vals])
# Handle duplicates - Avoiding errors if you've already got some data
# in your table
insert_end = """ on duplicate key update
ticker =values(ticker),
name =values(name),
code =values(code),
sector =values(sector),
isdelisted =values(isdelisted),
ttable =values(ttable),
category =values(category),
exchange_id =values(exchange_id),
asset_class_id=values(asset_class_id),
data_vendor_id=values(data_vendor_id)
;"""
# Put the parts together
query = insert_init + all_vals + insert_end
#query = insert_init + vals + insert_end
# Fire insert statement
engine.execute(query)
Process=True
print('ticker table filled')
def fill_SP500_member():
################################################################ fill SP00 Members table
# exchange_id, vendor_id and asset_class_id relation is already stored in the security_id
# read data from file
file_name = 'SHARADAR_SP500' + ext
path = path_dir + file_name
data_read = pd.read_csv(path)
# get symbol and security_id from Quandl
query_result_df = get_symbol_security_id_quandl('SEP')
for index, row in tqdm(query_result_df.iterrows(), total=query_result_df.shape[0]):
tik = row.ticker
security_id = row.id
data = data_read.loc[data_read['ticker'] == tik ]
# handle NaN
data = data.fillna(0)
#print(data)
if not data.empty:
# sending the information to the security table
insert_init = """insert into SP500_const
(date, action, ticker, contraticker, security_id) values """
# Add values for all days to the insert statement
vals = ",".join(["""('{}','{}','{}','{}',{})""".format(
row.date,
row.action,
row.ticker,
row.contraticker,
security_id) for index, row in data.iterrows()])
insert_end = """ on duplicate key update
date =values(date),
action =values(action),
ticker =values(ticker),
contraticker =values(contraticker),
security_id =values(security_id)
;"""
# Put the 3 query parts together
query = insert_init + vals + insert_end
# Fire insert statement
engine.execute(query)
Process=True
print('SP500_const table filled')
def fill_corporate_action():
############################################################## fill corporate action
# exchange_id, vendor_id and asset_class_id relation is already stored in the security_id
# read data from file
file_name = 'SHARADAR_ACTIONS' + ext
path = path_dir + file_name
data_read = pd.read_csv(path)
# get symbol and security_id from Quandl
query_result_df = get_symbol_security_id_quandl('SEP')
for index, row in tqdm(query_result_df.iterrows(), total=query_result_df.shape[0]):
tik = row.ticker
security_id = row.id
data = data_read.loc[data_read['ticker'] == tik ]
# handle NaN
data = data.fillna(0)
if not data.empty:
# send the prices to the daily_price table
insert_init = """insert into corp_action
(date, action, value, contraticker, security_id) values """
# Add values for all days to the insert statement
vals = ",".join(["""('{}','{}',{},'{}',{})""".format (
row.date,
row.action,
row.value,
row.contraticker,
security_id ) for index, row in data.iterrows()])
insert_end = """ on duplicate key update
date =values(date),
action =values(action),
value =values(value),
contraticker =values(contraticker),
security_id =values(security_id)
;"""
# Put the 3 query parts together
query = insert_init + vals + insert_end
# Fire insert statement
#print(query)
engine.execute(query)
Process=True
print('corp_action table filled')
def fill_price_div_data(name):
    ###################################### populate price table ########### populate dividends table
    ###### this part is still very slow; it needs around 12 hours to store everything in the database
    # exchange_id, vendor_id and asset_class_id relation is already stored in the security_id
    # read prices into memory
if name == 'SEP':
file_name = 'SHARADAR_SEP' + ext
# get symbol and security_id from Quandl
query_result_df = get_symbol_security_id_quandl('SEP')
if name == 'SFP':
file_name = 'SHARADAR_SFP' + ext
# get symbol and security_id from Quandl
query_result_df = get_symbol_security_id_quandl('SFP')
path = path_dir + file_name
print('reading {} '.format(path))
data_price = pd.read_csv(path)
insert_init_price = """insert into daily_price
(trade_date, open, high, low, close, closeunadj, volume , security_id)
values """
insert_end_price = """ on duplicate key update
trade_date =values(trade_date),
open =values(open),
high =values(high),
low =values(low),
close =values(close),
closeunadj =values(closeunadj),
volume =values(volume),
security_id =values(security_id)
;"""
insert_init_div = """insert into dividends
(date, dividends, security_id) values """
insert_end_div = """ on duplicate key update
date =values(date),
dividends =values(dividends),
security_id =values(security_id)
;"""
#j=0 # got it from last run , TODO tqdm
for ticker, data in tqdm(data_price.groupby('ticker')):
#j=j+1
#print(j,ticker)
if not data.empty:
if not query_result_df[query_result_df.ticker == ticker ].empty:
security_id=query_result_df[query_result_df.ticker == ticker ].id.iloc[0]
# handle NaN, database error if get NaN
data = data.fillna(0)
#data.fillna(method='ffill', inplace=True)
#Add values for all days to the insert statement
vals = ",".join(["""('{}',{},{},{},{},{},{},{})""".format (
data.at[i, 'date'],
data.at[i, 'open'],
data.at[i, 'high'],
data.at[i, 'low'],
data.at[i, 'close'],
data.at[i, 'closeunadj'],
data.at[i, 'volume'],
security_id ) for i in data.index])
# Put the 3 query parts together
query = insert_init_price + vals + insert_end_price
# Fire insert statement
engine.execute(query)
Process=True
# send the dividends to the dividends table
# Add values for all days to the insert statement
vals = ",".join(["""('{}',{},{})""".format (
data.at[i, 'date'],
data.at[i, 'dividends'],
security_id ) for i in data.index])
# Put the 3 query parts together
query = insert_init_div + vals + insert_end_div
# Fire insert statement
engine.execute(query)
Process=True
else:
print(""" ("{}") not in the SEP or SEF dump file""".format(ticker) )
else:
# don't print that message for update==True, as a lot of the tickers are delisted
print("""Missing, no price data for ("{}") found""".format(ticker) )
print('{} daily_price table filled and dividends table filled'.format(name))
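# Editorial sketch (not part of the original loader): the string-built per-row inserts above are
# slow and fragile for values containing quotes. Assuming the same SQLAlchemy 1.x `engine` object
# used above, a hedged alternative is a parameterized batch insert (shown for the dividends table
# only); the helper name is illustrative.
from sqlalchemy import text

def insert_dividends_batch(engine, rows):
    """rows: list of dicts with keys 'date', 'dividends', 'security_id'."""
    stmt = text(
        "insert into dividends (date, dividends, security_id) "
        "values (:date, :dividends, :security_id) "
        "on duplicate key update dividends = values(dividends)"
    )
    # passing a list of dicts makes SQLAlchemy issue an executemany under the hood
    engine.execute(stmt, rows)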
def fill_fundamental_data():
############################################################ populate fundamentals table
###### this part is also still very slow to store in the database (see the parameterized-batch sketch above)
# exchange_id, vendor_id and asset_class_id relation is already stored in the security_id
# read the fundamentals file into memory
file_name = 'SHARADAR_SF1' + ext
path = path_dir + file_name
data_funda = pd.read_csv(path)
# Build a list with tickers from security table
query_result_df = get_symbol_security_id_quandl('SF1')
# build the insert statement for the fundamental table
insert_init = """insert into fundamental
(revenue, cor, sgna, rnd, opex, intexp, taxexp, netincdis, consolinc, netincnci,
netinc, prefdivis, netinccmn, eps, epsdil, shareswa, shareswadil, capex, ncfbus, ncfinv,
ncff, ncfdebt, ncfcommon, ncfdiv, ncfi, ncfo, ncfx, ncf, sbcomp, depamor,
assets, cashneq, investments, investmentsc, investmentsnc, deferredrev, deposits, ppnenet, inventory, taxassets,
receivables, payables, intangibles, liabilities, equity, retearn, accoci, assetsc, assetsnc, liabilitiesc,
liabilitiesnc, taxliabilities, debt, debtc, debtnc, ebt, ebit, ebitda, fxusd, equityusd,
epsusd, revenueusd, netinccmnusd, cashnequsd, debtusd, ebitusd, ebitdausd, sharesbas, dps, sharefactor,
marketcap, ev, invcap, equityavg, assetsavg, invcapavg, tangibles, roe, roa, fcf,
roic, gp, opinc, grossmargin, netmargin, ebitdamargin, ros, assetturnover, payoutratio, evebitda,
evebit, pe, pe1, sps, ps1, ps, pb, de, divyield, currentratio,
workingcapital, fcfps, bvps, tbvps, price, ticker, dimension, calendardate, datekey,reportperiod,
lastupdated, security_id) values """
insert_end = """ on duplicate key update
revenue =values(revenue),
cor =values(cor),
sgna =values(sgna),
rnd =values(rnd),
opex =values(opex),
intexp =values(intexp),
taxexp =values(taxexp),
netincdis =values(netincdis),
consolinc =values(consolinc),
netincnci =values(netincnci),
netinc =values(netinc),
prefdivis =values(prefdivis),
netinccmn =values(netinccmn),
eps =values(eps),
epsdil =values(epsdil),
shareswa =values(shareswa),
shareswadil =values(shareswadil),
capex =values(capex),
ncfbus =values(ncfbus),
ncfinv =values(ncfinv),
ncff =values(ncff),
ncfdebt =values(ncfdebt),
ncfcommon =values(ncfcommon),
ncfdiv =values(ncfdiv),
ncfi =values(ncfi),
ncfo =values(ncfo),
ncfx =values(ncfx),
ncf =values(ncf),
sbcomp =values(sbcomp),
depamor =values(depamor),
assets =values(assets),
cashneq =values(cashneq),
investments =values(investments),
investmentsc =values(investmentsc),
investmentsnc =values(investmentsnc),
deferredrev =values(deferredrev),
deposits =values(deposits),
ppnenet =values(ppnenet),
inventory =values(inventory),
taxassets =values(taxassets),
receivables =values(receivables),
payables =values(payables),
intangibles =values(intangibles),
liabilities =values(liabilities),
equity =values(equity),
retearn =values(retearn),
accoci =values(accoci),
assetsc =values(assetsc),
assetsnc =values(assetsnc),
liabilitiesc =values(liabilitiesc),
liabilitiesnc =values(liabilitiesnc),
taxliabilities =values(taxliabilities),
debt =values(debt),
debtc =values(debtc),
debtnc =values(debtnc),
ebt =values(ebt),
ebit =values(ebit),
ebitda =values(ebitda),
fxusd =values(fxusd),
equityusd =values(equityusd),
epsusd =values(epsusd),
revenueusd =values(revenueusd),
netinccmnusd =values(netinccmnusd),
cashnequsd =values(cashnequsd),
debtusd =values(debtusd),
ebitusd =values(ebitusd),
ebitdausd =values(ebitdausd),
sharesbas =values(sharesbas),
dps =values(dps),
sharefactor =values(sharefactor),
marketcap =values(marketcap),
ev =values(ev),
invcap =values(invcap),
equityavg =values(equityavg),
assetsavg =values(assetsavg),
invcapavg =values(invcapavg),
tangibles =values(tangibles),
roe =values(roe),
roa =values(roa),
fcf =values(fcf),
roic =values(roic),
gp =values(gp),
opinc =values(opinc),
grossmargin =values(grossmargin),
netmargin =values(netmargin),
ebitdamargin =values(ebitdamargin),
ros =values(ros),
assetturnover =values(assetturnover),
payoutratio =values(payoutratio),
evebitda =values(evebitda),
evebit =values(evebit),
pe =values(pe),
pe1 =values(pe1),
sps =values(sps),
ps1 =values(ps1),
ps =values(ps),
pb =values(pb),
de =values(de),
divyield =values(divyield),
currentratio =values(currentratio),
workingcapital =values(workingcapital),
fcfps =values(fcfps),
bvps =values(bvps),
tbvps =values(tbvps),
price =values(price),
ticker =values(ticker),
dimension =values(dimension),
calendardate =values(calendardate),
datekey =values(datekey),
reportperiod =values(reportperiod),
lastupdated =values(lastupdated),
security_id =values(security_id)
;"""
#i=0 # got it from last run , TODO tqdm
for ticker, result in tqdm(data_funda.groupby('ticker')):
#i=i+1
#print(i,ticker)
if not result.empty:
if not query_result_df[query_result_df.ticker == ticker ].empty:
security_id=query_result_df[query_result_df.ticker == ticker ].id.iloc[0]
# copy required dimensions into data
# note: result.loc[result.dimension == 'MRY'] and result[result.dimension == 'MRY'] return the same rows (both are boolean-mask indexing)
#result6 = result.loc[result.dimension == 'MRY' ]
#result5 = result.loc[result.dimension == 'MRQ']
#result4 = result.loc[result.dimension == 'MRT']
result3 = result.loc[result.dimension == 'ARY']
result2 = result.loc[result.dimension == 'ARQ' ]
result1 = result.loc[result.dimension == 'ART' ]
data = pd.concat([result1, result2, result3], ignore_index=True)
if not data.empty:
# handle NaN, database error if get NaN
data = data.fillna(0)
#data.fillna(method='ffill', inplace=True) # did not work
# Add values for all days to the insert statement
vals = ",".join(["""( {},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},
{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},
{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},
{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},
{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},
{},{},{},{},{},'{}','{}','{}','{}','{}',
'{}',{})""".format (
row.revenue,
row.cor,
row.sgna,
row.rnd,
row.opex,
row.intexp,
row.taxexp,
row.netincdis,
row.consolinc,
row.netincnci,
row.netinc,
row.prefdivis,
row.netinccmn,
row.eps,
row.epsdil,
row.shareswa,
row.shareswadil,
row.capex,
row.ncfbus,
row.ncfinv,
row.ncff,
row.ncfdebt,
row.ncfcommon,
row.ncfdiv,
row.ncfi,
row.ncfo,
row.ncfx,
row.ncf,
row.sbcomp,
row.depamor,
row.assets,
row.cashneq,
row.investments,
row.investmentsc,
row.investmentsnc,
row.deferredrev,
row.deposits,
row.ppnenet,
row.inventory,
row.taxassets,
row.receivables,
row.payables,
row.intangibles,
row.liabilities,
row.equity,
row.retearn,
row.accoci,
row.assetsc,
row.assetsnc,
row.liabilitiesc,
row.liabilitiesnc,
row.taxliabilities,
row.debt,
row.debtc,
row.debtnc,
row.ebt,
row.ebit,
row.ebitda,
row.fxusd,
row.equityusd,
row.epsusd,
row.revenueusd,
row.netinccmnusd,
row.cashnequsd,
row.debtusd,
row.ebitusd,
row.ebitdausd,
row.sharesbas,
row.dps,
row.sharefactor,
row.marketcap,
row.ev,
row.invcap,
row.equityavg,
row.assetsavg,
row.invcapavg,
row.tangibles,
row.roe,
row.roa,
row.fcf,
row.roic,
row.gp,
row.opinc,
row.grossmargin,
row.netmargin,
row.ebitdamargin,
row.ros,
row.assetturnover,
row.payoutratio,
row.evebitda,
row.evebit,
row.pe,
row.pe1,
row.sps,
row.ps1,
row.ps,
row.pb,
row.de,
row.divyield,
row.currentratio,
row.workingcapital,
row.fcfps,
row.bvps,
row.tbvps,
row.price,
row.ticker,
row.dimension,
row.calendardate,
row.datekey,
row.reportperiod,
row.lastupdated,
security_id) for index, row in data.iterrows()])
# Put the 3 query parts together
query = insert_init + vals + insert_end
# Fire insert statement
engine.execute(query)
Process=True
else:
# theoretically this should not happen: it only occurs if a ticker is in the fundamentals file but not in the ticker list
print("""Strange, no ticker found in security db but fundamental data for ("{}") exist""".format(ticker) )
else:
# don't print that message for update==True, as a lot of the tickers are delisted
print("""Missing, no fundamental data for ("{}") found""".format(ticker) )
print('fundamental table filled')
if __name__ == '__main__':
# update or initialize same method
if from_quandl == True:
print('get ticker data from Quandl...')
file_name = 'SHARADAR_TICKERS.zip'
path = path_dir + file_name
dummy = quandl.export_table('SHARADAR/TICKERS', filename=path)
# download SP500 Members table
print('get SP500 Members from Quandl...')
file_name = 'SHARADAR_SP500.zip'
path = path_dir + file_name
dummy = quandl.export_table('SHARADAR/SP500', filename=path)
# download corporate action
print('get corporate action from Quandl...')
file_name = 'SHARADAR_ACTIONS.zip'
path = path_dir + file_name
dummy = quandl.export_table('SHARADAR/ACTIONS', filename=path)
# download SEP price data
print('get price data from Quandl...downloading SEP price data, takes about 5 min')
file_name = 'SHARADAR_SEP.zip'
path = path_dir + file_name
dummy = quandl.export_table('SHARADAR/SEP', filename=path)
# download SFP price data
print('get price data from Quandl...downloading SFP price data, takes about 5 min')
file_name = 'SHARADAR_SFP.zip'
path = path_dir + file_name
dummy = quandl.export_table('SHARADAR/SFP', filename=path)
# download fundamental data
print('get fundamental data from Quandl...downloading fundamental data, takes about 5 min')
file_name = 'SHARADAR_SF1.zip'
path = path_dir + file_name
dummy = quandl.export_table('SHARADAR/SF1', filename=path)
print('All files downloaded from Quandl')
# else: read everything from the previously downloaded files on disk
initalise_database()
fill_ticker()
fill_SP500_member()
fill_corporate_action()
fill_price_div_data('SEP')
fill_price_div_data('SFP')
fill_fundamental_data()
print('job done')
|
[
"noreply@github.com"
] |
carstenf.noreply@github.com
|
3d1e6770334522878a01c66e9e49e03b04187275
|
31b482a5a44209a7a0607acd7a28ff30a70fc412
|
/aladdin-cas/init/init_object_detection.py
|
dca6024bb4d48c28e4eac7bb6616da5ddc0e201e
|
[] |
no_license
|
ARES3366/aladdin
|
3ac526ccff98972233c05c262e38e434d0fcf235
|
fb268cf7901322bb16b8b295f2c791628665bfb8
|
refs/heads/main
| 2023-08-20T11:42:21.614951
| 2021-10-28T07:25:55
| 2021-10-28T07:25:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,755
|
py
|
from utils import get_mongo_client, mongo_config, milvus_config
from time import sleep
from milvus import Milvus, IndexType, MetricType, Status
def init_object_detection_mongodb():
try:
print(mongo_config)
client, _ = get_mongo_client(mongo_config)
db = client[mongo_config["db"]]
db["object_detection"].create_index("url")
db["object_detection"].create_index("vid")
db["tbl_face_info"].drop()
return True
except Exception as e:
print(e)
return False
def init_object_detection_milvus():
collection_name = "object_detection"
try:
print("Name of Milvus' Collection : {} ".format(collection_name))
milvus = Milvus(host=milvus_config["host"], port=milvus_config["port"])
param = {
'collection_name': collection_name,
'dimension': 128,
'index_file_size': 1024,
'metric_type': MetricType.L2
}
milvus.create_collection(param)
for i in range(1000):
milvus.create_partition(collection_name, str(i))
ivf_param = {"m": 16, "nlist": 1024}
milvus.create_index(collection_name, IndexType.IVF_PQ, ivf_param)
return True
except Exception as e:
print(e)
return False
def init_face_recognition_milvus():
collection_name = "face_recognition"
try:
print("Name of Milvus' Collection : {} ".format(collection_name))
milvus = Milvus(host=milvus_config["host"], port=milvus_config["port"])
param = {
'collection_name': collection_name,
'dimension': 128,
'index_file_size': 1024,
'metric_type': MetricType.L2
}
milvus.create_collection(param)
ivf_param = {"m": 16, "nlist": 1024}
milvus.create_index(collection_name, IndexType.IVF_PQ, ivf_param)
return True
except Exception as e:
print(e)
return False
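# Editorial sketch (not part of the original init code): create_collection fails when the
# collection already exists, so re-running the functions above is not idempotent. Assuming the
# pymilvus client used above exposes has_collection returning a (status, bool) pair (this varies
# by client version), a guard could look like this; the helper name is illustrative.
def ensure_collection(collection_name, dimension=128):
    milvus = Milvus(host=milvus_config["host"], port=milvus_config["port"])
    _, exists = milvus.has_collection(collection_name)
    if not exists:
        milvus.create_collection({
            'collection_name': collection_name,
            'dimension': dimension,
            'index_file_size': 1024,
            'metric_type': MetricType.L2
        })
    return milvus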
def init_object_detection():
x = 1
while True:
print("=="*14 + " Ready to Init MongoDB In Object Detection : {} ".format(x) + "=="*14)
res = init_object_detection_mongodb()
if res:
print("=="*14 + " Successfully Initialized MongoDB In Object Detection " + "=="*14 + "\n\n")
break
else:
print("=="*14 + " Failed Initialization MongoDB In Object Detection " + "=="*14 + "\n\n")
print("Sleep for {ti} minutes and try again : {tim} times.".format(ti=5*x, tim=x))
sleep(300*x)
continue
y = 1
while True:
print("=="*14 + " Ready to Init Milvus In Object Detection : {} ".format(y) + "=="*14)
res = init_object_detection_milvus()
if res:
print("=="*14 + " Successfully Initialized Milvus In Object Detection " + "=="*14 + "\n\n")
break
else:
print("=="*14 + " Failed Initialization Milvus In Object Detection " + "=="*14 + "\n\n")
print("Sleep for {ti} minutes and try again : {tim} times.".format(ti=5*y, tim=y))
sleep(300*y)
continue
while True:
print("=="*14 + " Ready to Init Milvus In Object Detection : {} ".format(y) + "=="*14)
res = init_face_recognition_milvus()
if res:
print("=="*14 + " Successfully Initialized Milvus In Object Detection " + "=="*14 + "\n\n")
break
else:
print("=="*14 + " Failed Initialization Milvus In Object Detection " + "=="*14 + "\n\n")
print("Sleep for {ti} minutes and try again : {tim} times.".format(ti=5*y, tim=y))
sleep(300*y)
continue
if __name__ == "__main__":
init_object_detection()
|
[
"1010276502@qq.com"
] |
1010276502@qq.com
|
818d0b9f2d7a27de56e82aac986ec04fa6af6318
|
05dfa7cf71dddb604d8835cc9c16f31c4992f260
|
/orders/context_processors.py
|
631bf008fc3146b010dde8f7c743a12a4895e6c8
|
[] |
no_license
|
asanisimova/ProjectWeb
|
c605f2848a1a0352b378dd010e0515e292b52392
|
46f5566fcc1573a999cb0b174679c38e0a2ba453
|
refs/heads/master
| 2022-11-23T09:01:38.041139
| 2020-01-12T18:43:57
| 2020-01-12T18:43:57
| 230,278,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from .models import ProductInBasket
def getting_basket_info(request):
session_key = request.session.session_key
if not session_key:
request.session.cycle_key()
products_in_basket = ProductInBasket.objects.filter(session_key=session_key, is_active=True, order__isnull=True)
products_total_nmb = products_in_basket.count()
return locals()
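# Editorial sketch (not part of this file): getting_basket_info only runs on every template render
# if it is registered in settings.py; the dotted path below assumes the app is named "orders", as
# in this file's location. In settings.py:
#
# TEMPLATES = [
#     {
#         'BACKEND': 'django.template.backends.django.DjangoTemplates',
#         'DIRS': [],
#         'APP_DIRS': True,
#         'OPTIONS': {
#             'context_processors': [
#                 'django.template.context_processors.request',
#                 'orders.context_processors.getting_basket_info',
#             ],
#         },
#     },
# ]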
|
[
"anna_anisimova1997@mail.ru"
] |
anna_anisimova1997@mail.ru
|
18e3a7a37db089ffd5b2d90e18d157f16cdf3498
|
63df49317f5ef093e572c3cb8cd635c2e74ef09a
|
/ep_clustering/likelihoods/_gaussian_likelihood.py
|
82b347981917fe64c664071636365dcd3a5cb48a
|
[
"MIT"
] |
permissive
|
PeiKaLunCi/EP_Collapsed_Gibbs
|
e4015b9b1dbef8b80a837146794dd71e32ee2c30
|
3b2e8c3addeab2343837b9e86e9cb57b00798b9a
|
refs/heads/master
| 2023-03-16T03:12:27.608096
| 2019-12-08T00:24:08
| 2019-12-10T06:23:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,234
|
py
|
#!/usr/bin/env python
"""
Gaussian Likelihood
"""
# Import Modules
import numpy as np
import logging
from ep_clustering._utils import fix_docs
from ep_clustering.likelihoods._likelihoods import Likelihood
from ep_clustering.exp_family._normal_mean import (
NormalFamily,
DiagNormalFamily
)
logger = logging.getLogger(name=__name__)
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
logging.basicConfig(
level = logging.INFO,
format = LOGGING_FORMAT,
)
@fix_docs
class GaussianLikelihood(Likelihood):
""" Gaussian Likelihood Object
Args:
**kwargs :
variance (double) - cluster noise
"""
# Inherit Docstrings
__doc__ += Likelihood.__doc__
# Class Variables
name = "Gaussian"
def __init__(self, data, **kwargs):
self.y = data.matrix
self.num_dim = data.num_dim
super(GaussianLikelihood, self).__init__(data, **kwargs)
return
def _get_default_prior(self):
theta_prior = DiagNormalFamily(num_dim = self.num_dim,
precision=np.ones(self.num_dim)/100.0)
return theta_prior
def _get_default_parameters(self):
"""Returns default parameters dict"""
default_parameter = {
"variance": np.ones(self.num_dim),
}
return default_parameter
def _get_default_parameters_prior(self):
"""Returns default parameters prior dict"""
prior = {
"alpha_variance0": 3.0,
"beta_variance0": 2.0,
}
return prior
def _sample_from_prior(self):
parameter = {
"variance": 1.0/np.random.gamma(
shape=self.prior.alpha_variance0,
scale=1.0/self.prior.beta_variance0,  # rate parameterization, matching _update_variance below
size=self.num_dim)
}
return parameter
def loglikelihood(self, index, theta):
y = self.y[index]
loglikelihood = -0.5*((y-theta)/self.parameter.variance).dot(y-theta) +\
-0.5*self.num_dim*np.log(2*np.pi) + \
-0.5*np.sum(np.log(self.parameter.variance))
return loglikelihood
def collapsed(self, index, subset_indices, theta_parameter):
posterior = theta_parameter
for s_index in subset_indices:
s_y = self.y[s_index]
posterior = (posterior +
DiagNormalFamily.from_mean_variance(
mean=s_y,
variance=self.parameter.variance)
)
mean = posterior.get_mean()
variance = posterior.get_variance() + self.parameter.variance
y = self.y[index]
loglikelihood = -0.5*((y-mean)/variance).dot(y-mean) + \
-0.5*self.num_dim*np.log(2*np.pi) + \
-0.5*np.sum(np.log(variance))
return loglikelihood
def moment(self, index, theta_parameter):
y = self.y[index]
site = DiagNormalFamily.from_mean_variance(
mean=y,
variance=self.parameter.variance,
)
unnormalized_post_approx = (theta_parameter + site)
unnormalized_post_approx.log_scaling_coef = \
unnormalized_post_approx.logpartition() - \
(theta_parameter.logpartition() + site.logpartition())
return unnormalized_post_approx
def sample(self, indices, prior_parameter):
posterior = prior_parameter
for index in indices:
y = self.y[index]
posterior = (posterior +
DiagNormalFamily.from_mean_variance(
mean=y,
variance=self.parameter.variance)
)
mean = posterior.get_mean()
variance = posterior.get_variance()
return np.random.randn(posterior.num_dim)*np.sqrt(variance) + mean
def update_parameters(self, z, theta, parameter_name = None):
if parameter_name is None:
self._update_variance(z, theta)
elif parameter_name == "variance":
self._update_variance(z, theta)
else:
raise ValueError("Unrecognized parameter_name: " + parameter_name)
return
def _update_variance(self, z, theta, k_list=None):
if k_list is None:
k_list = range(np.shape(theta)[0])
sse = 0
N = 0
for k in k_list:
ind = (z == k)
N += np.sum(ind)
sse += np.sum((self.y[ind,:] - theta[k]) ** 2)
alpha_variance = self.prior.alpha_variance0 + N/2.0
beta_variance = self.prior.beta_variance0 + sse/2.0
self.parameter.variance = 1.0/np.random.gamma(shape = alpha_variance,
scale = 1.0/beta_variance, size = self.num_dim)
return
def update_local_parameters(self, k, z, theta, parameter_name = None):
if parameter_name is None:
self._update_variance(z, theta, k_list=[k])
elif parameter_name == "variance":
self._update_variance(z, theta, k_list=[k])
else:
raise ValueError("Unrecognized parameter_name: " + parameter_name)
return
# EOF
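# Editorial sketch (not part of the module): the per-point loglikelihood above is the standard
# diagonal Gaussian log-density, which can be cross-checked against scipy; the numbers below are
# made up and any y/theta/variance of matching dimension would do.
import numpy as np
from scipy.stats import multivariate_normal

y = np.array([0.3, -1.2])
theta = np.array([0.0, -1.0])
variance = np.array([0.5, 2.0])
manual = (-0.5 * ((y - theta) / variance).dot(y - theta)
          - 0.5 * len(y) * np.log(2 * np.pi)
          - 0.5 * np.sum(np.log(variance)))
reference = multivariate_normal.logpdf(y, mean=theta, cov=np.diag(variance))
assert np.isclose(manual, reference)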
|
[
"aicherc@uw.edu"
] |
aicherc@uw.edu
|
e2cbf792b9c854508db3d2a349801d2165b6ffec
|
5e29a10631cf23e54adba1109e13c345e2eafba4
|
/selenium_test/Web_TimeEvent_Test.py
|
c9fba36dbe0cfc257157a60e7276193cc2a375fe
|
[] |
no_license
|
kuangtao94/Selenium
|
1926c9c11790e6fe741fdeb5b0f05278686ebc31
|
128203d3446b31b31593b7e605a99f75f32e8e40
|
refs/heads/master
| 2020-06-29T07:10:41.766221
| 2019-11-23T16:25:19
| 2019-11-23T16:25:19
| 200,471,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
import time as t
driver = webdriver.Chrome()
driver.maximize_window()
# driver.get("http://www.baidu.com")
driver.get("https://kyfw.12306.cn/otn/leftTicket/init?linktypeid=wf")
driver.implicitly_wait(10)
# driver.find_element_by_id("kw").send_keys("selenium")
# print(driver.find_element_by_id("kw").get_attribute("value"))
# action = ActionChains(driver)
# chepiao = driver.find_element_by_class_name("nav-hd")
# action.move_to_element(chepiao).perform()
# t.sleep(2)
#
# wangfang = driver.find_element_by_link_text("往返")
# action.click(wangfang)
# t.sleep(2)
# wait until the round-trip tab is available, then switch to it
WebDriverWait(driver, 10).until(lambda d: d.find_element_by_id("wf_label"))
driver.find_element_by_id("wf_label").click()
# departure station
driver.find_element_by_id("fromStationText").clear()
driver.find_element_by_id("fromStationText").send_keys("东莞\n")
t.sleep(3)
# arrival station
driver.find_element_by_id("toStationText").clear()
driver.find_element_by_id("toStationText").send_keys("广州\n")
t.sleep(3)
# handle the departure date
# use JS to remove the readonly attribute
js = 'document.getElementById("train_date").removeAttribute("readonly");'
driver.execute_script(js)
# set the date value via JS
js_value = 'document.getElementById("train_date").value="2019-05-01"'
driver.execute_script(js_value)
t.sleep(3)
# handle the return date (same readonly-removal and value-assignment pattern)
JS = 'document.getElementById("back_train_date").removeAttribute("readonly");'
driver.execute_script(JS)
end_js = 'document.getElementById("back_train_date").value="2019-05-03"'
driver.execute_script(end_js)
t.sleep(3)
# click the query button
driver.find_element_by_id("query_ticket").click()
t.sleep(2)
# close the browser
driver.quit()
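# Editorial sketch (not part of the original script): the readonly-removal + value-assignment
# pattern above is repeated for both date fields; a small helper keeps it in one place. The
# element ids and dates are the ones used above; the helper name is illustrative.
def set_date_by_js(driver, element_id, date_value):
    """Remove the readonly attribute of a date input and set its value via JavaScript."""
    driver.execute_script(
        'document.getElementById(arguments[0]).removeAttribute("readonly");'
        'document.getElementById(arguments[0]).value = arguments[1];',
        element_id, date_value)

# equivalent to the two blocks above:
# set_date_by_js(driver, "train_date", "2019-05-01")
# set_date_by_js(driver, "back_train_date", "2019-05-03")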
|
[
"1512500241@qq.com"
] |
1512500241@qq.com
|
4241e79403d4a85677d2e3a33a6482de9a703ffa
|
3740de0d6e43ea140fc09ab314e4c492603ba185
|
/scripts/sources/s_evaluation_satis_scenprob.py
|
3b2ec5f3a31142b953373d3773527a921aa13e8e
|
[
"MIT"
] |
permissive
|
s0ap/arpmRes
|
29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
refs/heads/master
| 2022-02-16T05:01:22.118959
| 2019-08-20T16:45:02
| 2019-08-20T16:45:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,446
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.5
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_evaluation_satis_scenprob [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_evaluation_satis_scenprob&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=EBEvalNumericalExample).
# +
import pandas as pd
import numpy as np
from scipy.optimize import fsolve
from scipy.stats import norm
from arpym.portfolio import spectral_index
from arpym.statistics import meancov_sp, quantile_sp
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-parameters)
c = 0.99 # confidence level
lam_1 = 0.5 # parameter for mean-variance and mean-semideviation trade-off
lam_2 = 2 # parameter for certainty-equivalent (exponential function)
alpha = 0.25 # parameter for α-expectile
zeta = 2 # parameter for Esscher expectation
theta = -0.1 # parameter for Wang expectation
alpha_ph = 0.5 # parameter for proportional hazards expectation
r = 0.0001 # target for omega ratio
z = np.array([-0.0041252, -0.00980853, -0.00406089, 0.02680999]) # risk factor
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step01): Load data from db_aggregation_scenario_numerical
j_ = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['j_'], nrows=1).values[0, 0].astype(int)
n_ = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['n_'], nrows=1).values[0, 0].astype(int)
pi = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['pi']).values.reshape(j_, n_)
p = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['p']).iloc[:j_].values.reshape(j_, )
v_h = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['v_h'], nrows=1).values[0, 0].astype(int)
v_b = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['v_b'], nrows=1).values[0, 0].astype(int)
h = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['h']).iloc[:n_].values.reshape(n_, )
h_b = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['h_b']).iloc[:n_].values.reshape(n_, )
h_tilde = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['h_tilde']).iloc[:n_].values.reshape(n_, )
r_h = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['r_h']).iloc[:j_].values.reshape(j_, )
pi_b_resc = pd.read_csv('../../../databases/temporary-databases/db_aggregation_scenario_numerical.csv',
usecols=['pi_b_resc']).iloc[:j_].values.reshape(j_, )
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step02): Compute the expectation, variance, standard deviation of the ex-ante performance
mu_r_h, s2_r_h = meancov_sp(r_h, p) # ex-ante performance exp. and var.
std_r_h = np.sqrt(s2_r_h) # standard deviation
s2_satis = - s2_r_h # variance index of satisfaction
std_satis = -std_r_h # standard deviation index of satisfaction
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step03): Compute mean-variance trade-off, expectation and covariance of the instruments P&L's, and then the mean-variance trade-off
mv_r_h = mu_r_h - lam_1 / 2 * s2_r_h # mean-variance trade-off (definition)
mu_pi, s2_pi = meancov_sp(pi, p) # instruments P&L's exp. and cov.
mv_r_h_1 = h_tilde@mu_pi - (lam_1 / 2) * h_tilde@s2_pi@h_tilde # mean-variance trade-off (quadratic form)
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step04): Compute the certainty-equivalent
# +
# exponential utility function
def exp_utility_f(y, lam):
return -np.exp(-lam * y)
# inverse exponential utility function
def ce_f(z, lam):
return -(1 / lam) * np.log(-z)
utility_r_h = exp_utility_f(r_h, lam_2) # utility
mu_utility = utility_r_h@p # expected utility computation
cert_eq_r_h = ce_f(mu_utility, lam_2) # certainty-equivalent
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step05): Compute the quantile (VaR)
q_r_h = quantile_sp(1-c, r_h, p=p)
# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step06): Compute the Expected shortfall
# +
# compute expected shortfall using spectral_index
# indicator function
def indicator(x):
return (0 <= x and x <= 1-c)
# spectrum function
def spectr_es(x):
return (1 / (1 - c)) * indicator(x)
# negative expected shortfall
es2, _ = spectral_index(spectr_es, pi, p, h_tilde)
r_h_sort = np.sort(r_h)
index = np.argsort(r_h)
p_sort = p[index]
u_sort = np.r_[0, np.cumsum(p_sort)] # cumulative sum of ordered probs.
j_c = next(i for i, x in enumerate(u_sort) if 0 <= x and x <= 1-c)
es = np.sum(r_h_sort[:j_c+1])/(1-c)
# -
# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step07): Compute the Wang expectation
f_wang = norm.cdf(norm.ppf(np.cumsum(p_sort)) - theta)
w_wang_spectr = np.append(f_wang[0], np.diff(f_wang))
wang_expectation_r_h = r_h_sort@w_wang_spectr
# ## [Step 8](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step08): Compute the proportional hazard expectation
f_prop_haz = (np.cumsum(p_sort)) ** alpha_ph # proportional hazards transform
w_prop_haz_spectr = np.append(f_prop_haz[0], np.diff(f_prop_haz)) # derivative
# ex-ante performance proportional hazards expectation
prop_haz_expectation_r_h = r_h_sort@w_prop_haz_spectr
# ## [Step 9](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step09): Compute the mean-semideviation trade-off
semiv_r_h = sum(((r_h[r_h <= mu_r_h] - mu_r_h) ** 2)
* p[r_h <= mu_r_h]) # ex-ante performance semivariance
semid_r_h = (semiv_r_h) ** (0.5) # ex-ante performance semideviation
# ex-ante performance mean-semideviation trade-off
msemid_r_h = mu_r_h - lam_1 * semid_r_h
# ## [Step 10](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step10): Compute the alpha-expectile
# +
def expectile_f(x, p, alpha):
return alpha * np.sum(p * np.maximum(r_h - x, 0)) + \
(1 - alpha) * (np.sum(p * np.minimum(r_h - x, 0)))
# ex-ante performance α-expectile
expectile_r_h = fsolve(expectile_f, -0.01, args=(p, alpha))
# -
# ## [Step 11](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step11): Compute information ratio, Sortino ratio and omega ratio
info_ratio_r_h = mu_r_h / std_r_h # ex-ante performance information ratio
# ex-ante performance Sortino ratio
sortino_ratio_r_h = (mu_r_h - r) / np.sqrt((np.maximum(r - r_h, 0) ** 2)@p)
# ex-ante performance omega ratio
omega_ratio_r_h = (np.maximum(r_h - r, 0)@p) / (np.maximum(r - r_h, 0)@p)
omega_ratio_1_r_h = (r_h@p - r) / (np.maximum(r - r_h, 0)@p) + 1
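# editorial check (sketch): the two omega-ratio formulas above coincide, because
# E[max(R - r, 0)] - E[max(r - R, 0)] = E[R] - r, hence Omega = (E[R] - r) / E[max(r - R, 0)] + 1
assert np.isclose(omega_ratio_r_h, omega_ratio_1_r_h)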
# ## [Step 12](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step12): Compute the scenario-probability distribution of factor Z, beta, correlation
mu_z, s2_z = meancov_sp(z, p) # variance of z
cv_yz = (r_h * z)@p - mu_r_h * mu_z # covariance of r_h and z
beta_r_h_z = - cv_yz / s2_z # ex-ante performance opposite of beta
# opposite of correlation between performance and factor
corr_r_h_z = - cv_yz / (np.sqrt(s2_r_h) * np.sqrt(s2_z))
# ## [Step 13](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step13): Compute the Buhlmann expectation and the Esscher expectation
# +
bulhmann_expectation_r_h = meancov_sp(np.exp(-zeta * pi_b_resc) * r_h, p)[0] \
/ meancov_sp(np.exp(-zeta * pi_b_resc), p)[0]
esscher_expectation_r_h = meancov_sp(np.exp(-zeta * r_h) * r_h, p)[0] \
/ meancov_sp(np.exp(-zeta * r_h), p)[0]
# -
# ## [Step 14](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_satis_scenprob-implementation-step14): Save the data
# +
output = {'s2_satis': pd.Series(s2_satis),
'std_satis': pd.Series(std_satis),
'wang_expectation_r_h': pd.Series(wang_expectation_r_h),
'prop_haz_expectation_r_h': pd.Series(prop_haz_expectation_r_h),
'expectile_r_h': pd.Series(expectile_r_h),
'bulhmann_expectation_r_h': pd.Series(bulhmann_expectation_r_h),
'esscher_expectation_r_h': pd.Series(esscher_expectation_r_h)
}
df = pd.DataFrame(output)
df.to_csv('../../../databases/temporary-databases/db_evaluation_scenprob.csv')
|
[
"dario.popadic@yahoo.com"
] |
dario.popadic@yahoo.com
|
64eed1d7f291132bd242926d2ed2a29df9025286
|
70bbd6e465c2360e71899077a15b376c92d872ea
|
/config.py
|
2a7ec4a2de9d2b0755d217bb93a1179b416c61b9
|
[] |
no_license
|
anchyy/Zavrsni
|
afe531f590031ef4991bd2b48725ff557dcc9004
|
e385db393484ded76a39499783723bac7f3e4572
|
refs/heads/master
| 2022-12-31T09:15:55.683956
| 2020-10-21T15:52:50
| 2020-10-21T15:52:50
| 296,143,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY='gvpTrsDaf5vgzIzdzC2XKA'
SQLALCHEMY_DATABASE_URI='sqlite:///webshopDB.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False  # disabled so that less memory is used
SQLALCHEMY_ECHO= True
ENV = 'development'
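# Editorial sketch (not part of the original file): the Config class above is typically activated
# in the application module/factory with from_object, roughly like this (Flask assumed installed):
#
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object(Config)
# db_uri = app.config['SQLALCHEMY_DATABASE_URI']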
|
[
"ana.strenja88@gmail.com"
] |
ana.strenja88@gmail.com
|
09222c4ffe43f89b913e1ae57331c6ad73adb1c9
|
25b007b58317f6f920dc3ca56b5d4a9291096ca2
|
/Intense/migrations/0019_auto_20201006_1727.py
|
e052d45e385957d99d5fd9c5ab0e671d1b8ded3d
|
[] |
no_license
|
kmfaizullah/tango-backend
|
2a6b79195f86f156f41d780b966d3617724d2527
|
f6b377fdfe7dc1deee7b0f3df6eba4dfb39b128c
|
refs/heads/master
| 2023-01-04T21:08:29.764484
| 2020-10-11T10:47:38
| 2020-10-11T10:47:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Generated by Django 2.2.15 on 2020-10-06 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Intense', '0018_user_role'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(blank=True, db_index=True, max_length=255, null=True),
),
]
|
[
"sameesayeed880@gmail.com"
] |
sameesayeed880@gmail.com
|
07bf834854fde7fe6a8945e0e36b96c2d80530b1
|
cb4be2d145c529192cad597ebf6bba8aed0ec12e
|
/scripts/00_important/mec_shelf_loader/shelves/00_trash/Thomas_Nathan_CRI1_1409_rigging_tools.py
|
2081cd5f275dc4ee7849f12dbdd2772d02882a98
|
[] |
no_license
|
mclavan/Work-Maya-Folder
|
63e791fdbd6f8ac1f4fda2d46015cd98df38825c
|
c56dbdb85a7b1a87ef6dd35296c56e0057254617
|
refs/heads/master
| 2020-05-20T07:21:15.891179
| 2014-10-17T14:28:45
| 2014-10-17T14:28:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,181
|
py
|
'''
Nathan Thomas
Thomas_Nathan_CRI1_1409_rigging_tools.py
Description
A group of rigging related tools
How to Run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
NOTE: GETTING SELECTED:
start with:
selected = pm.ls(selection=True)
'''
print 'Rigging Tools Active'
import pymel.core as pm
def tool():
'''
Description: example/template tool.
How to Run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.tool()
'''
print 'example tool.'
def unlock_and_show():
'''
Description: A shortcut for unlocking and showing attributes
Can switch inputs around to hide and lock attributes.
How to Run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.unlock_and_show()
'''
selected = pm.ls(selection=True)
print 'Currently selected', selected
first_selected = selected[0]
# t means translate,
# r means rotate
# s means scale
# v means visibility
# x,y,z are variables. Lock = false means unlocked, true
# means locked. Keyable = true means show, false means hide.
# translate
first_selected.tx.set(lock=False, keyable=True)
first_selected.ty.set(lock=False, keyable=True)
first_selected.tz.set(lock=False, keyable=True)
# rotate
first_selected.rx.set(lock=False, keyable=True)
first_selected.ry.set(lock=False, keyable=True)
first_selected.rz.set(lock=False, keyable=True)
#scale
first_selected.sx.set(lock=False, keyable=True)
first_selected.sy.set(lock=False, keyable=True)
first_selected.sz.set(lock=False, keyable=True)
# visibility
first_selected.v.set(lock=False,keyable=True)
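# Editorial sketch (not part of the original tool set): the same unlock/show pattern written as a
# loop over the standard transform attributes and visibility; works on any selected PyMEL node.
def set_transform_attrs(node, lock=False, keyable=True):
    '''
    Lock/unlock and hide/show translate, rotate, scale and visibility in one call.
    '''
    for attr_name in ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']:
        node.attr(attr_name).set(lock=lock, keyable=keyable)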
def snapping_tool():
'''
Description: snaps a control to a joint, or similar objects BOTH
ROTATION AND TRANSLATION
How to Run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.snapping_tool()
'''
selected = pm.ls(selection=True)
print 'Selected: {0}'.format(selected)
# by default, commands work on selected items, driver driven.
kenny = pm.parentConstraint(selected[0], selected[1])
pm.delete(kenny)
print 'The first selected object was moved to the second'
def point_snapping_tool():
'''
Description:moves first selected object to the second. JUST TRANSLATION
How to Run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.point_snapping_tool()
Thomas_Nathan_CRI1_1409_rigging_tools.py.point_snapping_tool()
'''
selected = pm.ls(selection=True)
print 'Selected: {0}'.format(selected)
# by default, commands work on selected items, driver driven.
kenny = pm.pointConstraint(selected[0], selected[1])
pm.delete(kenny)
print 'The first selected object was moved to the second'
# Get Selected
selected = pm.ls(selection=True)
print 'Selected: {0}'.format(selected)
first_joint = selected[0]
# Create control icon
control_icon_1 = pm.circle(normal=[0,1,0], radius = 2)[0]
#Move control icon to target joint
#delete constraint
kenny = pm.pointConstraint(first_joint, control_icon_1)
pm.delete(kenny)
print 'Icons created'
def color_controls():
'''
Color Controls
Description: changes the display color of the selected control icon (or any selected node).
How to Run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.color_controls()
'''
import pymel.core as pm
selected = pm.ls(selection=True)
print 'Currently selected', selected
first_selected = selected[0]
'''
NOTE: use only one of the color blocks below at a time; running all three in sequence
leaves the selection yellow, since the last overrideColor value set wins.
'''
# pick a color
# blue
first_selected.overrideEnabled.set(1)
blue = 6
first_selected.overrideColor.set(blue)
# red
first_selected.overrideEnabled.set(1)
red = 13
first_selected.overrideColor.set(red)
# yellow
first_selected.overrideEnabled.set(1)
yellow = 17
first_selected.overrideColor.set(yellow)
def hierarchy():
'''
Description: create a hierarchy based on a given system.
Select root joint chain and execute function.
How to Run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.hierarchy()
'''
'''
Input
What are we working on?
The root joint.
'''
joint_system = pm.ls(selection=True, dag=True)
print 'Joint System:', joint_system
root_joint = joint_system[0]
joint_2 = joint_system[1]
joint_3 = joint_system[2]
'''
Padding the Root Joint
'''
# Create an empty group
'''
Creates an empty group for you to use around icons (freeze transforms, etc)
'''
root_pad = pm.group(empty=True)
# Move Group to Target Joint (snap)
temp_constraint = pm.pointConstraint(root_joint, root_pad)
pm.delete(temp_constraint )
# Freeze Transforms
pm.makeIdentity(root_pad, apply=True, t=1, r=1, s=1, n=0)
# Parent constrain joint to group
pm.parent(root_joint, root_pad)
'''
Local Controls
'''
'''
Control 1 - the root_joint
'''
# create a control
# normal=[1,0,0], radius=2
control_icon_1 = pm.circle(normal=[1,0,0], radius=1, name='lt_middle_00_icon')[0]
# create a group
# grouping control during this process
local_pad_1 = pm.group(name='lt_middle_00_local')
# Output control and pad
print 'Control 1 created:', control_icon_1
print 'Local Pad 1 created:', local_pad_1
# move group over to the target joint
# delete contraint after snapping
# driver is joint, driven is group
temp_constraint = pm.parentConstraint(root_joint, local_pad_1)
pm.delete(temp_constraint)
# orient constrain the joint to the control
# driver --> driven
# control --> joint
pm.orientConstraint(control_icon_1, root_joint)
'''
Control 2
'''
# create a control
# normal=[1,0,0], radius=2
control_icon_2 = pm.circle(normal=[1,0,0], radius=1, name='lt_middle_01_icon')[0]
# create a group
# grouping control during this process
local_pad_2 = pm.group(name='lt_middle_01_local')
# Output control and pad
print 'Control 2 created:', control_icon_2
print 'Local Pad 2 created:', local_pad_2
# move group over to the target joint
# delete contraint after snapping
# driver is joint, driven is group
temp_constraint = pm.parentConstraint(joint_2, local_pad_2)
pm.delete(temp_constraint)
# orient constrain the joint to the control
# driver --> driven
# control --> joint
pm.orientConstraint(control_icon_2, joint_2)
'''
Control 3
'''
# create a control
# normal=[1,0,0], radius=2
control_icon_3 = pm.circle(normal=[1,0,0], radius=1, name='lt_middle_02_icon')[0]
# create a group
# grouping control during this process
local_pad_3 = pm.group(name='lt_middle_02_local')
# Output control and pad
print 'Control 3 created:', control_icon_3
print 'Local Pad 3 created:', local_pad_3
# move group over to the target joint
# delete contraint after snapping
# driver is joint, driven is group
temp_constraint = pm.parentConstraint(joint_3, local_pad_3)
pm.delete(temp_constraint)
# orient constrain the joint to the control
# driver --> driven
# control --> joint
pm.orientConstraint(control_icon_3, joint_3)
'''
Parent control togethers
'''
# connect pad 3 to control icon 2
pm.parent(local_pad_3, control_icon_2)
pm.parent(local_pad_2, control_icon_1)
'''
# lock and hide
'''
# For this, I created a new list for the control icons so I could set
# it up in a loop.
# t means translate,
# r means rotate
# s means scale
# v means visibility
# x,y,z are variables. Lock: false means unlocked, true
# means locked. Keyable = true means show, false means hide.
control_list = [control_icon_1, control_icon_2, control_icon_3]
for indiv_icon in control_list:
# translate
indiv_icon.tx.set(lock=True, keyable=False)
indiv_icon.ty.set(lock=True, keyable=False)
indiv_icon.tz.set(lock=True, keyable=False)
#scale
indiv_icon.sx.set(lock=True, keyable=False)
indiv_icon.sy.set(lock=True, keyable=False)
indiv_icon.sz.set(lock=True, keyable=False)
# visibility
indiv_icon.v.set(lock=True, keyable=False)
print 'Hierarchy created'
def padding_tool():
'''
Description: creates a group, snapping it to a target joint, freezing
transforms, then parenting and renaming.
How to run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.padding_tool()
'''
selected = pm.ls(selection=True)
print 'Current Selected:', selected
root_joint = selected[0]
print 'Padding group created'
# create empty group
# the flag for empty group is em
# empty=True will create empty group
pad = pm.group(empty=True)
# move group to joint
temp_constraint = pm.pointConstraint(root_joint, pad)
# delete constraint
pm.delete(temp_constraint)
# freeze transforms on said group
pm.makeIdentity(pad, apply=True, t=1, r=1, s=1, n=0)
# Parent group
pm.parent(root_joint, pad)
# renaming
#lt_index_01_bind
#lt_index_01_pad
pad_name = root_joint.replace('01_bind', '01_pad')
pad.rename(pad_name)
print 'Padding Group Created'
def joint_renamer():
'''
Description: renames joints in scene
How to run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.joint_renamer()
'''
# selection
joint_chain = pm.ls(selection=True, dag=True, type='joint')
print 'Selected items:', joint_chain
# naming convention
# figure out lt, rt, ct, etc. Then part, then number, then bind, waste, etc.
ori = raw_input('Orientation (lt/rt/ct): ')
system_name = raw_input('System name (e.g. index, arm): ')
count = 1
suffix = 'bind'
# Loop throughout the joint chain
for current_joint in joint_chain:
new_name = '{0}_{1}_0{2}_{3}'.format(ori, system_name, count, suffix)
print 'New Name', new_name
# Rename Joint
current_joint.rename(new_name)
count = count + 1
new_name = '{0}_{1}_0{2}_{3}'.format(ori, system_name, count-1, 'waste')
current_joint.rename(new_name)
def priming_tool():
'''
Definition: creates local oriented controls.
How to Run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.priming_tool()
'''
# Get Selected:
selected = pm.ls(selection=True)
# target_joint = selected[0]
for target_joint in selected:
# Renaming
control_icon_name = target_joint.replace('_bind', '_icon')
local_pad_name = target_joint.replace('_bind', '_local')
# Creating a Control
# Set Normal and set Radius
control_icon = pm.circle(normal=[1,0,0], radius = 1.8, name=control_icon_name,)[0]
# Grouping control (not an empty group)
local_pad = pm.group(name=local_pad_name)
print 'Control Icon', control_icon
print 'Pad Created', local_pad
# Snap group to targert joint, and delete constraint
temp_constraint = pm.parentConstraint(target_joint, local_pad)
pm.delete(temp_constraint)
# Orient Constraint joint to Control
pm.orientConstraint(control_icon, target_joint)
print 'Local Oriented Controls Created'
def arrow_icon():
'''
Description: creates an arrow icon
How to run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.arrow_icon()
'''
# arrow control icon
import pymel.core as pm
mel_line = 'curve -d 1 -p 0 0 -10 -p -5 0 -5 -p -10 0 0 -p -5 0 0 -p -5 0 5 -p -5 0 10 -p 0 0 10 -p 5 0 10 -p 5 0 5 -p 5 0 0 -p 10 0 0 -p 5 0 -5 -p 0 0 -10 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8 -k 9 -k 10 -k 11 -k 12 ;'
icon = pm.mel.eval(mel_line)
print 'Control Icon Created:', icon
def square_icon():
'''
Description: creates a square icon
How to run:
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.square_icon()
'''
# square control icon
import pymel.core as pm
mel_line = 'curve -d 1 -p -5 0 5 -p -5 0 0 -p 0 0 0 -p 0 0 5 -p -5 0 5 -k 0 -k 1 -k 2 -k 3 -k 4 '
icon = pm.mel.eval(mel_line)
print 'Control Icon Created:', icon
def cube_icon():
'''
Description: creates a cube icon in scene.
How to run: We use the same method as for the square and arrow
icons we created.
import Thomas_Nathan_CRI1_1409_rigging_tools
reload(Thomas_Nathan_CRI1_1409_rigging_tools)
Thomas_Nathan_CRI1_1409_rigging_tools.cube_icon()
'''
import pymel.core as pm
mel_line = 'curve -d 1 -p -0.5 0.5 -0.5 -p -0.5 0.5 0.5 -p -0.5 -0.5 0.5 -p -0.5 -0.5 -0.5 -p -0.5 0.5 -0.5 -p 0.5 0.5 -0.5 -p 0.5 -0.5 -0.5 -p -0.5 -0.5 -0.5 -p -0.5 -0.5 0.5 -p 0.5 -0.5 0.5 -p 0.5 -0.5 -0.5 -p 0.5 0.5 -0.5 -p 0.5 0.5 0.5 -p 0.5 -0.5 0.5 -p -0.5 -0.5 0.5 -p -0.5 0.5 0.5 -p 0.5 0.5 0.5 -k 0 -k 1 -k 2 -k 3 -k 4 -k 5 -k 6 -k 7 -k 8 -k 9 -k 10 -k 11 -k 12 -k 13 -k 14 -k 15 -k 16'
pm.mel.eval(mel_line)
def leg_ik():
'''
Leg IK
Description: creates IK handles for a 5-joint leg chain (RP solver root-to-ankle, SC solvers for ball and toe)
'''
import pymel.core as pm
# Given to us by the user.
joint_chain = pm.ls(selection=True, dag=True)
print joint_chain
# isolate the important joints
root_joint = joint_chain[0]
ankle_joint = joint_chain[2]
ball_joint = joint_chain[3]
toe_joint = joint_chain[4]
# Apply the IKs
# This command is pm.ikHandle()
# default is single chain solver. We need RPS for first IK.
# Flag for solver is: solver (sol). So sol='ikRPsolver'
ankle_ik = pm.ikHandle(sol='ikRPsolver', sj=root_joint, ee=ankle_joint, name='lt_ankle_ik')
ball_ik = pm.ikHandle(sol='ikSCsolver', sj=ankle_joint, ee=ball_joint, name='lt_ball_ik')
toe_ik = pm.ikHandle(sol='ikSCsolver',sj=ball_joint, ee=toe_joint, name='lt_toe_ik')
|
[
"mclavan@gmail.com"
] |
mclavan@gmail.com
|
b09d3d9917697dc4d1359837a8f9339bfbc5bca3
|
a0d11652c19fa2a655511ceee87a8d2e05394adb
|
/3.py
|
b460667010201582af9bc37cc2401967426f5bc7
|
[] |
no_license
|
galerra/artem_got_angry
|
46b391f3ea1e6715e03441bb3050b81a2e7fe8fe
|
807a4d7f3f55fa42b4b73f2f877979ae65bddfa1
|
refs/heads/main
| 2023-08-26T22:11:01.589735
| 2021-10-21T07:31:00
| 2021-10-21T07:31:00
| 417,957,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# digits are entered most-significant first: a is a three-digit number, b a two-digit number
a3 = int(input("Enter the hundreds digit of a: "))
a2 = int(input("Enter the tens digit of a: "))
a1 = int(input("Enter the units digit of a: "))
b2 = int(input("Enter the tens digit of b: "))
b1 = int(input("Enter the units digit of b: "))
c1 = (a1 + b1) % 10
c2 = (a2 + b2 + (a1 + b1) // 10) % 10
c3 = ((a2 + b2 + (a1 + b1) // 10) // 10) + a3
print("Сотни", c3)
print("Десятки", c2)
print("Единицы", c1)
|
[
"artem-newalex@mail.ru"
] |
artem-newalex@mail.ru
|
5f8183e14d9f9cacecae8ab0e3db53d3dece2a16
|
0244a193f052dcb507b1f45daee3391b26112440
|
/__openerp__.py
|
ea6a0f3ecb80e43b63ad94aa3dc979307dc7f19b
|
[] |
no_license
|
duongduong/report_voucher
|
aa340377457a3b042b63be349202ca49da9c3973
|
7559387b04e1d6fafd13165a7959faad49822077
|
refs/heads/master
| 2016-08-11T09:22:24.248383
| 2015-09-28T16:03:15
| 2015-09-28T16:03:15
| 43,309,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
{
'name' : 'PJ_Test',
'version' : '1.0',
'description' : "",
'depends' : [ 'product', 'account','base', 'sale','account_voucher'
],
'data': ['_test_view.xml',
'report/report_saleorder_inherit.xml',
'report_acc_voucher.xml',
'report/report_account_voucher_pdf.xml'],
'auto_install': False,
}
|
[
"huongduong328@gmail.com"
] |
huongduong328@gmail.com
|
c84a036fd6d594c1ed61f9b0056ebda6f4b8baa4
|
a35a2f9dc412b72bd07c36c2de2b8a577e880d11
|
/redactor_project/redactor/unredactor.py
|
277a193ec6ad830d8017adafd600a14157106d44
|
[] |
no_license
|
umeshG34/redactor
|
95750c93415d349beb2e978691b7d12c6c7f33f9
|
bcfb43ad33e05d1b3881f0fe36e7d217a990949a
|
refs/heads/master
| 2022-12-17T21:51:23.176130
| 2019-09-30T15:46:46
| 2019-09-30T15:46:46
| 172,621,679
| 0
| 0
| null | 2022-12-08T00:02:34
| 2019-02-26T02:23:33
|
Python
|
UTF-8
|
Python
| false
| false
| 7,359
|
py
|
#!/usr/bin/pythonw
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import argparse
import nltk
import glob
import csv
import os
import re
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tree import Tree
from sklearn.feature_extraction import DictVectorizer
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
##returns a list of docs when given a list of directories/locations
def get(src_list):
#print("Files being extracted",src_list)
#Need to include try and except
docs = []
for src_dir in src_list:
for src in src_dir[0:50]:
#for src in src_dir:
#print("src",src)
src_string = open(src, 'r', encoding='utf-8')
src_string = src_string.read()
docs.append(src_string)
# print("Input:",src_string)
return docs
def find_entity(doc,doc_name):
p_name = '' #getting the previous name in the document if any
doc_length = len(doc)
m = re.findall(r'_(\d{1,2}).txt',doc_name)
rate = m[0]
prev_chunk = ('','')
# names of people present in the text
names_l = []
for chunk in nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(doc))): #doc instead of sent
if type(chunk) == Tree: #checking for names using the type of the element
name = ''
length_name = 0
n_wrds = 0
if chunk.label() == 'PERSON':
name_dict = {}
#print(chunk)
name_l = [0,0,0]
for ind,i in enumerate(chunk): # full names come back as several (word, tag) tuples, so iterate over them
#print(i)
name = name + ' ' +i[0]
n_wrds += 1
if ind < 3:
name_l[ind] = len(i[0]) # capturing the length of each of the words in the name
name = name[1:]
tot_nam_len = len(name)
#print(prev_chunk)
#prev_c = '^' if prev_chunk[0] == '>' else prev_chunk[0]
name_dict = {'name': name,'l_1': name_l[0],'l_2': name_l[1],'l_3':name_l[2], 'no_words': n_wrds ,'name_length':tot_nam_len ,'rating': int(rate),'doc_length': doc_length}#, 'word_b': prev_chunk[0],'word_b_tag' : prev_chunk[1]}
#print(name_dict)
#p_name = name
names_l.append(name_dict)
prev_chunk = chunk
return names_l
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, required=True, help="Train Source File location", nargs='*', action='append')
parser.add_argument("--test", type=str, required=False, help="Test Source File location", nargs='*', action='append')
args = parser.parse_args()
#---------------------------------------------------------------------------------TRAIN------------------------------------------------------------------------------
if args.train:
#print("training args",args.train)
train_dirs = []
for i in args.train:
train_dirs.append(glob.glob(i[0]))
#print("Train dirs:",len(train_dirs))
train = get(train_dirs)
#print("End",train)
train_fts_d = []
train_dirs_flat = [item for sublist in train_dirs for item in sublist]
for ind,doc in enumerate(train):
train_fts_d = train_fts_d + find_entity(doc,train_dirs_flat[ind])
print("Train Features Extracted")
#creating the training dataset
target_train = []
fet_train = []
for name_dict in train_fts_d:
#print(name_dict['name'])
target_train.append(name_dict['name'])
del name_dict['name']
#print(name_dict)
fet_train.append(name_dict)
#print(y_train,X_train)
target_train = np.array(target_train)
#target_train = target_train.reshape(-1,1)
#Creating X in the training set
v = DictVectorizer(sparse=False)
fet_train = v.fit_transform(fet_train)
print("Features:",v.get_feature_names())
#Training the model
#clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(50, 25), random_state=1)
mod = GaussianNB()
#clf = KNeighborsClassifier(n_neighbors=10)
mod.fit(fet_train, target_train)
print("Model trained")
#getting the test data
global get_entity_test
def get_entity_test(doc,doc_name):
#print(doc_name)
name_d = {}
names_list_t = []
doc_length = len(doc)
m = re.findall(r'_(\d{1,2}).txt',doc_name)
rate = m[0]
#Initializing the feature variables
prev_chunk = ' '
name = ''
l_1 =0
l_2 = 0
l_3 = 0
n_wrds = 0
total_length = 0
chunks = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(doc)))
flag = False
for chunk in chunks:
#print(chunk)
#Intiating name collection
if chunk[0][0] == '█' and flag != True:
flag = True
name = name + chunk[0]
a_prev_chunk = prev_chunk
if prev_chunk[0][0] == '█' and chunk[0][0] == '█':
name = name + ' ' +chunk[0]
elif prev_chunk[0][0] == '█' and chunk[0][0] != '█':
# name_dict['name'] = name
n = nltk.word_tokenize(name)
#print(n)
n_wrds = len(n)
l_1 = len(n[0])
if n_wrds > 1:
l_2 = len(n[1])
if n_wrds > 2:
l_3 = len(n[2])
total_length = len(name)
#prev_c = '^' if prev_chunk[0] == '>' else prev_chunk[0]
#print(a_prev_chunk)
name_d = {'l_1':l_1,'l_2':l_2,'l_3':l_3,'no_words':n_wrds, 'name_length':total_length,'rating':int(rate),'doc_length':doc_length}#,'word_b':a_prev_chunk[0],'doc_length':doc_length}
names_list_t.append(name_d)
#Resetting values
l_1,l_2,l_3,n_wrds,tot_nam_len = 0,0,0,0,0
flag = False
name = ''
prev_chunk = chunk
return names_list_t
# test directories - test text retrieval
if args.test:
#print(args.test)
test_dirs = []
for i in args.test:
test_dirs.append(glob.glob(i[0]))
#print(test_dirs)
test = get(test_dirs)
#print(test)
test_dir_flat = [item for sublist in test_dirs for item in sublist]
test_fts_d = []
count = 0
for doc in test:
test_fts_d = test_fts_d + get_entity_test(doc, test_dir_flat[count])
count += 1
print("Test Features Extracted")
#print(test_fts_d)
fet_test = v.transform(test_fts_d)
print("Train set size:",fet_train.shape,target_train.shape)
#print(fts_test)
target_test_pred = mod.predict(fet_test)
#print(target_test_pred)
print("Accuracy on training set",mod.score(fet_train, target_train))
print("Test values predicted")
with open('pred_names.txt', 'w',encoding = 'utf-8') as f:
f.write("Predicted Names for the redacted test file\n")
for i in target_test_pred:
f.write(i+'\n')
print("pred_names.txt printed")
|
[
"umeshsai34@ou.edu"
] |
umeshsai34@ou.edu
|
f1c89f72df982ce0cf4460d3d18d203bb585aeb8
|
e4f60cbe8b727ce09e7cccf372183a71b90ea6bd
|
/venv/Scripts/pip3.7-script.py
|
02bafbe44323096c44b20fddfe2459c592e20992
|
[] |
no_license
|
yk3996/likelion_task2
|
82530bdeb3600a3ed3e29d2407d71f530e34c914
|
1e44c7d1466f27c7f162101fdc9b8053e4b9aa50
|
refs/heads/master
| 2020-05-24T01:26:17.363437
| 2019-06-07T00:06:19
| 2019-06-07T00:06:19
| 187,033,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
#!C:\Download\PyProject\likelion_task2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"bignfine@naver.com"
] |
bignfine@naver.com
|
90dadd58fbe69e5636c0d358cd9c496fb1c86ceb
|
30339420bc8827464ce9772e9ad5c7333e5f4ef1
|
/oscar/bin/easy_install
|
3e5b0c1c2c6f1447cc25b929411fbd837dd27923
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
Razvancbalaci/djangooscar
|
0df6425deb8f0a14e6cb9091460e4b36cc32f947
|
0a4ac1d7e7ccc92b4d4f786c8a2b4bf2a093078d
|
refs/heads/master
| 2020-04-18T07:24:39.982801
| 2019-01-24T19:07:39
| 2019-01-24T19:07:39
| 167,358,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
#!/home/razvan/django-oscar/oscar/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"razvan_1997@live.com.pt"
] |
razvan_1997@live.com.pt
|
|
67b279c2fe03624259dd54e4db9710063fe12276
|
ddf17869e08f847bd5d273c4ed27b2f9549011d8
|
/MachineLearningInAction/LogisticRegres/logregres/logRegres.py
|
f83aeef0ba6a869ed0f19e29f0392d33e74bb4d9
|
[] |
no_license
|
1astonm1/MachineLearning
|
a5e8885d4f24c9957bd272b971be067b8c8be033
|
443bbdfa2ff26f999918a4c4a22a5aea36ef9c0d
|
refs/heads/master
| 2020-05-15T22:13:35.607080
| 2019-07-06T15:48:40
| 2019-07-06T15:48:40
| 182,521,366
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,003
|
py
|
from numpy import *
def loadDataSet():
dataMat = [];
labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat, labelMat
def sigmoid(inX):
return 1.0 / (1 + exp(-inX))
def gradAscent(dataMatIn, classLabels):
dataMatrix = mat(dataMatIn) # convert to NumPy matrix
labelMat = mat(classLabels).transpose() # convert to NumPy matrix
m, n = shape(dataMatrix)
alpha = 0.001
maxCycles = 500
weights = ones((n, 1))
for k in range(maxCycles): # heavy on matrix operations
h = sigmoid(dataMatrix * weights) # matrix mult
error = (labelMat - h) # vector subtraction
weights = weights + alpha * dataMatrix.transpose() * error # matrix mult
return weights
def plotBestFit(weights):
import matplotlib.pyplot as plt
dataMat, labelMat = loadDataSet()
dataArr = array(dataMat)
n = shape(dataArr)[0]
xcord1 = [];
ycord1 = []
xcord2 = [];
ycord2 = []
for i in range(n):
if int(labelMat[i]) == 1:
xcord1.append(dataArr[i, 1]);
ycord1.append(dataArr[i, 2])
else:
xcord2.append(dataArr[i, 1]);
ycord2.append(dataArr[i, 2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
y = (-weights[0] - weights[1] * x) / weights[2]
ax.plot(x, y)
plt.xlabel('X1');
plt.ylabel('X2');
plt.show()
def stocGradAscent0(dataMatrix, classLabels):
m, n = shape(dataMatrix)
alpha = 0.01
weights = ones(n) # initialize to all ones
for i in range(m):
h = sigmoid(sum(dataMatrix[i] * weights))
error = classLabels[i] - h
weights = weights + alpha * error * dataMatrix[i]
return weights
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
m, n = shape(dataMatrix)
weights = ones(n) # initialize to all ones
for j in range(numIter):
dataIndex = range(m)
for i in range(m):
alpha = 4 / (1.0 + j + i) + 0.0001 # apha decreases with iteration, does not
randIndex = int(random.uniform(0, len(dataIndex))) # go to 0 because of the constant
h = sigmoid(sum(dataMatrix[randIndex] * weights))
error = classLabels[randIndex] - h
weights = weights + alpha * error * dataMatrix[randIndex]
# del (dataIndex[randIndex])
return weights
def classifyVector(inX, weights):
prob = sigmoid(sum(inX * weights))
if prob > 0.5:
return 1.0
else:
return 0.0
def colicTest():
frTrain = open('horseColicTraining.txt');
frTest = open('horseColicTest.txt')
trainingSet = [];
trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 1000)
errorCount = 0;
numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
errorCount += 1
errorRate = (float(errorCount) / numTestVec)
print("the error rate of this test is: %f" % errorRate)
return errorRate
def multiTest():
numTests = 10;
errorSum = 0.0
for k in range(numTests):
errorSum += colicTest()
print("after %d iterations the average error rate is: %f" % (numTests, errorSum / float(numTests)))
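# --- Hedged usage sketch (not part of the original file) ---
# Assumes 'testSet.txt' (two features plus a 0/1 label per line) and the horse-colic
# files referenced above are present in the working directory.
if __name__ == '__main__':
    dataArr, labelMat = loadDataSet()
    weights = gradAscent(dataArr, labelMat)
    plotBestFit(weights.getA())  # .getA() turns the NumPy matrix into an array for indexing
    multiTest()                  # averages the horse-colic error rate over 10 runs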
|
[
"45306215+1astonm1@users.noreply.github.com"
] |
45306215+1astonm1@users.noreply.github.com
|
1a98406527162c67dcfcb2872cc5fc723e81b60f
|
f9ac8727c233600e494b74be1e1522004296bedf
|
/fenv3/catalog.py
|
1de43c0f494afb0698b6df72259bb40665e22b8d
|
[] |
no_license
|
mohammadghulmi/DOSfinalproject
|
072048b25ff266846cdc1956d19945817fe9d7f6
|
90b4e90aea7cab83929266363caf46573ed4a9bb
|
refs/heads/main
| 2023-01-24T03:52:22.799579
| 2020-12-14T16:49:43
| 2020-12-14T16:49:43
| 321,412,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
from flask import Flask
from flask_restful import Api, Resource
import json
app = Flask(__name__)
api = Api(app)
books = [["","","","",""],["","","","",""],["","","","",""],["","","","",""],["","","","",""],["","","","",""],["","","","",""]]
f = open("data.txt","r")
for i in range(7):
for j in range(5):
books[i][j]=f.readline()
books[i][j]=books[i][j].rstrip("\n")
print(books[i][j])
f.close()
class search(Resource):
def get(self, type):
print(type)
s=""
for p in range(7):
if books[p][1]==type:
s=s+"title : "+ books[p][0]+" , "
s=s+"id : " +books[p][4]+" , "
return{"books":s}
class lookup(Resource):
def get(self, id):
s=""
for p in range(7):
if books[p][4]==str(id):
s = s + "title : " + books[p][0] + " , "
s = s + "stock : " + books[p][3] + " , "
s = s + "price : " + books[p][2] + " , "
return {"books": s}
class buy(Resource):
def post(self,id):
s=""
x=0
for p in range(7):
if books[p][4] == str(id):
s=books[p][3]
x=int(s)
if x>0 :
return {"stock left": x}
else:
return {"stock left": 0}
class update(Resource):
def put(self,id):
for p in range(7):
if books[p][4] == str(id):
                books[p][3] = str(int(books[p][3]) - 1)
f = open("data.txt", "w")
        for i in range(7):  # write back all seven catalog rows
for j in range(5):
f.write(books[i][j] + "\n")
return {"result": "success"}
api.add_resource(search, "/search/<string:type>")
api.add_resource(lookup,"/lookup/<int:id>")
api.add_resource(buy,"/buy/<int:id>")
api.add_resource(update,"/update/<int:id>")
if __name__ == "__main__":
app.run(port=9000,debug=True)
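# --- Hedged usage sketch (not part of the original service) ---
# With the server running on port 9000, the REST endpoints can be exercised with the
# requests library; the topic string is a placeholder that depends on what data.txt holds.
#
#   import requests
#   print(requests.get("http://localhost:9000/search/<some-topic>").json())
#   print(requests.get("http://localhost:9000/lookup/1").json())
#   print(requests.post("http://localhost:9000/buy/1").json())
#   print(requests.put("http://localhost:9000/update/1").json())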
|
[
"47061208+mohammadghulmi@users.noreply.github.com"
] |
47061208+mohammadghulmi@users.noreply.github.com
|
44298aa9008a62b04b09d7fe8e6ca9b449475bfd
|
596ee60e8b469b9e8550be616affad397da4534f
|
/build/rosserial/rosserial_esp32/catkin_generated/pkg.develspace.context.pc.py
|
abfb1a14b2c987684745df18d3e99483ef769c96
|
[] |
no_license
|
jmyth742/veg_picker
|
4e76fc1e46442d2d1cf3e7915fd419cfe551d5a6
|
9402d6ef4d89593507dd3bd118501130f2842c87
|
refs/heads/main
| 2023-02-23T12:51:16.688520
| 2021-01-27T17:40:50
| 2021-01-27T17:40:50
| 333,507,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_esp32"
PROJECT_SPACE_DIR = "/home/jonny/veg_picker/devel"
PROJECT_VERSION = "1.0.0"
|
[
"jmyth742@gmail.com"
] |
jmyth742@gmail.com
|
16e39501d561ff8f399a048debeeac46a05ae18f
|
7ec64481a66af49071db47c792e5b91390142ed9
|
/tests/test_order_by_nested.py
|
165f04e025111c51966b44e189c616bf99489ebb
|
[
"Apache-2.0"
] |
permissive
|
rafalstapinski/tortoise-orm
|
73943f0b3dcd84645db46df6dea047ab9dd586af
|
b310f4fc81e1b817e68110d118cd371280b05a8d
|
refs/heads/develop
| 2023-05-25T09:10:42.249656
| 2021-06-07T03:54:05
| 2021-06-07T03:54:05
| 348,920,652
| 1
| 0
|
Apache-2.0
| 2021-03-18T02:50:46
| 2021-03-18T02:50:45
| null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
from tests.testmodels import Event, Tournament
from tortoise.contrib import test
class TestOrderByNested(test.TestCase):
async def test_basic(self):
await Event.create(
name="Event 1", tournament=await Tournament.create(name="Tournament 1", desc="B")
)
await Event.create(
name="Event 2", tournament=await Tournament.create(name="Tournament 2", desc="A")
)
self.assertEqual(
await Event.all().order_by("-name").values("name"),
[{"name": "Event 2"}, {"name": "Event 1"}],
)
self.assertEqual(
await Event.all().prefetch_related("tournament").values("tournament__desc"),
[{"tournament__desc": "B"}, {"tournament__desc": "A"}],
)
self.assertEqual(
await Event.all()
.prefetch_related("tournament")
.order_by("tournament__desc")
.values("tournament__desc"),
[{"tournament__desc": "A"}, {"tournament__desc": "B"}],
)
|
[
"noreply@github.com"
] |
rafalstapinski.noreply@github.com
|
a148d4e1e27e766ce235e0dc45f050400b6627e0
|
739a84f15a1242ec715c79f4f1fd7657c607f2e6
|
/intersection_arrays.py
|
4c16b663b2f428c3ba5571ec36ed912e065e7a64
|
[] |
no_license
|
ElshadaiK/Competitive-Programming
|
365683a7af61f881ee9cd56d2124f09cfb88e789
|
9757395120757a81fb5df0bd4719771e60410b47
|
refs/heads/master
| 2023-03-09T04:58:33.262626
| 2023-02-04T12:24:39
| 2023-02-04T12:24:39
| 225,391,846
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
def intersection(nums1, nums2):
inter = [0]*1001
result = []
for i in range(len(nums1)):
inter[nums1[i]] = 1
for i in range(len(nums2)):
if(inter[nums2[i]] == 1):
inter[nums2[i]] = 3
for k in range(len(inter)):
if(inter[k] == 3):
result.append(k)
return result
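# --- Hedged usage sketch (not part of the original solution) ---
# The helper assumes both lists hold integers in 0..1000 (the marker array is sized 1001);
# shared values come back sorted by value, each reported once.
if __name__ == '__main__':
    print(intersection([1, 2, 2, 1], [2, 2]))        # [2]
    print(intersection([4, 9, 5], [9, 4, 9, 8, 4]))  # [4, 9]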
|
[
"elshadaikassutegegn@gmail.com"
] |
elshadaikassutegegn@gmail.com
|
43d0686cbaf9a0f83c2c555ed97d2fe855a322ba
|
2256e2a8d7958925a17a31871dc8c674a6f9e6df
|
/app/bot/core/middlewares.py
|
bfcf19f7819ad7440f3131662f9d2f6037c92df6
|
[] |
no_license
|
tabacdenis/fastapi-aiogram-template
|
7694c5cf2bdca0412d8d3b99b29893bd79cbbb1a
|
e0b72c9154ece56e5b5139a341ba0bf760b37b20
|
refs/heads/main
| 2023-08-26T13:04:42.710513
| 2021-10-26T09:00:13
| 2021-10-26T09:00:13
| 518,385,537
| 1
| 0
| null | 2022-07-27T09:06:54
| 2022-07-27T09:06:53
| null |
UTF-8
|
Python
| false
| false
| 2,608
|
py
|
import asyncio
from aiogram import Dispatcher, types
from aiogram.dispatcher.handler import CancelHandler, current_handler, Handler
from aiogram.dispatcher.middlewares import BaseMiddleware
from aiogram.utils.exceptions import Throttled
def rate_limit(limit: float, key=None):
def decorator(func):
setattr(func, 'throttling_rate_limit', limit)
if key:
setattr(func, 'throttling_key', key)
return func
return decorator
class ThrottlingMiddleware(BaseMiddleware):
def __init__(self, limit=.8, key_prefix='antiflood_'):
self.rate_limit = limit
self.prefix = key_prefix
super(ThrottlingMiddleware, self).__init__()
async def on_process_callback_query(self, query: types.CallbackQuery, data: dict):
if query.inline_message_id:
return
handler: Handler = current_handler.get()
# Get dispatcher from context
dispatcher = Dispatcher.get_current()
# If handler was configured, get rate limit and key from handler
notify = False
if handler:
limit = getattr(handler, 'throttling_rate_limit', None)
key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}")
if limit:
notify = True
else:
limit = self.rate_limit
else:
limit = self.rate_limit
key = f"{self.prefix}_message"
try:
await dispatcher.throttle(key, rate=limit)
except Throttled as t:
print('key:', key)
if notify:
                await query.answer('Biroz kuting...')  # Uzbek: "Please wait a moment..."
else:
await query.answer()
delta = t.rate - t.delta
await asyncio.sleep(delta)
raise CancelHandler()
async def on_process_message(self, message: types.Message, data: dict):
handler = current_handler.get()
dispatcher = Dispatcher.get_current()
# If handler was configured, get rate limit and key from handler
if handler:
limit = getattr(handler, 'throttling_rate_limit', self.rate_limit)
key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}")
else:
limit = self.rate_limit
key = f"{self.prefix}_message"
# Use Dispatcher.throttle method.
try:
await dispatcher.throttle(key, rate=limit)
except Throttled as throttled:
delta = throttled.rate - throttled.delta
await asyncio.sleep(delta)
raise CancelHandler()
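# --- Hedged wiring sketch (not part of the original module) ---
# The middleware is typically registered on the Dispatcher at startup, and per-handler
# limits are attached with the rate_limit() decorator defined above, e.g.:
#
#   dp = Dispatcher(bot)
#   dp.middleware.setup(ThrottlingMiddleware(limit=1.5))
#
#   @dp.message_handler(commands=['start'])
#   @rate_limit(5, key='start')  # at most one /start every 5 seconds per chat
#   async def cmd_start(message: types.Message):
#       await message.answer("Hello!")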
|
[
"oopanndaa@gmail.com"
] |
oopanndaa@gmail.com
|
a1be7cf483a9faca9b09118b5a2e540a23142cb3
|
46e6c78186015ccdf5dae056c106572bb4fb156b
|
/Poisson/2DPoissonNN.py
|
68a2530cabf4715436d03fe970dcaafa86adf832
|
[] |
no_license
|
dmontag23/NNAugmentationForPDEs
|
b28a67a18e89d19c37b08127b3505c65d8cac9a5
|
71168f812526ab698ebebcc956c2e28d3cb69d40
|
refs/heads/master
| 2021-05-21T05:49:52.598158
| 2020-04-09T03:56:26
| 2020-04-09T03:56:26
| 252,573,556
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,702
|
py
|
import numpy as np
from datetime import datetime
from fenics import *
from fenics_adjoint import *
from Poisson.FEM import FEM
from Poisson.NN import Network
from scipy.optimize import minimize
np.random.seed(2)
def create_fenics_vector(update_vector):
q_function = Function(Q)
vertex_to_dofs = vertex_to_dof_map(q_function.function_space())
for i in range(len(vertex_to_dofs)):
q_function.vector()[vertex_to_dofs[i]] = update_vector[i]
return q_function
def process_nn_parameters(params):
net.update_parameters(params)
output = net(mesh.coordinates())
q_vector = output.detach().numpy()
q_function = create_fenics_vector(q_vector)
return output, q_function
def assemble_functional(params):
output, q = process_nn_parameters(params)
FEMProblem.q = q
FEMProblem.solve_poisson_problem()
J = FEMProblem.error_functional(uex_with_noise, qex)
return output, q, J
def fun_to_minimize(params):
output, q_function, J = assemble_functional(params)
return float(J)
def jac_of_fun(params):
output, q_function, J = assemble_functional(params)
control = Control(q_function)
dJdq = interpolate(compute_gradient(J, control), Q).compute_vertex_values()
net.calculate_gradients(dJdq, output)
gradients_at_xi = np.concatenate([param.grad.numpy().flatten() for param in net.parameters()])
return gradients_at_xi
# ask user which "q" value they would like to observe, what noise level they want, and what value to use for Tikhonov regularization
usr_input = int(input("Please enter a number corresponding to the value of q you want:\n " +
"0: q = 1 \n 1: q = 1 + x + y \n 2: q = 1 + x^2 + y^2 \n 3: 1 + 0.5 * sin(2*pi*x) * sin(2*pi*y) \n"))
delta = float(input("Please enter a number between 0 and 1 corresponding to the value of noise you want:\n"))
alpha = Constant(float(input("Please enter Tikhonov regularization constant:\n")))
# store the exact value of q and f for the 4 different options for q
all_qex = [Constant(1.0),
Expression('1 + x[0] + x[1]', degree=2),
Expression('1 + pow(x[0], 2) + pow(x[1], 2)', degree=2),
Expression('1 + 0.5 * sin(2 * pi * x[0]) * sin(2 * pi * x[1])', degree=2)]
all_f = [Expression('2 * pow(pi, 2) * sin(pi * x[0]) * sin(pi * x[1])', degree=2),
Expression('-pi * sin(pi * x[1]) * (cos(pi * x[0]) - pi * (x[0] + x[1] + 1) * sin(pi * x[0])) - pi * sin(pi * x[0]) * (cos(pi * x[1]) - pi * (x[0] + x[1] + 1) * sin(pi * x[1]))', degree=2),
Expression('-pi * sin(pi * x[1]) * (2 * x[0] * cos(pi * x[0]) - pi * (pow(x[0], 2) + pow(x[1], 2) + 1) * sin(pi * x[0])) - pi * sin(pi * x[0]) * (2 * x[1] * cos(pi * x[1]) - pi * (pow(x[0], 2) + pow(x[1], 2) + 1) * sin(pi * x[1]))', degree=2),
Expression('-sin(pi * x[0]) * sin(pi * x[1]) * (-2* pow(pi, 2) * sin(2 * pi * x[0]) * sin(2 * pi * x[1]) - (2 * pow(pi, 2))) - (2 * pow(pi, 2)) * pow(cos(pi * x[0]), 3) * pow(sin(pi * x[1]), 2) * cos(pi * x[1]) - (2 * pow(pi, 2)) * pow(sin(pi * x[0]), 2) * cos(pi * x[0]) * pow(cos(pi * x[0]), 3)', degree=2)]
# create the neural network and initialize the weights to be used
net = Network(2, 10)
initial_params = np.random.rand(41)
initial_params[20:30] = 0.0
initial_params[40] = 0.0
# Create mesh and define the function spaces
mesh = UnitSquareMesh(100, 100)
V = FunctionSpace(mesh, 'P', 1)
Q = FunctionSpace(mesh, 'P', 1)
net.update_parameters(initial_params)
# define the exact solution for u and q
uex = interpolate(Expression('sin(pi * x[0]) * sin(pi * x[1])', degree=2), V)
qex = interpolate(all_qex[usr_input], Q)
# setup the FEM problem to be solved
f = all_f[usr_input]
FEMProblem = FEM(mesh, V, f, DirichletBC(V, uex, "on_boundary"), delta, alpha)
# find the optimal value for q
uex_with_noise = FEMProblem.add_noise(uex) # add noise to the original solution u to simulate noisy data
net.print_params()
startTime = datetime.now()
min_params = minimize(fun_to_minimize, initial_params, method='BFGS', jac=jac_of_fun, options={'disp':True, 'gtol':1e-07})
print("The time it took to optimize the functional: " + str(datetime.now() - startTime))
output, q_function = process_nn_parameters(min_params.x)
q_opt = interpolate(q_function, Q)
# solve the Poisson problem with the optimal value of q
FEMProblem.q = q_opt
FEMProblem.solve_poisson_problem()
# print the errors
FEMProblem.print_L2_errors(uex, qex)
# write the optimal value of q to a vtk file
vtkfile = File('poisson2d/optq.pvd')
vtkfile << q_opt
vtkfile = File('poisson2d/optu.pvd')
vtkfile << FEMProblem.u
|
[
"dmontag23@gmail.com"
] |
dmontag23@gmail.com
|
9d8a62fa8fd0d64ef61638773107d879bd039073
|
8d85393bdae5cdef580dacbf2f296a6882bf765f
|
/polls/models.py
|
18f57a0c31fd3bd42ca1519e7de3e53cbddd87ff
|
[] |
no_license
|
rednibia/djangoproject
|
6abb8462ad9aa2224553e4d5d2833a9e20e3ab74
|
679910b268c90f72ca8ec8ec93ca1c6fbe77b559
|
refs/heads/master
| 2021-01-10T05:06:34.946338
| 2016-02-01T19:37:59
| 2016-02-01T19:37:59
| 50,608,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
@python_2_unicode_compatible # only if you need to support Python 2
class Question(models.Model):
qtxt = models.CharField(max_length=200)
pd = models.DateTimeField('date published')
def __str__(self):
return self.qtxt
def publishrec(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pd <= now
@python_2_unicode_compatible # only if you need to support Python 2
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
ctxt = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.ctxt
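# --- Hedged usage sketch (not part of the original app), e.g. from the Django shell ---
#   q = Question(qtxt="What's new?", pd=timezone.now())
#   q.save()
#   q.choice_set.create(ctxt="Not much", votes=0)
#   q.publishrec()  # True, since pd falls within the last day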
|
[
"andrewaibinder@gmail.com"
] |
andrewaibinder@gmail.com
|
fd3ead0ab8456a1f33d4469a8d99a8f7f42e9320
|
0bc80f7bba8d7dfa7b293759b0974b9707d8d450
|
/testdir-9/test-99.py
|
72e8f627f0d744e3698025d63a8b04ad6dc17d64
|
[] |
no_license
|
delcypher/lit-concurrency-windows-bug
|
da3d5edd63f5feaf8b8b65650e39c877f71f46d8
|
029eb69219e4b84d33072c26e02663e267a38304
|
refs/heads/master
| 2020-06-06T07:24:50.360611
| 2014-05-19T15:31:52
| 2014-05-19T15:31:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,231
|
py
|
# RUN: python %s > %t
# RUN: %diff %s.expect %t
print('FOO 0x15bf60ecd0cd69f3')
print('FOO 0x60cd668746ccd0fa')
print('FOO 0x4ca8480914b6a7d8')
print('FOO 0x2e2a685136fefe6f')
print('FOO 0x515825fdda953ec7')
print('FOO 0x1ca9386430cfd8d2')
print('FOO 0x15aba69530aa97aa')
print('FOO 0x6c1c6788eaca56ec')
print('FOO 0x368eb8fb63fa212e')
print('FOO 0x8ab246e72aca81d')
print('FOO 0x54f10016b90500b9')
print('FOO 0x3cb71a1ef14d6438')
print('FOO 0x36bce9ab7d6c52a8')
print('FOO 0x5f4a484fcaf6505')
print('FOO 0x119a6272f8763132')
print('FOO 0x2fbf349067eef70e')
print('FOO 0x56d301bbf25afe21')
print('FOO 0x2b34b66a107a31d')
print('FOO 0x3a53516d2eb29169')
print('FOO 0x5ce0911999008cb0')
print('FOO 0x2dc276048d3eb3cd')
print('FOO 0x3aefbde806eeed6c')
print('FOO 0x7b8bbf45ca4e36fc')
print('FOO 0x7d78510bda2b6326')
print('FOO 0x1b91eb1c30da068a')
print('FOO 0xf3615072695147a')
print('FOO 0x2e5873619d4f35a1')
print('FOO 0x5727f59b22e2de1f')
print('FOO 0x7f7be1e40b08b25')
print('FOO 0x51ccaabca5285c14')
print('FOO 0x64fa0d76240fd031')
print('FOO 0xb9de254bd1f2040')
print('FOO 0x5072182766e59e88')
print('FOO 0x469db8c4c841bf4c')
print('FOO 0x11ce87487c1f9a6b')
print('FOO 0xdff498e6336a60a')
print('FOO 0x77fccf55636a7f4a')
print('FOO 0x2f20ad782f7e4ebd')
print('FOO 0x319b27532c1d5df6')
print('FOO 0x275d03b9954e229c')
print('FOO 0x3ccd67a6676e413e')
print('FOO 0x133e8262513eb148')
print('FOO 0x60443bbbe1e6e224')
print('FOO 0x737063821c52d2a9')
print('FOO 0x27317e1a3751e6ac')
print('FOO 0x88c7420838fc640')
print('FOO 0x7667b263f7d6f87d')
print('FOO 0x6ed2c0d16d69fccb')
print('FOO 0x6f115ff59fb86a50')
print('FOO 0x26805b62792403c7')
print('FOO 0x6a7a9cdaa8cd325a')
print('FOO 0x178a4334a283eff3')
print('FOO 0x7e04fb87e05096dd')
print('FOO 0x690c5ce413505791')
print('FOO 0x6796a3b41e54fa23')
print('FOO 0x6947ed679863daf7')
print('FOO 0x73baceb380d3a000')
print('FOO 0x288713e31cb685f0')
print('FOO 0x615a722612c3acf7')
print('FOO 0x488abc6b4c98e61')
print('FOO 0xa6e777bfcbb617c')
print('FOO 0x2d7b23e38f485b84')
print('FOO 0xf44b624290e3886')
print('FOO 0x12e3f89ca9f2e776')
print('FOO 0x2293b434710c87f')
print('FOO 0x61c1ed8fcad62252')
print('FOO 0x739d7a743940bc49')
print('FOO 0x41217a4eb575c911')
print('FOO 0x748feb65e4612e63')
print('FOO 0x4096649232960b30')
print('FOO 0x869218acf04b6ab')
print('FOO 0x6637f51238d1651f')
print('FOO 0x55a395ae68eece13')
print('FOO 0x2d85e365c6b49bc1')
print('FOO 0xcd27fb98dd381a')
print('FOO 0x5982981f2449a6f8')
print('FOO 0x69d2cbcf2b1bc51b')
print('FOO 0x6373d76fe1027d15')
print('FOO 0x2bdbadab611c7cb0')
print('FOO 0x52acac6cf4d7e5fb')
print('FOO 0x552b637e4fd2ea77')
print('FOO 0x1c7f996cc666b963')
print('FOO 0x496a42f3ee3f2e77')
print('FOO 0x3572d857f40775c9')
print('FOO 0x58f3c1171f06461f')
print('FOO 0x5bdc7cce8963f82e')
print('FOO 0x49d55108ec928e61')
print('FOO 0x17257436cf27d97c')
print('FOO 0x380870d2a5966421')
print('FOO 0x42b8610db99075b')
print('FOO 0xce9db3a5a92f865')
print('FOO 0x317531928bd0670d')
print('FOO 0x69d2965225478deb')
print('FOO 0x651dd6dfa0071231')
print('FOO 0x481eadf77fbd1f33')
print('FOO 0x47f63daf7296fbc1')
print('FOO 0x69137f4ef19dea2f')
print('FOO 0x7f1adf459fd598f5')
print('FOO 0x1c359cd2a93f83ac')
print('FOO 0x11c1b50ce36f9af5')
|
[
"daniel.liew@imperial.ac.uk"
] |
daniel.liew@imperial.ac.uk
|
e55646a8119970163aeb64b6b83d2aa3d3d683ba
|
5d60d0d3b474b8f225773330c2f0af63835e2625
|
/python/analyze_essentia.py
|
96269a0d03aaddfd9caaf367ef7e449c15fa3e7b
|
[] |
no_license
|
julesyoungberg/soundboy
|
3dcd62b031c6552c9ac99309d36245463b6ae4e6
|
c20bd44b811df5fd399bb03c73ca89e33b5d7609
|
refs/heads/main
| 2023-02-02T18:29:38.596939
| 2020-12-13T22:12:56
| 2020-12-13T22:12:56
| 301,028,668
| 0
| 4
| null | 2020-12-13T22:12:57
| 2020-10-04T03:01:17
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 4,707
|
py
|
import os
import shutil
import essentia.standard
import numpy as np
import essentia
from tqdm import tqdm
import matplotlib.pyplot as plt
import sklearn
def get_features(d):
try:
sr = 22050
duration = 3
nfft = 2048
#loader = essentia.standard.EasyLoader(filename=d, sampleRate = sr, endTime = 5)
#loader = essentia.standard.EqloudLoader(filename=d, sampleRate = sr, endTime = duration)
loader = essentia.standard.MonoLoader(filename=d, sampleRate=sr)
audio = loader()
if (len(audio) > duration * sr):
audio = audio[:duration * sr]
n_coef = 13
n_bands = 40
frame_size = 2048
hop = 1024
w = essentia.standard.Windowing(type='hann')
spectrum = essentia.standard.Spectrum()
mfcc = essentia.standard.MFCC(
numberCoefficients=n_coef, sampleRate=sr, numberBands=n_bands)
mfccs = []
for frame in essentia.standard.FrameGenerator(audio, frameSize=frame_size, hopSize=hop):
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
mfccs = essentia.array(mfccs).T
pad_width = 95 - mfccs.shape[1]
mfccs = np.pad(mfccs, pad_width=(
(0, 0), (0, pad_width)), mode='reflect')
# centroid = librosa.feature.spectral_centroid(y=audio, sr = sr, n_fft = nfft, hop_length = hop)
# bandwidth = librosa.feature.spectral_centroid(y= audio, sr = sr, n_fft = nfft, hop_length = hop)
# spectral_contrast = librosa.feature.spectral_contrast(y = audio, sr = sr, n_fft = nfft, hop_length = hop)
# spectral_flatness = librosa.feature.spectral_flatness(y = audio, n_fft = nfft, hop_length = hop)
# spectral_rolloff = librosa.feature.spectral_rolloff(y = audio, sr = sr, n_fft = nfft, hop_length = hop)
# zcr = librosa.feature.zero_crossing_rate(y = audio, hop_length = 1024)
# feature_vector = [
# centroid.mean(),
# centroid.std(),
# bandwidth.mean(),
# bandwidth.std(),
# # spectral_contrast[0].mean(),
# # spectral_contrast[0].std(),
# # spectral_contrast[1].mean(),
# # spectral_contrast[1].std(),
# # spectral_contrast[2].mean(),
# # spectral_contrast[2].std(),
# # spectral_contrast[3].mean(),
# # spectral_contrast[3].std(),
# # spectral_contrast[4].mean(),
# # spectral_contrast[4].std(),
# # spectral_contrast[5].mean(),
# # spectral_contrast[5].std(),
# spectral_flatness.mean(),
# spectral_flatness.std(),
# spectral_rolloff.mean(),
# spectral_rolloff.std(),
# zcr.mean(),
# zcr.std(),
# mfccs.mean(),
# mfccs.std()
# ]
# feature_vector = np.array(feature_vector)
#normalized_feature_vector = (feature_vector - feature_vector.mean(0)) / feature_vector.std(0)
return mfccs
except Exception as e:
        print("error processing file", d, e)
        # os.remove(d)
        print(f"Skipped file {d}")
return [-1]
features = []
mfccs = []
labels = []
i = 10
current_label = ""
folder = "./Train"
for root, dirs, files in os.walk(folder, topdown=False):
for name in tqdm(files):
cur_dir = os.path.join(root, name)
label = cur_dir.split('/')[-2]
# # Testing code
# if (current_label != label):
# current_label = label
# i = 0
# if (i < 10):
# feature_vector, mfcc = get_features(cur_dir)
# i += 1
# if len(feature_vector) > 1:
# features.append(feature_vector)
# print(i)
# # Standardizing mfcc data so coefficient dimension has zero mean and unit variance
# mfcc = sklearn.preprocessing.scale(mfcc.astype(float), axis=0)
# mfccs.append(mfcc)
# labels.append(label)
# Real Code
mfcc = get_features(cur_dir)
if len(mfcc) > 1:
# features.append(feature_vector)
# Standardizing mfcc data so coefficient dimension has zero mean and unit variance
mfccs.append(mfcc)
labels.append(label)
features = np.array(features)
labels = np.array(labels)
mfccs = np.array(mfccs)
#np.save('features', features)
np.save('essentia_mfccs_new', mfccs)
np.save('essentia_labels_new', labels)
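# --- Hedged follow-up sketch (not part of the original script) ---
# np.save appends the .npy suffix, so the extracted features can later be reloaded with:
#   mfccs = np.load('essentia_mfccs_new.npy')
#   labels = np.load('essentia_labels_new.npy')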
|
[
"noreply@github.com"
] |
julesyoungberg.noreply@github.com
|
aaadd0a0164a4f69dde6224c758243edc8c96d89
|
442ef15c5cba847a9ede88f7f29d8dab6d72f3dd
|
/apps/miniwallet/migrations/0001_initial.py
|
742c76d5cb436d4215aa2851d13d555d25a4aaec
|
[] |
no_license
|
roopagokul24/testproject
|
fd526be5dceb70da798282d6f808a32ebca01e09
|
b638c7e4b621532196e8846bbc650f0ad5cb8e25
|
refs/heads/main
| 2023-06-18T21:51:45.215864
| 2021-07-13T18:33:12
| 2021-07-13T18:33:12
| 319,672,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
# Generated by Django 2.2.3 on 2021-07-13 16:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Wallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.CharField(max_length=15, verbose_name='Amount')),
('balance', models.CharField(max_length=15, null=True, verbose_name='Amount')),
('enable', models.BooleanField(default=False)),
('enabled_time', models.DateTimeField(blank=True, default=None, null=True, verbose_name='Created Time')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='wallet_owner', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"roopa@impressads.com"
] |
roopa@impressads.com
|
5a6477db916abb1b00d44eb794a0c0276496e045
|
1afbf755dcb47649cdc5d1d3b908bcd4e60cf24f
|
/PreDisulfideBond/create_position.py
|
726740568a783d6101baa986253c30bb4f4c3f7f
|
[] |
no_license
|
romendlf/SSBONDPredict
|
cd64d74a4b1814e769ca196388687430fda3af34
|
524a1c1196377ad4960a1001f42fdd47f19c17bf
|
refs/heads/master
| 2022-02-15T16:54:52.964907
| 2019-08-17T12:19:16
| 2019-08-17T12:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,387
|
py
|
import rmsd
import numpy as np
import sys
# this function can produce the sixth position for residues that only have 5 atoms
def xyz5_position(i,lines):
template1 = np.array([[-0.93454, 0.91954, 1.11786],
[-0.01675, 0.03488, 0.58954],
[ 1.10621, 0.72253,-0.004 ],
[ 1.6148 , 1.14817,-0.57075],
[-0.64719,-0.82998,-0.40926]])
template2 = np.array([[-0.93454, 0.91954, 1.11786],
[-0.01675, 0.03488, 0.58954],
[ 1.10621, 0.72253,-0.004 ],
[ 1.6148 , 1.14817,-0.57075],
[-0.64719,-0.82998,-0.40926],
[-1.12252,-1.99515,-0.72338]])
amino_acid_position = []
for k in range(i, i + 5, 1):
try:
x = lines[k][30:38]
y = lines[k][38:46]
z = lines[k][46:54]
amino_acid_position.append(np.asarray([x, y ,z], dtype=float))
except:
sys.exit("Error parsing input for the following line: \n{0:s}".format(lines[k]))
amino_acid_position = np.asarray(amino_acid_position)
#print amino_acid_position
a_acid_p = amino_acid_position - rmsd.centroid(amino_acid_position)
template1 -= rmsd.centroid(template1)
#centroid P2 into f
template2 = template2 - rmsd.centroid(template2)
rot = rmsd.kabsch(template1,a_acid_p)
 # new_position is the rotated matrix: rotate matrix P2 onto Q
new_position = np.dot(template2,rot)
#translation the P2 into initial position after rotation
new_position += rmsd.centroid(amino_acid_position)
C = new_position.tolist()
#print new_position
#change the martix into list
#lenthg = len(C)
#print lenthg
#for n in range(0,6,1):
position = ('%8s'%str(float('%.3f'%C[5][0]))) + ('%8s'%str(float('%.3f'%C[5][1]))) + ('%8s'%str(float('%.3f'%C[5][2])))
return position
#GLY line5
# this function can produce the fivth position for redius who only have 4 atoms,
def xyz4a_position(i,lines):
template1 = np.array([[-0.93454, 0.91954, 1.11786],
[-0.01675, 0.03488, 0.58954],
[ 1.10621, 0.72253,-0.004 ],
[ 1.6148 , 1.14817,-0.57075]])
template2 = np.array([[-0.93454, 0.91954, 1.11786],
[-0.01675, 0.03488, 0.58954],
[ 1.10621, 0.72253,-0.004 ],
[ 1.6148 , 1.14817,-0.57075],
[-0.64719,-0.82998,-0.40926],
[-1.12252,-1.99515,-0.72338]])
amino_acid_position = []
for k in range(i, i + 4, 1):
try:
x = lines[k][30:38]
y = lines[k][38:46]
z = lines[k][46:54]
amino_acid_position.append(np.asarray([x, y ,z], dtype=float))
except:
sys.exit("Error parsing input for the following line: \n{0:s}".format(lines[k]))
amino_acid_position = np.asarray(amino_acid_position)
#print amino_acid_position
a_acid_p = amino_acid_position - rmsd.centroid(amino_acid_position)
template1 -= rmsd.centroid(template1)
#centroid P2 into f
template2 = template2 - rmsd.centroid(template2)
#for debug
#print '*************8888888888888888'
#print template1.shape#(4,3)
#print a_acid_p.shape#(5,3)
#print template2.shape#(6.3)
rot = rmsd.kabsch(template1,a_acid_p)
 # new_position is the rotated matrix: rotate matrix P2 onto Q
new_position = np.dot(template2,rot)
#translation the P2 into initial position after rotation
new_position += rmsd.centroid(amino_acid_position)
C = new_position.tolist()
#print new_position
#print lenthg
#for n in range(0,6,1):
position5 = ('%8s'%str(float('%.3f'%C[4][0]))) + ('%8s'%str(float('%.3f'%C[4][1]))) + ('%8s'%str(float('%.3f'%C[4][2])))
position6 = ('%8s'%str(float('%.3f'%C[5][0]))) + ('%8s'%str(float('%.3f'%C[5][1]))) + ('%8s'%str(float('%.3f'%C[5][2])))
print 'print the position of position 5 and position 6'
return position5,position6
#GLY line6
# this function can produce the sixth position for residues that only have 4 atoms
def xyz4b_position(i,lines):
template1 = np.array([[-0.93454, 0.91954, 1.11786],
[-0.01675, 0.03488, 0.58954],
[ 1.10621, 0.72253,-0.004 ],
[ 1.6148 , 1.14817,-0.57075]])
template2 = np.array([[-0.93454, 0.91954, 1.11786],
[-0.01675, 0.03488, 0.58954],
[ 1.10621, 0.72253,-0.004 ],
[ 1.6148 , 1.14817,-0.57075],
[-0.64719,-0.82998,-0.40926],
[-1.12252,-1.99515,-0.72338]])
amino_acid_position = []
for k in range(i, i + 4, 1):
try:
x = lines[k][30:38]
y = lines[k][38:46]
z = lines[k][46:54]
amino_acid_position.append(np.asarray([x, y ,z], dtype=float))
except:
sys.exit("Error parsing input for the following line: \n{0:s}".format(lines[k]))
amino_acid_position = np.asarray(amino_acid_position)
#print amino_acid_position
a_acid_p = amino_acid_position - rmsd.centroid(amino_acid_position)
template1 -= rmsd.centroid(template1)
#centroid P2 into f
template2 = template2 - rmsd.centroid(template2)
rot = rmsd.kabsch(template1,a_acid_p)
 # new_position is the rotated matrix: rotate matrix P2 onto Q
new_position = np.dot(template2,rot)
#translation the P2 into initial position after rotation
new_position += rmsd.centroid(amino_acid_position)
C = new_position.tolist()
#print new_position
#print lenthg
#for n in range(0,6,1):
position = ('%8s'%str(float('%.3f'%C[5][0]))) + ('%8s'%str(float('%.3f'%C[5][1]))) + ('%8s'%str(float('%.3f'%C[5][2])))
return position
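# --- Hedged usage sketch (not part of the original script) ---
# Given the ATOM records of a PDB model read into `lines`, with `i` indexing the first
# atom of the residue of interest, the missing coordinate strings can be generated as:
#   with open('model.pdb') as fh:            # 'model.pdb' is a placeholder path
#       lines = fh.readlines()
#   atom6 = xyz5_position(i, lines)          # residue already has 5 atoms
#   atom5, atom6 = xyz4a_position(i, lines)  # residue has only 4 atoms (e.g. GLY)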
|
[
"2646510513@qq.com"
] |
2646510513@qq.com
|
91e913b259518577f842264f979128f64c4d2b13
|
2038db2aa17ec99980b8e942c0af3d1f55158c89
|
/app/auth/__init__.py
|
3133d442cf35c62f20bb47562253d9974c0545e9
|
[] |
no_license
|
Harithmetic1/Alium-Resume-Builder
|
098011d5ac3a5e5eed8d7f8b431262c293cd90a5
|
f147c15e0372d51449b41d5a29d4e40b61a7874a
|
refs/heads/master
| 2022-12-31T02:10:09.392725
| 2020-10-09T20:29:41
| 2020-10-09T20:29:41
| 297,050,208
| 2
| 2
| null | 2020-10-09T20:29:43
| 2020-09-20T10:17:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 91
|
py
|
from flask import Blueprint
auth = Blueprint('auth', __name__)
from app.auth import views
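# --- Hedged usage sketch (not part of the original package) ---
# The blueprint is normally registered on the Flask app elsewhere, e.g. in the app factory:
#   from app.auth import auth
#   app.register_blueprint(auth)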
|
[
"koikibabatunde14@gmail.com"
] |
koikibabatunde14@gmail.com
|
54085cee412af380b8a7357d471d4a1373a35789
|
18781dfa8e93aeffe05575bf146f54b55bf43587
|
/src/testscr/test.py
|
db4d69863700fb7f0ebd61a73552f0efebd7d272
|
[] |
no_license
|
sailepradh/Rare_variants_atherosclerosis
|
a7a34edf5cab8f35f0d0f82c96c5e1cb74a2707a
|
344b1710630ea24e8e04f1a1eab10cfbee57f222
|
refs/heads/master
| 2020-12-02T20:57:19.985950
| 2018-01-29T13:30:44
| 2018-01-29T13:30:44
| 96,233,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' This script overlaps the alleles that are present at rare frequency in the Swed samples.
Considering a rare frequency of < 1% in the Swed frequency data, we can estimate the allele count threshold as 1% of 2000 = 20; removing the private variants present in only one individual, we set the lower threshold to 2.
'''
import sys
from pysam import VariantFile
Vcfin = VariantFile("/Users/salendrapradh/Documents/Rare_variants_atherosclerosis/data/swegen_20161223/SNP_VCF.gz")
with open ('/Users/salendrapradh/Documents/Rare_variants_atherosclerosis/data/BAV_HiCap/BAV_P-E_InteractionDataset.txt', 'r') as f:
for line in f:
line = line.strip().split("\t")
set = [(line[9])[3:], line[10], line[11]]
for field in line:
print (field, end = "\t")
for rec in Vcfin.fetch (set[0], int(set[1]), int(set[2])):
for i in range(0, len(rec.info["AC"])):
if rec.info["AC"][i] > 100 : # Common variant
# if rec.info["AC"][i] > 20 and (rec.info["AC"])[i] <= 100 : # Low frequency variant
# if rec.info["AC"][i] > 2 and (rec.info["AC"])[i] <= 20 : # Rare frequency variant
print (rec.chrom,rec.start,rec.stop,rec.alleles,(rec.info["AC"])[i] , end ="\t")
print("")
# break
|
[
"sailepradh@gmail.com"
] |
sailepradh@gmail.com
|
c39d1267df6c369e0dbf84fff115e1bc9bd83566
|
0342288382bdf29396ff25985b4a9502ad1f8037
|
/LeafNATS/engines/end2end.py
|
986e8775e42ac5fb61b186d0b524c285b337a519
|
[
"MIT",
"GPL-3.0-only"
] |
permissive
|
rajpratim/TREQS
|
321455e9bae3468040d525984401565882080a99
|
773e946aeb2013679b8824e177c4d5a3e6268118
|
refs/heads/master
| 2020-07-02T18:02:52.247008
| 2019-08-10T05:20:00
| 2019-08-10T05:20:00
| 201,615,111
| 0
| 1
|
MIT
| 2019-08-10T10:29:11
| 2019-08-10T10:29:11
| null |
UTF-8
|
Python
| false
| false
| 16,600
|
py
|
'''
@author Tian Shi
Please contact tshi@vt.edu
'''
import glob
import json
import os
import pickle
import re
import shutil
import time
from pprint import pprint
import numpy as np
import torch
from torch.autograd import Variable
from LeafNATS.data.utils import create_batch_file
from LeafNATS.utils.utils import show_progress
class natsEnd2EndBase(object):
'''
    This engine is for end-to-end training of seq2seq models.
    It can also be used in other cases without any changes, e.g., classification and QA.
    Here, we try to make multi-task learning, transfer learning, and reuse of pretrained models easy.
    We have not tried RL training.
'''
def __init__(self, args=None):
'''
Initialize
'''
self.args = args
self.base_models = {}
self.train_models = {}
self.batch_data = {}
self.global_steps = 0
def build_vocabulary(self):
'''
vocabulary
'''
raise NotImplementedError
def build_models(self):
'''
Models:
        self.base_models: pretrained models that are loaded before training and, by default,
            kept frozen (they are only updated when the train_base_model option is set).
Format: {'name1': model1, 'name2': model2}
self.train_models: models that will be trained.
Format: {'name1': model1, 'name2': model2}
'''
raise NotImplementedError
def init_base_model_params(self):
'''
Initialize Base Model Parameters.
self.base_models.
'''
raise NotImplementedError
def build_pipelines(self):
'''
Pipelines and loss here.
'''
raise NotImplementedError
def build_optimizer(self, params):
'''
define optimizer
'''
raise NotImplementedError
def print_info_train(self):
'''
Print additional information on screen.
'''
print('NATS Message: ')
def build_batch(self, batch_id):
'''
process batch data.
'''
raise NotImplementedError
def test_worker(self, _nbatch):
'''
Used in decoding.
Users can define their own decoding process.
You do not have to worry about path and prepare input.
'''
raise NotImplementedError
def app_worker(self):
'''
For application.
'''
raise NotImplementedError
def train(self):
'''
training here.
Don't overwrite.
'''
self.build_vocabulary()
self.build_models()
print(self.base_models)
print(self.train_models)
if len(self.base_models) > 0:
self.init_base_model_params()
# here it is necessary to put list. Instead of directly append.
for model_name in self.train_models:
try:
params += list(self.train_models[model_name].parameters())
except:
params = list(self.train_models[model_name].parameters())
if self.args.train_base_model:
for model_name in self.base_models:
try:
params += list(self.base_models[model_name].parameters())
except:
params = list(self.base_models[model_name].parameters())
# define optimizer
optimizer = self.build_optimizer(params)
# load checkpoint
uf_model = [0, -1]
out_dir = os.path.join('..', 'nats_results')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if self.args.continue_training:
model_para_files = glob.glob(os.path.join(out_dir, '*.model'))
if len(model_para_files) > 0:
uf_model = []
for fl_ in model_para_files:
arr = re.split(r'\/', fl_)[-1]
arr = re.split(r'\_|\.', arr)
arr = [int(arr[-3]), int(arr[-2])]
if arr not in uf_model:
uf_model.append(arr)
cc_model = sorted(uf_model)[-1]
try:
print("Try *_{}_{}.model".format(cc_model[0], cc_model[1]))
for model_name in self.train_models:
fl_ = os.path.join(
out_dir, model_name+'_'+str(cc_model[0])+'_'+str(cc_model[1])+'.model')
self.train_models[model_name].load_state_dict(
torch.load(fl_, map_location=lambda storage, loc: storage))
except:
cc_model = sorted(uf_model)[-2]
print("Try *_{}_{}.model".format(cc_model[0], cc_model[1]))
for model_name in self.train_models:
fl_ = os.path.join(
out_dir, model_name+'_'+str(cc_model[0])+'_'+str(cc_model[1])+'.model')
self.train_models[model_name].load_state_dict(
torch.load(fl_, map_location=lambda storage, loc: storage))
print(
'Continue training with *_{}_{}.model'.format(cc_model[0], cc_model[1]))
uf_model = cc_model
else:
shutil.rmtree(out_dir)
os.mkdir(out_dir)
# train models
fout = open('../nats_results/args.pickled', 'wb')
pickle.dump(self.args, fout)
fout.close()
start_time = time.time()
cclb = 0
for epoch in range(uf_model[0], self.args.n_epoch):
n_batch = create_batch_file(
path_data=self.args.data_dir,
path_work=os.path.join('..', 'nats_results'),
is_shuffle=True,
fkey_=self.args.task,
file_=self.args.file_corpus,
batch_size=self.args.batch_size,
is_lower=self.args.is_lower
)
print('The number of batches: {}'.format(n_batch))
self.global_steps = n_batch * max(0, epoch)
for batch_id in range(n_batch):
self.global_steps += 1
if cclb == 0 and batch_id <= uf_model[1]:
continue
else:
cclb += 1
self.build_batch(batch_id)
loss = self.build_pipelines()
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(params, self.args.grad_clip)
optimizer.step()
end_time = time.time()
if batch_id % self.args.checkpoint == 0:
for model_name in self.train_models:
fmodel = open(os.path.join(
out_dir, model_name+'_'+str(epoch)+'_'+str(batch_id)+'.model'), 'wb')
torch.save(
self.train_models[model_name].state_dict(), fmodel)
fmodel.close()
if batch_id % 1 == 0:
end_time = time.time()
print('epoch={}, batch={}, loss={}, time_escape={}s={}h'.format(
epoch, batch_id,
loss.data.cpu().numpy(),
end_time-start_time, (end_time-start_time)/3600.0
))
self.print_info_train()
del loss
for model_name in self.train_models:
fmodel = open(os.path.join(
out_dir, model_name+'_'+str(epoch)+'_'+str(batch_id)+'.model'), 'wb')
torch.save(self.train_models[model_name].state_dict(), fmodel)
fmodel.close()
def validate(self):
'''
Validation here.
Don't overwrite.
'''
self.build_vocabulary()
self.build_models()
pprint(self.base_models)
pprint(self.train_models)
if len(self.base_models) > 0:
self.init_base_model_params()
best_arr = []
val_file = os.path.join('..', 'nats_results', 'model_validate.txt')
if os.path.exists(val_file):
fp = open(val_file, 'r')
for line in fp:
arr = re.split(r'\s', line[:-1])
best_arr.append(
[arr[0], arr[1], arr[2], float(arr[3]), float(arr[4])])
fp.close()
for model_name in self.base_models:
self.base_models[model_name].eval()
for model_name in self.train_models:
self.train_models[model_name].eval()
with torch.no_grad():
while 1:
model_para_files = []
model_para_files = glob.glob(os.path.join(
'..', 'nats_results', sorted(list(self.train_models))[0]+'*.model'))
for j in range(len(model_para_files)):
arr = re.split(r'\_|\.', model_para_files[j])
arr = [int(arr[-3]), int(arr[-2]), model_para_files[j]]
model_para_files[j] = arr
model_para_files = sorted(model_para_files)
for fl_ in model_para_files:
best_model = {itm[0]: itm[3] for itm in best_arr}
if fl_[-1] in best_model:
continue
print('Validate *_{}_{}.model'.format(fl_[0], fl_[1]))
losses = []
start_time = time.time()
if os.path.exists(fl_[-1]):
time.sleep(3)
try:
for model_name in self.train_models:
fl_tmp = os.path.join(
'..', 'nats_results',
model_name+'_'+str(fl_[0])+'_'+str(fl_[1])+'.model')
self.train_models[model_name].load_state_dict(
torch.load(fl_tmp, map_location=lambda storage, loc: storage))
except:
continue
else:
continue
val_batch = create_batch_file(
path_data=self.args.data_dir,
path_work=os.path.join('..', 'nats_results'),
is_shuffle=True,
fkey_=self.args.task,
file_=self.args.file_val,
batch_size=self.args.batch_size
)
print('The number of batches (test): {}'.format(val_batch))
if self.args.val_num_batch > val_batch:
self.args.val_num_batch = val_batch
for batch_id in range(self.args.val_num_batch):
self.build_batch(batch_id)
loss = self.build_pipelines()
losses.append(loss.data.cpu().numpy())
show_progress(batch_id+1, self.args.val_num_batch)
print()
losses = np.array(losses)
end_time = time.time()
if self.args.use_move_avg:
try:
losses_out = 0.9*losses_out + \
0.1*np.average(losses)
except:
losses_out = np.average(losses)
else:
losses_out = np.average(losses)
best_arr.append(
[fl_[2], fl_[0], fl_[1], losses_out, end_time-start_time])
best_arr = sorted(best_arr, key=lambda bb: bb[3])
if best_arr[0][0] == fl_[2]:
out_dir = os.path.join('..', 'nats_results', 'model')
try:
shutil.rmtree(out_dir)
except:
pass
os.mkdir(out_dir)
for model_name in self.base_models:
fmodel = open(os.path.join(
out_dir, model_name+'.model'), 'wb')
torch.save(
self.base_models[model_name].state_dict(), fmodel)
fmodel.close()
for model_name in self.train_models:
fmodel = open(os.path.join(
out_dir, model_name+'.model'), 'wb')
torch.save(
self.train_models[model_name].state_dict(), fmodel)
fmodel.close()
try:
shutil.copy2(os.path.join(
self.args.data_dir, self.args.file_vocab), out_dir)
except:
pass
for itm in best_arr[:self.args.nbestmodel]:
print('model={}_{}, loss={}, time={}'.format(
itm[1], itm[2], itm[3], itm[4]))
for itm in best_arr[self.args.nbestmodel:]:
tarr = re.split(r'_|\.', itm[0])
if tarr[-2] == '0':
continue
if os.path.exists(itm[0]):
for model_name in self.train_models:
fl_tmp = os.path.join(
'..', 'nats_results',
model_name+'_'+str(itm[1])+'_'+str(itm[2])+'.model')
os.unlink(fl_tmp)
fout = open(val_file, 'w')
for itm in best_arr:
if len(itm) == 0:
continue
fout.write(' '.join([itm[0], str(itm[1]), str(
itm[2]), str(itm[3]), str(itm[4])])+'\n')
fout.close()
def test(self):
'''
testing
Don't overwrite.
'''
self.build_vocabulary()
self.build_models()
pprint(self.base_models)
pprint(self.train_models)
if len(self.base_models) > 0:
self.init_base_model_params()
_nbatch = create_batch_file(
path_data=self.args.data_dir,
path_work=os.path.join('..', 'nats_results'),
is_shuffle=False,
fkey_=self.args.task,
file_=self.args.file_test,
batch_size=self.args.test_batch_size
)
print('The number of batches (test): {}'.format(_nbatch))
for model_name in self.base_models:
self.base_models[model_name].eval()
for model_name in self.train_models:
self.train_models[model_name].eval()
with torch.no_grad():
if self.args.use_optimal_model:
model_valid_file = os.path.join(
'..', 'nats_results', 'model_validate.txt')
fp = open(model_valid_file, 'r')
for line in fp:
arr = re.split(r'\s', line[:-1])
model_optimal_key = ''.join(
['_', arr[1], '_', arr[2], '.model'])
break
fp.close()
else:
arr = re.split(r'\D', self.args.model_optimal_key)
model_optimal_key = ''.join(
['_', arr[0], '_', arr[1], '.model'])
print("You choose to use *{} for decoding.".format(model_optimal_key))
for model_name in self.train_models:
model_optimal_file = os.path.join(
'..', 'nats_results', model_name+model_optimal_key)
self.train_models[model_name].load_state_dict(torch.load(
model_optimal_file, map_location=lambda storage, loc: storage))
self.test_worker(_nbatch)
print()
def app2Go(self):
'''
For the application.
Don't overwrite.
'''
self.build_vocabulary()
self.build_models()
for model_name in self.train_models:
self.base_models[model_name] = self.train_models[model_name]
pprint(self.base_models)
if len(self.base_models) > 0:
self.init_base_model_params()
for model_name in self.base_models:
self.base_models[model_name].eval()
with torch.no_grad():
while 1:
self.app_worker()
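# --- Hedged usage sketch (not part of the original engine) ---
# A concrete task subclasses the engine and fills in the abstract hooks; the class name
# and the learning-rate argument below are hypothetical.
#
#   class mySeq2SeqTask(natsEnd2EndBase):
#       def build_vocabulary(self): ...            # load word-to-id mappings into self.batch_data
#       def build_models(self): ...                # populate self.base_models / self.train_models
#       def build_optimizer(self, params):
#           return torch.optim.Adam(params, lr=self.args.learning_rate)
#       def build_batch(self, batch_id): ...       # fill self.batch_data for this batch
#       def build_pipelines(self): ...             # return the training loss tensor
#       def test_worker(self, _nbatch): ...        # decode and write predictions
#
#   mySeq2SeqTask(args).train()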
|
[
"wangpinggl@gmail.com"
] |
wangpinggl@gmail.com
|
d845a35fbcb26effd1e00ba291cc588970a99974
|
58a0ba5ee99ec7a0bba36748ba96a557eb798023
|
/Olympiad Solutions/URI/1383.py
|
49e1281ab9a943fd05a98225692854fb1e43eef5
|
[
"MIT"
] |
permissive
|
adityanjr/code-DS-ALGO
|
5bdd503fb5f70d459c8e9b8e58690f9da159dd53
|
1c104c33d2f56fe671d586b702528a559925f875
|
refs/heads/master
| 2022-10-22T21:22:09.640237
| 2022-10-18T15:38:46
| 2022-10-18T15:38:46
| 217,567,198
| 40
| 54
|
MIT
| 2022-10-18T15:38:47
| 2019-10-25T15:50:28
|
C++
|
UTF-8
|
Python
| false
| false
| 823
|
py
|
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1383
#!/usr/bin/env python2.7
# encoding : utf-8
ordem = int(raw_input())
padrao = range(1,10)
resposta = []
for j in xrange(ordem):
aquilo = "Instancia %d" % (j+1)
resposta.append(aquilo)
array = []
teste = 0
for i in xrange(9):
array.append([int(k) for k in raw_input().split(" ")])
for i in array:
if sorted(i) != padrao:
teste = 1
for i in zip(*array):
if sorted(list(i)) != padrao:
teste = 1
for i in [0,3,6]:
for k in [0,3,6]:
if sorted([array[i][k],array[i][k+1],array[i][k+2],array[i+1][k],array[i+1][k+1],array[i+1][k+2],array[i+2][k],array[i+2][k+1],array[i+2][k+2]]) != padrao:
teste = 1
if teste == 1:
resposta.extend(["NAO",""])
else:
resposta.extend(["SIM",""])
for j in resposta:
print j
|
[
"samant04aditya@gmail.com"
] |
samant04aditya@gmail.com
|
a2f3b5ebace3044d558ab70daa354bccd7830a35
|
9ca7c1c36ada61da86a631e2e1e867dfbbd728b0
|
/booksvenv/bin/pip2
|
bf31215135ccf45832e1eb58b17f7c75441885e3
|
[] |
no_license
|
SamCD/BooksLibrary
|
e4e4afb5e1e5fbeb258058d2437a6f57624cecf0
|
bed01595948c3520d7a9d5fa622efa7efccb039a
|
refs/heads/master
| 2021-01-01T17:47:13.912366
| 2015-05-20T15:26:28
| 2015-05-20T15:26:28
| 35,551,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
#!/Users/samdevries/BooksLibrary/booksvenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"samuel.cohen-devries@spsmail.cuny.edu"
] |
samuel.cohen-devries@spsmail.cuny.edu
|
|
2eda5e250552c59ee04b8494c22fdb56912333d0
|
d05fa3feec04c19dad98479a136629039e3bc321
|
/casepro/contacts/migrations/0011_migrate_suspend_groups.py
|
6064ce3376342c6250de3627e62962f6353ea499
|
[
"BSD-3-Clause"
] |
permissive
|
digideskio/casepro
|
dc3f028ad8a6f49c01cff2dc56bf7c2ce92166c9
|
54ad1fb58300c86ae07344b2094681106d34b146
|
refs/heads/master
| 2020-05-29T11:02:58.368483
| 2016-03-10T10:55:40
| 2016-03-10T10:55:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.db import migrations
def create_suspend_groups(apps, schema_editor):
Org = apps.get_model('orgs', 'Org')
Group = apps.get_model('contacts', 'Group')
num_created = 0
num_updated = 0
for org in Org.objects.all():
config = json.loads(org.config) if org.config else {}
suspend_groups = config.get('suspend_groups', [])
for group_uuid in suspend_groups:
group = Group.objects.filter(org=org, uuid=group_uuid).first()
if group:
group.suspend_from = True
group.save(update_fields=('suspend_from',))
num_updated += 1
else:
# create placeholder which will be updated in next contacts sync
Group.objects.create(org=org, uuid=group_uuid, name="Syncing...", suspend_from=True, is_active=False)
num_created += 1
if num_created or num_updated:
print "Migrated org suspend groups (%d created, %d updated)" % (num_created, num_updated)
class Migration(migrations.Migration):
dependencies = [
('contacts', '0010_group_suspend_from'),
]
operations = [
migrations.RunPython(create_suspend_groups)
]
|
[
"rowanseymour@gmail.com"
] |
rowanseymour@gmail.com
|
e6ba3ac66a8898dea92e6348d518bdbf659925b9
|
f7f49b9270770cde802cb4cc2eea4f0ade10a37c
|
/main.py
|
6e3c82fd5e5b3925f52d3e3119d7cd6ef7c79263
|
[] |
no_license
|
dphillips97/get_naics
|
f590c4e50cef44e127bd2387fecb5eb6c737fec7
|
741578a381a81c71f1affaeb8595310cd48f0df2
|
refs/heads/master
| 2021-04-03T01:08:38.281556
| 2018-03-12T17:15:50
| 2018-03-12T17:15:50
| 124,593,805
| 0
| 0
| null | 2018-03-12T01:14:05
| 2018-03-09T21:12:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,952
|
py
|
import requests
from bs4 import BeautifulSoup
import openpyxl
import re
import time
'''
1st function call. Searches siccode.com for business name (from Excel)
and returns all <a> tags if there are multiple results. One of these urls
on the results_page will be the business in Madison Heights/MI.
'''
# biz_name is from spreadsheet
def get_site(biz_name):
# returns all search results on 1 page
search_url = r'https://siccode.com/en/business-list/' + biz_name
# get page object, save as string
results_page = requests.get(search_url)
results_page_doc = results_page.text
# create soup object
soup = BeautifulSoup(results_page_doc, 'html.parser')
# get all <a>s to use regex on
results_links = soup.find_all('a')
# return all soup <a> objects
return results_links
'''
2nd function call. Loops thru all <a> links on search results page and
uses regex to find link of business in Michigan (starts with 480..).
'''
# takes as argument all <a> links from business search results
def get_url(results_links):
for link in results_links:
# returns a string using a BS method
url = link.get('href')
# make regex object to find first(?) url that contains MI zip code
url_regex = r'.*48\d\d\d'
try:
# create match object
mo = re.search(url_regex, url)
# url string that points to business-specific page
biz_url = mo.group()
# might return None(?)
return biz_url
# if mo == None:
except:
pass
'''
3rd function call. Calls page specific to business and
gets NAICS code on that page. May eventually merge this
with 2nd function call.
'''
def call_url(biz_url):
# calls business-specific page
biz_page_url = r'http://siccode.com' + biz_url
# converts entire page object to text
biz_page = requests.get(biz_page_url)
biz_page_doc = biz_page.text
# gets all <a> text from page
soup_2 = BeautifulSoup(biz_page_doc, 'html.parser')
naics_a = soup_2.find_all('a')
# for each element in <a> object
for elem in naics_a:
# change each one into text; move outside for loop?
elem_doc = elem.text
# gets 6-digit naics code and description after dash
elem_regex = r'^\s(\d{6})\s-\s(.*)'
# may return None; handle this downstream
try:
mo = re.search(elem_regex, elem_doc)
code = mo.group(1)
descr = mo.group(2)
return code, descr
except:
pass
# main loop
def master():
# create workbook & sheet(s) objects
wb = openpyxl.load_workbook('biz.xlsx')
sheet = wb.active
max_col = sheet.max_column
# create headers
sheet.cell(row = 1, column = max_col + 1, value = 'naics code')
sheet.cell(row = 1, column = max_col + 2, value = 'description')
#loop through rows to get biz name
for row_iter in range(2, sheet.max_row + 1):
# get business names from Excel
biz_name = sheet.cell(row = row_iter, column = 5).value
# print record being looked up and print result on same line
print('Looking up code for %s...' % (biz_name), end = '');
# returns soup object of all results matching business name
results_links = get_site(biz_name)
# returns url segment specific to MI business
biz_url = get_url(results_links)
try:
# uses regex to get naics code from specific website
# handle None type below
code, descr = call_url(biz_url)
# let user know on CLI (mostly for fun)
print(' %s - %s' % (str(code), descr))
# write results to sheet
sheet.cell(row = row_iter, column = max_col + 1, value = code)
sheet.cell(row = row_iter, column = max_col + 2, value = descr)
# rest for a moment to avoid battering their servers
time.sleep(5)
# if biz_url is None
except:
sheet.cell(row = row_iter, column = max_col + 1, value = 'NOT FOUND')
print(' %s' % 'NOT FOUND')
# save temp file every 250 records
if (row_iter % 251 == 0):
wb.save('biz_temp_%i.xlsx' % (row_iter-1))
wb.save('biz_coded.xlsx')
master()
|
[
"dphillips97@gmail.com"
] |
dphillips97@gmail.com
|
282e01cb76a6f171cf0789150afa6235eb4fa83e
|
3a238c585bb77b6a810d187d571cb2531ad58652
|
/valuation.py.bak.py
|
031deba021575bc2a173320d20e62575d974d500
|
[] |
no_license
|
majidmvulle/vehicle-valuations-py
|
21b050b7fba6e6c619f4932dd535eeb2526bd536
|
24fc3e4426acf5bbd76aa1161ddf7db1d7b64cdb
|
refs/heads/master
| 2020-04-12T14:05:38.273507
| 2018-12-17T08:08:04
| 2018-12-17T08:08:04
| 162,541,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
__author__ = 'Majid Mvulle'
from sklearn import svm
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVR
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
import json
import sys
from sklearn.feature_extraction import DictVectorizer
import numpy as np
from sklearn import ensemble
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--file", "-f", type=str, required=True)
args = parser.parse_args()
with open(args.file) as json_file:
full_dataset = json.dumps(json.load(json_file))
# clf1 = LogisticRegression(random_state=1)
# clf2 = RandomForestClassifier(random_state=1)
# clf3 = GaussianNB()
df = pd.read_json(full_dataset)
# df.drop('z_price', axis=1)
car = df.values
X, y = car[:, :-1], car[:, -1]
X, y = X.astype(int), y.astype(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=0)
#
scaler = preprocessing.MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.fit_transform(X_test)
#eclf5 = BaggingClassifier()
#eclf5.fit(X_train, y_train)
#prediction = eclf5.predict(X_test)
#print('{"price": '+str(prediction[0])+"}")
eclf6 = DecisionTreeClassifier()
eclf6.fit(X_train_scaled, y_train)
prediction = eclf6.predict(X_test_scaled)
print('{"price": '+str(prediction[0])+"}")
#lm = LinearRegression()
#lm.fit(X_train, y_train)
#prediction = lm.predict(X_test)
#print('{"price": '+str(prediction[0])+"}")
#rf = RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None, max_features='auto', max_leaf_nodes=None, min_samples_leaf=1, min_samples_split=2, n_estimators=500, n_jobs=1, oob_score=False, random_state=None, verbose=0)
#rf.fit(X_train, y_train)
#prediction = rf.predict(X_test)
#print('{"price": '+str(prediction[0])+"}")
# print(clf.score(X_test_scaled, y_test))
#lr = LogisticRegression()
#lr.fit(X_train, y_train)
#prediction = lr.predict(X_test)
#print('{"price": '+str(prediction[0])+"}")
|
[
"majid@majidmvulle.com"
] |
majid@majidmvulle.com
|
2750d95d9a234865ce4e69664a00150eba2adc59
|
ff14cd290a7ddcce5de31ebf08dabb65181480d1
|
/test/solve.py
|
28384b5557449514300a30301b814cf10519c4e3
|
[] |
no_license
|
ohk990102/pwntools-addon-dockerized
|
ea56fbf1181972d7c3e6bd045f0bf4194dc524cb
|
361851b0fced672f62cc88fb7c17d416ce1a9f68
|
refs/heads/master
| 2022-11-12T14:51:01.963599
| 2020-07-04T16:01:02
| 2020-07-04T16:01:02
| 277,038,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
from pwn import *
from dpwn import dockerized
# context.log_level = 'debug'
context.terminal = [ '/home/ohk990102/vscode-terminal' ]
if __name__ == "__main__":
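    # run ./realloc through dpwn's dockerized() helper: ubuntu:19.04 base image, no generated Dockerfile, gdb enabled (gdbsock is attached below)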
p = dockerized('./realloc', baseimage='ubuntu:19.04', prefer_dockerfile=False, withgdb=True)
gdb.attach(p.gdbsock, exe='./realloc')
p.interactive()
|
[
"ohk990102@naver.com"
] |
ohk990102@naver.com
|
7690eaa3ec1c6f41e0e564d537acf9b26d2baafb
|
717ed5f54ff1244f6d85df1c3207b584767dca35
|
/detect_face.py
|
4a7b647a34839b9fd5b28384218a8f82c4f72ae3
|
[] |
no_license
|
yasin-esfandiari/IranianCelebritiesFacialRecognition
|
e9a46f42d3deb15d982b6ee440022179ebe1e97e
|
4d2c718b49170ce1aa801b7aa74b423b7f1e1c8a
|
refs/heads/master
| 2020-07-26T22:13:43.938805
| 2019-09-16T11:17:26
| 2019-09-16T11:17:26
| 208,780,882
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,501
|
py
|
""" Tensorflow implementation of the face detection / alignment algorithm found at
https://github.com/kpzhang93/MTCNN_face_detection_alignment
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types, iteritems
import numpy as np
import tensorflow as tf
#from math import floor
import cv2
import os
def layer(op):
"""Decorator for composable network layers."""
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
"""Construct the network. """
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
"""Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
"""
data_dict = np.load(data_path, encoding = 'latin1', allow_pickle = True).item() #pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
"""Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
"""
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
"""Returns the current network output."""
return self.terminals[-1]
def get_unique_name(self, prefix):
"""Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
"""
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
"""Creates a new TensorFlow variable."""
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
"""Verifies that the padding is one of the supported ones."""
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= int(d)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keep_dims=True)
target_exp = tf.exp(target-max_axis)
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3,name='prob1'))
(self.feed('PReLU3') #pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1,name='prob1'))
(self.feed('prelu4') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
def create_mtcnn(sess, model_path):
if not model_path:
model_path,_ = os.path.split(os.path.realpath(__file__))
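    # build the three cascade networks (PNet -> RNet -> ONet) and load their pretrained weights from det1/det2/det3.npy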
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
pnet = PNet({'data':data})
pnet.load(os.path.join(model_path, 'det1.npy'), sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
rnet = RNet({'data':data})
rnet.load(os.path.join(model_path, 'det2.npy'), sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
onet = ONet({'data':data})
onet.load(os.path.join(model_path, 'det3.npy'), sess)
pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
"""Detects faces in an image, and returns bounding boxes and points for them.
img: input image
minsize: minimum faces' size
pnet, rnet, onet: caffemodel
threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
factor_count=0
total_boxes=np.empty((0,9))
points=np.empty(0)
h=img.shape[0]
w=img.shape[1]
minl=np.amin([h, w])
m=12.0/minsize
minl=minl*m
# create scale pyramid
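    # each pyramid level shrinks the image by factor until the shorter side falls below PNet's 12-pixel input size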
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
factor_count += 1
# first stage
for scale in scales:
hs=int(np.ceil(h*scale))
ws=int(np.ceil(w*scale))
im_data = imresample(img, (hs, ws))
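        # normalize pixel values to roughly [-1, 1] (0.0078125 == 1/128)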
im_data = (im_data-127.5)*0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0,2,1,3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0,2,1,3))
out1 = np.transpose(out[1], (0,2,1,3))
boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size>0 and pick.size>0:
boxes = boxes[pick,:]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox>0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick,:]
regw = total_boxes[:,2]-total_boxes[:,0]
regh = total_boxes[:,3]-total_boxes[:,1]
qq1 = total_boxes[:,0]+total_boxes[:,5]*regw
qq2 = total_boxes[:,1]+total_boxes[:,6]*regh
qq3 = total_boxes[:,2]+total_boxes[:,7]*regw
qq4 = total_boxes[:,3]+total_boxes[:,8]*regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox>0:
# second stage
tempimg = np.zeros((24,24,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (24, 24))
else:
                return np.empty(0)
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1,:]
ipass = np.where(score>threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
if total_boxes.shape[0]>0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick,:]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox>0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48,48,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (48, 48))
else:
                return np.empty(0)
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1,:]
points = out1
ipass = np.where(score>threshold[2])
points = points[:,ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
w = total_boxes[:,2]-total_boxes[:,0]+1
h = total_boxes[:,3]-total_boxes[:,1]+1
points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
if total_boxes.shape[0]>0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick,:]
points = points[:,pick]
return total_boxes, points
def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor):
"""Detects faces in a list of images
images: list containing input images
detection_window_size_ratio: ratio of minimum face size to smallest image dimension
pnet, rnet, onet: caffemodel
threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold [0-1]
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
all_scales = [None] * len(images)
images_with_boxes = [None] * len(images)
for i in range(len(images)):
images_with_boxes[i] = {'total_boxes': np.empty((0, 9))}
# create scale pyramid
for index, img in enumerate(images):
all_scales[index] = []
h = img.shape[0]
w = img.shape[1]
minsize = int(detection_window_size_ratio * np.minimum(w, h))
factor_count = 0
minl = np.amin([h, w])
if minsize <= 12:
minsize = 12
m = 12.0 / minsize
minl = minl * m
while minl >= 12:
all_scales[index].append(m * np.power(factor, factor_count))
minl = minl * factor
factor_count += 1
# # # # # # # # # # # # #
# first stage - fast proposal network (pnet) to obtain face candidates
# # # # # # # # # # # # #
images_obj_per_resolution = {}
# TODO: use some type of rounding to number module 8 to increase probability that pyramid images will have the same resolution across input images
for index, scales in enumerate(all_scales):
h = images[index].shape[0]
w = images[index].shape[1]
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
if (ws, hs) not in images_obj_per_resolution:
images_obj_per_resolution[(ws, hs)] = []
im_data = imresample(images[index], (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_y = np.transpose(im_data, (1, 0, 2)) # caffe uses different dimensions ordering
images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index})
for resolution in images_obj_per_resolution:
images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]]
outs = pnet(images_per_resolution)
for index in range(len(outs[0])):
scale = images_obj_per_resolution[resolution][index]['scale']
image_index = images_obj_per_resolution[resolution][index]['index']
out0 = np.transpose(outs[0][index], (1, 0, 2))
out1 = np.transpose(outs[1][index], (1, 0, 2))
boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'],
boxes,
axis=0)
for index, image_obj in enumerate(images_with_boxes):
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0]
regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1]
qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw
qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh
qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw
qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh
image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
numbox = image_obj['total_boxes'].shape[0]
tempimg = np.zeros((24, 24, 3, numbox))
if numbox > 0:
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
                    return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
# # # # # # # # # # # # #
# second stage - refinement of face candidates with rnet
# # # # # # # # # # # # #
bulk_rnet_input = np.empty((0, 24, 24, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' in image_obj:
bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0)
out = rnet(bulk_rnet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
i = 0
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' not in image_obj:
continue
rnet_input_count = image_obj['rnet_input'].shape[0]
score_per_image = score[i:i + rnet_input_count]
out0_per_image = out0[:, i:i + rnet_input_count]
ipass = np.where(score_per_image > threshold[1])
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
if image_obj['total_boxes'].shape[0] > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'], 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
tempimg = np.zeros((48, 48, 3, numbox))
image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
                    return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
i += rnet_input_count
# # # # # # # # # # # # #
# third stage - further refinement and facial landmarks positions with onet
# # # # # # # # # # # # #
bulk_onet_input = np.empty((0, 48, 48, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' in image_obj:
bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0)
out = onet(bulk_onet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
i = 0
ret = []
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' not in image_obj:
ret.append(None)
continue
onet_input_count = image_obj['onet_input'].shape[0]
out0_per_image = out0[:, i:i + onet_input_count]
score_per_image = score[i:i + onet_input_count]
points_per_image = points[:, i:i + onet_input_count]
ipass = np.where(score_per_image > threshold[2])
points_per_image = points_per_image[:, ipass[0]]
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1
h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1
points_per_image[0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile(
image_obj['total_boxes'][:, 0], (5, 1)) - 1
points_per_image[5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile(
image_obj['total_boxes'][:, 1], (5, 1)) - 1
if image_obj['total_boxes'].shape[0] > 0:
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv))
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
points_per_image = points_per_image[:, pick]
ret.append((image_obj['total_boxes'], points_per_image))
else:
ret.append(None)
i += onet_input_count
return ret
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox,reg):
"""Calibrate bounding boxes"""
if reg.shape[1]==1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:,2]-boundingbox[:,0]+1
h = boundingbox[:,3]-boundingbox[:,1]+1
b1 = boundingbox[:,0]+reg[:,0]*w
b2 = boundingbox[:,1]+reg[:,1]*h
b3 = boundingbox[:,2]+reg[:,2]*w
b4 = boundingbox[:,3]+reg[:,3]*h
boundingbox[:,0:4] = np.transpose(np.vstack([b1, b2, b3, b4 ]))
return boundingbox
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride=2
cellsize=12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:,:,0])
dy1 = np.transpose(reg[:,:,1])
dx2 = np.transpose(reg[:,:,2])
dy2 = np.transpose(reg[:,:,3])
y, x = np.where(imap >= t)
if y.shape[0]==1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y,x)]
reg = np.transpose(np.vstack([ dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)] ]))
if reg.size==0:
reg = np.empty((0,3))
bb = np.transpose(np.vstack([y,x]))
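    # map heatmap cell indices back to 12x12 window corners in the original image coordinates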
q1 = np.fix((stride*bb+1)/scale)
q2 = np.fix((stride*bb+cellsize-1+1)/scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg])
return boundingbox, reg
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
if boxes.size==0:
return np.empty((0,3))
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = (x2-x1+1) * (y2-y1+1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
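    # greedily keep the highest-scoring remaining box and suppress boxes whose overlap with it exceeds the threshold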
while I.size>0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
        if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o<=threshold)]
pick = pick[0:counter]
return pick
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
"""Compute the padding coordinates (pad the bounding boxes to square)"""
tmpw = (total_boxes[:,2]-total_boxes[:,0]+1).astype(np.int32)
tmph = (total_boxes[:,3]-total_boxes[:,1]+1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:,0].copy().astype(np.int32)
y = total_boxes[:,1].copy().astype(np.int32)
ex = total_boxes[:,2].copy().astype(np.int32)
ey = total_boxes[:,3].copy().astype(np.int32)
tmp = np.where(ex>w)
edx.flat[tmp] = np.expand_dims(-ex[tmp]+w+tmpw[tmp],1)
ex[tmp] = w
tmp = np.where(ey>h)
edy.flat[tmp] = np.expand_dims(-ey[tmp]+h+tmph[tmp],1)
ey[tmp] = h
tmp = np.where(x<1)
dx.flat[tmp] = np.expand_dims(2-x[tmp],1)
x[tmp] = 1
tmp = np.where(y<1)
dy.flat[tmp] = np.expand_dims(2-y[tmp],1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
"""Convert bboxA to square."""
h = bboxA[:,3]-bboxA[:,1]
w = bboxA[:,2]-bboxA[:,0]
l = np.maximum(w, h)
bboxA[:,0] = bboxA[:,0]+w*0.5-l*0.5
bboxA[:,1] = bboxA[:,1]+h*0.5-l*0.5
bboxA[:,2:4] = bboxA[:,0:2] + np.transpose(np.tile(l,(2,1)))
return bboxA
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #@UndefinedVariable
return im_data
# This method is kept for debugging purpose
# h=img.shape[0]
# w=img.shape[1]
# hs, ws = sz
# dx = float(w) / ws
# dy = float(h) / hs
# im_data = np.zeros((hs,ws,3))
# for a1 in range(0,hs):
# for a2 in range(0,ws):
# for a3 in range(0,3):
# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
# return im_data
|
[
"yasin.esfandiari1733@gmail.com"
] |
yasin.esfandiari1733@gmail.com
|
0d66598ee32ccfc0d64f148e09b532ab4ded4c4b
|
5711615cf36dd216a637b2bbe3ed57a93b86a308
|
/component.py
|
a1bd2ce145c54f2e752ce80476b684ab33497c59
|
[] |
no_license
|
kanandian/opencv_test
|
4f013734c10d3447989da45d66d5b65280ff48ee
|
a91d26cc4270486b8370e9a8004d085ad357726e
|
refs/heads/master
| 2020-09-11T21:04:05.745447
| 2019-11-29T13:12:34
| 2019-11-29T13:12:34
| 222,189,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,690
|
py
|
import constant
import random
import mthread
import math
import hand_dectect
from PyQt5.QtWidgets import QMainWindow, QLabel
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QBasicTimer, QRect
class Leaf:
def __init__(self):
self.pixmap = QPixmap('images/yezi.png')
self.label = 0
self.position_x = float(random.randint(0, constant.screen_width))
self.position_y = float(0)
self.rect = QRect(int(self.position_x), int(self.position_y), constant.leaf_width, constant.leaf_height)
self.dirction_x = -1
self.dirction_y = -1
self.speed = constant.move_speed
def check_edges(self, geometry):
if self.rect.right() < 0 or self.rect.left() > geometry.width() or self.rect.bottom() < 0 or self.rect.top() > geometry.height():
return True
return False
def update(self):
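        # a leaf with no sweep direction simply falls; otherwise it drifts along the direction set by sweep_at()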
if self.dirction_x == -1 and self.dirction_y == -1:
self.position_y += self.speed
else:
if self.dirction_x != -1:
self.position_x += self.dirction_x
if self.dirction_y != -1:
self.position_y += self.dirction_y
self.label.move(self.position_x, self.position_y)
self.rect.setX(self.position_x)
self.rect.setY(self.position_y)
class ApplicationByPyqt(QMainWindow):
def __init__(self):
super().__init__()
self.leaves = []
self.backgound_color = constant.background_color
self.hand_dectect = hand_dectect.HandDectectByHandXML()
self.initUI()
self.initTimer()
self.startThreads()
def initUI(self):
        # add the initial batch of leaves
for i in range(0,constant.initial_leaves_num):
self.add_leaf()
self.setGeometry(0, 0 , constant.screen_width, constant.screen_height)
self.setWindowTitle('interation_projection')
self.show()
def initTimer(self):
self.timer = QBasicTimer()
self.timer.start(30, self)
def startThreads(self):
self.add_leaf_thread = mthread.AddLeafThread(self)
self.add_leaf_thread.addLeafSingal.connect(self.add_leaf)
self.add_leaf_thread.start()
    # override the application close event
def closeEvent(self, event):
self.add_leaf_thread.running = False
self.vc.release()
def timerEvent(self, event):
if event.timerId() == self.timer.timerId():
self.handle_hand_motion_by_mode()
# self.capture_and_handle_frame()
self.update_leaves()
self.remove_invalid_leaves()
self.update()
else:
super(ApplicationByPyqt, self).timerEvent(event)
def handle_hand_motion_by_mode(self):
positions = self.hand_dectect.get_hand_positions(constant.dectect_mod)
for (x, y) in positions:
x *= constant.screen_width / constant.camera_width
y *= constant.screen_height / constant.camera_height
x = constant.screen_width - x
self.sweep_at(x, y)
def update_leaves(self):
for leaf in self.leaves:
leaf.update()
def add_leaf(self):
leaf = Leaf()
label = QLabel(self)
label.setPixmap(leaf.pixmap)
label.setGeometry(leaf.position_x, leaf.position_y, leaf.rect.width(), leaf.rect.height())
label.setScaledContents(True)
leaf.label = label
label.show()
self.leaves.append(leaf)
    def remove_invalid_leaves(self):
        # iterate over a copy: removing items from self.leaves while looping over it directly would skip elements
        for leaf in list(self.leaves):
            if leaf.check_edges(self.geometry()):
                self.leaves.remove(leaf)
def sweep_at(self, x, y):
fa = False
fb = False
for leaf in self.leaves:
dist_x = leaf.rect.center().x()-x
dist_y = leaf.rect.center().y()-y
distance = math.sqrt(math.pow(dist_x, 2) + math.pow(dist_y, 2))
if distance < constant.max_distance:
                leaf.speed = constant.sweep_speed
if dist_x < 0:
fa = True
if dist_y < 0:
fb = True
if dist_x == 0:
leaf.dirction_x = -1
leaf.dirction_y = constant.sweep_speed
if fb:
leaf.dirction_y *= -1
else:
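                    # split the sweep speed into x/y components along the hand-to-leaf direction (b = |dy/dx|)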
b = math.fabs(float(dist_y)/float(dist_x))
a = leaf.speed/math.sqrt(1+b*b)
b *= a
if fa:
a *= -1
if fb:
b *= -1
leaf.dirction_x = a
leaf.dirction_y = b
|
[
"893979892@qq.com"
] |
893979892@qq.com
|
7b982d89cc00e48375943057fe4852658acece6b
|
32be8eb323f230d3e97f321cf58ae799632289a0
|
/ddos/ddos_server.py
|
500309fe69c7a43e5cde5a60a44f84c470dc7d56
|
[] |
no_license
|
clouddreamfly/PyTools
|
838852c669423ccdaebf8cbb8fe29fece5fd24b4
|
329f985a0b981c41d9872158d6c9b814a9903c0c
|
refs/heads/master
| 2023-03-07T19:39:51.984162
| 2023-03-02T16:23:45
| 2023-03-02T16:23:45
| 240,286,058
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,387
|
py
|
#!/usr/bin/python
# coding:utf8
import socket
import argparse
import threading
import time
socketList = []
# send the command to every connected client socket
def sendCmd(cmd):
print('Send command....')
for sock in socketList:
sock.send(cmd.encode())
# wait for incoming connections and add each established connection to socketList
def waitConnect(s):
while True:
sock, addr = s.accept()
if sock not in socketList:
socketList.append(sock)
def main():
    # create the TCP server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1) # allow the address/port to be reused
s.bind(('0.0.0.0', 58888))
s.listen(1024)
    # start a thread that waits for connection requests
t = threading.Thread(target=waitConnect, args=(s,))
t.start()
print("Wait at least a client connection!")
    while not len(socketList): # keep waiting until at least one client has connected
time.sleep(0.1)
pass
print("It has been a client connection")
while True:
print('='*50)
        # command format
print('The command format:"#-H xxx.xxx.xxx.xxx -p xxxx -c start"')
        # wait for a command to be entered
cmd_str = input('please input cmd:')
print(cmd_str)
if len(cmd_str):
if cmd_str[0] == '#':
sendCmd(cmd_str)
if __name__ == '__main__':
main()
|
[
"dreamfly6789@gmail.com"
] |
dreamfly6789@gmail.com
|
af28415c6367cc31cfcb82f3f071ada9026749b2
|
1b5f653955779f45e78ca6dda925518779d09e8f
|
/submissions/1492.py
|
c75247fa6f7781dc034982b8cc8bd8da3cd85ab8
|
[] |
no_license
|
LeonardoSaid/uri-py-solutions
|
ad285f552934ead54ad2410e23113e84b0724f72
|
43c10c0e99e99d22b4b5ae2871e5d897f8823b42
|
refs/heads/master
| 2020-08-11T00:28:48.661578
| 2020-04-23T20:21:39
| 2020-04-23T20:21:39
| 214,453,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
import math
def count(n):
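    # total number of set bits in the binary representations of 1..n, accumulated one bit position (acc = 2^k) at a time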
ans = 0
acc = 1
while acc <= n:
a = n - acc + 1
b = 2*acc
ans += a // b * acc
ans += min(a % b, acc)
acc = 2* acc
return ans
def getAns(a,b):
return count(b) - count(a-1)
while True:
try:
a,b = map(int, input().split())
print(getAns(a,b))
except EOFError:
break
|
[
"noreply@github.com"
] |
LeonardoSaid.noreply@github.com
|
a5e19b23102e5dcf67188ec4e0263b1fcdb76ea3
|
54cfd2c2505da41fb4657c36ebc40e886bdb33c9
|
/model/graph.py
|
e87ee4a4a655d7e7ecf9854212a0ced865812f8a
|
[] |
no_license
|
VamshiTeja/Recommendation-Systems-Using-DL
|
c4bcf672e5b705a913a035c861d3082d357fc3e4
|
c92f04376645e80f9664baf39b41e2f6e23c0a52
|
refs/heads/master
| 2020-03-28T13:51:56.464480
| 2018-10-25T10:44:34
| 2018-10-25T10:44:34
| 148,436,653
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,152
|
py
|
# -*- coding: utf-8 -*-
# @Author: vamshi
# @Date: 2018-02-04 00:39:34
# @Last Modified by: vamshi
# @Last Modified time: 2018-02-10 12:32:44
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("../")
import dateutil.parser
import pickle
import os
import time
import pandas as pd
import numpy as np
import mmap
import argparse
from six.moves import cPickle
from functools import wraps
from tqdm import tqdm
from datetime import datetime
#from utils import hour2vec
import tensorflow as tf
from utils import *
lambdim = 180*24/0.5
def sur_loss_impl(lamb,b,e,sessLengths,dt,T):
'''
lamb: predicted lambda from rnn [batch_size, maxSessLen,lambdim]
	b : begin times of the sessions [batch_size,maxSessLen]
	e : end times of the sessions [batch_size,maxSessLen]
	dt: width of one time bin (30 minutes)
'''
loss = np.zeros(shape=(e.shape[0],),dtype=np.float32)
for i in range(lamb.shape[0]):
seslen = sessLengths[i]
st_time = b[i][0]
#calculate third term
loss3 = np.sum(lamb[i][seslen-1])*dt
loss2 = 0
for j in range(1,seslen):
loss2 += np.log(lamb[i][j][int(np.round((b[i][j]-e[i][j-1])/dt))])
loss1=0
for j in range(1,seslen):
et = e[i][j-1] - st_time
bnext = b[i][j] -st_time
dif = int((bnext -et)/dt)
for k in range(dif):
loss1 += lamb[i][j][k]
l = loss2+loss3+loss1
loss[i] = l
return np.float32(np.sum(loss))
def sur_loss(lamb,b,e,sessLengths,dt,T):
tf.RegisterGradient("sur_loss_grad")(sur_loss_grad)
g=tf.get_default_graph()
with g.gradient_override_map({"PyFunc":"sur_loss_grad"}):
return tf.py_func(sur_loss_impl,[lamb,b,e,sessLengths,dt,T],[tf.float32])[0]
def sur_loss_grad_impl(lamb):
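	# returns an all-zero gradient for lamb plus integer zeros for the remaining (non-tensor) inputs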
grad = np.zeros_like(lamb)
#num_batches
return [np.float32(grad),np.int32(0),np.int32(0),np.int32(0),np.int32(0),np.int32(0)]
def sur_loss_grad(op,grad):
lamb,b,e,sessLengths,dt,T=op.inputs[0],op.inputs[1],op.inputs[2],op.inputs[3],op.inputs[4],op.inputs[5]
return tf.py_func(sur_loss_grad_impl,[lamb],[tf.float32,tf.int32,tf.int32,tf.int32,tf.int32,tf.int32])#assume grad=1
def build_model(args,maxSessLen,sessLengths,gaps,d):
'''
gaps,d: [batch_size,maxSessLen]
inputs : tuple (gaps,d,u) gaps dim: batch_size*maxSessionLen
inputX : dim: [batch_size, maxSessLen, 3*embed_dim]
'''
graph = tf.get_default_graph()
with tf.variable_scope("gap_embedding"):
gap_embedding = tf.get_variable("gap_embedding",[args.n_gaps, args.embed_dim])
with tf.variable_scope("d_embedding"):
d_embedding = tf.get_variable("d_embedding",[168,args.embed_dim])
#user_embedding = tf.get_variable("user_embedding",[args.num_users, args.embed_dim])
gap_embedded = tf.nn.embedding_lookup(gap_embedding, gaps)
d_embedded = tf.nn.embedding_lookup(d_embedding, d)
#user_embedded = tf.nn.embedding_lookup(user_embedding, u)
inputX = tf.concat((gap_embedded,d_embedded), axis=2)
with tf.variable_scope("cell_def"):
lstm_cell = tf.contrib.rnn.LSTMCell(args.n_hidden)
with tf.variable_scope("rnn_def"):
output,states = tf.nn.dynamic_rnn(lstm_cell, inputX, sequence_length=sessLengths, time_major=False,dtype=tf.float32)
W = tf.get_variable("weights", (args.batch_size,args.n_hidden,args.lambdim),
initializer=tf.random_normal_initializer())
#b = tf.get_variable("biases", bias_shape,initializer=tf.constant_initializer(0.0))
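	# project the RNN hidden states to a per-step intensity vector; softplus keeps the predicted intensity non-negative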
lamb = tf.matmul(output, W)
return tf.nn.softplus(lamb)
class Model():
def __init__(self,args,maxSessLen):
self.args = args
self.maxSessLen = maxSessLen
def build_graph(self,args,maxSessLen):
self.graph = tf.Graph()
with self.graph.as_default():
self.inputg = tf.placeholder(dtype=tf.int32, shape=(args.batch_size,maxSessLen))
self.inputd = tf.placeholder(dtype=tf.int32, shape=(args.batch_size,maxSessLen))
self.inputb = tf.placeholder(dtype=tf.int32, shape=(args.batch_size,maxSessLen))
self.inpute = tf.placeholder(dtype=tf.int32, shape=(args.batch_size,maxSessLen))
self.target_gaps = tf.placeholder(dtype=tf.int32, shape=(args.batch_size,1))
self.sessLengths = tf.placeholder(dtype=tf.int32,shape=[args.batch_size])
self.lamb = build_model(args,maxSessLen,self.sessLengths,self.inputg,self.inputd)
self.loss = sur_loss(self.lamb,self.inputb,self.inpute,self.sessLengths,args.dt,args.T)
self.var_op = tf.global_variables()
self.var_trainable_op = tf.trainable_variables()
if args.grad_clip == -1:
self.optimizer = tf.train.AdamOptimizer(args.learning_rate).minimize(self.loss)
else:
grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, self.var_trainable_op), args.grad_clip)
opti = tf.train.AdamOptimizer(args.learning_rate)
self.optimizer = opti.apply_gradients(zip(grads, self.var_trainable_op))
			self.initial_op = tf.global_variables_initializer()
self.summary_op = tf.summary.merge_all()
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5, keep_checkpoint_every_n_hours=1)
#if __name__ == "__main__":
#objName = Model()
#objName.build_graph()
|
[
"rachavamshiteja@gmail.com"
] |
rachavamshiteja@gmail.com
|