File: Convert_a_Number_to_Hexadecimal.py | Repo: thydeyx/LeetCode-Python | Stars: 1
# -*- coding:utf-8 -*-
#
# Author : TangHanYi
# E-mail : <EMAIL>
# Create Date : 2016-11-21 04:35:06 PM
# Last modified : 2016-11-21 04:51:12 PM
# File Name : Convert_a_Number_to_Hexadecimal.py
# Desc :
class Solution(object):
def toHex(self, num):
if num == 0:
return "0"
hexList = []
tmp = 0
hexDict = {10:'a', 11:'b', 12:'c', 13:'d', 14:'e', 15:'f'}
for i in range(33):
if i % 4 == 0 and i != 0:
hexList.append(tmp)
tmp = 0
if (num & (1 << i)) != 0:
tmp = tmp | 1 << (i % 4)
hexList = hexList[::-1]
ret = []
begin = 0
for i in hexList:
if i != 0:
break
begin += 1
for i in range(begin, 8):
if hexList[i] < 10:
ret.append(str(hexList[i]))
else:
ret.append(hexDict[hexList[i]])
return ''.join(ret)
if __name__ == "__main__":
s = Solution()
num = -1
    print(s.toHex(num))
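    # Added illustrative checks (not in the original file):
    assert s.toHex(26) == '1a'        # 26 -> 0x1a
    assert s.toHex(-1) == 'ffffffff'  # two's-complement over 32 bits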
File: FLASH4.2.1_save/tools/python/flmake/log.py | Repo: mtsafarzadeh/FLASHverES | Stars: 1
import os
from datetime import datetime
USAGE = ("Displays the history of all (or N) previously\n"
"executed flmake commands and their metadata.\n\n"
"usage: flmake log [-n <N>]")
def _parse_row(row):
t, cmd, user, logid, runid, d, msg = row.split(',', 6)
return float(t), cmd, user, logid, runid, d, msg[1:-1]
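# Illustrative (added): _parse_row expects a comma-separated flash.log row
# with seven fields and a quoted message, e.g.
#   1354000000.0,run,bob,<logid>,<runid>,/path/to/rundir,"first run"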
def main(ns, rc):
"""Displays the history of flmake commands."""
if not os.path.exists('flash.log'):
return
with open('flash.log') as f:
loglines = f.readlines()[::-1]
logtemplate = ("Run id: {runid}\n"
"Run dir: {rundir}\n"
"Command: {cmd}\n"
"User: {user}\n"
"Date: {dt}\n"
"Log id: {logid}\n\n"
" {msg}\n\n"
)
logstr = ""
for row in loglines[:ns.n]:
t, cmd, user, logid, runid, d, msg = _parse_row(row[:-1])
dt = datetime.fromtimestamp(t).strftime("%c")
kwlog = {'dt': dt, 'cmd': cmd, 'user': user, 'logid': logid,
'runid': runid, 'rundir': d, 'msg': msg}
logstr += logtemplate.format(**kwlog)
logstr = logstr[:-1]
    print(logstr)
File: tests/fixtures/config_teamocil/test1.py | Repo: rfoliva/tmuxp | Stars: 1,607
from .._util import loadfixture
teamocil_yaml = loadfixture('config_teamocil/test1.yaml')
teamocil_conf = {
'windows': [
{
'name': 'sample-two-panes',
'root': '~/Code/sample/www',
'layout': 'even-horizontal',
'panes': [{'cmd': ['pwd', 'ls -la']}, {'cmd': 'rails server --port 3000'}],
}
]
}
expected = {
'session_name': None,
'windows': [
{
'window_name': 'sample-two-panes',
'layout': 'even-horizontal',
'start_directory': '~/Code/sample/www',
'panes': [
{'shell_command': ['pwd', 'ls -la']},
{'shell_command': 'rails server --port 3000'},
],
}
],
}
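# Illustrative (added, assuming the historical tmuxp.config API): a test
# would compare the importer's output against this fixture, e.g.
#   from tmuxp import config
#   assert config.import_teamocil(teamocil_conf) == expected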
File: tests/unit/test_workspace.py | Repo: triton-inference-server/model_navigator | Stars: 49
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from pathlib import Path
from model_navigator.utils.workspace import Workspace
def test_workspace_exists():
"""Workspace path exists - is created"""
with tempfile.TemporaryDirectory() as temp_dir:
workspace = Workspace(temp_dir)
assert workspace.path == Path(temp_dir)
assert workspace.path.exists()
assert workspace.exists()
dummy_workspace_path = Path(temp_dir) / "dummy/workspace"
workspace = Workspace(dummy_workspace_path)
assert workspace.path == dummy_workspace_path
assert workspace.path.exists()
assert workspace.exists()
def test_workspace_empty():
"""Verifying workspace empty method"""
with tempfile.TemporaryDirectory() as temp_dir:
workspace = Workspace(temp_dir)
assert workspace.path == Path(temp_dir)
assert workspace.empty()
_create_dummy_file(workspace)
assert not workspace.empty()
def test_workspace_cleaning():
"""Test cleaning of workspace"""
with tempfile.TemporaryDirectory() as temp_dir:
workspace = Workspace(temp_dir)
_create_dummy_file(workspace)
assert not workspace.empty()
workspace.clean()
assert workspace.exists()
assert workspace.empty()
def _create_dummy_file(workspace):
dummy_path = workspace.path / "foo/bar.txt"
dummy_path.parent.mkdir(parents=True)
with dummy_path.open("w") as dummy_file:
dummy_file.write("foo bar")
File: src/lib/Bcfg2/Client/Tools/POSIX/Device.py | Repo: stpierre/bcfg2 | Stars: 0
import os
import sys
try:
from base import POSIXTool, device_map
except ImportError:
# py3k, incompatible syntax with py2.4
exec("from .base import POSIXTool, device_map")
class POSIXDevice(POSIXTool):
__req__ = ['name', 'dev_type', 'perms', 'owner', 'group']
def fully_specified(self, entry):
if entry.get('dev_type') in ['block', 'char']:
# check if major/minor are properly specified
            if (entry.get('major') is None or
                    entry.get('minor') is None):
return False
return True
def verify(self, entry, modlist):
"""Verify device entry."""
ondisk = self._exists(entry)
if not ondisk:
return False
# attempt to verify device properties as specified in config
rv = True
dev_type = entry.get('dev_type')
if dev_type in ['block', 'char']:
major = int(entry.get('major'))
minor = int(entry.get('minor'))
if major != os.major(ondisk.st_rdev):
msg = ("Major number for device %s is incorrect. "
"Current major is %s but should be %s" %
(entry.get("name"), os.major(ondisk.st_rdev), major))
self.logger.debug('POSIX: ' + msg)
entry.set('qtext', entry.get('qtext', '') + "\n" + msg)
rv = False
if minor != os.minor(ondisk.st_rdev):
msg = ("Minor number for device %s is incorrect. "
"Current minor is %s but should be %s" %
(entry.get("name"), os.minor(ondisk.st_rdev), minor))
self.logger.debug('POSIX: ' + msg)
entry.set('qtext', entry.get('qtext', '') + "\n" + msg)
rv = False
return POSIXTool.verify(self, entry, modlist) and rv
def install(self, entry):
if not self._exists(entry, remove=True):
try:
dev_type = entry.get('dev_type')
mode = device_map[dev_type] | int(entry.get('perms'), 8)
if dev_type in ['block', 'char']:
major = int(entry.get('major'))
minor = int(entry.get('minor'))
device = os.makedev(major, minor)
os.mknod(entry.get('name'), mode, device)
else:
os.mknod(entry.get('name'), mode)
except (KeyError, OSError, ValueError):
err = sys.exc_info()[1]
self.logger.error('POSIX: Failed to install %s: %s' %
(entry.get('name'), err))
return False
return POSIXTool.install(self, entry)
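# Illustrative sketch (added, standard library only): how the device numbers
# used by verify()/install() pack and unpack.
#   dev = os.makedev(8, 1)        # combine major/minor into one device id
#   os.major(dev), os.minor(dev)  # -> (8, 1)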
File: buyfree_mall/buyfree_mall/apps/users/constants.py | Repo: GalphaXie/E-commerce | Stars: 0
# -*- coding: utf-8 -*-
# @File : constants.py
# @Author : Xie
# @Date : 9/15/18
# @Desc :
# Expiry time (seconds) for email verification tokens
VERIFY_EMAIL_TOKEN_EXPIRES = 24 * 60 * 60
# Maximum number of shipping addresses shown per user
USER_ADDRESS_COUNTS_LIMIT = 20
# Maximum number of browsing-history entries shown per user
USER_BROWSING_HISTORY_COUNTS_LIMIT = 5
File: anaplanapi2/AnaplanConnection.py | Repo: response4amit/anaplanapi2 | Stars: 0
#===============================================================================
# Created: 22 May 2019
# @author: AP (adapted from <NAME>)
# Description: Class to contain Anaplan connection details required for all API calls
# Input: Authorization header string, workspace ID string, and model ID string
# Output: None
#===============================================================================
class AnaplanConnection(object):
'''
classdocs
'''
def __init__(self, authorization, workspaceGuid, modelGuid):
'''
:param authorization: Authorization header string
:param workspaceGuid: ID of the Anaplan workspace
:param modelGuid: ID of the Anaplan model
'''
self.authorization = authorization
self.workspaceGuid = workspaceGuid
self.modelGuid = modelGuid
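# Illustrative usage (added; the header value is an assumption based on
# Anaplan's token scheme, not taken from this file):
#   conn = AnaplanConnection('AnaplanAuthToken <token>',
#                            '<workspaceGuid>', '<modelGuid>')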
File: Player.py | Repo: Vrim/TicTacToe | Stars: 0
from __future__ import annotations
from typing import Any, Tuple, List
class Player():
""" Tic Tac Toe
Author: <NAME>
Date: Oct. 11, 2019
----- Player -----
Player class represents the Player(s)
Keeps track of Players in the game and their scores, symbols, names.
"""
symbol: str
name: str
_wins: int
_losses: int
_ties: int
def __init__(self, symbol : str, name: str) -> None:
self.symbol = symbol
self.name = name
self._wins = 0
self._losses = 0
self._ties = 0
def getScore(self) -> Tuple[int, int, int]:
return (self._wins, self._losses, self._ties)
def calcGamesPlayed(self) -> int:
"""Returns the number of games played
>>>p1 = Player('x', "Hi")
>>>p2 = Player('y', "Bye")
>>>t = TicTac()
>>>t.play(0, p1)
>>>t.play(3, p2)
>>>t.play(1, p1)
>>>t.play(4, p2)
>>>t.play(2, p1)
>>>p1.calcGamesPlayed()
1
"""
gamesPlayed = self._wins + self._losses + self._ties
return gamesPlayed
if __name__ == "__main__":
import doctest
    doctest.testmod()
File: tests/hosting/test_messages.py | Repo: DaeunYim/pgtoolsservice | Stars: 33
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from ossdbtoolsservice.hosting.json_message import JSONRPCMessage, JSONRPCMessageType
class JsonRpcMessageTests(unittest.TestCase):
def test_create_error(self):
# If: I create an error message
message = JSONRPCMessage.create_error(10, 20, 'msg', {})
# Then:
# ... The message should have all the properties I defined
self.assertIsNotNone(message)
self.assertEqual(message.message_id, 10)
self.assertIsNone(message.message_method)
self.assertIsNone(message.message_params)
self.assertIsNone(message.message_result)
self.assertIsNotNone(message.message_error)
self.assertEqual(message.message_error['code'], 20)
self.assertEqual(message.message_error['message'], 'msg')
self.assertDictEqual(message.message_error['data'], {})
self.assertEqual(message.message_type, JSONRPCMessageType.ResponseError)
# ... The dictionary should have the same values stored
dictionary = message.dictionary
self.assertIsNotNone(dictionary)
self.assertDictEqual(dictionary, {
'jsonrpc': '2.0',
'error': {
'code': 20,
'message': 'msg',
'data': {}
},
'id': 10
})
def test_create_response(self):
# If: I create a response
message = JSONRPCMessage.create_response(10, {})
# Then:
# ... The message should have all the properties I defined
self.assertIsNotNone(message)
self.assertEqual(message.message_id, 10)
self.assertIsNone(message.message_method)
self.assertIsNone(message.message_params)
self.assertIsNotNone(message.message_result)
self.assertDictEqual(message.message_result, {})
self.assertIsNone(message.message_error)
self.assertEqual(message.message_type, JSONRPCMessageType.ResponseSuccess)
# ... The dictionary should have the same values stored
dictionary = message.dictionary
self.assertIsNotNone(dictionary)
self.assertDictEqual(dictionary, {
'jsonrpc': '2.0',
'result': {},
'id': 10
})
def test_create_request(self):
# If: I create a request
message = JSONRPCMessage.create_request(10, "test/test", {})
# Then:
# ... The message should have all the properties I defined
self.assertIsNotNone(message)
self.assertEqual(message.message_id, 10)
self.assertEqual(message.message_method, "test/test")
self.assertDictEqual(message.message_params, {})
self.assertIsNone(message.message_result)
self.assertIsNone(message.message_error)
self.assertEqual(message.message_type, JSONRPCMessageType.Request)
# ... The dictionary should have the same values stored
dictionary = message.dictionary
self.assertIsNotNone(dictionary)
self.assertDictEqual(dictionary, {
'jsonrpc': '2.0',
'method': 'test/test',
'params': {},
'id': 10
})
def test_create_notification(self):
# If: I create a notification
message = JSONRPCMessage.create_notification("test/test", {})
# Then:
# ... The message should have all the properties I defined
self.assertIsNotNone(message)
self.assertIsNone(message.message_id)
self.assertEqual(message.message_method, "test/test")
self.assertDictEqual(message.message_params, {})
self.assertIsNone(message.message_result)
self.assertIsNone(message.message_error)
self.assertEqual(message.message_type, JSONRPCMessageType.Notification)
# ... The dictionary should have the same values stored
dictionary = message.dictionary
self.assertIsNotNone(dictionary)
self.assertDictEqual(dictionary, {
'jsonrpc': '2.0',
'method': 'test/test',
'params': {}
})
# FROM DICTIONARY TESTS ################################################
def test_from_dict_notification(self):
# If: I create a notification message from a dictionary
message = JSONRPCMessage.from_dictionary({
'method': 'test/test',
'params': {}
# No ID = Notification
})
# Then:
# ... The message should have all the properties I defined
self.assertIsNotNone(message)
self.assertIsNone(message.message_id)
self.assertEqual(message.message_method, "test/test")
self.assertDictEqual(message.message_params, {})
self.assertIsNone(message.message_result)
self.assertIsNone(message.message_error)
self.assertEqual(message.message_type, JSONRPCMessageType.Notification)
# ... The dictionary should have the same values stored
dictionary = message.dictionary
self.assertIsNotNone(dictionary)
self.assertDictEqual(dictionary, {
'jsonrpc': '2.0',
'method': 'test/test',
'params': {}
})
def test_from_dict_invalid_notification(self):
# If: I create a notification message from a dictionary that is missing a method
# Then: I should get an exception
with self.assertRaises(ValueError):
JSONRPCMessage.from_dictionary({
'params': {}
# No ID = Notification
# No method = Invalid
})
def test_from_dict_response(self):
# If: I create a successful response from a dictionary
message = JSONRPCMessage.from_dictionary({
'id': '10',
'result': {}
})
# Then:
# ... The message should have all the properties I defined
self.assertIsNotNone(message)
self.assertEqual(message.message_id, '10')
self.assertIsNone(message.message_method)
self.assertIsNone(message.message_params)
self.assertIsNotNone(message.message_result)
self.assertDictEqual(message.message_result, {})
self.assertIsNone(message.message_error)
self.assertEqual(message.message_type, JSONRPCMessageType.ResponseSuccess)
# ... The dictionary should have the same values stored
dictionary = message.dictionary
self.assertIsNotNone(dictionary)
self.assertDictEqual(dictionary, {
'jsonrpc': '2.0',
'result': {},
'id': '10'
})
def test_from_dict_error(self):
# If: I create an error response from a dictionary
message = JSONRPCMessage.from_dictionary({
'id': '10',
'error': {
'code': 20,
'message': 'msg',
'data': {}
}
})
# Then:
# ... The message should have all the properties I defined
self.assertIsNotNone(message)
self.assertEqual(message.message_id, '10')
self.assertIsNone(message.message_method)
self.assertIsNone(message.message_params)
self.assertIsNone(message.message_result)
self.assertIsNotNone(message.message_error)
self.assertEqual(message.message_error['code'], 20)
self.assertEqual(message.message_error['message'], 'msg')
self.assertDictEqual(message.message_error['data'], {})
self.assertEqual(message.message_type, JSONRPCMessageType.ResponseError)
# ... The dictionary should have the same values stored
dictionary = message.dictionary
self.assertIsNotNone(dictionary)
self.assertDictEqual(dictionary, {
'jsonrpc': '2.0',
'error': {
'code': 20,
'message': 'msg',
'data': {}
},
'id': '10'
})
def test_from_dict_response_invalid(self):
# If: I create an invalid response from a dictionary
# Then: I should get an exception
with self.assertRaises(ValueError):
JSONRPCMessage.from_dictionary({
'id': '10',
'error': {},
'result': {}
})
def test_from_dict_request(self):
# If: I create a request from a dictionary
message = JSONRPCMessage.from_dictionary({
'id': '10',
'method': 'test/test',
'params': {}
})
# Then:
# ... The message should have all the properties I defined
self.assertIsNotNone(message)
self.assertEqual(message.message_id, '10')
self.assertEqual(message.message_method, "test/test")
self.assertDictEqual(message.message_params, {})
self.assertIsNone(message.message_result)
self.assertIsNone(message.message_error)
self.assertEqual(message.message_type, JSONRPCMessageType.Request)
# ... The dictionary should have the same values stored
dictionary = message.dictionary
self.assertIsNotNone(dictionary)
self.assertDictEqual(dictionary, {
'jsonrpc': '2.0',
'method': 'test/test',
'params': {},
'id': '10'
})
def test_from_dict_request_invalid(self):
# If: I create an invalid request from a dictionary
# Then: I should get an exception
with self.assertRaises(ValueError):
JSONRPCMessage.from_dictionary({
'id': '10',
'params': {}
})
if __name__ == '__main__':
unittest.main()
File: main.py | Repo: EroCallie/PSPrintChat | Stars: 0
#!/usr/bin/env python
import PlexLib
import time
import calendar
class PrintChat:
@staticmethod
def on_message(channel, message):
utctime = time.strptime(message['date'], '%Y-%m-%dT%H:%M:%S+00:00')
timestamp = time.strftime("%H:%M:%S", time.localtime(calendar.timegm(utctime)))
if message['user']:
if message['type'] == 'normal':
print(f"[{channel}] ({timestamp}) <{message['user']['name']}>: {message['content']}")
elif message['type'] == 'tip':
print(
f"[{channel}] ({timestamp}) <Tip:{message['user']['name']}>: {message['content']} ({message['credits']} PD)")
elif message['type'] == 'subscription':
print(f"[{channel}] ({timestamp}) <Tip:{message['user']['name']} Has Just Subscribed!>")
if message['content'].split(' ')[0] == '!trigger':
PlexLib.send_message(channel, 'Reaction Message')
elif message['type'] == 'milestone':
print(f"[{channel}] ({timestamp}) <Milestone Reached>: {message['content']}")
elif message['type'] == 'system':
print(f"[{channel}] ({timestamp}) <System>: {message['content']}")
@staticmethod
def on_messagedeleted(channel, message_id):
print(f"[{channel}] Message Deleted with ID: {message_id}")
@staticmethod
def on_viewercountupdate(channel, viewers):
print(f"[{channel}] Viewer count update: {viewers}")
@staticmethod
def on_milestoneupdate(channel, milestones, progress):
print(f"[{channel}] Milestones (Update): {milestones} Progress: {progress}")
@staticmethod
def on_milestonereached(channel, milestones, progress):
print(f"[{channel}] Milestones (Reached): {milestones} Progress: {progress}")
@staticmethod
def on_tip(channel, milestones, progress, top):
if milestones:
print(f"[{channel}] Tip! Milestones (Reached): {milestones} Progress: {progress}\nTop Tippers: {top}")
else:
print(f"[{channel}] Tip! Top Tippers: {top}")
@staticmethod
def on_userupdate(channel, user):
print(f"[{channel}] User Update: Name:{user['name']}")
@staticmethod
def on_streamstart(channel, milestones, progress, top, title, tags, start_time, public):
print(
f"[{channel}] Stream Started!\nMilestones (Reached): {milestones} Progress: {progress}\nTop Tippers: {top}\nStream Title: {title}\nStream Tags: {tags}\nStart Time UTC: {start_time}\nPublic? {public}")
@staticmethod
def on_streamupdate(channel, milestones, progress, top, title, tags, start_time, public, nsfw):
print(
f"[{channel}] Stream Started!\nMilestones (Reached): {milestones} Progress: {progress}\nTop Tippers: {top}\nStream Title: {title}\nStream Tags: {tags}\nStart Time UTC: {start_time}\nPublic? {public} NSFW? {nsfw}")
@staticmethod
def on_streamend(channel, status):
print(f"[{channel}] Stream Status: {status}")
@staticmethod
def on_streamerupdate(channel, user):
print(f"[{channel}] Streamer Updated: {user}")
@staticmethod
def on_tipsuggestions(channel, tips):
print(f"[{channel}] Tip Suggestions Updated: {tips}")
@staticmethod
def on_experiencereceived(amount, level_stats):
print(f"Experience ({amount}): {level_stats}")
@staticmethod
def on_newreward(message, reason, amount):
print(f"Reward ({amount}): {message} ({reason})")
@staticmethod
def on_followedstreamstart(message, streamer):
print(f"Followed Stream Started: {message}\n{streamer}")
@staticmethod
def on_creditbalanceupdate(t_credits):
print(f"New credit balace: {t_credits} PD")
PlexLib.register_callback("on_message", PrintChat.on_message)
PlexLib.register_callback("on_messagedeleted", PrintChat.on_messagedeleted)
PlexLib.register_callback("on_viewercountupdate", PrintChat.on_viewercountupdate)
PlexLib.register_callback("on_milestoneupdate", PrintChat.on_milestoneupdate)
PlexLib.register_callback("on_milestonereached", PrintChat.on_milestonereached)
PlexLib.register_callback("on_tip", PrintChat.on_tip)
PlexLib.register_callback("on_userupdate", PrintChat.on_userupdate)
PlexLib.register_callback("on_streamstart", PrintChat.on_streamstart)
PlexLib.register_callback("on_streamupdate", PrintChat.on_streamupdate)
PlexLib.register_callback("on_streamend", PrintChat.on_streamend)
PlexLib.register_callback("on_streamerupdate", PrintChat.on_streamerupdate)
PlexLib.register_callback("on_tipsuggestions", PrintChat.on_tipsuggestions)
PlexLib.register_callback("on_experiencereceived", PrintChat.on_experiencereceived)
PlexLib.register_callback("on_newreward", PrintChat.on_newreward)
PlexLib.register_callback("on_followedstreamstart", PrintChat.on_followedstreamstart)
# PlexLib.set_tips(PlexLib.format_tips({'testtip': 10, 'testtip 2': 20, 'testtip 3': 30}))
# PlexLib.set_stream_info("Test Title", True, True, False, PlexLib.format_milestones(
# {"Milestone 1": 80, "Milestone 2": 120, "Milestone 3": 160, "Milestone 4": 200}), ["set", "some", "tags"])
File: twitchstreams/apps.py | Repo: naelstrof/PugBot-Discord-Django | Stars: 3
from django.apps import AppConfig
class TwitchstreamsConfig(AppConfig):
name = 'twitchstreams'
File: leetcode/1022.py | Repo: sputnikW/algorithm | Stars: 0
# Definition for a binary tree node (uncommented here so the file runs
# standalone; on LeetCode this class is provided by the judge).
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
def sumRootToLeaf(self, root: TreeNode) -> int:
if root is None:
return 0
sum = 0
def plusAllLeaves(node, parentPath):
nonlocal sum
currPath = parentPath + str(node.val)
if node.left is None and node.right is None:
sum += int(currPath, 2)
return
if node.left is not None:
plusAllLeaves(node.left, currPath)
if node.right is not None:
plusAllLeaves(node.right, currPath)
plusAllLeaves(root, '')
        return sum
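

if __name__ == '__main__':
    # Added illustrative check: the tree [1,0,1,0,1,0,1] encodes the
    # root-to-leaf paths 0b100, 0b101, 0b110, 0b111 -> 4 + 5 + 6 + 7 = 22.
    root = TreeNode(1)
    root.left, root.right = TreeNode(0), TreeNode(1)
    root.left.left, root.left.right = TreeNode(0), TreeNode(1)
    root.right.left, root.right.right = TreeNode(0), TreeNode(1)
    assert Solution().sumRootToLeaf(root) == 22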
File: utils.py | Repo: Chrisae9/youtube-trending-prediction | Stars: 0
from textblob import TextBlob
from dateutil import parser
import random
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
def classifiedTextblob(data):
polar = TextBlob(data).sentiment.polarity
if (polar < 0):
return -1
if (polar == 0):
return 0
if (polar > 0):
return 1
def getTimeOfDay(publish):
# cut off timezone
publish = publish.split('.')[0]
publish = parser.parse(publish)
time = publish.hour
# night
if time <= 6:
return 3
# morning
if time <= 12:
return 0
# afternoon
if time <= 18:
return 1
# evening
if time <= 24:
return 2
return random.randint(0, 3)
def compute(title, description, tags, time_of_day, category):
df = pd.read_csv("data/USCAGBDEProcessedTextBlob.csv",
encoding="ISO-8859-1")
data = df[['category_id', 'title_sent_class', 'tags_sent_class',
'descrip_sent_class', 'time_of_day', 'label']]
data = data.dropna()
sentiment = data['label']
data = data.drop(columns=['label'])
class_title = classifiedTextblob(title)
class_description = classifiedTextblob(description)
class_tags = classifiedTextblob(('""|""').join(tags))
d = {'title_sent_class': class_title, 'tags_sent_class': class_tags,
'descrip_sent_class': class_description, 'time_of_day': time_of_day, 'category_id': category}
input = pd.DataFrame(data=d, index=[0])
decision_tree = DecisionTreeClassifier()
decision_tree = decision_tree.fit(data, sentiment)
prediction = decision_tree.predict(input)
return prediction[0]
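# Illustrative (added): a hypothetical call; the CSV read by compute() must
# exist on disk, and the numeric category id here is an assumption:
#   compute('My new video', 'A walkthrough of the build', ['vlog', 'diy'],
#           time_of_day=1, category=22)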
File: flask_ecom_api/api/v1/customers/models.py | Repo: savilard/flask-ecom-api | Stars: 1
from sqlalchemy_utils import EmailType, PhoneNumberType
from flask_ecom_api.api.v1.customers.admin import (
CustomerAdminView,
CustomerShippingAddressAdminView,
)
from flask_ecom_api.api.v1.orders.models import Order
from flask_ecom_api.app import admin, db
class Customer(db.Model):
"""Customer model."""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(
db.String(length=50),
index=True,
unique=True,
nullable=False,
)
date_of_birth = db.Column(db.DateTime)
email = db.Column(
EmailType,
index=True,
unique=True,
nullable=False,
)
shipping_addresses = db.relationship(
'CustomerShippingAddress',
backref='customer',
lazy='joined',
)
orders = db.relationship(Order, lazy='joined')
def __repr__(self):
"""Printable representation of Customer model."""
return f'<Customer id: {self.id}, customer name: {self.name}>'
class CustomerShippingAddress(db.Model):
"""Customer shipping address model."""
id = db.Column(db.Integer, primary_key=True)
customer_id = db.Column(
db.Integer,
db.ForeignKey('customer.id'),
index=True,
nullable=False,
)
first_name = db.Column(db.String(50), nullable=False)
last_name = db.Column(db.String(50), nullable=False)
phone_number = db.Column(PhoneNumberType())
country = db.Column(db.String(20), nullable=False)
city = db.Column(db.String(20), nullable=False)
street = db.Column(db.String(20), nullable=False)
house_number = db.Column(db.Integer, nullable=False)
apartment_number = db.Column(db.Integer, nullable=False)
postcode = db.Column(db.Integer, nullable=False)
comment = db.Column(db.String(140))
customers = db.relationship('Customer', lazy='joined')
def __repr__(self):
"""Printable representation of CustomerShippingAddress model."""
return f'<Customer shipping address id: {self.id}>'
admin.add_view(
CustomerAdminView(
Customer,
db.session,
category='Customers',
),
)
admin.add_view(
CustomerShippingAddressAdminView(
CustomerShippingAddress,
db.session,
category='Customers',
),
)
File: tests/factories/net.py | Repo: marcelb98/pycroft | Stars: 0
import factory
from ipaddr import IPv4Network
from pycroft.model.net import VLAN, Subnet
from tests.factories.base import BaseFactory
class VLANFactory(BaseFactory):
class Meta:
model = VLAN
name = factory.Sequence(lambda n: "vlan{}".format(n+1))
vid = factory.Sequence(lambda n: n+1)
class SubnetFactory(BaseFactory):
class Meta:
model = Subnet
exclude = ('str_address',)
str_address = factory.Faker('ipv4', network=True)
address = factory.LazyAttribute(lambda o: IPv4Network(o.str_address))
vlan = factory.SubFactory(VLANFactory)
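# Illustrative (added, assuming BaseFactory is bound to a live SQLAlchemy
# session as in the surrounding test suite):
#   subnet = SubnetFactory()  # builds a Subnet with a fresh VLAN attached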
File: fedml_api/hook_test.py | Repo: SlimFun/FL_pruning | Stars: 0
import torch
x = torch.Tensor([0, 1, 2, 3]).requires_grad_()
y = torch.Tensor([4, 5, 6, 7]).requires_grad_()
w = torch.Tensor([1, 2, 3, 4]).requires_grad_()
z = x+y
def hook_fn(grad):
print(grad)
handle_1 = z.register_hook(hook_fn)
o = w.matmul(z)
def hook_fn2(grad):
print('grad')
handle_2 = z.register_hook(hook_fn2)
handle_2.remove()
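# Note (added): handle_2 is removed before backward(), so hook_fn2 never fires.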
print('=====Start backprop=====')
o.backward()
print('=====End backprop=====')
print('x.grad:', x.grad)
print('y.grad:', y.grad)
print('w.grad:', w.grad)
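# Note (added): z is a non-leaf tensor, so z.grad is None unless
# z.retain_grad() is called before backward().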
print('z.grad:', z.grad)
File: adv/botan.py | Repo: qwewqa/dl | Stars: 0
import adv.adv_test
from core.advbase import *
from module.bleed import Bleed
from slot.a import *
from slot.d import *
def module():
return Botan
class Botan(Adv):
# comment = "RR+Jewels"
a3 = ('prep_charge',0.05)
conf = {}
conf['slots.a'] = RR() + BN()
conf['slots.d'] = Shinobi()
conf['acl'] = """
`s2, pin='prep' or fsc
`s1, (x=5 or fsc) and self.bleed._static['stacks']<3
`s3, x=5 or fsc
`fs, x=5
"""
def init(self):
if self.condition('buff all team'):
self.s2_proc = self.c_s2_proc
def prerun(self):
self.bleed = Bleed("g_bleed",0).reset()
def s1_proc(self, e):
Bleed("s1", 1.46).on()
def c_s2_proc(self, e):
Teambuff('s2',0.1,15,'crit','chance').on()
def s2_proc(self, e):
Selfbuff('s2',0.1,15,'crit','chance').on()
if __name__ == '__main__':
conf = {}
adv.adv_test.test(module(), conf)
File: pixie_plugin/task_queue/redis_queue.py | Repo: bpschmitt/nr-pixie-security-plugin | Stars: 0
from urllib.parse import urlparse
from redis import Redis
from rq import Queue
def redis_connection(config):
url = urlparse(config["REDIS_URL"])
return Redis(host=url.hostname, port=url.port, password=config["REDIS_PASSWORD"])
def redis_queue(config):
redis = redis_connection(config)
queue = Queue(
name=config["REDIS_QUEUE_NAME"],
connection=redis,
is_async=config["REDIS_QUEUE_IS_ASYNC"],
)
return queue
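# Illustrative usage (added; the config keys mirror those read above, the
# values are placeholders):
#   cfg = {"REDIS_URL": "redis://:secret@localhost:6379",
#          "REDIS_PASSWORD": "secret",
#          "REDIS_QUEUE_NAME": "pixie",
#          "REDIS_QUEUE_IS_ASYNC": True}
#   redis_queue(cfg).enqueue(print, "hello")  # rq runs the call on a worker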
File: enumeration/util.py | Repo: VlachosGroup/AdsorptionConfiguration_MS2021 | Stars: 0
import numpy as np
from rdkit import Chem
from rdkit.Chem import rdqueries, rdForceFieldHelpers
from rdkit.Chem.rdChemReactions import ChemicalReaction
from collections import defaultdict
from itertools import combinations
from scipy.spatial.distance import pdist
import itertools
from scipy.spatial.distance import cdist
from ase import Atoms as ASEAtoms
from ase import Atom as ASEAtom
from ase.data import atomic_numbers
from rdkit.Chem import AllChem
import random
from ase.io import write,read
import os
from collections import Counter
from ast import literal_eval
from rdkit import Geometry
def GetBondListFromAtomList(Mol, AtomList):
BondList = set()
for idx in AtomList:
atom = Mol.GetAtomWithIdx(idx)
for bond in atom.GetBonds():
if bond.GetOtherAtomIdx(atom.GetIdx()) in AtomList:
BondList.add(bond.GetIdx())
return list(BondList)
def GetSubMolFromIdx(Idxs,Mol):
Mapping = dict() # Original Mol Idx -> New Mol Idx
if len(Idxs) != 1:
BondList = GetBondListFromAtomList(Mol,Idxs)
NewMol = Chem.RWMol(Chem.PathToSubmol(Mol,BondList,atomMap = Mapping))
else:
NewMol = Chem.RWMol(Mol)
# Remove Non surface Atom
for idx in reversed(range(0,NewMol.GetNumAtoms())):
if idx not in Idxs:
NewMol.RemoveAtom(idx)
Mapping[Idxs[0]] = 0
ReverseMapping = dict()
for Idx in Mapping:
ReverseMapping[Mapping[Idx]] = Idx
return NewMol, Mapping, ReverseMapping
SurfaceElements = ('Ag','Au','Co','Cu','Fe','Ir','Ni','Pd','Pt','Re','Rh','Ru')
SurfaceAtomicNumbers = tuple([0]+[atomic_numbers[s] for s in SurfaceElements])
# Elements of adsorbate atoms
AdsorbateElements = ('H','C','O','N')
AdsorbateAtomicNumbers = tuple([atomic_numbers[s] for s in AdsorbateElements])
def BFSShortestPath(mol,idxs):
# Step1: Initialize
predecessors = []
for _ in idxs:
predecessors.append([[] for _ in range(mol.GetNumAtoms())])
for i,j in enumerate(idxs):
predecessors[i][j] = True
queues = [[mol.GetAtomWithIdx(i)] for i in idxs]
Checked = [set() for i in idxs]
MeetingPoints = [[[] for _ in idxs] for _ in idxs]
HaveWeMet = [[False for _ in idxs] for _ in idxs]
for i in range(len(idxs)):
HaveWeMet[i][i] = True
# Search
for _ in range(mol.GetNumAtoms()): # maximum possible depth.
for i,queue in enumerate(queues): # Starting node i
Checked[i] |= set([q.GetIdx() for q in queue])
newqueue = []
newqueueidx = []
for q in queue:
for na in q.GetNeighbors(): # Breath first search
naidx = na.GetIdx()
if naidx not in Checked[i]:
# append predecessor
predecessors[i][naidx].append(q.GetIdx())
# make new queues
if naidx not in newqueueidx:
newqueue.append(na)
newqueueidx.append(naidx)
# Check if it has met the searched nodes started from other nodes
for naidx in newqueueidx:
for j,pred in enumerate(predecessors):
if not HaveWeMet[i][j] and pred[naidx]:
MeetingPoints[i][j].append(naidx)
MeetingPoints[j][i].append(naidx)
# Check if meeting points has been set
for j in range(len(idxs)):
if not HaveWeMet[i][j] and MeetingPoints[i][j]:
HaveWeMet[i][j] = True
HaveWeMet[j][i] = True
if all(HaveWeMet[i]): # This has met all other nodes, so no need for further search
queues[i] = []
else: # It has not met all nodes. continue search
queues[i] = newqueue
# Check if every nodes have met each other
if not any(queues):
break
shortestpath = set()
for i in range(len(idxs)):
for j in range(len(idxs)):
pairshortestpath = set()
AtomIdx2Check = MeetingPoints[i][j].copy()
while AtomIdx2Check:
idx = AtomIdx2Check.pop()
if idx not in pairshortestpath:
pairshortestpath.add(idx)
if predecessors[i][idx] != True:
AtomIdx2Check += predecessors[i][idx]
shortestpath |=pairshortestpath
    return shortestpath
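# Illustrative (added): for a simple chain, the union of shortest paths
# between the end atoms is the whole chain, e.g.
#   BFSShortestPath(Chem.MolFromSmiles('CCCC'), [0, 3])  # -> {0, 1, 2, 3}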
def RemoveLatticeAmbiguity(OriginalMol):
"""
Remove ambiguity in the subgraph provided. See manuscript for the
mechanism of this.
Input:
OriginalMol - Chem.Mol or RWMol Object.
SubgraphIdx - List of Index of the atoms in subgraph.
Output:
Updated SubgraphIdx
"""
# Initialize
## isolate surface of subgraph
## Extract surface atom index in the subgraph
AAL = set() # (A)dsorbate (A)tom (L)ist
SSAL = set() # (S)elected (S)urface (A)tom (L)ist
SAL = set() # (S)urface (A)tom (L)ist
for Idx in range(0,OriginalMol.GetNumAtoms()):
atom = OriginalMol.GetAtomWithIdx(Idx)
if atom.GetAtomicNum() in SurfaceAtomicNumbers:
SAL.add(Idx)
for na in atom.GetNeighbors():
#if na.GetAtomicNum() in AdsorbateAtomicNumbers[1:]: # exclude hydrogen
if na.GetAtomicNum() in AdsorbateAtomicNumbers:
SSAL.add(Idx)
break
elif atom.GetAtomicNum() in AdsorbateAtomicNumbers:
AAL.add(Idx)
SubgraphIdx = AAL | SSAL
## Check if surface atoms are fragmented
AtomsToCheckList = list(SSAL)
Surface_Fragments = list()
while AtomsToCheckList:
# initialize
# here a single bridge is identified
Atom = OriginalMol.GetAtomWithIdx(AtomsToCheckList.pop())
Surface_Fragment = set()
Surface_Fragment.add(Atom.GetIdx())
NeighborsToCheck = list(Atom.GetNeighbors())
# find all possible surface atoms in this fragment
while NeighborsToCheck:
AtomBeingChecked = NeighborsToCheck.pop()
if AtomBeingChecked.GetIdx() in SSAL and \
AtomBeingChecked.GetIdx() not in Surface_Fragment:
Surface_Fragment.add(AtomBeingChecked.GetIdx())
NeighborsToCheck += AtomBeingChecked.GetNeighbors()
# Add to fragment list
Surface_Fragments.append(Surface_Fragment)
# Remove checked atoms
AtomsToCheckList = [value for value in AtomsToCheckList if value not in Surface_Fragment]
# if the length Surface_Fragments is more than 1, then the surface is fragmented
if len(Surface_Fragments) > 1:
# Extract surface
BondToBreak = set()
for idx in SSAL:
Atom = OriginalMol.GetAtomWithIdx(idx)
for Bond in Atom.GetBonds():
if Bond.GetOtherAtom(Atom).GetIdx() not in SAL:
BondToBreak.add(Bond.GetIdx())
SurfaceGraph = Chem.FragmentOnBonds(OriginalMol,list(BondToBreak),addDummies=False)
########################################################################
Surf = Surface_Fragments[0]
for s in Surface_Fragments[1:]:
Surf.update(s)
# BRS Based shortest path find
NewSurfIdx = BFSShortestPath(SurfaceGraph,list(Surf))
SubgraphIdx |= NewSurfIdx
SSAL |= NewSurfIdx
#######################################################################
SubgraphIdx = list(SubgraphIdx)
## initialize
NSAD = defaultdict(int) # (N)eighbor (S)urface (A)tom (D)ict
## intial dict list
for SSAIdx in SSAL:
for NeighborAtom in OriginalMol.GetAtomWithIdx(SSAIdx).GetNeighbors():
if NeighborAtom.GetAtomicNum() in SurfaceAtomicNumbers:
if NeighborAtom.GetIdx() not in SSAL:
NSAD[NeighborAtom.GetIdx()] += 1
# add nonselected surface atoms to subgraph
for idx in NSAD:
if NSAD[idx] > 1:
SubgraphIdx.append(idx)
## add second layer
for atom in OriginalMol.GetAtoms():
if atom.GetAtomicNum() == 2:
na = 0
for natom in atom.GetNeighbors():
if natom.GetIdx() in SubgraphIdx:
na += 1
if na ==3:
SubgraphIdx.append(atom.GetIdx())
BondList = GetBondListFromAtomList(OriginalMol, SubgraphIdx)
if BondList:
mol = Chem.PathToSubmol(OriginalMol,BondList)
else: # if adsorbate is one atom, there is no bond list, so just return the atom.
mol = Chem.RWMol(Chem.Mol())
mol.AddAtom(OriginalMol.GetAtomWithIdx(list(AAL)[0]).__copy__())
return mol
class Site(object):
"""
Object for a site
Attributes:
SiteType - site type in integer
        Coordinate - 3D coordinates of the site location
        Neighbors - ids of connected neighbor sites
        DuplicateNeighborError - if True, raise an error when an appended
            neighbor is already in the site's neighbor list
    """
def __init__(self, SiteType, Coordinate, DuplicateNeighborError=False):
# Error check
assert isinstance(SiteType, int), 'SiteType is not an integer.'
assert isinstance(Coordinate, list) or isinstance(Coordinate, np.ndarray), 'Coordinate is not a list.'
assert len(Coordinate) == 3,'Coordinate is not 3 dimensional.'
assert (isinstance(Coordinate[0], float) or isinstance(Coordinate[0], int))\
and (isinstance(Coordinate[1], float) or isinstance(Coordinate[1], int))\
and (isinstance(Coordinate[2], float) or isinstance(Coordinate[2], int)), 'Coordinate element is not a float or int.'
# Construct a site
self._SiteType = SiteType # e.g. atop bridge hollow sites
self._Coordinate = np.array(Coordinate, float)
self._DuplicateNeighborError = DuplicateNeighborError
self._SiteNeighbors = set()
self._AtomNeighbors = set()
self._RepresentedAtoms = set() # list of actual Pt atoms
def __str__(self):
return '<Site(Type:%i,xyz:[%.2f,%.2f,%.2f],Number of Neighbors: %i>' \
%(self._SiteType,self._Coordinate[0],self._Coordinate[1],\
self._Coordinate[2],len(self._SiteNeighbors))
def __repr__(self):
s = 'Site(Type:%i, xyz:[%.2f,%.2f,%.2f], Neighbors:' \
%(self._SiteType,self._Coordinate[0],self._Coordinate[1],\
self._Coordinate[2])
for Neighbor in self._SiteNeighbors:
s += str(Neighbor) + ','
s += ', Associated_Pt_Atoms: '
for Pt_atoms in self._RepresentedAtoms:
s += str(Pt_atoms) + ','
s += ')'
return s
def AppendSiteNeighbors(self, Neighbors):
# Error check
try:
if not isinstance(Neighbors,(int,np.int64)):
A = iter(Neighbors)
for a in A:
if not isinstance(a,(int,np.int64)):
raise Exception
except Exception:
raise Exception("Neighbors is not iterable object with integer or an integer.")
# append neighbor
if isinstance(Neighbors,(int,np.int64)):
if self._DuplicateNeighborError:
if Neighbors in self._SiteNeighbors:
raise Exception("Neighbor " + str(Neighbors) + " is already in the neighbor list")
self._SiteNeighbors.add(Neighbors)
else:
for Neighbor in Neighbors:
if self._DuplicateNeighborError:
if Neighbor in self._SiteNeighbors:
raise Exception("Neighbor " + Neighbor +" is already in the neighbor list")
self._SiteNeighbors.add(Neighbor)
def AppendAtomNeighbors(self, Neighbors):
# Error check
try:
if not isinstance(Neighbors, (int,np.int64)):
A = iter(Neighbors)
for a in A:
if not isinstance(a,(int,np.int64)):
raise Exception
except Exception:
raise Exception("Neighbors is not iterable object with integer or an integer.")
# append neighbor
if isinstance(Neighbors, (int,np.int64)):
if self._DuplicateNeighborError:
if Neighbors in self._AtomNeighbors:
raise Exception("Neighbor " + str(Neighbors) + " is already in the neighbor list")
self._AtomNeighbors.add(Neighbors)
else:
for Neighbor in Neighbors:
if self._DuplicateNeighborError:
if Neighbor in self._AtomNeighbors:
raise Exception("Neighbor " + Neighbor +" is already in the neighbor list")
self._AtomNeighbors.add(Neighbor)
def AppendRepresentedAtoms(self, Pt_indexes):
# this is for actual Pt atoms associated with sites
# Error check
try:
if isinstance(Pt_indexes, str) and Pt_indexes == 'self':
pass
elif not isinstance(Pt_indexes, (int,np.int64)):
A = iter(Pt_indexes)
for a in A:
if not isinstance(a,(int,np.int64)):
raise Exception
except Exception:
raise Exception("Neighbors is not iterable object with integer or an integer.")
# append neighbor
if isinstance(Pt_indexes, str) and Pt_indexes == 'self':
self._RepresentedAtoms.add('self')
elif isinstance(Pt_indexes, (int,np.int64)):
if self._DuplicateNeighborError:
if Pt_indexes in self._RepresentedAtoms:
raise Exception(str(Pt_indexes) + " is already in the associated Pt site list")
self._RepresentedAtoms.add(Pt_indexes)
else:
for index in Pt_indexes:
if self._DuplicateNeighborError:
if index in self._RepresentedAtoms:
raise Exception(str(index) + " is already in the associated Pt site list")
self._RepresentedAtoms.add(index)
def GetCoordinate(self):
return self._Coordinate.copy()
def GetSiteType(self):
        return self._SiteType
class Lattice(object):
    def __init__(self, Sites=None, SiteNames=None, DistanceMultiplier=None,
                 Cell=np.eye(3), PBC=False):
        # Avoid shared mutable default arguments.
        Sites = [] if Sites is None else Sites
        SiteNames = [] if SiteNames is None else SiteNames
        DistanceMultiplier = [] if DistanceMultiplier is None else DistanceMultiplier
        # Error Check
        assert isinstance(Sites, list), 'Sites is not a list.'
        for site in Sites:
            assert isinstance(site, Site), 'Site is not a Site object'
        self._SiteNames = SiteNames
        # Multiplied before deciding which atom is at which site.
        self._DistanceMultiplier = DistanceMultiplier
        self._Sites = Sites
self.SetCell(Cell)
self.SetPBC(PBC)
def SetCell(self, Cell, KeepAbsCoord=False):
Cell = np.array(Cell, float)
if Cell.shape == (3,):
Cell = np.diag(Cell)
elif Cell.shape != (3, 3):
raise ValueError('Cell must be length 3 sequence or 3x3 matrix')
if KeepAbsCoord:
Cell_inv = np.linalg.inv(Cell.transpose())
for i in range(0,len(self._Sites)):
pos = np.dot(self._Cell.transpose(),self._Sites[i]._Coordinate.transpose()).transpose()
self._Sites[i]._Coordinate = np.dot(Cell_inv,pos.transpose()).transpose()
self._Cell = Cell
def SetPBC(self, PBC):
"""Set periodic boundary condition flags."""
if isinstance(PBC, bool):
PBC = (PBC,) * 3
else:
try:
iter(PBC)
except TypeError:
raise TypeError('PBC must be iterable or a bool')
assert len(PBC) == 3, 'iterable PBC must be 3 sequence'
for cond in PBC:
assert isinstance(cond, bool), \
'each element in PBC must be bool'
self._PBC = np.array(PBC, bool)
def GetRdkitMol(self,SurfaceAtomSymbol = 'Pt',queryatom=True):
# initialize
surface = Chem.RWMol(Chem.Mol())
# add toms
for site in self._Sites:
if 'self' in site._RepresentedAtoms:
if queryatom:
atom = rdqueries.HasStringPropWithValueQueryAtom('Type','S')
atom.ExpandQuery(rdqueries.HasBoolPropWithValueQueryAtom('Occupied',False))
atom.SetProp('smilesSymbol','M')
atom.SetProp('Type','S')
atom.SetBoolProp('Occupied',False)
else:
if SurfaceAtomSymbol:
atom = Chem.Atom(SurfaceAtomSymbol)
else:
atom = Chem.Atom(0)
atom.SetProp('smilesSymbol','M')
atom.SetProp('Type','S')
atom.SetBoolProp('Occupied',False)
surface.AddAtom(atom)
# add bonds
for i in range(0,len(self._Sites)):
if 'self' in self._Sites[i]._RepresentedAtoms:
for j in self._Sites[i]._AtomNeighbors:
if not surface.GetBondBetweenAtoms(i,int(j)):
surface.AddBond(i,int(j),order=Chem.rdchem.BondType.ZERO)
Chem.SanitizeMol(surface)
surface = surface.GetMol()
Chem.SanitizeMol(surface)
return surface
def GetRdkitMolEnum(self):
# initialize
surface = Chem.RWMol(Chem.Mol())
# add toms
for site in self._Sites:
if 'self' in site._RepresentedAtoms:
atom = Chem.Atom(0)
surface.AddAtom(atom)
# add bonds
for i in range(0,len(self._Sites)):
if 'self' in self._Sites[i]._RepresentedAtoms:
for j in self._Sites[i]._AtomNeighbors:
if not surface.GetBondBetweenAtoms(i,int(j)):
surface.AddBond(i,int(j),order=Chem.rdchem.BondType.SINGLE)
Chem.SanitizeMol(surface)
surface = surface.GetMol()
Chem.SanitizeMol(surface)
return surface
def AppendSurfaceToRdkitMol(self,mol,SurfaceAtomSymbol = 'Pt',queryatom=True):
# initialize
if isinstance(mol,Chem.Mol):
mol = Chem.RWMol(mol)
assert isinstance(mol,Chem.RWMol)
NAtoms = mol.GetNumAtoms()
LatticeToMolMap = dict()
MolToLatticeMap = dict()
# add atoms
for i in range(0,len(self._Sites)):
if 'self' in self._Sites[i]._RepresentedAtoms:
if queryatom:
atom = rdqueries.HasStringPropWithValueQueryAtom('Type','S')
atom.ExpandQuery(rdqueries.HasBoolPropWithValueQueryAtom('Occupied',False))
atom.SetProp('smilesSymbol','M')
atom.SetProp('Type','S')
atom.SetBoolProp('Occupied',False)
else:
atom = Chem.Atom(SurfaceAtomSymbol)
atom.SetProp('smilesSymbol','M')
atom.SetProp('Type','S')
atom.SetBoolProp('Occupied',False)
rdkitidx = mol.AddAtom(atom)
LatticeToMolMap[i] = rdkitidx
MolToLatticeMap[rdkitidx] = i
# add bonds
for i in range(0,len(self._Sites)):
if 'self' in self._Sites[i]._RepresentedAtoms:
for j in self._Sites[i]._AtomNeighbors:
try:
mol.AddBond(NAtoms+i,NAtoms+int(j),order=Chem.rdchem.BondType.ZERO)
except:
pass
return mol, LatticeToMolMap, MolToLatticeMap
def GetFracCoordinates(self):
mat = list()
for site in self._Sites:
mat.append(site._Coordinate)
return np.array(mat)
def GetCoordinates(self):
mat = self.GetFracCoordinates()
return np.dot(self._Cell.transpose(),mat.transpose()).transpose()
def GetCoordinatesWithCell(self,Cell):
if not Cell.__class__ == np.ndarray:
Cell = np.array(Cell)
mat = self.GetFracCoordinates()
return np.dot(Cell.transpose(),mat.transpose()).transpose()
def TranslateCoordinates(self,coordinate):
B_inv = np.linalg.inv(self._Cell.transpose())
for site in self._Sites:
pos = np.dot(self._Cell.transpose(),site._Coordinate.transpose()).transpose()
pos += coordinate
site._Coordinate = np.dot(B_inv,pos.transpose()).transpose()
def MakeASEAtoms(self,highlight = None):
atoms = ASEAtoms()
coord = self.GetCoordinates()*2.5
for i in range(0,coord.shape[0]):
if highlight and i in highlight:
atoms.append(ASEAtom('Pt',coord[i,:]))
elif self._Sites[i]._SiteType == 0:
atoms.append(ASEAtom('C',coord[i,:]))
elif self._Sites[i]._SiteType == 1:
atoms.append(ASEAtom('O',coord[i,:]))
elif self._Sites[i]._SiteType == 2:
atoms.append(ASEAtom('N',coord[i,:]))
return atoms
@classmethod
def ConstructRectangularClosePackedLattice(cls, x_max,y_max, PBC=True):
# option
rd = 10 # rounding decimals
# Error check
assert x_max > 1, "x_max too small"
assert y_max > 1, "y_max too small"
# set unit cell size
Cell = [[2*x_max,0,0],[0,2*np.sqrt(3)/2*y_max,0],[0,0,1]]
Cell = np.array(Cell)
# Construct atop site coordinates
ac = np.zeros((4*x_max*y_max,3))
for y in range(0,y_max):
for x in range(0,x_max):
ac[4*(x+y*x_max),0] = 2*x
ac[4*(x+y*x_max),1] = 2*np.sqrt(3)/2*y
ac[4*(x+y*x_max)+1,0] = 2*x + 1
ac[4*(x+y*x_max)+1,1] = 2*np.sqrt(3)/2*y
ac[4*(x+y*x_max)+2,0] = 2*x + 0.5
ac[4*(x+y*x_max)+2,1] = np.sqrt(3)/2 + 2*np.sqrt(3)/2*y
ac[4*(x+y*x_max)+3,0] = 2*x + 1.5
ac[4*(x+y*x_max)+3,1] = np.sqrt(3)/2 + 2*np.sqrt(3)/2*y
# Construct bridge site coordinates
bc = np.zeros((12*x_max*y_max,3))
for y in range(0,y_max):
for x in range(0,x_max):
bc[12*(x+y*x_max),0] = 0.5+2*x
bc[12*(x+y*x_max),1] = 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+1,0] = 1.5+2*x
bc[12*(x+y*x_max)+1,1] = 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+2,0] = 0.25+2*x
bc[12*(x+y*x_max)+2,1] = np.sqrt(3)/2/2 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+3,0] = 0.75+2*x
bc[12*(x+y*x_max)+3,1] = np.sqrt(3)/2/2 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+4,0] = 1.25+2*x
bc[12*(x+y*x_max)+4,1] = np.sqrt(3)/2/2 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+5,0] = 1.75+2*x
bc[12*(x+y*x_max)+5,1] = np.sqrt(3)/2/2 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+6,0] = 2*x
bc[12*(x+y*x_max)+6,1] = np.sqrt(3)/2 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+7,0] = 1+2*x
bc[12*(x+y*x_max)+7,1] = np.sqrt(3)/2 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+8,0] = 0.25+2*x
bc[12*(x+y*x_max)+8,1] = np.sqrt(3)/2/2*3 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+9,0] = 0.75+2*x
bc[12*(x+y*x_max)+9,1] = np.sqrt(3)/2/2*3 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+10,0] = 1.25+2*x
bc[12*(x+y*x_max)+10,1] = np.sqrt(3)/2/2*3 + 2*np.sqrt(3)/2*y
bc[12*(x+y*x_max)+11,0] = 1.75+2*x
bc[12*(x+y*x_max)+11,1] = np.sqrt(3)/2/2*3 + 2*np.sqrt(3)/2*y
# Construct fcc site
fccc = np.zeros((x_max*y_max*4,3))
for x in range(0,x_max):
for y in range(0,y_max):
fccc[4*(x+y*(x_max)),0] = 0.5 + 2*x
fccc[4*(x+y*(x_max)),1] = np.sqrt(3)/6+2*np.sqrt(3)/2*y
fccc[4*(x+y*(x_max))+1,0] = 1.5 + 2*x
fccc[4*(x+y*(x_max))+1,1] = np.sqrt(3)/6+2*np.sqrt(3)/2*y
fccc[4*(x+y*(x_max))+2,0] = 2*x
fccc[4*(x+y*(x_max))+2,1] = np.sqrt(3)/2 + np.sqrt(3)/6+2*np.sqrt(3)/2*y
fccc[4*(x+y*(x_max))+3,0] = 1 + 2*x
fccc[4*(x+y*(x_max))+3,1] = np.sqrt(3)/2 + np.sqrt(3)/6+2*np.sqrt(3)/2*y
hcpc = np.zeros((x_max*y_max*4,3))
for x in range(0,x_max):
for y in range(0,y_max):
hcpc[4*(x+y*(x_max)),0] = 2*x
hcpc[4*(x+y*(x_max)),1] = np.sqrt(3)/6*2+2*np.sqrt(3)/2*y
hcpc[4*(x+y*(x_max))+1,0] = 1 + 2*x
hcpc[4*(x+y*(x_max))+1,1] = np.sqrt(3)/6*2+2*np.sqrt(3)/2*y
hcpc[4*(x+y*(x_max))+2,0] = 0.5 + 2*x
hcpc[4*(x+y*(x_max))+2,1] = np.sqrt(3)/2 + np.sqrt(3)/6*2+2*np.sqrt(3)/2*y
hcpc[4*(x+y*(x_max))+3,0] = 1.5 + 2*x
hcpc[4*(x+y*(x_max))+3,1] = np.sqrt(3)/2 + np.sqrt(3)/6*2+2*np.sqrt(3)/2*y
# Construct Sites list
SiteNames = ['Atop','Bridge','Hollow']
DistanceMultiplier = [1,2.5, 2.5]
Sites = list()
## Atop Site
for i in range(0,ac.shape[0]):
Sites.append(Site(0,ac[i]))
## Bridge Site
for i in range(0,bc.shape[0]):
Sites.append(Site(1,bc[i]))
## Hollow Site
for i in range(0,fccc.shape[0]):
Sites.append(Site(2,fccc[i]))
## Hollow Site
for i in range(0,hcpc.shape[0]):
Sites.append(Site(3,hcpc[i]))
# Append Neighbors
# set up periodic condition
if PBC:
pcs = np.array([[0,0,0],[1,0,0],[1,1,0],[0,1,0],[-1,1,0],[-1,0,0],[-1,-1,0],[0,-1,0],[1,-1,0]])
else:
pcs = np.array([[0,0,0]])
# actually calculate how much translation is requred
pcts = list()
for pc in pcs:
pcts.append([2*x_max*pc[0],2*np.sqrt(3)/2*y_max*pc[1],0])
pcts = np.array(pcts)
# periodic coordinate
for pc in pcts:
try:
apc = np.concatenate((apc,np.add(ac,pc)))
bpc = np.concatenate((bpc,np.add(bc,pc)))
fccpc = np.concatenate((fccpc,np.add(fccc,pc)))
hcppc = np.concatenate((hcppc,np.add(hcpc,pc)))
except NameError:
apc = np.add(ac,pc)
bpc = np.add(bc,pc)
fccpc = np.add(fccc,pc)
hcppc = np.add(hcpc,pc)
## atop site
for i in range(0,ac.shape[0]):
Sites[i].AppendRepresentedAtoms('self')
# to other atop sites
match = FindNeighbor(ac[i],apc,rd,1.0)
match = np.remainder(match,ac.shape[0])
Sites[i].AppendAtomNeighbors(match)
# to other bridge sites
match = FindNeighbor(ac[i],bpc,rd,0.5)
match = np.remainder(match,bc.shape[0])
Sites[i].AppendSiteNeighbors(match+ac.shape[0])
# to other fcc sites
match = FindNeighbor(ac[i],fccpc,rd,np.sqrt(3)/6*2)
match = np.remainder(match,fccc.shape[0])
Sites[i].AppendSiteNeighbors(match+ac.shape[0]+bc.shape[0])
# to other hcp sites
match = FindNeighbor(ac[i],hcppc,rd,np.sqrt(3)/6*2)
match = np.remainder(match,hcpc.shape[0])
Sites[i].AppendSiteNeighbors(match+ac.shape[0]+bc.shape[0]+fccc.shape[0])
## bridge site
for i in range(0,bc.shape[0]):
# to other atop sites
match = FindNeighbor(bc[i],apc,rd,0.5)
match = np.remainder(match,ac.shape[0])
Sites[i+ac.shape[0]].AppendSiteNeighbors(match)
Sites[i+ac.shape[0]].AppendRepresentedAtoms(match)
# to other bridge sites
# match = FindNeighbor(bc[i],bpc,rd,0.5)
# match = np.remainder(match,bc.shape[0])
# Sites[i+ac.shape[0]].AppendSiteNeighbors(match+ac.shape[0])
# to other hollow sites
match = FindNeighbor(bc[i],fccpc,rd,np.sqrt(3)/6)
match = np.remainder(match,fccc.shape[0])
Sites[i+ac.shape[0]].AppendSiteNeighbors(match+ac.shape[0]+bc.shape[0])
# hollow
match = FindNeighbor(bc[i],hcppc,rd,np.sqrt(3)/6)
match = np.remainder(match,hcpc.shape[0])
Sites[i+ac.shape[0]].AppendSiteNeighbors(match+ac.shape[0]+bc.shape[0]+fccc.shape[0])
## fcc site
for i in range(0,fccc.shape[0]):
# to other atop sites
match = FindNeighbor(fccc[i],apc,rd,np.sqrt(3)/6*2)
match = np.remainder(match,ac.shape[0])
Sites[i+ac.shape[0]+bc.shape[0]].AppendSiteNeighbors(match)
Sites[i+ac.shape[0]+bc.shape[0]].AppendRepresentedAtoms(match)
# to other bridge sites
match = FindNeighbor(fccc[i],bpc,rd,np.sqrt(3)/6)
match = np.remainder(match,bc.shape[0])
Sites[i+ac.shape[0]+bc.shape[0]].AppendSiteNeighbors(match+ac.shape[0])
# to other hcp sites
# match = FindNeighbor(fccc[i],hcppc,rd,np.sqrt(3)/3)
# match = np.remainder(match,hcpc.shape[0])
# Sites[i+ac.shape[0]+bc.shape[0]].AppendSiteNeighbors(match+ac.shape[0]+bc.shape[0]+fccc.shape[0])
## hcp site
for i in range(0,hcpc.shape[0]):
# to other atop sites
match = FindNeighbor(hcpc[i],apc,rd,np.sqrt(3)/6*2)
match = np.remainder(match,ac.shape[0])
Sites[i+ac.shape[0]+bc.shape[0]+fccc.shape[0]].AppendSiteNeighbors(match)
Sites[i+ac.shape[0]+bc.shape[0]+fccc.shape[0]].AppendRepresentedAtoms(match)
# to other bridge sites
match = FindNeighbor(hcpc[i],bpc,rd,np.sqrt(3)/6)
match = np.remainder(match,bc.shape[0])
Sites[i+ac.shape[0]+bc.shape[0]+fccc.shape[0]].AppendSiteNeighbors(match+ac.shape[0])
# to other hcp sites
# match = FindNeighbor(hcpc[i],fccpc,rd,np.sqrt(3)/3)
# match = np.remainder(match,fccc.shape[0])
# Sites[i+ac.shape[0]+bc.shape[0]+fccc.shape[0]].AppendSiteNeighbors(match+ac.shape[0]+bc.shape[0])
# change basis from absolute to fractional
# Basis1' * coordinate1' = Basis2' * coordinate2'
B_inv = np.linalg.inv(Cell.transpose())
for site in Sites:
site._Coordinate = np.dot(B_inv,site._Coordinate).transpose()
# periodic boundary condition
if PBC:
PBC = (True,True,False)
else:
PBC = (False,False,False)
# Return
return cls(Sites=Sites,SiteNames=SiteNames,DistanceMultiplier=DistanceMultiplier,Cell=Cell,PBC=PBC)
def FindNeighbor(xyz,mat,round_decimal,desired_distance):
mat = np.subtract(mat,xyz)
ds = np.linalg.norm(mat,axis=1)
ds = np.around(ds,decimals=round_decimal)
desired_distance = np.around(desired_distance,decimals=round_decimal)
return np.where(np.equal(ds,desired_distance))[0] # because it gives tuple of tuple
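# Illustrative (added): row indices of `mat` at the requested distance from
# `xyz` after rounding, e.g.
#   FindNeighbor(np.zeros(3), np.array([[1., 0., 0.], [0., 2., 0.]]), 10, 1.0)
#   # -> array([0])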
class SurfHelper(object):
def __init__(self,size):
# Construct Surface
surf = Lattice.ConstructRectangularClosePackedLattice(size,size,PBC=False)
# Get Surfrace Mol
atomidx = []
for i,s in enumerate(surf._Sites):
if 'self' in s._RepresentedAtoms:
atomidx.append(i)
self.xyz = surf.GetCoordinates()[atomidx,:]
self.sites = []
for i,s in enumerate(surf._Sites):
if 'self' in s._RepresentedAtoms:
self.sites.append(frozenset([int(i)]))
else:
self.sites.append(frozenset([int(ss) for ss in s._RepresentedAtoms]))
self.SurfMol = surf.GetRdkitMolEnum()
# Get Center Atom index in rdkit mol
SurfAtomCoordinates = list()
for i in range(0,len(surf._Sites)):
if 'self' in surf._Sites[i]._RepresentedAtoms:
SurfAtomCoordinates.append(surf._Sites[i].GetCoordinate())
CenterAtomIdx = np.linalg.norm(SurfAtomCoordinates - np.array([0.5,0.5,0]),axis=1).argmin()
self.SurfMol.GetAtomWithIdx(int(CenterAtomIdx)).SetBoolProp('CenterSurfAtom',True)
def AddAdsorbateToSurf(self,AdsorbateSmiles):
# Prepare Adsorbate
AdsorbateMol = Chem.MolFromSmiles(AdsorbateSmiles,sanitize=False)
# Get list of Surface Atom Indices
SurfIdxs = list()
GasIdxs = list()
for atom in AdsorbateMol.GetAtoms():
if atom.GetAtomicNum() not in [1,6,8]:
SurfIdxs.append(atom.GetIdx())
else:
GasIdxs.append(atom.GetIdx())
#Chem.SanitizeMol(AdsorbateMol)
AdsorbateMol.UpdatePropertyCache(False)
## Get SurfMol
AdsorbateSurfMol, AdsorbateToAdsorbateSurf, AdsorbateSurfToAdsorbate = GetSubMolFromIdx(SurfIdxs,AdsorbateMol)
### Set up for matching
SA = rdqueries.AtomNumEqualsQueryAtom(0)
for idx in range(0,AdsorbateSurfMol.GetNumAtoms()):
AdsorbateSurfMol.ReplaceAtom(idx,SA)
SA.ExpandQuery(rdqueries.HasPropQueryAtom('CenterSurfAtom'))
AdsorbateSurfMol.ReplaceAtom(0,SA)
## Get GasMol
AdsorbateGasMol, AdsorbateToAdsorbateGas, AdsorbateGasToAdsorbate = GetSubMolFromIdx(GasIdxs,AdsorbateMol)
Chem.SanitizeMol(AdsorbateGasMol)
AdsorbateGasMol = AdsorbateGasMol.GetMol()
## Match Surface
ProjectedSurfIdxs = self.SurfMol.GetSubstructMatches(AdsorbateSurfMol)[0]
# Combine Two mol
NewMol = Chem.RWMol(Chem.CombineMols(self.SurfMol,AdsorbateGasMol))
OccupiedSurfIdxs = set()
for bond in AdsorbateMol.GetBonds():
# Find Surface-Adsorbate bond
SurfAtomIdx = None
GasAtomIdx = None
atoms = [bond.GetBeginAtom(),bond.GetEndAtom()]
for atom in atoms:
if atom.GetAtomicNum() in [1,6,8]:
GasAtomIdx = atom.GetIdx()
else:
SurfAtomIdx = atom.GetIdx()
# if the bond between adsorbate and surface
if SurfAtomIdx is not None and GasAtomIdx is not None:
GasMappedIdx = AdsorbateToAdsorbateGas[GasAtomIdx] + self.SurfMol.GetNumAtoms()
SurfMappedIdx = ProjectedSurfIdxs[AdsorbateToAdsorbateSurf[SurfAtomIdx]]
NewMol.AddBond(GasMappedIdx,SurfMappedIdx,order=Chem.rdchem.BondType.SINGLE)
OccupiedSurfIdxs.add(SurfMappedIdx)
# Set up property
for idx in AdsorbateGasToAdsorbate:
idx = idx + self.SurfMol.GetNumAtoms()
atom = NewMol.GetAtomWithIdx(idx)
atom.SetNumRadicalElectrons(0)
M = Chem.Atom(0)
for idx in OccupiedSurfIdxs:
NewMol.ReplaceAtom(idx,M)
# Indexing via isotope
# This is done to record original index
for i,atom in enumerate(NewMol.GetAtoms()):
atom.SetIsotope(i+1) # start counting from 1 since 0 is default value
return NewMol
def GetCanonicalSmiles(self,s):
reloadedmol = self.AddAdsorbateToSurf(s)
reloadedmol = Chem.RWMol(RemoveLatticeAmbiguity(reloadedmol))
reloadedmol = reloadedmol.GetMol()
for atom in reloadedmol.GetAtoms():
atom.SetIsotope(0)
if atom.GetAtomicNum() in [1,6,8]:
atom.SetNumRadicalElectrons(1)
return Chem.MolToSmiles(reloadedmol)
def SetUpReaction(smiles):
"""
Pair Enumeration rules
"""
Rules = []
# Prepare molecule
Graph = Chem.MolFromSmiles(smiles,sanitize=False)
# renumber for speed
a = []
s = []
for atom in Graph.GetAtoms():
if atom.GetAtomicNum() == 0:
s.append(atom.GetIdx())
else:
a.append(atom.GetIdx())
Graph = Chem.RenumberAtoms(Graph,a+s)
Graph = Chem.RWMol(Graph)
# set bond properties. Needed to limit connecting more than 3 C to one C
for bond in Graph.GetBonds():
if bond.GetBeginAtom().GetAtomicNum() == 0 or bond.GetEndAtom().GetAtomicNum() == 0 :
bond.SetBondType(Chem.BondType.ZERO)
for atom in Graph.GetAtoms():
atom.SetNoImplicit(True)
## Set up molAtomMapNumber
i = 1
for atom in Graph.GetAtoms():
atom.SetProp('molAtomMapNumber',str(i))
i += 1
## Set Atom Type
Anchors = list()
for Atom in Graph.GetAtoms():
if Atom.GetAtomicNum()==6:
nS = 0
for NBRAtom in Atom.GetNeighbors():
if NBRAtom.GetAtomicNum() == 0:
nS += 1
Atom.SetIntProp('nS',nS)
Anchors.append(Atom.GetIdx())
elif Atom.GetAtomicNum()==0:
Atom.SetBoolProp('Occ',False)
for NBRAtom in Atom.GetNeighbors():
if NBRAtom.GetAtomicNum() == 6:
Atom.SetBoolProp('Occ',True)
break
## Check for Symmetry
if len(set([Graph.GetAtomWithIdx(i).GetIntProp('nS') for i in Anchors])) == 1:
symm = True
else:
symm = False
#Chem.SanitizeMol(Graph)
#Graph.UpdatePropertyCache(False)
# Set up Product
p = Graph.__copy__()
    ## set atom properties for occupied and unoccupied surface atoms.
OSA = rdqueries.AtomNumEqualsQueryAtom(0)
OSA.SetBoolProp('Occ',True)
OSA.ExpandQuery(rdqueries.HasBoolPropWithValueQueryAtom('Occ',True))
## Set up unoccupied Surface Atom
NOSA = rdqueries.AtomNumEqualsQueryAtom(0)
NOSA.SetBoolProp('Occ',False)
NOSA.ExpandQuery(rdqueries.HasBoolPropWithValueQueryAtom('Occ',False))
# Rule 1 Set up
## Set up Reactant
r = p.__copy__()
## Set up Other Anchor Atom
AdsorbedAnchor = rdqueries.AtomNumEqualsQueryAtom(6)
AdsorbedAnchor.ExpandQuery(rdqueries.HasIntPropWithValueQueryAtom('nS',
r.GetAtomWithIdx(Anchors[1]).GetIntProp('nS')))
AdsorbedAnchor.ExpandQuery(rdqueries.TotalValenceLessQueryAtom(3))
AdsorbedAnchor.SetProp('molAtomMapNumber',r.GetAtomWithIdx(Anchors[1]).GetProp('molAtomMapNumber'))
r.ReplaceAtom(Anchors[1],AdsorbedAnchor)
## replace surfaceatom with query atom
for Atom in r.GetAtoms():
if Atom.GetAtomicNum() == 0:
Occupied = False
for NBRAtom in Atom.GetNeighbors():
if NBRAtom.GetAtomicNum() == 6:
Occupied = True
break
if not Occupied:
NOSA.SetProp('molAtomMapNumber',Atom.GetProp('molAtomMapNumber'))
r.ReplaceAtom(Atom.GetIdx(),NOSA)
## Remove Anchor Atom
r.RemoveAtom(Anchors[0])
    if len(Chem.GetMolFrags(r)) == 1: # a fragmented result can be [C].[*][*][*], which behaves like an unconstrained bridge rule
## set reaction
rxn = ChemicalReaction()
## add reactant
#Chem.SanitizeMol(r)
#r.UpdatePropertyCache(False)
rxn.AddReactantTemplate(r.GetMol())
## add product
#Chem.SanitizeMol(p)
#p.UpdatePropertyCache(False)
p.GetAtomWithIdx(Anchors[0]).SetBoolProp('NewAtom',True)
rxn.AddProductTemplate(p.GetMol())
rxn.Initialize()
Rules.append(rxn)
# Make rule2 if applicable
if not symm:
        # Rule 2 Set up
## Set up Reactant
r = p.__copy__()
## Set up Other Anchor Atom
AdsorbedAnchor = rdqueries.AtomNumEqualsQueryAtom(6)
AdsorbedAnchor.ExpandQuery(rdqueries.HasIntPropWithValueQueryAtom('nS',
r.GetAtomWithIdx(Anchors[0]).GetIntProp('nS')))
AdsorbedAnchor.ExpandQuery(rdqueries.TotalValenceLessQueryAtom(3))
AdsorbedAnchor.SetProp('molAtomMapNumber',r.GetAtomWithIdx(Anchors[0]).GetProp('molAtomMapNumber'))
r.ReplaceAtom(Anchors[0],AdsorbedAnchor)
## replace surfaceatom with query atom
for Atom in r.GetAtoms():
if Atom.GetAtomicNum() == 0:
Occupied = False
for NBRAtom in Atom.GetNeighbors():
if NBRAtom.GetAtomicNum() == 6:
Occupied = True
break
if not Occupied:
NOSA.SetProp('molAtomMapNumber',Atom.GetProp('molAtomMapNumber'))
r.ReplaceAtom(Atom.GetIdx(),NOSA)
## Remove Anchor Atom
r.RemoveAtom(Anchors[1])
        if len(Chem.GetMolFrags(r)) == 1: # a fragmented result can be [C].[*][*][*], which behaves like an unconstrained bridge rule
## set reaction
rxn = ChemicalReaction()
## add reactant
#Chem.SanitizeMol(r)
#r.UpdatePropertyCache(False)
rxn.AddReactantTemplate(r.GetMol())
## add product
p.GetAtomWithIdx(Anchors[1]).SetBoolProp('NewAtom',True)
p.GetAtomWithIdx(Anchors[0]).ClearProp('NewAtom')
#Chem.SanitizeMol(p)
#p.UpdatePropertyCache(False)
rxn.AddProductTemplate(p.GetMol())
rxn.Initialize()
Rules.append(rxn)
return Rules
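# Illustrative usage sketch (not part of the original module). The SMILES is
# hypothetical and follows the module's convention of '*' (atomic number 0)
# for surface sites; `seed_mol` is assumed to be a lattice-embedded RWMol such
# as the one built by AddAdsorbateToSurf above:
#
#   rules = SetUpReaction('[CH2]([*])[CH2][*]')
#   for rule in rules:
#       for (product,) in rule.RunReactants((seed_mol,)):
#           CleanUp(product)
#
# Each rule is an RDKit ChemicalReaction whose reactant template matches an
# existing adsorbed fragment and whose product template grows it by one carbon.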
def SetUpRingReaction(smiles):
"""
Ring Enumeration rules
"""
Rules = []
# Prepare molecule
Graph = Chem.MolFromSmiles(smiles,sanitize=False)
# renumber for speed
a = []
s = []
for atom in Graph.GetAtoms():
if atom.GetAtomicNum() == 0:
s.append(atom.GetIdx())
else:
a.append(atom.GetIdx())
Graph = Chem.RenumberAtoms(Graph,a+s)
Graph = Chem.RWMol(Graph)
# set bond properties. Needed to limit connecting more than 3 C to one C
for bond in Graph.GetBonds():
if bond.GetBeginAtom().GetAtomicNum() == 0 or bond.GetEndAtom().GetAtomicNum() == 0 :
bond.SetBondType(Chem.BondType.ZERO)
for atom in Graph.GetAtoms():
atom.SetNoImplicit(True)
## Set up molAtomMapNumber
i = 1
for atom in Graph.GetAtoms():
atom.SetProp('molAtomMapNumber',str(i))
i += 1
## Set Atom Type
Anchors = list()
for Atom in Graph.GetAtoms():
if Atom.GetAtomicNum()==6:
nS = 0
for NBRAtom in Atom.GetNeighbors():
if NBRAtom.GetAtomicNum() == 0:
nS += 1
Atom.SetIntProp('nS',nS)
Anchors.append(Atom.GetIdx())
elif Atom.GetAtomicNum()==0:
Atom.SetBoolProp('Occ',False)
for NBRAtom in Atom.GetNeighbors():
if NBRAtom.GetAtomicNum() == 6:
Atom.SetBoolProp('Occ',True)
break
#Chem.SanitizeMol(Graph)
# Set up Product
p = Graph.__copy__()
    ## set atom properties for occupied and unoccupied surface atoms.
OSA = rdqueries.AtomNumEqualsQueryAtom(0)
OSA.SetBoolProp('Occ',True)
OSA.ExpandQuery(rdqueries.HasBoolPropWithValueQueryAtom('Occ',True))
## Set up unoccupied Surface Atom
NOSA = rdqueries.AtomNumEqualsQueryAtom(0)
NOSA.SetBoolProp('Occ',False)
NOSA.ExpandQuery(rdqueries.HasBoolPropWithValueQueryAtom('Occ',False))
# Check whether it's a ring or chain
bonds = []
for anchorpair in itertools.combinations(Anchors,2):
if Graph.GetBondBetweenAtoms(anchorpair[0],anchorpair[1]):
bonds.append(anchorpair)
if len(bonds) == 2: # Chain
AnchorsToRemoves = list(set(bonds[0]).intersection(bonds[1]))
else:
AnchorsToRemoves = Anchors.copy()
uniquesmiles = []
for AnchorToRemove in AnchorsToRemoves:
anc = Anchors.copy()
del anc[anc.index(AnchorToRemove)]
p.GetAtomWithIdx(AnchorToRemove).SetBoolProp('NewAtom',True)
for a in anc:
p.GetAtomWithIdx(a).ClearProp('NewAtom')
# Rule 1 Set up
## Set up Reactant
r = p.__copy__()
## Set up Anchor
for an in anc:
AdsorbedAnchor = rdqueries.AtomNumEqualsQueryAtom(6)
AdsorbedAnchor.ExpandQuery(rdqueries.HasIntPropWithValueQueryAtom('nS',
r.GetAtomWithIdx(an).GetIntProp('nS')))
AdsorbedAnchor.ExpandQuery(rdqueries.TotalValenceLessQueryAtom(3))
AdsorbedAnchor.SetProp('molAtomMapNumber',r.GetAtomWithIdx(an).GetProp('molAtomMapNumber'))
r.ReplaceAtom(an,AdsorbedAnchor)
## replace surfaceatom with query atom
for Atom in r.GetAtoms():
if Atom.GetAtomicNum() == 0:
Occupied = False
for NBRAtom in Atom.GetNeighbors():
if NBRAtom.GetAtomicNum() == 6:
Occupied = True
break
if not Occupied:
NOSA.SetProp('molAtomMapNumber',Atom.GetProp('molAtomMapNumber'))
r.ReplaceAtom(Atom.GetIdx(),NOSA)
## Remove Anchor Atom
r.RemoveAtom(AnchorToRemove)
smiles = Chem.MolToSmiles(r)
if smiles not in uniquesmiles:
uniquesmiles.append(smiles)
## set reaction
rxn = ChemicalReaction()
## add reactant
#Chem.SanitizeMol(r)
#r.UpdatePropertyCache(False)
rxn.AddReactantTemplate(r.GetMol())
## add product
#Chem.SanitizeMol(p)
#p.UpdatePropertyCache(False)
rxn.AddProductTemplate(p.GetMol())
rxn.Initialize()
Rules.append(rxn)
return Rules
class BridgeRule(object):
_a=2.125210
_b=-0.992577
def __init__(self,xyz,siteidx,maxbridge=12):
sitexyz = []
for sidx in siteidx:
xyzs = []
for s in sidx:
xyzs.append(xyz[s,:])
sitexyz.append(np.mean(xyzs,0))
sitexyz = np.array(sitexyz)[:,0:2]
self.Data = {idxs:[[] for _ in range(maxbridge-3)] for idxs in siteidx}
dists = pdist(sitexyz)
n=0
for i in range(len(siteidx)):
for j in range(i+1,len(siteidx)):
nmaxgaslength = self._a*dists[n]+self._b
for k in range(maxbridge-3): # Index starts from gas length 3
if k+3>nmaxgaslength:
self.Data[siteidx[i]][k].append(siteidx[j])
self.Data[siteidx[j]][k].append(siteidx[i])
n+=1
self.C = Chem.Atom('C')
def _AnalyzeReactant(self,reactant):
"""
Set up reactant properties. Identify Bridges
"""
# Identify Bridges
## find all non-surface bound atoms
AtomsToCheckList = list()
for a in reactant.GetAtoms():
if a.GetAtomicNum() == 6:
adsorbed = False
for na in a.GetNeighbors():
if na.GetAtomicNum() ==0:
adsorbed = True
break
if not adsorbed:
a.SetBoolProp('Adsorbed',False)
AtomsToCheckList.append(a)
else:
a.SetBoolProp('Adsorbed',True)
HangingC_Anchor_BridgeLens = list()
"""
HangingC_Anchor_BridgeLens:
List of [HangingC, AnchorInfo]
AnchorInfo:
List of [Anchor Idx (using Isotope to refer to original lattice), Bridge Length]
"""
while AtomsToCheckList:
# initialize
# here a single bridge is identified
NeighborsToCheck = [AtomsToCheckList.pop()]
CheckedAtomIdx = []
Anchors = []
HangingCs = []
while NeighborsToCheck:
AtomBeingChecked = NeighborsToCheck.pop()
CheckedAtomIdx.append(AtomBeingChecked.GetIdx())
                if AtomBeingChecked.GetBoolProp('Adsorbed'): # if it's adsorbed, it's an anchor
Anchors.append(AtomBeingChecked)
else:
# if not anchor, check whether it's hanging, or to continue search
nhbs = AtomBeingChecked.GetNeighbors()
if len(nhbs) == 1: # This is a Hanging atom
HangingCs.append(AtomBeingChecked.GetIdx())
for neighbor_atom in AtomBeingChecked.GetNeighbors():
if neighbor_atom.GetIdx() not in CheckedAtomIdx:
NeighborsToCheck.append(neighbor_atom)
# Remove checked atoms
AtomsToCheckList = [atom for atom in AtomsToCheckList if atom.GetIdx() not in CheckedAtomIdx]
# For Path through organic atoms
BondToBreak = list()
for Atom in Anchors:
for Bond in Atom.GetBonds():
if Bond.GetOtherAtom(Atom).GetIdx() not in CheckedAtomIdx:
BondToBreak.append(Bond.GetIdx())
MolForMolPath = Chem.FragmentOnBonds(reactant,list(set(BondToBreak)))
# Get Bridge Length
for HangingC in HangingCs:
Anchor_BridgeLen = []
for Anchor in Anchors:
bridgelen = len(Chem.GetShortestPath(MolForMolPath,HangingC,Anchor.GetIdx()))
Anchor_BridgeLen.append([frozenset([GetBeforeIdx(na) for na in Anchor.GetNeighbors()
if na.GetAtomicNum() == 0]),bridgelen])
HangingC_Anchor_BridgeLens.append([HangingC,Anchor_BridgeLen])
return HangingC_Anchor_BridgeLens
def RunReactants(self,reactants):
# Initialize
reactant = Chem.RWMol(reactants[0])
HangingC_Anchor_BridgeLens = self._AnalyzeReactant(reactant)
## End of While
products = []
# iterate over Each HangingC+Anchors
for HangingC_Anchor_BridgeLen in HangingC_Anchor_BridgeLens:
HangingC = HangingC_Anchor_BridgeLen[0]
AvailableSites = []
            # There could be multiple anchors; the intersection of their available sites is found
for Anchor_BridgeLen in HangingC_Anchor_BridgeLen[1]:
AvailableSites.append(set(self.Data[Anchor_BridgeLen[0]][Anchor_BridgeLen[1]-2]))
AvailableSites = set.intersection(*AvailableSites)
# Add Bond
for sidx in AvailableSites:
p = reactant.__copy__()
NewAnchorCIdx = p.AddAtom(self.C)
p.AddBond(HangingC, NewAnchorCIdx, order=Chem.BondType.SINGLE)
for s in sidx:
p.AddBond(s, NewAnchorCIdx, order=Chem.BondType.SINGLE)
products.append((p,))
return products
def ConnectBrgNewAtom(self,reactants):
# Initialize
reactant = Chem.RWMol(reactants[0])
HangingC_Anchor_BridgeLens = self._AnalyzeReactant(reactant)
# Find the new atom Anchor
NewAtomAnchor = []
for atom in reactant.GetAtoms():
if atom.HasProp('NewAtom'):
NewAtomIdx = atom.GetIdx()
for na in atom.GetNeighbors():
if na.GetAtomicNum() == 0:
NewAtomAnchor.append(GetBeforeIdx(na))
NewAtomAnchor = frozenset(NewAtomAnchor)
## End of While
products = []
# iterate over Each HangingC+Anchors
for HangingC_Anchor_BridgeLen in HangingC_Anchor_BridgeLens:
HangingC = HangingC_Anchor_BridgeLen[0]
AvailableSites = []
            # There could be multiple anchors; the intersection of their available sites is found
for Anchor_BridgeLen in HangingC_Anchor_BridgeLen[1]:
AvailableSites.append(set(self.Data[Anchor_BridgeLen[0]][Anchor_BridgeLen[1]-2]))
AvailableSites = set.intersection(*AvailableSites)
if NewAtomAnchor in AvailableSites:
p = reactant.__copy__()
p.AddBond(HangingC,NewAtomIdx, order=Chem.BondType.SINGLE)
products.append((p,))
return products
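# Illustrative construction sketch (not from the original code; the variable
# names are assumptions). `lattice_xyz` and `site_sets` correspond to the
# `self.xyz` coordinates and `self.sites` frozensets built by the surface
# class above:
#
#   bridge_rule = BridgeRule(lattice_xyz, site_sets, maxbridge=12)
#   products = bridge_rule.RunReactants((adsorbate_mol,))
#
# BridgeRule mimics the RDKit reaction interface (RunReactants returning a
# list of 1-tuples) so it can be chained with the rules from SetUpReaction.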
def GetBeforeIdx(atom):
iso = atom.GetIsotope()
if iso != 0:
return atom.GetIsotope()-1
else:
return None
def CleanUp(mol):
# Set properties
for Atom in mol.GetAtoms():
if Atom.GetAtomicNum() == 0: # update occupancy
Occupied = False
for na in Atom.GetNeighbors():
if na.GetAtomicNum() == 6:
Occupied = True
break
Atom.SetBoolProp('Occ',Occupied)
if Atom.GetAtomicNum() == 6: # This update Total valence
Atom.UpdatePropertyCache()
def SetUpConstraintMol(s):
mol = Chem.RWMol(Chem.MolFromSmiles(s,sanitize=False))
todelete = []
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 0:
occupied = False
for na in atom.GetNeighbors():
if na.GetAtomicNum() == 6:
occupied = True
break
if not occupied:
todelete.append(atom.GetIdx())
# Remove unoccupied atoms
path = []
for bond in mol.GetBonds():
if bond.GetBeginAtomIdx() not in todelete or bond.GetEndAtomIdx() not in todelete:
path.append(bond.GetIdx())
mol = Chem.PathToSubmol(mol,path)
# # bond set
# for bond in mol.GetBonds():
# if bond.GetBeginAtom().GetAtomicNum() == 0 or bond.GetEndAtom().GetAtomicNum() == 0 :
# bond.SetBondType(Chem.BondType.ZERO)
# Renumbers
a = []
s = []
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 6:
a.append(atom.GetIdx())
else:
s.append(atom.GetIdx())
aa = []
unprocessed = [a[0]]
while unprocessed:
i = unprocessed.pop()
aa.append(i)
atom = mol.GetAtomWithIdx(i)
for atom in atom.GetNeighbors():
if atom.GetIdx() not in aa and atom.GetAtomicNum() == 6 :
unprocessed.insert(0,atom.GetIdx())
return Chem.RenumberAtoms(mol,aa+s)
def CheckConfig(s):
# Optimize
out, atoms = GraphToOptStruc(s,SurfAtomNum=0)
if out == -1:
        return -1, s # Embedding failed
ans = atoms.get_atomic_numbers()
# set rcov
## 1.65 is max for CC, and 0.8 is the min for CC
rcov = np.zeros((len(atoms)))
rcov[ans==6] = 0.7174
rcov[ans==0] = 1.3695565
# Determine connectivity of the organic atoms
pos = atoms.get_positions()
# Get pairwise distance
dist = pdist(pos)
# See if CC distance is too short or long
# Check if atoms are too close
mol = Chem.MolFromSmiles(s,sanitize=False)
CC=[]
for bond in mol.GetBonds():
if bond.GetBeginAtom().GetAtomicNum() == 6 and bond.GetEndAtom().GetAtomicNum() == 6:
i = bond.GetBeginAtom().GetIdx()
j = bond.GetEndAtom().GetIdx()
if j < i:
t = i
i = j
j = t
CC.append([i,j])
CC = np.array(CC)
n = mol.GetNumAtoms()
CCIdx = CC[:,0]*n + CC[:,1] - CC[:,0]*(CC[:,0]+1)/2 - CC[:,0] - 1
CCIdx = CCIdx.astype(int)
if np.any(dist[CCIdx] > 1.65) or np.any(dist[CCIdx] < 0.8):
        return -2, s # distance criterion not met
# get index
index = np.array(list(combinations(range(len(atoms)),2)))
# Get distance criteria
dist_max = np.sum(rcov[index],axis=1)*1.15
# Bool mask for atoms with bond
YesBond = dist<dist_max
# Make Mol
RdkitMol = Chem.RWMol(Chem.Mol())
for an in ans:
atom = Chem.Atom(int(an))
RdkitMol.AddAtom(atom)
for i,j in index[YesBond]:
RdkitMol.AddBond(int(i),int(j),order=Chem.rdchem.BondType.SINGLE)
if Chem.MolToSmiles(RdkitMol) == s:
return 1, s
else:
return -3, s # Wrong smiles
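# Sanity-check sketch (not part of the original module) for the condensed
# pdist indexing used in CheckConfig above: for a pair (i, j) with i < j out
# of n points, scipy stores the distance at index i*n + j - i*(i+1)/2 - i - 1.
def _demo_condensed_pdist_index():
    import numpy as np
    from scipy.spatial.distance import pdist, squareform
    pts = np.random.rand(6, 3)
    condensed = pdist(pts)
    square = squareform(condensed)
    n = len(pts)
    for i in range(n):
        for j in range(i + 1, n):
            k = i * n + j - i * (i + 1) // 2 - i - 1
            # the condensed vector entry must equal the square-form entry
            assert np.isclose(condensed[k], square[i, j])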
CovalentRadius = {'Ag':1.46, 'Al':1.11, 'As':1.21, 'Au':1.21, 'C':0.77, 'Ca':1.66, 'Cd':1.41,
'Co':1.21, 'Cr':1.26, 'Cu':1.21, 'Fe':1.26, 'Ga':1.16, 'Ge':1.22, 'H':0.37,
'In':1.41, 'Ir':1.21, 'Mn':1.26, 'Mo':1.31, 'N':0.74, 'Na':1.66, 'Nb':1.31,
'Ni':1.21, 'O':0.74, 'Os':1.16, 'Pb':1.66, 'Pd':1.26, 'Pt':1.21, 'Re':1.21,
'Rh':1.21, 'Ru':1.16, 'S':1.04, 'Sb':1.41, 'Se':1.17, 'Si':1.17, 'Sn':1.4,
'Ti':1.26, 'V':1.21, 'W':1.21, 'Zn':1.21}
class AtomDB(object):
def __init__(self):
PT = Chem.GetPeriodicTable()
self.SurfaceAtomicNumbers = set()
self.AdsorbateAtomicNumbers = set()
self.CovalentRadius = dict()
for Symbol in SurfaceElements:
self.SurfaceAtomicNumbers.add(PT.GetAtomicNumber(Symbol))
self.SurfaceAtomicNumbers.add(0)
for Symbol in AdsorbateElements:
self.AdsorbateAtomicNumbers.add(PT.GetAtomicNumber(Symbol))
for Symbol in CovalentRadius:
self.CovalentRadius[PT.GetAtomicNumber(Symbol)] = CovalentRadius[Symbol]
def IsAdsorbateAtomNum(self,AtomicNumber):
if AtomicNumber in self.AdsorbateAtomicNumbers:
return True
return False
def IsSurfaceAtomNum(self,AtomicNumber):
if AtomicNumber in self.SurfaceAtomicNumbers:
return True
return False
def GetCovalentRadius(self, AtomicNumber):
if AtomicNumber in self.CovalentRadius:
return self.CovalentRadius[AtomicNumber]
else:
            raise NotImplementedError('Missing covalent radius information')
def IsSurfaceAtomOccupied(Atom):
# Assumes supplied atom is surface atom
if not isinstance(Atom,(Chem.Atom,Chem.QueryAtom)):
raise TypeError('Atom has to be rdkit.Chem.rdchem.Atom/QueryAtom')
    for NeighborAtom in Atom.GetNeighbors():
        if ADB.IsAdsorbateAtomNum(NeighborAtom.GetAtomicNum()):
            return True
    return False
ADB = AtomDB()
def GetCovalentRadius(AtomicNumber):
# Assumes supplied atom is adsorbate atom
if not isinstance(AtomicNumber,(int,np.int64,np.int32)):
raise TypeError('AtomicNumber has to be int')
return ADB.GetCovalentRadius(AtomicNumber)
def IsAdsorbateAtomNum(AtomicNumber):
if not isinstance(AtomicNumber,(int,np.int32,np.int64)):
raise TypeError('AtomicNumber has to be int')
return ADB.IsAdsorbateAtomNum(AtomicNumber)
def IsAdsorbateAtomAdsorbed(Atom):
# Assumes supplied atom is adsorbate atom
if not isinstance(Atom,(Chem.Atom,Chem.QueryAtom)):
raise TypeError('Atom has to be rdkit.Chem.rdchem.Atom/QueryAtom')
    for NeighborAtom in Atom.GetNeighbors():
        if ADB.IsSurfaceAtomNum(NeighborAtom.GetAtomicNum()):
            return True
    return False
def IsSurfaceAtomNum(AtomicNumber,ZeroIsMetal=True):
if not isinstance(AtomicNumber,(int,np.int32,np.int64)):
raise TypeError('AtomicNumber has to be int')
if ZeroIsMetal and AtomicNumber==0:
return True
else:
return ADB.IsSurfaceAtomNum(AtomicNumber)
def GetNumSurfAtomNeighbor(Atom):
# Assumes supplied atom is adsorbate atom
if not isinstance(Atom,(Chem.Atom,Chem.QueryAtom)):
raise TypeError('Atom has to be rdkit.Chem.rdchem.Atom/QueryAtom')
n = 0
for NeighborAtom in Atom.GetNeighbors():
if NeighborAtom.HasProp('Type'):
if NeighborAtom.GetProp('Type') == 'S':
n += 1
elif ADB.IsSurfaceAtomNum(NeighborAtom.GetAtomicNum()):
n += 1
return n
def SetAdsorbateMolAtomProps(Mol,ZeroIsMetal = True):
if not isinstance(Mol,(Chem.Mol,Chem.RWMol,Chem.EditableMol)):
raise TypeError('Mol has to be rdkit.Chem.rdchem.Mol/RWMol/EditableMol')
#Set Atom Type
for Atom in Mol.GetAtoms():
        if Atom.HasProp('Type'):
            pass # no-op: the atom type is (re)assigned below regardless
if ADB.IsAdsorbateAtomNum(Atom.GetAtomicNum()):
Atom.SetProp('Type','A')
elif ADB.IsSurfaceAtomNum(Atom.GetAtomicNum()) or (ZeroIsMetal and Atom.GetAtomicNum() == 0):
Atom.SetProp('Type','S')
Atom.SetProp('smilesSymbol','M')
if IsSurfaceAtomOccupied(Atom):
Atom.SetBoolProp('Occupied',True)
else:
Atom.SetBoolProp('Occupied',False)
# Set Bond Type and assign radical electrons
for Bond in Mol.GetBonds():
if Bond.GetBeginAtom().GetProp('Type') == 'S' or Bond.GetEndAtom().GetProp('Type') == 'S':
Bond.SetBondType(Chem.rdchem.BondType.ZERO)
else:
Bond.SetBondType(Chem.rdchem.BondType.SINGLE)
Chem.AssignRadicals(Mol)
# Set smilesSymbol and Adsorbed
for Atom in Mol.GetAtoms():
if Atom.GetProp('Type') == 'A':
NSurf = GetNumSurfAtomNeighbor(Atom)
if Atom.GetAtomicNum() != 1:
Atom.SetProp('smilesSymbol',Atom.GetSymbol() + str(Atom.GetNumRadicalElectrons())+ str(NSurf))
if NSurf != 0:
Atom.SetBoolProp('Adsorbed',True)
else:
Atom.SetBoolProp('Adsorbed',False)
if Atom.GetAtomicNum() != 1 and Atom.GetNumRadicalElectrons() == 0:
Atom.SetProp('smilesSymbol','[' + Atom.GetSymbol() + '0]')
else:
            raise ValueError('Unrecognized Atom Element Type! See GraphLearning.Settings')
def _PretreatSMILESorMol(SMILESorMol):
if isinstance(SMILESorMol,str):
SMILESorMol = Chem.MolFromSmiles(SMILESorMol,sanitize=False)
SetAdsorbateMolAtomProps(SMILESorMol)
# for bond in SMILESorMol.GetBonds():
# if bond.GetBeginAtom().GetProp('Type') == 'S' or bond.GetBeginAtom().GetProp('Type') == 'S':
# bond.SetBondType(Chem.rdchem.BondType.ZERO)
# SMILESorMol = AllChem.AddHs(SMILESorMol)
if isinstance(SMILESorMol,(Chem.Mol,Chem.EditableMol,Chem.RWMol)):
SMILESorMol = SMILESorMol.__copy__()
SetAdsorbateMolAtomProps(SMILESorMol,ZeroIsMetal=True)
mol = Chem.RWMol(SMILESorMol)
Chem.SanitizeMol(mol)
for i in range(0,mol.GetNumAtoms()):
atom = mol.GetAtomWithIdx(i)
if atom.GetProp('Type') == 'S':
SurfAtom = Chem.Atom(78) # Platinum. This needs to be done otherwise rdkit forcefield does not work
SurfAtom.SetProp('Type','S')
SurfAtom.SetBoolProp('Occupied',atom.GetBoolProp('Occupied'))
mol.ReplaceAtom(i,SurfAtom)
mol.UpdatePropertyCache()
else:
raise TypeError('Unrecognized adsorbate graph input')
return mol
def GraphToOptStruc(SMILESorMol, OutputPath=None, LatticeConstant=3.924, Quiet = True, SurfAtomNum = 46,ZStrain=150.0):
# Initialize
mol = _PretreatSMILESorMol(SMILESorMol)
NearestNeighborDistance = LatticeConstant/np.sqrt(2)
# Get list of Surface Atom Indices
SurfIdxs = list()
for atom in mol.GetAtoms():
if atom.GetProp('Type') == 'S':
SurfIdxs.append(atom.GetIdx())
# Get Surface conformer
output = AllChem.EmbedMolecule(mol)
if output == -1:
return -1,None # Embedding failed
conf = mol.GetConformer(0)
if len(SurfIdxs) == 0:
# Gas Phase Molecule
ff = AllChem.UFFGetMoleculeForceField(mol)
# Optimize Molecule
ff.Initialize()
output = ff.Minimize()
else:
# Set up Surface Coordinate
"""
Algorithm:
            Set first and second atoms to eliminate two degrees of freedom, and then
            start setting the other atoms' positions based on the first two
"""
if len(SurfIdxs) == 1:
conf.SetAtomPosition(SurfIdxs[0], (0,0,0))
elif len(SurfIdxs) == 2:
conf.SetAtomPosition(SurfIdxs[0], (0,0,0))
conf.SetAtomPosition(SurfIdxs[1], (NearestNeighborDistance,0,0))
elif len(SurfIdxs) > 2:
# Error check
for idx in SurfIdxs:
Atom = mol.GetAtomWithIdx(idx)
NSurfNeighbor = 0
for NeighborAtom in Atom.GetNeighbors():
if NeighborAtom.GetProp('Type') == 'S':
NSurfNeighbor += 1
if NSurfNeighbor < 2:
raise ValueError('Dangling Surface Atom detected. Make sure surface atoms are attached to at least 2 other connected surface atoms')
# Vector that checks whether or not surface atom is plotted
Plotted = list()
# (N)on-(p)lotted (S)urface Atom (I)dx (T)o (P)lotted (S)urface (N)eighbor (C)ount
NPSITPSNC = defaultdict(int)
# plot first atom
conf.SetAtomPosition(SurfIdxs[0], (0,0,0))
Plotted.append(SurfIdxs[0])
FirstAtom = mol.GetAtomWithIdx(SurfIdxs[0])
# plot second atom and update NPSITPSNC
for NeighborAtom in FirstAtom.GetNeighbors():
if NeighborAtom.GetProp('Type') == 'S':
NPSITPSNC[NeighborAtom.GetIdx()] += 1
SecondAtom = NeighborAtom
conf.SetAtomPosition(SecondAtom.GetIdx(), (NearestNeighborDistance,0,0))
Plotted.append(SecondAtom.GetIdx())
del NPSITPSNC[SecondAtom.GetIdx()]
for NeighborAtom in SecondAtom.GetNeighbors():
if NeighborAtom.GetProp('Type') == 'S' and NeighborAtom.GetIdx() not in Plotted:
NPSITPSNC[NeighborAtom.GetIdx()] += 1
# plot other atoms
while len(NPSITPSNC) != 0:
                # Find an atom with at least two plotted neighbor atoms
NonPlottedIdx = list(NPSITPSNC.keys())
random.shuffle(NonPlottedIdx)
for AtomIdx in NonPlottedIdx:
if NPSITPSNC[AtomIdx] >= 2:
Atom = mol.GetAtomWithIdx(AtomIdx)
break
# Find Two Neighbor Atoms that are connected to each other.
## Find plotted Neighbors
NeighborIdx = list()
for NeighborAtom in Atom.GetNeighbors():
if NeighborAtom.GetProp('Type') == 'S' and NeighborAtom.GetIdx() in Plotted:
NeighborIdx.append(NeighborAtom.GetIdx())
match = False
## Find two atoms that are connected to each other
for idx in NeighborIdx:
# get neighbor atom object
Atom1 = mol.GetAtomWithIdx(idx)
# see if its neighbor is also neighbor of picked atom
for Atom1Neighbor in Atom1.GetNeighbors():
if Atom1Neighbor.GetIdx() in NeighborIdx:
Atom2 = Atom1Neighbor
match = True
break
if match:
break
if not match:
                    continue # a non-plotted surface atom may have two plotted surface neighbors that are not neighbors of each other
else:
# make a vector relative to the first atom
vector = np.array([NearestNeighborDistance/2,3 ** (0.5)/2*NearestNeighborDistance])
# rotate the vector
atom1pos = conf.GetAtomPosition(Atom1.GetIdx())
atom2pos = conf.GetAtomPosition(Atom2.GetIdx())
angle = np.arctan2((atom2pos.y-atom1pos.y),(atom2pos.x-atom1pos.x))
rotation_matrix = np.matrix([[np.cos(angle),-np.sin(angle)],[np.sin(angle),np.cos(angle)]])
vector = np.dot(rotation_matrix, vector)
# move to where first atom is
vector += [atom1pos.x, atom1pos.y]
                    # the vector is set normal to the bond direction; however, that
                    # space could already be occupied by another surface atom, so we
                    # check for duplicates and, if found, use the negative normal direction
for idx in Plotted:
if round(vector[0,0] - conf.GetAtomPosition(idx).x,2) == 0 \
and round(vector[0,1] - conf.GetAtomPosition(idx).y,2) == 0:
vector = np.array([NearestNeighborDistance/2,-3 ** (0.5)/2*NearestNeighborDistance])
vector = np.dot(rotation_matrix, vector)
vector += [atom1pos.x, atom1pos.y]
break
conf.SetAtomPosition(AtomIdx, (vector[0,0],vector[0,1],0))
# Update
Plotted.append(AtomIdx)
del NPSITPSNC[AtomIdx]
for NeighborAtom in Atom.GetNeighbors():
if NeighborAtom.GetProp('Type') == 'S' and NeighborAtom.GetIdx() not in Plotted:
NPSITPSNC[NeighborAtom.GetIdx()] += 1
ff = _SetUpForceField(mol,InitialGuessRun=True,ZStrain=ZStrain)
# Optimize Molecule
ff.Initialize()
        output = ff.Minimize()
# output = ff.Minimize(maxIts=1000000, forceTol=1e-10, energyTol=1e-010);
#
ff = _SetUpForceField(mol,InitialGuessRun=False,ZStrain=ZStrain)
# Optimize Molecule
ff.Initialize()
        output = ff.Minimize(maxIts=1000000, forceTol=1e-10, energyTol=1e-10)
if output == -1:
output = -2
# report minimization result
if not Quiet:
if output == -2:
print('Minimization did not converge ('+str(output)+')')
else:
print('Minimization Successful ('+str(output)+')')
# Output to XSD
## Save atomic number
AtomicNumbers= list()
for atom in mol.GetAtoms():
AtomicNumber = atom.GetAtomicNum()
if AtomicNumber in [0,78]:
AtomicNumber = SurfAtomNum
AtomicNumbers.append(AtomicNumber)
## Save Positions
positions = list()
for i in range(0, mol.GetNumAtoms()):
pos = mol.GetConformer().GetAtomPosition(i)
positions.append([pos.x, pos.y, pos.z])
positions = np.array(positions)
## Make ASE atoms object
aseatoms = ASEAtoms(numbers = AtomicNumbers, positions = positions)
# ASEAtoms.cell = np.ones((3,3))
if OutputPath:
## make connectivity object
connectivity = np.zeros((mol.GetNumAtoms(),mol.GetNumAtoms()))
for i in range(0,mol.GetNumAtoms()):
atom = mol.GetAtomWithIdx(i)
for neighboratom in atom.GetNeighbors():
connectivity[i,neighboratom.GetIdx()] = 1
## Make xsd file
write(OutputPath,aseatoms,connectivity = connectivity)
return output, aseatoms
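# Illustrative usage sketch (hypothetical SMILES and output path; not from the
# original module). The SMILES uses '*' (atomic number 0) for surface atoms:
#
#   status, aseatoms = GraphToOptStruc('C([*])([*])O', OutputPath='out.xsd',
#                                      SurfAtomNum=78, Quiet=False)
#   # status: -1 if embedding failed, -2 if minimization did not converge,
#   # 0 on a successful force-field minimization.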
def _SetUpForceField(mol, SetHybridization = True, AdsorbateSurfaceRepulsion = True, cell = np.diag((1,1,1)), ZLattVecI = 2, InitialGuessRun=False,ZStrain=150.0):
## Compute Perpendicular direction and other diections
OtherVeci = [i for i in [0,1,2] if i != ZLattVecI]
Zvector = cell[ZLattVecI,:]
Zvector = Zvector/np.linalg.norm(Zvector)
Xvector = cell[OtherVeci[0],:]
Xvector = Xvector/np.linalg.norm(Xvector)
XZPerpvector = np.cross(Zvector,Xvector)
XZPerpvector = XZPerpvector/np.linalg.norm(XZPerpvector)
Yvector = cell[OtherVeci[1],:]
Yvector = Yvector/np.linalg.norm(Yvector)
CenterOfSurf = list()
for i in range(0, mol.GetNumAtoms()):
atom = mol.GetAtomWithIdx(i)
if atom.GetProp('Type') == 'S':
CenterOfSurf.append(mol.GetConformer().GetAtomPosition(i))
CenterOfSurf = np.average(CenterOfSurf,axis=0)
# Manually add force field
    # Set up hybridization: works around an RDKit bug where hydrogens get positioned on top of each other
AtomsToSetFF = []
if SetHybridization:
for atom in mol.GetAtoms():
if atom.GetProp('Type') == 'A' and atom.GetNumRadicalElectrons() > 0:
NumNeighbors = atom.GetTotalDegree()
if 'S' in [na.GetProp('Type') for na in atom.GetNeighbors()]:
if atom.GetAtomicNum() == 6:
if NumNeighbors == 4:
atom.SetHybridization(Chem.HybridizationType.SP3)
elif NumNeighbors == 3:
atom.SetHybridization(Chem.HybridizationType.SP2)
elif NumNeighbors == 2:
atom.SetHybridization(Chem.HybridizationType.SP)
elif NumNeighbors == 1:
atom.SetHybridization(Chem.HybridizationType.SP)
elif atom.GetAtomicNum() == 8:
if NumNeighbors == 2:
atom.SetHybridization(Chem.HybridizationType.SP3)
elif NumNeighbors == 1:
atom.SetHybridization(Chem.HybridizationType.SP2)
else: # These are error prone and parameters are set later
atom.SetHybridization(Chem.HybridizationType.UNSPECIFIED)
AtomsToSetFF.append(atom.GetIdx())
ff = AllChem.UFFGetMoleculeForceField(mol)
    # Handle the error-prone atoms: manually set the parameters UFF should have assigned
if SetHybridization:
for atomi in AtomsToSetFF:
atom = mol.GetAtomWithIdx(atomi)
NumNeighbors = atom.GetTotalDegree()
if atom.GetAtomicNum() == 6:
if NumNeighbors == 4:
atom.SetHybridization(Chem.HybridizationType.SP3)
elif NumNeighbors == 3:
atom.SetHybridization(Chem.HybridizationType.SP2)
elif NumNeighbors == 2:
atom.SetHybridization(Chem.HybridizationType.SP)
elif NumNeighbors == 1:
atom.SetHybridization(Chem.HybridizationType.SP)
elif atom.GetAtomicNum() == 8:
if NumNeighbors == 2:
atom.SetHybridization(Chem.HybridizationType.SP3)
elif NumNeighbors == 1:
atom.SetHybridization(Chem.HybridizationType.SP2)
for atomi in AtomsToSetFF:
atom = mol.GetAtomWithIdx(atomi)
idx = [na.GetIdx() for na in atom.GetNeighbors()]
# Bond stretch
for a1 in idx:
ka, r = rdForceFieldHelpers.GetUFFBondStretchParams(mol,a1,atomi)
ff.UFFAddDistanceConstraint(a1,atomi,False,r,r,ka)
# Angle
for a1,a2 in combinations(idx,2):
ka, ang = rdForceFieldHelpers.GetUFFAngleBendParams(mol,a1,atomi,a2)
ff.UFFAddAngleConstraint(a1,atomi,a2,False,ang,ang,ka)
# Angle between neighbor atom and the atom
for natom in atom.GetNeighbors():
idx = [na.GetIdx() for na in natom.GetNeighbors() if na.GetIdx() != atomi and IsAdsorbateAtomNum(na.GetAtomicNum())]
for j in idx:
ka, ang = rdForceFieldHelpers.GetUFFAngleBendParams(mol,j,natom.GetIdx(),atomi)
ff.UFFAddAngleConstraint(j,natom.GetIdx(),atomi,False,ang,ang,ka)
## Vertical
pos = CenterOfSurf + Xvector*10000000
IdxSurfFixedPlusX = ff.AddExtraPoint(pos[0],pos[1],pos[2],fixed=True)-1
pos = CenterOfSurf + XZPerpvector*10000000
IdxSurfFixedPlusXZPerp = ff.AddExtraPoint(pos[0],pos[1],pos[2],fixed=True)-1
## Fix surface atom
for atom in mol.GetAtoms():
if IsSurfaceAtomNum(atom.GetAtomicNum()) or (atom.HasProp('smilesSymbol') and atom.GetProp('smilesSymbol') == 'M'):
# More Flexible in Z direction but not X and Y direction
ff.UFFAddPositionConstraint(atom.GetIdx(), 0.0, 10.0e2)
ff.UFFAddDistanceConstraint(IdxSurfFixedPlusX, atom.GetIdx(), True, 0.0,0.0, 10.0e4)
ff.UFFAddDistanceConstraint(IdxSurfFixedPlusXZPerp, atom.GetIdx(), True, 0.0,0.0, 10.0e4)
## Find surface-adsorbates bond
surfacebond = list()
for bonds in mol.GetBonds():
atom1 = bonds.GetBeginAtom()
atom2 = bonds.GetEndAtom()
        surfbond = 0
        # non-organic atoms are treated as surface atom
        if IsAdsorbateAtomNum(atom1.GetAtomicNum()):
            surfbond += 1
        if IsAdsorbateAtomNum(atom2.GetAtomicNum()):
            surfbond += 1
# surfbond == 0, then two bonded atoms are metal.
# surfbond == 1, then two bonded atoms are metal and organic atom.
# surfbond == 2, then two bonded atoms are both organic atoms.
if surfbond == 1:
# write surfacebond, but put it so that metal index comes first
if IsSurfaceAtomNum(atom1.GetAtomicNum()):
surfacebond.append([bonds.GetBeginAtomIdx(), bonds.GetEndAtomIdx()]) # [Surf atom, adsorbate atom]
elif IsSurfaceAtomNum(atom2.GetAtomicNum()):
surfacebond.append([bonds.GetEndAtomIdx(), bonds.GetBeginAtomIdx()])
# make it into array
surfacebond = np.array(surfacebond)
    # Distance constraint for surface bonds
for i in range(0, surfacebond.shape[0]):
if InitialGuessRun:
ff.UFFAddDistanceConstraint(int(surfacebond[i,0]), int(surfacebond[i,1]), False, 2.0, 2.0, 5000.0)
else:
atom = mol.GetAtomWithIdx(int(surfacebond[i,1]))
atom.UpdatePropertyCache()
ff.UFFAddDistanceConstraint(int(surfacebond[i,0]), int(surfacebond[i,1]), False, 2.0,2.0, 4000.0)
    # constrain surface-adsorbate bonds toward being perpendicular to the surface
pos = CenterOfSurf - Zvector*10000000
IdxSurfFixedMinusZ = ff.AddExtraPoint(pos[0],pos[1],pos[2],fixed=True)-1
for i in range(0, surfacebond.shape[0]):
# find corresponding fixed point
if InitialGuessRun:
ff.UFFAddAngleConstraint(int(surfacebond[i,0]), int(surfacebond[i,1]), IdxSurfFixedMinusZ, False, 0, 0, 10000.0)
else:
ff.UFFAddAngleConstraint(int(surfacebond[i,0]), int(surfacebond[i,1]), IdxSurfFixedMinusZ, False, 0, 0, ZStrain)
    # Add a repulsive force between adsorbate atoms and the surface.
    # This is approximated by placing a fixed point far above the surface and applying a distance constraint
if AdsorbateSurfaceRepulsion:
pos = CenterOfSurf + Zvector*10000000
IdxSurfFixed = ff.AddExtraPoint(pos[0],pos[1],pos[2],fixed=True)-1
# distance strain method
for atom in mol.GetAtoms():
if atom.HasProp('Adsorbed'):
if InitialGuessRun:
ff.UFFAddDistanceConstraint(atom.GetIdx(), IdxSurfFixed, False, 0, 9999997.8, 1000.0)
else:
ff.UFFAddDistanceConstraint(atom.GetIdx(), IdxSurfFixed, False, 0, 9999997.8, 200.0)
# Add angle constraint for neighbors of adsorbed atoms (AdsorbedAtomNeighbor-AdsorbedAtom-Metal)
for i in range(0, surfacebond.shape[0]):
centeratom = mol.GetAtomWithIdx(int(surfacebond[i,1]))
surfaceatomidx = int(surfacebond[i,0])
centeratomidx = int(surfacebond[i,1])
bondedorganicatom = list()
# go through each bond record each bonded atom
for NBRAtom in centeratom.GetNeighbors():
if NBRAtom.GetProp('Type') == 'A':
bondedorganicatom.append(NBRAtom.GetIdx())
#following debugging code print out index of each atoms
#print bondedorganicatom
#print 'surface atom: {0:.0f}, center atom: {1:.0f}'.format(surfaceatom, centeratom)
## (AdsorbedAtomNeighbor-AdsorbedAtom-Metal)
for organicatomidx in bondedorganicatom:
# print 'surf', mol.GetAtomWithIdx(surfaceatomidx).GetSymbol()
# print 'cent', mol.GetAtomWithIdx(centeratomidx).GetSymbol()
# print 'org', mol.GetAtomWithIdx(organicatomidx).GetSymbol()
if mol.GetAtomWithIdx(centeratomidx).GetAtomicNum() == 6:
if len(bondedorganicatom) == 3:
if mol.GetAtomWithIdx(organicatomidx).GetHybridization() == Chem.rdchem.HybridizationType.SP2 and\
mol.GetAtomWithIdx(organicatomidx).GetAtomicNum() == 6:
ff.UFFAddAngleConstraint(surfaceatomidx, centeratomidx, organicatomidx, False, 90, 90, 300.0)
else:
ff.UFFAddAngleConstraint(surfaceatomidx, centeratomidx, organicatomidx, False, 109.5, 109.5, 300.0)
if len(bondedorganicatom) == 2:
ff.UFFAddAngleConstraint(surfaceatomidx, centeratomidx, organicatomidx, False, 145.0, 180.0, 150.0)
if len(bondedorganicatom) == 1:
ff.UFFAddAngleConstraint(surfaceatomidx, centeratomidx, organicatomidx, False, 180.0, 180.0, 150.0)
elif mol.GetAtomWithIdx(centeratomidx).GetAtomicNum() == 8:
if len(bondedorganicatom) == 2:
ff.UFFAddAngleConstraint(surfaceatomidx, centeratomidx, organicatomidx, False, 109.5, 109.5, 150.0)
if len(bondedorganicatom) == 1:
ff.UFFAddAngleConstraint(surfaceatomidx, centeratomidx, organicatomidx, False, 120.0, 120.0, 150.0)
## (AdsorbedAtomNeighbor-AdsorbedAtom-AdsorbedAtomNeighbor)
for organicatomidx1 in bondedorganicatom:
for organicatomidx2 in bondedorganicatom:
if organicatomidx1 != organicatomidx2:
if InitialGuessRun:
ff.UFFAddAngleConstraint(organicatomidx1, centeratomidx, organicatomidx2, False, 120, 180.0, 1000.0)
else:
if len(bondedorganicatom) == 3:
ff.UFFAddAngleConstraint(organicatomidx1, centeratomidx, organicatomidx2, False, 109.5, 120.0, 200.0)
elif len(bondedorganicatom) == 2:
ff.UFFAddAngleConstraint(organicatomidx1, centeratomidx, organicatomidx2, False, 145, 180, 200.0)
elif len(bondedorganicatom) == 1:
ff.UFFAddAngleConstraint(organicatomidx1, centeratomidx, organicatomidx2, False, 180, 180, 200.0)
for Atom in mol.GetAtoms():
if Atom.GetProp('Type') == 'A' and IsAdsorbateAtomAdsorbed(Atom):
nS = 0
SurfIdx = list()
for NBRAtom in Atom.GetNeighbors():
if NBRAtom.GetProp('Type') == 'S':
nS += 1
SurfIdx.append(NBRAtom.GetIdx())
if nS > 1:
combs = combinations(SurfIdx,2)
if Atom.GetTotalValence() + nS == 4:
for comb in combs:
ff.UFFAddAngleConstraint(comb[0], Atom.GetIdx(), comb[1], False, 109.5, 109.5, 4000.0)
return ff
class Surface(object):
def __init__(self,path, name=None,ZLattVecI = 2,SecondLayerAtom='He'):
self.Surf = LoadNonPeriodicGraphByCovalentRadius(path,PBCContainingAdsorbateOnly=False)
if name:
self.name = name
else:
self.name = self.Surf.aseatoms.get_chemical_formula()
self.ZLattVecI = ZLattVecI
self.ns = self.Surf.RdkitMol.GetNumAtoms()
self.SecondLayerAtom = Chem.Atom(SecondLayerAtom)
# Find second layers' connectivity to first layer
layer2xyz = self.Surf.aseatoms.get_scaled_positions()[self.Surf.LayerIdxs[1]]
layer2xyz = np.concatenate(np.array(self.Surf.AddedPBCs)[:,None,:]+layer2xyz[None,:,:])
layer2xyz = np.dot(layer2xyz,self.Surf.aseatoms.cell)
# Add Bond
xyzs = []
refxyzs = self.Surf.aseatoms.get_scaled_positions(wrap=False)
for i in range(len(self.Surf.RdKitAtomIndex2ASEAtomIndex)):
l = literal_eval(self.Surf.RdKitAtomIndex2ASEAtomIndex[i])
xyzs.append(refxyzs[l[0]]+l[1:])
xyzs = np.dot(xyzs,self.Surf.aseatoms.cell)
dist = cdist(layer2xyz,xyzs)
mdist = np.min(dist)
self.SecondLayerConnectivity = [[] for _ in range(layer2xyz.shape[0])]
for i,j in zip(*np.where(np.isclose(dist,mdist,atol=0.2))):
self.SecondLayerConnectivity[int(i)].append(int(j))
# Remove those at the end
lengths = Counter([len(i) for i in self.SecondLayerConnectivity])
nbond = lengths.most_common(1)[0][0]
self.SecondLayerConnectivity = [set(i) for i in self.SecondLayerConnectivity if len(i) == nbond]
SurfAtom = Chem.Atom(0)
def __repr__(self):
return '<GraphLearning.io.Surface|'+self.name+'>'
@classmethod
def GetCanonicalSmiles(cls,mol):
# Convert surface atom to *
for i in reversed(range(mol.GetNumAtoms())):
if mol.GetAtomWithIdx(i).HasProp('Type') and mol.GetAtomWithIdx(i).GetProp('Type') == 'S':
mol.ReplaceAtom(i,cls.SurfAtom)
# change atom set up
for atom in mol.GetAtoms():
# atom.SetIsotope(0)# Not sure what this does...
atom.ClearProp('smilesSymbol')
if atom.GetAtomicNum() != 0:
atom.SetNoImplicit(True)
atom.SetNumRadicalElectrons(1)
# Change bond to single bond
for bond in mol.GetBonds():
bond.SetBondType(Chem.BondType.SINGLE)# Put bracket around atoms
return Chem.MolToSmiles(mol)
def GetProjection(self,SMILESorMol, Quiet = True,ZStrain=150.0):
"""
Output:
output : -1 Minimisation failed, -2 No surface
"""
# Initialize
mol = _PretreatSMILESorMol(SMILESorMol)
# Get list of Surface Atom Indices
SurfIdxs = list()
for atom in mol.GetAtoms():
if atom.GetProp('Type') == 'S':
SurfIdxs.append(atom.GetIdx())
if len(SurfIdxs) == 0:
# print 'No Connectivity To Surface'
return -2, None
OriginalToSurf = dict() # Original Mol Idx -> New Mol Idx
# Get Surface Graph
if len(SurfIdxs) != 1:
BondList = GetBondListFromAtomList(mol,SurfIdxs)
SurfMol = Chem.RWMol(Chem.PathToSubmol(mol,BondList,atomMap = OriginalToSurf).__copy__())
else:
SurfMol = mol.__copy__()
## Non surface Atom
for idx in reversed(range(0,SurfMol.GetNumAtoms())):
atom = SurfMol.GetAtomWithIdx(idx)
if atom.GetProp('Type') == 'A':
SurfMol.RemoveAtom(atom.GetIdx())
OriginalToSurf[SurfIdxs[0]] = 0
# Get mapping
SurfToOriginal = dict()
for OriginalIdx in OriginalToSurf:
SurfToOriginal[OriginalToSurf[OriginalIdx]] = OriginalIdx
        # For substructure search on the surface RDKit mol, query atoms must be used
SurfMolForSearch = Chem.RWMol(SurfMol.__copy__())
SA = rdqueries.HasStringPropWithValueQueryAtom('Type','S')
SA.ExpandQuery(rdqueries.HasBoolPropWithValueQueryAtom('Occupied',False))
SA.SetProp('smilesSymbol','M')
for idx in range(0,SurfMolForSearch.GetNumAtoms()):
SurfMolForSearch.ReplaceAtom(idx,SA)
        # The adsorbate's surface footprint is matched against the full surface.
        # Also find the matches that are closest to the center of the cell
ProjectedSurfIdxsSetsTemp = self.Surf.RdkitMol.GetSubstructMatches(SurfMolForSearch)
# remove projection that includes surface atoms at the edge
ProjectedSurfIdxsSets = []
for ProjectedSurfIdxsSet in ProjectedSurfIdxsSetsTemp:
if not set(self.Surf.EdgeSurf) & set(ProjectedSurfIdxsSet):
ProjectedSurfIdxsSets.append(ProjectedSurfIdxsSet)
Dist2Centers = defaultdict(list) # Distance from center
ProjectedSurfIdxsSetsCategorized = defaultdict(list)
Mol = {}
Center = np.average(self.Surf.aseatoms.cell[0:2,:],axis=0)[:2]
scaledxyz = self.Surf.aseatoms.get_scaled_positions()
for ProjectedSurfIdxs in ProjectedSurfIdxsSets:
SurfCent = list()
for idx in ProjectedSurfIdxs:
ProjectedASEIdx = literal_eval(self.Surf.RdKitAtomIndex2ASEAtomIndex[idx])
SurfCent.append(np.dot(scaledxyz[ProjectedASEIdx[0]] + ProjectedASEIdx[1:],self.Surf.aseatoms.cell)[:2])
SurfCent = np.average(SurfCent,axis=0)
dist = np.linalg.norm(SurfCent - Center)
tmol = mol.__copy__()
for SLC in self.SecondLayerConnectivity:
if SLC and SLC.issubset(set(ProjectedSurfIdxs)):
i = tmol.AddAtom(self.SecondLayerAtom)
for j in SLC:
tmol.AddBond(i,SurfToOriginal[ProjectedSurfIdxs.index(j)])
s = Chem.MolToSmiles(tmol)
Dist2Centers[s].append(dist)
ProjectedSurfIdxsSetsCategorized[s].append(ProjectedSurfIdxs)
if s not in Mol:
Mol[s] = tmol
# Select closest to the center for each smiles
SelectedProjectedSurfIdxsSets = dict()
for s in Dist2Centers:
i = np.argmin(Dist2Centers[s])
SelectedProjectedSurfIdxsSets[s] = ProjectedSurfIdxsSetsCategorized[s][i]
        # Project the adsorbate onto each selected surface match
atoms = []
for s in SelectedProjectedSurfIdxsSets:
Tmol = mol.__copy__()
# Get Coordinates
ProjectedASEIdxs = list()
for idx in SelectedProjectedSurfIdxsSets[s]:
ProjectedASEIdxs.append(literal_eval(self.Surf.RdKitAtomIndex2ASEAtomIndex[idx]))
# Record Mol To ASE
MolToASE = dict()
for i in range(0,len(ProjectedASEIdxs)):
MolToASE[SurfToOriginal[i]] = ProjectedASEIdxs[i][0]
# Set Surface Atom Position
## initialize adsorbate atom positions
SurfCoordMap = dict()
CenterSurf = list()
for i in range(0,len(ProjectedASEIdxs)):
ProjectedASEIdx = ProjectedASEIdxs[i]
pos = self.Surf.aseatoms[ProjectedASEIdx[0]].position + np.dot(ProjectedASEIdx[1:],self.Surf.aseatoms.cell)
CenterSurf.append(pos)
Coord = Geometry.Point3D(pos[0],pos[1],pos[2])
SurfCoordMap[SurfToOriginal[i]] = Coord
CenterSurf = np.average(CenterSurf,axis=0)
conf = Chem.Conformer()
for atom in Tmol.GetAtoms():
if atom.GetProp('Type') == 'A':
conf.SetAtomPosition(atom.GetIdx(),(CenterSurf[0]+np.random.rand()*10-5,CenterSurf[1]+np.random.rand()*10-5,CenterSurf[2]+20))
for idx in SurfCoordMap:
conf.SetAtomPosition(idx,SurfCoordMap[idx])
Tmol.AddConformer(conf)
## Preliminary treatment before optimization
# More options available here:
# http://www.rdkit.org/Python_Docs/rdkit.Chem.rdDistGeom.EmbedParameters-class.html
# More Discussions
# https://sourceforge.net/p/rdkit/mailman/message/32082674/
#EmbedTmolecule(class RDKit::ROTmol {lvalue} Tmol, unsigned int maxAttempts=0,
# int randomSeed=-1, bool clearConfs=True, bool useRandomCoords=False,
# double boxSizeMult=2.0, bool randNegEig=True, unsigned int numZeroFail=1,
# class boost::python::dict {lvalue} coordMap={}, double forceTol=0.001,
# bool ignoreSmoothingFailures=False, bool enforceChirality=True,
# bool useExpTorsionAnglePrefs=False, bool useBasicKnowledge=False,
# bool printExpTorsionAngles=False)
ff = _SetUpForceField(Tmol,cell = self.Surf.aseatoms.cell, ZLattVecI = self.ZLattVecI,InitialGuessRun=True,ZStrain=ZStrain)
# Optimize Tmolecule
ff.Initialize()
            output = ff.Minimize()
# output = ff.Minimize(maxIts=10000000, forceTol=1e-10, energyTol=1e-010);
#
ff = _SetUpForceField(Tmol,cell = self.Surf.aseatoms.cell, ZLattVecI = self.ZLattVecI,InitialGuessRun=False,ZStrain=ZStrain)
# Optimize Tmolecule
ff.Initialize()
            output = ff.Minimize(maxIts=10000000, forceTol=1e-12, energyTol=1e-12)
# report minimization result
if not Quiet:
if output == -1:
print('Minimization did not converge ('+str(output)+')')
else:
print('Minimization Successful ('+str(output)+')')
## Append Position
aseatoms = self.Surf.aseatoms.copy()
for i in range(0, Tmol.GetNumAtoms()):
atom = Tmol.GetAtomWithIdx(i)
if atom.GetProp('Type') == 'A':
atom = ASEAtom(atom.GetSymbol(),Tmol.GetConformer().GetAtomPosition(i))
aseatoms.append(atom)
MolToASE[i] = len(aseatoms)-1
elif atom.GetProp('Type') == 'S':
aseatoms[MolToASE[i]].position = Tmol.GetConformer().GetAtomPosition(i)
atoms.append((aseatoms,self.GetCanonicalSmiles(Mol[s]),output))
return atoms
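# Illustrative usage sketch (the POSCAR path and SMILES are hypothetical; not
# part of the original module):
#
#   surf = Surface('./POSCAR_Pt111', name='Pt111')
#   projections = surf.GetProjection(adsorbate_smiles)
#   for aseatoms, canonical_smiles, status in projections:
#       if status == 0:  # force-field minimization converged
#           ...  # save or inspect the ASE atoms here
#
# GetProjection returns (-2, None) when the input graph has no surface atoms.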
def LoadNonPeriodicGraphByCovalentRadius(CoordinateFPathOrASEAtoms, \
rfacup = 1.35,rfacdown = 0.6, z_vector = 2, PBCContainingAdsorbateOnly=False, CutOffTol=0.3, SetMetalAtomNumToZero = False):
def MakeAdsorbateAtom(AtomicNumber):
if isinstance(AtomicNumber,(np.int64,np.int32)):
AtomicNumber = int(AtomicNumber)
atom = Chem.Atom(AtomicNumber)
atom.SetNoImplicit(True) # this allows molecule to have radical atoms
atom.SetProp('Type','A')
atom.SetBoolProp('Adsorbed',False)
return atom
def MakeSurfAtom(AtomicNumber):
if isinstance(AtomicNumber,(np.int64,np.int32)):
AtomicNumber = int(AtomicNumber)
if SetMetalAtomNumToZero:
atom = Chem.Atom(0)
else:
atom = Chem.Atom(AtomicNumber)
atom.SetProp('Type','S')
atom.SetBoolProp('Occupied',False)
return atom
"""
    This function reads a file using ASE read and constructs a molecular graph
    as an RDKit Mol object. Then the cell is enlarged to include neighbor cells,
and the adsorbates are isolated. Useful for getting graph descriptors
Input List
CoordinateFPathOrASEAtoms: path to ASE readable coordinate file or ASE atoms object
rfacup: Upper percentage limit for determining connectivity.
rfacdown: Lower percentage limit for determining connectivity.
z_vector: index of cell basis vector that is orthogonal to surface.
Output List
adsorbate class
"""
# load POSCAR
if isinstance(CoordinateFPathOrASEAtoms,str) and os.path.exists(CoordinateFPathOrASEAtoms):
AseAtoms = read(CoordinateFPathOrASEAtoms)
elif isinstance(CoordinateFPathOrASEAtoms,ase_Atoms):
AseAtoms = CoordinateFPathOrASEAtoms
else:
        raise ValueError(CoordinateFPathOrASEAtoms, 'Unrecognized input format, or nonexistent file path')
# initialize
ASEAtomIndex2RdKitAtomIndex = dict()
RdKitAtomIndex2ASEAtomIndex = dict()
# (p)eriodic (b)oundary (c)ondition(s)
PBCs = [[0,0,0]]
if AseAtoms.pbc[0]:
temp = np.add(PBCs,[1,0,0])
temp = np.concatenate((temp,np.add(PBCs,[-1,0,0])))
PBCs = np.concatenate((PBCs,temp))
if AseAtoms.pbc[1]:
temp = np.add(PBCs,[0,1,0])
temp = np.concatenate((temp,np.add(PBCs,[0,-1,0])))
PBCs = np.concatenate((PBCs,temp))
if AseAtoms.pbc[2]:
temp = np.add(PBCs,[0,0,1])
temp = np.concatenate((temp,np.add(PBCs,[0,0,-1])))
PBCs = np.concatenate((PBCs,temp))
if not AseAtoms.pbc[0] and not AseAtoms.pbc[1] and not AseAtoms.pbc[2]:
AseAtoms.cell = np.diag((1,1,1))
PBCs = list(PBCs)
for i in range(0,len(PBCs)):
PBCs[i] = list(PBCs[i])
# Get organic atoms from the DFT calculations (their index and atomic number)
oai = list() #organic atom index in the atoms object
ASEIdxToCheck = list()
for i in range(0,AseAtoms.__len__()):
if IsAdsorbateAtomNum(int(AseAtoms[i].number)):
oai.append(i)
ASEIdxToCheck.append(i)
# construct mol object
RdkitMol = Chem.Mol()
RdkitMol = Chem.RWMol(RdkitMol)
    #%% Determine connectivity and each atom's periodic condition.
Adsorbates = list()
while ASEIdxToCheck:
InitialASEIdx = ASEIdxToCheck.pop()
        # Pick an atom and find all connected atoms to make an adsorbate
MolASEIdxToCheck = list()
MolASEIdxToCheck.append(InitialASEIdx)
# List of Picked atoms and PBC
MolASEIdxAndPBC = dict()
MolASEIdxAndPBC[InitialASEIdx] = [0,0,0]
# Add Atom
RdkitIdx = RdkitMol.AddAtom(MakeAdsorbateAtom(AseAtoms[InitialASEIdx].number))
ASEAtomIndex2RdKitAtomIndex[InitialASEIdx] = RdkitIdx
RdKitAtomIndex2ASEAtomIndex[RdkitIdx] = InitialASEIdx
# recursively find all atoms in the adsorbate containing this atom
while MolASEIdxToCheck:
ASEIdxBeingChecked = MolASEIdxToCheck.pop()
# Determine Neighbors
## potential atoms
NeighborIdx = [RdKitAtomIndex2ASEAtomIndex[atom.GetIdx()] for atom in RdkitMol.GetAtomWithIdx(ASEAtomIndex2RdKitAtomIndex[ASEIdxBeingChecked]).GetNeighbors()]
ASEidxlist = [oai[i] for i in range(0,len(oai)) if oai[i] not in NeighborIdx]
for j in ASEidxlist:
                # if this atom has already been accounted for
if j in MolASEIdxAndPBC:
Bool,_,_ = _DetermineConnectivity(AseAtoms,ASEIdxBeingChecked,j,[MolASEIdxAndPBC[j]],1.15,rfacdown,PBCi=MolASEIdxAndPBC[ASEIdxBeingChecked])
if Bool:
RdkitMol.AddBond(ASEAtomIndex2RdKitAtomIndex[ASEIdxBeingChecked],ASEAtomIndex2RdKitAtomIndex[j],order=Chem.rdchem.BondType.SINGLE)
else:
Bool,PBC,_ = _DetermineConnectivity(AseAtoms,ASEIdxBeingChecked,j,PBCs,1.15,rfacdown,PBCi=MolASEIdxAndPBC[ASEIdxBeingChecked])
if Bool:
MolASEIdxAndPBC[j] = list(PBC)
MolASEIdxToCheck.append(j)
# Add Atom
RdkitIdx = RdkitMol.AddAtom(MakeAdsorbateAtom(AseAtoms[j].number))
ASEAtomIndex2RdKitAtomIndex[j] = RdkitIdx
RdKitAtomIndex2ASEAtomIndex[RdkitIdx] = j
RdkitMol.AddBond(ASEAtomIndex2RdKitAtomIndex[ASEIdxBeingChecked],ASEAtomIndex2RdKitAtomIndex[j],order=Chem.rdchem.BondType.SINGLE)
# Add made molecule to the adsorbate list
Adsorbates.append(MolASEIdxAndPBC)
ASEIdxToCheck = [Idx for Idx in ASEIdxToCheck if Idx not in MolASEIdxAndPBC]
    # For each adsorbate, adjust its PBC location to where most of its atoms are found
PBCWithAdsorbateList = list()
AllMolASEIdxAndPBC = dict()
for MolASEIdxAndPBC in Adsorbates:
PBCList = list()
Count = list()
for idx in MolASEIdxAndPBC:
if MolASEIdxAndPBC[idx] not in PBCWithAdsorbateList:
PBCWithAdsorbateList.append(MolASEIdxAndPBC[idx])
if MolASEIdxAndPBC[idx] not in PBCList:
PBCList.append(MolASEIdxAndPBC[idx])
Count.append(1)
else:
i = PBCList.index(MolASEIdxAndPBC[idx])
Count[i] +=1
# Adjust PBC of the adsorbate
PBC = PBCList[np.argmax(Count)]
for idx in MolASEIdxAndPBC:
AllMolASEIdxAndPBC[idx] = np.subtract(MolASEIdxAndPBC[idx],PBC)
# %% Get Surface.
## if none given for surface layer z coordinate, average the top layer atomic coordinate
_, SurfaceAtomIndex,LayerIdxs = _DetermineSurfaceLayerZ(AseAtoms, ZVecIndex = z_vector)
## Construct Surface in each PBC
positions = dict()
SurfMol = Chem.RWMol(Chem.Mol())
for Idx in SurfaceAtomIndex:
RdkitIdx = SurfMol.AddAtom(MakeSurfAtom(AseAtoms[Idx].number))
ASEAtomIndex2RdKitAtomIndex[str([Idx,0,0,0])] = RdkitIdx+RdkitMol.GetNumAtoms()
RdKitAtomIndex2ASEAtomIndex[RdkitIdx+RdkitMol.GetNumAtoms()] = str([Idx,0,0,0])
positions[RdkitIdx+RdkitMol.GetNumAtoms()] = AseAtoms[Idx].position
## Make Bonds and find bond to other
BondsToOtherPBC = list()
AddedPBC = list()
# print(SurfaceAtomIndex) # TODO:
# print(AseAtoms[22].position,AseAtoms[31].position,np.linalg.norm(AseAtoms[22].position-AseAtoms[31].position))# TODO:
for i in range(0,len(SurfaceAtomIndex)):
for j in range(i+1,len(SurfaceAtomIndex)):
Bool,PBC,_ = _DetermineConnectivity(AseAtoms,SurfaceAtomIndex[i],SurfaceAtomIndex[j],PBCs,rfacup,rfacdown)
# if SurfaceAtomIndex[i] == 22 and SurfaceAtomIndex[j] ==31:# TODO:
# print(Bool,PBC)# TODO:
if Bool:
if PBC == [0,0,0]:
# Add Atom
SurfMol.AddBond(i,j,order=Chem.rdchem.BondType.ZERO)
else:
if PBC not in AddedPBC:
AddedPBC.append(PBC)
NPBC = [-PBC[0],-PBC[1],-PBC[2]]
if NPBC not in AddedPBC:
AddedPBC.append(NPBC)
BondsToOtherPBC.append([SurfaceAtomIndex[i],0,0,0,SurfaceAtomIndex[j]]+PBC)
# BondToOtherPBC: [idx1,pbc,idx2,pbc]
# print ASEAtomIndex2RdKitAtomIndex #DEBUG
## assign radicals
Chem.AssignRadicals(RdkitMol)
## set smilesSymbol
for atom in RdkitMol.GetAtoms():
if atom.GetSymbol() in ['C','O'] and atom.GetNumRadicalElectrons() == 0:
atom.SetProp("smilesSymbol",'[' + atom.GetSymbol() + str(atom.GetNumRadicalElectrons())+ '0]')
elif atom.GetNumRadicalElectrons() > 0:
atom.SetProp("smilesSymbol",atom.GetSymbol() + str(atom.GetNumRadicalElectrons()))
#%% Find surface binding adsorbate atom. This is done by finding all the radical atoms
rai_rdkit = list() # radical atom index for rdkit mol
rai_ase = list() # radical atom index for rdkit ase atoms object
for atom in RdkitMol.GetAtoms():
if atom.GetNumRadicalElectrons() > 0:
rai_rdkit.append(atom.GetIdx())
rai_ase.append(RdKitAtomIndex2ASEAtomIndex[atom.GetIdx()])
# %% Surface connectivity
SurfBondDict = dict() #{AtomIdx:BondDistDict}
for i in rai_ase:
PBCi = AllMolASEIdxAndPBC[i]
BondDistPBCDict = dict() #{SurfAtomIdx:(Distance,PBCj)}
for j in SurfaceAtomIndex:
Bool,PBCj,d = _DetermineConnectivity(AseAtoms,i,j,PBCs,rfacup,rfacdown,PBCi = PBCi)
if Bool:
BondDistPBCDict[j] = (d,PBCj)
if PBCj not in PBCWithAdsorbateList:
PBCWithAdsorbateList.append(PBCj)
if len(BondDistPBCDict) != 0:
SurfBondDict[i] = BondDistPBCDict
    # %% Append surface
if not PBCContainingAdsorbateOnly:
        # This just adds the PBCs that the surface spans
## Find absolute number PBCs that surface spans on.
PBCMax = np.max(np.abs(AddedPBC),axis=0)
## Other PBC to other PBC bonds
PBCToAdd = [[0,0,0]]
if PBCMax[0]:
temp = np.add(PBCToAdd,[1,0,0])
temp = np.concatenate((temp,np.add(PBCToAdd,[-1,0,0])))
PBCToAdd = np.concatenate((PBCToAdd,temp))
if PBCMax[1]:
temp = np.add(PBCToAdd,[0,1,0])
temp = np.concatenate((temp,np.add(PBCToAdd,[0,-1,0])))
PBCToAdd = np.concatenate((PBCToAdd,temp))
if PBCMax[2]:
temp = np.add(PBCToAdd,[0,0,1])
temp = np.concatenate((temp,np.add(PBCToAdd,[0,0,-1])))
PBCToAdd = np.concatenate((PBCToAdd,temp))
PBCToAdd = PBCToAdd.tolist()
else:
# Add all PBC with adsorbates on it
## here if we have e.g., [0,0,0] and [-1,1,0], the following for loop
## enumerates [0,0,0],[-1,1,0],[-1,0,0],[0,1,0]
PBCToAdd = copy.deepcopy(PBCWithAdsorbateList)
for PBC in PBCToAdd:
nonzeros = list()
for i in range(0,3):
if PBC[i] != 0:
nonzeros.append(i)
combs = [p for p in itertools.product([0,1], repeat=len(nonzeros))]
for comb in combs:
TempPBC = [0,0,0]
for i in range(0,len(nonzeros)):
if comb[i] == 1:
TempPBC[nonzeros[i]] = PBC[nonzeros[i]]
if TempPBC not in PBCToAdd:
PBCToAdd.append(TempPBC)
## Make Bonds
NewBondsToOtherPBC = list()
for PBC in PBCToAdd:
for j in range(0,len(BondsToOtherPBC)):
pbc1 = list(np.add(BondsToOtherPBC[j][1:4],PBC))
pbc2 = list(np.add(BondsToOtherPBC[j][5:8],PBC))
if np.all(np.abs(pbc1)<2) and np.all(np.abs(pbc2)<2) and\
pbc1 in PBCToAdd and pbc2 in PBCToAdd:
NewBondsToOtherPBC.append([BondsToOtherPBC[j][0]]+list(pbc1)+[BondsToOtherPBC[j][4]]+list(pbc2))
## Add 0,0,0 Surface
RdkitMol = Chem.RWMol(Chem.CombineMols(RdkitMol,SurfMol))
## Add Other Surface
for PBC in PBCToAdd:
if PBC != [0,0,0]:
for k in range(0,len(SurfaceAtomIndex)):
ASEAtomIndex2RdKitAtomIndex[str([SurfaceAtomIndex[k]]+PBC)] = k+RdkitMol.GetNumAtoms()
RdKitAtomIndex2ASEAtomIndex[k+RdkitMol.GetNumAtoms()] = str([SurfaceAtomIndex[k]]+PBC)
positions[k+RdkitMol.GetNumAtoms()] = AseAtoms[SurfaceAtomIndex[k]].position + np.dot(PBC,AseAtoms.cell)
RdkitMol = Chem.RWMol(Chem.CombineMols(RdkitMol,SurfMol))
## Make bonds between surfaces
for bond in NewBondsToOtherPBC:
RdkitMol.AddBond(ASEAtomIndex2RdKitAtomIndex[str(bond[0:4])],ASEAtomIndex2RdKitAtomIndex[str(bond[4:8])],order=Chem.rdchem.BondType.ZERO)
#%% Apply cut off
for i in SurfBondDict: # i is idx of surface bonding adsorbate atom.
# Determine Minimum Distance
MinD = 1000 # Fake Large number.
for j in SurfBondDict[i]: # j is idx of binding surface atom.
if SurfBondDict[i][j][0] < MinD:
MinD = SurfBondDict[i][j][0]
# Apply cut off
for j in SurfBondDict[i]: # j is idx of binding surface atom.
if SurfBondDict[i][j][0] < MinD + CutOffTol:
RdkitMol.AddBond(ASEAtomIndex2RdKitAtomIndex[i],ASEAtomIndex2RdKitAtomIndex[str([j]+list(SurfBondDict[i][j][1]))],order=Chem.rdchem.BondType.ZERO)
RdkitMol.GetAtomWithIdx(ASEAtomIndex2RdKitAtomIndex[str([j]+list(SurfBondDict[i][j][1]))]).SetBoolProp('Occupied',True)
RdkitMol.GetAtomWithIdx(ASEAtomIndex2RdKitAtomIndex[i]).SetBoolProp('Adsorbed',True)
#%% Find surface atoms at the edges
nsurf = defaultdict(int)
for atom in RdkitMol.GetAtoms():
if atom.GetProp('Type') == 'S':
for neighbor_atom in atom.GetNeighbors():
if neighbor_atom.GetProp('Type') == 'S':
nsurf[atom.GetIdx()] += 1
nbond = Counter(nsurf.values()).most_common(1)[0][0]
edgesurf = []
for idx in nsurf:
if nsurf[idx] != nbond:
edgesurf.append(idx)
    # %% assign binding site.
for i in rai_rdkit:
a = RdkitMol.GetAtomWithIdx(i)
nsurf = 0
for neighbor_atom in a.GetNeighbors():
if neighbor_atom.GetProp('Type') == 'S':
nsurf += 1
a.SetProp("smilesSymbol",a.GetProp("smilesSymbol") + str(nsurf))
adsorbate = AdsorbateDatum(AseAtoms,RdkitMol, \
ASEAtomIndex2RdKitAtomIndex, RdKitAtomIndex2ASEAtomIndex)
adsorbate.LayerIdxs = LayerIdxs
adsorbate.AddedPBCs = PBCToAdd
adsorbate.EdgeSurf = edgesurf
return adsorbate
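# Illustrative usage sketch (the CONTCAR path is hypothetical; not from the
# original module):
#
#   datum = LoadNonPeriodicGraphByCovalentRadius('./CONTCAR',
#                                                SetMetalAtomNumToZero=True)
#   print(Chem.MolToSmiles(datum.RdkitMol))
#
# The returned AdsorbateDatum (defined below) carries both the ASE atoms and
# the extracted RDKit graph, plus the index maps between the two.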
def _DetermineSurfaceLayerZ(aseatoms, ZVecIndex = 2, ztol = 1.65):
"""
Find top layer surface atom z coordinates by averaging
atoms within ztol (angstrom) of the top most atoms are selected for averaging
Input List
aseatoms: ASE atoms containing adsorbate/surface system.
ZVecIndex: index of cell basis vector that is orthogonal to surface.
ztol: Atoms within ztol(angstrom) of the top most atoms are selected as
surface atoms.
Output List
SurfaceLayerZ: z coordinate of surface layer.
SurfaceAtomIndex: Index of surface atoms.
    Ideas: This may be done more robustly by first finding surface atoms connected to adsorbates
"""
assert isinstance(aseatoms,ASEAtoms)
# get highest surface atom coordinate
zmax = 0
zs = aseatoms.get_scaled_positions()[:,ZVecIndex]
zs = np.round(zs,decimals = 5)
zs[zs==1.0] = 0.0
for i in range(0,len(aseatoms)):
if IsSurfaceAtomNum(aseatoms[i].number) and zmax < zs[i]:
zmax = zs[i]
# determine z coordinate. average out top layer
    ztol = ztol/np.linalg.norm(aseatoms.cell[ZVecIndex,:])
SurfaceAtomIndex = list()
SurfZs = list()
for i in range(0,len(aseatoms)):
if IsSurfaceAtomNum(aseatoms[i].number) and zmax - ztol < zs[i]:
SurfZs.append(zs[i])
SurfaceAtomIndex.append(i)
SurfaceLayerZ = np.array(SurfZs).mean()
OrderedIdx = np.argsort(zs)[::-1]
nl = 0
LayerIdxs = []
while (nl+1)*len(SurfZs) <=len(zs):
LayerIdxs.append(OrderedIdx[len(SurfZs)*nl:len(SurfZs)*(nl+1)].tolist())
nl +=1
return SurfaceLayerZ, SurfaceAtomIndex, LayerIdxs
def _DetermineConnectivity(AseAtoms,i,j,PBCs,rfacup,rfacdown,PBCi = [0,0,0]):
"""
Determine connectivity between atom i and j. See equation (1) in the
manuscript.
Input List
ASEAtoms: ASE atoms containing adsorbate/surface system
PBCs: Periodic Boundary Conditions. e.g., (1,0,0) means
cell repeats in first basis vector but not others.
rfacup: upper tolerance factor
rfacdown: lower tolerance factor
PBCi: PBC of atom i
Output List
Bool: True if connected, false if not.
PBC: What PBC it's connected to
"""
xyz1 = AseAtoms[i].position + np.dot(PBCi,AseAtoms.cell)
# compute distances to each periodic cell
d = np.linalg.norm(np.dot(PBCs,AseAtoms.cell) + AseAtoms[j].position - xyz1, axis=1)
idx = np.argmin(d)
d = d[idx]
i_d = GetCovalentRadius(AseAtoms[i].number) + GetCovalentRadius(AseAtoms[j].number) # ideal distance
if d <= i_d*rfacup and d >= i_d*rfacdown:
return True, PBCs[idx], d
else:
return False, [0,0,0], 0
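# Worked example of the distance criterion above (illustrative numbers, not
# taken from the manuscript): for a C-H pair the ideal distance is roughly
# 0.76 + 0.31 = 1.07 angstrom (Cordero covalent radii), so with
# rfacdown = 0.8 and rfacup = 1.2 any separation in [0.86, 1.28] angstrom
# is treated as a bond.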
class AdsorbateDatum(object):
"""
This is an object contains aseatoms and the extracted graph
Class Attributes
aseatoms: ASE Atoms object.
RdkitMol: Rdkit Mol object.
ASEAtomIndex2RdKitAtomIndex: Index mapping from ASE atoms to Rdkit Mol
RdKitAtomIndex2ASEAtomIndex: Index mapping from Rdkit Mol to ASE Atoms.
"""
def __init__(self,aseatoms,RdkitMol, ASEAtomIndex2RdKitAtomIndex, \
RdKitAtomIndex2ASEAtomIndex):
assert isinstance(aseatoms,ASEAtoms)
assert isinstance(RdkitMol,Chem.Mol)
assert isinstance(ASEAtomIndex2RdKitAtomIndex,dict)
assert isinstance(RdKitAtomIndex2ASEAtomIndex,dict)
self.aseatoms = aseatoms
self.RdkitMol = RdkitMol
self.ASEAtomIndex2RdKitAtomIndex = ASEAtomIndex2RdKitAtomIndex
self.RdKitAtomIndex2ASEAtomIndex = RdKitAtomIndex2ASEAtomIndex
def GetLatticeAppendedASEAtom(self, Lattice):
"""
This one returns ase atoms that can be turned into XSD
"""
# remove surface atoms
        atoms = self.aseatoms.copy()
for i in range(len(atoms)-1,-1,-1):
if IsSurfaceAtomNum(atoms[i].number):
#TODO: assumes 3rd vector is z-axis
del atoms[i]
# append surface atom
for site in Lattice._Sites:
            # _SurfaceLayerZ is assumed to be attached to the instance
            # elsewhere; it is not set in __init__.
            pos = np.append(site._Coordinate[0:2],self._SurfaceLayerZ)
# pos = np.append(site._Coordinate[0:2],0)
pos = np.dot(atoms.get_cell().transpose(),pos.transpose()).transpose()
if site._SiteType == 0:
atoms.append(ase_Atom('Pt', pos))
elif site._SiteType == 1:
atoms.append(ase_Atom('B', pos))
elif site._SiteType == 2:
atoms.append(ase_Atom('F', pos))
return atoms
| 2.046875 | 2 |
accounts/models.py | igemsoftware/HFUT-China_2015 | 0 | 12765770 | from django.db import models
import datetime
# Create your models here.
class User(models.Model):
username = models.CharField(max_length=16, primary_key=True)
password = models.CharField(max_length=64)
email = models.EmailField()
is_confirmed = models.BooleanField()
def __unicode__(self):
return self.username
class Meta:
db_table="bio_user"
class UserSafety(models.Model):
user = models.ForeignKey(User)
activation_key = models.CharField(max_length=64, blank=True)
    # Pass the callable (no parentheses) so the default is evaluated on each
    # save rather than once at import time.
    key_expires = models.DateTimeField(default=datetime.date.today)
def __unicode__(self):
return self.user.username
class Meta:
db_table = 'bio_usersafety'
class loginRecord(models.Model):
identity = models.CharField(max_length=64)
login_time = models.DateTimeField(auto_now_add=True)
login_ip = models.CharField(max_length=64, null=True)
isSuccess = models.BooleanField(default=False)
def __unicode__(self):
return self.identity
class Meta:
db_table = 'record_login_record' | 2.421875 | 2 |
part/utils.py | chdemko/py-part | 0 | 12765771 | """Utility module that defines the :class:`Singleton` class."""
class Singleton:
"""
Singleton class.
The :class:`Singleton` class is used to force a unique instantiation.
"""
_instance = None
def __new__(cls, *args) -> "Singleton":
"""Control single instance creation."""
if cls._instance is None:
cls._instance = super().__new__(cls, *args)
return cls._instance
def __hash__(self) -> int:
"""Return hash(self)."""
return id(self)
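# Minimal usage sketch (not part of the original module): repeated
# construction always yields the same object.
if __name__ == "__main__":
    first = Singleton()
    second = Singleton()
    assert first is second  # one shared instance
    assert hash(first) == id(first)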
| 3.421875 | 3 |
LabVIEWCode/Subs/pySerial/RogersSerial.py | EricYufengWu/Summer2020 | 0 | 12765772 | import serial,time
ser = None
def InitSerial(port,baudrate,timeout):
global ser
reply = 'None'
try:
ser = serial.Serial(port,baudrate = baudrate,timeout = timeout) # open serial port
except Exception as e:
reply = e
return reply
def N_Serial():
global ser
n = ser.inWaiting()
return str(n)
def WriteSerial(text):
global ser
send = bytes(text, 'ascii')
n = ser.write(send)
return str(n)
def ReadSerial(len):
global ser
return str(ser.read(len).decode())
def CloseSerial():
global ser
ser.close() # close port | 2.8125 | 3 |
athena/tools/process_decode_result.py | leixiaoning/Athena-Giga | 0 | 12765773 | # coding=utf-8
# Copyright (C) 2020 ATHENA AUTHORS; <NAME>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
import sys
import codecs
def process_files(decode_log_file, vocab_file):
""" process decode log, generate label file and results files
"""
vocab = {}
with codecs.open(vocab_file, "r", "utf-8") as vocab_file:
for line in vocab_file:
phone, num = line.strip().split()
vocab[int(num)] = phone
decode_result = open(decode_log_file + ".result", "w", encoding="utf8")
label_result = open(decode_log_file + ".label", "w", encoding="utf8")
with open(decode_log_file, "r") as fin:
to_continue = False
total_line = ""
for line in fin.readlines():
if "predictions" in line:
total_line = line.strip() + " "
to_continue = True
elif to_continue:
total_line += line.strip() + " "
if "avg_acc" in total_line and "Message" not in total_line:
predictions = [int(item) for item in
total_line.split("[[")[1].split("]]")[0].split()][:-1]
labels = [int(item) for item in
total_line.split("[[")[2].split("]]")[0].split()]
decode_result.write(" ".join(
" ".join(vocab[item] for item in predictions).split()) + "\n")
label_result.write(" ".join(
" ".join(vocab[item] for item in labels).split()) + "\n")
decode_result.flush()
label_result.flush()
to_continue = False
total_line = ""
decode_result.close()
label_result.close()
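# For reference, the parser above assumes log entries shaped roughly like
# (a hypothetical line, not copied from a real log):
#   ... predictions: [[12 7 40 2]] ... labels: [[12 7 40]] ... avg_acc: 0.91
# where the first [[...]] block holds predicted ids (last token dropped) and
# the second holds the reference ids; an entry may span several lines.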
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: python process_decode_result.py inference.log vocab")
sys.exit()
_, decode_log, vocab = sys.argv
process_files(decode_log, vocab)
| 2.265625 | 2 |
bindings/cython/examples/brain_highres.py | djhoese/datoviz | 0 | 12765774 | """
# 3D high-res brain mesh
Showing an ultra-high-resolution mesh of a human brain, acquired with a 7 Tesla MRI.
The data is not yet publicly available.
Data courtesy of <NAME> et al.:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME> (2020)
*7 Tesla MRI Followed by Histological 3D Reconstructions in Whole-Brain Specimens*
Front. Neuroanat. 14:536838
doi: 10.3389/fnana.2020.536838
Acknowledgements to <NAME> and <NAME> for data access.
"""
from pathlib import Path
import numpy as np
from datoviz import canvas, run, colormap
c = canvas(show_fps=True, width=1024, height=768)
panel = c.panel(controller='arcball')
visual = panel.visual('mesh', transform='auto')
ROOT = Path(__file__).parent.parent.parent.parent
pos = np.load(ROOT / "data/mesh/brain_highres.vert.npy")
faces = np.load(ROOT / "data/mesh/brain_highres.faces.npy")
assert pos.ndim == 2
assert pos.shape[1] == 3
assert faces.ndim == 2
assert faces.shape[1] == 3
print(f"Mesh has {len(faces)} triangles and {len(pos)} vertices")
visual.data('pos', pos)
visual.data('index', faces.ravel())
visual.data('clip', np.array([0, 0, 1, 1]))
gui = c.gui("GUI")
@gui.control("slider_float", "clip", vmin=-1, vmax=+1, value=+1)
def on_change(value):
visual.data('clip', np.array([0, 0, 1, value]))
run()
| 2.34375 | 2 |
files/download/lib/mwbot.py | ikn/ikn.org.uk | 0 | 12765775 | """A MediaWiki browser and editor.
These are functions to fetch and process data from, and make changes to,
MediaWiki installations. Everything is done through the Wiki class.
Python version: 2.
Release: 6.
Licensed under the GNU Lesser General Public License, version 3; if this was
not included, you can find it here:
https://www.gnu.org/licenses/lgpl-3.0.txt
"""
# TODO:
# uploader: original uploader of file
# recent changes
# delete cookies (current/all in this instance/all in folder)
# logout: actually call action=logout
from os import sep as path_sep, makedirs, remove
from os.path import abspath, basename, expanduser, exists as path_exists
from shutil import rmtree
from time import strftime, gmtime
from re import compile, sub
from urllib import urlencode
from urllib2 import URLError
import json
from pycurl import FORM_FILE
from fetch import get, fetch_pages
class Wiki (object):
"""Create a wiki instance.
Wiki(url[, user, pwd][, trust_me])
url: the base URL of the wiki, after which 'index.php' would normally come.
user/pwd: credentials to log in straight away.
trust_me: set to True to skip checking if wiki can be reached and is valid.
All functions use the active user, specified through Wiki.set_active. Logging
in successfully when no active user is set sets the logged in user to the
active user. Once a username has logged in, it is found in Wiki.logged_in,
and can be set as the active user.
METHODS
UTILITIES
api_raw
api
login
logout
is_logged_in
set_active
READ
source
exists
list_pages
file_url
cats_on_page
WRITE
edit
delete
move [FIX]
move_cat [FIX]
upload
transfer_files [FIX]
ATTRIBUTES
logged_in: list of logged in users
active: the active user, or None
folder: the folder all cookies are stored in
api_url: API URL
"""
    def __init__ (self, url, user = None, pwd = None, trust_me = False):
self.url = self._fix_url(url)
# initialise some stuff
self.api_url = self.url + '/api.php'
self.logged_in = []
self.active = None
self.folder = expanduser('~') + path_sep + '.mwbot' + path_sep
if not path_exists(self.folder):
makedirs(self.folder)
# check wiki exists if need to
if not trust_me:
if self.api('query') != []:
raise ValueError('can\'t access wiki API at \'{0}\''.format(self.api_url))
# log in if asked
if user is not None:
if self.login(user, pwd):
self.active = self.logged_in[0]
def _fix_url (self, url):
# remove protocol prefixes and trailing slash
try:
url = url.lower()
except TypeError:
raise TypeError('url must be a string')
if not url:
raise ValueError('invalid url')
while url[-1] == '/':
url = url[:-1]
dot = url.find('.')
while '/' in url[:dot]:
url = url[url.find('/') + 1:]
dot = url.find('.')
return url
def _cookie (self, user = None):
# construct cookie filepath
if user is None:
user = self.active
if user is None:
raise Exception('no user specified and no active user exists')
return '%scookie_%s_%s' % (self.folder, self.url.encode('hex'), user)
def api_raw (self, action, args = {}, req = 'post', user = None,
format = 'json'):
"""Make API request.
        Wiki.api_raw(action, args = {}, req = 'post'[, user], format = 'json') -> page
action: 'action' parameter.
args: arguments to send to the API.
req: 'get', 'post' or 'httppost'.
user: user to perform the request as (defaults to the active user); if there is
no active user, no cookie is used (anonymous request).
format: 'format' parameter.
"""
try:
c = self._cookie(user)
except Exception:
c = None
if req == 'get':
            GET = dict(args) # copy so the caller's dict (or the shared default) is not mutated
POST = {}
else: # req == *'post':
GET = {}
POST = args
GET['action'] = action
GET['format'] = format
url = 'http://{0}?{1}'.format(self.api_url, urlencode(GET))
httppost = req == 'httppost'
if httppost:
POST = [(str(k), v if isinstance(v, (list, tuple)) else str(v))
for k, v in POST.iteritems()]
else:
POST = urlencode(POST)
data = get(url, POST, c, c, httppost = httppost, info = True)
page, code, real_url = data
if real_url != url:
# got redirected: POST might not work properly, so fix self.url
base = 'http://' + self.url
if real_url.endswith(url[len(base):]):
self.url = self._fix_url(real_url[:len(base) - len(url)])
self.api_url = self.url + '/api.php'
return page
def api (self, *args, **kwargs):
"""Return the parsed JSON of an API query.
See Wiki.api_raw for argument details.
"""
args = args[:5]
if 'format' in kwargs:
del kwargs['format']
return json.loads(self.api_raw(*args, **kwargs))
def login (self, user, pwd, token = None, api = False):
"""Log in.
Wiki.login(user, pwd) -> login_successful.
Adds users successfully logged in to Wiki.logged_in and stores a cookie at
~/.mwbot/cookie_user.
"""
if user in self.logged_in:
return True
# check if already logged in through cookies
res = self.api('query', {'meta': 'userinfo'}, user=user)
if 'anon' not in res['query']['userinfo']:
success = True
else:
            args = {'lgname': user, 'lgpassword': pwd}
if token is not None:
args['lgtoken'] = token
page = self.api('login', args, 'post', user)['login']
if page['result'] == 'NeedToken':
return token is None and self.login(user, pwd, page['token'])
else:
success = page['result'] == 'Success'
if success:
self.logged_in.append(user)
if self.active is None:
self.active = user
return success
def logout (self, user = None):
"""Log a user out.
Wiki.logout(user = Wiki.active)
"""
if user is None:
user = self.active
if user is None:
raise Exception('no user specified and no active user exists')
if user == self.active:
self.set_active(None)
try:
self.logged_in.remove(user)
except ValueError:
pass
def is_logged_in (self, user = None):
"""Check if a user is still logged in.
Wiki.is_logged_in(user = Wiki.active) -> is_logged_in
"""
if user is None:
user = self.active
if user is None:
raise Exception('no user specified and no active user exists')
        res = self.api('query', {'meta': 'userinfo'}, user = user)
        return 'anon' not in res['query']['userinfo']
def set_active (self, user):
"""Set the active user.
Wiki.set_active(user)
Pass user = None to be anonymous.
"""
if user in self.logged_in or user is None:
self.active = user
else:
raise ValueError('user \'{0}\' is not logged in'.format(user))
def source (self, page):
"""Fetch the source of a page.
Wiki.source(page) -> page_source
Raises ValueError if the page doesn't exist.
"""
if not page:
raise ValueError('page name must not be zero-length')
page = self.api(
'query', {'prop': 'revisions', 'rvprop': 'content', 'titles': page}
)['query']['pages'].values()[0]
if 'missing' in page:
raise ValueError(
'page \'{0}\' doesn\'t seem to exist'.format(page['title'])
)
elif 'invalid' in page:
raise ValueError(
'invalid page name: \'{0}\''.format(page['title'])
)
else:
return page['revisions'][0]['*']
def exists (self, page):
"""Check whether a page exists."""
if not page:
return False
page = self.api(
'query', {'prop': 'info', 'titles': page}
)['query']['pages'].values()[0]
return 'missing' not in page and 'invalid' not in page
def list_pages (self, ns=None, start='', lim=None):
"""List pages given by Special:Allpages.
Wiki.list_pages([ns]) -> page_list
ns: namespace, either a number (faster) or string (TODO). If not given, all
namespaces are checked (TODO).
"""
pages = []
nxt = None
while True:
# get pages up to given limit or a bot maximum, if allowed
get = lim - len(pages) if lim is not None else 500
if get == 0:
break
args = {'list': 'allpages', 'apnamespace': ns, 'aplimit': get}
if pages and nxt is not None:
# already got some: continue from last
args['apcontinue'] = nxt
elif start:
# use given start if any
args['apfrom'] = start
res = self.api('query', args)
try:
pages += [page['title'] for page in res['query']['allpages']]
except (TypeError, KeyError):
raise RuntimeError('unexpected response:', res)
try:
nxt = res['query-continue']['allpages']
nxt = nxt['apfrom'] or nxt['apcontinue']
except (TypeError, KeyError):
break
else:
if not nxt:
# no more to get
break
return pages
def list_cat (self, cat, start='', lim=None):
"""List pages in a category.
Wiki.list_cat(cat) -> page_list
"""
if not cat.lower().startswith('category:'):
cat = 'Category:' + cat
pages = []
while True:
# get pages up to given limit or a bot maximum, if allowed
get = lim - len(pages) if lim is not None else 500
if get == 0:
break
args = {'cmtitle': cat, 'list': 'categorymembers', 'cmlimit': get}
if pages:
# already got some: continue from last
args['cmcontinue'] = \
res['query-continue']['categorymembers']['cmcontinue']
elif start:
# use given start if any
args['cmfrom'] = start
res = self.api('query', args)
pages += [page['title']
for page in res['query']['categorymembers']]
if 'query-continue' not in res:
# no more to get
break
return pages
def file_url (self, page, width=-1, height=-1):
"""Get uploaded file URL.
Wiki.file_url(page[, width])
width: width in pixels of the resulting image.
"""
if any(page.lower().startswith(prefix)
for prefix in ('file:', 'image:')):
page = page[page.find(':') + 1:]
# Image: for compatibility with older MW versions
res = self.api(
'query',
{
'prop': 'imageinfo', 'iiprop': 'url', 'iiurlwidth': width,
'iiurlheight': height, 'titles': 'Image:' + page
}
)
try:
info = res['query']['pages'].values()[0]['imageinfo'][0]
except (TypeError, KeyError, IndexError):
raise RuntimeError('unexpected response:', res)
# thumburl can be an empty string
url = info.get('thumburl') or info.get('url')
return url
def cats_on_page (self, page):
"""Get the categories that the given page is in.
Wiki.cats_in_page(page)
"""
cats = []
while True:
args = {'prop': 'categories', 'titles': page, 'cllimit': 500}
if cats:
# already got some: continue from last
args['clcontinue'] = \
res['query-continue']['categories']['clcontinue']
res = self.api('query', args)
page_data = res['query']['pages'].values()[0]
if 'missing' in page_data or 'invalid' in page_data:
raise ValueError('no such page: \'{0}\''.format(page))
cats += [cat['title'] for cat in page_data['categories']]
if 'query-continue' not in res:
# no more to get
break
return cats
def edit (self, page, content, summary='', minor=False, mode='replace'):
"""Edit a page.
Wiki.edit(page, content[, summary], minor=False, mode='replace')
mode: 'replace', 'append' or 'prepend'.
"""
res = self.api(
'query', {'prop': 'info', 'intoken': 'edit', 'titles': page}
)
token = res['query']['pages'].values()[0]['edittoken']
if token == '+\\':
raise Exception('invalid token returned (missing permissions?)')
args = {'title': page, 'token': token, 'summary': summary, 'bot': 'y'}
if minor:
args['minor'] = 'y'
args[{
'replace': 'text', 'append': 'appendtext', 'prepend': 'prependtext'
}[mode]] = content
res = self.api('edit', args)
if res['edit']['result'] != 'Success':
raise Exception('edit failed')
def move (self, page, to, reason='', leave_redirect=True, move_talk=True):
"""Move a page.
Wiki.move(page, to[, reason], leave_redirect = True, move_talk = True)
page: the page to move.
to: the new name of the page.
reason: a reason for the move.
leave_redirect: leave behind a redirect.
move_talk: also move talk page.
"""
return NotImplemented
if page == to:
print 'no change in name; page not moved'
return
# get token
tree = self.get_tree('query', {'prop': 'info', 'intoken': 'move', 'titles': page})
token = tree.find('query').find('pages').find('page').attrib['movetoken']
# perform move
args = {'from': page, 'to': to, 'token': token, 'reason': reason, 'ignorewarnings': 1}
if move_talk:
args['movetalk'] = ''
if not leave_redirect:
args['noredirect'] = ''
tree = self.get_tree('move', args, 'post')
if not leave_redirect and 'redirectcreated' in tree.find('move').attrib:
print 'redirect created: might need to delete'
# TODO: check for errors <error code="..." info="...">
def move_cat (self, cat, to, reason = '', overwrite_if_exists = False):
"""Move a category and recategorise all pages in it.
Wiki.move_cat(cat, to[, reason], overwrite_if_exists = False)
cat: the category to move.
to: the target category.
reason: a reason for the move.
overwrite_if_exists: if the target category exists, whether to edit it with
the source of cat and delete cat. Otherwise, only the
category of the pages in cat is changed.
"""
return NotImplemented
cat, to = self._fix_cat(cat), self._fix_cat(to)
def callback (match):
s = self._temp[match.start():match.end()]
if '|' in s:
return '[[Category:%s|%s]]' % (to, s[s.find('|') + 1:-2])
else:
return '[[Category:%s]]' % to
pattern = compile(r'(?i)\[\[category *: *%s(\|.*)?\]\]' % cat)
        summary = 'changing category from \'%s\' to \'%s\'' % (cat, to) + (' (%s)' % reason if reason else '')
pages = self.pages_in_cat(cat)
for page in pages:
self._temp = self.source(page)
self.edit(page, sub(pattern, callback, self._temp), summary, True)
del self._temp
# move category
if not overwrite_if_exists:
if self.exists('Category:' + to):
                self.delete('Category:' + cat, 'moving to \'%s\' without overwriting' % to + (' (%s)' % reason if reason else ''))
return
if self.exists('Category:' + cat):
# TODO: if fails, try to edit new cat with old cat's contents then delete old one
self.move('Category:' + cat, 'Category:' + to, reason, False)
def delete (self, page, reason=''):
"""Delete a page.
Wiki.delete(page, reason='')
page: the page to delete.
reason: a reason for the deletion.
"""
res = self.api(
'query', {'prop': 'info', 'intoken': 'delete', 'titles': page}
)
token = res['query']['pages'].values()[0]['deletetoken']
if token == '+\\':
raise Exception('invalid token returned (missing permissions?)')
res = self.api('delete',
{'title': page, 'token': token, 'reason': reason})
if 'error' in res:
raise Exception('deletion failed', res)
def upload (self, fn, name = None, desc = '', destructive = True):
"""Upload a file.
Wiki.upload(fn[, name], desc = '')
fn: file path.
name: name to save the file as at the wiki (without the 'File:'); defaults to
the file's local name.
desc: description (full page content).
destructive: ignore any warnings.
"""
if name is None:
name = basename(fn)
elif any(name.lower().startswith(prefix)
for prefix in ('file:', 'image:')):
name = name[name.find(':') + 1:]
# get token
res = self.api('query', {
'prop': 'info', 'intoken': 'edit', 'titles': 'File:' + name
})
try:
token = res['query']['pages'].values()[0]['edittoken']
except (TypeError, KeyError, IndexError):
raise RuntimeError('token request: unexpected response', res)
# perform upload
args = {'filename': name, 'file': (FORM_FILE, fn), 'text': desc,
'token': token}
if destructive:
args['ignorewarnings'] = 1
res = self.api('upload', args, 'httppost')
if 'error' in res:
raise RuntimeError('upload failed', res['error'])
def transfer_files (self, target, *pages, **kwargs):
"""Move files and their descriptions from one wiki to another.
Wiki.transfer_files(target, *pages, destructive = True) -> failed_pages
target: a Wiki instance or tuple of Wiki constructor arguments to create a new
instance.
pages: files' page names on this wiki (without namespace).
destructive: ignore any warnings (otherwise add that image to the failed list).
This is a keyword-only argument.
        failed_pages: dict mapping each failed page name to its error message.
"""
if not pages:
return []
destructive = kwargs.get('destructive', True)
if not isinstance(target, Wiki):
print '\tcreating Wiki instance...'
target = Wiki(*target)
# add/replace namespaces
pages_arg = '|'.join('Image:' + page[page.find(':') + 1:]
for page in pages)
def all_failures (err):
return dict((name, err) for name in pages)
# get file details
res = self.api('query', {
'prop': 'revisions|imageinfo', 'rvprop': 'content',
'iiprop': 'url', 'titles': pages_arg
}, 'post')
try:
pages_info = res['query']['pages'].values()
except (TypeError, KeyError, AttributeError):
return all_failures(('page info: unexpected response', res))
for page in pages_info:
if not isinstance(page, dict) or 'title' not in page:
return all_failures(
('page info: unexpected page in response', page))
# get edit tokens
res = target.api('query', {
'prop': 'info', 'intoken': 'edit', 'titles': pages_arg
})
try:
tokens = dict((page['title'], page['edittoken'])
for page in res['query']['pages'].values())
except (TypeError, KeyError, AttributeError):
return all_failures(('token request: unexpected response', res))
failed = {}
for page in pages_info:
name = page['title']
info = page.get('imageinfo')
if 'missing' in page or not info:
failed[name] = 'doesn\'t exist'
else:
# upload
token = tokens[name]
try:
url = info[0]['url']
except (TypeError, IndexError, KeyError):
failed[name] = ('unexpected page info', info)
continue
try:
content = page['revisions'][0].values()[0]
except (TypeError, KeyError, IndexError, AttributeError):
content = ''
args = {
# get rid of namespace
'filename': name[name.find(':') + 1:],
'text': content.encode('utf-8'),
'url': url, 'token': token
}
if destructive:
args['ignorewarnings'] = 1
res = target.api('upload', args, 'post')
if isinstance(res, dict) and 'error' in res:
failed[name] = res['error']
if not destructive:
# TODO: check for warnings
pass
return failed
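# Minimal usage sketch (not part of the original module; the hostname,
# credentials and page names below are placeholders):
if __name__ == '__main__':
    wiki = Wiki('example.org/w', trust_me = True)
    if wiki.login('ExampleBot', 'secret'):
        print wiki.source('Main Page')
        wiki.edit('Sandbox', 'Hello from mwbot.', summary = 'test edit')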
| 3.078125 | 3 |
api/example_3.py | cfe-lab/Kive | 2 | 12765776 | """Create and use a dataset using an external file.
Note that this example:
- Only works when it's run on the same host as the Kive server and Kive worker
(e.g. in the `dev-env` environment). On a production server, external files
are kept in a network share, so they can be accessed from different hosts.
- Requires an instance of `librarian.models.ExternalFileDirectory` called "tmp"
pointing at `/tmp` to be created and saved on the server. This can be done
through the Django shell (`python manage.py shell` in the `kive` directory).
"""
import io
import pathlib
import pprint
import kiveapi
# Use HTTPS on a real server, so your password is encrypted.
# Don't put your real password in source code, store it in a text file
# that is only readable by your user account or some more secure storage.
session = kiveapi.KiveAPI("http://localhost:8000")
session.login('kive', 'kive')
# Set up an External File to use in an example run.
EFD_DIRECTORY = pathlib.Path("/tmp")
EFD_DIRECTORY_NAME = "tmp"
EFD_NAME = "api_example_external_file.csv"
EFD_CONTENT = "name\nCamus"
with (EFD_DIRECTORY / EFD_NAME).open("w") as outf:
outf.write(EFD_CONTENT)
# Upload data
try:
uploaded_dataset = session.add_dataset(
'API Example 3 External Dataset',
'None',
None,
None,
None,
["Everyone"],
externalfiledirectory=EFD_DIRECTORY_NAME,
external_path=EFD_NAME,
)
except kiveapi.KiveMalformedDataException as e:
print(e)
pass
# Now get the file and check that the results make sense.
retrieved_dataset = session.find_datasets(
dataset_id=uploaded_dataset.dataset_id)[0]
pprint.pprint(retrieved_dataset.__dict__)
assert retrieved_dataset.dataset_id == uploaded_dataset.dataset_id
assert retrieved_dataset.filename == uploaded_dataset.filename
assert retrieved_dataset.name == "API Example 3 External Dataset"
assert retrieved_dataset.users_allowed == []
assert retrieved_dataset.groups_allowed == ["Everyone"]
assert retrieved_dataset.externalfiledirectory == EFD_DIRECTORY_NAME
assert retrieved_dataset.external_path == EFD_NAME
buffer = io.StringIO()
retrieved_dataset.download(buffer)
assert buffer.getvalue() == EFD_CONTENT
| 2.90625 | 3 |
djangoapp/migrations/0007_auto_20170607_0426.py | Laure129/findheadposes | 0 | 12765777 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-06-07 01:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangoapp', '0006_remove_gallery_slug'),
]
operations = [
migrations.AlterField(
model_name='photo',
name='image',
field=models.ImageField(height_field='height', upload_to='', width_field='width'),
),
]
| 1.40625 | 1 |
mantle_simulation/LAB.py | Johnson-A/UNM_Research | 4 | 12765778 | <reponame>Johnson-A/UNM_Research<filename>mantle_simulation/LAB.py<gh_stars>1-10
from dolfin import tanh, sqrt
from constants import mesh_width, mesh_height
keel_width = 0.2 * mesh_width
scale = keel_width / 4
LAB_height = 0.75 * mesh_height
keel_height = mesh_height / 8
def ridge(r, offset):
return 1.0 - tanh((r - offset) / scale)
def hump(r):
return ridge(r, keel_width) - ridge(-r, -keel_width)
def height_at(x):
r = sqrt((x[0] - mesh_width / 2) ** 2 + (x[1] - mesh_width / 2) ** 2)
return LAB_height - keel_height * hump(r) / hump(0)
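# Sanity check of the profile at the keel axis (comments only): at the mesh
# centre r = 0, so hump(0)/hump(0) == 1 and height_at evaluates to
# LAB_height - keel_height, i.e. the boundary is pulled down by exactly
# keel_height directly under the keel.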
| 2.421875 | 2 |
noxfile.py | stbraun/loganalyzer | 0 | 12765779 | # coding=utf-8
""" Configuration of nox test automation tool. """
import nox
@nox.session(python=['3.8', '3.9'])
def lint(session):
"""Run static analysis."""
session.run("pipenv", "install", "--dev", external=True)
session.run("pipenv", "run", "flake8", "loganalysis/", "tests/")
@nox.session(python=['3.8', '3.9'])
def tests(session):
"""Run tests for all supported versions of Python."""
session.run("pipenv", "install", "--dev", external=True)
session.run("pipenv", "run", "pytest", "tests/")
| 1.859375 | 2 |
api/migrations/0003_auto_20170129_1135.py | LuchaComics/comicscantina-django | 0 | 12765780 | <filename>api/migrations/0003_auto_20170129_1135.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-01-29 11:35
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20160525_1726'),
]
operations = [
migrations.AlterField(
model_name='comic',
name='condition_rating',
field=models.PositiveSmallIntegerField(blank=True, choices=[(10.0, '10.0 Gem Mint'), (9.9, '9.9 Mint'), (9.8, '9.8 Near Mint/Mint'), (9.6, '9.6 Near Mint +'), (9.4, '9.4 Near Mint'), (9.2, '9.2 Near Mint -'), (9.0, '9.0 Very Fine/Near Mint'), (8.5, '8.5 Very Fine +'), (8.0, '8.0 Very Fine'), (7.5, '7.5 Very Fine -'), (7.0, '7.0 Fine/Very Fine'), (6.5, '6.5 Fine +'), (6.0, '6.0 Fine'), (5.5, '5.5 Fine -'), (5.0, '5.0 Very Good/Fine'), (4.5, '4.5 Very Good +'), (4.0, '4.0 Very Good'), (3.5, '3.5 Very Good -'), (3.0, '3.0 Good/Very Good'), (2.5, '2.5 Good +'), (2.0, '2.0 Good'), (1.8, '1.8 Good -'), (1.5, '1.5 Fair/Good'), (1.0, '1.0 Fair'), (0.5, '.5 Poor'), (0, 'NG')], null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
),
migrations.AlterField(
model_name='gcdindiciapublisher',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.GCDCountry'),
),
migrations.AlterField(
model_name='gcdpublisher',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.GCDCountry'),
),
migrations.AlterField(
model_name='gcdseries',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.GCDCountry'),
),
migrations.AlterField(
model_name='gcdseries',
name='language',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.GCDLanguage'),
),
]
| 1.585938 | 2 |
StripeCaller/caller/StripeCaller.py | seanpatrickmoran/StripeCaller | 0 | 12765781 | <reponame>seanpatrickmoran/StripeCaller
import sys
sys.path.append("..")
from utils.load_HiC import *
from .functions import enrichment_score2, find_max_slice, phased_max_slice_arr, merge_positions, get_stripe_and_widths
from .mat_ops import strata2vertical, strata2horizontal, blank_diagonal_sparse_from_strata, blank_diagonal
import numpy as np
from multiprocessing import Pool, cpu_count
# from functools import partial
from itertools import repeat
__version__ = '0.0.1'
def _stripe_caller(
mat, positions,
max_range=150000, resolution=1000,
min_length=30000, closeness=50000,
merge=1, window_size=8, threshold=0.01,
N=1,
norm_factors=None, stats_test_log=({}, {})
):
assert max_range % resolution == 0
assert min_length % resolution == 0
if norm_factors is None:
norm_factors = np.ones((len(mat),))
def pack_tuple(*args):
return (*args,)
all_positions = []
targeted_range = pack_tuple(0, max_range // resolution)
if N > 1: # parallel if #CPUs set
        lst = [idx for idx in list(sorted(positions.keys())) if
               window_size < idx < mat.shape[0] - window_size]
        wtd = [max(int(positions[idx]), 1) for idx in list(sorted(positions.keys())) if
               window_size < idx < mat.shape[0] - window_size]
with Pool(N) as pool:
# arr = pool.starmap(enrichment_score2, zip(lst, wtd, len(lst)*[targeted_range], len(lst)*[window_size]))
arr = pool.starmap(enrichment_score2,
zip(repeat(mat), lst, wtd, repeat(norm_factors), repeat(targeted_range), repeat(window_size)))
arr += np.log10(threshold)
with Pool(N) as pool:
all_positions = (pool.starmap(phased_max_slice_arr, zip(lst, arr, wtd)))
else:
# f2 = open(f'peaks_enrichment.txt', 'w')
        lst = [idx for idx in list(sorted(positions.keys())) if
               window_size < idx < mat.shape[0] - window_size]
        wtd = [max(int(positions[idx]), 1) for idx in list(sorted(positions.keys())) if
               window_size < idx < mat.shape[0] - window_size]
# print(lst,wtd)
for i, idx in enumerate(lst):
if idx <= window_size or idx >= mat.shape[0] - window_size:
continue
arr = enrichment_score2(mat, idx, int(wtd[i]),
distance_range=targeted_range,
window_size=window_size,
norm_factors=norm_factors, stats_test_log=stats_test_log
)
arr = arr + np.log10(threshold)
head, tail, _max = find_max_slice(arr)
all_positions.append((idx, head, tail, _max, wtd[i]))
# f2.write(f'{i} {idx * resolution} {head} {tail} {_max}\n')
# f2.close()
# Step 4: Merging
print(' Merging...')
if not all_positions:
raise ValueError("No statistically significant candidate stripes found(enrichment_score()). "
"Try different args: stripe_width, max_range, resolution, window_size")
all_positions = merge_positions(all_positions)#, merge)
print(len(all_positions))
print(' Filtering by distance and length ...')
new_positions = []
for elm in all_positions:
# print(elm, end=' ')
if (elm[3] - elm[2]) * resolution >= min_length and elm[2] * resolution <= closeness:
# print(True)
new_positions.append(elm)
else:
# print(False)
pass
print(len(new_positions))
# Step 5: Statistical test
results = []
print(' Statistical Tests...')
for elm in new_positions:
[st, ed, head, tail, score] = elm
# p = stat_test(mat, st, ed, stripe_width, head, tail, window_size)
# print(idx * resolution, p)
if score > threshold:
results.append((st, (ed + 1), head, tail, score))
print(len(results))
return results
def stripe_caller_all(
hic_file, reference_genome,
chromosomes,
output_file,
norm='balanced',
threshold=0.01,
max_range=150000, resolution=1000,
min_length=30000, min_distance=50000,
merge=1, window_size=8,
centromere_file=None,
N_threads=1,
nstrata_blank=0, step=36, sigma=12., rel_height=0.3
):
"""
The main function for calling stripes
Args:
hic_file (str): file path
reference_genome (str): reference genome
chromosomes (list): which chromosomes to calculate
output_file (str): output bedpe path
norm (str): recommend: "balanced", can also be "none"
threshold (float): p value threshold
max_range (int): max distance off the diagonal to be calculated
resolution (int): resolution
min_length (int): minimum length of stripes
min_distance (int): threshold for removing stripes too far away from the diagonal
merge (int): merge stripes which are close to each other (# of bins)
window_size (int): size of the window for calculating enrichment score
"""
centro = {}
if centromere_file is not None:
for line in open(centromere_file):
[ch, st, ed] = line.strip().split()[:3]
st, ed = int(st), int(ed)
assert ch.startswith('chr')
if ch not in centro:
centro[ch] = []
centro[ch].append((st, ed))
if hic_file.lower().endswith('hic'):
_format = 'hic'
elif hic_file.lower().endswith('cool'):
_format = 'cool'
elif hic_file.lower().endswith('pairs') or hic_file.lower().endswith('pairs.gz'):
_format = 'pairs'
else:
raise ValueError('Unrecognized format for: ' + hic_file)
f = open(output_file, 'w')
f.write('#chr1\tx1\tx2\tchr2\ty1\ty2\tenrichment\n')
# Stats test record
_calculated_values = {}
_poisson_stats = {}
for ch in chromosomes:
print(f'Calling for {ch}...')
print(' Loading contact matrix...')
strata, norm_factors = load_HiC(
file=hic_file, ref_genome=reference_genome, format=_format,
chromosome=ch, resolution=resolution, norm=norm,
max_distance=max(max_range + min_length, resolution * step)
)
print(' Finish loading contact matrix...')
# full mat for calling candidate stripes
print(' Finding candidate peaks:')
mat = blank_diagonal_sparse_from_strata(strata, nstrata_blank)
h_Peaks, v_Peaks = get_stripe_and_widths(
mat, step=step, sigma=sigma, rel_height=rel_height
)
print(' H:', len(h_Peaks), ', V:', len(v_Peaks))
# f2 = open(f'peaks_{ch}.txt', 'w')
# f2.write('H\n')
# for h in h_Peaks:
# f2.write(f'{h * resolution}\t{h_Peaks[h]}\n')
# f2.write('V\n')
# for v in v_Peaks:
# f2.write(f'{v * resolution}\t{v_Peaks[v]}\n')
# f2.close()
# horizontal
print(' Horizontal:')
mat = strata2horizontal(strata)
if h_Peaks:
results = _stripe_caller(mat, positions=h_Peaks, threshold=threshold,
max_range=max_range, resolution=resolution,
min_length=min_length, closeness=min_distance,
merge=merge, window_size=window_size, N=N_threads,
norm_factors=norm_factors, stats_test_log=(_calculated_values, _poisson_stats)
)
else:
results = []
for (st, ed, hd, tl, sc) in results:
in_centro = False
if ch in centro:
for (centro_st, centro_ed) in centro[ch]:
if centro_st <= st * resolution <= centro_ed or centro_st <= ed * resolution <= centro_ed:
in_centro = True
if not in_centro:
f.write(f'{ch}\t{st*resolution}\t{ed*resolution}\t{ch}\t{max((st+hd), ed)*resolution}\t{(ed+tl)*resolution}\t{sc}\n')
# vertical
print(' Vertical:')
mat = strata2vertical(strata)
if v_Peaks:
results = _stripe_caller(mat, positions=v_Peaks, threshold=threshold,
max_range=max_range, resolution=resolution,
min_length=min_length, closeness=min_distance,
merge=merge, window_size=window_size, N=N_threads,
norm_factors=norm_factors, stats_test_log=(_calculated_values, _poisson_stats)
)
else:
results = []
for (st, ed, hd, tl, sc) in results:
in_centro = False
if ch in centro:
for (centro_st, centro_ed) in centro[ch]:
if centro_st <= st * resolution <= centro_ed or centro_st <= ed * resolution <= centro_ed:
in_centro = True
if not in_centro:
f.write(f'{ch}\t{(st-tl)*resolution}\t{min((ed-hd), st)*resolution}\t{ch}\t{st*resolution}\t{ed*resolution}\t{sc}\n')
f.close()
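# Minimal driver sketch (file paths and the chromosome list below are
# placeholders, not shipped test data):
if __name__ == '__main__':
    stripe_caller_all(
        hic_file='sample.hic', reference_genome='hg38',
        chromosomes=['chr1'], output_file='stripes.bedpe',
        resolution=5000, N_threads=1,
    )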
| 1.734375 | 2 |
interactions/util.py | Bhaskers-Blu-Org2/SARA | 78 | 12765782 | <reponame>Bhaskers-Blu-Org2/SARA<filename>interactions/util.py
# coding=utf8
import json
ACTIVITY_TAG = '[Activity]'
DIALOG_TAG = '[Dialog]'
POPUPWINDOW_TAG = '[PopupWindow]'
VIEW_TAG = '[ViewOnTouchEvent]'
EDITABLE_INPUT_CONNECTION_TAG = '[EditableInputConnection]'
SPANNER_STRING_BUILDER_TAG = '[SpannerStringBuilder]'
TEXT_VIEW_KEY_TAG = '[TextViewKeyboard]'
def extract_info(log):
splits = log.split('-')
tag = splits[0]
package = splits[-1].strip()
plid = int(splits[-2].strip())
ts = splits[-3].strip()
return {
'tag': tag,
'plid': plid,
'package': package,
'content': json.loads('-'.join(splits[1:-3]).strip())['payload'],
'ts': ts
} | 1.8125 | 2 |
Task2C.py | mavevor/flood-warning-system | 0 | 12765783 | from floodsystem.stationdata import build_station_list,update_water_levels
from floodsystem.flood import stations_highest_rel_level
def run():
stations = build_station_list()
update_water_levels(stations)
N = 10
a = stations_highest_rel_level(stations, N)
for i in a:
print("{}, {}".format(i.name, i.latest_level))
if __name__ == "__main__":
print("*** Task 2A: CUED Part IA Flood Warning System ***")
run()
| 2.921875 | 3 |
brainfeed.py | martinpflaum/bachelor_thesis | 0 | 12765784 | <gh_stars>0
"""
MIT License
Copyright (c) 2021 martinpflaum
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class Brainfeed(nn.Module):
def __init__(self,down_dims = [4096,2048,1024],act_layer = nn.GELU,dropout = [0.1,0.1,0.025]):
super(Brainfeed,self).__init__()
#nn.SyncBatchNorm.convert_sync_batchnorm()
pre = down_dims[0]
layers = []
for k in range(1,len(down_dims)):
layers += [nn.BatchNorm1d(pre),nn.Linear(pre, down_dims[k]),act_layer(),nn.Dropout(dropout[k])]
pre = down_dims[k]
#layer +=
final = [nn.BatchNorm1d(pre),nn.Linear(pre,512),nn.BatchNorm1d(512)]
self.layers = nn.Sequential(*layers)
self.final = nn.Sequential(*final)
        self.bias = torch.nn.Parameter(torch.zeros(()))  # learnable scalar shift
        self.mul = torch.nn.Parameter(torch.ones(()))  # learnable scalar scale
def forward(self,x):
#if not isinstance(x, list):
# x = [x]
#if len(x[0].shape)==1:
# x = [elem[None] for elem in x]
#x = torch.cat(x,dim=0)
x = self.layers(x)
x = self.final(x)
x = x*self.mul + self.bias
return x
class Brainwrapper(nn.Module):
def __init__(self,backbone,head):
super(Brainwrapper,self).__init__()
self.backbone = backbone
self.head = head
def forward(self,x):
x = self.backbone(x)
x = self.head(x)
return x
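# Quick shape check for the default configuration (a sketch; the 4096-dim
# input and a batch size > 1 are required by the BatchNorm1d layers):
if __name__ == "__main__":
    model = Brainfeed()
    out = model(torch.rand(8, 4096))
    assert out.shape == (8, 512)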
#from brainloading import BrainLoader,BRAIN_FILE_NAME_TRAIN
"""class Braindset(torch.utils.data.Dataset):
def __init__(self):
super().__init__()
brain_dataset_root = "D:/Datasets/BrainData"
brain_data_file = BRAIN_FILE_NAME_TRAIN
self.brain_loader = BrainLoader(brain_dataset_root,brain_data_file,mapToCupe=False,rel_n=4096)
self.size = self.brain_loader.BRAIN_DATA_ARRAY.shape[0]
def __len__(self):
return self.size
def __getitem__(self, index):
alpha = torch.rand(3)
alpha = nn.functional.softmax(alpha).reshape(3,1)
x = []
x += [self.brain_loader(index,0)[None]]
x += [self.brain_loader(index,1)[None]]
x += [self.brain_loader(index,2)[None]]
x = torch.sum(alpha*torch.cat(x,dim=0),dim=0)
#xb = torch.sum(alpha*torch.cat(x,dim=0),dim=0)
return x,torch.rand(1)"""
#backbone = Backbone()
#backbone(torch.rand(3,4096)).shape
#%%
# %%
| 1.851563 | 2 |
kmeans-vae/utils/misc.py | darylperalta/computer-vision | 8 | 12765785 | <reponame>darylperalta/computer-vision
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torchvision.transforms as transforms
import argparse
from scipy.optimize import linear_sum_assignment
# linear assignment algorithm
def unsupervised_labels(y, yp, n_classes, n_clusters):
assert n_classes == n_clusters
# initialize count matrix
C = np.zeros([n_clusters, n_classes])
# populate count matrix
for i in range(len(y)):
C[int(yp[i]), int(y[i])] += 1
# optimal permutation using Hungarian Algo
# the higher the count, the lower the cost
# so we use -C for linear assignment
row, col = linear_sum_assignment(-C)
# compute accuracy
accuracy = C[row, col].sum() / C.sum()
return accuracy * 100
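# Toy example (comments only): the cluster ids are a permutation of the
# class ids, so the Hungarian matching recovers 100% accuracy.
#   y  = np.array([0, 0, 1, 1, 2, 2])
#   yp = np.array([2, 2, 0, 0, 1, 1])
#   unsupervised_labels(y, yp, 3, 3)  # -> 100.0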
def get_device(verbose=False):
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
#if torch.cuda.device_count() > 1:
# print("Available GPUs:", torch.cuda.device_count())
# # model = nn.DataParallel(model)
if verbose:
print("Device:", device)
return device
def init_weights(model, std=0.01):
if type(model) == nn.Linear:
nn.init.normal_(model.weight, 0, std)
model.bias.data.zero_()
if type(model) == nn.Conv2d:
nn.init.kaiming_normal_(model.weight)
model.bias.data.zero_()
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
x_train = dataset(root='./data',
train=True,
download=True,
transform=transforms.ToTensor())
dataloader = torch.utils.data.DataLoader(x_train, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std...')
for inputs, targets in dataloader:
channels = inputs.size()[1]
for i in range(channels):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(x_train))
std.div_(len(x_train))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # kaiming_normal/constant/normal were renamed to the in-place *_ variants
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
def get_args():
parser = argparse.ArgumentParser(description='MIMax')
parser.add_argument('--single',
default=False,
action='store_true',
help='Use single branch model (supervised)')
parser.add_argument('--sgd',
default=False,
action='store_true',
help='Use optimizer')
parser.add_argument('--supervised',
default=False,
action='store_true',
help='Use double branch model (supervised)')
parser.add_argument('--div-loss',
default="l1",
help='MI divergence loss')
parser.add_argument('--alpha',
type=float,
default=2,
metavar='N',
help='Divergence loss alpha weight')
parser.add_argument('--n-heads',
type=int,
default=2,
metavar='N',
help='Number of heads')
parser.add_argument('--overcluster',
type=int,
default=0,
metavar='N',
help='If overcluster, 10x n_classes')
parser.add_argument('--channels',
type=int,
default=1,
metavar='N',
help='Number of channels')
parser.add_argument('--weight-std',
type=float,
default=0.5,
metavar='N',
help='Linear layer initial weights std')
parser.add_argument('--weight-decay',
type=float,
default=1e-3,
metavar='N',
help='Linear layer initial weights std')
parser.add_argument('--batch-size',
type=int,
default=512,
metavar='N',
help='Batch size for training')
parser.add_argument('--epochs',
type=int,
default=300,
metavar='N',
help='Number of epochs to train')
parser.add_argument('--lr',
type=float,
default=4e-4,
metavar='N',
help='Learning rate')
parser.add_argument('--no-augment',
default=False,
action='store_true',
help='Do not use data augmentation')
parser.add_argument('--vae-latent-dim',
type=int,
default=0,
help='VAE latent dim (enabled when >0)')
parser.add_argument('--vae-weights',
default=None,
help='VAE weights')
parser.add_argument('--kmeans',
default=None,
help='KMeans pickle file')
parser.add_argument('--train',
default=False,
action='store_true',
help='Train model')
parser.add_argument('--eval',
default=False,
action='store_true',
help='Eval model')
parser.add_argument('--save-dir',
default="weights",
help='Folder of model files')
parser.add_argument('--save-weights',
default="classifier.pt",
help='Save current model weights on this file (pt)')
parser.add_argument('--restore-weights',
default="classifier.pt",
help='Load saved model weights from this file (pt)')
parser.add_argument('--summary',
default=False,
action='store_true',
help='Print model summary')
parser.add_argument('--dataset',
default="mnist",
metavar='N',
help='Dataset for training an unsupervised classifier')
args = parser.parse_args()
return args
| 3.1875 | 3 |
MoTrack Therapy Mac/MoTrack Therapy Mac/photos/photo_renaming/rename_data_images_files_v2.py | ryerrabelli/MoTrackTherapyMobilePublic | 2 | 12765786 | #<NAME>
#MoTrack Therapy
#Created Mon Oct 14, 2019
#GOAL: Convert standard iOS file names ("IMG_1750.JPG") to MoTrack data image standard names ("IMG_0001_A_RAW.JPG").
#Description:
#Doesn't rename the files in place in case there is a bug. Instead takes input images in one folder, and makes output in another folder
#Assumes that the images are sequentially named in the original pairing (but not necessarily consecutive if some bad images were deleted)
#Assumes that the RAW image is first, followed by the CV image, for each pair
import os
import re
import shutil
################################################################
############################ PART A ############################
###################### CONFIGURE DETAILS ######################
################################################################
#VALUES THAT MIGHT NEED CHANGING
num_letters_per_set = -1 #Make this 8 for A->H system. Do -1 to enter in specific points
set_starters = [8, 34, 36, 52, 78, 102, 104, 108, 128, 130, 152, 172, 192, 214, 230, 248, 266, 284, 286,
310, 312, 314, 316, 334, 352, 370] #Enter first image in each set
start_set_num = 205 #The first set number
image_pair_names = ["RAW", "CV"] #makes images_in_each_pair 2
original_files_folder = 'original_names' #What folder the original files with their original names are located in
new_files_folder = 'new_names' #Where to put the renamed files
################################################################
############################ PART B ############################
###################### DO THE OPERATIONS ######################
################################################################
#GET INPUT FILES IN FOLDER
all_orig_file_names = os.listdir(original_files_folder)
all_orig_image_names = [k for k in all_orig_file_names if re.match(r'IMG_\d{4}', k)]
all_orig_image_names.sort() #put the files in alphabetical order. IMPORTANT!
print( "# of Files to Rename: " + str(len(all_orig_image_names)))
#(STARTING) CONSTANTS
images_in_each_pair = len(image_pair_names) #2 for RAW and CV
ct = 0
ct_within_set = 0 #doesn't count duplicate _A1 _A2 etc
tot_ct_within_set = 0
set_num = start_set_num
set_names = []
set_lengths = []
set_A_cts = {}
all_new_file_names = []
set_starters_str = ["IMG_{:04d}.JPG".format(img_num) for img_num in set_starters]
error_str = ""
renamed_count = 0
#DO THE RENAMING OPERATIONS
for orig_image_name in all_orig_image_names:
if num_letters_per_set>0 and ct % (num_letters_per_set*images_in_each_pair)==0:
set_names.append("{:04d}".format(set_num))
set_lengths.append(tot_ct_within_set)
set_num += 1
tot_ct_within_set = 0
ct_within_set = 0
elif orig_image_name in set_starters_str:
if ct_within_set>2*images_in_each_pair: #2 here because only A and B are allowed to be duplicated. Anything else, make a new set
set_names.append("{:04d}".format(set_num))
set_lengths.append(tot_ct_within_set)
set_num += 1
tot_ct_within_set = 0
ct_within_set = 0
else:
#Don't increment set number. Just make it named like _A1_RAW.JPG, etc
ct_within_set -= images_in_each_pair
if ct_within_set < 0:
ct_within_set = 0
if ct_within_set >= 26*images_in_each_pair:
error_str += "ERROR: EXCEEDED ALL 26 ALPHABET LETTER OPTIONS.\n"
print("Error. Breaking out of loop.")
break
letter = chr(65+(int(ct_within_set/images_in_each_pair))) #65 represents first letter in alphabet, "A"
if letter=="A":
set_A_cts["{:04d}".format(set_num)] = 0
category = image_pair_names[ct%images_in_each_pair]
subcategoryNum = 0
while True:
new_file_name = "IMG_{:04d}_{}{}_{}.JPG".format(set_num, letter, "" if subcategoryNum==0 else str(subcategoryNum), category )
if letter=="A":
set_A_cts["{:04d}".format(set_num)] = set_A_cts["{:04d}".format(set_num)] + 1
if new_file_name not in all_new_file_names:
break
print("While renaming '" + orig_image_name + "', already found intended a file that is already named '"+new_file_name+"'.")
subcategoryNum += 1
#UNCOMMENT THIS TO ACTUALLY DO THE REMAINING, NOT JUST TO TEST
#shutil.copyfile(original_files_folder+'/'+orig_image_name, new_files_folder+'/'+new_file_name)
renamed_count += 1
all_new_file_names.append(new_file_name)
ct += 1
ct_within_set += 1
tot_ct_within_set += 1
#FINISH OFF FOR LOOP BY COMPLETING LAST SET
set_names.append("{:04d}".format(set_num))
set_lengths.append(tot_ct_within_set)
################################################################
############################ PART C ############################
####################### PRINT OUT OUTPUT #######################
################################################################
#PRINT OUT THE FILE NAMES TO PUT IN EXCEL
print()
#print(all_new_file_names)
print("#\tOld File Name\tNew File Name")
for i,new_file_name in enumerate(all_new_file_names):
print(str(i) + "\t" + all_orig_image_names[i] + "\t" + new_file_name)
#PRINT OUT SET LENGTHS
print()
print("#\tName\tTot\tPairs\tAs\tLetters")
renamed_count_check = 0
for i,set_name in enumerate(set_names):
set_length_pair = int(set_lengths[i]/images_in_each_pair+0.5)
str_to_print = str(i)+"\t" + set_name + "\t" + str(set_length_pair) + "\t" + str(set_A_cts[set_name])
str_to_print += "\tA-{}".format( chr(65+set_lengths[i]-set_length_pair-1) )
if set_A_cts[set_name] > 1:
for i2 in range(2,set_A_cts[set_name]+1):
str_to_print += ",A"+str(i2)
print(str_to_print)
renamed_count_check += set_lengths[i]
#PRINT OUT TOTAL NUMBER OF IMAGES RENAMED
print()
print("Renamed a total of " + str(renamed_count) + " images (Double checked value=" + str(renamed_count_check) + ")")
if renamed_count != renamed_count_check:
error_str += "ERROR: DOUBLING CHECK TOTAL COUNT OF RENAMED IMAGES FAILED." \
" {} != {}. len(all_orig_image_names)={}. len(all_new_file_names)={}" \
".\n".format(renamed_count,renamed_count_check,len(all_orig_image_names),len(all_new_file_names))
#PRINT OUT ANY ERRORS
print(error_str)
| 2.796875 | 3 |
movies-apis/custom-recipes/movies-apis-omdb-details/recipe.py | acloudfrontier/dataiku-contrib | 1 | 12765787 | import pandas as pd
import requests
import dataiku
from dataiku.customrecipe import *
input_dataset = dataiku.Dataset(get_input_names_for_role('input_dataset')[0])
lookup_col = get_recipe_config().get('title_col','')
lookup_name = 'title_queried'
if lookup_col != '':
use_id = False
else:
use_id = True
lookup_col = get_recipe_config().get('imdb_id_col','')
lookup_name = 'IMDb_id_queried'
if lookup_col == '':
raise Exception('Please provide either a column containing titles or a column containing IMDb ids.')
base_query = 'http://www.omdbapi.com/?' \
+ "tomatoes=true" \
+ {
"all" : "",
"movie" : "&type=movie",
"series" : "&type=series",
"episode": "&type=episode",
}[get_recipe_config()['type']]
# other useful parameters: y (year of release), plot={short,full}
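# A fully expanded lookup URL then looks like (illustrative values):
#   http://www.omdbapi.com/?tomatoes=true&type=movie&t=Inception
# or, when an IMDb id column is configured:
#   http://www.omdbapi.com/?tomatoes=true&type=movie&i=tt1375666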
output_dataset = dataiku.Dataset(get_output_names_for_role('output_dataset')[0])
output_writer = output_dataset.get_writer()
def write_output_schema(sample_line):
print "setting schema"
output_schema = [
{'name':lookup_name, 'type':'string'},
{'name':'Title', 'type':'string'},
{'name':'imdbID', 'type':'string'},
{'name':'imdbRating', 'type':'double'},
{'name':'imdbVotes', 'type':'bigint'},
{'name':'Metascore', 'type':'bigint'},
{'name':'tomatoConsensus', 'type':'string'},
{'name':'tomatoFresh', 'type':'bigint'},
{'name':'tomatoImage', 'type':'string'},
{'name':'tomatoMeter', 'type':'bigint'},
{'name':'tomatoRating', 'type':'double'},
{'name':'tomatoReviews', 'type':'bigint'},
{'name':'tomatoRotten', 'type':'bigint'},
{'name':'tomatoUserMeter', 'type':'bigint'},
{'name':'tomatoUserRating', 'type':'double'},
{'name':'tomatoUserReviews', 'type':'bigint'},
{'name':'Actors',
"type":"array", "timestampNoTzAsDate": False, "maxLength": -1,
"arrayContent": {"type": "string", "timestampNoTzAsDate": False, "maxLength": 1000}},
{'name':'Director',
"type":"array", "timestampNoTzAsDate": False, "maxLength": -1,
"arrayContent": {"type": "string", "timestampNoTzAsDate": False, "maxLength": 1000}},
{'name':'Writer',
"type":"array", "timestampNoTzAsDate": False, "maxLength": -1,
"arrayContent": {"type": "string", "timestampNoTzAsDate": False, "maxLength": 1000}},
{'name':'Awards', 'type':'string'},
{'name':'BoxOffice', 'type':'string'},
{'name':'Country',
"type":"array", "timestampNoTzAsDate": False, "maxLength": -1,
"arrayContent": {"type": "string", "timestampNoTzAsDate": False, "maxLength": 1000}},
{'name':'Genre',
"type":"array", "timestampNoTzAsDate": False, "maxLength": -1,
"arrayContent": {"type": "string", "timestampNoTzAsDate": False, "maxLength": 1000}},
{'name':'Language',
"type":"array", "timestampNoTzAsDate": False, "maxLength": -1,
"arrayContent": {"type": "string", "timestampNoTzAsDate": False, "maxLength": 1000}},
{'name':'Plot', 'type':'string'},
{'name':'Poster', 'type':'string'},
{'name':'Production', 'type':'string'},
{'name':'Rated', 'type':'string'},
{'name':'Released', 'type':'string'},
{'name':'Year', 'type':'bigint'},
{'name':'DVD', 'type':'string'},
{'name':'Runtime', 'type':'bigint'},
{'name':'Type', 'type':'string'},
{'name':'Website', 'type':'string'},
]
known_keys = frozenset([e['name'] for e in output_schema])
for key,v in sample_line.items():
if key not in known_keys:
output_schema.append({'name':key, 'type':'string'})
output_dataset.write_schema(output_schema)
output_schema_set = output_dataset.read_schema(raise_if_empty=False) != []
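# The output schema is written lazily, from the first successful response, so
# any extra fields OMDb returns beyond the known keys are appended as string
# columns by write_output_schema.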
results_notFound = []
for row in input_dataset.iter_rows(log_every=10):
lookup = row[lookup_col]
print "looking up", lookup.encode('utf-8')
query = base_query + ('&i=' if use_id else '&t=') + lookup.encode('utf-8')
movie = requests.get(query).json()
if movie['Response'] == 'True':
# some obvious cleaning:
del movie['Response']
movie['imdbVotes'] = movie['imdbVotes'].replace(',','')
for col in ['Actors', 'Country', 'Director', 'Genre', 'Language', 'Writer']:
movie[col] = '["' + movie[col].replace(', ','","') + '"]'
if movie['Runtime'].endswith(' min'):
movie['Runtime'] = movie['Runtime'][:-len(' min')]
for col in ['Poster', 'Website', 'tomatoConsensus', 'tomatoImage']:
if movie[col] == 'N/A': del movie[col]
for col in ['Metascore', 'Runtime', 'Year', 'imdbVotes',
'tomatoFresh', 'tomatoMeter', 'tomatoReviews', 'tomatoRotten',
'tomatoUserMeter', 'tomatoUserReviews']:
try:
if movie[col] == 'N/A': del movie[col]
else: movie[col] = int(movie[col])
            except (KeyError, ValueError):
                print "cannot cast to int:", col, movie.get(col)
for col in ['imdbRating', 'tomatoRating', 'tomatoUserRating']:
try:
if movie[col] == 'N/A': del movie[col]
else: movie[col] = float(movie[col])
            except (KeyError, ValueError):
                print "cannot cast to float:", col, movie.get(col)
movie[lookup_name] = row[lookup_col]
if not output_schema_set:
write_output_schema(movie)
output_schema_set = True
output_writer.write_row_dict(movie)
else:
print 'Error'
results_notFound.append({lookup_name: lookup, 'error': movie['Error']})
assert movie['Response'] == 'False'
output_writer.close()
if get_output_names_for_role('movies_not_found'):
notFound_dataset = dataiku.Dataset(get_output_names_for_role('movies_not_found')[0])
notFound_dataset.write_with_schema(pd.DataFrame(results_notFound))
| 2.703125 | 3 |
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/Beginners-Python-Examples-master/square_root_algorithm.py | webdevhub42/Lambda | 5 | 12765788 | <filename>WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/Beginners-Python-Examples-master/square_root_algorithm.py
""" Function uses prime factorisation method
to find square root of number """
def prime_factors(c):
pos = 2
factors = []
while (pos <= c):
if (c % pos == 0):
c = c // pos
factors.append(pos)
continue
else:
pos = pos + 1
return factors
def extract_common(li):
    final = []
    # a perfect square must have an even number of prime factors
    if (len(li) % 2 != 0):
        return None
    else:
        pre = len(li) - 1
        for n in range(0, pre, 2):
            a = li[n]
            b = li[n + 1]
            if (a == b):
                final.append(b)
            else:
                # an unpaired prime factor -> not a perfect square
                return None
        return final
def square_root(take_in):
res = 1
for c in take_in:
res *= c
return res
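# Example: 36 -> prime_factors -> [2, 2, 3, 3] -> extract_common -> [2, 3]
# -> square_root -> 6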
get_num = int(raw_input("\nNumber : "))
common = extract_common(prime_factors(get_num))
if common is None:
    print "Number is not a perfect square."
else:
    print square_root(common)
| 4.09375 | 4 |
postgrest/constants.py | bariqhibat/postgrest-py | 34 | 12765789 | DEFAULT_POSTGREST_CLIENT_HEADERS = {
"Accept": "application/json",
"Content-Type": "application/json",
}
DEFAULT_POSTGREST_CLIENT_TIMEOUT = 5
| 1.03125 | 1 |
tests/test_basic.py | zackw/pyamf | 14 | 12765790 | # -*- encoding: utf-8 -*-
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
General tests.
@since: 0.1.0
"""
from __future__ import absolute_import
import six
from types import ModuleType
import unittest
import miniamf
from .util import ClassCacheClearingTestCase, replace_dict, Spam
class ASObjectTestCase(unittest.TestCase):
"""
I exercise all functionality relating to the L{ASObject<miniamf.ASObject>}
class.
"""
def test_init(self):
bag = miniamf.ASObject(spam='eggs', baz='spam')
self.assertEqual(bag, dict(spam='eggs', baz='spam'))
self.assertEqual(bag.spam, 'eggs')
self.assertEqual(bag.baz, 'spam')
def test_eq(self):
bag = miniamf.ASObject()
self.assertEqual(bag, {})
self.assertNotEqual(bag, {'spam': 'eggs'})
bag2 = miniamf.ASObject()
self.assertEqual(bag2, {})
self.assertEqual(bag, bag2)
self.assertNotEqual(bag, None)
def test_setitem(self):
bag = miniamf.ASObject()
self.assertEqual(bag, {})
bag['spam'] = 'eggs'
self.assertEqual(bag.spam, 'eggs')
def test_delitem(self):
bag = miniamf.ASObject({'spam': 'eggs'})
self.assertEqual(bag.spam, 'eggs')
del bag['spam']
self.assertRaises(AttributeError, lambda: bag.spam)
def test_getitem(self):
bag = miniamf.ASObject({'spam': 'eggs'})
self.assertEqual(bag['spam'], 'eggs')
def test_iter(self):
bag = miniamf.ASObject({'spam': 'eggs'})
x = []
for k, v in six.iteritems(bag):
x.append((k, v))
self.assertEqual(x, [('spam', 'eggs')])
def test_hash(self):
bag = miniamf.ASObject({'spam': 'eggs'})
self.assertNotEqual(None, hash(bag))
class HelperTestCase(unittest.TestCase):
"""
Tests all helper functions in C{miniamf.__init__}
"""
def setUp(self):
self.default_encoding = miniamf.DEFAULT_ENCODING
def tearDown(self):
miniamf.DEFAULT_ENCODING = self.default_encoding
def test_get_decoder(self):
self.assertRaises(ValueError, miniamf.get_decoder, 'spam')
decoder = miniamf.get_decoder(miniamf.AMF0, stream=b'123', strict=True)
self.assertEqual(decoder.stream.getvalue(), b'123')
self.assertTrue(decoder.strict)
decoder = miniamf.get_decoder(miniamf.AMF3, stream=b'456', strict=True)
self.assertEqual(decoder.stream.getvalue(), b'456')
self.assertTrue(decoder.strict)
def test_get_encoder(self):
miniamf.get_encoder(miniamf.AMF0)
miniamf.get_encoder(miniamf.AMF3)
self.assertRaises(ValueError, miniamf.get_encoder, b'spam')
encoder = miniamf.get_encoder(miniamf.AMF0, stream=b'spam')
self.assertEqual(encoder.stream.getvalue(), b'spam')
self.assertFalse(encoder.strict)
encoder = miniamf.get_encoder(miniamf.AMF3, stream=b'eggs')
self.assertFalse(encoder.strict)
encoder = miniamf.get_encoder(miniamf.AMF0, strict=True)
self.assertTrue(encoder.strict)
encoder = miniamf.get_encoder(miniamf.AMF3, strict=True)
self.assertTrue(encoder.strict)
def test_encode(self):
self.assertEqual(
miniamf.encode(u'connect', 1.0).getvalue(),
b'\x06\x0fconnect\x05?\xf0\x00\x00\x00\x00\x00\x00'
)
def test_decode(self):
self.assertEqual(
list(miniamf.decode(
b'\x06\x0fconnect\x05?\xf0\x00\x00\x00\x00\x00\x00')),
[u'connect', 1.0]
)
def test_default_encoding(self):
miniamf.DEFAULT_ENCODING = miniamf.AMF3
x = miniamf.encode('foo').getvalue()
self.assertEqual(x, b'\x06\x07foo')
miniamf.DEFAULT_ENCODING = miniamf.AMF0
x = miniamf.encode('foo').getvalue()
self.assertEqual(x, b'\x02\x00\x03foo')
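        # wire-format check: an AMF3 string is the 0x06 marker + U29 length
        # ((3 << 1) | 1 == 0x07) + bytes; an AMF0 string is the 0x02 marker
        # + big-endian U16 length (0x0003) + bytes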
class UnregisterClassTestCase(ClassCacheClearingTestCase):
def test_klass(self):
alias = miniamf.register_class(Spam, 'spam.eggs')
miniamf.unregister_class(Spam)
self.assertTrue('spam.eggs' not in miniamf.CLASS_CACHE)
self.assertTrue(Spam not in miniamf.CLASS_CACHE)
self.assertTrue(alias not in miniamf.CLASS_CACHE)
def test_alias(self):
alias = miniamf.register_class(Spam, 'spam.eggs')
miniamf.unregister_class('spam.eggs')
self.assertTrue('spam.eggs' not in miniamf.CLASS_CACHE)
self.assertTrue(alias not in miniamf.CLASS_CACHE)
class ClassLoaderTestCase(ClassCacheClearingTestCase):
def test_register(self):
self.assertTrue(chr not in miniamf.CLASS_LOADERS)
miniamf.register_class_loader(chr)
self.assertTrue(chr in miniamf.CLASS_LOADERS)
def test_bad_register(self):
self.assertRaises(TypeError, miniamf.register_class_loader, 1)
miniamf.register_class_loader(ord)
def test_unregister(self):
self.assertTrue(chr not in miniamf.CLASS_LOADERS)
miniamf.register_class_loader(chr)
self.assertTrue(chr in miniamf.CLASS_LOADERS)
miniamf.unregister_class_loader(chr)
self.assertTrue(chr not in miniamf.CLASS_LOADERS)
self.assertRaises(LookupError, miniamf.unregister_class_loader, chr)
def test_load_class(self):
def class_loader(x):
self.assertEqual(x, 'spam.eggs')
return Spam
miniamf.register_class_loader(class_loader)
self.assertTrue('spam.eggs' not in miniamf.CLASS_CACHE)
miniamf.load_class('spam.eggs')
self.assertTrue('spam.eggs' in miniamf.CLASS_CACHE)
def test_load_unknown_class(self):
def class_loader(x):
return None
miniamf.register_class_loader(class_loader)
with self.assertRaises(miniamf.UnknownClassAlias):
miniamf.load_class('spam.eggs')
def test_load_class_by_alias(self):
def class_loader(x):
self.assertEqual(x, 'spam.eggs')
return miniamf.ClassAlias(Spam, 'spam.eggs')
miniamf.register_class_loader(class_loader)
self.assertTrue('spam.eggs' not in miniamf.CLASS_CACHE)
miniamf.load_class('spam.eggs')
self.assertTrue('spam.eggs' in miniamf.CLASS_CACHE)
def test_load_class_bad_return(self):
def class_loader(x):
return 'xyz'
miniamf.register_class_loader(class_loader)
self.assertRaises(TypeError, miniamf.load_class, 'spam.eggs')
def test_load_class_by_module(self):
miniamf.load_class('unittest.TestCase')
def test_load_class_by_module_bad(self):
with self.assertRaises(miniamf.UnknownClassAlias):
miniamf.load_class('unittest.TestCase.')
class TypeMapTestCase(unittest.TestCase):
def setUp(self):
self.tm = miniamf.TYPE_MAP.copy()
self.addCleanup(replace_dict, self.tm, miniamf.TYPE_MAP)
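        # replace_dict (registered via addCleanup) restores the global
        # TYPE_MAP from the saved copy after each test, so no test can leak
        # registered types into the next one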
def test_add_invalid(self):
mod = ModuleType('spam')
self.assertRaises(TypeError, miniamf.add_type, mod)
self.assertRaises(TypeError, miniamf.add_type, {})
self.assertRaises(TypeError, miniamf.add_type, 'spam')
self.assertRaises(TypeError, miniamf.add_type, u'eggs')
self.assertRaises(TypeError, miniamf.add_type, 1)
self.assertRaises(TypeError, miniamf.add_type, 234234)
self.assertRaises(TypeError, miniamf.add_type, 34.23)
self.assertRaises(TypeError, miniamf.add_type, None)
self.assertRaises(TypeError, miniamf.add_type, object())
class A:
pass
self.assertRaises(TypeError, miniamf.add_type, A())
def test_add_same(self):
miniamf.add_type(chr)
self.assertRaises(KeyError, miniamf.add_type, chr)
def test_add_class(self):
class A:
pass
class B(object):
pass
miniamf.add_type(A)
self.assertTrue(A in miniamf.TYPE_MAP)
miniamf.add_type(B)
self.assertTrue(B in miniamf.TYPE_MAP)
def test_add_callable(self):
td = miniamf.add_type(ord)
self.assertTrue(ord in miniamf.TYPE_MAP)
self.assertTrue(td in miniamf.TYPE_MAP.values())
def test_add_multiple(self):
td = miniamf.add_type((chr,))
class A(object):
pass
class B(object):
pass
class C(object):
pass
td = miniamf.add_type([A, B, C])
self.assertEqual(td, miniamf.get_type([A, B, C]))
def test_get_type(self):
self.assertRaises(KeyError, miniamf.get_type, chr)
td = miniamf.add_type((chr,))
self.assertRaises(KeyError, miniamf.get_type, chr)
td2 = miniamf.get_type((chr, ))
self.assertEqual(td, td2)
td2 = miniamf.get_type([chr, ])
self.assertEqual(td, td2)
def test_remove(self):
self.assertRaises(KeyError, miniamf.remove_type, chr)
td = miniamf.add_type((chr,))
self.assertRaises(KeyError, miniamf.remove_type, chr)
td2 = miniamf.remove_type((chr,))
self.assertEqual(td, td2)
class ErrorClassMapTestCase(unittest.TestCase):
"""
I test all functionality related to manipulating L{miniamf.ERROR_CLASS_MAP}
"""
def setUp(self):
self.map_copy = miniamf.ERROR_CLASS_MAP.copy()
self.addCleanup(replace_dict, self.map_copy, miniamf.ERROR_CLASS_MAP)
def test_add(self):
class A:
pass
class B(Exception):
pass
self.assertRaises(TypeError, miniamf.add_error_class, None, 'a')
# class A does not sub-class Exception
self.assertRaises(TypeError, miniamf.add_error_class, A, 'a')
miniamf.add_error_class(B, 'b')
self.assertEqual(miniamf.ERROR_CLASS_MAP['b'], B)
miniamf.add_error_class(B, 'a')
self.assertEqual(miniamf.ERROR_CLASS_MAP['a'], B)
class C(Exception):
pass
self.assertRaises(ValueError, miniamf.add_error_class, C, 'b')
def test_remove(self):
class B(Exception):
pass
miniamf.ERROR_CLASS_MAP['abc'] = B
self.assertRaises(TypeError, miniamf.remove_error_class, None)
miniamf.remove_error_class('abc')
self.assertFalse('abc' in miniamf.ERROR_CLASS_MAP)
self.assertRaises(KeyError, miniamf.ERROR_CLASS_MAP.__getitem__, 'abc')
miniamf.ERROR_CLASS_MAP['abc'] = B
miniamf.remove_error_class(B)
self.assertRaises(KeyError, miniamf.ERROR_CLASS_MAP.__getitem__, 'abc')
self.assertRaises(ValueError, miniamf.remove_error_class, B)
self.assertRaises(ValueError, miniamf.remove_error_class, 'abc')
class DummyAlias(miniamf.ClassAlias):
pass
class RegisterAliasTypeTestCase(unittest.TestCase):
def setUp(self):
self.old_aliases = miniamf.ALIAS_TYPES.copy()
self.addCleanup(replace_dict, self.old_aliases, miniamf.ALIAS_TYPES)
def test_bad_klass(self):
self.assertRaises(TypeError, miniamf.register_alias_type, 1)
def test_subclass(self):
self.assertFalse(issubclass(self.__class__, miniamf.ClassAlias))
with self.assertRaises(ValueError):
miniamf.register_alias_type(self.__class__)
def test_no_args(self):
self.assertTrue(issubclass(DummyAlias, miniamf.ClassAlias))
self.assertRaises(ValueError, miniamf.register_alias_type, DummyAlias)
def test_type_args(self):
self.assertTrue(issubclass(DummyAlias, miniamf.ClassAlias))
self.assertRaises(TypeError,
miniamf.register_alias_type, DummyAlias, 1)
def test_single(self):
class A(object):
pass
miniamf.register_alias_type(DummyAlias, A)
self.assertTrue(DummyAlias in miniamf.ALIAS_TYPES)
self.assertEqual(miniamf.ALIAS_TYPES[DummyAlias], (A,))
def test_multiple(self):
class A(object):
pass
class B(object):
pass
with self.assertRaises(TypeError):
miniamf.register_alias_type(DummyAlias, A, 'hello')
miniamf.register_alias_type(DummyAlias, A, B)
self.assertTrue(DummyAlias in miniamf.ALIAS_TYPES)
self.assertEqual(miniamf.ALIAS_TYPES[DummyAlias], (A, B))
def test_duplicate(self):
class A(object):
pass
miniamf.register_alias_type(DummyAlias, A)
with self.assertRaises(RuntimeError):
miniamf.register_alias_type(DummyAlias, A)
def test_unregister(self):
"""
Tests for L{miniamf.unregister_alias_type}
"""
class A(object):
pass
self.assertFalse(DummyAlias in miniamf.ALIAS_TYPES)
self.assertEqual(miniamf.unregister_alias_type(A), None)
miniamf.register_alias_type(DummyAlias, A)
self.assertTrue(DummyAlias in miniamf.ALIAS_TYPES)
self.assertEqual(miniamf.unregister_alias_type(DummyAlias), (A,))
class TypedObjectTestCase(unittest.TestCase):
def test_externalised(self):
o = miniamf.TypedObject(None)
self.assertRaises(miniamf.DecodeError, o.__readamf__, None)
self.assertRaises(miniamf.EncodeError, o.__writeamf__, None)
def test_alias(self):
class Foo:
pass
alias = miniamf.TypedObjectClassAlias(Foo, 'bar')
self.assertEqual(alias.klass, miniamf.TypedObject)
self.assertNotEqual(alias.klass, Foo)
class PackageTestCase(ClassCacheClearingTestCase):
"""
Tests for L{miniamf.register_package}
"""
class NewType(object):
pass
class ClassicType:
pass
def setUp(self):
ClassCacheClearingTestCase.setUp(self)
self.module = ModuleType("foo")
self.module.Classic = self.ClassicType
self.module.New = self.NewType
self.module.b = b'binary'
self.module.i = 12323
self.module.f = 345.234
self.module.u = u"Unicöde"
self.module.l = ["list", "of", "junk"]
self.module.d = {"foo": "bar", "baz": "gak"}
self.module.obj = object()
self.module.mod = self.module
self.module.lam = lambda _: None
self.NewType.__module__ = "foo"
self.ClassicType.__module__ = "foo"
self.spam_module = Spam.__module__
Spam.__module__ = "foo"
self.names = (self.module.__name__,)
def tearDown(self):
ClassCacheClearingTestCase.tearDown(self)
Spam.__module__ = self.spam_module
        self.module.__name__ = self.names[0]
def check_module(self, r, base_package):
self.assertEqual(len(r), 2)
for c in [self.NewType, self.ClassicType]:
alias = r[c]
self.assertTrue(isinstance(alias, miniamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, base_package + c.__name__)
def test_module(self):
r = miniamf.register_package(self.module, 'com.example')
self.check_module(r, 'com.example.')
def test_all(self):
self.module.Spam = Spam
self.module.__all__ = ['Classic', 'New']
r = miniamf.register_package(self.module, 'com.example')
self.check_module(r, 'com.example.')
def test_ignore(self):
self.module.Spam = Spam
r = miniamf.register_package(self.module, 'com.example',
ignore=['Spam'])
self.check_module(r, 'com.example.')
def test_separator(self):
r = miniamf.register_package(self.module, 'com.example', separator='/')
self.ClassicType.__module__ = 'com.example'
self.NewType.__module__ = 'com.example'
self.check_module(r, 'com.example/')
def test_name(self):
self.module.__name__ = 'spam.eggs'
self.ClassicType.__module__ = 'spam.eggs'
self.NewType.__module__ = 'spam.eggs'
r = miniamf.register_package(self.module)
self.check_module(r, 'spam.eggs.')
def test_dict(self):
"""
@see: #585
"""
d = dict()
d['Spam'] = Spam
r = miniamf.register_package(d, 'com.example', strict=False)
self.assertEqual(len(r), 1)
alias = r[Spam]
self.assertTrue(isinstance(alias, miniamf.ClassAlias))
self.assertEqual(alias.klass, Spam)
self.assertEqual(alias.alias, 'com.example.Spam')
def test_odd(self):
self.assertRaises(TypeError, miniamf.register_package, object())
self.assertRaises(TypeError, miniamf.register_package, 1)
self.assertRaises(TypeError, miniamf.register_package, 1.2)
self.assertRaises(TypeError, miniamf.register_package, 23897492834)
self.assertRaises(TypeError, miniamf.register_package, [])
self.assertRaises(TypeError, miniamf.register_package, b'')
self.assertRaises(TypeError, miniamf.register_package, u'')
def test_strict(self):
self.module.Spam = Spam
Spam.__module__ = self.spam_module
r = miniamf.register_package(self.module, 'com.example', strict=True)
self.check_module(r, 'com.example.')
def test_not_strict(self):
self.module.Spam = Spam
Spam.__module__ = self.spam_module
r = miniamf.register_package(self.module, 'com.example', strict=False)
self.assertEqual(len(r), 3)
for c in [self.NewType, self.ClassicType, Spam]:
alias = r[c]
self.assertTrue(isinstance(alias, miniamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, 'com.example.' + c.__name__)
def test_list(self):
class Foo:
pass
class Bar:
pass
ret = miniamf.register_package([Foo, Bar], 'spam.eggs')
self.assertEqual(len(ret), 2)
for c in [Foo, Bar]:
alias = ret[c]
self.assertTrue(isinstance(alias, miniamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, 'spam.eggs.' + c.__name__)
class UndefinedTestCase(unittest.TestCase):
"""
Tests for L{miniamf.Undefined}
"""
def test_none(self):
"""
L{miniamf.Undefined} is not referentially identical to C{None}.
"""
self.assertFalse(miniamf.Undefined is None)
def test_non_zero(self):
"""
Truth test for L{miniamf.Undefined} == C{False}.
"""
self.assertFalse(miniamf.Undefined)
class TestAMF0Codecs(unittest.TestCase):
"""
Tests for getting encoder/decoder for AMF0 with extension support.
"""
def test_default_decoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from miniamf._accel import amf0
except ImportError:
from miniamf import amf0
decoder = miniamf.get_decoder(miniamf.AMF0)
self.assertIsInstance(decoder, amf0.Decoder)
def test_ext_decoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from miniamf._accel import amf0
except ImportError:
self.skipTest('amf0 extension not available')
decoder = miniamf.get_decoder(miniamf.AMF0, use_ext=True)
self.assertIsInstance(decoder, amf0.Decoder)
def test_pure_decoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from miniamf import amf0
decoder = miniamf.get_decoder(miniamf.AMF0, use_ext=False)
self.assertIsInstance(decoder, amf0.Decoder)
def test_default_encoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from miniamf._accel import amf0
except ImportError:
from miniamf import amf0
encoder = miniamf.get_encoder(miniamf.AMF0)
self.assertIsInstance(encoder, amf0.Encoder)
def test_ext_encoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from miniamf._accel import amf0
except ImportError:
self.skipTest('amf0 extension not available')
encoder = miniamf.get_encoder(miniamf.AMF0, use_ext=True)
self.assertIsInstance(encoder, amf0.Encoder)
def test_pure_encoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from miniamf import amf0
encoder = miniamf.get_encoder(miniamf.AMF0, use_ext=False)
self.assertIsInstance(encoder, amf0.Encoder)
class TestAMF3Codecs(unittest.TestCase):
"""
Tests for getting encoder/decoder for amf3 with extension support.
"""
def test_default_decoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from miniamf._accel import amf3
except ImportError:
from miniamf import amf3
decoder = miniamf.get_decoder(miniamf.AMF3)
self.assertIsInstance(decoder, amf3.Decoder)
def test_ext_decoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from miniamf._accel import amf3
except ImportError:
self.skipTest('amf3 extension not available')
decoder = miniamf.get_decoder(miniamf.AMF3, use_ext=True)
self.assertIsInstance(decoder, amf3.Decoder)
def test_pure_decoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from miniamf import amf3
decoder = miniamf.get_decoder(miniamf.AMF3, use_ext=False)
self.assertIsInstance(decoder, amf3.Decoder)
def test_default_encoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from miniamf._accel import amf3
except ImportError:
from miniamf import amf3
encoder = miniamf.get_encoder(miniamf.AMF3)
self.assertIsInstance(encoder, amf3.Encoder)
def test_ext_encoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from miniamf._accel import amf3
except ImportError:
self.skipTest('amf3 extension not available')
encoder = miniamf.get_encoder(miniamf.AMF3, use_ext=True)
self.assertIsInstance(encoder, amf3.Encoder)
def test_pure_encoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from miniamf import amf3
encoder = miniamf.get_encoder(miniamf.AMF3, use_ext=False)
self.assertIsInstance(encoder, amf3.Encoder)
| 2.578125 | 3 |
apps/main/migrations/0005_auto_20180213_2216.py | andrewixl/peopleshop | 0 | 12765791 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-02-14 06:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_apparel_shipping'),
]
operations = [
migrations.AddField(
model_name='apparel_shipping',
name='shipping_method',
field=models.CharField(default='12', max_length=255, verbose_name='Shipping Method'),
preserve_default=False,
),
migrations.AlterField(
model_name='apparel_shipping',
name='shipping_price',
field=models.CharField(max_length=255, verbose_name='Shipping Price'),
),
]
| 1.671875 | 2 |
src/get.causal.v2b.py | gxiaolab/cGMAS | 1 | 12765792 | <reponame>gxiaolab/cGMAS<filename>src/get.causal.v2b.py
#!/usr/bin/python
import sys
import argparse
import glob
from time import strftime
import os
import time
from collections import defaultdict
import numpy as np
###########
# If 1 candid causal snv has multiple tags, need to make sure all the candid-tag pairs are predicted to be causal!!
# => real causal snv doesn't matter which tag it is. all tags show show the same results!
# Filter out also the snv & exon not on the same strd! in this case, the snv usually have both +/-! so it's okay to filter out one.
# output: combine all chrm and generate 1 file for each tissue
#####
#12/21/2017: add anno => putative causal snp dist in neigh exons & neigh introns
# + strand:
# ===[* upE *]* === flankIup === *[* targetE *]* === flankIdn === *[* dnE *]===
# + - + - + - + - + -
#
# 1) causal snp in target exon: distType = targE
# a) exonic causal snp is upstream of targetE end: dist < 0
# b) exonic causal snp is downstream of targetE start: dist > 0
# 2) causal snp in upstream intron: distType = flankI
# a) intronic causal snp is upstream of targetE start: dist < 0
# b) intronic causal snp is downstream of upE end: dist > 0
# 3) causal snp in upstream exon: distType = upE
# a) exonic causal snp is upstream of upE end: dist < 0
# b) exonic causal snp is downstream of upE start: dist > 0
# 4) causal snp in downstream intron: distType = flankI
# a) intronic causal snp is upstream of dnE start: dist < 0
# b) intronic causal snp is downstream of targetE end: dist > 0
# 5) causal snp in downstream exon: distType = dnE
# a) exonic causal snp is upstream of dnE end: dist < 0
# b) exonic causal snp is downstream of dnE start: dist > 0
###########
#V2 new:
# 1) take out cases w/ all indiv who are homozygous
# 2) look for cases w/ !100% effects => focusing on nComp == 2 only!!!
# a) 100% effects: 1 peak at Si = 1
# b) !100% effects: 2 peaks, 1 at Si = 1, the other at Si = X, X != 0. peak height 1:1
#V2B:
# for nComp = 1 => need to take out this requirement!=> NEW on 4/25/2018
###########
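# Illustrative invocation (all paths and threshold values below are
# hypothetical):
#   python get.causal.v2b.py -i introns.bed -e exons.bed \
#       -r peak.si-minI10.minT2 -o Artery-Aorta.causal.txt -t Artery-Aorta \
#       -s 0.1 -p 0.05,0.05 -n 10 -m 0.8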
parser = argparse.ArgumentParser(description='Script descriptions here')
parser.add_argument('-i', metavar='annoI', required=True, help='intron anno bed')
parser.add_argument('-e', metavar='annoE', required=True, help='exon anno bed')
parser.add_argument('-r', metavar='causalf', required=True, help='ref causal si file dir') #peak.si-minI10.minT2/Artery-Aorta/chr16.peak.si.txt
parser.add_argument('-o', metavar='outf', required=True, help='Output file')
parser.add_argument('-t', metavar='tissue', required=True, help='tissue of interest')
parser.add_argument('-s', metavar='gtr', required=True, help='min GT ratio: RV/totalIndiv')
parser.add_argument('-p', metavar='pval', required=True, help='min pval; pval is testing whether si is diff from 1')
parser.add_argument('-n', metavar='minPt', required=True, help='min data points (indiv) per causal-exon-tag pair')
parser.add_argument('-m', metavar='major', required=True, help='min membership ratio of the major component')
opts = parser.parse_args()
print 'intron bed anno: %s' % opts.i
print 'exon bed anno: %s' % opts.e
print 'Outf: %s' % opts.o
print 'causal ref: %s' % opts.r
print 'tissue: %s' % opts.t
print 'min pval: %s' % opts.p
print 'min GTR: %s' % opts.s
print 'min points: %s' % opts.n
print 'min membership ratio of the major component: %s' % opts.m
#SI = float(opts.s)
GTR = float(opts.s)
PV1,PV0 = map(float,opts.p.split(','))
MEM = float(opts.m)
N = int(opts.n)
#use dirs to store all candid-tag pairs
mem = defaultdict(dict) #membership ratio
#si = defaultdict(dict) #peak si
nn = defaultdict(dict) #total number of si
pv1 = defaultdict(dict) #pvals for comp w/ large x => close to 1
pv0 = defaultdict(dict) #pvals for comp w/ small x => diff from 0 or 1
li = defaultdict(dict) #entire lines
res = defaultdict(dict) #entire lines for final results
intronup = defaultdict(set) #neighboring upstream intron
introndn = defaultdict(set) #neighboring downstream intron
exonup = defaultdict(set) #neighboring upstream exon
exondn = defaultdict(set) #neighboring downstream exon
# When AS region is exon,
# annoTarget: intron bed
# annoNeigh: exon bed
# When AS region is intron,
# annoTarget: exon bed
# annoNeigh: intron bed
def calcDist(annoTarget,annoNeigh,r1,r2,r3,out):
#The comments here is assuming the AS region is exon
#get introns
setup = lambda: {'iup':'NA', 'idn':'NA', 'eup':'NA', 'edn':'NA'}
setup2=lambda: defaultdict(setup)
anno = defaultdict(setup2) #anno[(cand,exon,tag)][trx]:{'iup':coord, 'idn':coord, 'eup':coord, 'edn':coord}
with open(annoTarget) as f:
for l in f:
chrm,st0,end,info,x,strd = l.strip().split('\t')
#g,trx,x = info.split('|')
trx = info.split('_')[0]
if strd == '+':
if (chrm,end,strd) in intronup:
for (cand,exon,tag) in intronup[(chrm,end,strd)]:
anno[(cand,exon,tag)][trx]['iup'] = (st0,end)
exonup[(chrm,st0,strd)].add((cand,exon,tag))
if (chrm,st0,strd) in introndn:
for (cand,exon,tag) in introndn[(chrm,st0,strd)]:
anno[(cand,exon,tag)][trx]['idn'] = (st0,end)
exondn[(chrm,end,strd)].add((cand,exon,tag))
else:
if (chrm,st0,strd) in intronup:
for (cand,exon,tag) in intronup[(chrm,st0,strd)]:
anno[(cand,exon,tag)][trx]['iup'] = (st0,end)
exonup[(chrm,end,strd)].add((cand,exon,tag))
if (chrm,end,strd) in introndn:
for (cand,exon,tag) in introndn[(chrm,end,strd)]:
anno[(cand,exon,tag)][trx]['idn'] = (st0,end)
exondn[(chrm,st0,strd)].add((cand,exon,tag))
#get exons
with open(annoNeigh) as f:
for l in f:
chrm,st0,end,info,x,strd = l.strip().split('\t')
#g,trx,x = info.split('|')
trx = info.split('_')[0]
if strd == '+':
if (chrm,end,strd) in exonup:
for (cand,exon,tag) in exonup[(chrm,end,strd)]:
if trx in anno[(cand,exon,tag)]: anno[(cand,exon,tag)][trx]['eup'] = (st0,end)
if (chrm,st0,strd) in exondn:
for (cand,exon,tag) in exondn[(chrm,st0,strd)]:
if trx in anno[(cand,exon,tag)]: anno[(cand,exon,tag)][trx]['edn'] = (st0,end)
else:
if (chrm,st0,strd) in exonup:
for (cand,exon,tag) in exonup[(chrm,st0,strd)]:
if trx in anno[(cand,exon,tag)]: anno[(cand,exon,tag)][trx]['eup'] = (st0,end)
if (chrm,end,strd) in exondn:
for (cand,exon,tag) in exondn[(chrm,end,strd)]:
if trx in anno[(cand,exon,tag)]: anno[(cand,exon,tag)][trx]['edn'] = (st0,end)
#calc dist and write output
#+ strand:
# ===[* upE *]* === flankIup === *[* targetE *]* === flankIdn === *[* dnE *]===
# + - + - + - + - + -
#anno[(cand,exon,tag)][trx]:{'iup':coord, 'idn':coord, 'eup':coord, 'edn':coord}
for (cand,exon) in res.iterkeys():
for tag,l in res[(cand,exon)].iteritems():
chrm,pos,strd = cand.split('.')
pos = int(pos)
targst1, targend = map(int,exon.split(':')[1:-1])
#causal snp in targE
if pos >= targst1 and pos <= targend:
dist = min(pos-targst1+1, pos-targend-1, key=abs)
if strd == '-': dist = -1*dist
for trx in anno[(cand,exon,tag)].iterkeys():
if 'NA' not in anno[(cand,exon,tag)][trx].values():
out.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(l,trx,r1,dist,pv0[(cand,exon)][tag],pv1[(cand,exon)][tag]))
#causal snp is upstream of target exon
elif pos < targst1:
for trx in anno[(cand,exon,tag)].iterkeys():
if 'NA' not in anno[(cand,exon,tag)][trx].values():
if strd == '+':
ist0,iend = map(int,anno[(cand,exon,tag)][trx]['iup'])
est0,eend = map(int,anno[(cand,exon,tag)][trx]['eup'])
side = 'up'
else:
ist0,iend = map(int,anno[(cand,exon,tag)][trx]['idn'])
est0,eend = map(int,anno[(cand,exon,tag)][trx]['edn'])
side = 'down'
#causal snp is in upstream intron
if pos > ist0 and pos <= iend:
dist = min(pos-ist0, pos-iend-1, key=abs)
if strd == '-': dist = -1*dist
out.write('{}\t{}\tflank{}{}\t{}\t{}\t{}\n'.format(l,trx,r2,side,dist,pv0[(cand,exon)][tag],pv1[(cand,exon)][tag]))
#causal snp is in upstream exon
elif pos > est0 and pos <= eend:
dist = min(pos-est0, pos-eend-1, key=abs)
if strd == '-': dist = -1*dist
out.write('{}\t{}\t{}{}\t{}\t{}\t{}\n'.format(l,trx,side,r3,dist,pv0[(cand,exon)][tag],pv1[(cand,exon)][tag]))
#not in range of interest in this trx!
#else: print 'filter out {}stream:'.format(side),cand,exon,tag,trx
#causal snp is downstream of target exon
else:
for trx in anno[(cand,exon,tag)].iterkeys():
if 'NA' not in anno[(cand,exon,tag)][trx].values():
if strd == '+':
ist0,iend = map(int,anno[(cand,exon,tag)][trx]['idn'])
est0,eend = map(int,anno[(cand,exon,tag)][trx]['edn'])
side = 'down'
else:
ist0,iend = map(int,anno[(cand,exon,tag)][trx]['iup'])
est0,eend = map(int,anno[(cand,exon,tag)][trx]['eup'])
side = 'up'
#causal snp is in downstream intron
if pos > ist0 and pos <= iend:
dist = min(pos-ist0, pos-iend-1, key=abs)
if strd == '-': dist = -1*dist
out.write('{}\t{}\tflank{}{}\t{}\t{}\t{}\n'.format(l,trx,r2,side,dist,pv0[(cand,exon)][tag],pv1[(cand,exon)][tag]))
#causal snp is in downstream exon
elif pos > est0 and pos <= eend:
dist = min(pos-est0, pos-eend-1, key=abs)
if strd == '-': dist = -1*dist
out.write('{}\t{}\t{}{}\t{}\t{}\t{}\n'.format(l,trx,side,r3,dist,pv0[(cand,exon)][tag],pv1[(cand,exon)][tag]))
#not in range of interest in this trx!
#else: print 'filter out {}stream:'.format(side),cand,exon,tag,trx
def main(argv):
print 'job starts', strftime('%a, %d %b %Y %I:%M:%S')
start_time = time.time()
#get causality info
for ff in glob.glob('{}/{}/*.txt'.format(opts.r,opts.t)): #ff: 1 chrm in 1 tissue at a time
with open(ff) as f:
for ll in f:
if not ll.startswith('causalCandidate'):
l = ll.split('\t')
if int(l[5]) == int(l[6]) or int(l[5]) == int(l[8]): continue
cstrd = l[0][-1]
estrd = l[3][-1]
tstrd = l[4][-1]
if len(set([cstrd,estrd,tstrd])) == 1: #and l[5] not in l[6:9]: #strands agree AND not everyone has the same GT!! ==> 1/9/2017: it is okay to have same gt!!!
totalIndiv,RR,RV,VV = map(int,l[5:9])
if 1.*RV/totalIndiv < GTR: continue
if totalIndiv >= N: mem[(l[0],l[3])][l[4]] = map(float,l[-1].split('|'))
nn[(l[0],l[3])][l[4]] = totalIndiv
pvals = l[-6].split('|')
means = map(float,l[13].split('|'))
maxsi = np.argmax(means)
try:
p0,p1 = map(float,pvals[maxsi].split(';'))
pv1[(l[0],l[3])][l[4]] = p1
minsi = np.argmin(means)
p0,p1 = map(float,pvals[minsi].split(';'))
pv0[(l[0],l[3])][l[4]] = p0
li[(l[0],l[3])][l[4]] = ll
except ValueError: continue #this means the pval of the major component is 'NA' => meaning it didn't pass rm.bg in the previous step
for (cand,exon) in li.iterkeys():
LEN = len(li[(cand,exon)])
LEN2 = len([x for x in nn[(cand,exon)].values() if x >= N])
if len([x for x in pv1[(cand,exon)].values() if x > PV1]) == 0: #all tag snvs have sig pv for the single peak (si away from 1)
if len([x for x in pv0[(cand,exon)].values() if x > PV0]) == 0: #all tag snvs have sig pv for the single peak (si away from 0)
if len([x for x in mem[(cand,exon)].values() if x >= MEM]) == LEN2: #all tag snvs that have enough indiv (N) pass membership ratio thresh
for tag in li[(cand,exon)].iterkeys():
if nn[(cand,exon)][tag] >= N: #only print out the entry that the tag snv has enough individuals.
#don't need to require it for all tags of a cand-exon pair because not all indiv have the same tag.
chrm,st1,end,strd = exon.split(':')
if strd == '+':
intronup[(chrm,str(int(st1)-1),strd)].add((cand,exon,tag))
introndn[(chrm,end,strd)].add((cand,exon,tag))
else:
introndn[(chrm,str(int(st1)-1),strd)].add((cand,exon,tag))
intronup[(chrm,end,strd)].add((cand,exon,tag))
res[(cand,exon)][tag] = li[(cand,exon)][tag].strip()
out = open(opts.o,'w')
out.write('causalCandidate\tsource\tnt\texon\ttagSNV\ttotalIndiv\tRR\tRV\tVV\tpeakSi\tzScore\tpValue\tnComp\tpeakSiMean\tpeakSiStdev\tpeakSiN\tpeakSiR\ttrx\tdistType\tdist\tp0\tp1\n')
#AS region is exon
calcDist(opts.i,opts.e,'targE','I','E',out)
#AS region is intron
calcDist(opts.e,opts.i,'targI','E','I',out)
out.close()
print("--- %s seconds ---" % (time.time() - start_time))
print 'DONE!', strftime('%a, %d %b %Y %I:%M:%S')
if __name__ == '__main__':
main(sys.argv[1:])
| 2.125 | 2 |
scripts/check_license.py | SanctuaryComponents/layer_management | 1 | 12765793 | #!/usr/bin/python
###########################################################################
#
# Copyright 2013 BMW Car IT GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import sys, re, string
from common_modules.common import *
from common_modules.config import G_LICENSE_TEMPLATES
def clean_comment_chars(s):
"""
Removes comment characters from a string
"""
s = string.replace(s, "/", "")
s = string.replace(s, "*", "")
s = string.replace(s, "#", "")
return s
def make_license_re(license_text):
"""
Makes a regular expression for every line in the license, this would match the license
text tolerating extra spaces
"""
license_lines = license_text.split("\n")
license_lines_re = {}
for i in range(len(license_lines)):
license_line = license_lines[i]
re_text = clean_comment_chars(license_line)
#remove white space paddings
re_text = re_text.strip(" \n\t\r\f")
#replace special characters
re_text = string.replace(re_text, "(", "\(")
re_text = string.replace(re_text, ")", "\)")
re_text = string.replace(re_text, " ", "(\s+)")
re_text = string.replace(re_text, "\n", "(\s*)")
re_text = string.replace(re_text, "\t", "(\s+)")
re_text = string.replace(re_text, ".", "\.")
#this replaces the text [YYYY] with a regex that mathces years in one of the following forms:
#2013 or 2000-2013 or 2000 or 2000, 2001, 2002, 2013
re_text = string.replace(re_text, "[YYYY]", r"(\d{4})(((-(\d{4}))|(((\s*),(\s*)\d{4})+))?)")
if len(re_text) > 0:
re_text = "(\s*)" + re_text + "(\s*)"
current_text = ""
#remove unneeded space matches
while current_text != re_text:
current_text = re_text
re_text = string.replace(re_text, "(\s*)(\s*)", "(\s*)")
re_text = string.replace(re_text, "(\s+)(\s+)", "(\s+)")
re_text = string.replace(re_text, "(\s*)(\s+)", "(\s+)")
license_lines_re[i] = re_text
return license_lines_re
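# e.g. a template line "Copyright [YYYY] ACME" compiles to a pattern that
# also matches "Copyright 2000-2013 ACME" or "Copyright 2011, 2012, 2013
# ACME", with arbitrary whitespace padding tolerated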
def check_specific_license_in_file(filename, clean_file_contents, license_text):
"""
Checks if the file contains a valid license according to the license template provided
"""
license_lines = license_text.split("\n")
license_re = make_license_re(license_text)
#search for the first line of the license in the target file
    line_re = re.compile(license_re[0])
found_match = line_re.search(clean_file_contents)
if found_match:
clean_file_contents = clean_file_contents[found_match.start():]
#check that license exists without any added or removed words
    for line_num in sorted(license_re):
        line_re_text = license_re[line_num]
line_re = re.compile(line_re_text)
found_match = line_re.match(clean_file_contents)
if found_match:
clean_file_contents = clean_file_contents[found_match.end():]
else:
#log_warning(filename, 1, "license does not match at", license_lines[line_num])
return (line_num, license_lines[line_num])
return None # success
def check_license_in_file(filename, file_contents):
"""
Checks if the file contains a valid license.
It tries to find a match inside the file with any of the licenses configured
"""
clean_file_contents = clean_comment_chars(file_contents)
#license that had the best match with the file
best_match = (-1, None)
#try to match with every license
for license in G_LICENSE_TEMPLATES:
call_result = check_specific_license_in_file(filename, clean_file_contents, license)
#if match is found just return
if call_result == None:
return None
#if no match found check if this license was a good candidate for the match
else:
best_match = call_result if call_result[0] > best_match[0] else best_match
    #(this else clause is executed if the for loop exits normally, without returning)
#if loop ended without return, this means no license matched
else:
#if no license matched at all
if best_match[1] == None:
log_warning(filename, 1, "no license found")
#get the license with the best match
else:
log_warning(filename, 1, "license does not match at", best_match[1])
if __name__ == "__main__":
targets = sys.argv[1:]
targets = get_all_files(targets)
if len(targets) == 0:
print """
\t**** No input provided ****
\tTakes a list of files/directories as input and performs specific style checking on all files/directories.
\tGives warnings if the file does not contain a valid license text. It does not check if Copyright statements are included.
"""
exit(0)
for t in targets:
file_contents, _, _, _ = read_file(t)
check_license_in_file(t, file_contents)
| 2.578125 | 3 |
pbrl/algorithms/dqn/policy.py | jjccero/rliccd | 3 | 12765794 | <reponame>jjccero/rliccd<filename>pbrl/algorithms/dqn/policy.py<gh_stars>1-10
import copy
from typing import Optional, List, Type
import numpy as np
import torch
from gym.spaces import Space
from pbrl.algorithms.dqn.net import QNet
from pbrl.policy.policy import BasePolicy
class Policy(BasePolicy):
def __init__(
self,
observation_space: Space,
action_space: Space,
hidden_sizes: List,
activation: Type[torch.nn.Module],
rnn: Optional[str] = None,
clip_fn='clip',
obs_norm: bool = False,
reward_norm: bool = False,
gamma: float = 0.99,
obs_clip: float = 10.0,
reward_clip: float = 10.0,
device=torch.device('cpu'),
critic=True
):
super(Policy, self).__init__(
observation_space=observation_space,
action_space=action_space,
hidden_sizes=hidden_sizes,
activation=activation,
rnn=rnn,
clip_fn=clip_fn,
obs_norm=obs_norm,
reward_norm=reward_norm,
gamma=gamma,
obs_clip=obs_clip,
reward_clip=reward_clip,
device=device
)
config_net = dict(
obs_dim=self.observation_space.shape,
action_dim=self.action_space.n,
hidden_sizes=self.hidden_sizes,
activation=self.activation,
rnn=rnn
)
self.critic = QNet(**config_net).to(self.device)
self.critic_target: Optional[QNet] = None
if critic:
self.critic_target = copy.deepcopy(self.critic)
self.critic_target.eval()
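        # standard DQN target network: a separate copy of the online critic
        # kept in eval mode, presumably synced periodically by the trainer to
        # stabilise TD targets; skipped when critic=False (e.g. for
        # inference-only policy instances)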
@torch.no_grad()
def step(
self,
observations: np.ndarray,
states_actor,
random=False
):
observations = self.normalize_observations(observations, True)
if random:
actions = self.random_action(observations.shape[0])
else:
observations = self.n2t(observations)
q_values, states_actor = self.critic.forward(observations, states_actor)
actions = torch.argmax(q_values, -1)
actions = self.t2n(actions)
return actions, states_actor
@torch.no_grad()
def act(
self,
observations: np.ndarray,
states_actor
):
observations = self.normalize_observations(observations)
observations = self.n2t(observations)
q_values, states_actor = self.critic.forward(observations, states_actor)
actions = torch.argmax(q_values, -1)
actions = self.t2n(actions)
return actions, states_actor
| 1.992188 | 2 |
Problems/Binary Tree/236. Lowest Common Ancestor of a Binary Tree.py | BYJRK/LeetCode-Solutions | 0 | 12765795 | <gh_stars>0
# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/
class TreeNode:
def __init__(self, x, left=None, right=None):
self.val = x
self.left = left
self.right = right
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
res = None
        def findLCA(node):
            nonlocal res
            if node is None or res is not None:
                return 0
            count = 0
            if node == p or node == q:
                count += 1
            count += findLCA(node.left)
            count += findLCA(node.right)
            if count == 2 and res is None:
                # first (deepest) node whose subtree contains both p and q
                res = node
            return count
findLCA(root)
return res
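# Example: in the tree [3,5,1,6,2,0,8] with p=5 and q=1, the root (3) is the
# first node whose subtree contains both targets, so it is the LCA returned.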
| 3.484375 | 3 |
feature_selection/tests/check_test.py | gonzalesMK/MetaHeuristic | 12 | 12765796 | import numpy as np
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.testing import assert_array_equal
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
from feature_selection import HarmonicSearch
from feature_selection import GeneticAlgorithm
from feature_selection import RandomSearch
from feature_selection import BinaryBlackHole
from feature_selection import SimulatedAnneling
from feature_selection import BRKGA
from feature_selection import SPEA2
from feature_selection import PSO
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
import nose.plugins.multiprocess
# These are nose tests; to run them, execute: python -m nose
_multiprocess_can_split_ = True
METACLASSES = [
SimulatedAnneling, PSO, HarmonicSearch, GeneticAlgorithm, RandomSearch,
BinaryBlackHole, BRKGA,
SPEA2]
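# Every metaheuristic above is exercised through the same battery below:
# sklearn estimator checks, fit/transform equivalence, parallel execution
# and edge-case handling.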
def test_check_estimator():
for metaclass in METACLASSES:
print("check_estimator: ", metaclass.__class__.__name__)
check_estimator(metaclass)
def test_overall():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
clf = SVC(gamma='auto')
for metaclass in METACLASSES:
meta = metaclass(estimator=clf, random_state=0, verbose=False,
make_logbook=True, repeat=1, number_gen=2,
)
print("Checking: ", meta.__class__.__name__)
# Fit the classifier
meta.fit(X, y, normalize=True)
# Transformed dataset
X_1 = meta.transform(X)
meta = metaclass(estimator=clf, random_state=0,
make_logbook=True, repeat=1, number_gen=2, )
# Fit and Transform
X_2 = meta.fit_transform(X=X, y=y, normalize=True)
assert_array_equal(X_1, X_2)
meta.best_pareto()
meta.all_paretos()
meta.best_solution()
meta.all_solutions()
def test_parallel():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
clf = SVC(gamma="auto")
for metaclass in METACLASSES :
meta = metaclass(estimator=clf, random_state=0, make_logbook=False,
repeat=2, number_gen=2, parallel=True, verbose=True,
)
print("Checking parallel ", meta.__class__.__name__)
# Fit the classifier
meta.fit(X, y, normalize=True)
# Transformed dataset
X_1 = meta.transform(X)
meta = metaclass(estimator=clf, random_state=0, make_logbook=False,
repeat=2, number_gen=2, parallel=True, )
# Fit and Transform
X_2 = meta.fit_transform(X=X, y=y, normalize=True)
# Check Function
assert_array_equal(X_1, X_2)
def test_unusual_errors():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
clf = SVC(gamma='auto')
for metaclass in METACLASSES:
meta = metaclass(estimator=clf, random_state=0, verbose=0,
make_logbook=True, repeat=1, number_gen=2, )
print("Checking unusual error: ", meta.__class__.__name__)
meta.fit(X, y, normalize=True)
# Let's suppose you have a empty best
assert_raises(ValueError, meta.safe_mask, X, [])
meta = metaclass(estimator=clf, random_state=0, verbose=0,
make_logbook=True, repeat=1, number_gen=2, )
#assert_raises(ValueError, meta.score_func_to_gridsearch, meta)
for metaclass in [BRKGA]:
meta = metaclass(estimator=clf, random_state=0, verbose=0,
make_logbook=True, repeat=1, number_gen=2,
elite_size=5)
        assert_raises(ValueError, meta.fit, [[1, 1, 1], [1, 2, 3]], [1, 0])
def test_predict():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
sa = SimulatedAnneling( number_gen=2)
sa.fit(X,y, normalize=True)
sa.predict(X)
"""
def test_score_grid_func():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
clf = SVC()
for metaclass in METACLASSES:
meta = metaclass(classifier=clf, random_state=0, verbose=True,
make_logbook=True, repeat=1, number_gen=3,
)
print("Checking Grid: ", meta.__class__.__name__)
# Fit the classifier
meta.fit(X, y, normalize=True)
# See score
meta.score_func_to_gridsearch(meta)
""" | 2.3125 | 2 |
pyrep/objects/vision_sensor.py | WeiWeic6222848/PyRep | 505 | 12765797 | import math
from typing import List, Union, Sequence
from pyrep.backend import sim
from pyrep.objects.object import Object, object_type_to_class
import numpy as np
from pyrep.const import ObjectType, PerspectiveMode, RenderMode
class VisionSensor(Object):
"""A camera-type sensor, reacting to light, colors and images.
"""
def __init__(self, name_or_handle: Union[str, int]):
super().__init__(name_or_handle)
self.resolution = sim.simGetVisionSensorResolution(self._handle)
@staticmethod
def create(resolution: List[int], explicit_handling=False,
perspective_mode=True, show_volume_not_detecting=True,
show_volume_detecting=True, passive=False,
use_local_lights=False, show_fog=True,
near_clipping_plane=1e-2, far_clipping_plane=10.0,
view_angle=60.0, ortho_size=1.0, sensor_size=None,
render_mode=RenderMode.OPENGL3,
position=None, orientation=None) -> 'VisionSensor':
""" Create a Vision Sensor
:param resolution: List of the [x, y] resolution.
:param explicit_handling: Sensor will be explicitly handled.
:param perspective_mode: Sensor will be operated in Perspective Mode.
Orthographic mode if False.
:param show_volume_not_detecting: Sensor volume will be shown when not
detecting anything.
:param show_volume_detecting: Sensor will be shown when detecting.
:param passive: Sensor will be passive (use an external image).
:param use_local_lights: Sensor will use local lights.
:param show_fog: Sensor will show fog (if enabled).
:param near_clipping_plane: Near clipping plane.
:param far_clipping_plane: Far clipping plane.
:param view_angle: Perspective angle (in degrees) if in Perspective Mode.
:param ortho_size: Orthographic projection size [m] if in Orthographic
Mode.
:param sensor_size: Size [x, y, z] of the Vision Sensor object.
:param render_mode: Sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
:param position: The [x, y, z] position, if specified.
:param orientation: The [x, y, z] orientation in radians, if specified.
:return: The created Vision Sensor.
"""
options = 0
if explicit_handling:
options |= 1
if perspective_mode:
options |= 2
if not show_volume_not_detecting:
options |= 4
if not show_volume_detecting:
options |= 8
if passive:
options |= 16
if use_local_lights:
options |= 32
if not show_fog:
options |= 64
int_params = [
resolution[0], # 0
resolution[1], # 1
0, # 2
0 # 3
]
if sensor_size is None:
sensor_size = [0.01, 0.01, 0.03]
float_params = [
near_clipping_plane, # 0
far_clipping_plane, # 1
math.radians(view_angle) if perspective_mode else ortho_size, # 2
sensor_size[0], # 3
sensor_size[1], # 4
sensor_size[2], # 5
0.0, # 6
0.0, # 7
0.0, # 8
0.0, # 9
0.0, # 10
]
vs = VisionSensor(
sim.simCreateVisionSensor(options, int_params, float_params, None)
)
vs.set_render_mode(render_mode)
if position is not None:
vs.set_position(position)
if orientation is not None:
vs.set_orientation(orientation)
return vs
def _get_requested_type(self) -> ObjectType:
return ObjectType.VISION_SENSOR
def handle_explicitly(self) -> None:
"""Handle sensor explicitly.
This enables capturing image (e.g., capture_rgb())
without PyRep.step().
"""
if not self.get_explicit_handling():
raise RuntimeError('The explicit_handling is disabled. '
'Call set_explicit_handling(value=1) to enable explicit_handling first.')
sim.simHandleVisionSensor(self._handle)
def capture_rgb(self) -> np.ndarray:
"""Retrieves the rgb-image of a vision sensor.
:return: A numpy array of size (width, height, 3)
"""
return sim.simGetVisionSensorImage(self._handle, self.resolution)
def capture_depth(self, in_meters=False) -> np.ndarray:
"""Retrieves the depth-image of a vision sensor.
:param in_meters: Whether the depth should be returned in meters.
:return: A numpy array of size (width, height)
"""
return sim.simGetVisionSensorDepthBuffer(
self._handle, self.resolution, in_meters)
def capture_pointcloud(self) -> np.ndarray:
"""Retrieves point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
d = self.capture_depth(in_meters=True)
return self.pointcloud_from_depth(d)
def pointcloud_from_depth(self, depth: np.ndarray) -> np.ndarray:
"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
intrinsics = self.get_intrinsic_matrix()
return VisionSensor.pointcloud_from_depth_and_camera_params(
depth, self.get_matrix(), intrinsics)
@staticmethod
def pointcloud_from_depth_and_camera_params(
depth: np.ndarray, extrinsics: np.ndarray,
intrinsics: np.ndarray) -> np.ndarray:
"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
upc = _create_uniform_pixel_coords_image(depth.shape)
pc = upc * np.expand_dims(depth, -1)
C = np.expand_dims(extrinsics[:3, 3], 0).T
R = extrinsics[:3, :3]
R_inv = R.T # inverse of rot matrix is transpose
R_inv_C = np.matmul(R_inv, C)
extrinsics = np.concatenate((R_inv, -R_inv_C), -1)
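        # cam_proj_mat = intrinsics @ [R^T | -R^T C] maps homogeneous world
        # coordinates to pixel coordinates; padding it to 4x4 and inverting
        # lets us map depth-scaled pixel coordinates back to the world frame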
cam_proj_mat = np.matmul(intrinsics, extrinsics)
cam_proj_mat_homo = np.concatenate(
[cam_proj_mat, [np.array([0, 0, 0, 1])]])
cam_proj_mat_inv = np.linalg.inv(cam_proj_mat_homo)[0:3]
world_coords_homo = np.expand_dims(_pixel_to_world_coords(
pc, cam_proj_mat_inv), 0)
world_coords = world_coords_homo[..., :-1][0]
return world_coords
def get_intrinsic_matrix(self):
res = np.array(self.get_resolution())
pp_offsets = res / 2
ratio = res[0] / res[1]
pa_x = pa_y = math.radians(self.get_perspective_angle())
if ratio > 1:
pa_y = 2 * np.arctan(np.tan(pa_y / 2) / ratio)
elif ratio < 1:
pa_x = 2 * np.arctan(np.tan(pa_x / 2) * ratio)
persp_angles = np.array([pa_x, pa_y])
focal_lengths = -res / (2 * np.tan(persp_angles / 2))
return np.array(
[[focal_lengths[0], 0., pp_offsets[0]],
[0., focal_lengths[1], pp_offsets[1]],
[0., 0., 1.]])
def get_resolution(self) -> List[int]:
""" Return the Sensor's resolution.
:return: Resolution [x, y]
"""
return sim.simGetVisionSensorResolution(self._handle)
def set_resolution(self, resolution: List[int]) -> None:
""" Set the Sensor's resolution.
:param resolution: New resolution [x, y]
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_resolution_x, resolution[0]
)
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_resolution_y, resolution[1]
)
self.resolution = resolution
def get_perspective_mode(self) -> PerspectiveMode:
""" Retrieve the Sensor's perspective mode.
:return: The current PerspectiveMode.
"""
perspective_mode = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_perspective_operation,
)
return PerspectiveMode(perspective_mode)
def set_perspective_mode(self, perspective_mode: PerspectiveMode) -> None:
""" Set the Sensor's perspective mode.
:param perspective_mode: The new perspective mode, one of:
PerspectiveMode.ORTHOGRAPHIC
PerspectiveMode.PERSPECTIVE
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_perspective_operation,
perspective_mode.value
)
def get_render_mode(self) -> RenderMode:
""" Retrieves the Sensor's rendering mode
:return: RenderMode for the current rendering mode.
"""
render_mode = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_render_mode
)
return RenderMode(render_mode)
def set_render_mode(self, render_mode: RenderMode) -> None:
""" Set the Sensor's rendering mode
:param render_mode: The new sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_render_mode,
render_mode.value
)
def get_windowed_size(self) -> Sequence[int]:
"""Get the size of windowed rendering.
:return: The (x, y) resolution of the window. 0 for full-screen.
"""
size_x = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_x)
size_y = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_y)
return size_x, size_y
def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
"""Set the size of windowed rendering.
:param resolution: The (x, y) resolution of the window.
0 for full-screen.
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_x,
resolution[0])
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_y,
resolution[1])
def get_perspective_angle(self) -> float:
""" Get the Sensor's perspective angle.
:return: The sensor's perspective angle (in degrees).
"""
return math.degrees(sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_perspective_angle
))
def set_perspective_angle(self, angle: float) -> None:
""" Set the Sensor's perspective angle.
:param angle: New perspective angle (in degrees)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_perspective_angle,
math.radians(angle)
)
def get_orthographic_size(self) -> float:
""" Get the Sensor's orthographic size.
:return: The sensor's orthographic size (in metres).
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_ortho_size
)
def set_orthographic_size(self, ortho_size: float) -> None:
""" Set the Sensor's orthographic size.
        :param ortho_size: New orthographic size (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_ortho_size, ortho_size
)
def get_near_clipping_plane(self) -> float:
""" Get the Sensor's near clipping plane.
:return: Near clipping plane (metres)
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_near_clipping
)
def set_near_clipping_plane(self, near_clipping: float) -> None:
""" Set the Sensor's near clipping plane.
:param near_clipping: New near clipping plane (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_near_clipping, near_clipping
)
def get_far_clipping_plane(self) -> float:
""" Get the Sensor's far clipping plane.
        :return: Far clipping plane (metres)
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_far_clipping
)
def set_far_clipping_plane(self, far_clipping: float) -> None:
""" Set the Sensor's far clipping plane.
:param far_clipping: New far clipping plane (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_far_clipping, far_clipping
)
def set_entity_to_render(self, entity_to_render: int) -> None:
""" Set the entity to render to the Sensor, this can be an object or more usefully a collection.
-1 to render all objects in scene.
:param entity_to_render: Handle of the entity to render
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_entity_to_render, entity_to_render
)
    def get_entity_to_render(self) -> int:
        """ Get the entity rendered to the Sensor; this can be an object or, more usefully, a collection.
-1 if all objects in scene are rendered.
:return: Handle of the entity to render
"""
return sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_entity_to_render
)
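# Example usage (hypothetical; assumes a live PyRep/CoppeliaSim scene with a
# vision sensor named 'cam'):
#   cam = VisionSensor('cam')
#   cam.set_resolution([640, 480])
#   cam.set_perspective_mode(PerspectiveMode.PERSPECTIVE)
#   cam.set_near_clipping_plane(0.1)
#   cam.set_far_clipping_plane(3.5)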
def _create_uniform_pixel_coords_image(resolution: np.ndarray):
pixel_x_coords = np.reshape(
np.tile(np.arange(resolution[1]), [resolution[0]]),
(resolution[0], resolution[1], 1)).astype(np.float32)
pixel_y_coords = np.reshape(
np.tile(np.arange(resolution[0]), [resolution[1]]),
(resolution[1], resolution[0], 1)).astype(np.float32)
pixel_y_coords = np.transpose(pixel_y_coords, (1, 0, 2))
uniform_pixel_coords = np.concatenate(
(pixel_x_coords, pixel_y_coords, np.ones_like(pixel_x_coords)), -1)
return uniform_pixel_coords
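# Apply a homogeneous transform to an (H, W, C) image of coordinates by
# flattening to (H*W, C), multiplying by `trans`, and reshaping back.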
def _transform(coords, trans):
h, w = coords.shape[:2]
coords = np.reshape(coords, (h * w, -1))
coords = np.transpose(coords, (1, 0))
transformed_coords_vector = np.matmul(trans, coords)
transformed_coords_vector = np.transpose(
transformed_coords_vector, (1, 0))
return np.reshape(transformed_coords_vector,
(h, w, -1))
def _pixel_to_world_coords(pixel_coords, cam_proj_mat_inv):
h, w = pixel_coords.shape[:2]
pixel_coords = np.concatenate(
[pixel_coords, np.ones((h, w, 1))], -1)
world_coords = _transform(pixel_coords, cam_proj_mat_inv)
world_coords_homo = np.concatenate(
[world_coords, np.ones((h, w, 1))], axis=-1)
return world_coords_homo
object_type_to_class[ObjectType.VISION_SENSOR] = VisionSensor
| 2.5 | 2 |
tools/build_defs/shell_toolchain/polymorphism/generate_overloads.bzl | bshi/rules_foreign_cc | 0 | 12765798 | <reponame>bshi/rules_foreign_cc
# buildifier: disable=module-docstring
def _provider_text(symbols):
return """
WRAPPER = provider(
doc = "Wrapper to hold imported methods",
fields = [{}]
)
""".format(", ".join(["\"%s\"" % symbol_ for symbol_ in symbols]))
def _getter_text():
return """
def id_from_file(file_name):
(before, middle, after) = file_name.partition(".")
return before
def get(file_name):
id = id_from_file(file_name)
return WRAPPER(**_MAPPING[id])
"""
def _mapping_text(ids):
data_ = []
for id in ids:
data_.append("{id} = wrapper_{id}".format(id = id))
return "_MAPPING = dict(\n{data}\n)".format(data = ",\n".join(data_))
def _load_and_wrapper_text(id, file_path, symbols):
load_list = ", ".join(["{id}_{symbol} = \"{symbol}\"".format(id = id, symbol = symbol_) for symbol_ in symbols])
load_statement = "load(\":{file}\", {list})".format(file = file_path, list = load_list)
data = ", ".join(["{symbol} = {id}_{symbol}".format(id = id, symbol = symbol_) for symbol_ in symbols])
wrapper_statement = "wrapper_{id} = dict({data})".format(id = id, data = data)
return struct(
load_ = load_statement,
wrapper = wrapper_statement,
)
def id_from_file(file_name):
(before, middle, after) = file_name.partition(".")
return before
def get_file_name(file_label):
(before, separator, after) = file_label.partition(":")
return id_from_file(after)
def _copy_file(rctx, src):
src_path = rctx.path(src)
copy_path = src_path.basename
rctx.template(copy_path, src_path)
return copy_path
_BUILD_FILE = """\
exports_files(
[
"toolchain_data_defs.bzl",
],
visibility = ["//visibility:public"],
)
"""
def _generate_overloads(rctx):
symbols = rctx.attr.symbols
ids = []
lines = ["# Generated overload mappings"]
loads = []
wrappers = []
for file_ in rctx.attr.files:
id = id_from_file(file_.name)
ids.append(id)
copy = _copy_file(rctx, file_)
load_and_wrapper = _load_and_wrapper_text(id, copy, symbols)
loads.append(load_and_wrapper.load_)
wrappers.append(load_and_wrapper.wrapper)
lines += loads
lines += wrappers
lines.append(_mapping_text(ids))
lines.append(_provider_text(symbols))
lines.append(_getter_text())
rctx.file("toolchain_data_defs.bzl", "\n".join(lines))
rctx.file("BUILD", _BUILD_FILE)
generate_overloads = repository_rule(
implementation = _generate_overloads,
attrs = {
"symbols": attr.string_list(),
"files": attr.label_list(),
},
)
| 2.171875 | 2 |
utilityhelper/common/HelperMagic.py | leileigong/utility-helper | 0 | 12765799 | <reponame>leileigong/utility-helper
#coding:utf-8
from __future__ import (print_function, unicode_literals)
def wrapper_round_n_float(radix):
def flyable_to_return(cls):
def r(self, num):
return round(num, radix)
cls.r = r
return cls
return flyable_to_return
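# `wrapper_round_n_float(radix)` is a decorator factory: calling it returns a
# class decorator that attaches an `r(self, num)` method rounding to `radix`
# decimal places, so `R().r(5.1111111)` below yields 5.11111.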
@wrapper_round_n_float(5)
class R(float):
pass
class Round2Float(float):
"""派生不可变类型
关于”__new__”有一个重要的用途就是用来派生不可变类型。
例如,Python中float是不可变类型,如果想要从float中派生一个子类,就要实现”__new__”方法:"""
def __new__(cls, num):
num = round(num, 2)
return super(Round2Float, cls).__new__(cls, num)
# return float.__new__(Round2Float, num)
if __name__ == "__main__":
f = Round2Float(4.14159)
print(f)
rr = R()
    print(rr.r(5.1111111))
| 3.09375 | 3 |
NetworkTracerouteCollector.py | djw8605/ps-ingest | 0 | 12765800 | #!/usr/bin/env python
import os
import time
import copy
import json
from datetime import datetime
import threading
import collector
import siteMapping
class NetworkTracerouteCollector(collector.Collector):
def __init__(self):
self.TOPIC = "/topic/perfsonar.raw.packet-trace"
self.INDEX_PREFIX = 'ps_trace-'
super(NetworkTracerouteCollector, self).__init__()
def eventCreator(self, message):
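        """Parse one perfSONAR packet-trace message and buffer one
        Elasticsearch document per timestamped traceroute measurement."""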
m = json.loads(message)
data = {
'_type': 'doc'
}
# print(m)
source = m['meta']['source']
destination = m['meta']['destination']
data['MA'] = m['meta']['measurement_agent']
data['src'] = source
data['dest'] = destination
data['src_host'] = m['meta']['input_source']
data['dest_host'] = m['meta']['input_destination']
data['ipv6'] = False
if ':' in source or ':' in destination:
data['ipv6'] = True
so = siteMapping.getPS(source)
de = siteMapping.getPS(destination)
if so != None:
data['src_site'] = so[0]
data['src_VO'] = so[1]
if de != None:
data['dest_site'] = de[0]
data['dest_VO'] = de[1]
data['src_production'] = siteMapping.isProductionThroughput(source)
data['dest_production'] = siteMapping.isProductionThroughput(
destination)
        if 'datapoints' not in m:
print(threading.current_thread().name,
"no datapoints found in the message")
return
dp = m['datapoints']
# print(su)
for ts in dp:
dati = datetime.utcfromtimestamp(float(ts))
data['_index'] = self.es_index_prefix + self.INDEX_PREFIX + str(dati.year) + "." + str(dati.month) + "." + str(dati.day)
data['timestamp'] = int(float(ts) * 1000)
data['_id'] = hash((m['meta']['org_metadata_key'], data['timestamp']))
data['hops'] = []
data['rtts'] = []
data['ttls'] = []
hops = dp[ts]
for hop in hops:
if 'ttl' not in hop or 'ip' not in hop or 'query' not in hop:
continue
nq = int(hop['query'])
if nq != 1:
continue
data['hops'].append(hop['ip'])
data['ttls'].append(int(hop['ttl']))
if 'rtt' in hop and hop['rtt'] != None:
data['rtts'].append(float(hop['rtt']))
else:
data['rtts'].append(0.0)
# print(data)
hs = ''
for h in data['hops']:
if h == None:
hs += "None"
else:
hs += h
data['n_hops'] = len(data['hops'])
if len(data['rtts']):
data['max_rtt'] = max(data['rtts'])
data['hash'] = hash(hs)
self.aLotOfData.append(copy.copy(data))
def main():
collector = NetworkTracerouteCollector()
collector.start()
if __name__ == "__main__":
    main()
| 2.171875 | 2 |
utils/files.py | phenmp/atassist-api | 0 | 12765801 | from os.path import dirname, join, isfile
# Constants
PROJECT_ROOT_DIRECTORY = dirname(dirname(__file__))
DUMP_FILE_SUFFIX = "_dump.csv"
def getFullPath(*path):
return join(PROJECT_ROOT_DIRECTORY, *path)
def getUserLastDumpFilePath(userId):
return getFullPath('resources', 'dump_files', "{0}{1}".format(userId, DUMP_FILE_SUFFIX))
def writeToCsvFile(userId, headers, rows):
target = open(getUserLastDumpFilePath(userId),'w+')
target.truncate()
# #dump same data to file without format
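    # NOTE: rows[0] is replaced by the headers below, so the caller is
    # assumed to pass a disposable placeholder as the first row.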
rows[0] = headers
for i in range(len(rows)):
value = ', '.join([ rows[i][index] for index in range(len(rows[i])) ])
target.write(value + "\n")
target.close()
| 3.046875 | 3 |
beastx/modules/sangmata.py | Digasi123percy/Beast-X | 11 | 12765802 | <reponame>Digasi123percy/Beast-X
import datetime
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
# team mates @danish_00, @Shivam_Patel, @xditya, @The_Siddharth_Nigam
from . import *
@beast.on(admin_cmd(pattern="sg ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.reply_to_msg_id:
await event.edit("```Reply to any user message.```")
return
reply_message = await event.get_reply_message()
chat = "Sangmatainfo_bot"
sender = reply_message.sender.id
if reply_message.sender.bot:
await event.edit("```Reply to actual users message.```")
return
await event.edit("```Checking...```")
async with event.client.conversation(chat) as conv:
try:
#Fixed By @Shivam_Patel
            response1 = conv.wait_event(events.NewMessage(incoming=True, from_users=461843263))
            response2 = conv.wait_event(events.NewMessage(incoming=True, from_users=461843263))
            response3 = conv.wait_event(events.NewMessage(incoming=True, from_users=461843263))
            await conv.send_message("/search_id {}".format(sender))
            response1 = await response1
            response2 = await response2
            response3 = await response3
except YouBlockedUserError:
await event.reply("```Please unblock (@Sangmatainfo_bot) ```")
return
if response1.text.startswith("No records found"):
await event.edit("```User never changed his Username...```")
else:
await event.delete()
await event.client.send_message(event.chat_id, response1.message)
await event.client.send_message(event.chat_id, response2.message)
await event.client.send_message(event.chat_id, response3.message)
CMD_HELP.update({
"sangmatab Info":
"`.sg <reply to user or @usernamre> "
})
| 2.109375 | 2 |
utils/mahalanobis.py | gautard/pystatsml | 123 | 12765803 | <reponame>gautard/pystatsml<filename>utils/mahalanobis.py
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 16:09:56 2016
@author: <EMAIL>
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
#%matplotlib inline
'''
Mahalanobis distance
====================
'''
from matplotlib.patches import Ellipse
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
n_samples, n_features = 100, 2
mean0, mean1 = np.array([0, 0]), np.array([0, 2])
Cov = np.array([[1, .8],[.8, 1]])
np.random.seed(42)
X0 = np.random.multivariate_normal(mean0, Cov, n_samples)
X1 = np.random.multivariate_normal(mean1, Cov, n_samples)
x = np.array([2, 2])
plt.scatter(X0[:, 0], X0[:, 1], color='b')
plt.scatter(X1[:, 0], X1[:, 1], color='r')
plt.scatter(mean0[0], mean0[1], color='b', s=200, label="m0")
plt.scatter(mean1[0], mean1[1], color='r', s=200, label="m2")
plt.scatter(x[0], x[1], color='k', s=200, label="x")
plot_cov_ellipse(Cov, pos=mean0, facecolor='none', linewidth=2, edgecolor='b')
plot_cov_ellipse(Cov, pos=mean1, facecolor='none', linewidth=2, edgecolor='r')
plt.legend(loc='upper left')
#
d2_m0x = scipy.spatial.distance.euclidean(mean0, x)
d2_m0m2 = scipy.spatial.distance.euclidean(mean0, mean1)
Covi = scipy.linalg.inv(Cov)
dm_m0x = scipy.spatial.distance.mahalanobis(mean0, x, Covi)
dm_m0m2 = scipy.spatial.distance.mahalanobis(mean0, mean1, Covi)
print('Euclidean dist(m0, x)=%.2f > dist(m0, m2)=%.2f' % (d2_m0x, d2_m0m2))
print('Mahalanobis dist(m0, x)=%.2f < dist(m0, m2)=%.2f' % (dm_m0x, dm_m0m2))
'''
## Exercise
- Write a function `euclidean(a, b)` that compute the euclidean distance
- Write a function `mahalanobis(a, b, Covi)` that compute the euclidean
distance, with the inverse of the covariance matrix. Use `scipy.linalg.inv(Cov)`
to invert your matrix.
'''
def euclidean(a, b):
return np.sqrt(np.sum((a - b) ** 2))
def mahalanobis(a, b, cov_inv):
return np.sqrt(np.dot(np.dot((a - b), cov_inv), (a - b).T))
assert mahalanobis(mean0, mean1, Covi) == dm_m0m2
assert euclidean(mean0, mean1) == d2_m0m2
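# Mahalanobis distance: d_M(a, b) = sqrt((a - b) Cov^{-1} (a - b)^T), i.e. the
# Euclidean distance measured after whitening by the inverse covariance.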
mahalanobis(X0, mean0, Covi)
X = X0
mean = mean0
covi = Covi
np.sqrt(np.dot(np.dot((X - mean), covi), (X - mean).T))
def mahalanobis(X, mean, covi):
"""
from scipy.spatial.distance import mahalanobis
d2= np.array([mahalanobis(X[i], mean, covi) for i in range(X.shape[0])])
np.all(mahalanobis(X, mean, covi) == d2)
"""
return np.sqrt(np.sum(np.dot((X - mean), covi) * (X - mean), axis=1))
| 2.90625 | 3 |
array/0074_search_a_2d_matrix/0074_search_a_2d_matrix.py | zdyxry/LeetCode | 6 | 12765804 | class Solution(object):
def searchMatrix(self, matrix, target):
if not matrix or target is None:
return False
        rows, cols = len(matrix), len(matrix[0])
        # binary-search the matrix as one flat sorted array of length rows*cols
        low, high = 0, rows * cols - 1
        while low <= high:
            mid = (low + high) // 2
            # map the flat index back to (row, col)
            num = matrix[mid // cols][mid % cols]
            if num == target:
                return True
            elif num < target:
                low = mid + 1
            else:
                high = mid - 1
        return False
matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,50]]
target = 3
res = Solution().searchMatrix(matrix, target)
print(res)
| 3.6875 | 4 |
src/MathewTrainer.py | akshaybahadur21/MathEw | 10 | 12765805 | from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Flatten, Conv2D
from keras.layers import MaxPooling2D, Dropout
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from src.utils.train_utils import post_process
class MathewTrainer:
def __init__(self):
self.image_x = 100
self.image_y = 100
self.train_dir = "data/"
self.batch_size = 64
self.model_name = "model/mathew.h5"
def keras_model(self, image_x, image_y):
num_of_classes = 14
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=(image_x, image_y, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(64, (2, 2), input_shape=(image_x, image_y, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(128, (2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(num_of_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
filepath = self.model_name
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
return model, callbacks_list
def train(self):
train_datagen = ImageDataGenerator(
rescale=1. / 255,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
rotation_range=15,
zoom_range=0.2,
horizontal_flip=False,
validation_split=0.2,
fill_mode='nearest')
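        # One ImageDataGenerator with validation_split=0.2 backs both loaders;
        # the `subset` argument below picks the training or validation slice.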
train_generator = train_datagen.flow_from_directory(
self.train_dir,
target_size=(self.image_x, self.image_y),
color_mode="grayscale",
batch_size=self.batch_size,
seed=42,
class_mode='categorical',
subset="training",
shuffle=True)
validation_generator = train_datagen.flow_from_directory(
self.train_dir,
target_size=(self.image_x, self.image_y),
color_mode="grayscale",
batch_size=self.batch_size,
seed=42,
class_mode='categorical',
subset="validation",
shuffle=False)
print(validation_generator.class_indices)
model, callbacks_list = self.keras_model(self.image_x, self.image_y)
print(model.summary())
        his = model.fit_generator(train_generator, epochs=20, validation_data=validation_generator,
                                  callbacks=callbacks_list)
model.save(self.model_name)
post_process(model, validation_generator, his)
| 3.015625 | 3 |
python-pyqt/Section01/unit03-QMainWindow/01.py | sharebook-kr/learningspoons-bootcamp-finance | 9 | 12765806 | import sys
from PyQt5.QtWidgets import *
class MyWindow(QMainWindow):
def __init__(self):
super().__init__()
self.resize(400, 300)
self.move(300, 300)
app = QApplication(sys.argv)
win = MyWindow()
win.show()
app.exec_()
| 2.53125 | 3 |
src/python/twitter/checkstyle/plugins/trailing_whitespace.py | zhouyijiaren/commons | 1,143 | 12765807 | # ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from collections import defaultdict
import tokenize
import sys
from ..common import CheckstylePlugin
class TrailingWhitespace(CheckstylePlugin):
"""Warn on invalid trailing whitespace."""
@classmethod
def build_exception_map(cls, tokens):
"""Generates a set of ranges where we accept trailing slashes, specifically within comments
and strings.
"""
exception_ranges = defaultdict(list)
for token in tokens:
token_type, _, token_start, token_end = token[0:4]
if token_type in (tokenize.COMMENT, tokenize.STRING):
if token_start[0] == token_end[0]:
exception_ranges[token_start[0]].append((token_start[1], token_end[1]))
else:
exception_ranges[token_start[0]].append((token_start[1], sys.maxint))
for line in range(token_start[0] + 1, token_end[0]):
exception_ranges[line].append((0, sys.maxint))
exception_ranges[token_end[0]].append((0, token_end[1]))
return exception_ranges
def __init__(self, *args, **kw):
super(TrailingWhitespace, self).__init__(*args, **kw)
self._exception_map = self.build_exception_map(self.python_file.tokens)
def has_exception(self, line_number, exception_start, exception_end=None):
exception_end = exception_end or exception_start
for start, end in self._exception_map.get(line_number, ()):
if start <= exception_start and exception_end <= end:
return True
return False
def nits(self):
for line_number, line in self.python_file.enumerate():
stripped_line = line.rstrip()
if stripped_line != line and not self.has_exception(line_number,
len(stripped_line), len(line)):
yield self.error('T200', 'Line has trailing whitespace.', line_number)
if line.rstrip().endswith('\\'):
if not self.has_exception(line_number, len(line.rstrip()) - 1):
yield self.error('T201', 'Line has trailing slashes.', line_number)
| 2.09375 | 2 |
examples/amoebot/amoebot_sim.py | csningli/MultiAgent | 1 | 12765808 | <filename>examples/amoebot/amoebot_sim.py<gh_stars>1-10
# MultiAgent 2.0
# (c) 2017-2018, NiL, <EMAIL>
import sys, random, datetime, math
random.seed(datetime.datetime.now())
sys.path.append("../..")
from mas.multiagent import *
from mas.utils import *
amoebot_radius = 10.0
def pq_to_xy(a) :
b = array([0.0, 0.0])
p = a[0]
q = a[1]
x = 2 * amoebot_radius * p + 2 * amoebot_radius * math.cos(math.pi / 3.0) * q
y = 2 * amoebot_radius * math.cos(math.pi / 6.0) * q
b[0] = x
b[1] = y
return b
def wq_to_xy(a) :
b = array([0.0, 0.0])
p = a[0]
q = a[1]
x = -2 * amoebot_radius * p -2 * amoebot_radius * math.cos(math.pi / 3.0) * q
y = 2 * amoebot_radius * math.cos(math.pi / 6.0) * q
b[0] = x
b[1] = y
return b
def xy_to_pq(b) :
a = array([0.0, 0.0])
x = b[0]
y = b[1]
q = int(float(y) / (2 * amoebot_radius * math.cos(math.pi / 6.0)))
p = int((float(x) - 2 * amoebot_radius * math.cos(math.pi / 3.0) * q) / (2 * amoebot_radius))
a[0] = p
a[1] = q
return a
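# pq_to_xy/xy_to_pq map between skewed hexagonal-lattice coordinates (p, q)
# and Cartesian (x, y) using the basis vectors (2r, 0) and
# (2r*cos(60 deg), 2r*cos(30 deg)); xy_to_pq inverts by integer truncation,
# so it is exact only for points lying on the lattice.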
# share the memory among all the agents
shared_memory = {}
class AmoeObject(Object) :
@property
def amoe_pos(self) :
return tuple(xy_to_pq(self.pos))
@amoe_pos.setter
def amoe_pos(self, pos) :
self.pos = pq_to_xy(pos)
def draw(self, screen) :
if self.visible == True :
p = pymunk.Vec2d(self.pos)
rot = pymunk.Vec2d(self.rot)
r = self.radius
(width, height) = screen.get_size()
# adjust the drawing coordinates to make sure (0, 0) stays in the center
p.x = int(width / 2.0 + p.x)
p.y = int(height / 2.0 - p.y)
head = pymunk.Vec2d(rot.x, -rot.y) * self.radius * 0.9
pygame.draw.circle(screen, self.stroke_color, p, int(r), 2)
pygame.draw.circle(screen, self.fill_color, p, int(r/2.0), 4)
class AmoeOracleSpace(OracleSpace) :
def __init__(self, objs = [], obts = []) :
super(AmoeOracleSpace, self).__init__(objs, obts)
self.__objs_indexing = {}
def add_obj(self, obj) :
if check_attrs(obj, {
"body" : None,
"name" : None,
"pos" : None,
}) and obj.name not in self.objs.keys() :
self.objs[obj.name] = obj
if str(obj.amoe_pos) not in self.__objs_indexing.keys() :
self.__objs_indexing[str(obj.amoe_pos)] = []
self.__objs_indexing[str(obj.amoe_pos)].append(obj.name)
def add_obt(self, obt) :
if check_attrs(obt, {
"body" : None,
"a" : None,
"b" : None,
"radius" : None,
}) and obt.name not in self.obts.keys() :
self.obts[obt.name] = obt
def move_amoe_obj(self, name, amoe_pos) :
amoe_pos = tuple(amoe_pos)
if name in self.objs.keys() :
obj = self.objs[name]
if str(obj.amoe_pos) in self.__objs_indexing.keys() :
if name in self.__objs_indexing[str(obj.amoe_pos)] :
index = self.__objs_indexing[str(obj.amoe_pos)].index(name)
del(self.__objs_indexing[str(obj.amoe_pos)][index])
obj.amoe_pos = amoe_pos
if str(amoe_pos) not in self.__objs_indexing.keys() :
self.__objs_indexing[str(amoe_pos)] = []
self.__objs_indexing[str(amoe_pos)].append(name)
def objs_at_amoe_pos(self, amoe_pos) :
amoe_pos = tuple(amoe_pos)
return self.__objs_indexing.get(str(amoe_pos), [])
def draw(self, screen) :
# draw objs
for obj in self.objs.values() :
obj.draw(screen)
# draw connection between the coupled objects
(width, height) = screen.get_size()
for i in range(int(math.floor(len(self.objs) / 2.0))) :
head = self.objs[str(2 * i)]
tail = self.objs[str(2 * i + 1)]
if head.amoe_pos != tail.amoe_pos :
head_draw = [int(round(width / 2.0 + head.pos[0])), int(round(height / 2.0 - head.pos[1]))]
tail_draw = [int(round(width / 2.0 + tail.pos[0])), int(round(height / 2.0 - tail.pos[1]))]
pygame.draw.line(screen, head.stroke_color, head_draw, tail_draw, 4)
class AmoeContext(Context) :
def handle_reqt(self, reqt) :
resp = super(AmoeContext, self).handle_reqt(reqt)
msgs = {}
for msg in self.reqt.get_msgs(dest = "") :
if msg.src not in msgs.keys() :
msgs[msg.src] = []
msgs[msg.src].append(msg)
for i in range(int(math.floor(len(self.oracle.objs)/2.0))) :
head = self.oracle.objs[str(2 * i)]
tail = self.oracle.objs[str(2 * i + 1)]
for msg in msgs.get(head.name, []) :
# print(msg.key, msg.value, head.amoe_pos, tail.amoe_pos)
if msg.key == "expand" :
target_amoe_pos = array(head.amoe_pos) + array(msg.value)
if head.amoe_pos == tail.amoe_pos and len(self.oracle.objs_at_amoe_pos(target_amoe_pos)) < 1:
self.oracle.move_amoe_obj(head.name, target_amoe_pos)
elif msg.key == "contract" :
if msg.value == "head" :
self.oracle.move_amoe_obj(tail.name, head.amoe_pos)
elif msg.value == "tail" :
self.oracle.move_amoe_obj(head.name, tail.amoe_pos)
# print(head.amoe_pos, tail.amoe_pos)
self.resp.add_msg(Message(dest = head.name, key = "head_amoe_pos", value = tuple(head.amoe_pos)))
self.resp.add_msg(Message(dest = head.name, key = "tail_amoe_pos", value = tuple(tail.amoe_pos)))
head_detect = []
tail_detect = []
for port in [(1, 0), (1, -1), (0, 1), (0, -1), (-1, 1), (-1, 0)] :
if len(self.oracle.objs_at_amoe_pos(array(head.amoe_pos) + array(port))) > 0 :
head_detect.append(port)
if len(self.oracle.objs_at_amoe_pos(array(tail.amoe_pos) + array(port))) > 0 :
tail_detect.append(port)
self.resp.add_msg(Message(dest = head.name, key = "head_detect", value = head_detect))
self.resp.add_msg(Message(dest = head.name, key = "tail_detect", value = tail_detect))
return self.resp
def draw(self, screen) :
(width, height) = screen.get_size()
# draw the grids
grid_line_color = THECOLORS["lightgray"]
unit = 2 * amoebot_radius * math.cos(math.pi / 6.0)
start = [0, height / 2]
end = [width, height / 2]
pygame.draw.line(screen, THECOLORS["gray"], start, end, 1)
q_ceil = int(math.ceil((height / unit) / 2))
for i in range(1, q_ceil) :
start = [0, height / 2 + unit * i]
end = [width, height / 2 + unit * i]
pygame.draw.line(screen, THECOLORS["gray"], start, end, 1)
start = [0, height / 2 - unit * i]
end = [width, height / 2 - unit * i]
pygame.draw.line(screen, THECOLORS["gray"], start, end, 1)
distance = pldist_l2((-width / 2.0, height / 2.0), (0, 0), pq_to_xy((0, 1)))
start = pq_to_xy((0, q_ceil))
start[0] = int(width / 2.0 + start[0])
start[1] = int(height / 2.0 - start[1])
end = pq_to_xy((0, -q_ceil))
end[0] = int(width / 2.0 + end[0])
end[1] = int(height / 2.0 - end[1])
pygame.draw.line(screen, grid_line_color, start, end, 1)
start = wq_to_xy((0, q_ceil))
start[0] = int(width / 2.0 + start[0])
start[1] = int(height / 2.0 - start[1])
end = wq_to_xy((0, -q_ceil))
end[0] = int(width / 2.0 + end[0])
end[1] = int(height / 2.0 - end[1])
pygame.draw.line(screen, grid_line_color, start, end, 1)
for i in range(1, int(1.5 * math.ceil(distance / unit))) :
start = pq_to_xy((i, q_ceil))
start[0] = int(width / 2.0 + start[0])
start[1] = int(height / 2.0 - start[1])
end = pq_to_xy((i, -q_ceil))
end[0] = int(width / 2.0 + end[0])
end[1] = int(height / 2.0 - end[1])
pygame.draw.line(screen, grid_line_color, start, end, 1)
start = pq_to_xy((-i, q_ceil))
start[0] = int(width / 2.0 + start[0])
start[1] = int(height / 2.0 - start[1])
end = pq_to_xy((-i, -q_ceil))
end[0] = int(width / 2.0 + end[0])
end[1] = int(height / 2.0 - end[1])
pygame.draw.line(screen, grid_line_color, start, end, 1)
start = wq_to_xy((i, q_ceil))
start[0] = int(width / 2.0 + start[0])
start[1] = int(height / 2.0 - start[1])
end = wq_to_xy((i, -q_ceil))
end[0] = int(width / 2.0 + end[0])
end[1] = int(height / 2.0 - end[1])
pygame.draw.line(screen, grid_line_color, start, end, 1)
start = wq_to_xy((-i, q_ceil))
start[0] = int(width / 2.0 + start[0])
start[1] = int(height / 2.0 - start[1])
end = wq_to_xy((-i, -q_ceil))
end[0] = int(width / 2.0 + end[0])
end[1] = int(height / 2.0 - end[1])
pygame.draw.line(screen, grid_line_color, start, end, 1)
super(AmoeContext, self).draw(screen)
class AmoeDetectModule(Module) :
def sense(self, reqt) :
agent_name = self.mem.read("name", None)
for msg in reqt.get_msgs(agent_name) :
if msg.key == "head_amoe_pos" :
self.mem.reg(key = "head_amoe_pos", value = msg.value)
elif msg.key == "tail_amoe_pos" :
self.mem.reg(key = "tail_amoe_pos", value = msg.value)
elif msg.key == "head_detect" :
self.mem.reg(key = "head_detect", value = msg.value)
elif msg.key == "tail_detect" :
self.mem.reg(key = "tail_detect", value = msg.value)
# print("head_detect", self.mem.read("head_detect"))
# print("tail_detect", self.mem.read("tail_detect"))
class AmoeMoveModule(Module) :
def act(self, resp) :
contract_value = self.mem.read("contract", None)
if contract_value is not None and contract_value in ["head", "tail"]:
resp.add_msg(Message(key = "contract", value = contract_value))
self.mem.reg(key = "contract", value = None)
else :
expand_value = self.mem.read("expand", None)
if expand_value is not None and check_attrs(expand_value, {"__getitem__" : None, "__len__" : None}) and len(expand_value) >= 2 :
resp.add_msg(Message(key = "expand", value = expand_value))
self.mem.reg(key = "expand", value = None)
class AmoeProcessModule(Module) :
def process(self) :
agent_name = self.mem.read("name", None)
head_amoe_pos = self.mem.read("head_amoe_pos", None)
tail_amoe_pos = self.mem.read("tail_amoe_pos", None)
head_detect = self.mem.read("head_detect", [])
tail_detect = self.mem.read("tail_detect", [])
# print("head_detect:", head_detect)
# print("tail_detect:", tail_detect)
if head_amoe_pos is not None and tail_amoe_pos is not None :
head_ports = [(1, 0), (1, -1), (0, 1), (0, -1), (-1, 1), (-1, 0)]
for port in head_detect :
if port in head_ports :
del(head_ports[head_ports.index(port)])
if head_amoe_pos == tail_amoe_pos :
if len(head_ports) > 0 :
if random.random() < 0.5 :
self.mem.reg(key = "expand", value = random.choice(head_ports))
# print("port:", self.mem.read("expand"))
else :
if random.random() < 0.5 :
self.mem.reg(key = "contract", value = "head")
else :
self.mem.reg(key = "contract", value = "tail")
class AmoeFocusAgent(Agent) :
@property
def focus(self) :
focus_info = {
}
head_amoe_pos = self.mem.read("head_amoe_pos", None)
if head_amoe_pos is not None :
focus_info["head_amoe_pos"] = "(%4.2f, %4.2f)" % (head_amoe_pos[0], head_amoe_pos[1]),
tail_amoe_pos = self.mem.read("tail_amoe_pos", None)
if tail_amoe_pos is not None :
focus_info["tail_amoe_pos"] = "(%4.2f, %4.2f)" % (tail_amoe_pos[0], tail_amoe_pos[1]),
return focus_info
class AmoeStaticAgent(AmoeFocusAgent) :
def __init__(self, name) :
super(AmoeStaticAgent, self).__init__(name)
self.mods = []
class AmoeDynamicAgent(AmoeFocusAgent) :
def __init__(self, name) :
super(AmoeDynamicAgent, self).__init__(name)
self.mods = [AmoeDetectModule(), AmoeMoveModule(), AmoeProcessModule()]
def run_sim(filename = None) :
'''
>>> run_sim()
'''
# create the oracle space
oracle = AmoeOracleSpace()
# create the context
context = AmoeContext(oracle = oracle)
# create the schedule for adding agents in the running
schedule = Schedule()
# add objects and agents to the context
row = 5
col = 5
for i in range(col) :
for j in range(row) :
k = i * row + j
# the (2 * k)-th object is coupled with the (2 * k + 1)-th object, i.e. 0 is coupled with 1, 4 is coupled with 5.
amoe_pos = (4 * i - 2 * (col - 1) + 2 * ((row - 1)/ 2 - j), 4 * j - 2 * (row - 1))
obj = AmoeObject(name = str(2 * k))
obj.amoe_pos = amoe_pos
context.add_obj(obj)
if i == (col - 1) / 2 and j == (row - 1) / 2:
schedule.add_agent(AmoeDynamicAgent(name = str(2 * k)))
else :
schedule.add_agent(AmoeStaticAgent(name = str(2 * k)))
obj = AmoeObject(name = str(2 * k + 1))
obj.amoe_pos = amoe_pos
context.add_obj(obj)
schedule.add_agent(AmoeFocusAgent(name = str(2 * k + 1)))
# create the driver
driver = Driver(context = context, schedule = schedule)
# create the inspector
inspector = Inspector(delay = 10)
# create the simulator
sim = Simulator(driver = driver)
print("Simulating")
sim.simulate(graphics = True, inspector = inspector, filename = filename)
if __name__ == '__main__' :
filename = None
if (len(sys.argv) > 1) :
filename = sys.argv[1]
run_sim(filename)
| 2.59375 | 3 |
Examples/InteractiveTutorial/MeasureRegions.py | SimpleITK/SimpleITK-MICCAI-2011-Tutorial | 25 | 12765809 | import SimpleITK as sitk
import csv
# Load the Images to be measured
ScalarValuesFile = '~/SimpleITK-MICCAI-2011-Tutorial/Data/FA.png'
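# NOTE: SimpleITK does not expand '~'; in practice these paths may need
# os.path.expanduser() first.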
ScalarValuesImage = sitk.Cast( sitk.ReadImage(ScalarValuesFile), sitk.sitkUInt32 )
sitk.Show ( ScalarValuesImage )
LabelMapFile = '~/SimpleITK-MICCAI-2011-Tutorial/Data/LB.png'
LabelMapImage = sitk.Cast( sitk.ReadImage(LabelMapFile), sitk.sitkUInt32 )
sitk.Show ( LabelMapImage )
# <demo> stop
lsfilter = sitk.LabelStatisticsImageFilter()
lsfilter.Execute(LabelMapImage,ScalarValuesImage)
keys = lsfilter.GetValidLabels();
# <demo> stop
### Now extract measurement values to cataloging in a database/spreadsheet
MySubjectID="Subj01"
measurementDict=dict()
for labelValue in keys:
uniqueId = ( MySubjectID, labelValue )
measurementMap=lsfilter.GetMeasurementMap(labelValue)
measurementDict[uniqueId]=dict( measurementMap )
# <demo> stop
print("DUMPING MEASUREMENT DICTIONARY")
print(measurementDict)
# <demo> stop
#A map between internal labels and header row strings.
headerMap={'SUBJID':'SubjectID',
'LABELID':'LabelID',
'Variance':'Variance',
'Minimum':'Minimum',
'Maximum':'Maximum',
'Mean':'Mean',
'Count':'NumPixels',
'approxMedian':'Median',
'Sum':'Sum',
'Sigma':'Sigma'}
csvFileName="MyValues.csv"
csvFile=open(csvFileName, 'wb')
myDictWriter=csv.DictWriter(csvFile,headerMap.keys())
myDictWriter.writerow(headerMap)
for uniqueId in measurementDict.keys():
unrollRow = measurementDict[uniqueId]
unrollRow['SUBJID']=uniqueId[0]
unrollRow['LABELID']=uniqueId[1]
myDictWriter.writerow(unrollRow)
csvFile.close()
| 2.59375 | 3 |
map_label_tool/py_proto/modules/v2x/proto/v2x_service_obu_to_car_pb2.py | freeclouds/OpenHDMap | 0 | 12765810 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/v2x/proto/v2x_service_obu_to_car.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from modules.perception.proto import perception_obstacle_pb2 as modules_dot_perception_dot_proto_dot_perception__obstacle__pb2
from modules.v2x.proto import v2x_traffic_light_pb2 as modules_dot_v2x_dot_proto_dot_v2x__traffic__light__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/v2x/proto/v2x_service_obu_to_car.proto',
package='apollo.v2x',
syntax='proto2',
serialized_pb=_b('\n.modules/v2x/proto/v2x_service_obu_to_car.proto\x12\napollo.v2x\x1a\x32modules/perception/proto/perception_obstacle.proto\x1a)modules/v2x/proto/v2x_traffic_light.proto\"I\n\x0eStatusResponse\x12\x15\n\x06status\x18\x01 \x02(\x08:\x05\x66\x61lse\x12\x0c\n\x04info\x18\x02 \x01(\t\x12\x12\n\nerror_code\x18\x03 \x01(\x03\x32\xca\x01\n\x08ObuToCar\x12_\n\x17SendPerceptionObstacles\x12&.apollo.perception.PerceptionObstacles\x1a\x1a.apollo.v2x.StatusResponse\"\x00\x12]\n\x13SendV2xTrafficLight\x12(.apollo.v2x.IntersectionTrafficLightData\x1a\x1a.apollo.v2x.StatusResponse\"\x00')
,
dependencies=[modules_dot_perception_dot_proto_dot_perception__obstacle__pb2.DESCRIPTOR,modules_dot_v2x_dot_proto_dot_v2x__traffic__light__pb2.DESCRIPTOR,])
_STATUSRESPONSE = _descriptor.Descriptor(
name='StatusResponse',
full_name='apollo.v2x.StatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='apollo.v2x.StatusResponse.status', index=0,
number=1, type=8, cpp_type=7, label=2,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='info', full_name='apollo.v2x.StatusResponse.info', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error_code', full_name='apollo.v2x.StatusResponse.error_code', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=157,
serialized_end=230,
)
DESCRIPTOR.message_types_by_name['StatusResponse'] = _STATUSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), dict(
DESCRIPTOR = _STATUSRESPONSE,
__module__ = 'modules.v2x.proto.v2x_service_obu_to_car_pb2'
# @@protoc_insertion_point(class_scope:apollo.v2x.StatusResponse)
))
_sym_db.RegisterMessage(StatusResponse)
# @@protoc_insertion_point(module_scope)
| 1.28125 | 1 |
FMPNet.py | ParadoxZW/CIFAR100-PRACTICE | 1 | 12765811 | <filename>FMPNet.py
# it doesn't work, still need debugging.
# But I'm not interesting in this architecture any more.
# So I just keep the code there
from modules import FMPBlock, LayerNorm
from torch import tensor
import torch
import numpy as np
import torch.nn.functional as F
from torch import nn
class DropoutFMP(nn.Module):
"fractional max pooling with dropout"
def __init__(self, size, out_channels, dropout=0):
super(DropoutFMP, self).__init__()
self.norm = LayerNorm(features=size)
self.dropout = nn.Dropout(dropout)
self.fmp = FMPBlock(size[0], out_channels)
def forward(self, x):
x = self.norm(x)
x = self.fmp(x)
x = self.dropout(x)
return x
class FMPNet(nn.Module):
"implemnet of a cnn network with fractional max pooling"
def __init__(self):
super(FMPNet, self).__init__()
n = 10
m = 512
self.input = DropoutFMP((3, 32, 32), n)
layers = []
h = 25 # height
k = 1 # times of 160 channels
while h >= 2:
ne = DropoutFMP((n * k, h, h), n * (k + 1), 0.045 * k)
k += 1
h = int(0.8 * h)
layers.append(ne)
self.layers = nn.Sequential(*layers)
self.l1 = nn.Linear(n * 11, m)
self.l2 = nn.Linear(m, 10)
def forward(self, x):
x = self.input(x)
x = self.layers(x)
b = x.size()[0]
return F.softmax(self.l2(self.l1(x.view(b, -1))), dim=1)
if __name__ == '__main__':
net = FMPNet()
print(net)
| 2.59375 | 3 |
moflow/mf/reader.py | mwtoews/moflow | 1 | 12765812 | import os
import numpy as np
import re
import sys
try:
import h5py
except ImportError:
h5py = None
'''
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
'''
from .. import logger, logging
from .base import MFPackage, MissingFile
from .name import Modflow
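# Matches Fortran array-format specifiers such as "(10F8.3)", "(20I4)",
# "(FREE)" or "(BINARY)", capturing repeat count, type symbol, field width
# and decimal places.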
_re_fmtin = re.compile(
r'\((?P<body>(?P<rep>\d*)(?P<symbol>[IEFG][SN]?)(?P<w>\d+)(\.(?P<d>\d+))?'
r'|FREE|BINARY)\)')
class MFFileReader(object):
"""MODFLOW file reader"""
_parent_class = MFPackage
def __init__(self, f=None, parent=None):
"""Initialize with a file and an instance of a parent class
Parameters
----------
f : str, file-like object or None
A path to a file, or a file-like reader with with a 'readlines'
method, such as BytesIO. If None, then it is obtained from
parent.fpath, or parent.fname
parent : instance of MFPackage
"""
# Set up logger
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(logger.level)
if parent is None:
parent = self._parent_class()
if not isinstance(parent, self._parent_class):
self.logger.error(
"'parent' should be an instance of a %r object; found %r",
self._parent_class.__name__, parent.__class__.__name__)
self.parent = parent
if f is None:
if getattr(parent, 'fpath', None) is not None:
f = parent.fpath
elif getattr(parent, 'fname', None) is not None:
f = parent.fname
else:
raise ValueError('unsure how to open file')
# Read data
if hasattr(f, 'readlines'):
# it is a file reader object, e.g. BytesIO
self.fname = f.__class__.__name__
self.lines = f.readlines()
else:
self.fpath = self.parent.fpath = f
if getattr(self, 'fname', None) is None:
self.fname = os.path.split(self.parent.fpath)[1]
# Read whole file at once, then close it
with open(self.parent.fpath, 'r') as fp:
self.lines = fp.readlines()
if self.parent.nam is None:
self.parent.nam = Modflow()
try:
self.parent.nam.ref_dir = os.path.dirname(self.fpath)
except:
pass
self.logger.info("read file '%s' with %d lines",
self.fname, len(self.lines))
self.lineno = 0
self.data_set_num = None
def __len__(self):
"""Returns number of lines"""
return len(self.lines)
def location_exception(self, e):
"""Use to show location of exception while reading file
Example:
fp = _MFFileReader(fpath, self)
try:
fp.read_text(0)
...
fp.check_end()
except Exception as e:
exec(fp.location_exception(e))
"""
location = '%s:%s:%s:Data set %s:' % \
(self.parent.__class__.__name__, self.fname, self.lineno,
self.data_set_num)
if sys.version_info[0] < 3:
return "import sys; raise type(e), type(e)('" + location + "' + " \
"str(e)), sys.exc_info()[2]"
else:
return "import sys; raise type(e)(str(e) + '" + location + "' + " \
"str(e)).with_traceback(sys.exc_info()[2])"
def check_end(self):
"""Check end of file and show messages in logger on status"""
if len(self) == self.lineno:
self.logger.info("finished reading %d lines", self.lineno)
elif len(self) > self.lineno:
remain = len(self) - self.lineno
a, b = 's', ''
if remain == 1:
b, a = a, b
self.logger.warn(
"finished reading %d lines, but %d line%s remain%s",
self.lineno, remain, a, b)
else:
raise ValueError("%d > %d ?" % (self.lineno, len(self)))
@property
def curinfo(self):
"""Returns line and data set number info"""
return str(self.lineno) + ':Data set ' + str(self.data_set_num)
@property
def not_eof(self):
"""Reader is not at the end of file (EOF)"""
return self.lineno < len(self.lines)
@property
def curline(self):
"""Return the current line"""
try:
if self.lineno == 0:
return ''
else:
return self.lines[self.lineno - 1]
except IndexError:
self.logger.error('%s:Unexpected end of file', self.curinfo)
raise IndexError('Unexpected end of file')
def nextline(self, data_set_num=None):
"""Get next line, setting data set number and increment lineno"""
if data_set_num is not None:
self.data_set_num = data_set_num
self.logger.debug('%s:using nextline', self.curinfo)
self.lineno += 1
try:
line = self.lines[self.lineno - 1]
except IndexError:
self.lineno -= 1
self.logger.error('%s:Unexpected end of file', self.curinfo)
raise IndexError('Unexpected end of file')
if data_set_num is not None:
self.logger.debug(
'%s:returning line with length %d:%r',
self.curinfo, len(line), line)
return line
def readline(self):
"""Alias for nextline()"""
return self.nextline()
def conv(self, item, fmt, name=None):
"""Convert item to format fmt
Parameters
----------
item : str
fmt : str, default ('s')
's' for string or no conversion (default)
'i' for integer
'f' for float
name : str or None
Optional name to provide context information for debugging
"""
try:
if type(fmt) == np.dtype:
return fmt.type(item)
elif fmt == 's': # string
return item
elif fmt == 'i': # integer
return int(item)
elif fmt == 'f': # any floating-point number
# typically either a REAL or DOUBLE PRECISION
return self.parent._float_type.type(item)
else:
raise ValueError('Unknown fmt code %r' % (fmt,))
except ValueError:
if name is not None:
msg = 'Cannot cast %r of %r to type %r' % (name, item, fmt)
else:
msg = 'Cannot cast %r to type %r' % (item, fmt)
raise ValueError(msg)
def get_items(self, data_set_num=None, num_items=None, fmt='s',
multiline=False):
"""Get items from one or more lines (if multiline) into a list
If num_items is defined, then only this count will be returned and any
remaining items from the line will be ignored. If there are too few
items on the line, the values will be some form of "zero", such as
0, 0.0 or ''.
However, if `multiline=True`, then multiple lines can be read to reach
num_items.
If fmt is defined, it must be:
- 's' for string or no conversion (default)
- 'i' for integer
- 'f' for float, as defined by parent._float_type
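        Example (hypothetical values)::
            ncol, nrow = fp.get_items(1, num_items=2, fmt='i')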
"""
if data_set_num is not None:
self.data_set_num = data_set_num
self.logger.debug(
'%s:using get_items for num_items=%s',
self.curinfo, num_items)
startln = self.lineno + 1
fill_missing = False
if num_items is None or not multiline:
items = self.nextline().split()
if num_items is not None and len(items) > num_items:
items = items[:num_items]
if (not multiline and num_items is not None and
len(items) < num_items):
fill_missing = (num_items - len(items))
else:
assert isinstance(num_items, int), type(num_items)
assert num_items > 0, num_items
items = []
while len(items) < num_items:
items += self.nextline().split()
if len(items) > num_items: # trim off too many
items = items[:num_items]
if fmt == 's':
res = items
else:
res = [self.conv(x, fmt) for x in items]
if fill_missing:
if fmt == 's':
fill_value = ''
else:
fill_value = '0'
res += [self.conv(fill_value, fmt)] * fill_missing
if data_set_num is not None:
if multiline:
toline = ' to %s' % (self.lineno,)
else:
toline = ''
self.logger.debug('%s:read %d items from line %d%s',
self.data_set_num, num_items, startln, toline)
return res
def get_named_items(self, data_set_num, names, fmt='s'):
"""Get items into dict. See get_items for fmt usage"""
items = self.get_items(data_set_num, len(names), fmt)
res = {}
for name, item in zip(names, items):
if fmt != 's':
item = self.conv(item, fmt, name)
res[name] = item
return res
def read_named_items(self, data_set_num, names, fmt='s'):
"""Read items into parent. See get_items for fmt usage"""
startln = self.lineno + 1
items = self.get_named_items(data_set_num, names, fmt)
for name in items.keys():
setattr(self.parent, name, items[name])
self.logger.debug('%s:read %d items from line %d',
self.data_set_num, len(items), startln)
def read_text(self, data_set_num=0):
"""Reads 0 or more text (comment) for lines that start with '#'"""
startln = self.lineno + 1
self.parent.text = []
while True:
try:
line = self.nextline(data_set_num)
except IndexError:
break
if line.startswith('#'):
line = line[1:].strip()
self.parent.text.append(line)
else:
self.lineno -= 1 # scroll back one?
break
self.logger.debug('%s:read %d lines of text from line %d to %d',
self.data_set_num,
len(self.parent.text), startln, self.lineno)
def read_options(self, data_set_num, process_aux=True):
"""Read options, and optionally process auxiliary variables"""
line = self.nextline(data_set_num)
self.parent.Options = line.upper().split()
if hasattr(self.parent, 'valid_options'):
for opt in self.parent.Options:
if opt not in self.parent.Options:
self.logger.warn("%s:unrecognised option %r",
self.data_set_num, opt)
if process_aux:
raise NotImplementedError
else:
self.logger.debug('%s:read %d options from line %d:%s',
self.data_set_num, len(self.parent.Options),
self.lineno, self.parent.Options)
def read_parameter(self, data_set_num, names):
"""Read [PARAMETER values]
This optional item must start with the word "PARAMETER". If not found,
then names are set to 0.
Parameter names are provided in a list, and are stored as integers
to the parent object.
"""
startln = self.lineno + 1
line = self.nextline(data_set_num)
self.lineno -= 1
if line.upper().startswith('PARAMETER'):
items = self.get_items(num_items=len(names) + 1)
assert items[0].upper() == 'PARAMETER', items[0]
for name, item in zip(names, items[1:]):
value = self.conv(item, 'i', name)
setattr(self.parent, name, value)
else:
for name in names:
setattr(self.parent, name, 0)
self.logger.debug('%s:read %d parameters from line %d',
self.data_set_num, len(names), startln)
def get_array(self, data_set_num, shape, dtype, return_dict=False):
"""Returns array data, similar to array reading utilities U2DREL,
U2DINT, and U1DREL. If return_dict=True, a dict is returned with all
other attributes.
Inputs:
data_set_num - number
shape - 1D array, e.g. 10, or 2D array (20, 30)
dtype - e.g. np.float32 or 'f'
        See page 8-57 of the MODFLOW-2005 manual for details.
"""
startln = self.lineno + 1
res = {}
first_line = self.nextline(data_set_num)
# Comments are considered after a '#' character on the first line
if '#' in first_line:
res['text'] = first_line[(first_line.find('#') + 1):].strip()
num_type = np.dtype(dtype).type
res['array'] = ar = np.empty(shape, dtype=dtype)
num_items = ar.size
def read_array_data(obj, fmtin):
'''Helper subroutine to actually read array data'''
fmt = _re_fmtin.search(fmtin.upper())
if not fmt:
raise ValueError(
'cannot understand Fortran format: ' + repr(fmtin))
fmt = fmt.groupdict()
if fmt['body'] == 'BINARY':
data_size = ar.size * ar.dtype.itemsize
if hasattr(obj, 'read'):
data = obj.read(data_size)
else:
raise NotImplementedError(
"not sure how to 'read' from " + repr(obj))
iar = np.fromstring(data, dtype)
else: # ASCII
items = []
if not hasattr(obj, 'readline'):
raise NotImplementedError(
"not sure how to 'readline' from " + repr(obj))
if fmt['body'] == 'FREE':
while len(items) < num_items:
items += obj.readline().split()
else: # interpret Fortran format
if fmt['rep']:
rep = int(fmt['rep'])
else:
rep = 1
width = int(fmt['w'])
while len(items) < num_items:
line = obj.readline()
pos = 0
for n in range(rep):
try:
item = line[pos:pos + width].strip()
pos += width
if item:
items.append(item)
except IndexError:
break
iar = np.fromiter(items, dtype=dtype)
if iar.size != ar.size:
raise ValueError('expected size %s, but found %s' %
(ar.size, iar.size))
return iar
# First, assume using more modern free-format control line
control_line = first_line
dat = control_line.split()
# First item is the control word
res['cntrl'] = cntrl = dat[0].upper()
if cntrl == 'CONSTANT':
# CONSTANT CNSTNT
if len(dat) < 2:
raise ValueError(
'expecting to find at least 2 items for CONSTANT')
res['cnstnt'] = cnstnt = dat[1]
if len(dat) > 2 and 'text' not in res:
st = first_line.find(cnstnt) + len(cnstnt)
res['text'] = first_line[st:].strip()
ar.fill(cnstnt)
elif cntrl == 'INTERNAL':
# INTERNAL CNSTNT FMTIN [IPRN]
if len(dat) < 3:
raise ValueError(
'expecting to find at least 3 items for INTERNAL')
res['cnstnt'] = cnstnt = dat[1]
res['fmtin'] = fmtin = dat[2]
if len(dat) >= 4:
res['iprn'] = iprn = dat[3] # not used
if len(dat) > 4 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
iar = read_array_data(self, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'EXTERNAL':
# EXTERNAL Nunit CNSTNT FMTIN IPRN
if len(dat) < 5:
raise ValueError(
'expecting to find at least 5 items for EXTERNAL')
res['nunit'] = nunit = int(dat[1])
res['cnstnt'] = cnstnt = dat[2]
res['fmtin'] = fmtin = dat[3].upper()
res['iprn'] = iprn = dat[4] # not used
if len(dat) > 5 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
# Needs a reference to nam[nunit]
if self.parent.nam is None:
raise AttributeError(
"reference to 'nam' required for EXTERNAL array")
try:
obj = self.parent.nam[nunit]
except KeyError:
raise KeyError("nunit %s not in nam", nunit)
iar = read_array_data(obj, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'OPEN/CLOSE':
# OPEN/CLOSE FNAME CNSTNT FMTIN IPRN
if len(dat) < 5:
raise ValueError(
'expecting to find at least 5 items for OPEN/CLOSE')
res['fname'] = fname = dat[1]
res['cnstnt'] = cnstnt = dat[2]
res['fmtin'] = fmtin = dat[3].upper()
res['iprn'] = iprn = dat[4]
if len(dat) > 5 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
with open(fname, 'rb') as fp:
iar = read_array_data(fp, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'HDF5':
# GMS extension: http://www.xmswiki.com/xms/GMS:MODFLOW_with_HDF5
if not h5py:
raise ImportError('h5py module required to read HDF5 data')
# HDF5 CNSTNT IPRN "FNAME" "pathInFile" nDim start1 nToRead1 ...
file_ch = r'\w/\.\-\+_\(\)'
dat = re.findall('([' + file_ch + ']+|"[' + file_ch + ' ]+")',
control_line)
if len(dat) < 8:
raise ValueError('expecting to find at least 8 '
'items for HDF5; found ' + str(len(dat)))
assert dat[0].upper() == 'HDF5', dat[0]
res['cnstnt'] = cnstnt = dat[1]
try:
cnstnt_val = num_type(cnstnt)
except ValueError: # e.g. 1.0 as int 1
cnstnt_val = num_type(float(cnstnt))
res['iprn'] = dat[2]
res['fname'] = fname = dat[3].strip('"')
res['pathInFile'] = pathInFile = dat[4].strip('"')
nDim = int(dat[5])
nDim_len = {1: 8, 2: 10, 3: 12}
if nDim not in nDim_len:
raise ValueError('expecting to nDim to be one of 1, 2, or 3; '
'found ' + str(nDim))
elif len(dat) < nDim_len[nDim]:
raise ValueError(
('expecting to find at least %d items for HDF5 with '
'%d dimensions; found %d') %
(nDim_len[nDim], nDim, len(dat)))
elif len(dat) > nDim_len[nDim]:
token = dat[nDim_len[nDim]]
st = first_line.find(token) + len(token)
res['text'] = first_line[st:].strip()
if nDim >= 1:
start1, nToRead1 = int(dat[6]), int(dat[7])
slice1 = slice(start1, start1 + nToRead1)
if nDim >= 2:
start2, nToRead2 = int(dat[8]), int(dat[9])
slice2 = slice(start2, start2 + nToRead2)
if nDim == 3:
start3, nToRead3 = int(dat[10]), int(dat[11])
slice3 = slice(start3, start3 + nToRead3)
fpath = os.path.join(self.parent.nam.ref_dir, fname)
if not os.path.isfile(fpath):
raise MissingFile("cannot find file '%s'" % (fpath,))
h5 = h5py.File(fpath, 'r')
ds = h5[pathInFile]
if nDim == 1:
iar = ds[slice1]
elif nDim == 2:
iar = ds[slice1, slice2]
elif nDim == 3:
iar = ds[slice1, slice2, slice3]
h5.close()
ar[:] = iar.reshape(shape) * cnstnt_val
elif len(control_line) > 20: # FIXED-FORMAT CONTROL LINE
# LOCAT CNSTNT FMTIN IPRN
del res['cntrl'] # control word was not used for fixed-format
try:
res['locat'] = locat = int(control_line[0:10])
res['cnstnt'] = cnstnt = control_line[10:20].strip()
if len(control_line) > 20:
res['fmtin'] = fmtin = control_line[20:40].strip().upper()
if len(control_line) > 40:
res['iprn'] = iprn = control_line[40:50].strip()
except ValueError:
raise ValueError('fixed-format control line not '
'understood: ' + repr(control_line))
if len(control_line) > 50 and 'text' not in res:
res['text'] = first_line[50:].strip()
if locat == 0: # all elements are set equal to cnstnt
ar.fill(cnstnt)
else:
nunit = abs(locat)
if self.parent.nunit == nunit:
obj = self
elif self.parent.nam is None:
obj = self
else:
obj = self.parent.nam[nunit]
if locat < 0:
fmtin = '(BINARY)'
iar = read_array_data(obj, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
else:
raise ValueError('array control line not understood: ' +
repr(control_line))
if 'text' in res:
withtext = ' with text "' + res['text'] + '"'
else:
withtext = ''
self.logger.debug(
'%s:read %r array with shape %s from line %d to %d%s',
self.data_set_num, ar.dtype.char, ar.shape,
startln, self.lineno, withtext)
if return_dict:
return res
else:
return ar
| 2.328125 | 2 |
Generator/DSLNode.py | Dev-Tarek/sketched-webpages-generator | 35 | 12765813 | import sys, os
from .DSL_GRAPH import graph
class DSLNode:
def __init__(self, key, parent):
self.key = key
self.parent = parent
self.children = []
def addChild(self, child):
self.children.append(child)
def render(self, file, level):
if self.key == 'root':
for child in self.children:
child.render(file, level + 1)
return
file.write(level * '\t')
# Write end-token then return
if not len(self.children) and self.key not in list(graph.keys()):
file.write(self.key + '\n')
return
if not len(self.children) and self.key in list(graph.keys()):
return
file.write(self.key + '\n')
file.seek(0, os.SEEK_END)
file.seek(file.tell() - 2, os.SEEK_SET) # On Ubuntu: file.tell() - 1
file.write('{\n')
for child in self.children:
child.render(file, level + 1)
file.write(level * '\t' + '}\n') | 2.859375 | 3 |
python/leetcode/86.py | ParkinWu/leetcode | 0 | 12765814 | <filename>python/leetcode/86.py
# Given a linked list and a value x, partition it such that all nodes less
# than x come before nodes greater than or equal to x.
#
# You should preserve the original relative order of the nodes in each of
# the two partitions.
#
# Example:
#
# Input: head = 1->4->3->2->5->2, x = 3
# Output: 1->2->2->4->3->5
from typing import List
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
s = ""
current = self
s = s + str(current.val)
while current.next:
current = current.next
s = s + " -> "
s = s + str(current.val)
return s
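# Editor's note -- a sketch of the idea the Solution below implements:
# keep two dummy-headed lists (nodes < x, nodes >= x), append each node to
# the matching list in input order, then splice "less" onto "more".
# O(n) time, O(1) extra space, and relative order is preserved.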
class Solution:
def partition(self, head: ListNode, x: int) -> ListNode:
less_header = ListNode(0)
more_header = ListNode(0)
less_p = less_header
more_p = more_header
while head is not None:
if head.val < x:
less_p.next = head
less_p = head
else:
more_p.next = head
more_p = head
head = head.next
less_p.next = more_header.next
more_p.next = None
return less_header.next
def buildList(list: List[int]) -> ListNode:
if len(list) == 0:
return None
head = ListNode(0)
cur = head
for i in list:
cur.next = ListNode(i)
cur = cur.next
return head.next
if __name__ == '__main__':
head = buildList([1, 4, 3])
print(head)
sol = Solution()
l = sol.partition(head, 4)
print(l) | 3.671875 | 4 |
gpkitmodels/GP/aircraft/prop/prop_test.py | aeroa/gpkit-models | 11 | 12765815 | <reponame>aeroa/gpkit-models<gh_stars>10-100
" propeller tests "
from gpkitmodels.GP.aircraft.prop.propeller import Propeller, ActuatorProp
from gpkitmodels.SP.aircraft.prop.propeller import BladeElementProp
from gpkitmodels.GP.aircraft.wing.wing_test import FlightState
from gpkit import units, Model
def simpleprop_test():
" test simple propeller model "
fs = FlightState()
Propeller.flight_model = ActuatorProp
p = Propeller()
pp = p.flight_model(p, fs)
m = Model(1/pp.eta + p.W/(100.*units("lbf"))+ pp.Q/(100.*units("N*m")),
[fs, p, pp])
m.substitutions.update({"rho": 1.225, "V": 50, "T": 100, "omega":1000})
m.solve()
def ME_eta_test():
fs = FlightState()
Propeller.flight_model = BladeElementProp
p = Propeller()
pp = p.flight_model(p,fs)
pp.substitutions[pp.T] = 100
pp.cost = 1./pp.eta + pp.Q/(1000.*units("N*m")) + p.T_m/(1000*units('N'))
sol = pp.localsolve(iteration_limit = 400)
def test():
"tests"
simpleprop_test()
ME_eta_test()
if __name__ == "__main__":
test()
| 2.34375 | 2 |
08-def-type-hints/comparable/mymax_demo.py | hdcpereira/example-code-2e | 1 | 12765816 | from typing import TYPE_CHECKING, List, Optional
import mymax as my
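# Editor's note: each demo below exercises one overload of mymax.max; the
# reveal_type() calls only run under a static checker (e.g. mypy), since
# TYPE_CHECKING is False at runtime.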
def demo_args_list_float() -> None:
args = [2.5, 3.5, 1.5]
expected = 3.5
result = my.max(*args)
print(args, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(expected)
reveal_type(result)
def demo_args_iter_int() -> None:
args = [30, 10, 20]
expected = 30
result = my.max(args)
print(args, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(expected)
reveal_type(result)
def demo_args_iter_str() -> None:
args = iter('banana kiwi mango apple'.split())
expected = 'mango'
result = my.max(args)
print(args, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(expected)
reveal_type(result)
def demo_args_iter_not_comparable_with_key() -> None:
args = [object(), object(), object()]
key = id
expected = max(args, key=id)
result = my.max(args, key=key)
print(args, key, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(key)
reveal_type(expected)
reveal_type(result)
def demo_empty_iterable_with_default() -> None:
args: List[float] = []
default = None
expected = None
result = my.max(args, default=default)
print(args, default, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(default)
reveal_type(expected)
reveal_type(result)
def demo_different_key_return_type() -> None:
args = iter('banana kiwi mango apple'.split())
key = len
expected = 'banana'
result = my.max(args, key=key)
print(args, key, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(key)
reveal_type(expected)
reveal_type(result)
def demo_different_key_none() -> None:
args = iter('banana kiwi mango apple'.split())
key = None
expected = 'mango'
result = my.max(args, key=key)
print(args, key, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(key)
reveal_type(expected)
reveal_type(result)
###################################### intentional type errors
def error_reported_bug() -> None:
# example from https://github.com/python/typeshed/issues/4051
top: Optional[int] = None
try:
my.max(5, top)
except TypeError as exc:
print(exc)
def error_args_iter_not_comparable() -> None:
try:
my.max([None, None])
except TypeError as exc:
print(exc)
def error_single_arg_not_iterable() -> None:
try:
my.max(1)
except TypeError as exc:
print(exc)
def main():
for name, val in globals().items():
if name.startswith('demo') or name.startswith('error'):
print('_' * 20, name)
val()
if __name__ == '__main__':
main() | 3.4375 | 3 |
api/rf_temps/rf_stream.py | barretobrock/server-tools | 1 | 12765817 | <filename>api/rf_temps/rf_stream.py
import subprocess
from kavalkilu import LogWithInflux, HOME_SERVER_HOSTNAME, Hosts
logg = LogWithInflux('rf_stream', log_dir='rf')
serv_ip = Hosts().get_ip_from_host(HOME_SERVER_HOSTNAME)
cmd = ['/usr/local/bin/rtl_433', '-F', f'syslog:{serv_ip}:1433']
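# rtl_433's `-F syslog:<host>:<port>` sink streams decoded RF sensor packets
# as JSON-in-syslog UDP datagrams to the collector on the server.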
logg.info(f'Sending command: {" ".join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
process_output, _ = process.communicate()
logg.debug(f'Process output: {process_output}')
| 2.078125 | 2 |
alchemy/admin_server/external_services.py | edu-gp/annotation_tool | 2 | 12765818 | import logging
from google.cloud import pubsub
from google.cloud import secretmanager
class GCPPubSubService:
_client = None
@classmethod
def get_client(cls):
if cls._client is None:
cls._client = pubsub.PublisherClient()
return cls._client
@classmethod
def publish_message(cls, project_id, topic_name, message_constructor, **kwargs):
topic_path = f"projects/{project_id}/topics/{topic_name}"
message = message_constructor(**kwargs)
future = cls.get_client().publish(topic_path, message.encode("utf-8"))
try:
logging.info(f"Published a message to topic {topic_path}: " f"{message}")
return future.result()
except Exception as e:
logging.error(
f"Publishing to topic {topic_path} has failed with "
f"message {message} with exception: {e}"
)
raise e
class SecretManagerService:
_client = None
@classmethod
def get_client(cls):
if cls._client is None:
cls._client = secretmanager.SecretManagerServiceClient()
return cls._client
@classmethod
def get_secret(cls, project_id, secret_id, version_id="latest"):
name = cls.get_client().secret_version_path(project_id, secret_id, version_id)
response = cls.get_client().access_secret_version(name)
return response.payload.data.decode("UTF-8")
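# Editor's note -- usage sketch; project/topic/secret names are placeholders
# and `json` is assumed imported:
#   GCPPubSubService.publish_message(
#       project_id="my-project", topic_name="events",
#       message_constructor=lambda **kw: json.dumps(kw), payload="hello")
#   secret = SecretManagerService.get_secret("my-project", "db-password")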
| 2.359375 | 2 |
label_studio/storage/dbStorageSource.py | smrandhawa/label-studio | 0 | 12765819 | <reponame>smrandhawa/label-studio
from .base import BaseStorage
import logging
import os
from label_studio.models import Task, Completion, StageRobin
from label_studio import db
from label_studio.utils.io import json_load
from sqlalchemy import func
import json
logger = logging.getLogger(__name__)
# def checkAndgetTrainginTask(userID, batchid):
# q = db.session.query(Task.id).filter(Task.batch_id == batchid, Task.format_type == 1).subquery()
# # Task1 = db.session.query(Completion.task_id).filter(Completion.user_id == userID, Completion.task_id.in_(
# # q)) # .delete(synchronize_session='fetch')
# # q1 = db.session.query(Task.id).filter(Task.batch_id == batchid, Task.format_type == 1).all()
# # for i in q1:
# # print(i)
# # Taskidcompleted = db.session.query(Completion.task_id).filter(Completion.user_id == userID, Completion.task_id.in_(
# # q)).subquery() # .delete(synchronize_session='fetch')
# Taskcount = db.session.query(func.count(Completion.id)).filter(Completion.user_id == userID, Completion.task_id.in_(
# q)).scalar() # .delete(synchronize_session='fetch')
# if Taskcount >= 2:
# print("Here 3", Taskcount)
# w = db.session.query(Completion).filter(Completion.user_id == userID,
# Completion.task_id.in_(q)).all() # .delete(synchronize_session='fetch')
# for r in w:
# oldc = OldCompletion(user_id=r.user_id, task_id=r.task_id, data=r.data, completed_at=r.completed_at)
# db.session.add(oldc)
# db.session.delete(r)
# db.session.commit()
# # nextTask = db.session.query(Task).filter(Task.batch_id==batchid, Task.format_type == 1, Task.id.notin_(Taskidcompleted)).first()
# nextTask = db.session.execute(
# 'SELECT * FROM TrainingTask WHERE batch_id=:batchid and TrainingTask.format_type == 1 and '
# 'id not in (select task_id from completions where user_id = :userID and '
# 'task_id in (select id from TrainingTask where batch_id= :batchid and TrainingTask.format_type == 1) ) order by id',
# {'userID': userID,'batchid':batchid }).first()
# # nextTask = db.session.execute(
# # 'SELECT * FROM TrainingTask WHERE batch_id=:batchid and format_type == 1 ',
# # {'userID': userID, 'batchid': batchid}).first()
# return nextTask
def savestage(id, userID, currentRobinIndex, taskArray, batchid):
try:
if id == -1:
dbrobinstage = StageRobin(user_id= userID, current_robin_index=currentRobinIndex, task_array=taskArray, batch_id=batchid)
db.session.add(dbrobinstage)
db.session.commit()
else:
update_statement = 'UPDATE stage_robin SET current_robin_index = {0} WHERE id= {1}'.format(currentRobinIndex,id)
db.session.execute(update_statement)
db.session.commit()
except Exception as e:
logger.debug("Storage db Error ")
logger.debug(e)
class JsonDBStorage(BaseStorage):
description = 'JSON task file'
def __init__(self, **kwargs):
super(JsonDBStorage, self).__init__(**kwargs)
if not self.importFromFile:
logger.debug("returning flag set")
return
logger.debug("reading from File")
Alltasks = {}
if os.path.exists(self.path):
Alltasks = json_load(self.path, int_keys=True)
# logger.debug(Alltasks)
# logger.debug(type(Alltasks))
if len(Alltasks) != 0:
for i, task in Alltasks.items():
try:
# existing_task = Task.query.filter_by(username=username).first()
# if existing_task is None:
# logger.debug(SubTask)
# for task in SubTask:
# task = Alltasks[SubTask]
# logger.debug(type(task))
# logger.debug(task["data"])
dbtask = Task(text= task["data"]["text"], layout=task["data"]["layout"], groundTruth=task["data"]["groundTruth"])
db.session.add(dbtask)
db.session.commit()
except Exception as e:
logger.debug("Storage db Error 3 ")
logger.debug(e)
# self.data = {}
# elif isinstance(tasks, dict):
# self.data = tasks
# elif isinstance(self.data, list):
# self.data = {int(task['id']): task for task in tasks}
# self._save()
# def _save(self):
# with open(self.path, mode='w', encoding='utf8') as fout:
# json.dump(self.data, fout, ensure_ascii=False)
@property
def readable_path(self):
return self.path
def get(self, id):
existing_task = Task.query.filter_by(id=id).first()
if existing_task is not None:
return existing_task
return None
# return self.data.get(int(id))
def set(self, id, value):
task = self.get(id)
if task is not None:
task.text = value["text"]
task.layout = value["layout"]
task.groundTruth = value["groundTruth"]
# db.session.merge(task)
db.session.commit()
else:
try:
                # note: `task` is None in this branch, so build the row from `value`
                dbtask = Task(id=id, text=value["text"], layout=value["layout"],
                        groundTruth=value["groundTruth"])
db.session.add(dbtask)
db.session.commit()
except Exception as e:
logger.debug("Storage db Error ")
logger.debug(e)
# self.data[int(id)] = value
# self._save()
def __contains__(self, id):
return self.get(id)
# return id in self.data
def set_many(self, ids, values):
for id, value in zip(ids, values):
self.set(id,value)
# self.data[int(id)] = value
# self._save()
def ids(self):
results = db.session.query(Task.id).all()
return [value for value, in results]
# return self.data.keys()
def max_id(self):
return db.session.query(db.func.max(Task.id)).scalar()
# return max(self.ids(), default=-1)
def items(self):
return
# return self.data.items()
# def nextTask(self, userID, traingTask, batchid):
def nextTask(self, userID, taskType, batchid, last_task_id):
# db.session.query()
        print('nextTask called')
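        # taskType routing, as inferred from the queries below:
        #   1-3: round-robin over a per-user batch of 5 admin-annotated tasks
        #   4:   random admin-annotated task this user has not yet completed
        #   5:   prefer never-annotated tasks, then tasks new to this user,
        #        then admin-annotated ones
        #   6:   low-confidence tasks (<80) annotated by other users at stage 5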
nextTask = None
if last_task_id != 0:
nextTask = db.session.execute('SELECT * FROM task WHERE id = :lasttaskid',
{'lasttaskid': last_task_id}).first()
else:
if taskType in (1,2,3):
nexttaskid = None
try:
robinstage = StageRobin.query.filter_by(user_id=userID, batch_id=batchid).first()
if robinstage == None:
randrobin = StageRobin.query.filter_by(batch_id=batchid).first()
if randrobin == None:
tasklist = db.session.execute(
'SELECT id FROM task WHERE id in (select task_id from completions where completions.user_id = 0 and completions.batch_id = :batchid ) and batch_id = :batchid and format_type = :taskType order by RANDOM() LIMIT 5', #random()
{'batchid': batchid, 'taskType': 1}).all()
taskArray = '-'.join([str(tid[0]) for tid in tasklist])
else:
taskArray = randrobin.task_array
nexttaskid = taskArray.split('-')[0]
savestage(-1, userID, 1, taskArray, batchid)
else:
currentRobinIndex = robinstage.current_robin_index
taskArray = robinstage.task_array
id = robinstage.id
nexttaskid = taskArray.split('-')[currentRobinIndex]
currentRobinIndex = currentRobinIndex + 1
currentRobinIndex = currentRobinIndex % 5
savestage(id, userID, currentRobinIndex, taskArray, batchid)
if nexttaskid is not None:
nextTask = db.session.execute(
'SELECT * FROM task WHERE id = :nexttaskid',
{'nexttaskid': nexttaskid}).first()
except Exception as e:
print('Problem occured in getting task for first two stages. Here is the exception.')
print(e)
if taskType == 4:
nextTask = db.session.execute(
'SELECT * FROM task WHERE id in (select task_id from completions where completions.user_id = 0 and completions.batch_id = :batchid ) and id not in (select task_id from completions where user_id = :userID and completions.batch_id = :batchid) and batch_id = :batchid and format_type = :taskType order by random() LIMIT 1', #random()
{'userID': userID, 'batchid': batchid, 'taskType': 1}).first()
elif taskType == 5:
#check first tasks which are not ever done by any users or admin
query = 'SELECT * FROM task WHERE id not in (select task_id from completions where completions.batch_id = {0} ) and batch_id = {0} and format_type = {1} order by random() LIMIT 1'.format(batchid,1)
nextTask = db.session.execute(query).first()
print(query)
if nextTask is None:
# check task which is not done by admin but only other users
query = 'SELECT * FROM task WHERE id not in (select task_id from completions where completions.user_id = 0 and completions.batch_id = {0} ) and id not in (select task_id from completions where user_id = {1} and completions.batch_id = {0}) and batch_id = {0} and format_type = {2} order by random() LIMIT 1'.format(batchid,userID,1)
nextTask = db.session.execute(query).first()
print(query)
if nextTask is None:
# check task which with admin completions
query = 'SELECT * FROM task WHERE id in (select task_id from completions where completions.user_id = 0 and completions.batch_id = {0} ) and id not in (select task_id from completions where user_id = {1} and completions.batch_id = {0}) and batch_id = {0} and format_type = {2} order by random() LIMIT 1'.format(batchid,userID,1)
nextTask = db.session.execute(query).first()
print(query)
elif taskType == 6:
query = 'SELECT * FROM task WHERE id not in (select task_id from completions where user_id = {0} and batch_id = {1} ) and id in (select task_id from completions where user_id != 0 and batch_id = {1} and format_type = 5) and batch_id = {1} and confidence_score < 80 order by confidence_score ASC LIMIT 1'.format(userID,batchid)
nextTask = db.session.execute(query).first()
print(query)
        # TODO: if the completion is empty, re-select the task
if nextTask is None:
return None
dictTask = dict(dict(nextTask).items())
completion_data = None
if taskType == 6:
completion_data = db.session.execute(
'select id,task_id,data,completed_at from completions where task_id = :id and user_id != 0 and format_type = 5 order by accuracy_rank ASC',
{'id': nextTask.id}).first()
elif taskType in (1,2,3):
completion_data = db.session.execute(
'select id,task_id,data,completed_at from completions where task_id = :id and user_id = 0',
{'id': nextTask.id}).first()
if completion_data is not None:
completionData = json.loads(completion_data.data)
completionData['id'] = completion_data.id
# logger.debug(json.dumps(completionData, indent=2))
dictTask["completions"] = [completionData] # [json.loads(completion.data)]
dictTask['completed_at'] = completion_data.completed_at
return dictTask
def remove(self, key):
task = self.get(int(key))
if task is not None:
db.session.delete(task)
# self.data.pop(int(key), None)
# self._save()
def remove_all(self):
return
# self.data = {}
# self._save()
def empty(self):
return False
# return len(self.data) == 0
def sync(self):
pass
| 1.960938 | 2 |
see_rnn/utils.py | MichaelHopwood/MLMassSpectrom | 149 | 12765820 | <filename>see_rnn/utils.py
import numpy as np
from copy import deepcopy
from pathlib import Path
from ._backend import WARN, NOTE, TF_KERAS, Layer
try:
import tensorflow as tf
except:
pass # handled in __init__ via _backend.py
TF24plus = bool(float(tf.__version__[:3]) > 2.3)
def _kw_from_configs(configs, defaults):
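    # Merge user-supplied `configs` into `defaults` without mutating either:
    # unknown top-level keys raise ValueError; missing sub-keys fall back
    # to the defaults.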
def _fill_absent_defaults(kw, defaults):
# override `defaults`, but keep those not in `configs`
for name, _dict in defaults.items():
if name not in kw:
kw[name] = _dict
else:
for k, v in _dict.items():
if k not in kw[name]:
kw[name][k] = v
return kw
configs = configs or {}
configs = deepcopy(configs) # ensure external dict unchanged
for key in configs:
if key not in defaults:
raise ValueError(f"unexpected `configs` key: {key}; "
"supported are: %s" % ', '.join(list(defaults)))
kw = deepcopy(configs) # ensure external dict unchanged
# override `defaults`, but keep those not in `configs`
kw = _fill_absent_defaults(configs, defaults)
return kw
def _validate_args(_id, layer=None):
def _ensure_list(_id, layer):
# if None, leave as-is
_ids, layer = [[x] if not isinstance(x, (list, type(None))) else x
for x in (_id, layer)]
# ensure external lists unaffected
_ids, layer = [x.copy() if isinstance(x, list) else x
for x in (_ids, layer)]
return _ids, layer
def _ids_to_names_and_idxs(_ids):
names, idxs = [], []
for _id in _ids:
if not isinstance(_id, (str, int, tuple)):
tp = type(_id).__name__
raise ValueError("unsupported _id list element type: %s" % tp
+ "; supported are: str, int, tuple")
if isinstance(_id, str):
names.append(_id)
else:
if isinstance(_id, int):
idxs.append(_id)
else:
assert all(isinstance(x, int) for x in _id)
idxs.append(_id)
return names or None, idxs or None
def _one_requested(_ids, layer):
return len(layer or _ids) == 1 # give `layer` precedence
if _id and layer:
print(WARN, "`layer` will override `_id`")
_ids, layer = _ensure_list(_id, layer)
if _ids is None:
names, idxs = None, None
else:
names, idxs = _ids_to_names_and_idxs(_ids)
return names, idxs, layer, _one_requested(_ids, layer)
def _process_rnn_args(model, _id, layer, input_data, labels, mode,
data=None, norm=None):
"""Helper method to validate `input_data` & `labels` dims, layer info args,
`mode` arg, and fetch various pertinent RNN attributes.
"""
from .inspect_gen import get_layer, get_gradients
from .inspect_rnn import get_rnn_weights
def _validate_args_(_id, layer, input_data, labels, mode, norm, data):
_validate_args(_id, layer)
if data is not None:
got_inputs = (input_data is not None) or (labels is not None)
if got_inputs:
print(NOTE, "`data` will override `input_data`, `labels`, "
"and `mode`")
if not isinstance(data, list):
raise Exception("`data` must be a list of kernel & gate matrices")
if not (isinstance(data[0], np.ndarray) or isinstance(data[0], list)):
raise Exception("`data` list elements must be numpy arrays "
+ "or lists")
elif isinstance(data[0], list):
if not isinstance(data[0][0], np.ndarray):
raise Exception("`data` list elements' elements must be "
+ "numpy arrays")
if mode not in ['weights', 'grads']:
raise Exception("`mode` must be one of: 'weights', 'grads'")
if mode == 'grads' and (input_data is None or labels is None):
raise Exception("must supply input_data and labels for mode=='grads'")
if mode == 'weights' and (input_data is not None or labels is not None):
print(NOTE, "`input_data` and `labels will` be ignored for "
"`mode`=='weights'")
is_iter = (isinstance(norm, list) or isinstance(norm, tuple) or
isinstance(norm, np.ndarray))
is_iter_len2 = is_iter and len(norm)==2
if (norm is not None) and (norm != 'auto') and not is_iter_len2:
raise Exception("`norm` must be None, 'auto' or iterable ( "
+ "list, tuple, np.ndarray) of length 2")
_validate_args_(_id, layer, input_data, labels, mode, norm, data)
if layer is None:
layer = get_layer(model, _id)
rnn_type = _validate_rnn_type(layer, return_value=True)
gate_names = _rnn_gate_names(rnn_type)
n_gates = len(gate_names)
is_bidir = hasattr(layer, 'backward_layer')
rnn_dim = layer.layer.units if is_bidir else layer.units
direction_names = ['FORWARD', 'BACKWARD'] if is_bidir else [[]]
if 'CuDNN' in rnn_type:
uses_bias = True
else:
uses_bias = layer.layer.use_bias if is_bidir else layer.use_bias
if data is None:
if mode=='weights':
data = get_rnn_weights(model, _id, as_tensors=False,
concat_gates=True)
else:
data = get_gradients(model, None, input_data, labels,
layer=layer, mode='weights')
rnn_info = dict(rnn_type=rnn_type, gate_names=gate_names,
n_gates=n_gates, is_bidir=is_bidir,
rnn_dim=rnn_dim, uses_bias=uses_bias,
direction_names=direction_names)
return data, rnn_info
def _validate_rnn_type(rnn_layer, return_value=False):
if hasattr(rnn_layer, 'backward_layer'):
rnn_type = type(rnn_layer.layer).__name__
else:
rnn_type = type(rnn_layer).__name__
supported_rnns = ['LSTM', 'GRU', 'CuDNNLSTM', 'CuDNNGRU',
'SimpleRNN', 'IndRNN']
if rnn_type not in supported_rnns:
raise Exception("unsupported RNN type `%s` - must be one of: %s" % (
rnn_type, ', '.join(supported_rnns)))
if return_value:
return rnn_type
def _rnn_gate_names(rnn_type):
return {'LSTM': ['INPUT', 'FORGET', 'CELL', 'OUTPUT'],
'GRU': ['UPDATE', 'RESET', 'NEW'],
'CuDNNLSTM': ['INPUT', 'FORGET', 'CELL', 'OUTPUT'],
'CuDNNGRU': ['UPDATE', 'RESET', 'NEW'],
'SimpleRNN': [''],
'IndRNN': [''],
}[rnn_type]
def _filter_duplicates_by_keys(keys, *data):
def _second_index(ls, k):
return [i for i, x in enumerate(ls) if x == k][1]
collected = []
for k in keys:
if k in collected:
for i in range(len(data)):
data[i].pop(_second_index(keys, k))
keys.pop(keys.index(k))
collected.append(k)
if isinstance(data, tuple) and len(data) == 1:
data = data[0]
return keys, data
def _save_rnn_fig(figs, savepath, kwargs):
if len(figs) == 1:
figs[0].savefig(savepath)
return
_dir = str(Path(savepath).parent)
ext = Path(savepath).suffix
basename = Path(savepath).stem
names = [basename + '_0', basename + '_1']
for fig, name in zip(figs, names):
fig.savefig(Path(_dir).joinpath(name, ext), **kwargs)
def _layer_of_output(output):
h = output._keras_history
if isinstance(h, tuple):
for x in h:
if isinstance(x, Layer):
return x
return h.layer
def clipnums(nums):
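    # Format numbers compactly for display: integers verbatim, moderate
    # magnitudes to 3 decimals, extremes in trimmed scientific notation.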
if not isinstance(nums, (list, tuple)):
nums = [nums]
clipped = []
for num in nums:
if isinstance(num, int) or (isinstance(num, float) and num.is_integer()):
clipped.append(str(int(num)))
elif abs(num) > 1e-3 and abs(num) < 1e3:
clipped.append("%.3f" % num)
else:
clipped.append(("%.2e" % num).replace("+0", "+").replace("-0", "-"))
return clipped if len(clipped) > 1 else clipped[0]
def _get_params(model, layers=None, params=None, mode='outputs', verbose=1):
def _validate_args(layers, params, mode):
got_both = (layers is not None and params is not None)
got_neither = (layers is None and params is None)
if got_both or got_neither:
raise ValueError("one (and only one) of `layers` or `params` "
"must be supplied")
if mode not in ('outputs', 'weights'):
raise ValueError("`mode` must be one of: 'outputs', 'weights'")
if layers is not None and not isinstance(layers, list):
layers = [layers]
if params is not None and not isinstance(params, list):
params = [params]
return layers, params
def _filter_params(params, verbose):
def _to_omit(p):
if isinstance(p, tf.Variable): # param is layer weight
return False
elif tf.is_tensor(p): # param is layer output
layer = _layer_of_output(p)
if (TF_KERAS or tf.__version__[0] == '2'
) and hasattr(layer, 'activation'):
# these activations don't have gradients defined (or ==0),
# and tf.keras doesn't re-route output gradients
# to the pre-activation weights transform
value = getattr(layer.activation, '__name__', '').lower() in (
'softmax',)
if value and verbose:
print(WARN, ("{} has {} activation, which has a None "
"gradient in tf.keras; will skip".format(
layer, layer.activation.__name__)))
return value
elif 'Input' in getattr(layer.__class__, '__name__'):
# omit input layer(s)
if verbose:
print(WARN, layer, "is an Input layer; getting input "
"gradients is unsupported - will skip")
return True
else:
return False
else:
raise ValueError(("unsupported param type: {} ({}); must be"
"tf.Variable or tf.Tensor".format(type(p), p)))
_params = []
for p in params:
if not _to_omit(p):
_params.append(p)
return _params
# run check even if `params` is not None to couple `_get_params` with
# `_validate_args` for other methods
layers, params = _validate_args(layers, params, mode)
if not params:
if mode == 'outputs':
params = [l.output for l in layers]
else:
params = [w for l in layers for w in l.trainable_weights]
params = _filter_params(params, verbose)
return params
def is_tensor(x):
return (tf.is_tensor(x) if TF24plus else
isinstance(x, tf.Tensor))
| 2.125 | 2 |
FigureTable/ChordDiagram/corre.py | vkola-lab/multi-task | 0 | 12765821 | import numpy as np
import scipy
import scipy.stats
import csv
scores = np.load('regional_avgScore_nAD.npy')
print(scores.shape)
pool = [[0 for _ in range(scores.shape[1])] for _ in range(scores.shape[1])]
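# Build a symmetric matrix of pairwise Pearson correlations between regions.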
for i in range(scores.shape[1]-1):
for j in range(i+1, scores.shape[1]):
corr, _ = scipy.stats.pearsonr(scores[:, i], scores[:, j])
pool[i][j] = corr
pool[j][i] = corr
print(pool)
regions = \
['hippoR',
'hippoL',
'tempoR',
'tempoL',
'cerebeR',
'cerebeL',
'brainstem',
'insulaR',
'insulaL',
'occiR',
'occiL',
'frontR',
'frontL',
'parieR',
'parieL',
'ventri']
with open('nAD_correlation.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow([''] + regions)
for i in range(len(regions)):
spamwriter.writerow([regions[i]] + pool[i])
| 2.5625 | 3 |
MetaScreener/external_sw/mgltools/MGLToolsPckgs/AutoDockTools/GridParameters.py | bio-hpc/metascreener | 8 | 12765822 | <gh_stars>1-10
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#############################################################################
#
# Author: <NAME>, <NAME>
#
# Copyright: <NAME> TSRI 2000
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/GridParameters.py,v 1.41 2014/03/24 20:42:02 rhuey Exp $
#
#
# $Id: GridParameters.py,v 1.41 2014/03/24 20:42:02 rhuey Exp $
#
#
#
#
from energyConstants import Rij, epsij, SolVol, SolPar, SolCon
import UserDict
import string, os.path, sys, types, glob
from MolKit import Read
from AutoDockTools.atomTypeTools import AutoDock4_AtomTyper
import numpy.oldnumeric as Numeric
from math import ceil
grid_parameter_list = [
'receptor',
'gridfld',
'npts',
'spacing',
'gridcenter',
'types',
'smooth',
'map',
'elecmap',
'dielectric',
'fmap'
]
grid_parameter_list4= [
'npts',
'custom_parameter_file',
'gridfld',
'spacing',
'receptor_types',
'ligand_types',
'receptor',
'gridcenter',
'smooth',
'map',
'elecmap',
'dsolvmap',
'dielectric4',
]
class GridParameters(UserDict.UserDict):
def __init__(self, receptor_filename='', ligand_filename=''):
UserDict.UserDict.__init__(self)
basename = os.path.basename(receptor_filename)
self.receptor_filename = basename
self.receptor_stem = os.path.splitext(basename)[0]
#self.receptor_stem = basename[:string.rfind(basename, '.')]
# if the grid parameters have been read from a file,
# then the following instance variables will be set:
self.gpf_filename = ''
self.gpf_written_filename = ''
self.file_params = []
# begin dictionary
self[ 'constant' ] = {
'keyword' : 'constant' ,
'default' : [],
'comment' : "grid map constant energy",
'value' : []
}
self[ 'covalent_coords' ] = {
'keyword' : 'covalent_coords' ,
'default' : [],
'comment' : "covalent_coords",
'value' : []
}
self[ 'covalent_constant' ] = {
'keyword' : 'covalent_constant' ,
'default' : -1.780,
'comment' : "covalent_constant",
'value' : -1.780
}
self[ 'covalent_energy_barrier' ] = {
'keyword' : 'covalent_energy_barrier' ,
'default' : 1000.,
'comment' : "covalent_energy barrier height",
'value' : 1000.
}
self[ 'covalent_half_width' ] = {
'keyword' : 'covalent_half_width' ,
'default' : 5.0,
'comment' : "covalent_half_width ",
'value' : 5.0
}
self[ 'covalentmap' ] = {
'keyword' : 'covalentmap' ,
'default' : 0,
'comment' : "covalent map",
'value' : 0
}
self[ 'dielectric' ] = {
'keyword' : 'dielectric' ,
'default' : -.1146,
'comment' : "<0, distance-dep.diel;>0, constant",
'value' : -.1146
}
self[ 'dielectric4' ] = {
'keyword' : 'dielectric' ,
'default' : -.1465, #new sept/29/05:value from august recal
'comment' : "<0, AD4 distance-dep.diel;>0, constant",
'value' : -.1465 #new sept/29/05:value from august recal
}
self[ 'dsolvmap' ] = {
'keyword' : 'dsolvmap' ,
'default' : self.receptor_stem + '.d.map',
'comment' : "desolvation potential map",
'value' : self.receptor_stem + '.d.map'
}
self[ 'elecmap' ] = {
'keyword' : 'elecmap' ,
'default' : self.receptor_stem + '.e.map',
'comment' : "electrostatic potential map",
'value' : self.receptor_stem + '.e.map'
}
self[ 'fmap' ] = {
'keyword' : 'fmap' ,
'default' : 0,
'comment' : "floating point potential gridmap",
'value' : 0
}
self[ 'gridcenter' ] = {
'keyword' : 'gridcenter' ,
'default' : 'auto',
'comment' : "xyz-coordinates or auto",
'value' : 'auto'
}
self[ 'gridcenterAuto' ] = {
'keyword' : 'gridcenterAuto' ,
'default' : 1,
'comment' : "xyz-coordinates or auto",
'value' : 1
}
self[ 'gridfld' ] = {
'keyword' : 'gridfld' ,
'default' : self.receptor_stem + '.maps.fld',
'comment' : "grid_data_file",
'value' : self.receptor_stem + '.maps.fld'
}
self[ 'ligand_types' ] = {
'keyword' : 'ligand_types',
'default' : 'A C HD N NA OA SA' ,
'comment' : "ligand atom types",
'value' : 'A C HD N NA OA SA' ,
}
self[ 'map' ] = {
'keyword' : 'map' ,
'default' : "",
'comment' : "atom-specific affinity map",
'value' : ""
}
self[ 'mset' ] = {
'keyword' : 'mset' ,
'default' : "CNOSHHH",
'comment' : "atom-specific affinity map",
'value' : "CNOSHHH"
}
self[ 'nbp_r_eps' ] = {
'keyword' : 'nbp_r_eps' ,
'default' : [],
'comment' : "lj",
'value' : []
}
self[ 'NHB' ] = {
'keyword' : 'NHB' ,
'default' : 1,
'comment' : 'model N-H hydrogen bonds',
'value' : 1
}
self[ 'npts' ] = {
'keyword' : 'npts' ,
'default' : [40,40,40],
'comment' : "num.grid points in xyz",
'value' : [40,40,40]
}
self[ 'OHB' ] = {
'keyword' : 'OHB' ,
'default' : 1,
'comment' : 'model O-H hydrogen bonds',
'value' : 1
}
self[ 'custom_parameter_file' ] = {
'keyword' : 'custom_parameter_file' ,
'default' : 0,
'comment' : "use custom parameter library",
'value' : 0,
}
self[ 'parameter_file' ] = {
'keyword' : 'parameter_file' ,
'default' : 'AD4_parameters.dat',
'comment' : "force field default parameter file",
'value' : 'AD4_parameters.dat',
}
self[ 'receptor' ] = {
'keyword' : 'receptor' ,
'default' : self.receptor_stem + '.pdbqs',
'comment' : "macromolecule",
'value' : self.receptor_stem + '.pdbqs',
}
self[ 'receptor_types' ] = {
'keyword' : 'receptor_types',
'default' : 'A C HD N NA OA SA' ,
'comment' : "receptor atom types",
'value' : 'A C HD N NA OA SA' ,
}
self[ 'SHB' ] = {
'keyword' : 'SHB' ,
'default' : 1,
'comment' : 'model S-H hydrogen bonds',
'value' : 1
}
self[ 'smooth' ] = {
'keyword' : 'smooth' ,
'default' : 0.5,
'comment' : "store minimum energy w/in rad(A)",
'value' : 0.5
}
self[ 'sol_par' ] = {
'keyword' : 'sol_par' ,
'default' : [],
'comment' : "atomic fragmental volumen, solvation parm",
'value' : []
}
self[ 'spacing' ] = {
'keyword' : 'spacing' ,
'default' : 0.375,
'comment' : "spacing(A)",
'value' : 0.375
}
self[ 'types' ] = {
'keyword' : 'types' ,
'default' : 'CAONSH',
'comment' : "atom type names",
'value' : 'CAONSH',
}
# end dictionary
self.set_receptor(receptor_filename) # also sets self.receptor_stem
self.set_ligand(ligand_filename)
self.boolean_param_list = [
'covalentmap' ,
'fmap' ,
]
# end __init__
def set_ligand(self, ligand_filename):
self.ligand_filename = os.path.basename(ligand_filename)
#this should set types
def set_ligand_types3(self, ligand_types4):
d = {}
for t in ligand_types4:
if len(t)==1:
d[t] = 1
elif t[1] in ['A','D']: #NA,SA,OA,HD
d[t[0]] = 1
elif t in ['Cl','CL','cl']: #special case: chlorine
d['c'] = 1
elif t in ['Br','BR','br']: #special case: bromine
d['b'] = 1
elif t in ['Fe','FE','fe']: #special case: iron
d['f'] = 1
else:
print "unrecognized ligand_atom_type:", t
all_types = d.keys()
all_types.sort()
type_str = all_types[0]
for t in all_types[1:]:
type_str = type_str + t
self['types']['value'] = type_str
def set_receptor(self, receptor_filename):
basename = os.path.basename(receptor_filename)
self.receptor_filename = basename
self.receptor_stem = os.path.splitext(basename)[0]
#self.receptor_stem = basename[:string.rfind(basename, '.')]
if receptor_filename!='':
self['receptor']['value'] = basename
self['gridfld']['value'] = self.receptor_stem + '.maps.fld'
self['elecmap']['value'] = self.receptor_stem + '.e.map'
#
# read methods
#
def read(self, filename):
"""Read from and set the current state according to the file.
"""
self.gpf_filename = filename
gpf_ptr = open(filename)
lines = gpf_ptr.readlines()
gpf_ptr.close()
self.file_params = []
checkedTypes = []
extraLigandTypes = []
keys = self.keys()
for line in lines:
words = string.split(string.replace(line, '\t', ' '))
if words!=[] and words[0][0]!='#':
p = words[0]
if p not in keys:
print "WARNING: unrecognized parameter in ", filename, ":\n", p
continue
# maintain a list of the parameters read from the file
if self.file_params==[] or p!=self.file_params[-1]:
self.file_params.append(p)
# parse the line
l = len(words)
for i in range(l):
if words[i][0]=='#':
l = i
break
values = words[1:l]
if ((len(values)==1) and
(type(self[p]['default'])!=types.ListType)):
self[p]['value'] = self._get_val(values[0])
if words[0]=='types':
#in this case have to set flags for possible new type
extraLigandTypes = self.checkLigTypes(values[0])
elif words[0]=='ligand_types':
self[p]['value'] = string.join(words[1:l])
elif words[0]=='receptor_types':
self[p]['value'] = string.join(words[1:l])
elif words[0]=='covalentmap':
#in this case set:
#covalent_ coords,constant,energy_barrier,half_width
self['covalentmap']['value'] = 1
self['covalent_half_width']['value'] = float(values[0])
self['covalent_energy_barrier']['value'] = float(values[1])
self['covalent_coords']['value'] = [float(values[2]),\
float(values[3]), float(values[4])]
self[p]['value'] = []
elif words[0]=='nbp_r_eps':
#in this case have to check for nhb,ohb,shb +mset
#in this case have to check for new type constants
ptype = words[-1]
if len(words[l])==1: keyWord = words[l+1]
else:
keyWord = words[l][1:]
mtype = string.split(keyWord,'-')[0]
ntype = string.split(keyWord,'-')[1]
if mtype in checkedTypes:
continue
if mtype in ['N','O','S'] and ntype =='H':
#check for 12 6 vs 12 10 here
ind = mtype+'HB'
if values[3]=='10':
self[ind]['value'] = 1
else:
self[ind]['value'] = 0
checkedTypes.append(mtype)
if mtype in extraLigandTypes:
i = ptype+mtype+ntype
Rij[i] = float(words[1])
epsij[i] = float(words[2])
elif words[0]=='sol_par':
if len(words[l])==1: mtype = words[l+1]
else: mtype = words[l][1]
if mtype in extraLigandTypes:
SolVol[mtype]= float(values[0])
SolPar[mtype]= float(values[1])
elif words[0]=='constant':
if len(words[l])==1: mtype = words[l+1]
else: mtype = words[l][1]
SolCon[mtype]= float(values[0])
elif words[0]=='gridcenter' and l>1:
#need to convert to float
newvalue=[float(values[0]),float(values[1]),float(values[2])]
self['gridcenterAuto']['value'] = 0
self[p]['value'] = newvalue
else:
self[p]['value'] = []
for v in values:
self[p]['value'].append( self._get_val(v))
def checkLigTypes(self, typeStr):
extraLigandTypes = []
for t in typeStr:
if t not in ['C','A','N','O','S','H','P','n',\
'f','F','c','b','I','M']:
extraLigandTypes.append(t)
return extraLigandTypes
def _get_val(self, val_str):
try:
return int(val_str)
except ValueError:
pass
try:
return float(val_str)
except ValueError:
pass
if type(val_str)==types.StringType:
return val_str
else:
raise NotImplementedError, "value: %s of unsupport type %s" % (val_str, type(val_str).__name__)
def read4(self, filename):
"""Read from and set the current state according to the AutoGrid4 file.
"""
self.gpf_filename = filename
gpf_ptr = open(filename)
lines = gpf_ptr.readlines()
gpf_ptr.close()
keys = self.keys()
self.file_params = []
for line in lines:
#print "reading ", line
words = string.split(string.replace(line, '\t', ' '))
#print "words=", words
if words!=[] and words[0][0]!='#':
p = words[0]
if p not in keys:
print "WARNING: unrecognized parameter in ", filename, ":\n", p
continue
#print "p=", p
# maintain a list of the parameters read from the file
if self.file_params==[] or p!=self.file_params[-1]:
self.file_params.append(p)
# parse the line
l = len(words)
for i in range(l):
if words[i][0]=='#':
l = i
break
values = words[1:l]
if p=='parameter_file':
self['custom_parameter_file']['value'] = 1
self['parameter_file']['value'] = values[0]
elif ((len(values)==1) and
(type(self[p]['default'])!=types.ListType)):
self[p]['value'] = self._get_val(values[0])
#print " value=", self[p]['value']
#if words[0]=='types':
# #in this case have to set flags for possible new type
# extraLigandTypes = self.checkLigTypes(values[0])
#setting dielectric from a gpf is no longer supported
#instead must be set in a parameter library file
#elif p=='dielectric':
# self['dielectric4']['value'] = self._get_val(values[0])
elif p=='ligand_types':
self['ligand_types']['value'] = string.join(words[1:l])
elif p=='receptor_types':
self['receptor_types']['value'] = string.join(words[1:l])
elif words[0]=='covalentmap':
#in this case set:
#covalent_ coords,constant,energy_barrier,half_width
self['covalentmap']['value'] = 1
self['covalent_half_width']['value'] = float(values[1])
self['covalent_energy_barrier']['value'] = float(values[2])
self['covalent_coords']['value'] = [float(values[3]),\
float(values[4]), float(values[5])]
self[p]['value'] = []
elif words[0]=='gridcenter' and l>1:
#need to convert to float
newvalue=[float(values[0]),float(values[1]),float(values[2])]
self['gridcenterAuto']['value'] = 0
self[p]['value'] = newvalue
else:
#print "in else for ", p
self[p]['value'] = []
for v in values:
self[p]['value'].append( self._get_val(v))
#
# write methods
#
def write(self, filename, param_list):
"""Write the current state to a file
file is a writeable file
param_list is a list of parameter strings.
For best results use the parameter_lists supplied by this class.
"""
if filename=='':
gpf_ptr = sys.stdout
else:
gpf_ptr = open(filename, 'w')
types = self['types']['value']
#FIX THIS:
macroTypes = self['mset']['value']
#macroTypes = ['C','N','O','S','H','H','H']
for p in param_list:
# maps are a special case
if p=='map':
#hpos = 'H' in types
for a in types:
gpf_ptr.write(self.make_map_string(p, a))
for t in macroTypes:
self.write_map_nbp(a, t, gpf_ptr)
#self.write_map_nbp(a, t, hpos, gpf_ptr)
self.write_constants(a,gpf_ptr)
# all the other parameters handle themselves
elif p=='gridcenter' and self['gridcenterAuto']['value']==1:
#if gridcenterAuto is true, reset p to 'auto' and write it
self['gridcenter']['value']='auto'
gpf_ptr.write( self.make_param_string(p))
elif p=='fmap' and self['fmap']['value']:
gpf_ptr.write( self.make_map_string(p,'f'))
elif p=='covalentmap' and len(self['covalent_coords']['value']):
gpf_ptr.write( self.make_covalentmap_string())
else:
gpf_ptr.write( self.make_param_string(p))
if gpf_ptr!=sys.stdout:
gpf_ptr.close()
self.gpf_filename = filename
self.gpf_written_filename = filename
def write_constants(self, a, gpf_ptr):
try:
outstring = 'sol_par %5.2f %6.4f'%(SolVol[a],SolPar[a])+ \
' # ' + a+ ' atomic fragmental volume, solvation parameters\n'
except KeyError:
outstring = 'sol_par 0.000 0.000 #' \
+ a+ ' atomic fragmental volume, solvation parameters\n'
gpf_ptr.write(outstring)
try:
outstring = 'constant %5.3f '%SolCon[a]+ \
' # ' + a+ ' grid map constant energy\n'
except KeyError:
outstring = 'constant 0.000 #' + a+ ' grid map constant energy\n'
gpf_ptr.write(outstring)
def write_map_nbp(self, a, t, gpf_ptr):
hbset = []
for item in ['N','O','S']:
ind = item +'HB'
if self[ind]['value']:
hbset.append(item)
if (a in hbset and t=='H') or (a=='H' and t in hbset):
#if hpos and ((a in hbset and t=='H') or (a=='H' and t in hbset)):
string_start = 'hb'
string_nums = '12 10 # '
else:
string_start = 'lj'
string_nums = '12 6 # '
z = string_start + a + t
try:
outstring = 'nbp_r_eps %5.2f %9.7f '%(Rij[z],epsij[z])\
+string_nums + a +'-' + t + " " + string_start +'\n'
except KeyError:
outstring = 'nbp_r_eps 0.00 0.0000000 '\
+string_nums + a +'-' + t + " " + string_start +'\n'
gpf_ptr.write(outstring)
def make_param_string(self, param):
"""return the output string for the given param using the value
and comment entries in its dictionary.
"""
p = self[param]
vt = type(p['value'])
if param in self.boolean_param_list:
if not p['value']:
return "#\n"
else:
val_str = ""
elif ((vt==types.IntType) or
(vt==types.FloatType) or
(vt==types.StringType)):
val_str = str(p['value'])
elif ((vt==types.ListType) or
(vt==types.TupleType)):
val_str = ""
for v in p['value']:
val_str = val_str + str(v) + " "
else:
raise NotImplementedError, "type (%s) of parameter %s unsupported" % (vt.__name__, param)
return self._make_string(p, val_str)
def make_intnbp_r_eps_string(self, atom1, atom2):
p = self[ 'intnbp_r_eps' ]
index = "lj" + atom1 + atom2
val_str = "%5.2f %9.7f 12 6" % (Rij[index], epsij[index])
p['comment'] = "%s-%s lj" % (atom1, atom2)
return self._make_string(p, val_str)
def make_map_string(self, param, type):
p = self[param]
val_str = self.receptor_stem + ".%s.map" % (type)
return self._make_string(p, val_str)
def make_covalentmap_string(self):
s = 'covalentmap ' + self['covalent_half_width']['value'] + ' '
s = s + self['covalent_energy_barrier']['value'] + ' '
s = s + self['covalent_coords']['value'] + '\n'
return s
def _make_string(self, p, val_str):
#fix 1/2013 for bug report:
#map bbbb_B99990001_mod_rigid.maps.fld# grid_data file
return "%s %s%s # %s\n" % (p['keyword'],
val_str,
" "*(35 -(len(p['keyword'])+len(val_str))),
p['comment'])
#AD4
def set_ligand4(self, ligand_filename, types=None):
#this should set ligand_types
#print "in set_ligand4: types=", types
ftype = os.path.splitext(ligand_filename)[-1]
if ftype!=".pdbqt":
print "ligand_filename must be in pdbqt format"
return "invalid input"
self.ligand = Read(ligand_filename)[0]
ligand = self.ligand
ligand.buildBondsByDistance()
if types is None:
types = " ".join(list(set(ligand.allAtoms.autodock_element)))
self['ligand_types']['value'] = types
#print "set_ligand4: self['ligand_types']['value']=", self['ligand_types']['value']
self.ligand_filename = os.path.basename(ligand_filename)
self.ligand_stem = os.path.splitext(self.ligand_filename)[0]
#print "GPO: set ligand_filename to ", self.ligand_filename
def set_receptor4(self, receptor_filename, types=None):
#this should set receptor_types
ftype = os.path.splitext(receptor_filename)[-1]
if ftype!=".pdbqt":
print "receptor_filename must be in pdbqt format"
return "invalid input"
self.receptor = Read(receptor_filename)[0]
receptor = self.receptor
if types is None:
types = " ".join(list(set(receptor.allAtoms.autodock_element)))
self['receptor_types']['value'] = types
basename = os.path.basename(receptor_filename)
self.receptor_filename = basename
self.receptor_stem = os.path.splitext(basename)[0]
if receptor_filename!='':
self['receptor']['value'] = basename
self['gridfld']['value'] = self.receptor_stem + '.maps.fld'
self['elecmap']['value'] = self.receptor_stem + '.e.map'
self['dsolvmap']['value'] = self.receptor_stem + '.d.map'
def write4(self, filename, param_list=grid_parameter_list4):
"""Write the current state to a file for AutoGrid4
file is a writeable file
param_list is a list of parameter strings.
For best results use the parameter_lists supplied by this class.
"""
if filename=='':
gpf_ptr = sys.stdout
else:
gpf_ptr = open(filename, 'w')
for p in param_list:
if p=='custom_parameter_file':
if self['custom_parameter_file']['value']:
#self['parameter_file']['value'] = 'AD4_parameters.dat'
gpf_ptr.write( self.make_param_string('parameter_file'))
elif p=='map':
# maps are a special case
for s in string.split(self['ligand_types']['value']):
gpf_ptr.write(self.make_map_string(p, s))
# all the other parameters handle themselves
elif p=='gridcenter' and self['gridcenterAuto']['value']==1:
#if gridcenterAuto is true, reset p to 'auto' and write it
self['gridcenter']['value']='auto'
gpf_ptr.write( self.make_param_string(p))
elif p=='dsolvmap':
outstring = "dsolvmap %s # desolvation potential map\n" %self['dsolvmap']['value']
gpf_ptr.write(outstring)
elif p=='dielectric4':
#now dielectric value can only be set in parameter file
#val = self['dielectric4']['value']
outstring = 'dielectric -0.1465 # <0, AD4 distance-dep.diel;>0, constant\n'
gpf_ptr.write(outstring)
elif p=='covalentmap' and len(self['covalent_coords']['value']):
gpf_ptr.write( self.make_covalentmap_string())
else:
gpf_ptr.write( self.make_param_string(p))
if gpf_ptr!=sys.stdout:
gpf_ptr.close()
self.gpf_filename = filename
self.gpf_written_filename = filename
def write41(self, filename, param_list=grid_parameter_list4):
"""Write the current state to a file for AutoGrid41
file is a writeable file
param_list is a list of parameter strings.
For best results use the parameter_lists supplied by this class.
"""
if filename=='':
gpf_ptr = sys.stdout
else:
gpf_ptr = open(filename, 'w')
for p in param_list:
if p=='custom_parameter_file':
#old_custom_parameter_file_value = self['custom_parameter_file']['value']
#if old_parameter_file_value=='AD4_parameters.dat':
# self['parameter_file']['value'] = 'AD4.1_bound.dat'
if self['custom_parameter_file']['value']:
old_parameter_file_value = self['parameter_file']['value']
gpf_ptr.write( self.make_param_string('parameter_file'))
self['parameter_file']['value'] = old_parameter_file_value
elif p=='map':
# maps are a special case
for s in string.split(self['ligand_types']['value']):
gpf_ptr.write(self.make_map_string(p, s))
# all the other parameters handle themselves
elif p=='gridcenter' and self['gridcenterAuto']['value']==1:
#if gridcenterAuto is true, reset p to 'auto' and write it
self['gridcenter']['value']='auto'
gpf_ptr.write( self.make_param_string(p))
elif p=='dsolvmap':
outstring = "dsolvmap %s # desolvation potential map\n" %self['dsolvmap']['value']
gpf_ptr.write(outstring)
elif p=='dielectric4':
#now dielectric value can only be set in parameter file
#val = self['dielectric4']['value']
outstring = 'dielectric -0.1465 # <0, AD4 distance-dep.diel;>0, constant\n'
gpf_ptr.write(outstring)
elif p=='covalentmap' and len(self['covalent_coords']['value']):
gpf_ptr.write( self.make_covalentmap_string())
else:
gpf_ptr.write( self.make_param_string(p))
if gpf_ptr!=sys.stdout:
gpf_ptr.close()
self.gpf_filename = filename
self.gpf_written_filename = filename
class GridParameterFileMaker:
"""Accept a <ligand>.pdbq , <receptor>.pdbqs, reference.gpf and create
<receptor>.gpf
sets gridcenter to center of bounding box
sets npts according to bounding box
"""
def __init__(self, verbose = None, size_box_to_include_ligand=True):
self.verbose = verbose
self.gpo = GridParameters()
self.size_box_to_include_ligand = size_box_to_include_ligand
def read_reference(self, reference_filename):
if self.verbose: print "reading ", reference_filename
self.gpo.read(reference_filename)
def set_ligand(self, ligand_filename):
self.ligand_filename = os.path.basename(ligand_filename)
if self.verbose:
print "set ligand_filename to", self.ligand_filename
self.gpo.set_ligand(ligand_filename)
#expect a filename like ind.out.pdbq: get 'ind' from it
self.ligand_stem = string.split(self.ligand_filename,'.')[0]
if self.verbose: print "set ligand_stem to", self.ligand_stem
self.ligand = Read(ligand_filename)[0]
#IS THIS USEFUL???
self.gpo.ligand = self.ligand
if self.verbose: print "read ", self.ligand.name
#set gpo:
#types
d = {}
for a in self.ligand.allAtoms:
d[a.autodock_element] = 1
sortKeyList = ['C','A','N','O','S','H','P','n','f','F','c','b','I','M']
lig_types = ""
for t in sortKeyList:
if t in d.keys():
lig_types = lig_types + t
self.ligand.types = lig_types
self.gpo['types']['value'] = self.ligand.types
if self.verbose: print "set types to ", self.gpo['types']['value']
#gridcenter
self.ligand.center = self.ligand.getCenter()
if self.size_box_to_include_ligand:
self.getSideLengths(self.ligand) #sets ligand.center
cen = self.ligand.center
self.gpo['gridcenter']['value'] = [round(cen[0],4), round(cen[1],4),\
round(cen[2],4)]
self.gpo['gridcenterAuto']['value'] = 0
if self.verbose: print "set gridcenter to ", self.gpo['gridcenter']['value']
#only make the box bigger from npts, do not make it smaller
for ix, val in enumerate(self.gpo['npts']['value']):
if hasattr(self.ligand, 'npts'):
npts = self.ligand.npts
if npts[ix]>val:
if self.verbose: print "increasing ", ix, " grid dimension to ", val
self.gpo['npts']['value'][ix] = npts[ix]
#if self.verbose: print "set npts to ", self.gpo['npts']['value']
def getSideLengths(self, mol):
c = mol.allAtoms.coords
maxo = Numeric.maximum.reduce(c)
mino = Numeric.minimum.reduce(c)
sideLengths = maxo-mino
mol.npts = map(int, map(ceil, sideLengths/(self.gpo['spacing']['value'])))
for ix, npts in enumerate(mol.npts):
if npts>126:
mol.npts[ix] = 126
#FIX THIS:
#use this center instead of mol.getCenter which returns averaged
#coords:
#this should make sure the ligand fits inside the box
#mino+(maxo-mino)/2.0
mol.center = mino + (maxo - mino)/2.0
def set_receptor(self, receptor_filename, gpf_filename=None):
self.receptor_filename = os.path.basename(receptor_filename)
self.receptor_stem = string.split(self.receptor_filename, '.')[0]
self.gpo.set_receptor(receptor_filename)
#FIX THIS
#self.gpo['mset']['value'] = self.receptor.types
self.gpo['types']['value'] = self.ligand.types
def set_grid_parameters(self, **kw):
"""Any grid parameters should be set here
"""
# like this:
# should it be **kw
# kw = {'spacing':1.0, 'mset':'CNOSHXM'}
# self.mv.gpo['parm']['value'] = <new value>
# EXCEPT for 'npts' for which value must be 60,60,60
for parm, newvalue in kw.items():
self.gpo[parm]['value'] = newvalue
if parm=='npts':
self.gpo['npts']['value']= map(int, newvalue.split(','))
def write_gpf(self, gpf_filename=None,
parm_list = grid_parameter_list):
if not gpf_filename:
gpf_filename = self.receptor_stem + ".gpf"
# now that we have a filename...
if self.verbose:
print "writing ", gpf_filename
self.gpo.write(gpf_filename, parm_list)
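# Editor's note -- usage sketch for GridParameterFileMaker; filenames are
# placeholders (call set_ligand before set_receptor, which reads ligand types):
#   gpfm = GridParameterFileMaker(verbose=1)
#   gpfm.set_ligand('ind.out.pdbq')
#   gpfm.set_receptor('hsg1.pdbqs')
#   gpfm.set_grid_parameters(spacing=0.375)
#   gpfm.write_gpf('hsg1.gpf')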
class GridParameter4FileMaker:
"""Accept a <ligand>.pdbqt, <receptor>.pdbqt, reference4.gpf and create
<receptor>4.gpf with help of its "gpo" an instance of a GridParameters
sets gridcenter to center of bounding box
sets npts according to bounding box
"""
def __init__(self, verbose = None, size_box_to_include_ligand=True):
self.verbose = verbose
self.gpo = GridParameters()
self.size_box_to_include_ligand = size_box_to_include_ligand
def read_reference(self, reference_filename):
if self.verbose: print "reading ", reference_filename
self.gpo.read4(reference_filename)
def set_types_from_directory(self, directory):
if self.verbose:
print "reading directory ", directory
filelist = glob.glob(directory + "/*.pdb*")
if self.verbose:
print "len(filelist)=", len(filelist)
ad4_typer = AutoDock4_AtomTyper()
type_dict = {}
all_types = ""
for f in filelist:
ftype = os.path.splitext(f)[-1]
if ftype!=".pdbqt":
print "skipping ", f , " not in PDBQT format!"
continue
m = Read(f)[0]
m_types = ""
m_types = " ".join(list(set(m.allAtoms.autodock_element)))
self.getSideLengths(m) #sets ligand.center
npts = m.npts
#only make the box bigger, do NOT make it smaller
for ix, val in enumerate(self.gpo['npts']['value']):
if npts[ix]>val:
self.gpo['npts']['value'][ix] = npts[ix]
if self.verbose:
print m.name, " increased grid dimension ", ix, " to ", npts[ix]
all_types = all_types + m_types
if self.verbose:
print "added ", m_types, " atom types in directory ", directory
print "end: all_types = ", all_types
self.gpo['ligand_types']['value'] = all_types
if self.verbose:
print "all ligand_types for ", directory, "= ", self.gpo['ligand_types']['value']
def set_ligand(self, ligand_filename, center_on_ligand=False):
ftype = os.path.splitext(ligand_filename)[-1]
if ftype!=".pdbqt":
print "set_ligand:only ligands in 'pdbqt' files are valid. ", ftype, " files are not supported!"
return "ERROR"
self.ligand = Read(ligand_filename)[0]
if self.ligand==None:
print 'ERROR reading: ', ligand_filename
return
if self.verbose:
print "read ", self.ligand.name
ligand_types = self.getTypes(self.ligand)
self.gpo.set_ligand4(ligand_filename, types=ligand_types)
#this sets ligand_types, gpo.ligand_stem and gpo.ligand_filename
if self.verbose:
print "set gpo.ligand_stem to", self.gpo.ligand_stem
print "set gpo.ligand_filename to", self.gpo.ligand_filename
print "set gpo.ligand_types to", self.gpo['ligand_types']['value'].__class__
#need to get npts
if self.size_box_to_include_ligand:
self.getSideLengths(self.ligand) #sets ligand.center
#gridcenter IS NOT SET BY THIS!!!
if center_on_ligand:
#cen = self.ligand.getCenter()
self.getSideLengths(self.ligand)
cen = self.ligand.center # set by call to getSideLengths NOT self.ligand.getCenter
self.gpo['gridcenter']['value'] = [round(cen[0],4), round(cen[1],4),\
round(cen[2],4)]
self.gpo['gridcenterAuto']['value'] = 0
if self.verbose: print "set gridcenter to ", self.gpo['gridcenter']['value']
#only make the box bigger, do NOT make it smaller
for ix, val in enumerate(self.gpo['npts']['value']):
#npts
if hasattr(self.ligand, 'npts'):
npts = self.ligand.npts
if npts[ix]>val:
self.gpo['npts']['value'][ix] = npts[ix]
if self.verbose: print "set npts to ", self.gpo['npts']['value']
def getTypes(self, molecule):
mol_types = ""
mol_types = " ".join(list(set(molecule.allAtoms.autodock_element)))
if self.verbose:
print "end of getTypes: mol_types=", mol_types, ' class=', mol_types.__class__
return mol_types
def getSideLengths(self, mol):
c = mol.allAtoms.coords
maxo = Numeric.maximum.reduce(c)
mino = Numeric.minimum.reduce(c)
sideLengths = maxo-mino
mol.npts = map(int, map(ceil, sideLengths/(self.gpo['spacing']['value'])))
for ix, npts in enumerate(mol.npts):
if npts>126:
mol.npts[ix] = 126
#FIX THIS:
#use this center instead of mol.getCenter which returns averaged
#coords:
#this should make sure the ligand fits inside the box
#mino+(maxo-mino)/2.0
mol.center = mino + (maxo - mino)/2.0
def set_receptor(self, receptor_filename, gpf_filename=None):
ftype = os.path.splitext(receptor_filename)[-1]
if ftype!=".pdbqt":
print "set_receptor:only pdbqt files valid. ", ftype," files are not supported."
return "ERROR:"
self.receptor = Read(receptor_filename)[0]
receptor_filename = os.path.basename(receptor_filename)
if self.receptor==None:
print 'ERROR reading: ', receptor_filename
return
if self.verbose: print "set_receptor filename to ", receptor_filename
receptor_types = self.getTypes(self.receptor)
self.gpo.set_receptor4(receptor_filename, types=receptor_types)
self.receptor_filename = os.path.basename(receptor_filename)
if hasattr(self, 'receptor'):
self.receptor_stem = self.receptor.name
else:
self.receptor_stem = os.path.splitext(self.receptor_filename)[0]
#all of this is handled by set_receptor4
#self.gpo['gridfld']['value'] = self.receptor_stem + '.maps.fld'
#self.gpo['elecmap']['value'] = self.receptor_stem + '.e.map'
#self.gpo['dsolvmap']['value'] = self.receptor_stem + '.d.map'
#this sets gpo.receptor_types, gpo.receptor_stem and gpo.receptor_filename
def set_grid_parameters(self, **kw):
"""Any grid parameters should be set here
"""
# like this:
# should it be **kw
# kw = {'spacing':1.0, 'receptor_types':'C A NA OA N SA HD MG'}
# self.mv.gpo['parm']['value'] = <new value>
# EXCEPT for 'npts' for which value must be 60,60,60
for parm, newvalue in kw.items():
if self.verbose:
print "parm=", parm
print "newvalue=", newvalue
if parm=='gridcenter':
self.gpo['gridcenterAuto']['value'] = newvalue=='auto'
self.gpo[parm]['value'] = newvalue
if parm=='npts':
self.gpo['npts']['value']= map(int, newvalue.split(','))
if parm=='ligand_types':
if newvalue.find(',')>-1:
newvalue = newvalue.replace(',', ' ')
print "setting ligand_types: newvalue=", newvalue
self.gpo[parm]['value']= newvalue
def write_gpf(self, gpf_filename=None,
parm_list = grid_parameter_list4):
if not gpf_filename:
gpf_filename = self.receptor_stem + ".gpf"
# now that we have a filename...
if self.verbose:
print "writing ", gpf_filename
for item in parm_list:
print item,
print
self.gpo.write4(gpf_filename, parm_list)
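# Usage sketch (hypothetical file names; assumes `gpf4` is an instance of this
# preparation class with its gpo already initialised):
#   gpf4.set_receptor("receptor.pdbqt")
#   gpf4.set_ligand("ligand.pdbqt", center_on_ligand=True)
#   gpf4.set_grid_parameters(npts="60,60,60", spacing=0.375)
#   gpf4.write_gpf("receptor.gpf")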
| 1.851563 | 2 |
pacman.py | Xevaquor/aipac | 0 | 12765823 | #!/usr/bin/env python
# coding=utf-8
__author__ = 'Xevaquor'
__license__ = 'MIT'
from layout import *
from copy import deepcopy
Moves = {
'North' : (0, -1),
'South' : (0, 1),
'East': (1, 0),
'West' : (-1, 0),
'Stop' : (0, 0)
}
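# Grid coordinates follow the screen convention (y grows downward),
# hence 'North' decreases y and 'South' increases it.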
class AgentStatus(object):
def __init__(self, pos = (0,0), scared = False):
self.position = pos
self.is_scared = scared
class PacmanGameState(object):
def __init__(self, food):
# 0 - pacman
# >0 - ghost
self.agents = [None, None]
self.power_pellets = []
self.food = deepcopy(food)
class PacmanGame(object):
def __init__(self, lay):
self.layout = lay
def get_initial_game_state(self):
gs = PacmanGameState(self.layout.food)
gs.agents[0] = AgentStatus(pos=(3, 1), scared=False)
gs.agents[1] = AgentStatus(pos=(2, 1), scared=False)
return gs
def get_legal_moves(self, state, agent_index=0):
        if agent_index != 0:
            raise NotImplementedError("Not implemented!")
legal_moves = []
x, y = state.agents[agent_index].position
for m, d in Moves.items():
dx, dy = d
nx = x + dx
ny = y + dy
            if 0 <= nx < self.layout.cols and 0 <= ny < self.layout.rows:
if self.layout.grid[ny][nx] != Tile.Wall:
legal_moves.append(m)
return legal_moves
def apply_move(self, state, move, agent_index=0):
# assert move is legal
# only move so far
dx, dy = Moves[move]
s = deepcopy(state)
        s.agents[agent_index].position = (s.agents[agent_index].position[0] + dx,
                                          s.agents[agent_index].position[1] + dy)
# eat food
if agent_index == 0:
x, y = s.agents[agent_index].position
s.food[y][x] = False
return s
def is_terminate(self, state):
pass
def pacman_won(self, state):
pass
def pacman_lose(self, state):
pass
def get_score(self, state):
pass
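# Minimal usage sketch (assumes a `Layout` instance `lay` built elsewhere):
#   game = PacmanGame(lay)
#   state = game.get_initial_game_state()
#   for move in game.get_legal_moves(state, agent_index=0):
#       successor = game.apply_move(state, move, agent_index=0)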
| 3.34375 | 3 |
aia_project/main.py | maciejczyzewski/sem6 | 0 | 12765824 | <reponame>maciejczyzewski/sem6
import unittest
from flask_script import Manager, Command
from test import create_test_suite
from app import Service
service = Service()
service.start()
############################################
"""
from celery import Celery
CELERY_BROKER_BACKEND = "db+sqlite:///celery.sqlite"
CELERY_CACHE_BACKEND = "db+sqlite:///celery.sqlite"
CELERY_RESULT_BACKEND = "db+sqlite:///celery.sqlite"
celery = Celery('tasks', broker='pyamqp://guest@localhost//')
"""
############################################
class CreateCommand(Command):
"Runs service creator i.e. database"
def run(self):
service.db.create_all()
service.db.session.commit()
print("[database created]")
class TestCommand(Command):
"Runs tests (same as `python3 -m unittest`)"
def run(self):
testSuite = create_test_suite()
text_runner = unittest.TextTestRunner(verbosity=2).run(testSuite)
manager = Manager(service.app)
manager.add_command('create', CreateCommand)
manager.add_command('test', TestCommand)
if __name__ == '__main__':
manager.run()
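# Shell usage (commands registered on the Manager above):
#   python3 main.py create   # create the database tables
#   python3 main.py test     # run the unittest suite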
| 2.546875 | 3 |
deploy/text_classification/Predictor.py | amirgholipour/mlops_project | 0 | 12765825 | import tensorflow as tf
import joblib
import numpy as np
import json
import traceback
import sys
import os
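# Model-server class: the predict(X, features_names) signature appears to follow
# the Seldon Core python-wrapper convention (an assumption based on the method names).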
class Predictor(object):
def __init__(self):
self.loaded = False
def load(self):
print("Loading model",os.getpid())
self.model = tf.keras.models.load_model('model.h5', compile=False)
self.labelencoder = joblib.load('labelencoder.pkl')
self.loaded = True
print("Loaded model")
def predict(self, X,features_names):
# data = request.get("data", {}).get("ndarray")
# mult_types_array = np.array(data, dtype=object)
print ('step1......')
print(X)
X = tf.constant(X)
print ('step2......')
print(X)
if not self.loaded:
self.load()
# result = self.model.predict(X)
try:
result = self.model.predict(X)
except Exception as e:
print(traceback.format_exception(*sys.exc_info()))
raise # reraises the exception
print ('step3......')
result = tf.sigmoid(result)
print ('step4......')
print(result)
result = tf.math.argmax(result,axis=1)
print ('step5......')
print(result)
print(result.shape)
print(self.labelencoder.inverse_transform(result))
print ('step6......')
return json.dumps(result.numpy(), cls=JsonSerializer)
class JsonSerializer(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (
np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)):
return obj.tolist()
return json.JSONEncoder.default(self, obj) | 2.359375 | 2 |
applications/autocomplete/init.py | Vishakha1990/Lambdas | 0 | 12765826 | <filename>applications/autocomplete/init.py
import rethinkdb as r
import os, os.path, argparse
from common import *
AC = 'ac' # DB
WORDS = 'words' # TABLE
WORD = 'word' # COLUMN
FREQ = 'freq' # COLUMN
NGINX_EXAMPLE = 'docker run -d -p 80:80 -v %s:/usr/share/nginx/html:ro nginx'
def makeDB(host):
conn = r.connect(host, 28015)
dbs = r.db_list().run(conn)
if AC in dbs:
return 'already there'
#r.db_drop(AC).run(conn)
r.db_create(AC).run(conn)
r.db(AC).table_create(WORDS, primary_key = WORD).run(conn)
ra = {WORD: None, FREQ: None}
f = open(os.path.join(SCRIPT_DIR, "wordsCSV.txt"), 'r')
for line in f:
line = line.strip()
linesplit = line.split(',')
w = linesplit[0]
ra[WORD] = unicode(w)
ra[FREQ] = int(linesplit[1])
if len(linesplit[0]) == 1:
print linesplit[0]
r.db(AC).table(WORDS).insert(ra).run(conn)
f.close()
return 'initialized'
parser = argparse.ArgumentParser()
parser.add_argument('--cluster', '-c')
args = parser.parse_args()
cluster_dir = os.path.join(SCRIPT_DIR, "..", "..","util", args.cluster)
worker0 = rdjs(os.path.join(cluster_dir, 'worker-0.json'))
msg = makeDB(worker0['ip'])
print msg
| 2.5625 | 3 |
pypodman/lib/actions/export_action.py | TomSweeneyRedHat/python-pypodman | 1 | 12765827 | <gh_stars>1-10
"""Remote client command for export container filesystem to tarball."""
import sys
import podman
from pypodman.lib import AbstractActionBase
class Export(AbstractActionBase):
"""Class for exporting container filesystem to tarball."""
@classmethod
def subparser(cls, parent):
"""Add Export command to parent parser."""
parser = parent.add_parser(
'export',
help='export container to tarball',
)
parser.add_argument(
'--output',
'-o',
metavar='PATH',
nargs=1,
required=True,
help='Write to this file on host',
)
parser.add_argument(
'container',
nargs=1,
help='container to use as source',
)
parser.set_defaults(class_=cls, method='export')
def export(self):
"""Create tarball from container filesystem."""
try:
try:
ctnr = self.client.containers.get(self._args.container[0])
except podman.ContainerNotFound as e:
sys.stdout.flush()
print(
'Container {} not found.'.format(e.name),
file=sys.stderr,
flush=True)
return 1
else:
ctnr.export(self._args.output[0])
except podman.ErrorOccurred as e:
sys.stdout.flush()
print(
'{}'.format(e.reason).capitalize(),
file=sys.stderr,
flush=True)
return 1
return 0
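# CLI sketch (argument names taken from the parser above):
#   pypodman export --output container.tar <container-id>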
| 2.484375 | 2 |
tests/input/test_validators.py | larribas/dagger | 9 | 12765828 | <reponame>larribas/dagger
import pytest
from dagger.input.from_node_output import FromNodeOutput
from dagger.input.from_param import FromParam
from dagger.input.validators import (
_clean_parameters,
_validate_parameters,
split_required_and_optional_inputs,
validate_and_clean_parameters,
validate_name,
)
#
# validate_name
#
def test__validate_name__with_valid_names():
valid_names = [
"param",
"name-with-dashes",
"name_with_underscores",
"name-with-dashes_and_underscores_and_123",
"x" * 64,
]
for name in valid_names:
# We are testing it doesn't raise any validation errors
validate_name(name)
def test__validate_name__with_invalid_names():
invalid_names = [
"",
"name with spaces",
"x" * 65,
"with$ymßols",
]
for name in invalid_names:
with pytest.raises(ValueError) as e:
validate_name(name)
assert (
str(e.value)
== f"'{name}' is not a valid name for an input. Inputs must comply with the regex ^[a-zA-Z0-9][a-zA-Z0-9-_]{{0,63}}$"
)
#
# split_required_and_optional_inputs
#
def test__split_required_and_optional_inputs():
required, optional = split_required_and_optional_inputs(
{
"req1": FromParam(),
"req2": FromNodeOutput("x", "y"),
"opt1": FromParam(default_value=2),
"opt2": FromParam(default_value=None),
}
)
assert required == {
"req1": FromParam(),
"req2": FromNodeOutput("x", "y"),
}
assert optional == {
"opt1": FromParam(default_value=2),
"opt2": FromParam(default_value=None),
}
#
# validate_and_clean_parameters
#
def test__validate_and_clean_parameters__when_we_provide_required_and_superfluous_params_but_no_optional_params():
clean_params = validate_and_clean_parameters(
inputs={
"a": FromParam(),
"b": FromNodeOutput("n", "o"),
"c": FromParam(default_value=10),
},
params={
"a": 1,
"b": "2",
"d": 3,
},
)
assert clean_params == {"a": 1, "b": "2", "c": 10}
def test__validate_and_clean_parameters__when_a_required_input_is_missing():
with pytest.raises(ValueError) as e:
validate_and_clean_parameters(
inputs={
"a": FromParam(),
"b": FromNodeOutput("n", "o"),
"c": FromParam(default_value=10),
},
params={
"a": 1,
"c": 1,
},
)
assert (
str(e.value)
== "The parameters supplied to this node were supposed to contain the "
"following parameters: ['a', 'b']. However, only the following "
"parameters were actually supplied: ['a', 'c']. We are missing: ['b']."
)
#
# _validate_parameters
#
def test__validate_parameters__when_there_are_no_required_inputs():
_validate_parameters(
required_inputs={},
params={},
)
# we are asserting that no validation errors are raised
def test__validate_parameters__when_all_required_inputs_are_passed():
_validate_parameters(
required_inputs={
"a": FromParam(),
"b": FromNodeOutput("n", "o"),
},
params={
"a": 1,
"b": "2",
},
)
# we are asserting that no validation errors are raised
def test__validate_parameters__when_we_are_passing_superfluous_params():
_validate_parameters(
required_inputs={
"a": FromParam(),
},
params={
"a": 1,
"b": 2,
},
)
# we are asserting that no validation errors are raised
def test__validate_parameters__when_a_required_input_is_missing():
with pytest.raises(ValueError) as e:
_validate_parameters(
required_inputs={
"a": FromParam(),
"b": FromNodeOutput("n", "o"),
"c": FromParam(),
},
params={
"a": 1,
"c": 1,
},
)
assert (
str(e.value)
== "The parameters supplied to this node were supposed to contain the "
"following parameters: ['a', 'b', 'c']. However, only the following "
"parameters were actually supplied: ['a', 'c']. We are missing: ['b']."
)
#
# _clean_parameters
#
def test__clean_parameters__removes_superfluous_parameters():
clean_params = _clean_parameters(
required_inputs={
"a": FromParam(),
},
optional_inputs={},
params={
"a": 1,
"b": 1,
},
)
assert clean_params == {"a": 1}
def test__clean_parameters__includes_default_values_that_were_not_passed_as_parameters():
clean_params = _clean_parameters(
required_inputs={
"a": FromParam(),
},
optional_inputs={
"b": FromParam(default_value=10),
"c": FromParam(default_value=20),
},
params={
"a": 1,
"c": 2,
},
)
assert clean_params == {"a": 1, "b": 10, "c": 2}
def test__clean_parameters__overriding_a_default_with_falsey_value():
falsey_values = [
None,
[],
{},
False,
]
for falsey_value in falsey_values:
clean_params = _clean_parameters(
required_inputs={},
optional_inputs={
"a": FromParam(default_value=10),
},
params={
"a": falsey_value,
},
)
assert clean_params == {"a": falsey_value}
| 2.328125 | 2 |
authentik/core/migrations/0021_alter_application_slug.py | BeryJu/passbook | 15 | 12765829 | # Generated by Django 3.2.3 on 2021-05-14 08:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("authentik_core", "0020_source_user_matching_mode"),
]
operations = [
migrations.AlterField(
model_name="application",
name="slug",
field=models.SlugField(
help_text="Internal application name, used in URLs.", unique=True
),
),
]
| 1.617188 | 2 |
libs/urllibs/setlog.py | Evelca1N/K-Spider | 3 | 12765830 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from datetime import datetime
import sys
def LogFile(logFile, target_url):
    filePoint = open(logFile, 'a')
filePoint.write('----------------------------\n\n')
for i in sys.argv:
filePoint.write(i + ' ')
filePoint.write('\n')
filePoint.write('[*] Crawling URL : {} at :{}\n\n'.format(target_url, str(datetime.now())[: -7]))
filePoint.close()
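# Usage sketch (placeholder values): LogFile("spider.log", "http://example.com/")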
| 2.90625 | 3 |
photo-page/getImgHTML.py | alex-law/personal-website | 0 | 12765831 | <reponame>alex-law/personal-website
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 12 15:26:52 2020
@author: alexw
"""
import os
out = ''
for n in range(1, 139):
    out += '<div class="col-sm-12 col-md-4">\n<a class="lightbox" href="img/{}.jpg">\n<img src="img/{}.jpg" alt="{}">\n</a>\n</div>\n'.format(n, n, n)
with open('img-html.txt', 'w') as f:
f.write(out) | 2.34375 | 2 |
src/util/plot.py | ivarbrek/master_thesis_bestemt | 0 | 12765832 | import matplotlib.pyplot as plt
from typing import List, Tuple, Dict, Optional
import pandas as pd
import plotly.graph_objects as go
def plot_alns_history(solution_costs: List[Tuple[int, int]], lined: bool = False, legend: str = "") -> None:
x, y = zip(*solution_costs)
plt.figure(figsize=(10, 7)) # (8, 6) is default
plt.scatter(x, y, s=7, alpha=0.4, c='black')
if lined:
plt.plot(x, y, label=legend)
if legend != "":
plt.legend()
# plt.yscale('log')
plt.show()
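# e.g. plot_alns_history([(0, 1200), (10, 1100), (25, 950)], lined=True, legend="best cost")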
def plot_operator_weights(operator_scores: Dict[str, List[float]], x_values: List[int] = None) -> None:
# plt.figure(figsize=(10, 7)) # (8, 6) is default
legend = []
for operator, scores in operator_scores.items():
if x_values:
plt.plot(x_values, scores)
else:
plt.plot(scores)
legend.append(_get_operator_legend_name(operator))
plt.legend(legend)
plt.xlabel("Iteration")
plt.show()
def _get_operator_legend_name(operator_name: str) -> str:
mapping = {
'true': 'insertion with noise',
'false': 'insertion without noise',
'r_greedy': 'greedy',
'r_2regret': '2-regret',
'r_3regret': '3-regret',
'd_random': 'random',
'd_worst': 'worst',
'd_voyage_random': 'random voyage',
'd_voyage_worst': 'worst voyage',
'd_route_random': 'random route',
'd_route_worst': 'worst route',
'd_related_location_time': 'spatial temporal related',
'd_related_location_precedence': 'spatial disease related',
}
return mapping[operator_name]
def plot_alns_history_with_production_feasibility(solution_costs: List[Tuple[int, int]],
production_feasibility: List[bool]) -> None:
df = pd.DataFrame(dict(iter=[elem[0] for elem in solution_costs],
cost=[elem[1] for elem in solution_costs],
feasible=production_feasibility))
fig, ax = plt.subplots()
colors = {False: 'red', True: 'green'}
ax.scatter(df['iter'], df['cost'], c=df['feasible'].apply(lambda x: colors[x]))
plt.show()
def plot_locations(locations_ids: List[str], special_locations: Optional[List[Tuple[float, float]]] = None, save_to: Optional[str] = None):
    # Special locations:
    # 0482: (59.3337534309431, 5.30413145167106), 2022: (11.2786502472518,64.857954476573), 2015: (15.0646427525587,68.9141123038669)
    if special_locations is None:
        special_locations = []
    loc_data = pd.read_csv('../../data/locations.csv')
    loc_data.set_index("loknr", inplace=True)
farm_size = 7
factory_size = 15
    farm_color = '#0067b5'  # sky blue
factory_color = 'black'
factory_marker = 'square'
farm_marker = 'circle'
relevant_locations_and_coords = [(loc_id,
loc_data.loc[int(loc_id), "breddegrader"],
loc_data.loc[int(loc_id), "lengdegrader"], farm_color, farm_size, farm_marker)
for loc_id in locations_ids if int(loc_id) in loc_data.index]
relevant_locations_and_coords += [(000, coord[0], coord[1], factory_color, factory_size, factory_marker)
for coord in special_locations]
df = pd.DataFrame(relevant_locations_and_coords)
df.columns = ['loc_id', 'lat', 'long', 'color', 'size', 'marker']
# color = c if c else "LightSkyBlue"
# with open("../../data/custom.geo.json", "r", encoding="utf-8") as f:
# geometry = geojson.load(f)
# pprint(geometry)
# trace1 = go.Choropleth(geojson=geometry,
# locations=["Norway"],
# z=[0],
# text=['Norway-text']
# )
trace2 = go.Scattergeo(
lon=df['long'],
lat=df['lat'],
text=df['loc_id'],
mode='markers',
marker=dict(
color=df['color'],
size=df['size'],
symbol=df['marker'],
line=dict(color='black', width=0),
opacity=1
)
)
fig = go.Figure([trace2])
# fig.update_layout(
# title='Locations',
# geo_scope='europe',
# )
fig.update_geos(
fitbounds="locations",
resolution=50,
# visible=False,
showframe=False,
projection={"type": "mercator"},
)
if save_to: # Save figure
fig.write_html(save_to)
fig.show()
def plot_clustered_locations(locations_ids_list: List[List[str]],
                             special_locations_list: Optional[List[List[Tuple[float, float]]]] = None, save_to: Optional[str] = None):
    if special_locations_list is None:
        special_locations_list = [[] for _ in locations_ids_list]
    loc_data = pd.read_csv('../../data/locations.csv')
    loc_data.set_index("loknr", inplace=True)
farm_size = 9
factory_size = 15
factory_color = 'black'
farm_colors = ['#0067b5', '#e67512', '#006700', '#bb00bb']
factory_marker = 'square'
farm_marker = 'circle'
traces = []
for locations_ids, special_locations, farm_color in zip(locations_ids_list, special_locations_list, farm_colors):
relevant_locations_and_coords = [(loc_id,
loc_data.loc[int(loc_id), "breddegrader"],
loc_data.loc[int(loc_id), "lengdegrader"], farm_color, farm_size, farm_marker)
for loc_id in locations_ids if int(loc_id) in loc_data.index]
relevant_locations_and_coords += [(000, coord[0], coord[1], factory_color, factory_size, factory_marker)
for coord in special_locations]
df = pd.DataFrame(relevant_locations_and_coords)
df.columns = ['loc_id', 'lat', 'long', 'color', 'size', 'marker']
# color = c if c else "LightSkyBlue"
trace = go.Scattergeo(
lon=df['long'],
lat=df['lat'],
text=df['loc_id'],
mode='markers',
marker=dict(
color=df['color'],
size=df['size'],
symbol=df['marker'],
line=dict(color='black', width=0),
opacity=1
)
)
traces.append(trace)
fig = go.Figure(traces)
# fig.update_layout(
# title='Locations',
# geo_scope='europe',
# )
fig.update_geos(
fitbounds="locations",
resolution=50,
# visible=False,
showframe=False,
projection={"type": "mercator"},
)
if save_to: # Save figure
fig.write_html(save_to)
fig.show()
| 2.703125 | 3 |
morse_hospital_sim/src/turtlebot_hospital_sim/Turtlebot.py | gabrielsr/hmrs_hospital_simulation | 0 | 12765833 | <reponame>gabrielsr/hmrs_hospital_simulation
import json
import rospy
import geometry_msgs.msg
from morse.builder import *
from threading import Timer
from std_msgs.msg import String
from turtlebot_hospital_sim.BatterySensor import BatterySensor
from turtlebot_hospital_sim.ItemExchanger import ItemExchanger
# import tf_conversions
import numpy as np
PATH = "/".join(__file__.split("/")[:-3])
def formatlog(severity, who, loginfo, skill, params):
return ('['+severity+'],'+
who+','+
loginfo+','+
skill+','+
params)
def euler_from_quaternion(quaternion):
"""
Converts quaternion (w in last place) to euler roll, pitch, yaw
quaternion = [x, y, z, w]
Bellow should be replaced when porting for ROS 2 Python tf_conversions is done.
"""
x = quaternion.x
y = quaternion.y
z = quaternion.z
w = quaternion.w
sinr_cosp = 2 * (w * x + y * z)
cosr_cosp = 1 - 2 * (x * x + y * y)
roll = np.arctan2(sinr_cosp, cosr_cosp)
sinp = 2 * (w * y - z * x)
pitch = np.arcsin(sinp)
siny_cosp = 2 * (w * z + x * y)
cosy_cosp = 1 - 2 * (y * y + z * z)
yaw = np.arctan2(siny_cosp, cosy_cosp)
return roll, pitch, yaw
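# Sanity check: the identity rotation (x=y=z=0, w=1) yields roll = pitch = yaw = 0.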
class Turtlebot(Pioneer3DX):
def __init__(self, name="turtlebot", path=f"{PATH}/models/turtlebot.blend"):
Pioneer3DX.__init__(self, name)
self.name = name
self.path = path
self.item_exchanger = ItemExchanger(name=name, obj="sphere")
self.curr_pose = geometry_msgs.msg.PoseStamped()
self.pose_sub = rospy.Subscriber(f"/{self.name}/pose", geometry_msgs.msg.PoseStamped, self.save_pose)
self.log_pub = rospy.Publisher(f"/log", String, queue_size=1)
def set_ros_timer(self):
try:
while rospy.get_time() == 0:
rospy.logwarn(f"{self.name} waiting for clock...")
rospy.sleep(1)
self.timer = rospy.Timer(rospy.Duration(15), self.log_robot_pose)
except:
self.thr_timer = Timer(30, self.set_ros_timer)
self.thr_timer.start()
def log_robot_pose(self, event):
_, _, yaw = euler_from_quaternion(self.curr_pose.pose.orientation)
# roll = euler[0]
# pitch = euler[1]
# yaw = euler[2]
# robot_pose = "(x=%.2f;y=%.2f;yaw=%.2f)"%(self.curr_pose.pose.position.x,
# self.curr_pose.pose.position.y,
# yaw)
robot_pose = {
'x': '{:02.2f}'.format(self.curr_pose.pose.position.x),
'y': '{:02.2f}'.format(self.curr_pose.pose.position.y),
'yaw': '{:02.2f}'.format(yaw)
}
log = String()
# log.data = formatlog('debug',
# self.name,
# 'simulation',
# 'robot-pose',
# robot_pose)
logdata = {
'level': 'info',
'entity': self.name,
'content': robot_pose
}
log.data = json.dumps(logdata)
self.log_pub.publish(log)
def add_to_simulation(self, x=-19, y=-3, z=0,
x_rot=0, y_rot=0, z_rot=0,
battery_discharge_rate=0.05,
batt_init_state=1.0):
self.translate(x, y, z)
self.rotate(x_rot, y_rot, z_rot)
self.add_motion_sensor()
self.add_pose_sensor()
self.add_lidar_sensor()
self.add_odometry_sensor()
self.add_battery_sensor(battery_discharge_rate, batt_init_state)
self.properties(Influence = 0.1, Friction = 5,
WheelFLName = "Wheel_L", WheelFRName = "Wheel_R",
WheelRLName = "None", WheelRRName = "None",
CasterWheelName = "CasterWheel",
FixTurningSpeed = 0.52)
self.thr_timer = Timer(30, self.set_ros_timer)
self.thr_timer.start()
def save_pose(self, msg):
self.curr_pose = msg
def add_lidar_sensor(self):
self.lidar = Hokuyo()
self.lidar.frequency(10)
self.lidar.translate(x=0.0, z=0.252)
self.append(self.lidar)
self.lidar.properties(Visible_arc = False)
self.lidar.properties(laser_range = 10.0)
self.lidar.properties(resolution = 1)
self.lidar.properties(scan_window = 360.0)
self.lidar.create_laser_arc()
self.lidar.add_interface('ros', topic=f"{self.name}/lidar", frame_id=f"{self.name}/base_footprint")
def add_motion_sensor(self):
# self.motion = MotionVW()
self.motion = MotionVWDiff()
# self.motion.frequency(10)
self.append(self.motion)
self.motion.add_interface('ros', topic=f"{self.name}/cmd_vel")
def add_pose_sensor(self):
# Current position
self.pose = Pose()
# self.pose.frequency(20)
self.append(self.pose)
self.pose.add_interface('ros', topic=f"{self.name}/pose", frame_id="map")
def add_odometry_sensor(self):
# Displacement since last Blender tick
self.odometry = Odometry()
# self.odometry.frequency(20)
self.append(self.odometry)
self.odometry.add_interface('ros', topic=f"{self.name}/odom", frame_id=f"{self.name}/odom", child_frame_id=f"{self.name}/base_footprint")
def add_battery_sensor(self, discharge_rate, init_state):
self.battery = BatterySensor(self.name,
discharge_rate_percentage=discharge_rate,
initial_percentage=init_state)
# self.battery = Battery()
# self.battery = BatteryRobot(self)
# self.battery.frequency(10)
# self.battery.properties(DischargingRate = discharge_rate)
# self.append(self.battery)
# self.battery.add_interface('ros', topic=f"{self.name}/battery")
| 2.265625 | 2 |
ozone-framework-python-server/config/serializers.py | aamduka/ozone | 6 | 12765834 | <filename>ozone-framework-python-server/config/serializers.py
from django.contrib.auth import authenticate
from rest_framework import serializers
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, attrs):
user = authenticate(username=attrs['username'], password=attrs['password'])
if not user:
raise serializers.ValidationError('Incorrect username or password.')
if not user.is_active:
raise serializers.ValidationError('User is disabled.')
return {'user': user}
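# Usage sketch (e.g. inside a DRF view):
#   serializer = LoginSerializer(data=request.data)
#   serializer.is_valid(raise_exception=True)
#   user = serializer.validated_data['user']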
| 2.4375 | 2 |
stanfordnlp/pipeline/ner_processor.py | msinkec/classla-stanfordnlp | 0 | 12765835 | """
Processor for performing named entity tagging.
"""
from stanfordnlp.models.common.pretrain import Pretrain
from stanfordnlp.models.common import doc
from stanfordnlp.models.common.utils import unsort
from stanfordnlp.models.ner.data import DataLoader
from stanfordnlp.models.ner.trainer import Trainer
from stanfordnlp.pipeline._constants import *
from stanfordnlp.pipeline.processor import UDProcessor
class NERProcessor(UDProcessor):
# set of processor requirements this processor fulfills
PROVIDES_DEFAULT = set([NER])
# set of processor requirements for this processor
REQUIRES_DEFAULT = set([TOKENIZE])
def _set_up_model(self, config, use_gpu):
# set up trainer
self._args = {'charlm_forward_file': config['forward_charlm_path'], 'charlm_backward_file': config['backward_charlm_path']}
self._pretrain = Pretrain(config['pretrain_path'])
self._trainer = Trainer(args=self._args, pretrain=self.pretrain, model_file=config['model_path'], use_cuda=use_gpu)
def process(self, document):
# set up a eval-only data loader and skip tag preprocessing
batch = DataLoader(
document, self.config['batch_size'], self.config, vocab=self.vocab, evaluation=True, preprocess_tags=False)
preds = []
for b in batch:
preds += self.trainer.predict(b)
# Append previous 'misc' values.
misc = batch.conll.get(['misc'])
idx = 0
for i, sent in enumerate(preds):
for j, ner_pred in enumerate(sent):
ner_pred = 'NER=' + ner_pred
misc_val = misc[idx]
if misc_val != '_':
preds[i][j] = ner_pred + '|' + misc_val
else:
preds[i][j] = ner_pred
idx += 1
batch.conll.set(['misc'], [y for x in preds for y in x])
| 2.40625 | 2 |
setup.py | nyuspc/ppt_maker | 0 | 12765836 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="ppt_maker",
version="0.0.1",
author="<NAME>, <NAME>",
author_email="<EMAIL>",
description="Make PowerPoint slides with template and data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nyuspc/ppt_maker",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python",
"Intended Audience :: Financial and Insurance Industry",
"Topic :: Multimedia :: Graphics",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 1.484375 | 1 |
MP/11_get_binary_atom_type_mask.py | L-sky/Master_Thesis | 0 | 12765837 | import os
import argparse
import numpy as np
import pymatgen
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("mp_dir", help="Root directory with Materials Project dataset")
parser.add_argument("radial_cutoff", type=float, help="Radius of sphere that decides neighborhood")
args = parser.parse_args()
mp_dir = args.mp_dir
r_cut = args.radial_cutoff
index = np.load(os.path.join(mp_dir, 'meta_derived', f'index_connected_{r_cut}.npy'))
mp_cif_dir = os.path.join(mp_dir, "cif")
mp_save_dir = os.path.join(mp_dir, f"derived_radial_cutoff_{r_cut}")
def get_max_atomic_number(cif_paths):
max_atomic_number = -1
for cif_path in tqdm(cif_paths):
structure = pymatgen.Structure.from_file(cif_path)
max_atomic_number = max(max_atomic_number, max(structure.atomic_numbers))
return max_atomic_number
def process_cif(cif_path):
structure = pymatgen.Structure.from_file(cif_path)
return np.array(structure.atomic_numbers)
cif_paths = [os.path.join(mp_cif_dir, filename) for filename in index]
max_atomic_number = get_max_atomic_number(cif_paths)
atom_type_mask = np.zeros((len(cif_paths), max_atomic_number+1), dtype=np.bool)
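# Row i marks which atomic numbers occur in structure i: fancy indexing with the
# array of atomic numbers returned by process_cif sets exactly those columns to True.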
for i, cif_path in enumerate(tqdm(cif_paths)):
atom_type_mask[i, process_cif(cif_path)] = True
np.save(os.path.join(mp_save_dir, "atom_type_mask.npy"), atom_type_mask)
| 2.453125 | 2 |
solvate/__init__.py | michaltykac/SolvateAminoAcids | 1 | 12765838 | <reponame>michaltykac/SolvateAminoAcids
# -*- coding: UTF-8 -*-
# \file __init__.py
# \brief This file initialises the package.
#
# This file firstly denotes this folder as containing a python package and secondly it makes some of the solvate
# parts easily accessible.
#
# Copyright by the Authors and individual contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3) Neither the name of Michal Tykac nor the names of this code's contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# This software is provided by the copyright holder and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or the contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services, loss of use, data or profits, or business interruption) however caused and on any theory of liability, whether in contract, strict liability or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
#
# \author <NAME>
# \author <NAME>
# \author <NAME>
# \version 0.1.0
# \date DEC 2020
######################################################
from solvate.solvate_globals import globalSettings
from solvate.solvate_log import startLog, endLog
from solvate.solvate_structures import parseInputCoordinates
from solvate.solvate_structures import getAllFragmentFragments
from solvate.solvate_structures import getAllResidueFragments
from solvate.solvate_structures import combineAndAddWaters
from solvate.solvate_structures import writeOutStructures
from solvate.solvate_matchFragments import matchFragments
from solvate.solvate_predictWaters import predictWaters
from solvate.solvate_predictWaters import removeClashes
from solvate.solvate_predictWaters import clusterWaters
| 1.273438 | 1 |
conftest.py | Mozilla-GitHub-Standards/6a6c1dd41e911a7e844c50d56b45288b640f5e21ce6a106042d9f792986f372c | 3 | 12765839 | <gh_stars>1-10
# Configuration file for running contract-tests
import configparser
import pytest
import ssl
# Hack because of how SSL certificates are verified by default in Python
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
def pytest_addoption(parser):
parser.addoption(
"--env",
dest="env",
default="stage",
help="Environment tests are running in: stage or prod"
)
parser.addoption(
"--api-version",
dest="apiversion",
help="Optional param: version of API under test"
)
@pytest.fixture(scope="module")
def conf():
config = configparser.ConfigParser()
config.read('manifest.ini')
return config
@pytest.fixture(scope="module")
def env(request):
return request.config.getoption("--env")
@pytest.fixture(scope="module")
def apiversion(request):
return request.config.getoption("--api-version")
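# Invocation sketch: pytest --env prod --api-version 1.0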
| 2.09375 | 2 |
mxlive/lims/migrations/0060_auto_20200717_1331.py | katyjg/mxlive | 0 | 12765840 | # Generated by Django 3.0.6 on 2020-07-17 19:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lims', '0059_auto_20200717_1318'),
]
operations = [
migrations.RenameField(
model_name='supportrecord',
old_name='user',
new_name='project',
),
]
| 1.570313 | 2 |
lobpy/datareader/lobster.py | bohblue2/lobpy | 0 | 12765841 | """
Copyright (c) 2018, University of Oxford, Rama Cont and ETH Zurich, <NAME>
This module provides the helper functions and the class LOBSTERReader, a subclass of OBReader to read in limit order book data in lobster format.
"""
######
# Imports
######
import csv
import math
import warnings
import numpy as np
from lobpy.datareader.orderbook import *
# LOBSTER specific file name functions
def _split_lobster_filename(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
filename2,_ = filename.split(".")
ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels = filename2.split("_")
return ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels
def split_lobster_filename(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
return _split_lobster_filename(filename)
def _split_lobster_filename_core(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
filename2, _ = filename.split(".")
ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels = filename2.split("_")
return ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels
def _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels):
return "_".join((ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels))
def create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels):
return _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels)
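# Example (hypothetical ticker and date, following the LOBSTER sample naming scheme):
#   _create_lobster_filename("AMZN", "2012-06-21", "34200000", "57600000", "orderbook", "10")
#   -> "AMZN_2012-06-21_34200000_57600000_orderbook_10"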
def _get_time_stamp_before(time_stamps, time_stamp):
    ''' Returns the value and index of the last time point in time_stamps before or equal time_stamp '''
    time = time_stamps[0]
    index = int(0)
    if time == time_stamp:
        # time_stamp found at index 0
        return time, index
    if time > time_stamp:
        raise LookupError("Time stamp data start at {} which is after the requested time stamp: {}".format(time, time_stamp))
    for ctr, time_now in enumerate(time_stamps[1:]):
        if time_now > time_stamp:
            return time, ctr
        time = time_now
        index = ctr + 1
    return time, index
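# e.g. _get_time_stamp_before([0.0, 1.5, 2.5], 2.0) returns (1.5, 1)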
class LOBSTERReader(OBReader):
"""
OBReader object specified for using LOBSTER files
----------
params:
ticker_str,
date_str,
time_start_str,
time_end_str,
num_levels_str,
time_start_calc_str,
time_end_calc_str
Example usage:
to create an object
>>> lobreader = LOBSTERReader("SYMBOL", "2012-06-21", "34200000", "57600000", "10")
    read market depth on a uniform time grid with num_observations observations
    >>> dt, time_stamps, depth_bid, depth_ask = lobreader.load_marketdepth(num_observations)
    read the price processes on the time grid specified above
    >>> dt2, time_stamps2, price_mid, price_bid, price_ask = lobreader.load_prices(None)
"""
def __init__(
self,
ticker_str,
date_str,
time_start_str,
time_end_str,
num_levels_str,
time_start_calc_str="",
time_end_calc_str="",
num_levels_calc_str=""
):
self.ticker_str = ticker_str
self.date_str = date_str
self.lobfilename = _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, ORDERBOOK_FILE_ID, num_levels_str)
self.msgfilename = _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, MESSAGE_FILE_ID, num_levels_str)
self.time_start = int(time_start_str)
self.time_end = int(time_end_str)
self.num_levels = int(num_levels_str)
self.time_start_calc = int(time_start_str)
self.time_end_calc = int(time_end_str)
self.num_levels_calc = int(num_levels_str)
if not (num_levels_calc_str == ""):
self.num_levels_calc = int(num_levels_calc_str)
self.data = dict()
if not (time_start_calc_str == ""):
self.time_start_calc = int(time_start_calc_str)
if not (time_end_calc_str == ""):
self.time_end_calc = int(time_end_calc_str)
def set_timecalc(self, time_start_calc_str, time_end_calc_str):
self.time_start_calc = int(time_start_calc_str)
self.time_end_calc = int(time_end_calc_str)
return True
def create_filestr(self, identifier_str, num_levels=None):
""" Creates lobster type file string """
if num_levels is None:
num_levels = self.num_levels
return _create_lobster_filename(self.ticker_str, self.date_str, str(self.time_start_calc), str(self.time_end_calc), identifier_str, str(num_levels))
def average_profile_tt(self, num_levels_calc_str="" , write_outputfile = False):
""" Computes the average order book profile, averaged over trading time, from the csv sourcefile. To avoid numerical errors by summing up large numbers, the Kahan Summation algorithm is used for mean computation
----------
args:
        num_levels_calc_str: number of levels which should be considered for the output
        write_outputfile: if True, then the average order book profile is stored as a csv file
----------
output:
(mean_bid, mean_ask) in format of numpy arrays
"""
print("Starting computation of average order book profile in file %s."%self.lobfilename)
num_levels_calc = self.num_levels
if not(num_levels_calc_str == ""):
num_levels_calc = int(num_levels_calc_str)
if self.num_levels < num_levels_calc:
raise DataRequestError("Number of levels in data ({0}) is smaller than number of levels requested for calculation ({1}).".format(self.num_levels, num_levels_calc))
tempval1 = 0.0
tempval2 = 0.0
comp = np.zeros(num_levels_calc * 2) # compensator for lost low-order bits
mean = np.zeros(num_levels_calc * 2) # running mean
with open(self.lobfilename+".csv", newline='') as csvfile:
lobdata = csv.reader(csvfile, delimiter=',')
num_lines = sum(1 for row in lobdata)
print("Loaded successfully. Number of lines: " + str(num_lines))
csvfile.seek(0) # reset iterator to beginning of the file
print("Start calculation.")
for row in lobdata: # data are read as list of strings
                currorders = np.fromiter(row[1:(4*num_levels_calc + 1):2], np.float) # parse to float
for ctr, currorder in enumerate(currorders):
#print(lobstate)
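                    # Kahan update: remove the carried compensation from the new
                    # term, accumulate into the running mean, then recompute the
                    # lost low-order bits for the next iteration.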
tempval1 = currorder / num_lines - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
print("Calculation finished.")
# Add data to self.data
self.add_data("--".join(("ttime-"+AV_ORDERBOOK_FILE_ID, "bid")), mean[1::2])
self.add_data("--".join(("ttime-"+AV_ORDERBOOK_FILE_ID, "ask")), mean[0::2])
if not write_outputfile:
return mean[1::2], mean[0::2] # LOBster format: bid data at odd * 2, LOBster format: ask data at even * 2
print("Write output file.")
outfilename = self.create_filestr("-".join(("ttime",AV_ORDERBOOK_FILE_ID)) , str(num_levels_calc))
outfilename = ".".join((outfilename,'csv'))
with open(outfilename, 'w') as outfile:
wr = csv.writer(outfile)
wr.writerow(mean[1::2]) # LOBster format: bid data at odd * 2
wr.writerow(mean[0::2]) # LOBster format: ask data at even * 2
print("Average order book saved as %s."%outfilename)
return mean[1::2], mean[0::2]
def average_profile(
self,
num_levels_calc_str="",
write_outputfile = False
):
""" Returns the average oder book profile from the csv sourcefile, averaged in real time. To avoid numerical errors by summing up large numbers, the Kahan Summation algorithm is used for mean computation """
if num_levels_calc_str == "":
num_levels_calc = self.num_levels_calc
else:
num_levels_calc = int(num_levels_calc_str)
if int(self.num_levels) < num_levels_calc:
            raise DataRequestError("Number of levels in data ({0}) is smaller than number of levels requested for calculation ({1}).".format(self.num_levels, num_levels_calc))
time_start = float(self.time_start_calc / 1000.)
time_end = float(self.time_end_calc / 1000.)
mean = np.zeros(num_levels_calc * 2) # running mean
tempval1 = 0.0
tempval2 = 0.0
linectr = 0
comp = np.zeros(num_levels_calc * 2) # compensator for lost low-order bits
flag = 0
with open(".".join((self.lobfilename, 'csv')), newline='') as orderbookfile, open(".".join((self.msgfilename, 'csv')), newline='') as messagefile:
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
nexttime = float(rowMES[0]) # t(0)
if time_end < nexttime:
# In this case there are no entries in the file for the selected time interval. Array of 0s is returned
warnings.warn("The first entry in the data files is after the end of the selected time period. Arrays of 0s will be returned as mean.")
return mean[1::2], mean[0::2]
            currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to float, extract bucket volumes only at t(0)
if time_start <= nexttime:
flag = 1
for rowLOB, rowMES in zip(lobdata,messagedata): # data are read as list of string, iterator now starts at second entry (since first has been exhausted above)
currtime = nexttime #(t(i))
nexttime = float(rowMES[0]) #(t(i+1))
if flag == 0:
if time_start <= nexttime:
# Start calculation
flag = 1
currtime = time_start
for ctr, currbucket in enumerate(currprofile):
tempval1 = (nexttime - currtime) / float(time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
else:
if time_end < nexttime:
# Finish calculation
nexttime = time_end
for ctr, currbucket in enumerate(currprofile):
#print(currprofile)
tempval1 = (nexttime - currtime) / float(time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
if time_end == nexttime:
# Finish calculation
break
## Update order book to time t(i+1)
                currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to float, extract bucket volumes only
else: # executed only when not quitted by break, i.e. time_end >= time at end of file in this case we extrapolate
warnings.warn("Extrapolated order book data since time_end exceed time at end of the file by %f seconds."%(time_end - nexttime))
currtime = nexttime
nexttime = time_end
for ctr, currbucket in enumerate(currprofile):
#print(lobstate)
tempval1 = (nexttime - currtime) / (time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
print("Calculation finished.")
# Add data to self.data
self.add_data("--".join((AV_ORDERBOOK_FILE_ID, "bid")), mean[1::2])
self.add_data("--".join((AV_ORDERBOOK_FILE_ID, "ask")), mean[0::2])
if not write_outputfile:
return mean[1::2], mean[0::2] # LOBster format: bid data at odd * 2, LOBster format: ask data at even * 2
print("Write output file.")
outfilename = self.create_filestr(AV_ORDERBOOK_FILE_ID , str(num_levels_calc))
outfilename = ".".join((outfilename,'csv'))
with open(outfilename, 'w') as outfile:
wr = csv.writer(outfile)
wr.writerow(mean[1::2]) # LOBster format: bid data at odd * 2
wr.writerow(mean[0::2]) # LOBster format: ask data at even * 2
print("Average order book saved as %s."%outfilename)
return mean[1::2], mean[0::2]
def _load_ordervolume(
self,
num_observations,
num_levels_calc,
profile2vol_fct=np.sum
):
''' Extracts the volume of orders in the first num_level buckets at a uniform time grid of num_observations observations from the interval [time_start_calc, time_end_calc]. The volume process is extrapolated constantly on the last level in the file, for the case that time_end_calc is larger than the last time stamp in the file. profile2vol_fct allows to specify how the volume should be summarized from the profile. Typical choices are np.sum or np.mean.
Note: Due to possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
ctr_obs = 0 # counter for the outer of the
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
volume_bid = np.zeros(num_observations)
volume_ask = np.zeros(num_observations)
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float)
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of volume in the file. Keep processes constant
if (ctr_obs > 0):
volume_bid[ctr_obs] = volume_bid[ctr_obs-1]
volume_ask[ctr_obs] = volume_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
volume_bid[ctr_obs] = 0.
volume_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# extract order volume from profile
volume_bid[ctr_obs] = profile2vol_fct(currprofile[1::2])
volume_ask[ctr_obs] = profile2vol_fct(currprofile[0::2])
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update currprofile and time_file
                    currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to float, extract bucket volumes only
time_file = float(rowMES[0])
if (file_ended_line < num_observations):
warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
return dt, time_stamps, volume_bid, volume_ask
def _load_ordervolume_levelx(
self,
num_observations,
level
):
        ''' Extracts the volume of orders at bid/ask level `level` on a uniform time grid of num_observations observations from the interval [time_start_calc, time_end_calc]. The volume processes are extrapolated constantly from the last entry in the file, for the case that time_end_calc is larger than the last time stamp in the file.
Note: Due to possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
ctr_obs = 0 # counter for the outer of the
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
volume_bid = np.zeros(num_observations)
volume_ask = np.zeros(num_observations)
# Ask level x is at position (x-1)*4 + 1, bid level x is at position (x-1)*4 + 3
x_bid = (int(level) - 1) * 4 + 3
x_ask = (int(level) - 1) * 4 + 1
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
#currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float)
currbid = float(rowLOB[x_bid])
currask = float(rowLOB[x_ask])
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of volume in the file. Keep processes constant
if (ctr_obs > 0):
volume_bid[ctr_obs] = volume_bid[ctr_obs-1]
volume_ask[ctr_obs] = volume_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
volume_bid[ctr_obs] = 0.
volume_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# extract order volume from profile
volume_bid[ctr_obs] = currbid
volume_ask[ctr_obs] = currask
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update currprofile and time_file
#currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to integer, extract bucket volumes only
currbid = float(rowLOB[x_bid])
currask = float(rowLOB[x_ask])
time_file = float(rowMES[0])
if (file_ended_line < num_observations):
warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
return dt, time_stamps, volume_bid, volume_ask
def _load_ordervolume_full(
self,
num_levels_calc,
profile2vol_fct=np.sum,
ret_np=True
):
        ''' Extracts the volume of orders in the first num_level buckets from the interval [time_start_calc, time_end_calc]. profile2vol_fct allows to specify how the volume should be summarized from the profile. Typical choices are np.sum or np.mean. If ret_np==False the outputs are lists, otherwise numpy arrays.
Note: Due to possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
time_stamps = []
volume_bid = []
volume_ask = []
index_start = -1
index_end = -1
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
for ctrRow, (rowLOB, rowMES) in enumerate(zip(lobdata, messagedata)):
time_now = float(rowMES[0])
if (index_start == -1) and (time_now >= time_start_calc):
index_start = ctrRow
if (index_end == -1) and (time_now > time_end_calc):
index_end = ctrRow
break
time_stamps.append(time_now)
                currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to float, extract bucket volumes only
volume_bid.append(profile2vol_fct(currprofile[1::2]))
volume_ask.append(profile2vol_fct(currprofile[0::2]))
if index_end == -1:
#file end reached
index_end = len(time_stamps)
if ret_np:
return np.array(time_stamps[index_start:index_end]), np.array(volume_bid[index_start:index_end]), np.array(volume_ask[index_start:index_end])
return time_stamps[index_start:index_end], volume_bid[index_start:index_end], volume_ask[index_start:index_end]
def _load_prices(
self,
num_observations
):
''' private method to implement how the price data are loaded from the files '''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
ctr_obs = 0 # counter for the outer of the
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
prices_bid = np.empty(num_observations)
prices_ask = np.empty(num_observations)
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of prices in the file. Keep processes constant
if (ctr_obs > 0):
prices_bid[ctr_obs] = prices_bid[ctr_obs-1]
prices_ask[ctr_obs] = prices_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
prices_bid[ctr_obs] = 0.
prices_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# LOBster stores best ask and bid price in resp. 1st and 3rd column, price in unit USD*10000
prices_bid[ctr_obs] = float(rowLOB[2]) / float(10000)
prices_ask[ctr_obs] = float(rowLOB[0]) / float(10000)
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update time_file
time_file = float(rowMES[0])
if (file_ended_line < num_observations-1):
warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
            while ctr_obs < (num_observations-1):
                prices_bid[ctr_obs+1] = prices_bid[ctr_obs]
                prices_ask[ctr_obs+1] = prices_ask[ctr_obs]
                ctr_obs += 1
return dt, time_stamps, prices_bid, prices_ask
def _load_profile_snapshot_lobster(
self,
time_stamp,
num_levels_calc=None
):
''' Returns a two numpy arrays with snapshots of the bid- and ask-side of the order book at a given time stamp
Output:
bid_prices, bid_volume, ask_prices, ask_volume
'''
#convert time from msec to sec
time_stamp = float(time_stamp) / 1000.
if num_levels_calc is None:
num_levels_calc = self.num_levels_calc
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
time_file = float(rowMES[0])
            if time_file > time_stamp:
                raise LookupError("Time data in the file start at {} which is after the requested time stamp: {}".format(time_file, time_stamp))
if time_file == time_stamp:
# file format is [ask level, ask volume, bid level, bid volume, ask level, ....]
#conversion of price levels to USD
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
for rowMES in messagedata:
time_file = float(rowMES[0])
if time_file > time_stamp:
# file format is [ask level, ask volume, bid level, bid volume, ask level, ....]
#conversion of price levels to USD
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
break
rowLOB = next(lobdata)
else:
# time in file did not exceed time stamp to the end. Return last entries of the file
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
return bid_prices, bid_volume, ask_prices, ask_volume
def load_profile_snapshot(
self,
time_stamp,
num_levels_calc=None
):
''' Returns a two numpy arrays with snapshots of the bid- and ask-side of the order book at a given time stamp
Output:
bid_prices, bid_volume, ask_prices, ask_volume
'''
return self._load_profile_snapshot_lobster(time_stamp, num_levels_calc)
# END LOBSTERReader
| 3.078125 | 3 |
scripts/setup-xbee.py | jamesleesaunders/pi-alertme | 16 | 12765842 | <filename>scripts/setup-xbee.py
#!/usr/bin/python
# coding: utf-8
# Filename: setup-_xbee.py
# Description: Configure XBee
# Author: <NAME> [<EMAIL>]
# Copyright: Copyright (C) 2017 <NAME>
# License: MIT
import serial
from xbee import ZigBee
import pprint
import time
import sys
pp = pprint.PrettyPrinter(indent=4)
def receive_message(message):
if message and 'command' in message:
pp.pprint(message)
def xbee_error(error):
print('XBee Error: %s', error)
commands = {
    'addresses': {
        'Short Address': {'command': 'MY', 'parameter': None},
        'Long Address High': {'command': 'SH', 'parameter': None},
        'Long Address Low': {'command': 'SL', 'parameter': None}
    },
'setup_hub': {
'ZigBee Stack Profile': {'command': 'ZS', 'parameter': b'\x02'},
'Encryption Enable': {'command': 'EE', 'parameter': b'\x01'},
'Encryption Options': {'command': 'EO', 'parameter': b'\x01'},
'Encryption Key': {'command': 'KY', 'parameter': b'\x5A\x69\x67\x42\x65\x65\x41\x6C\x6C\x69\x61\x6E\x63\x65\x30\x39'},
'API Enable': {'command': 'AP', 'parameter': b'\x02'},
'API Output Mode': {'command': 'AO', 'parameter': b'\x03'}
},
'setup_device': {
'ZigBee Stack Profile': {'command': 'ZS', 'parameter': b'\x02'},
'Encryption Enable': {'command': 'EE', 'parameter': b'\x01'},
'Encryption Options': {'command': 'EO', 'parameter': b'\x00'},
'Encryption Key': {'command': 'KY', 'parameter': b''},
'API Enable': {'command': 'AP', 'parameter': b'\x02'},
'API Output Mode': {'command': 'AO', 'parameter': b'\x03'}
},
}
if len(sys.argv) < 2:
print "Missing command argument {}".format(commands.keys())
else:
action = sys.argv[1]
if action in commands:
XBEE_PORT = '/dev/tty.usbserial-A1014P7W'
XBEE_BAUD = 9600
ser = serial.Serial(XBEE_PORT, XBEE_BAUD)
zb = ZigBee(ser=ser, callback=receive_message, error_callback=xbee_error, escaped=True)
print "Running", action, "...."
for name, command in commands[action].iteritems():
print "Sending", name
zb.at(**command)
time.sleep(3)
zb.halt()
ser.close()
else:
print "Invalid command '{}'".format(action)
| 2.203125 | 2 |
test/test_nvhpc.py | adegomme/hpc-container-maker | 0 | 12765843 | <filename>test/test_nvhpc.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the nvhpc module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import aarch64, centos, docker, ppc64le, ubuntu, x86_64
from hpccm.building_blocks.nvhpc import nvhpc
class Test_nvhpc(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@x86_64
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default HPC SDK building block"""
n = nvhpc(eula=True)
self.assertMultiLineEqual(str(n),
r'''# NVIDIA HPC SDK version 22.2
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates && \
rm -rf /var/lib/apt/lists/*
RUN echo "deb [trusted=yes] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
nvhpc-22-2-cuda-multi && \
rm -rf /var/lib/apt/lists/*
ENV CPATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nvshmem/include:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nccl/include:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/extras/qd/include/qd:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/math_libs/include:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi/include:$CPATH \
LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nvshmem/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nccl/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/math_libs/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/cuda/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi/lib:$LD_LIBRARY_PATH \
MANPATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/man:$MANPATH \
PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nvshmem/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nccl/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/profilers/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/cuda/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi/bin:$PATH''')
@x86_64
@centos
@docker
def test_defaults_centos(self):
"""Default HPC SDK building block"""
n = nvhpc(eula=True)
self.assertMultiLineEqual(str(n),
r'''# NVIDIA HPC SDK version 22.2
RUN yum install -y \
ca-certificates && \
rm -rf /var/cache/yum/*
RUN yum install -y yum-utils && \
yum-config-manager --add-repo https://developer.download.nvidia.com/hpc-sdk/rhel/nvhpc.repo && \
yum install -y \
nvhpc-cuda-multi-22.2 && \
rm -rf /var/cache/yum/*
ENV CPATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nvshmem/include:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nccl/include:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/extras/qd/include/qd:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/math_libs/include:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi/include:$CPATH \
LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nvshmem/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nccl/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/math_libs/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/cuda/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi/lib:$LD_LIBRARY_PATH \
MANPATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/man:$MANPATH \
PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nvshmem/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/nccl/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/profilers/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/cuda/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi/bin:$PATH''')
@x86_64
@centos
@docker
def test_package_centos(self):
"""Local package"""
n = nvhpc(eula=True,
package='nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz')
self.assertMultiLineEqual(str(n),
r'''# NVIDIA HPC SDK version 20.7
COPY nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz
RUN yum install -y \
bc \
gcc \
gcc-c++ \
gcc-gfortran \
libatomic \
numactl-libs \
openssh-clients \
wget \
which && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && tar -x -f /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz -C /var/tmp -z && \
cd /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi && NVHPC_ACCEPT_EULA=accept NVHPC_INSTALL_DIR=/opt/nvidia/hpc_sdk NVHPC_SILENT=true ./install && \
rm -rf /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz
ENV CPATH=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nvshmem/include:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nccl/include:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/extras/qd/include/qd:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/math_libs/include:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/mpi/include:$CPATH \
LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nvshmem/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nccl/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/math_libs/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/cuda/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/mpi/lib:$LD_LIBRARY_PATH \
MANPATH=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/man:$MANPATH \
PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nvshmem/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nccl/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/profilers/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/cuda/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/mpi/bin:$PATH''')
@x86_64
@centos
@docker
def test_extended_environment(self):
"""Extended environment"""
n = nvhpc(eula=True, extended_environment=True,
package='nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz')
self.assertMultiLineEqual(str(n),
r'''# NVIDIA HPC SDK version 20.7
COPY nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz
RUN yum install -y \
bc \
gcc \
gcc-c++ \
gcc-gfortran \
libatomic \
numactl-libs \
openssh-clients \
wget \
which && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && tar -x -f /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz -C /var/tmp -z && \
cd /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi && NVHPC_ACCEPT_EULA=accept NVHPC_INSTALL_DIR=/opt/nvidia/hpc_sdk NVHPC_SILENT=true ./install && \
rm -rf /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi /var/tmp/nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz
ENV CC=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/bin/nvc \
CPATH=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nvshmem/include:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nccl/include:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/extras/qd/include/qd:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/math_libs/include:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/mpi/include:$CPATH \
CPP=cpp \
CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/bin/nvc++ \
F77=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/bin/nvfortran \
F90=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/bin/nvfortran \
FC=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/bin/nvfortran \
LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nvshmem/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nccl/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/math_libs/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/cuda/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/mpi/lib:$LD_LIBRARY_PATH \
MANPATH=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/man:$MANPATH \
PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nvshmem/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/nccl/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/profilers/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/compilers/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/cuda/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/20.7/comm_libs/mpi/bin:$PATH''')
@aarch64
@ubuntu
@docker
def test_aarch64(self):
"""Default HPC SDK building block on aarch64"""
n = nvhpc(cuda_multi=False, eula=True, version='21.2', tarball=True)
self.assertMultiLineEqual(str(n),
r'''# NVIDIA HPC SDK version 21.2
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
bc \
debianutils \
g++ \
gcc \
gfortran \
libatomic1 \
libnuma1 \
openssh-client \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://developer.download.nvidia.com/hpc-sdk/21.2/nvhpc_2021_212_Linux_aarch64_cuda_11.2.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/nvhpc_2021_212_Linux_aarch64_cuda_11.2.tar.gz -C /var/tmp -z && \
cd /var/tmp/nvhpc_2021_212_Linux_aarch64_cuda_11.2 && NVHPC_ACCEPT_EULA=accept NVHPC_INSTALL_DIR=/opt/nvidia/hpc_sdk NVHPC_SILENT=true ./install && \
rm -rf /var/tmp/nvhpc_2021_212_Linux_aarch64_cuda_11.2 /var/tmp/nvhpc_2021_212_Linux_aarch64_cuda_11.2.tar.gz
ENV CPATH=/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/nvshmem/include:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/nccl/include:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/compilers/extras/qd/include/qd:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/math_libs/include:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/mpi/include:$CPATH \
LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/nvshmem/lib:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/nccl/lib:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/math_libs/lib64:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/compilers/lib:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/cuda/lib64:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/mpi/lib:$LD_LIBRARY_PATH \
MANPATH=/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/compilers/man:$MANPATH \
PATH=/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/nvshmem/bin:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/nccl/bin:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/profilers/bin:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/compilers/bin:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/cuda/bin:/opt/nvidia/hpc_sdk/Linux_aarch64/21.2/comm_libs/mpi/bin:$PATH''')
@ppc64le
@ubuntu
@docker
def test_ppc64le(self):
"""Default HPC SDK building block on ppc64le"""
n = nvhpc(eula=True, cuda_multi=False, cuda='11.0', version='20.7',
tarball=True)
self.assertMultiLineEqual(str(n),
r'''# NVIDIA HPC SDK version 20.7
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
bc \
debianutils \
g++ \
gcc \
gfortran \
libatomic1 \
libnuma1 \
openssh-client \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://developer.download.nvidia.com/hpc-sdk/20.7/nvhpc_2020_207_Linux_ppc64le_cuda_11.0.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/nvhpc_2020_207_Linux_ppc64le_cuda_11.0.tar.gz -C /var/tmp -z && \
cd /var/tmp/nvhpc_2020_207_Linux_ppc64le_cuda_11.0 && NVHPC_ACCEPT_EULA=accept NVHPC_DEFAULT_CUDA=11.0 NVHPC_INSTALL_DIR=/opt/nvidia/hpc_sdk NVHPC_SILENT=true ./install && \
rm -rf /var/tmp/nvhpc_2020_207_Linux_ppc64le_cuda_11.0 /var/tmp/nvhpc_2020_207_Linux_ppc64le_cuda_11.0.tar.gz
ENV CPATH=/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/nvshmem/include:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/nccl/include:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/compilers/extras/qd/include/qd:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/math_libs/include:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/mpi/include:$CPATH \
LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/nvshmem/lib:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/nccl/lib:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/math_libs/lib64:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/compilers/lib:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/cuda/lib64:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/mpi/lib:$LD_LIBRARY_PATH \
MANPATH=/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/compilers/man:$MANPATH \
PATH=/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/nvshmem/bin:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/nccl/bin:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/profilers/bin:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/compilers/bin:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/cuda/bin:/opt/nvidia/hpc_sdk/Linux_ppc64le/20.7/comm_libs/mpi/bin:$PATH''')
@x86_64
@ubuntu
@docker
def test_runtime_ubuntu(self):
"""Runtime"""
n = nvhpc(eula=True, redist=['compilers/lib/*'])
r = n.runtime()
self.assertMultiLineEqual(r,
r'''# NVIDIA HPC SDK
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libatomic1 \
libnuma1 \
openssh-client && \
rm -rf /var/lib/apt/lists/*
COPY --from=0 /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/REDIST/compilers/lib/* /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/lib/
COPY --from=0 /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi
ENV LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/lib:$LD_LIBRARY_PATH \
PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/mpi/bin:$PATH''')
@x86_64
@centos
@docker
def test_runtime_centos(self):
"""Runtime"""
n = nvhpc(eula=True, mpi=False,
redist=['comm_libs/11.0/nccl/lib/libnccl.so',
'compilers/lib/*',
'math_libs/11.0/lib64/libcufft.so.10',
'math_libs/11.0/lib64/libcublas.so.11'])
r = n.runtime()
self.assertMultiLineEqual(r,
r'''# NVIDIA HPC SDK
RUN yum install -y \
libatomic \
numactl-libs \
openssh-clients && \
rm -rf /var/cache/yum/*
COPY --from=0 /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/REDIST/comm_libs/11.0/nccl/lib/libnccl.so /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/11.0/nccl/lib/libnccl.so
COPY --from=0 /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/REDIST/compilers/lib/* /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/lib/
COPY --from=0 /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/REDIST/math_libs/11.0/lib64/libcufft.so.10 /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/math_libs/11.0/lib64/libcufft.so.10
COPY --from=0 /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/REDIST/math_libs/11.0/lib64/libcublas.so.11 /opt/nvidia/hpc_sdk/Linux_x86_64/22.2/math_libs/11.0/lib64/libcublas.so.11
ENV LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/comm_libs/11.0/nccl/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/compilers/lib:/opt/nvidia/hpc_sdk/Linux_x86_64/22.2/math_libs/11.0/lib64:$LD_LIBRARY_PATH''')
def test_toolchain(self):
"""Toolchain"""
n = nvhpc()
tc = n.toolchain
self.assertEqual(tc.CC, 'nvc')
self.assertEqual(tc.CXX, 'nvc++')
self.assertEqual(tc.FC, 'nvfortran')
self.assertEqual(tc.F77, 'nvfortran')
self.assertEqual(tc.F90, 'nvfortran')
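# Conventional direct-execution entry point (an addition, not in the original
# file), so the module can be run on its own as well as through a test runner:
if __name__ == '__main__':
    unittest.main()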
| 2.140625 | 2 |
resources/scripts/pytest_otel/docs/demos/test/test_demo.py | and-blk/apm-pipeline-library | 0 | 12765844 | <reponame>and-blk/apm-pipeline-library
# Copyright The OpenTelemetry Authors
# SPDX-License-Identifier: Apache-2.0
pytest_plugins = ["pytester"]
import time
import pytest
def test_basic():
    # sleep so the generated telemetry span has a visible duration
    time.sleep(5)
def test_success():
assert True
def test_failure():
assert 1 < 0
def test_failure_code():
    # deliberately raises ZeroDivisionError to demonstrate a test that errors out
    d = 1 / 0
@pytest.mark.skip
def test_skip():
assert True
@pytest.mark.xfail(reason="foo bug")
def test_xfail():
assert False
@pytest.mark.xfail(run=False)
def test_xfail_no_run():
assert False
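# Running this module with pytest exercises one of each outcome the
# OpenTelemetry plugin can report: passes (test_basic, test_success),
# failures (test_failure, test_failure_code), a skip (test_skip), an expected
# failure (test_xfail) and an expected failure that is not run
# (test_xfail_no_run). For example:
#   pytest -v test_demo.py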
| 1.679688 | 2 |
survey/utils/__init__.py | ericazhou7/uSurvey | 5 | 12765845 | <reponame>ericazhou7/uSurvey
__author__ = 'mnandri'
| 0.875 | 1 |
packages/python/setup.py | ufora/ufora | 571 | 12765846 | <filename>packages/python/setup.py<gh_stars>100-1000
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from distutils.core import Extension
import glob
import numpy
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
NEWS = open(os.path.join(here, 'NEWS.txt')).read()
def read_package_version():
    version_path = 'pyfora/_version.py'
    with open(version_path, 'rt') as version_file:
        version_text = version_file.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_text, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Can't read version string from '%s'." % (version_path,))
version = read_package_version()
install_requires = ['futures', 'socketIO-client>=0.6.5', 'numpy', 'wsaccel', 'websocket-client==0.37.0']
ext_modules = []
extra_compile_args=['-std=c++11']
pythonObjectRehydratorModule = Extension('pyfora.PythonObjectRehydrator',
language='c++',
extra_compile_args=extra_compile_args,
sources=['pyfora/src/pythonObjectRehydratorModule.cpp',
'pyfora/src/BinaryObjectRegistry.cpp',
'pyfora/src/StringBuilder.cpp',
'pyfora/src/PureImplementationMappings.cpp',
'pyfora/src/PyObjectUtils.cpp',
'pyfora/src/ObjectRegistry.cpp',
'pyfora/src/IRToPythonConverter.cpp',
'pyfora/src/NamedSingletons.cpp',
'pyfora/src/BinaryObjectRegistryHelpers.cpp',
'pyfora/src/FreeVariableMemberAccessChain.cpp',
'pyfora/src/Json.cpp',
'pyfora/src/PyAbortSingletons.cpp',
'pyfora/src/ModuleLevelObjectIndex.cpp',
'pyfora/src/ScopedPyThreads.cpp',
'pyfora/src/PythonObjectRehydrator.cpp'] +
glob.glob('pyfora/src/TypeDescriptions/*.cpp') +
glob.glob('pyfora/src/serialization/*.cpp'),
include_dirs=[numpy.get_include()]
)
ext_modules.append(pythonObjectRehydratorModule)
stringbuildermodule = Extension('pyfora.StringBuilder',
language='c++',
extra_compile_args=['-std=c++11'],
sources=['pyfora/src/StringBuilder.cpp',
'pyfora/src/stringbuildermodule.cpp']
)
ext_modules.append(stringbuildermodule)
binaryObjectRegistryModule = Extension('pyfora.BinaryObjectRegistry',
language='c++',
extra_compile_args=extra_compile_args,
sources=['pyfora/src/BinaryObjectRegistry.cpp',
'pyfora/src/PyObjectWalker.cpp',
'pyfora/src/PureImplementationMappings.cpp',
'pyfora/src/binaryobjectregistrymodule.cpp',
'pyfora/src/StringBuilder.cpp',
'pyfora/src/FileDescription.cpp',
'pyfora/src/PyObjectUtils.cpp',
'pyfora/src/Exceptions.cpp',
'pyfora/src/PyAstUtil.cpp',
'pyfora/src/FreeVariableMemberAccessChain.cpp',
'pyfora/src/PyAstFreeVariableAnalyses.cpp',
'pyfora/src/PyforaInspect.cpp',
'pyfora/src/FreeVariableResolver.cpp',
'pyfora/src/Ast.cpp',
'pyfora/src/UnresolvedFreeVariableExceptions.cpp',
'pyfora/src/BinaryObjectRegistryHelpers.cpp',
'pyfora/src/Json.cpp',
'pyfora/src/ModuleLevelObjectIndex.cpp']
)
ext_modules.append(binaryObjectRegistryModule)
pyObjectWalkerModule = Extension('pyfora.PyObjectWalker',
language='c++',
extra_compile_args=extra_compile_args,
sources=['pyfora/src/pyobjectwalkermodule.cpp',
'pyfora/src/PyObjectWalker.cpp',
'pyfora/src/PureImplementationMappings.cpp',
'pyfora/src/BinaryObjectRegistry.cpp',
'pyfora/src/FileDescription.cpp',
'pyfora/src/StringBuilder.cpp',
'pyfora/src/PyObjectUtils.cpp',
'pyfora/src/FreeVariableResolver.cpp',
'pyfora/src/Exceptions.cpp',
'pyfora/src/PyAstUtil.cpp',
'pyfora/src/FreeVariableMemberAccessChain.cpp',
'pyfora/src/PyAstFreeVariableAnalyses.cpp',
'pyfora/src/PyforaInspect.cpp',
'pyfora/src/Ast.cpp',
'pyfora/src/UnresolvedFreeVariableExceptions.cpp',
'pyfora/src/BinaryObjectRegistryHelpers.cpp',
'pyfora/src/Json.cpp',
'pyfora/src/ModuleLevelObjectIndex.cpp']
)
ext_modules.append(pyObjectWalkerModule)
setup(
name='pyfora',
version=version,
description="A library for parallel execution of Python code in the Ufora runtime",
long_description=README + '\n\n' + NEWS,
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering'
],
keywords='ufora fora parallel remote data-science machine-learning',
author='<NAME>.',
author_email='<EMAIL>',
url='http://www.ufora.com/',
license='Apache',
packages=find_packages('.'),
package_dir={'': '.'},
package_data={
'': ['*.txt', '*.rst'],
'pyfora': ['fora/**/*.fora']
},
zip_safe=False,
install_requires=install_requires,
entry_points={
'console_scripts':
['pyfora_aws=pyfora.aws.pyfora_aws:main']
},
ext_modules=ext_modules
)
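# Build note (an addition, not part of the original file): the C++ extension
# modules above need a C++11-capable compiler plus the numpy headers at build
# time. Typical source builds:
#   pip install .
#   python setup.py build_ext --inplace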
| 1.742188 | 2 |
itdagene/app/comments/views.py | itdagene-ntnu/itdagene | 9 | 12765847 | from django.contrib.auth.decorators import permission_required
from django.contrib.messages import ERROR, add_message
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from itdagene.app.comments.forms import CommentForm
from itdagene.app.mail.tasks import send_comment_email
@permission_required("comments.add_comment")
def add(request):
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.date = timezone.now()
instance.save()
send_comment_email(instance)
return redirect(instance.object.get_absolute_url())
        else:
            add_message(request, ERROR, _("Could not post comment"))
            obj = form.instance.object  # the commented-on object (avoids shadowing the builtin)
            return redirect(obj.get_absolute_url())
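# A hypothetical URL wiring sketch (not from the original app); the route
# path and name below are illustrative assumptions only.
#
# from django.urls import path
# from itdagene.app.comments import views
#
# urlpatterns = [
#     path("comments/add/", views.add, name="comments_add"),
# ]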
| 2.046875 | 2 |
PyMOTW/source/sqlite3/sqlite3_date_types.py | axetang/AxePython | 1 | 12765848 | <reponame>axetang/AxePython
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 <NAME>. All rights reserved.
#
"""Query tasks in the database.
"""
#end_pymotw_header
import sqlite3
db_filename = 'todo.db'
sql = "select id, details, deadline from task"
def show_deadline(conn):
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute(sql)
row = cursor.fetchone()
for col in ['id', 'details', 'deadline']:
print(' {:<8} {!r:<26} {}'.format(
col, row[col], type(row[col])))
return
print('Without type detection:')
with sqlite3.connect(db_filename) as conn:
show_deadline(conn)
print('\nWith type detection:')
with sqlite3.connect(db_filename,
detect_types=sqlite3.PARSE_DECLTYPES,
) as conn:
show_deadline(conn)
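# Note on type detection: PARSE_DECLTYPES only converts columns whose declared
# type matches a registered converter, so the difference shown above assumes
# the task table declares the column as "deadline date"; sqlite3's built-in
# "date" converter then returns a datetime.date instead of a str.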
| 3.0625 | 3 |
linuxmonitor/routing.py | muthuubalakan/Linux-Monitor | 1 | 12765849 | from django.urls import path
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from monitor.consumers import MemoryinfoConsumer
application = ProtocolTypeRouter({
"websocket": AuthMiddlewareStack(
URLRouter([
path("monitor/stream/", MemoryinfoConsumer),
]),
),
})
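# Deployment note (an assumption about the surrounding project, not shown in
# this file): under Channels 2 this router is activated via the settings
# module, e.g.
#   ASGI_APPLICATION = "linuxmonitor.routing.application"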
| 1.734375 | 2 |
test_tools.py | gonzatorte/sw-utils | 0 | 12765850 |
class SWTestCase:
    # Bound test callables to run; subclasses or callers populate this list.
    test_to_run = []

    def __init__(self):
        pass

    @classmethod
    def configure(cls):
        # One-time class-level configuration hook; override in subclasses.
        pass

    def setUp(self):
        # Per-test initialisation hook; override in subclasses.
        pass

    def tearDown(self):
        # Per-test cleanup hook; override in subclasses.
        pass

    def run_all_test_case(self):
        # Execute every registered test, wrapping each one in setUp/tearDown.
        for test in self.test_to_run:
            self.setUp()
            test()
            self.tearDown()
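# A minimal, self-contained usage sketch; the subclass and test names below
# are hypothetical illustrations, not part of the original module.
if __name__ == '__main__':
    class _DemoTests(SWTestCase):
        def test_addition(self):
            assert 1 + 1 == 2

    demo = _DemoTests()
    demo.test_to_run = [demo.test_addition]
    demo.run_all_test_case()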
| 2.171875 | 2 |