repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
milos85vasic/Apache-Factory-Toolkit | git_info.py | <gh_stars>1-10
import subprocess
import json
# JSON keys and output file name for the collected git metadata.
key_branch = "branch"
destination = "git_info.json"
key_repository = "repository"
def set_git_info():
    """Collect the current repo's origin URL and checked-out branch.

    Runs ``git remote show origin`` and ``git branch``, parses their output,
    prints the findings, and writes them to ``git_info.json`` (see
    ``destination``) as ``{"branch": ..., "repository": ...}``.
    """
    star = "* "
    branch = ""
    repository = ""
    fetch_url = "Fetch URL:"
    url_result, _ = subprocess.Popen(["git", "remote", "show", "origin"], stdout=subprocess.PIPE).communicate()
    branch_result, _ = subprocess.Popen(["git", "branch"], stdout=subprocess.PIPE).communicate()
    # The "Fetch URL:" line of `git remote show origin` carries the repo URL.
    for line in url_result.splitlines(keepends=False):
        utf_line = line.decode('UTF-8')
        if fetch_url in utf_line:
            repository = utf_line.replace(fetch_url, "").strip()
            break
    # `git branch` marks the current branch with a leading "* ".
    for line in branch_result.splitlines(keepends=False):
        utf_line = line.decode('UTF-8')
        if star in utf_line:  # was a duplicated "* " literal; reuse `star`
            branch = utf_line[utf_line.index(star) + len(star):].strip()  # was star.__len__()
            break
    git_configuration = {
        key_branch: branch,
        key_repository: repository
    }
    print("Repository is: ", git_configuration[key_repository])
    print("Branch is: ", git_configuration[key_branch])
    try:
        with open(destination, 'w') as outfile:
            json.dump(git_configuration, outfile)
    except IOError:
        print("Can't access [1]: " + destination)
def get_git_info():
    """Read back the JSON dictionary written by set_git_info()."""
    # Fix: json.load(open(...)) leaked the file handle; use a context manager.
    with open(destination) as info_file:
        return json.load(info_file)
|
milos85vasic/Apache-Factory-Toolkit | configuration.py | apache_factory = "Apache-Factory"
# Factory project names.
pyramid_factory = "Pyramid-Factory"
mail_server_factory = "Mail-Server-Factory"
# Component directory names under an account's home.
php = "Php"
mysql = "MySQL"
apache2 = "Apache2"
# MySQL sub-directory names (relative to the account's MySQL home).
mysql_bin_dir = "bin"
mysql_lib_dir = "lib"
mysql_priv_dir = "priv"
mysql_bench_dir = "bench"
mysql_plugin_dir = "plugin"
mysql_data_dir = "data"
mysql_log_dir = "log"
mysql_tmp_dir = "tmp"
mysql_sock_dir = "sock"
mysql_script_dir = "scripts"
mysql_pid_dir = "pid"
mysql_share_dir = "share"
mysql_conf_dir = "conf"
mysql_installation_dir = "release"
# Default service ports.
default_port = 8080
default_port_mysql = 3306
default_port_postfix = 4000
default_port_postfix_secure = 5000
default_port_postfix_submission = 6000
default_port_dovecot = 7000
default_port_dovecot_secure = 9000
# Account persistence file and system groups.
account_json = "account.json"
apache_factory_group = "apache_factory"
mail_server_factory_group = "mail_server_factory"
# Shared configuration directories and their global JSON files.
apache_factory_configuration_dir = "/usr/share/apache_factory"
mail_server_factory_configuration_dir = "/usr/share/mail_server_factory"
apache_factory_default_configuration_json = apache_factory_configuration_dir + "/global_configuration.json"
mail_server_factory_default_configuration_json = mail_server_factory_configuration_dir + "/global_configuration.json"
# Names of helper scripts shipped with the toolkit.
wipe_script = "wipe.py"
factory_script = "factory.py"
distribution_script = "distribute.py"
find_service_index_script = "find_service_index.py"
services_distribution_script = "distribute_services.py"
content_dir_name = "Content"
matrices_dir_name = "Matrices"
# File names recognised as a service's entry point.
service_indexes = ["index.html", "index.htm", "index.php", "setup.py"]
# RPM Fusion repository packages for CentOS/EL 7 and 8.
rpm_fusion_free_7 = "https://download1.rpmfusion.org/free/el/rpmfusion-free-release-7.noarch.rpm"
rpm_fusion_non_free_7 = "https://download1.rpmfusion.org/nonfree/el/rpmfusion-nonfree-release-7.noarch.rpm"
rpm_fusion_free_8 = "https://download1.rpmfusion.org/free/el/rpmfusion-free-release-8.noarch.rpm"
rpm_fusion_non_free_8 = "https://download1.rpmfusion.org/nonfree/el/rpmfusion-nonfree-release-8.noarch.rpm"
# Apache / startup related locations and scripts.
apache_vhosts_directory = "vhosts"
starter_script = "starter.py"
starter_script_py = "starter_py.py"
rc_local = "/etc/rc.d/rc.local"
main_proxy_script = "main_proxy.py"
dnf_package_group_development = "Development Tools"
# JSON keys used in account.json.
key_account = "account"
key_password = "password"
def get_home_directory_path(account):
    """Return the conventional home directory path for *account*."""
    return "/home/{}".format(account)
def content_dir_path(home_path):
    """Return the Content directory path below *home_path*."""
    return "{}/{}".format(home_path, content_dir_name)
|
milos85vasic/Apache-Factory-Toolkit | system_configuration.py | import json
import os
from Toolkit.configuration import *
from Toolkit.commands import *
from Toolkit.connectivity import *
# Command-line flag prefix and known flags.
arg_prefix = "--"
arg_server_admin = arg_prefix + "server_admin"
# JSON keys of the global configuration file.
key_configuration_port = "port"
key_configuration_port_mysql = "port_mysql"
key_configuration_port_postfix = "port_postfix"
key_configuration_port_postfix_secure = "port_postfix_secure"
key_configuration_port_postfix_submission = "port_postfix_submission"
key_configuration_port_dovecot = "port_dovecot"
key_configuration_port_dovecot_secure = "port_dovecot_secure"
key_configuration_server_admin = "server_admin"
key_services = "services"
key_features = "features"
# Known feature identifiers.
feature_mysql = "mysql"
feature_php_5 = "php_5"
# JSON keys of a service definition.
key_services_url = "url"
key_services_urls = "urls"
key_service_root = "root"
key_credentials = "credentials"
key_services_repository = "repository"
# JSON keys of the password-protection section.
key_password_protect = "password_protect"
key_password_protect_user = "user"
key_password_protect_password = "password"
key_password_protect_directories = "directories"
key_password_protect_service = "service"
key_password_protect_path = "path"
# Local services definition file ("services.json").
services_file = key_services + ".json"
key_configuration = "configuration"
key_configuration_main_proxy = "main_proxy"
key_explicit_port_number = "explicit_port_number"
key_configuration_repository = "configuration_repository"
def init_system_configuration(
        arguments,
        configuration_dir=apache_factory_configuration_dir,
        configuration_group=apache_factory_group
):
    """Initialise the global configuration from command-line *arguments*.

    Positional arguments (after the script name) are the account name and
    then the account password; ``--server_admin=<mail>`` overrides the admin
    address. The services.json file, when present, is attached to the
    account's entry. Returns the resulting configuration dictionary.
    """
    default_config_json = configuration_dir + "/global_configuration.json"
    if not os.path.isdir(configuration_dir):
        # Create the shared configuration directory with group write access.
        run([
            run_as_su(
                concatenate(
                    mkdir(configuration_dir),
                    chmod(configuration_dir, "770")
                )
            )
        ])
    system_configuration = get_system_configuration(configuration_dir)
    account_data = {}
    # Fix: iterate with enumerate() — arguments.index(arg) returns the FIRST
    # occurrence and mis-positions duplicated argument values.
    for position, raw_arg in enumerate(arguments):
        arg = str(raw_arg)
        if position > 0 and not arg.startswith(arg_prefix):
            if key_account not in account_data:
                # First positional argument: account name.
                system_configuration[arg] = {key_configuration_server_admin: "root@localhost"}
                account_data[key_account] = arg
                save_account(account_data)
            elif key_password not in account_data:
                # Second positional argument: account password.
                account_data[key_password] = arg
                save_account(account_data)
        if arg.startswith(arg_server_admin):
            if position == 1:
                print("First argument must be name of the account!")
                exit(1)
            server_admin = arg.replace(arg_server_admin + "=", "")
            # Fix: use .get() — the original indexed account_data directly and
            # raised KeyError instead of reaching the error message below.
            account = account_data.get(key_account)
            if account:
                system_configuration[account][key_configuration_server_admin] = server_admin
            else:
                print("No account information available to continue further [1].")
                exit(1)
    if os.path.isfile(services_file):
        # Fix: close the file handle (was json.load(open(...))).
        with open(services_file) as services_input:
            services_config = json.load(services_input)
        account = account_data.get(key_account)
        if account:
            system_configuration[account][key_services] = services_config
        else:
            print("No account information available to continue further [2].")
            exit(1)
    save_system_configuration(system_configuration, configuration_dir=configuration_dir)
    # Restrict the written configuration file to the factory group.
    run([
        run_as_su(
            concatenate(
                chmod(default_config_json, "770"),
                chgrp(configuration_group, default_config_json),
            )
        )
    ])
    return system_configuration
def get_account():
    """Read the account data written by save_account() from account.json."""
    # Fix: json.load(open(...)) leaked the file handle; use a context manager.
    with open(account_json) as account_file:
        return json.load(account_file)
def get_system_configuration(configuration_dir=apache_factory_configuration_dir):
    """Load the global configuration for *configuration_dir*.

    On first run the file does not exist yet: default port values are written
    out and returned. Otherwise the existing JSON file is read back.
    """
    default_config_json = configuration_dir + "/global_configuration.json"
    system_configuration = {
        key_configuration_port: default_port,
        key_configuration_port_mysql: default_port_mysql
    }
    # TODO: Refactor this:
    if configuration_dir == mail_server_factory_configuration_dir:
        # Mail server defaults: probe up to 1000 ports above each base port.
        until = 1000
        system_configuration = {
            key_configuration_port_postfix:
                get_first_available_port(default_port_postfix, default_port_postfix + until),
            key_configuration_port_postfix_secure:
                get_first_available_port(default_port_postfix_secure, default_port_postfix_secure + until),
            key_configuration_port_postfix_submission:
                get_first_available_port(default_port_postfix_submission, default_port_postfix_submission + until),
            key_configuration_port_dovecot:
                get_first_available_port(default_port_dovecot, default_port_dovecot + until),
            key_configuration_port_dovecot_secure:
                get_first_available_port(default_port_dovecot_secure, default_port_dovecot_secure + until)
        }
    if not os.path.isfile(default_config_json):
        try:
            with open(default_config_json, 'w') as outfile:
                json.dump(system_configuration, outfile)
        except IOError:
            print("Can't access [3]: " + default_config_json)
    else:
        # Fix: close the file handle (was json.load(open(...))).
        with open(default_config_json) as config_file:
            system_configuration = json.load(config_file)
    return system_configuration
def save_account(account):
    """Persist the *account* dictionary to account.json."""
    with open(account_json, 'w') as outfile:
        outfile.write(json.dumps(account))
def save_system_configuration(system_configuration, configuration_dir=apache_factory_configuration_dir):
    """Write *system_configuration* to <configuration_dir>/global_configuration.json."""
    target = configuration_dir + "/global_configuration.json"
    with open(target, 'w') as outfile:
        outfile.write(json.dumps(system_configuration))
def get_services_directories(account, configuration_dir=apache_factory_configuration_dir):
    """Return the URL of every service configured for *account* (may be empty)."""
    configuration = get_system_configuration(configuration_dir)
    directories = []
    if account in configuration and key_services in configuration[account]:
        services_entry = configuration[account][key_services]
        if key_services in services_entry:
            directories = [service[key_services_url] for service in services_entry[key_services]]
    return directories
def has_feature(account, feature, configuration_dir=apache_factory_configuration_dir):
    """Return whether *feature* is listed in *account*'s services features.

    Fix: the original indexed system_configuration[account] unguarded and
    raised KeyError for unknown accounts — inconsistent with
    get_services_directories(), which checks membership first.
    """
    features = None
    system_configuration = get_system_configuration(configuration_dir)
    account_configuration = system_configuration.get(account)
    if isinstance(account_configuration, dict):
        if key_services in account_configuration:
            services_entry = account_configuration[key_services]
            if key_features in services_entry:
                features = services_entry[key_features]
    return bool(features) and feature in features
|
milos85vasic/Apache-Factory-Toolkit | mysql_common_5560.py | <reponame>milos85vasic/Apache-Factory-Toolkit
from .configuration import *
from .system_configuration import *
# MySQL 5.5.60:
def get_mysql_start_command(user_account):
    """Build the mysqld (MySQL 5.5.60) launch command for *user_account*.

    All runtime directories (tmp, data, priv, sock) live under
    ~/<account>/MySQL/. The port comes from the account's entry in the global
    configuration when present, otherwise the default (3306) is used.
    """
    system_configuration = get_system_configuration()
    mysql_full_path = get_home_directory_path(user_account) + "/" + mysql + "/"
    port = default_port_mysql
    if user_account in system_configuration:
        if key_configuration_port_mysql in system_configuration[user_account]:
            port = system_configuration[user_account][key_configuration_port_mysql]
    # Trailing '&' backgrounds the server process when the command is run.
    return mysql_full_path + mysql_bin_dir + "/mysqld --tmpdir=" + mysql_full_path + "tmp --datadir=" \
        + mysql_full_path + "data " + "--secure-file-priv=" + mysql_full_path + "priv --port=" + str(port) \
        + " --user=" + user_account + " " + "--socket=" + mysql_full_path + "sock/socket &"
|
milos85vasic/Apache-Factory-Toolkit | commands.py | <reponame>milos85vasic/Apache-Factory-Toolkit
import os
import subprocess
# Relative current-directory path and recognised OS identifiers.
here = "./"
os_unknown = "Unknown"
os_centos_7 = "CentOS Linux 7"
os_centos_8 = "CentOS Linux 8"
# Toolkit checkout location and repository endpoints.
toolkit_directory = "Toolkit"
toolkit_repo = "https://github.com/milos85vasic/Apache-Factory-Toolkit.git"
toolkit_repo_raw_access = "https://raw.githubusercontent.com/milos85vasic/Apache-Factory-Toolkit/master/"
run_as_user_with_password_script = "run_as_user_with_password.sh"
def run_as_su(what):
    """Wrap *what* in a `su -c "..."` invocation."""
    return 'su -c "{}"'.format(what)
def sudo(what):
    """Prefix *what* with `sudo `."""
    return "sudo {}".format(what)
def get_package_inallation_cmd():
    """Return the first available package manager: dnf, yum or apt-get.

    Falls back to "yum" when none is found on PATH. (The typo in the
    function name is kept for backward compatibility with callers.)

    Fix: use shutil.which instead of spawning `which` and scanning its
    output for a "no <item>" substring, which only works for csh-style
    `which` implementations.
    """
    import shutil  # local import keeps the module's import block untouched
    for candidate in ["dnf", "yum", "apt-get"]:
        if shutil.which(candidate):
            return candidate
    return "yum"
def get_yum(*what):
    """Build an `<pkg-mgr> install -y <packages...>` command string."""
    items = ""
    for item in what:
        items += item + " "
    return get_package_inallation_cmd() + " install -y " + items

def get_yum_group(what):
    """Build a `<pkg-mgr> groups install -y '<group>'` command string."""
    return get_package_inallation_cmd() + " groups install -y '" + what + "'"

def install_package(*what):
    """Alias for get_yum()."""
    return get_yum(*what)

def install_package_group(what):
    """Alias for get_yum_group()."""
    return get_yum_group(what)
def concatenate(*what):
    """Join command strings into `" cmd1; cmd2; ..."`, skipping blank entries.

    Fix: the original used what.index(item) to decide whether to append the
    ';' separator; index() returns the FIRST occurrence, so duplicated
    commands produced a spurious trailing ';'.
    """
    result = ""
    last_position = len(what) - 1
    for position, item in enumerate(what):
        append = item.strip()
        if append:
            result += " " + append
            if position < last_position:
                result += ";"
    return result
def clear():
    """Command: clear the terminal."""
    return "clear"

def wget(what, **params):
    """Command: download *what*; optional `destination=` adds `-P <dir>`."""
    destination = 'destination'
    if destination in params:
        return wget(what) + " -P " + params[destination]
    else:
        return "wget " + what

def curl(url):
    """Command: fetch *url* bypassing caches."""
    return "curl -H \"Cache-Control: no-cache\" " + url

def curl_to(url, to):
    """Command: fetch *url* and redirect the output to file *to*."""
    return "curl -H \"Cache-Control: no-cache\" " + url + " > " + to

def mkdir(dir_name):
    """Command: create a single directory."""
    return "mkdir " + dir_name

def mkdirs(directories):
    """Command: create several directories in one call."""
    param = ""
    for directory in directories:
        param += " " + directory
    return "mkdir " + param

def extract(what, **params):
    """Command: extract a .tar.gz; optional `destination=` extraction dir."""
    destination = 'destination'
    if destination in params:
        return "tar -xvzf " + what + " --directory " + params[destination]
    else:
        return "tar -xvzf " + what

def cd(where):
    """Command: change directory."""
    return "cd " + where

def back():
    """Command: go up one directory."""
    return cd("..")

def home():
    """Command: go to the user's home directory."""
    return cd("~")

def mv(what, where):
    """Command: move/rename."""
    return "mv " + what + " " + where

def cp_dir(what, where):
    """Command: copy a directory's contents (including dotfiles)."""
    return "cp -a " + what + "/. " + where + "/"

def cp(what, where):
    """Command: copy a file."""
    return "cp " + what + " " + where

def check_file(what):
    """Command: report a file's type via file(1)."""
    return "file " + what

def run(what):
    """Execute each command in the *what* list through os.system."""
    for cmd in what:
        os.system(cmd)

def add_user(user):
    """Command: interactively add a system user."""
    return "adduser " + user

def add_user_with_password(user, password):
    """Command: create *user* with a home dir and set *password* non-interactively."""
    return "useradd -m " + user + " && yes " + password + " | passwd " + user

def add_group(group):
    """Command: create a system group."""
    return "groupadd " + group

def passwd(user):
    """Command: change *user*'s password interactively."""
    return "passwd " + user

def run_as_user(account, command):
    """Command: run *command* in a bash shell as *account* via sudo."""
    return "sudo -H -u " + account + " bash -c '" + command + "'"

def run_as_user_with_password(account, password, command):
    """Ensure the helper script is checked out, then build the run command.

    NOTE(review): currently stubbed out — see the FIXME below; the real
    invocation is left commented until the helper script works.
    """
    # TODO: Check first if relatively to the current location script exists.
    if not os.path.isfile("~/" + toolkit_directory + "/" + run_as_user_with_password_script):
        steps = []
        if not os.path.isdir("~/" + toolkit_directory):
            steps.append(
                mkdir("~/" + toolkit_directory)
            )
        steps.append(
            "git clone --recurse-submodules " + toolkit_repo + " ~/" + toolkit_directory
        )
        run(steps)
    # return "sh " + "~/" + toolkit_directory + "/" + run_as_user_with_password_script + " " + account + " " + password + " \"" + command + "\""
    # return "sh " + "~/" + toolkit_directory + "/" + run_as_user_with_password_script + " test1 TestUser001 ls"
    return "echo XXXXXXXXXXXXXXXXXXXXXX"  # FIXME: Still does not work.

def git_clone(what):
    """Command: clone a git repository."""
    return "git clone " + what

def git_clone_recursive(what):
    """Command: clone a git repository with submodules."""
    return "git clone " + what + " --recursive"

def git_submodule_init():
    """Command: initialise submodules."""
    return "git submodule init"

def git_submodule_checkout_each():
    """Command: check out master in every submodule, recursively."""
    return "git submodule foreach --recursive git checkout master"

def git_submodule_update():
    """Command: update submodules."""
    return "git submodule update"

def git_clone_to(what, where):
    """Command: clone a repository into *where*."""
    return "git clone " + what + " " + where

def git_clone_to_recursive(what, where):
    """Command: clone a repository with submodules into *where*."""
    return "git clone " + what + " " + where + " --recursive"

def git_clone_to_recursive_submodules(what):
    """Command: clone with --recurse-submodules into the current directory."""
    return "git clone --recurse-submodules " + what + " ./"

def git_checkout(what):
    """Command: check out branch/ref *what*."""
    return "git checkout " + what
def python(script, *params):
    """Build a `<python> <script> [args...]` command line.

    Fix: the original prefixed each param with a space AND joined with
    another " ", producing a double space between script and arguments.
    """
    if not params:
        return get_python_cmd() + " " + script
    return get_python_cmd() + " " + script + " " + " ".join(params)
def rm(what):
    """Command: remove recursively and by force."""
    return "rm -rf " + what

def rm_files(what):
    """Command: remove file(s) without recursion."""
    return "rm " + what

def apache_start():
    """Command: start Apache via the local apachectl."""
    return "./apachectl start"

def pyramid_setup(variant):
    """Command: run a Pyramid setup.py *variant* (e.g. develop/install)."""
    return "python setup.py " + variant

def pyramid_start():
    """Command: serve the Pyramid app in the background."""
    return "pserve pyramid_factory.ini &"

def apache_stop():
    """Command: stop Apache via the local apachectl."""
    return "./apachectl stop"

def chmod(where, mode):
    """Command: recursively chmod *where* to *mode*."""
    return "chmod -R " + mode + " " + where

def chmodx(what):
    """Command: make *what* executable."""
    return "chmod +x " + what

def chgrp(group, directory):
    """Command: recursively change group ownership."""
    return "chgrp -R " + group + " " + directory

def chown(account, directory):
    """Command: recursively change owner."""
    return "chown -R " + account + " " + directory

def pwd():
    """Command: print the working directory."""
    return "pwd"

def ls():
    """Command: long listing with type indicators."""
    return "ls -lF"

def sleep(seconds):
    """Command: sleep for *seconds*."""
    return "sleep " + str(seconds)

def add_to_group(account, group):
    """Command: append *account* to *group*."""
    return "usermod -a -G " + group + " " + account

def output(what, where):
    """Command: echo *what* into file *where* (overwrite)."""
    return 'echo "' + what + '" > ' + where

def pip_upgrade():
    """Command: upgrade pip itself."""
    return "pip install --upgrade pip"

def pip(what):
    """Command: install a package with pip."""
    return "pip install " + what

def pip_upgrade_version(python_version):
    """Command: upgrade pip for a specific Python major version."""
    return "pip" + str(python_version) + " install --upgrade pip"

def pip_version(what, python_version):
    """Command: install a package with the version-specific pip."""
    return "pip" + str(python_version) + " install " + what

def kill(who):
    """Command: send SIGTERM to process *who*."""
    return "kill " + who

def echo(what):
    """Command: echo *what* (double-quoted)."""
    return "echo \"" + what + "\""

def venv_init():
    """Command: create a virtualenv named `env`."""
    return "virtualenv env"

def venv_activate():
    """Command: activate the `env` virtualenv."""
    return "source env/bin/activate"

def venv_deactivate():
    """Command: deactivate the current virtualenv."""
    return "deactivate"

def venv_init_version(version, name):
    """Command: create a virtualenv *name* for a specific Python version."""
    return "virtualenv -p /usr/bin/python" + str(version) + " --distribute " + name

def venv_activate_name(name):
    """Command: activate the virtualenv called *name*."""
    return "source " + name + "/bin/activate"

def ssh(user, command, port=22, host="127.0.0.1"):
    """Command: run *command* on *host* as *user* over ssh."""
    return "ssh -p " + str(port) + " " + user + "@" + host + " " + command
def get_python_cmd():
    """Return the first Python interpreter available on PATH.

    Falls back to "python". Fix: use shutil.which instead of spawning
    `which` and scanning its output for a "no <item>" substring, which only
    works for csh-style `which` implementations.
    """
    import shutil  # local import keeps the module's import block untouched
    for candidate in ["python", "python3", "python2"]:
        if shutil.which(candidate):
            return candidate
    return "python"
def get_python_installation_package():
    """Return the distro package name that provides Python for this OS."""
    os_name = get_os_name()
    if os_name == os_centos_8:
        return "python36"
    return "python"  # TODO: Support all OSs.
def get_users_list_cmd():
    """Shell pipeline printing the user-name field of every /etc/passwd entry."""
    command = "awk -F: '{ print $1}' /etc/passwd"
    return command
def get_users_list():
    """Run the awk pipeline and return all local user names as a list."""
    result, _ = subprocess.Popen(
        [get_users_list_cmd()], stdout=subprocess.PIPE, shell=True
    ).communicate()
    return [line.decode('UTF-8') for line in result.splitlines(keepends=False)]
def userdel(user):
    """Command: force-remove *user* with home dir and SELinux mapping."""
    return "userdel -Z -r -f " + user

def groupdel(group):
    """Command: delete a system group."""
    return "groupdel " + group

def hostnamectl():
    """Command: print host and operating-system information."""
    return "hostnamectl"
def get_os_name():
    """Detect the operating system from hostnamectl output.

    Returns os_centos_7 / os_centos_8 when the "Operating System:" line
    matches, otherwise os_unknown. (Also drops a stray trailing token that
    had crept onto the final return line.)
    """
    result, _ = subprocess.Popen([hostnamectl()], stdout=subprocess.PIPE, shell=True).communicate()
    for line in result.splitlines(keepends=False):
        utf_line = line.decode('UTF-8')
        if "Operating System:" in utf_line:
            if "CentOS Linux 8" in utf_line:
                return os_centos_8
            if "CentOS Linux 7" in utf_line:
                return os_centos_7
    return os_unknown
milos85vasic/Apache-Factory-Toolkit | connectivity.py | <filename>connectivity.py
import socket, errno
def is_port_available(port_number):
    """Return True when TCP *port_number* can be bound on 127.0.0.1.

    Fixes: the "Port ... is open." message was built as a bare expression
    and never printed; the socket is now closed in a finally block so it is
    released even if bind() raises something unexpected.
    """
    success = True
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(("127.0.0.1", port_number))
    except socket.error as e:
        if e.errno == errno.EADDRINUSE:
            success = False
            print("Port is already in use")
        else:
            # Any other bind failure is reported but still treated as
            # "available", matching the original behaviour.
            print(e)
    finally:
        s.close()
    if success:
        print("Port " + str(port_number) + " is open.")
    return success
def get_first_available_port(from_port, until_port):
    """Scan upward from *from_port* and return the first bindable port.

    Returns *until_port* itself when every lower port is taken.
    """
    candidate = from_port
    while not is_port_available(candidate) and candidate < until_port:
        candidate += 1
    return candidate
|
milos85vasic/Apache-Factory-Toolkit | find_service_index.py | import json
import getpass
import sys
from commands import *
from configuration import *
from system_configuration import *
# Resolve the configured service entry matching the URL given on the CLI.
account = getpass.getuser()
system_configuration = get_system_configuration()
service = None
service_url = sys.argv[1]   # URL identifying the service to locate
service_home = sys.argv[2]  # directory tree to search for an index file
services = system_configuration[account][key_services][key_services]
for service_item in services:
    if service_item[key_services_url] == service_url:
        service = service_item
def get_index(directory):
    """Return *directory* when it holds one of the known index files, else None."""
    for index_name in service_indexes:
        if os.path.isfile(directory + "/" + index_name):
            return directory
    return None
# Find the directory actually containing the service's index file: try the
# service home first, then walk its subdirectories; persist the first hit.
service_root_directory = get_index(service_home)
if service_root_directory is not None:
    service[key_service_root] = service_root_directory
    save_system_configuration(system_configuration)
else:
    for subdirectory, _, _ in os.walk(service_home):
        service_root_directory = get_index(subdirectory)
        if service_root_directory is not None:
            service[key_service_root] = service_root_directory
            save_system_configuration(system_configuration)
            break
|
milos85vasic/Apache-Factory-Toolkit | __init__.py | # __init__.py
from .commands import *
from .configuration import *
from .connectivity import *
from .mysql_common import *
from .mysql_common_5560 import *
from .system_configuration import * |
milos85vasic/Apache-Factory-Toolkit | websetup.py | <reponame>milos85vasic/Apache-Factory-Toolkit<filename>websetup.py
import os
import sys
import subprocess
import time
# Location and origin of the Toolkit checkout used by the setup run.
toolkit_directory = "Toolkit"
toolkit_repo = "https://github.com/milos85vasic/Apache-Factory-Toolkit.git"

if __name__ == '__main__':
    # Bootstrap: clone the Toolkit next to this script when it is missing.
    exists = True
    steps = []
    if not os.path.exists(toolkit_directory):
        exists = False
        steps.extend(
            [
                "mkdir " + toolkit_directory,
                "git clone --recurse-submodules " + toolkit_repo + " ./" + toolkit_directory,
            ]
        )
    for cmd in steps:
        os.system(cmd)
    # argv[1] = target to set up; optional argv[2] = branch (default master).
    branch = "master"
    what = sys.argv[1]
    if len(sys.argv) >= 3:
        branch = sys.argv[2]
    from Toolkit.commands import get_python_cmd
    python_cmd = get_python_cmd()
    setup = python_cmd + " ./" + toolkit_directory + "/websetup_run.py " + what
    # Fix: was `branch is not "master"` — identity comparison against a
    # string literal is interpreter-dependent; compare by value.
    if branch != "master":
        setup += " " + branch
    steps = [
        setup
    ]
    if not exists:
        # Self-clean: remove the bootstrap checkout and this script.
        steps.extend(
            [
                "rm -rf ./" + toolkit_directory,
                "rm -f " + os.path.basename(__file__)
            ]
        )
    for cmd in steps:
        os.system(cmd)
MatheusAmelco/squad-1-ad-python-2 | api/migrations/0002_auto_20190727_1440.py | <filename>api/migrations/0002_auto_20190727_1440.py<gh_stars>1-10
# Generated by Django 2.2.3 on 2019-07-27 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch Sellers.cpf and Sellers.phone from integer to string fields."""

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sellers',
            name='cpf',
            field=models.CharField(max_length=11),
        ),
        migrations.AlterField(
            model_name='sellers',
            name='phone',
            field=models.CharField(max_length=20),
        ),
    ]
|
MatheusAmelco/squad-1-ad-python-2 | commission/settings_prod.py | <reponame>MatheusAmelco/squad-1-ad-python-2<filename>commission/settings_prod.py<gh_stars>1-10
from .settings import *
import dj_database_url
# Production overrides on top of the base settings.
DEBUG = False
# Reads the database URL from the DATABASE_URL environment variable.
DATABASES = {
    'default': dj_database_url.config()
}
# https://devcenter.heroku.com/articles/django-assets
# Django and Static Assets
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
#MIDDLEWARE.append('whitenoise.middleware.WhiteNoiseMiddleware')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
|
MatheusAmelco/squad-1-ad-python-2 | api/serializers.py | <gh_stars>1-10
from rest_framework import serializers
from api.models import Commission_plan, Sellers, Sales
class CommissionPlanSerializer(serializers.ModelSerializer):
    """Full representation of a commission plan."""
    class Meta:
        model = Commission_plan
        fields = ['id', 'lower_percentage', 'upper_percentage', 'min_value']

class SellersSerializer(serializers.ModelSerializer):
    """Full representation of a seller, including the linked plan id."""
    class Meta:
        model = Sellers
        fields = ['id', 'name', 'address', 'phone', 'age', 'email', 'cpf', 'plan']

class SalesSerializer(serializers.ModelSerializer):
    """Full representation of a monthly sale and its stored commission."""
    class Meta:
        model = Sales
        fields = ['id', 'month', 'amount', 'commission', 'sellers_id']

class CheckCommissionSerializer(serializers.ModelSerializer):
    """Input serializer for commission checks: only amount and seller."""
    class Meta:
        model = Sales
        fields = ['amount', 'sellers_id']
|
MatheusAmelco/squad-1-ad-python-2 | api/urls.py | <reponame>MatheusAmelco/squad-1-ad-python-2
from django.urls import path
from api import views
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
# API routes; the two /auth/ endpoints come from rest_framework_jwt.
urlpatterns = [
    path('sellers/', views.sellers),
    path('commissions/', views.commissions),
    path('month_commission/', views.month_commission),
    path('vendedores/<int:month>/', views.vendedores),
    path('check_commission/', views.check_commission),
    path('auth/', obtain_jwt_token),                  # obtain a JWT
    path('auth/refresh-token/', refresh_jwt_token),   # refresh an existing JWT
]
|
MatheusAmelco/squad-1-ad-python-2 | api/tests.py | <gh_stars>1-10
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from django.contrib.auth.models import User
from api.models import Commission_plan, Sellers, Sales
from decimal import Decimal
class TelesalesViewTestCase(TestCase):
    """End-to-end API tests: JWT authentication plus the plan/seller/sale endpoints."""

    def setUp(self):
        """Create a user, a commission plan, one seller and one monthly sale."""
        # Create user
        self.user = User.objects.create_user(
            username="usuario",
            email="<EMAIL>",
            password="<PASSWORD>"
        )
        self.user.save()
        # Create a plan
        self.p1 = Commission_plan.objects.create(
            lower_percentage=2.5,
            upper_percentage=10.5,
            min_value=5000,
        )
        # Create a seller
        self.s1 = Sellers.objects.create(
            name="<NAME>",
            address="Rua Teste, 1223",
            phone="47123456789",
            age=30,
            email="<EMAIL>",
            cpf="11223344556",
            plan=Commission_plan.objects.get(),
        )
        # Create a month comission
        self.mc1 = Sales.objects.create(
            month=1,
            amount=10000,
            sellers_id=Sellers.objects.get(),
            commission=1050,
        )

    def _authenticated_client(self):
        """Obtain a JWT for the test user and return an APIClient carrying it.

        Extracted helper: the original repeated this four-line token dance in
        every test method.
        """
        resp = self.client.post("/auth/", {"username": "usuario", "password": "<PASSWORD>"}, format="json")
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION="Bearer " + resp.data["token"])
        return client

    def test_create_auth(self):
        """Valid credentials yield HTTP 200 from /auth/."""
        resp = self.client.post("/auth/", {"username": "usuario", "password": "<PASSWORD>"}, format="json")
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

    def test_create_plan_201(self):
        """A complete plan payload is accepted (201)."""
        client = self._authenticated_client()
        data = {
            "lower_percentage": 2.5,
            "upper_percentage": 10.5,
            "min_value": 5000
        }
        response = client.post("/commissions/", data, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_plan_400(self):
        """An empty plan payload is rejected (400)."""
        client = self._authenticated_client()
        data = {
        }
        response = client.post("/commissions/", data, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_seller_201(self):
        """A seller referencing an existing plan is accepted (201)."""
        client = self._authenticated_client()
        data = {
            "name": "<NAME>",
            "address": "Rua abcd, 123",
            "phone": "48012345678",
            "age": 30,
            "email": "<EMAIL>",
            "cpf": "12345678910",
            "plan": 1
        }
        response = client.post("/sellers/", data, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_seller_400(self):
        """A seller referencing a non-existent plan is rejected (400)."""
        client = self._authenticated_client()
        data = {
            "name": "<NAME>",
            "address": "Rua abcd, 123",
            "phone": "48012345678",
            "age": 30,
            "email": "<EMAIL>",
            "cpf": "12345678910",
            "plan": 99
        }
        response = client.post("/sellers/", data, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_month_comission_201(self):
        """A valid monthly sale is accepted (201)."""
        client = self._authenticated_client()
        data = {
            "sellers_id": 1,
            "amount": 10000,
            "month": 2
        }
        response = client.post("/month_commission/", data, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_month_comission_400(self):
        """Unknown seller and invalid month are rejected (400)."""
        client = self._authenticated_client()
        data = {
            "sellers_id": 99,
            "amount": 10000,
            "month": 13
        }
        response = client.post("/month_commission/", data, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_get_month_list(self):
        """Listing sellers for an existing month returns 200."""
        client = self._authenticated_client()
        response = client.get("/vendedores/1/", format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_notify_200(self):
        """Commission check for an existing seller returns 200."""
        client = self._authenticated_client()
        data = {
            "sellers_id": 1,
            "amount": 1000.65
        }
        response = client.post("/check_commission/", data, format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_notify_400(self):
        """Commission check for an unknown seller returns 400."""
        client = self._authenticated_client()
        data = {
            "sellers_id": 99,
            "amount": 1000.65
        }
        response = client.post("/check_commission/", data, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class ModelsTestCase(TestCase):
    """Unit tests for the Sales model helpers (commission maths and queries)."""

    def setUp(self):
        # Two plans, a seller on each plan, and one sale per seller.
        Commission_plan.objects.create(lower_percentage=2.5, upper_percentage=10.5, min_value=5000.00)
        Commission_plan.objects.create(lower_percentage=1.5, upper_percentage=5, min_value=4500)
        Sellers.objects.create(name="<NAME>", address="Rua abc, 213", phone="11932455678",
                               age=35, email="<EMAIL>", cpf="65478932102",
                               plan=Commission_plan.objects.get(id=1))
        Sellers.objects.create(name="<NAME>", address="Rua Rasa, 01", phone="5547993548264",
                               age=42, email="<EMAIL>", cpf="456123987",
                               plan=Commission_plan.objects.get(id=2))
        # NOTE(review): calc_commission is called unbound with the test case
        # as `self` — presumably it ignores `self`; confirm against the model.
        Sales.objects.create(sellers_id=Sellers.objects.get(id=1), amount=1238.00, month=1,
                             commission=round(Sales.calc_commission(self, 1, 1238.00), 2))
        Sales.objects.create(sellers_id=Sellers.objects.get(id=2), amount=10950.00, month=1,
                             commission=round(Sales.calc_commission(self, 2, 10950.00), 2))

    def test_calc_commission(self):
        # Below min_value -> lower percentage; above -> upper percentage.
        self.assertEqual(Sales.calc_commission(self, 1, 1238.00), round(Decimal(30.95), 2))
        self.assertEqual(Sales.calc_commission(self, 2, 10950.00), round(Decimal(547.50), 2))

    def test_return_sellers(self):
        # Month 1 has two sellers; month 2 has none -> 404 response.
        self.assertEqual(len(Sales.return_sellers(self, 1)), 2)
        self.assertEqual(Sales.return_sellers(self, 2).status_code, 404)

    def test_check_commission(self):
        self.assertEqual(Sales.check_commission(Sales ,1, 1325.00), {"seller_notified": False})
        self.assertEqual(Sales.check_commission(Sales, 2, 3200.00), {"seller_notified": True})
|
MatheusAmelco/squad-1-ad-python-2 | api/migrations/0001_initial.py | # Generated by Django 2.2.3 on 2019-07-24 03:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Commission_plan, Sellers and Sales tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Commission_plan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lower_percentage', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='lo')),
                ('upper_percentage', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='up')),
                ('min_value', models.DecimalField(decimal_places=2, max_digits=19, verbose_name='min')),
            ],
        ),
        migrations.CreateModel(
            name='Sellers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('address', models.CharField(max_length=200)),
                ('phone', models.IntegerField()),
                ('age', models.IntegerField()),
                ('email', models.EmailField(max_length=100)),
                ('cpf', models.IntegerField()),
                ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Commission_plan', verbose_name='plan')),
            ],
        ),
        migrations.CreateModel(
            name='Sales',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('month', models.IntegerField()),
                ('amount', models.DecimalField(decimal_places=2, max_digits=20)),
                ('commission', models.DecimalField(decimal_places=2, max_digits=20)),
                ('sellers_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Sellers', verbose_name='sid')),
            ],
        ),
    ]
|
MatheusAmelco/squad-1-ad-python-2 | api/admin.py | from django.contrib import admin
from .models import Sellers, Commission_plan, Sales
# Expose the commission models in the Django admin site.
admin.site.register(Sellers)
admin.site.register(Commission_plan)
admin.site.register(Sales)
|
MatheusAmelco/squad-1-ad-python-2 | api/migrations/0003_auto_20190729_2006.py | <reponame>MatheusAmelco/squad-1-ad-python-2
# Generated by Django 2.2.3 on 2019-07-29 20:06
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Tighten Sales/Sellers fields: named months, unique CPF, validated e-mail."""

    dependencies = [
        ('api', '0002_auto_20190727_1440'),
    ]

    operations = [
        # Restrict month to the 12 valid values, with Portuguese display names.
        migrations.AlterField(
            model_name='sales',
            name='month',
            field=models.IntegerField(choices=[(1, 'Janeiro'), (2, 'Fevereiro'), (3, 'Março'), (4, 'Abril'), (5, 'Maio'), (6, 'Junho'), (7, 'Julho'), (8, 'Agosto'), (9, 'Setembro'), (10, 'Outubro'), (11, 'Novembro'), (12, 'Dezembro')]),
        ),
        # CPF becomes a fixed-length string (leading zeros matter) and must be unique.
        migrations.AlterField(
            model_name='sellers',
            name='cpf',
            field=models.CharField(max_length=11, unique=True),
        ),
        # E-mail must be unique and pass the standard validator.
        migrations.AlterField(
            model_name='sellers',
            name='email',
            field=models.EmailField(max_length=100, unique=True, validators=[django.core.validators.EmailValidator()]),
        ),
    ]
|
Duilio999/autodownloader | main.py | <reponame>Duilio999/autodownloader<gh_stars>1-10
# import urllib.request
from openload import OpenLoad
from time import sleep
# Define Functions
def find_id(string1):
    """Locate the openload file-id marker in *string1*.

    Returns the index just past the first occurrence of 'openload.co/f/'
    (i.e. where the file id starts), or -1 when the marker is absent.
    """
    marker = 'openload.co/f/'
    pos = string1.find(marker)
    if pos == -1:
        return -1
    # Index of the first character after the marker.  Also handles a marker
    # that ends exactly at the end of the string, which the old manual
    # `while sl < len(string1)` scan missed.
    return pos + len(marker)
def cut_id(string1, sl):
    """Return the file id: the substring of *string1* starting at index *sl*
    (as produced by find_id) and ending just before the next '/'.

    Returns -1 when no '/' follows, mirroring the original manual scan.
    """
    end = string1.find('/', sl)
    if end == -1:
        return -1
    return string1[sl:end]
def download(file_id):  # file_id = 'Id of the file will be downloaded'
    """Resolve a direct download URL for an openload file id and print it.

    Uses the module-level `ol` OpenLoad client.  NOTE(review): when the API
    returns a captcha_url the captcha is never actually solved (the solver
    call is commented out), so the download request may be rejected.
    """
    # Get a download ticket and captcha url.
    preparation_resp = ol.prepare_download(file_id)
    ticket = preparation_resp.get('ticket')
    # Sometimes no captcha is sent in openload.co API response.
    captcha_url = preparation_resp.get('captcha_url')
    if captcha_url:
        # Solve captcha.
        captcha_response = ''  # solve_captcha(captcha_url)
    else:
        captcha_response = ''
    download_resp = ol.get_download_link(file_id, ticket, captcha_response)
    direct_download_url = download_resp.get('url')
    # Process download url.
    print(direct_download_url)
# Data Part
# NOTE(review): API credentials are hard-coded here; move them to environment
# variables or a config file before publishing this script.
ol = OpenLoad('bf97043b1e26e3aa', 'FTDZRy_v')
account_info = ol.account_info()
print(account_info)
# Instr. Set
# Retry loop: wait 6 minutes between attempts and keep retrying the download
# until it succeeds (i is set to -1 on success, which ends the loop).
i = 0
while i >= 0:
    try:
        sleep(360)
        download('9ZZOeA1yYzI')
        i = -1
    except Exception as e:
        i = i + 1
        print(e)
        print('Retry... #', i)
print('OK')
|
fire717/hualubei2020-callingsmoking | data/resize.py | <reponame>fire717/hualubei2020-callingsmoking<gh_stars>10-100
import cv2
import albumentations as A
import os
import sys
import json
def getAllName(file_dir, tail_list=('.JPG',)):
    """Recursively collect files under *file_dir* whose extension is in *tail_list*.

    Args:
        file_dir: root directory to walk.
        tail_list: iterable of extensions (including the dot) to keep.  The
            default is now an immutable tuple instead of a mutable list —
            same membership semantics, no shared-default pitfall.

    Returns:
        List of full paths, in os.walk order.
    """
    return [
        os.path.join(root, name)
        for root, _dirs, files in os.walk(file_dir)
        for name in files
        if os.path.splitext(name)[1] in tail_list
    ]
imgs = getAllName('finalall')
print(len(imgs))
# Shrink every image so it fits inside a 600x600 box, preserving aspect
# ratio; images already within the box are left untouched.  Files are
# overwritten in place.
for img_path in imgs:
    img = cv2.imread(img_path)
    h, w = 600, 600
    input_h, input_w = img.shape[:2]
    h_ratio = input_h / h
    w_ratio = input_w / w
    if (h_ratio > 1 and w_ratio > 1):
        # Scale by the larger ratio so both resulting sides are <= 600.
        if h_ratio > w_ratio:
            resize_h = h
            resize_w = int(input_w / h_ratio)
        else:
            resize_w = w
            resize_h = int(input_h / w_ratio)
        img = A.Resize(resize_h, resize_w, cv2.INTER_AREA)(image=img)['image']
    cv2.imwrite(img_path, img)
fire717/hualubei2020-callingsmoking | config.py |
# Training/inference configuration for the calling/smoking classifier.
cfg = {
    # --- model ---
    "model_name": "adv-efficientnet-b4",
    # adv-efficientnet-b2 se_resnext50_32x4d xception resnext101_32x8d_wsl
    "img_size": 600,            # square input resolution fed to the network
    'GPU_ID': '0',
    "class_number": 4,
    'mode': '-mutillabel',      # suffix selecting the multi-label variant

    # --- train ---
    'batch_size': 3,
    'early_stop_patient': 26,   # epochs without improvement before stopping
    'learning_rate': 0.0002,
    'epochs': 35,
    'save_start_epoch': 5,      # skip checkpointing during early epochs
    'train_path': "../data/train/finalall",
    "k_flod": 5,                # number of CV folds (key spelling kept for compat)
    'optims': 'Ranger',  # adam SGD AdaBelief Ranger
    'schedu': 'SGDR1',   # default SGDR1 2 CVPR step1 2
    'use_warmup': 0,
    'weight_decay': 0.0001,
    'use_distill': 0,
    'label_smooth': 0,
    'model_path': None,         # optional checkpoint to resume from
    'start_fold': 0,

    # --- test ---
    'test_path': "../data/testA_rotate/",  # _resize_rotate
    'use_TTA': 0,               # test-time augmentation toggle
    'test_batch_size': 4,

    # --- fixed ---
    "save_dir": "save",
    "random_seed": 42,
}
|
CHPC-UofU/jupyter-lmod | jupyterlmod/handler.py | <reponame>CHPC-UofU/jupyter-lmod
import json
import os
import tornado.web
import lmod
from functools import partial
from jupyter_core.paths import jupyter_path
from notebook.base.handlers import IPythonHandler
# Map URL action names to the lmod operations they trigger.  `list` hides
# hidden modules so the UI only shows user-visible ones.
ACTIONS = {
    "avail" : lmod.avail,
    "list" : partial(lmod.list, hide_hidden=True),
    "freeze" : lmod.freeze,
    "show" : lmod.show,
    "load" : lmod.load,
    "unload" : lmod.unload,
    "purge" : lmod.purge,
    "savelist" : lmod.savelist,
    "save" : lmod.save,
    "restore" : lmod.restore,
    "reset" : lmod.reset
}
class LmodActionHandler(IPythonHandler):
    """Tornado handler exposing Lmod operations under /lmod/<action>.

    Read-only actions are served via GET, state-changing ones via POST.
    The URL regex (_action_regex below) restricts <action> to ACTIONS keys,
    so the lookup normally always succeeds.
    """

    @tornado.web.authenticated
    def get(self, action):
        # Look up the lmod function for this action and return its JSON result.
        func = ACTIONS.get(action, None)
        if func:
            args = self.get_arguments("args")
            result = func(*args)
            self.finish(json.dumps(result))

    @tornado.web.authenticated
    def post(self, action):
        func = ACTIONS.get(action, None)
        if func:
            args = self.get_arguments('args')
            if args:
                jpath_old = os.environ.get('JUPYTER_PATH')
                func(*args)
                # If JUPYTER_PATH has been modified by func
                # the kernel directory list is updated.
                if jpath_old != os.environ.get('JUPYTER_PATH'):
                    self.kernel_spec_manager.kernel_dirs = jupyter_path('kernels')
            self.finish(json.dumps('SUCCESS'))
# URL pattern that matches only the registered action names.
_action_regex = r"/lmod/(?P<action>{})".format("|".join(ACTIONS.keys()))

default_handlers = [(_action_regex, LmodActionHandler)]

# Stand-alone debug server for manual testing of the handler.
if __name__ == "__main__":
    import tornado.ioloop
    app = tornado.web.Application([ (r"/", LmodActionHandler), ])
    app.listen(12345)
    tornado.ioloop.IOLoop.current().start()
|
weizigege/pyqt5 | day01/pyqt1.py | import sys
from PyQt5.QtWidgets import QApplication,QWidget
# Minimal PyQt5 demo: show an empty 400x400 window and run the event loop.
if __name__=='__main__':
    app=QApplication(sys.argv)
    w=QWidget()
    w.resize(400,400)
    w.setWindowTitle('你是猪吗')  # window title (runtime string kept as-is)
    w.show()
    sys.exit(app.exec_())
|
yildirimyy/permutation-combination | combi.py | <reponame>yildirimyy/permutation-combination
def combinations(list_item, n, com_list=None):
    """Return the sorted, de-duplicated length-*n* combinations of *list_item*.

    Works on strings (elements are characters).  Results are accumulated in
    *com_list* through the recursion; top-level callers should omit it.

    Fix: the old signature used a mutable default (``com_list=[]``) that was
    appended to, so results leaked between successive top-level calls made
    without an explicit third argument; ``None`` plus a fresh list fixes
    that while staying call-compatible.
    """
    if com_list is None:
        com_list = []
    # Base case: the item has been whittled down to length n.
    if len(list_item) == n:
        # Skip combinations seen before (e.g. 'abcb' produces 'bb' twice).
        if not list_item in com_list:
            com_list.append(list_item)
            com_list.sort()
        # We are at the n-element depth, so return.
        return com_list
    else:
        # Recurse on every sub-sequence obtained by dropping one element.
        for i in range(len(list_item)):
            sub_list = list_item[:i] + list_item[i+1:]
            com_list = combinations(sub_list, n, com_list)
    return com_list
# Demo calls: combinations of a string's characters.
print(combinations("123", 2, []))
print("\t")
print(combinations("abcb", 2, []))
print("\t")
# print(combinations(['A','B','C', 'B'], 2))
# print("\t")
# Slice-syntax reminder:
#a[start:stop]
#a[start:]
#a[:stop]
#a[:]
# list = "abcb"
# print(list[:0] +" "+ list[1:])
# print(list[:1] +" "+ list[2:])
# print(list[:2] +" "+ list[3:])
# print(list[:3] +" "+ list[4:])
def permutations(list):
    """Return every permutation of the string elements in *list*, joining
    the pieces by concatenation (e.g. ['a', 'b'] -> ['ab', 'ba'])."""
    items = list  # local alias; parameter name kept for call compatibility
    # No elements: no permutations.
    if not items:
        return []
    # A single element is its own (only) permutation.
    if len(items) == 1:
        return items
    # Fix each element as the head in turn and permute the remainder.
    return [
        head + tail
        for i, head in enumerate(items)
        for tail in permutations(items[:i] + items[i + 1:])
    ]
# Demo: print all permutations of two 3-character strings.
print(permutations(list('123')))
print("\t")
print(permutations(list('abc')))
print("\t")
Finrsch/market_simulation | simulation.py | import pandas
import requests
import quandl #necessary ONLY if using quandl function below
quandl.ApiConfig.api_key="YOUR_TOKEN_HERE" #necessary ONLY if using the QUANDL function below
holding_list=list() #global variable. It holds the position. List element = [ticker, position_buy_value]
investment=int() #global variable, this is the amount needed to invest. Needs to be initiated with an actual VALUE!
#VARIOUS WAYS of getting the stock data, automatically. Alternatively nasdaq.com is a good source for personal use
def api_iex(stock):
    """
    -- requires IEX subscription from https://iexcloud.io/
    Call the IEX API and return the FULL available price history for *stock*
    as a pandas DataFrame (date/open/close/low/high/volume columns).

    Fixes over the original:
    * the token placeholder was a named ``{YOUR_TOKEN_HERE}`` format field
      given only a positional argument, so ``str.format`` raised KeyError
      before any request was made;
    * ``pd.DataFrame`` was referenced although this module imports
      ``pandas`` un-aliased (NameError).
    """
    token = 'pk_YOUR_TOKEN_HERE'  # replace with your real IEX publishable token
    url = 'https://cloud.iexapis.com/stable/stock/{}/chart/max?token={}'.format(
        stock.lower(), token)
    data = requests.get(url)
    try:
        dataj = data.json()
    except ValueError:  # body was not valid JSON
        return 'Error in api_iex for {}.'.format(stock)
    rows = [[i['date'], i['open'], i['close'], i['low'], i['high'], i['volume']]
            for i in dataj]
    return pandas.DataFrame(
        rows, columns=['date', 'open', 'close', 'low', 'high', 'volume'])
def api_quandl(equity):
    """
    -- requires QUANDL subscription for SHARADAR data sets from https://www.quandl.com/
    Fetch daily price data for *equity* via the QUANDL API.
    -- this needs to install and import the quandl library
    -- after importing quandl, you need to set your token value:
       quandl.ApiConfig.api_key="YOUR_TOKEN_HERE"
    """
    # Pull the SHARADAR equity-prices table and keep only date + OHLC columns.
    source = quandl.get_table('SHARADAR/SEP', ticker=equity)
    data = source[['date', 'close', 'open', 'low', 'high']]
    return data
#function that checks if enough liquidity in the stock, considering investment variable set above
def stock_liquidity(stock_data, investment):
    """Check whether *stock_data* shows enough liquidity to absorb *investment*.

    Intended to return 'yes' or 'no'.  The original body returned the
    undefined name ``value`` and therefore always raised NameError; until
    the liquidity rule is implemented, fail loudly and explicitly instead.
    """
    raise NotImplementedError(
        "stock_liquidity: liquidity rule not implemented yet")
# each of the functions below are using the holding_list global variable to hold & track the position
def buying_condition():
    """Stub: open a position when the BUY rule fires.

    Intended behaviour (not yet implemented — currently a no-op):
    """
    # if BUY condition met, then we append [stock.lower(), stock_value] to the holding_list
    # the buying is executed within the level of investment, the global variable
    return
def loss_cut():
    """Stub: close the position on a stop-loss breach.

    Intended behaviour (not yet implemented — currently a no-op):
    """
    # check if today's LOW value is below stock_value*(1-loss_cut_level)
    # it also updates investment, the global variable
    return
def sell_condition():
    """Stub: close the position when the SELL rule fires.

    Intended behaviour (not yet implemented — currently a no-op):
    """
    # if SELL condition met, then we remove [stock.lower(), stock_value] from the holding_list
    # it also updates investment, the global variable
    return
def algo_market_simulation(stock):
    """Skeleton back-test loop driving the stub strategy functions over the
    price history of *stock*.

    NOTE(review): several names here are unfinished or undefined —
    `stock_liquidity` is called with one argument but takes two,
    `loss_cut_level_condition` is never defined, and `holding_list[0]` is
    indexed while the list may be empty.  This function cannot run as-is;
    it documents the intended control flow only.
    """
    stock_data = api_iex(stock)  # getting stock data, in pandas format. Can be further processed as needed
    a = stock_liquidity(stock_data)
    if a == 'no':
        return 'Not enough liquidity in the stock'
    """
    STARTING the loop, using the length of the pandas as the index to follow
    """
    position = ''  # initiating position as an empty string
    for i in range(len(stock_data)):
        if position:
            if loss_cut_level_condition:
                loss_cut()
            else:
                sell_condition()
        else:
            buying_condition()
        # assuming the list only holds one position
        if stock.lower() in holding_list[0]:
            position = 'yes'
        else:
            position = ''
    return
|
marfeljoergsen/smart_duplicate_file_finder | smart_duplicate_file_finder.py | <filename>smart_duplicate_file_finder.py
#!/usr/bin/env python3
import sys
import subprocess
import inspect
from os import path, SEEK_SET, SEEK_END
import math
import argparse
import hashlib
import numpy as np # for random number generation
# File to compare if md5sum is the same for similar files on 8 vs 12 TB disk...
class Data:
    def __init__(self):
        """Load both file lists ("<size> <path>" per line) into memory.

        self.data1/self.data2 hold the raw lines; fileNotExistCount/-Limit
        implement a bail-out after too many missing files during md5
        verification; chunkData caches the sampling plan for ownMD5sum().
        """
        print('RUNNING: ' + inspect.stack()[0][3] + '()')
        self.line1 = 0
        self.line2 = 0
        self.fileNotExistCount = 0
        #self.fileNotExistLimit = 10
        self.fileNotExistLimit = 9e99  # effectively unlimited
        # Toggle between local test data and the real server file lists.
        if True:
        #if False:
            print(" *** TESTING (on laptop) ***")
            self.file1 = "random_data_RO_SORTED.txt_"
            self.file2 = "8tb_sdd_from_syn_mdadm_decrypted_SORTED.txt_"
            self.file1root = "/home/martin/Downloads/DELETE_THIS/ejdmægler-RAW/"
            self.file2root = "/home/martin/Downloads/DELETE_THIS/ejdmægler-RAW2/"
        else:
            print(" *** REAL PRODUCTION (on server) ***")
            self.file1 = "random_data_RO_SORTED.txt"
            self.file2 = "8tb_sdd_from_syn_mdadm_decrypted_SORTED.txt"
            self.file1root = "/mnt/hugeData"
            self.file2root = "/mnt/hugeData"
        # Open file 1 first, then file 2 - read data into buffers:
        try:
            with open(self.file1, 'r') as f:
                self.data1 = f.read().splitlines()  # Read whole file into buffer
        except FileNotFoundError as e:
            print("ERROR: " + e.strerror + ": \"" + e.filename + "\" (please fix: self.file1)")
            sys.exit(1)
        try:
            with open(self.file2, 'r') as f:
                self.data2 = f.read().splitlines()  # Read whole file into buffer
        except FileNotFoundError as e:
            print("ERROR: " + e.strerror + ": \"" + e.filename + "\" (please fix: self.file2)")
            sys.exit(1)
        # Print a bit of data:
        self.chunkData = (-1, '/dev/null')  # (size, path) cache key used by ownMD5sum
        print("File 1 name and size: \"" + self.file1 + "\" (size: " + str(len(self.data1)) + " lines )")
        print("File 2 name and size: \"" + self.file2 + "\" (size: " + str(len(self.data2)) + " lines )")
        print(" ------------------------------------ ")
        print(" ")
def findDuplicatesInSingleBuffer(self, b, *args, **kwargs):
if len(args) == 0:
minSz = -1 # not 0, use -1 to also get files with size 0...
else:
minSz = args[0]
# for ar in args:
# print(ar)
# minSz):
print('RUNNING: ' + inspect.stack()[0][3] + '()')
print(' ')
if b == 1:
data = self.data1
filelist = self.file1
runMD5path = self.file1root
elif b == 2:
data = self.data2
filelist = self.file2
runMD5path = self.file2root
else:
print("Invalid buffer! Program cannot continue")
sys.exit(1)
print("Searching for duplicate file-sizes in: " + filelist)
print(" ==> Path to use for file-list: " + runMD5path)
self.singleBufferMD5sumComparison(data, minSz, runMD5path)
print(" ")
def singleBufferMD5sumComparison(self, buf, minSize, runMD5path=""):
# this does *NOT* compare buffer 1 with buffer 2 - instead it only check a single buffer for duplicates!
disableMD5 = False # True
lastSz = []
lastFileOrDir = []
minSzCounter = 0
nonNumCounter = 0
numDuplicates = 0
numLinesTotal = len(buf)
l = 1 # current line number indicator - as shown in the text-file
for line in buf:
words = line.split()
if len(words) <= 1:
if len(line) != 0:
print("WARNING: Need size + file/directory, but unexpected string found in line: " + str(l))
else:
# Split words into [0]:size and [1]:file or directory:
sz = words[0]
fileOrDir = " ".join(words[1:])
if not sz.isnumeric():
print("WARNING: First column is not numeric in line: " + str(l) + " (" + fileOrDir + ")")
nonNumCounter = nonNumCounter + 1
else:
if len(lastSz) == 0: # meaning: The first time a new file-size occurs
if (sz == 0): # ignore files with size 0, cannot do md5sum or anything on them!
print("Filesize: 0 ==> Ignoring: " + str(fileOrDir))
else:
lastSz.append(int(sz))
lastFileOrDir.append(fileOrDir)
lastSz_lineNums = []
lastSz_lineNums.append(l)
elif lastSz[-1] == int(sz): # append when more files, with same size occurs
lastSz.append(int(sz))
lastFileOrDir.append(fileOrDir)
lastSz_lineNums.append(l)
else:
# Don't do the comparison, until we have all of similar filesizes in lastSz+lastFileOrDir:
if len(lastSz) > 1: # must be more than 1, for duplicates to exist...
# Must be > minSize, in order to be processed:
if lastSz[-1] <= minSize:
minSzCounter = minSzCounter + 1
else:
print(" ")
if len(lastSz_lineNums)<20:
print("*** Duplicate size (=" + str(lastSz[-1]) + ") in lines: " + \
str(lastSz_lineNums))
else:
print("*** Duplicate size (=" + str(lastSz[-1]) + ") in lines: " + \
str(lastSz_lineNums[0:9]) + "... (skipping, too many) ..." + \
str(lastSz_lineNums[-9:]))
numDuplicates = numDuplicates + len(lastSz)
# Do the heavy lifting:
self.doMD5sum(lastSz, runMD5path, lastFileOrDir, disableMD5)
# --- Reset, prepare for next block of same filesize-comparisons:
lastSz = []
lastFileOrDir = []
lastSz.append(int(sz))
lastFileOrDir.append(fileOrDir)
l = l + 1
if l % 10000 == 0:
print("Line: " + str(l) + "/" + str(numLinesTotal) + \
" (" + str(round(100*l/numLinesTotal)) + "% done)")
# ---=== Just checking if there are "un-processed" files to do MD5-sum on ===---
if len(lastSz) != 0: # this should've been reset, if we're done...
if lastSz > 0: # ignore files with size 0...
if len(lastSz) > 1: # must be more than 1, for duplicates to exist...
# Must be > minSize, in order to be processed:
if lastSz[-1] <= minSize:
minSzCounter = minSzCounter + 1
else:
print(" ")
print("*** Duplicate size (=" + str(lastSz[-1]) + ") in lines: " + \
str(lastSz_lineNums[0:9]) + "... (skipping, too many) ..." + \
str(lastSz_lineNums[-9:]))
# print("*** Duplicate size in lines: " + str(lastSz_lineNums) \
# + ". Size: " + str(lastSz[-1]) + " ***")
numDuplicates = numDuplicates + len(lastSz)
# Do the heavy lifting:
self.doMD5sum(lastSz, runMD5path, lastFileOrDir, disableMD5)
# --- Reset, prepare for next block of same filesize-comparisons:
lastSz = []
lastFileOrDir = []
# === And now we're done: ===
if numDuplicates>0:
print(" ")
print("All done, lines processed: " + str(l - 1))
print("Number of duplicates found: " + str(numDuplicates))
else:
print("All done, no duplicates found. Lines processed: " + str(l - 1))
if minSzCounter > 0:
print("Files discarded due to minimum-file size requirement: " + str(minSzCounter) + \
" (minSize=" + str(minSize) + ")")
if nonNumCounter > 0:
print("Files discarded due to non-numeric-file size: " + str(nonNumCounter))
print(" ")
print(" ")
def printBufLine(self, b, l):
return "Buffer: " + str(b) + ": Line: " + str(l + 1) + ": "
    def doMD5sum(self, lastSz, runMD5path, lastFileOrDir, disableMD5):
        """md5sum every path in *lastFileOrDir* (all of equal size) and report
        whether all group members are identical.

        Skips directories and missing files; aborts the program after
        self.fileNotExistLimit missing files in total.
        """
        storedMD5vals = []
        fileNotExisting = False
        # === Run md5sum on each of the duplicate files: ===
        for z in range(0, len(lastSz)):
            if self.fileNotExistCount >= self.fileNotExistLimit:
                print("ERROR: Too many files did not exist - exiting now...")
                sys.exit(1)
            fullPath = path.join(runMD5path, lastFileOrDir[z])
            if not path.exists(fullPath):
                print("\"" + fullPath + "\": does not exist: Cannot do md5sum... Skipping...")
                self.fileNotExistCount = self.fileNotExistCount + 1
                fileNotExisting = True
                continue
            if path.isdir(fullPath):
                print("\"" + fullPath + "\": is a directory... Skipping...")
            elif path.isfile(fullPath):
                if len(runMD5path) == 0:
                    print("\"" + fullPath + "\": MD5sum was not requested, only printing...")
                else:
                    if disableMD5:
                        print("\"" + fullPath + "\": MD5sum is disabled, only printing...")
                    else:
                        if False:  # preferably never use this, REALLY slow for LARGE files...
                            cmd = "md5sum " + fullPath
                            list_files = subprocess.getstatusoutput(cmd)
                        else:
                            list_files = self.ownMD5sum(fullPath)
                        # Print result to screen
                        if not list_files[0] == 0:
                            print("ERROR: md5sum exit code was: %d" % list_files[0])
                            print(list_files[1])
                            sys.exit(1)
                        else:
                            # Save md5sum, to make a conclusion in the end...
                            storedMD5vals.append(list_files[1].split()[0])  # save md5-value
                            print(list_files[1])
            else:
                print("\"" + fullPath + "\": is neither a dir/file, probably a special file, skipping md5sum!")
        # Write out conclusion: If all files are the same or not...
        # NOTE(review): if every entry was a directory, storedMD5vals is empty
        # and storedMD5vals[0] below raises IndexError — confirm before reuse.
        if fileNotExisting:
            print(" *** ERROR: One or more files was not found - please fix this (need ALL files)!")
        else:
            if (all(x == storedMD5vals[0] for x in storedMD5vals)):
                print(" *** All these files are the same - seems you should remove duplicates!")
            else:
                duplicate_dict = {}  # a dictionary to store each of them.
                for i in storedMD5vals:  # loop through them.
                    duplicate_dict[i] = storedMD5vals.count(i)
                print(" *** WARNING: All these files are not exactly the same - try removing duplicates:")
                print(duplicate_dict)
        print(" ")
    def ownMD5sum(self, fpath):
        """Compute an md5 digest of *fpath*, reading at most maxChunkNum 1 MiB
        chunks; larger files are sampled at random (but sorted) chunk offsets.

        The sampling plan (self.chunkOrder) is cached per file size in
        self.chunkData, so same-size files are hashed over the same offsets
        and stay comparable.  Returns (0, "<digest> <path>") to mimic
        subprocess.getstatusoutput("md5sum ...").

        NOTE(review): sampled digests are only valid for *comparing* files of
        equal size; they are not real md5sums of the whole file.
        """
        md5_hash = hashlib.md5()
        chunksize = 1024 ** 2
        #maxChunkNum = 2000 # md5sum for a 2 GB file on SSD takes around 5 seconds -> 2000 seems appropriate
        maxChunkNum = 1000  # md5sum for a 2 GB file on SSD takes around 30 seconds on mech HDD...
        #maxChunkNum = 2 # TEST REMOVE THIS!
        f = open(fpath, "rb")  # read, binary
        f.seek(0, SEEK_END)  # get the cursor positioned at end
        fsize = f.tell()  # current cursor position == size of file
        if not self.chunkData[0] == fsize:
            # New file size: (re)build the chunk plan.
            self.chunkData = (fsize, fpath)
            self.chunks_needed = math.ceil(fsize / chunksize)
            # Define the new "chunkOrder":
            if self.chunks_needed == 0:  # empty file: still read 1 (empty) chunk
                self.chunkOrder = range(1)
            elif self.chunks_needed <= maxChunkNum:  # use correct order - and *ALL* chunks
                self.chunkOrder = range(self.chunks_needed)
            else:  # use only maxChunkNum random (increasing ordered) chunks...
                self.chunkOrder = np.sort(np.random.permutation(self.chunks_needed)[:maxChunkNum])
                print(" *** WARNING: Only doing md5sum on a part of the file (" + str(len(self.chunkOrder)) + \
                      "/" + str(self.chunks_needed) + " chunks are used)! ***")
            self.chunks = len(self.chunkOrder)
        #else: same file size -> reuse the cached chunk plan
        # Begin calculating the md5sum, using chunkOrder:
        if not isinstance(self.chunkOrder, np.ndarray):
            # Full sequential read from the beginning.
            f.seek(0, SEEK_SET)
            for i in self.chunkOrder:  # for i in range(chunks):
                data = f.read(chunksize)
                md5_hash.update(data)
        else:
            # Sampled read: seek to each chosen chunk offset.
            for i in self.chunkOrder:  # for i in range(chunks):
                f.seek(i*chunksize, SEEK_SET)
                data = f.read(chunksize)
                md5_hash.update(data)
        f.close()
        # Print result and return:
        digest = md5_hash.hexdigest()  # should return the same as the "md5sum"-command
        retStr = digest + ' ' + fpath
        retTuple = (0, retStr)
        return retTuple
    def runMD5onFile(self, b):
        """Run the external ``md5sum`` command on the current file of buffer
        *b* (fullPath1/fullPath2, as set by findNext) and print the result."""
        if b == 1:
            l = self.line1
            fname = self.fullPath1
        elif b == 2:
            l = self.line2
            fname = self.fullPath2
        else:
            print("Invalid buffer! Program cannot continue")
            sys.exit(1)
        if not path.exists(fname):
            print("Error: File does not exist:", fname)
            sys.exit(1)
        # NOTE(review): building a shell string breaks on paths containing
        # spaces or shell metacharacters; subprocess.run([...]) would be safer.
        cmd = "md5sum " + fname
        list_files = subprocess.getstatusoutput(cmd)
        if not list_files[0] == 0:
            print("ERROR: md5sum exit code was: %d" % list_files[0])
            print(list_files[1])
            sys.exit(1)
        else:
            print(self.printBufLine(b, l) + list_files[1])
    def twoBufferMD5comparison(self):
        """Compare buffer 1 against buffer 2 (*NOT* within one buffer):
        advance through both size-sorted lists in lockstep and md5sum the
        pairs whose sizes match.

        NOTE(review): sizes compare as strings on the equality path but as
        ints on the ordering path; findNext() returns "-1" when a buffer is
        exhausted, which is what eventually terminates the loop below.  The
        `continue` in the else-branch skips the exhaustion check, so a
        buffer running out during the catch-up phase is only detected on a
        later iteration — verify termination on real data.
        """
        print("RUNNING: " + inspect.stack()[0][3] + '()')
        print(" ")
        # Init:
        sz = [0, 0]
        lines = [0, 0]  # will be incremented by "findNext"
        f = ['', '']
        sz[0] = self.findNext(1)
        sz[1] = self.findNext(2)
        while True:
            if (sz[0] == sz[1]):
                print(" --- Same size, running md5sum: ---")
                self.runMD5onFile(1)
                self.runMD5onFile(2)
                # Increment current line+filesize and filename once:
                sz[0] = self.findNext(1)
                sz[1] = self.findNext(2)
            else:
                # Search/increment line counters until a match is found or until done
                if int(sz[0]) > int(sz[1]):
                    sz[0] = self.findNext(1, sz[1])
                else:
                    sz[1] = self.findNext(2, sz[0])
                continue
            # print("sz=" + (str(sz)))
            if any(int(x) < 0 for x in sz):
                print("One buffer is complete, nothing more to compare then...")
                break
        print("All is done.")
def findNext(self, b, searchSz="-1"):
if b == 1:
buf = self.data1
l = self.line1
elif b == 2:
buf = self.data2
l = self.line2
else:
print("Invalid buffer! Program cannot continue")
sys.exit(1)
# print("l=" + str(l) + ", len(buf)=" + str(len(buf)))
if l < len(buf):
l = l + 1 # find NEXT index, so increment - this "l" is linenumber in textfile (index from 1->)
for i in range(l - 1, len(buf)):
# print(str(i+1) + ": " + buf[i])
words = buf[i].split()
if (len(words) <= 1):
if len(line) != 0:
print("WARNING: Need size + file/directory, but unexpected string found in line: " + str(l))
else:
sz = words[0]
fileOrDir = " ".join(words[1:])
if path.isdir(fileOrDir):
print(self.printBufLine(b, i) + \
"Skipping directory: \"" + fileOrDir + "\"")
continue
if sz.isnumeric():
if int(searchSz) < 0:
break
else:
if int(sz) <= int(searchSz):
break
else:
continue
if b == 1:
self.fullPath1 = path.join(self.file1root, fileOrDir)
self.line1 = i + 1 # increment, human-line numbering (indexing from 1, not 0)
elif b == 2:
self.fullPath2 = path.join(self.file2root, fileOrDir)
self.line2 = i + 1
else:
sz = "-1"
if b == 1:
print("Buffer 1: Nothing more to do...")
self.fullPath1 = ''
elif b == 2:
print("Buffer 2: Nothing more to do...")
self.fullPath2 = ''
return sz
def init_argparse() -> argparse.ArgumentParser:
    """Build the command-line parser: one positional 'buffer' list
    (expected to hold the integer 1 or 2)."""
    p = argparse.ArgumentParser(usage="%(prog)s buffer",
                                description="Buffer is an integer, 1 or 2.")
    p.add_argument("buffer", nargs="*")
    return p
if __name__ == '__main__':
    # Entry point: parse the buffer selector (1 or 2) and run the
    # single-buffer duplicate scan on it.
    parser = init_argparse()
    args = parser.parse_args()
    if not args.buffer:
        print("ERROR: Choose buffer 1 or buffer 2 (as input arguments to this script)!")
        sys.exit(1)
    else:
        buf = int(args.buffer[0])
    cmp = Data()  # init; reads both file lists from disk
    if True:
        cmp.findDuplicatesInSingleBuffer(buf)
    else:
        # minsize: To prevent WAYY to many matches
        # Consider adding md5sum to those small files, to get rid of all the
        # false positives (same file size, but not the same contents!)
        minSize = 5000  # --- Testing on Asus-laptop: ---
        # minSize = 100000000 # === Real production: ===
        cmp.findDuplicatesInSingleBuffer(buf, minSize)  # buffer 1
    print("========================")
    print(
        "WARNING: Run only md5 on beginning+end of file, see: https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file")
    # Compare both large files using md5sum, across disks...
    # cmp.twoBufferMD5comparison(..)
zybbigpy/VaspCZ | sourcecode/VaspNEBKeepInputs.py | <reponame>zybbigpy/VaspCZ<filename>sourcecode/VaspNEBKeepInputs.py
import os
def run():
    """Clean a VASP NEB directory: keep only the 'ini' and 'fin' entries in
    the current directory and delete everything else, after confirmation
    (empty input counts as yes).

    Uses shutil/os instead of ``os.system('rm -rf ...')`` so names with
    spaces or shell metacharacters cannot break (or hijack) the command.
    """
    import shutil

    maintain_files = ['ini', 'fin']
    del_files = [f for f in os.listdir('./') if f not in maintain_files]
    ipt = input(f'Thoes files/dir will be kept: {maintain_files} \nThose files/dirs will be deleted: {del_files}\nconfirm ([y]es/no): ')
    ipt = 'y' if ipt == '' else ipt
    if ipt in ['y', 'YES', 'yes', 'Y']:
        for file in del_files:
            # rm -rf semantics: remove directories recursively, plain files
            # directly, and ignore entries that have already vanished.
            if os.path.isdir(file):
                shutil.rmtree(file, ignore_errors=True)
            elif os.path.exists(file):
                os.remove(file)
    else:
        print(f'Did not del any file.')
# Allow running this module directly as a cleanup script.
if __name__ == '__main__':
    run()
zybbigpy/VaspCZ | vtstscripts-939/kdb/remote_insert.py | from kdbinsert import KdbInsert
from optparse import OptionParser
import sys
from aselite import read_any
from remote_db import RemoteDB
from server_config import *
class RemoteInsert(KdbInsert):
    """KdbInsert variant that stores processes in the remote (SQL) kdb.

    email and password are set prior to running insert if email/pw combo
    is present.
    """
    def __init__(self):
        self.email = None
        self.password = None

    # overloads KdbInsert.insert_into_db
    def insert_into_db(self, **args):
        """Insert one process (reactant/saddle/product/mode) into the remote
        DB unless a matching saddle already exists; returns "good" on
        success, a duplicate message otherwise."""
        # create database instance
        db = RemoteDB()
        # test if process is already in database
        name = db.get_name(args['s'].get_chemical_symbols())
        saddle_list = db.get_saddles(name)
        for db_saddle in saddle_list:
            if len(args['s']) != len(db_saddle[0]):
                continue
            if self.getMappings(args['s'], db_saddle[0], args['nf'], args['dc']) is not None:
                #print "SQL duplicate of", name, "with id:", db_saddle[1]
                return "SQL duplicate of " + name + " with id: " + str(db_saddle[1])
        # add process to db
        try:
            db.add_process(args['or'], args['os'], args['op'], args['om'],
                           args['r'], args['s'], args['p'], args['m'], args['ma'], self.email, self.password)
        except TypeError:
            print "Account info in user_config.py is not valid. Try running remote_config.py to set up account"
            return
        # Indicate that the process was inserted successfully.
        #print "good"
        return "good"
if __name__ == "__main__":
    # Command-line entry: insert one reactant/saddle/product (+optional mode)
    # into the remote kdb, using the previously stored nf/dc/mac parameters.
    insert_sub_class = RemoteInsert()
    # Parse command line options.
    parser = OptionParser(usage = "%prog [options] reactant saddle product mode")
    parser.add_option("-o", "--mode", dest = "mode",
                      help = "optional mode file",
                      default = None)
    options, args = parser.parse_args()
    # Make sure we get the reactant, saddle, product, and mode files.
    if len(args) < 3:
        parser.print_help()
        sys.exit()
    # Load the reactant, saddle, product, and mode files.
    reactant = read_any(args[0])
    saddle = read_any(args[1])
    product = read_any(args[2])
    mode = None
    if options.mode is not None:
        mode = insert_sub_class.load_mode(options.mode)
    # load previous params
    db = RemoteDB()
    params = db.get_params()
    nf = params['nf']
    dc = params['dc']
    mac = params['mac']
    insert_sub_class.insert(reactant, saddle, product, mode, nf=nf, dc=dc, mac=mac)
zybbigpy/VaspCZ | vtstscripts-939/kdb/local_insert.py | <gh_stars>10-100
from kdbinsert import KdbInsert
from optparse import OptionParser
import sys
from aselite import read_any
from config import *
from local_db import LocalDB
class LocalInsert(KdbInsert):
    """KdbInsert variant that stores processes in the local sqlite kdb."""
    def __init__(self):
        pass

    # This function will overload the default insert_into_db function
    def insert_into_db(self, **args):
        """Insert one process (reactant/saddle/product/mode) into the local
        DB unless a matching saddle already exists; returns "good" on
        success, a duplicate message otherwise."""
        # create instance of database
        db = LocalDB(args['kdbname'], args['nf'], args['dc'], args['mac'])
        # test if process is already in database
        name = db.get_name(args['s'].get_chemical_symbols())
        saddle_list = db.get_saddles(name)
        for db_saddle in saddle_list:
            if len(args['s']) != len(db_saddle[0]):
                continue
            if self.getMappings(args['s'], db_saddle[0], args['nf'], args['dc']) is not None:
                print "SQL duplicate of", name, "with id:", db_saddle[1]
                return "SQL duplicate of " + name + " with id: " + str(db_saddle[1])
        # add process to db
        db.add_process(args['or'], args['os'], args['op'], args['om'],
                       args['r'], args['s'], args['p'], args['m'], args['ma'])
        # Indicate that the process was inserted successfully.
        #print "good"
        print "KDB insert success"
        return "good"
if __name__ == "__main__":
    # Command-line entry: insert one reactant/saddle/product (+optional mode)
    # into the local kdb; any -n/-c/-m parameter not given falls back to the
    # value stored in the database.
    insert_sub_class = LocalInsert()
    # Parse command line options.
    parser = OptionParser(usage="%prog [options] reactant saddle product mode")
    parser.add_option("-o", "--mode", dest="mode",
                      help="optional mode file",
                      default=None)
    parser.add_option("-n", "--nf", dest="nf", action="store", type="float",
                      help="neighbor fudge parameter",
                      default=None)
    parser.add_option("-c", "--dc", dest="dc", action="store", type="float",
                      help="distance cutoff parameter",
                      default=None)
    parser.add_option("-m", "--mac", dest="mac", action="store", type="float",
                      help="mobile atom cutoff parameter",
                      default=None)
    options, args = parser.parse_args()
    # Make sure we get the reactant, saddle, product, and mode files.
    if len(args) < 3:
        parser.print_help()
        sys.exit()
    # Load the reactant, saddle, product, and mode files.
    reactant = read_any(args[0])
    saddle = read_any(args[1])
    product = read_any(args[2])
    mode = None
    if options.mode is not None:
        mode = insert_sub_class.load_mode(options.mode)
    # load previous params
    db = LocalDB(KDB_NAME)
    params = db.get_params()
    if options.nf is None:
        options.nf = params['nf']
    if options.dc is None:
        options.dc = params['dc']
    if options.mac is None:
        options.mac = params['mac']
    # run the insert standard insert function.
    insert_sub_class.insert(reactant, saddle, product, mode=mode, nf=options.nf, dc=options.dc, mac=options.mac,
                            kdbname=KDB_NAME)
|
zybbigpy/VaspCZ | vtstscripts-939/dymmatrix.py | <gh_stars>10-100
#!/usr/bin/env python
import numpy
numpy.seterr(all='raise')
import os
import glob
import aselite
from sys import exit
def check_file(filename):
    """Exit the program with an error message if *filename* is not a file."""
    if not os.path.isfile(filename):
        print 'No such file: %s' % filename
        exit(1)
def dymmatrix(displacecars, outcars):
all_displacements = []
displacements = None
for displacecar in displacecars:
check_file(displacecar)
print 'Reading %s' % displacecar
d = numpy.loadtxt(displacecar)[:,:3].flatten()
all_displacements.extend(d)
if displacements == None:
displacements = d
else:
displacements += d
ndisp = numpy.count_nonzero(displacements)
#indices of the nonzero displacements
di = numpy.nonzero(all_displacements)[0] % ndisp
print 'Number of displacements: %i' % ndisp
traj = []
for outcar in outcars:
check_file(outcar)
print 'Reading %s' % outcar
images = aselite.read_vasp_out(outcar)
atoms = images[0]
traj += images[1:]
reference_force = atoms.get_forces().flatten()
masses = traj[0].get_masses()
displacement_masses = []
for mass in masses:
displacement_masses.extend([mass,mass,mass])
masses = numpy.array(displacement_masses)
if len(traj) != ndisp:
print 'ERROR: number of displacements (%i) !=' % ndisp,
print 'number of calculations (%i)' % len(traj)
exit(1)
print 'Building dynamical matrix'
dymmat = numpy.zeros((ndisp,ndisp))
hessian = numpy.zeros_like(dymmat)
for i in range(ndisp):
f1 = traj[i].get_forces().flatten()[di]
f0 = reference_force[di]
dymmat[i] = -(f1-f0)
dymmat[i] /= displacements[di]
hessian[i] = dymmat[i]
for j in range(ndisp):
dymmat[i,j] /= numpy.sqrt(masses[di][i]*masses[di][j])
#symmetrize
dymmat = (dymmat + dymmat.transpose()) / 2.0
hessian = (hessian + hessian.transpose()) / 2.0
numpy.savetxt('freq.mat', dymmat, fmt='%16.8f')
print 'Diagonalizing matrix'
omegas, ev = numpy.linalg.eigh(dymmat)
numpy.savetxt('eigs.dat', omegas, fmt='%25.15g')
f = open('freq.dat', 'w')
for omega in omegas:
imag = 0
if omega < 0:
imag = 1
freq = numpy.sqrt(numpy.abs(omega))*521.47
s = '%12.6f cm^{-1} ... %i ' % (freq, imag)
f.write(s+'\n')
print s
f.close()
numpy.savetxt('modes.dat', ev, fmt='%16.8f')
#
f = open('modes_sqrt_amu.dat', 'w')
masses_ = atoms.get_masses()
for i in range(len(atoms)*3):
evec = ev[:,i].tolist() # eigenvectors are in columns
for j in range(len(atoms)):
dx = evec[3*j]/numpy.sqrt(masses_[j])
dy = evec[3*j+1]/numpy.sqrt(masses_[j])
dz = evec[3*j+2]/numpy.sqrt(masses_[j])
f.write('%10.6f %10.6f %10.6f\n' % (dx, dy, dz))
f.write('\n')
f.close()
#
force_constants, ev = numpy.linalg.eigh(hessian)
numpy.savetxt('force_constants.dat', force_constants, fmt='%16.12f')
effective_masses = force_constants/omegas
numpy.savetxt('effective_masses.dat', effective_masses, fmt='%12.6f')
def usage():
    """Print the command-line help text for dymmatrix.py."""
    # Parenthesized single-argument prints are valid in Python 2 and 3.
    # The trailing-comma continuation prints of the original are merged into
    # single lines; Python 2 softspace inserted exactly one space there, so
    # the emitted text is unchanged.
    print('usage: dymmmatrix.py [DISPLACECAR] [OUTCAR]')
    print('   or dymmmatrix.py #DISPLACECAR DISPLACECAR1 DISPLACECAR2 ...')
    print('          OUTCAR1 OUTCAR2 OUTCAR3 ...')
    print('')
if __name__ == '__main__':
    from sys import argv
    if '-h' in argv:
        usage()
        exit(0)
    if len(argv) == 1:
        # no arguments: default file names
        displacecars = ['DISPLACECAR']
        outcars = ['OUTCAR']
    elif len(argv) == 3:
        # one DISPLACECAR and one OUTCAR
        displacecars = [argv[1]]
        outcars = [argv[2]]
    elif len(argv) > 3:
        # first argument is the count of DISPLACECAR files that follow;
        # the remainder are OUTCARs
        ndisplacecars = int(argv[1])
        displacecars = argv[2:2+ndisplacecars]
        outcars = argv[2+ndisplacecars:]
    else:
        usage()
        exit(1)
    dymmatrix(displacecars, outcars)
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/local_db.py | import sqlite3
from aselite import Atoms
from aselite import FixAtoms
from aselite import write_vasp
import local_update
import numpy
import sys
import copy
class LocalDB():
    """SQLite-backed kinetic database (kdb) of reactant/saddle/product processes.

    Creates the schema on first use, hands out unique primary keys from an
    in-memory counter, and prompts the user to re-process stored entries when
    the nf/dc/mac parameters differ from what the database was built with.
    """
    def __init__(self, db_name, nf=None, dc=None, mac=None):
        # db_name: path of the SQLite file; nf/dc/mac: tuning parameters
        # (neighbor fudge, distance cutoff, mobile atom cutoff — presumably;
        # confirm against kdb config.py)
        self.db_name = db_name
        self.nf = nf
        self.dc = dc
        self.mac = mac
        # check if db_name is an existing database
        if not self.check_tables():
            # if not initialize the database
            self.create_tables()
        # create a dictionary to use for assaigning unique identifiers
        self.pk_dict = {'Atoms': self.get_max('Atoms', 'atoms_id'),
                        'Atom': self.get_max('Atom', 'atom_id'),
                        'Process': self.get_max('Process', 'pro_id'),
                        'Mobile': self.get_max('Mobile', 'mob_id')
                        }
        # check if parameters are different
        params = self.get_params()
        if self.nf == None:
            self.nf = params['nf']
        if self.dc == None:
            self.dc = params['dc']
        if self.mac == None:
            self.mac = params['mac']
        if self.check_params():
            print "Parameters are different from what is currently in the database."
            answer = raw_input("Do you want to update database entries to reflect the new parameters? (yes, no)")
            if 'y' in answer.lower():
                print "Updating parameters DO NOT exit until update it complete or database could be corrupted."
                self.update_params()
                print "Update complete. Please retry previous command to insert new process."
                sys.exit()
            else:
                print "Structures in the database will be left untouched."
    #database initialization
    def create_tables(self):
        """Create the Atoms/Atom/Process/Mobile/Param schema and seed defaults."""
        conn = sqlite3.connect(self.db_name)
        # atoms_id is a unique identifier
        # atoms_name is the structure name IE: 'Al' or 'CuO'
        # atoms_cellXX are bounds for the 3D cell IE: 00 = top left 02 = top right
        conn.execute('''CREATE TABLE Atoms(atoms_id INT PRIMARY KEY UNIQUE NOT NULL,
                        atoms_cell00 REAL NOT NULL,
                        atoms_cell01 REAL NOT NULL,
                        atoms_cell02 REAL NOT NULL,
                        atoms_cell10 REAL NOT NULL,
                        atoms_cell11 REAL NOT NULL,
                        atoms_cell12 REAL NOT NULL,
                        atoms_cell20 REAL NOT NULL,
                        atoms_cell21 REAL NOT NULL,
                        atoms_cell22 REAL NOT NULL)''')
        # atom_id is a unique identifer
        # atoms_id is a reference value to identify what collection of atoms it belongs in
        # num is the ase.atom.index value
        # symbol is the atom's chemical symbol IE: 'Al' 'Cu'
        # x,y,z_coord is the x,y,z position of the atom
        # fixed is a boolean contraint if the atom is fixed or not
        conn.execute('''CREATE TABLE Atom(atom_id INT PRIMARY KEY UNIQUE NOT NULL,
                        atoms_id INT NOT NULL,
                        num INT NOT NULL,
                        symbol TEXT NOT NULL,
                        x_coord REAL NOT NULL,
                        y_coord REAL NOT NULL,
                        z_coord REAL NOT NULL,
                        mode0 REAL NOT NULL,
                        mode1 REAL NOT NULL,
                        mode2 REAL NOT NULL,
                        fixed INT NOT NULL,
                        FOREIGN KEY(atoms_id) REFERENCES Atoms(atoms_id))''')
        # pro_id is a unique identifer
        # all the rest of the values are references to atoms_ids
        conn.execute('''CREATE TABLE Process(pro_id INT PRIMARY KEY UNIQUE NOT NULL,
                        name TEXT NOT NULL,
                        is_used INT NOT NULL,
                        original_reactant_id INT NOT NULL,
                        original_saddle_id INT NOT NULL,
                        original_product_id INT NOT NULL,
                        reactant_id INT,
                        saddle_id INT,
                        product_id INT,
                        FOREIGN KEY (original_reactant_id) REFERENCES Atoms(atoms_id),
                        FOREIGN KEY (original_saddle_id) REFERENCES Atoms(atoms_id),
                        FOREIGN KEY (original_product_id) REFERENCES Atoms(atoms_id),
                        FOREIGN KEY(reactant_id) REFERENCES Atoms(atoms_id),
                        FOREIGN KEY(saddle_id) REFERENCES Atoms(atoms_id),
                        FOREIGN KEY(product_id) REFERENCES Atoms(atoms_id))''')
        # mob_id is a unqiue identifer
        # num is the atom number
        # pro_id is a reference to the process the atom belongs to
        conn.execute('''CREATE TABLE Mobile(mob_id INT PRIMARY KEY UNIQUE NOT NULL,
                        num INT NOT NULL,
                        pro_id INT NOT NULL,
                        FOREIGN KEY (pro_id) REFERENCES Process(pro_id))''')
        # config_option is the name of the configuration, IE: nf, dc, or mac
        # config_value is the value of the corresponding corfigutation option
        conn.execute('''CREATE TABLE Param(config_option TEXT UNIQUE NOT NULL,
                        config_value INT NOT NULL)''')
        # add default parameters for Params table
        default_values = [('nf', 0.2), ('dc', 0.3), ('mac', .7)]
        conn.executemany('''INSERT INTO Param VALUES (?,?)''', default_values)
        # commit the data and close the database
        conn.commit()
        conn.close()
    #############################################################
    # Insertions ################################################
    #############################################################
    # adds a process, each parameter is an ase.Atom object
    # EXCEPT mode/o_mode, mode is a numpy array or None
    # and mobile_set is a list of numbers
    def add_process(self, o_reactant, o_saddle, o_product, o_mode,
                    reactant, saddle, product, mode, mobile_set):
        """Insert one process (originals plus optional processed structures).

        Returns the new process primary key.  When *reactant* is falsy the
        process is stored with is_used = 0 and NULL processed structures.
        """
        conn = self.connect_db()
        conn.execute('''BEGIN''')
        or_pk = self.add_atoms(o_reactant, o_mode, conn)
        os_pk = self.add_atoms(o_saddle, o_mode, conn)
        op_pk = self.add_atoms(o_product, o_mode, conn)
        if reactant:
            r_pk = self.add_atoms(reactant, mode, conn)
            s_pk = self.add_atoms(saddle, mode, conn)
            p_pk = self.add_atoms(product, mode, conn)
            is_used = 1
        else:
            r_pk = None
            s_pk = None
            p_pk = None
            is_used = 0
        symbols = o_reactant.get_chemical_symbols()
        name = self.get_name(symbols)
        pro_pk = self.get_id('Process')
        conn.execute('''INSERT INTO Process VALUES (?,?,?,?,?,?,?,?,?)''',
                     (pro_pk, name, is_used, or_pk, os_pk, op_pk, r_pk, s_pk, p_pk)
                     )
        self.add_mobile(mobile_set, pro_pk, conn)
        conn.commit()
        conn.close()
        return pro_pk
    # adds a specific ase.Atom object (SHOULD NOT BE USED)
    # this function is only to be used inside add_process()
    def add_atoms(self, atoms, mode, conn):
        """Insert one Atoms row plus one Atom row per atom; return atoms_id."""
        # if mode = None, create array of zeros.
        #if mode == None:
        #if not hasattr(mode, 'shape'):
        if type(mode) != numpy.ndarray and type(mode) != list and type(mode) != tuple: # MJW fix
            mode = numpy.zeros((len(atoms),3))
        # get important data from ase.Atoms instance
        symbols = atoms.get_chemical_symbols()
        positions = atoms.get_positions()
        cell = atoms.get_cell()
        constraints = atoms._get_constraints()
        # get unique primary key
        atoms_pk = self.get_id('Atoms')
        # add data to Atoms table
        conn.execute('''INSERT INTO Atoms VALUES (?,?,?,?,?,?,?,?,?,?)''',
                     (atoms_pk,
                      cell[0][0],cell[0][1],cell[0][2],
                      cell[1][0],cell[1][1],cell[1][2],
                      cell[2][0],cell[2][1],cell[2][2]))
        for i in range(len(atoms)):
            atom_pk = self.get_id('Atom')
            index = i
            sub_mode = mode[i]
            symbol = symbols[i]
            position = positions[i]
            # fixed flag comes from the first FixAtoms-style constraint
            if i in constraints[0].index:
                fixed = 1
            else:
                fixed = 0
            conn.execute('''INSERT INTO Atom VALUES (?,?,?,?,?,?,?,?,?,?,?)''',
                         (atom_pk, atoms_pk, index, symbol,
                          position[0], position[1], position[2],
                          sub_mode[0], sub_mode[1], sub_mode[2], fixed))
        return atoms_pk
    # mobile_set is a list of atom indicies, pro_pk is a unique identifier
    # of the process that contains the mobile atoms
    def add_mobile(self, mobile_set, pro_pk, conn):
        """Insert one Mobile row per mobile atom index for process pro_pk."""
        for mobile in mobile_set:
            mob_pk = self.get_id('Mobile')
            conn.execute('''INSERT INTO Mobile VALUES (?,?,?)''',
                         (mob_pk, mobile, pro_pk))
    #############################################################
    # Queries ###################################################
    #############################################################
    # query db for ase.Atoms object based off atoms_id
    def get_atoms(self, atoms_id):
        """Rebuild and return an aselite Atoms object for the given atoms_id."""
        conn = self.connect_db()
        # query the db for the specific atoms entry
        atoms = conn.execute('SELECT * FROM Atoms WHERE atoms_id = ?', (str(atoms_id),) ).fetchone()
        cell = []
        # populate the 3D cell block
        # (columns after atoms_id are the 9 cell components, grouped in threes)
        count = 0
        temp = []
        for i in range(len(atoms)):
            if i !=0:
                temp.append(atoms[i])
                if count == 2:
                    cell.append(temp)
                    temp = []
                    count = 0
                else:
                    count += 1
        # query the db for all atom entries with the same atoms_id as given
        atoms = conn.execute('SELECT * FROM Atom WHERE atoms_id = ? ORDER BY num', (str(atoms_id),) ).fetchall()
        conn.close()
        symbols = []
        positions = []
        fixed = []
        mode = []
        # loop over each atom from query and pull out data needed to build ase.Atoms instance
        for i in range(len(atoms)):
            symbols.append(str(atoms[i][3]))
            positions.append([atoms[i][4],atoms[i][5],atoms[i][6]])
            mode.append([atoms[i][7],atoms[i][8],atoms[i][9]])
            fixed.append(atoms[i][10])
        # create FixAtoms mask for ase.Atoms' constraint
        constraint = FixAtoms(mask=fixed)
        # create ase.Atoms instance
        # NOTE(review): the stored cell and constraint are discarded here —
        # a unit cell is substituted and the constrained constructor is
        # commented out; confirm this is intentional.
        cell = [[1,0,0], [0,1,0], [0,0,1]]
        #atoms = Atoms(symbols = symbols, positions = positions, cell = cell, constraint = constraint)
        atoms = Atoms(symbols = symbols, positions = positions, cell = cell)
        #self.write_atoms('test.con', atoms)
        return atoms
    #generate a mode list from a given atoms_id
    def get_mode(self, atoms_id):
        """Return the stored (n, 3) mode array for the given atoms_id."""
        conn = self.connect_db()
        mode_list = conn.execute(''' SELECT mode0, mode1, mode2 FROM Atom WHERE atoms_id = ? ORDER BY num''', (atoms_id,)).fetchall()
        conn.close()
        mode = numpy.array(mode_list)
        return mode
    # name is the chemical name for the stucture, IE: Al or CuO
    def get_process(self, name):
        """Return usable processes for *name*, each expanded four ways.

        Every stored process yields: the process itself, its mirror, the
        min/product swap (reverse direction), and the mirrored swap.
        """
        # query db for all processes with same name as given in params
        conn = self.connect_db()
        return_list = []
        process_list = conn.execute('''SELECT * FROM Process WHERE name = ?''', (name,)).fetchall()
        # iterate through all process matches and extract important data
        for process in process_list:
            #check if the process is useable
            if not process[2]:
                continue
            # dictionary to hold important data
            pro_dict = {'minimum': None,
                        'saddle' : None,
                        'product': None,
                        'mobile' : [],
                        'mode'   : None,
                        'mirror' : False,
                        'id'     : None}
            # query database for mobile atoms list of the current process
            mobile_list = conn.execute('''SELECT * FROM Mobile WHERE pro_id = ?''', (process[0],)).fetchall()
            # add the mobile atoms to dictionary
            for mobile in mobile_list:
                pro_dict['mobile'].append(mobile[1])
            # convert atoms_ids to ase.Atoms() objects
            pro_dict['minimum'] = self.get_atoms(process[6])
            pro_dict['saddle'] = self.get_atoms(process[7])
            pro_dict['product'] = self.get_atoms(process[8])
            pro_dict['mode'] = self.get_mode(process[6])
            pro_dict['id'] = process[0]
            return_list.append(pro_dict)
            #pro_dict with mirror set to true
            mirror_dict = copy.deepcopy(pro_dict)
            mirror_dict['mirror'] = True
            return_list.append(mirror_dict)
            # swap min and product and re add dictionary
            swap_dict = copy.deepcopy(pro_dict)
            swap_dict['minimum'], swap_dict['product'] = swap_dict['product'], swap_dict['minimum']
            return_list.append(swap_dict)
            # swap_dict with mirror set to true
            mirror_dict2 = copy.deepcopy(swap_dict)
            mirror_dict2['mirror'] = True
            return_list.append(mirror_dict2)
        conn.close()
        return return_list
    # This function is used to check for duplicate entries
    def get_saddles(self, name):
        """Return [[saddle_atoms, saddle_id], ...] for all processes named *name*."""
        # query db for all processes
        conn = self.connect_db()
        sid_list = conn.execute('''SELECT saddle_id FROM PROCESS WHERE name = ?''', (name,) ).fetchall()
        conn.close()
        saddle_list = []
        for sid in sid_list:
            saddle = self.get_atoms(sid[0])
            saddle_list.append([saddle, sid[0]])
        return saddle_list
    # Retrieves the MAC, DC, and NF parameters.
    def get_params(self):
        """Return the Param table as a dict {option_name: value}."""
        conn = self.connect_db()
        params = conn.execute('''SELECT * FROM Param''').fetchall()
        conn.close()
        param_dict = {}
        for param in params:
            param_dict[str(param[0])] = param[1]
        return param_dict
    #############################################################
    # Helper Functions ##########################################
    #############################################################
    # updates the params to define mobile atom's neighbors and
    # what is considered a mobile atom
    def update_params(self):
        """Rewrite the Param table and re-process every stored process.

        Each original reactant/saddle/product is re-inserted with the new
        parameters; on failure the old processed entries are removed but the
        originals are kept (is_used = 0), on success the old process row is
        deleted entirely (the re-insert created a replacement).
        """
        # update Params table
        conn = self.connect_db()
        conn.execute('''DELETE FROM Param''')
        values = [('nf', self.nf), ('dc', self.dc), ('mac', self.mac)]
        conn.executemany('''INSERT INTO Param VALUES (?,?)''', values)
        conn.commit()
        # query for all processes
        process_list = conn.execute('''SELECT * FROM Process''').fetchall()
        # loop over all processes from the query
        for process in process_list:
            o_reactant = self.get_atoms(process[3])
            o_saddle = self.get_atoms(process[4])
            o_product = self.get_atoms(process[5])
            o_mode = self.get_mode(process[3])
            kdbupdate = local_update.LocalUpdate()
            num = kdbupdate.insert(o_reactant, o_saddle, o_product, mode=o_mode, nf=self.nf, dc=self.dc, mac=self.mac, kdbname=self.db_name)
            if num == 0:
                print "process number", process[0], "is now not useable for queries."
                print "However it will not be deleted from the database."
                # remove reactant, saddle, product, mobile but leave the original
                # reactant, saddle, and product
                conn.execute('''UPDATE Process
                                SET is_used = 0, reactant_id = NULL, saddle_id = NULL, product_id = NULL
                                WHERE pro_id = ?''', (process[0],) )
                conn.execute('''DELETE FROM Atoms
                                WHERE atoms_id in (?,?,?)''', (process[6],process[7],process[8]))
                conn.execute('''DELETE FROM Atom
                                WHERE atoms_id in (?,?,?)''', (process[6],process[7],process[8]))
                conn.execute('''DELETE FROM Mobile
                                WHERE pro_id = ?''', (process[0],) )
                conn.commit()
            else:
                self.remove_process(process[0])
                conn.commit()
        conn.close()
    # helper function to remove a process from the database
    def remove_process(self, pro_pk):
        """Delete a process row and every Atoms/Atom/Mobile row it references."""
        conn = self.connect_db()
        process = conn.execute('''SELECT * FROM Process
                                  WHERE pro_id = ?''', (pro_pk,)).fetchone()
        conn.execute('''DELETE FROM Mobile WHERE pro_id = ?''', (pro_pk,))
        conn.execute('''DELETE FROM Atom
                        WHERE atoms_id in (?,?,?,?,?,?)''', (process[3],process[4],process[5],process[6],process[7],process[8]))
        conn.execute('''DELETE FROM Atoms
                        WHERE atoms_id in (?,?,?,?,?,?)''', (process[3],process[4],process[5],process[6],process[7],process[8]))
        conn.execute('''DELETE FROM Process WHERE pro_id = ?''', (pro_pk,))
        conn.commit()
        conn.close()
    # writes an atoms object to a file as a .con file
    # NOTE(review): despite the comment, this writes VASP (POSCAR) format.
    def write_atoms(self, filename, atoms):
        write_vasp(filename, atoms)
    # helper function to get name from list of symbols
    # IE: symbols = ['Cu', 'Cu', 'O'] -> name = "CuO"
    def get_name(self, symbols):
        name = ""
        for symbol in symbols:
            if symbol not in name:
                name = name + symbol
        return name
    # helper function to connect to the database
    def connect_db(self):
        # check if tables exist, if false create tables
        return sqlite3.connect(self.db_name)
    # helper function to decide if database needs to create tables or not
    def check_tables(self):
        """Return True when the SQLite file already contains tables."""
        conn = sqlite3.connect(self.db_name)
        # query master table (created by sqlite3) for other tables
        tables = conn.execute(''' SELECT name FROM sqlite_master WHERE type='table' ''').fetchall()
        conn.close()
        # tables is a list and will return false if empty
        if tables:
            return True
        return False
    # helper function to get largest id for a specific table
    def get_max(self, table_name, id_name):
        """Return the largest primary key in *table_name* (0 when empty)."""
        conn = self.connect_db()
        # query the given table for the largest primary key
        max_id = conn.execute('''SELECT max({}) FROM {}'''.format(id_name, table_name)).fetchone()[0]
        conn.close()
        # if there are no entries in the database max_id = None
        if not max_id:
            max_id = 0
        return max_id
    # helper function to get unique primary key number for a specific table
    def get_id(self, table_name):
        self.pk_dict[table_name] += 1
        return self.pk_dict[table_name]
    # helper function to check if given params are different from params in database.
    def check_params(self):
        """Return True when any of nf/dc/mac differs from the stored Param row."""
        conn = self.connect_db()
        params = conn.execute('''SELECT * FROM Param''').fetchall()
        for param in params:
            if 'nf' == param[0].lower() and self.nf != param[1]:
                conn.close()
                return True
            if 'dc' == param[0].lower() and self.dc != param[1]:
                conn.close()
                return True
            if 'mac' == param[0].lower() and self.mac != param[1]:
                conn.close()
                return True
        conn.close()
        return False
|
zybbigpy/VaspCZ | vtstscripts-939/coverage.py | #!/usr/bin/env python
import sys
import glob
# Perl scripts that will never be ported to Python; this list is rewritten
# in-place in this very file when script names are passed on the command line.
ignore = ['kdbaddpr.pl', 'kdbaddprnew.pl', 'kdbaddvpr.pl', 'kdbquery.pl', 'kdbquerynew.pl', 'akmcreset.pl', 'akmcupdate.pl', 'akmc.pl', 'akmccleanjobs.pl', 'pos2con.pl']
if len(sys.argv) > 1:
    # toggle each named script in/out of the ignore list
    for fname in sys.argv[1:]:
        if fname in ignore:
            ignore.remove(fname)
        else:
            ignore.append(fname)
    # rewrite this source file with the updated 'ignore =' line
    f = open(__file__, 'r')
    lines = f.readlines()
    f.close()
    f = open(__file__, 'w')
    for line in lines:
        if line.startswith('ignore ='):
            line = 'ignore = ' + repr(ignore) + "\n"
        f.write(line)
    f.close()
    print 'The coverage.py ignore list has been updated:', ignore
    sys.exit()
# report mode: compare *.pl scripts against their *.py ports
ig = [p.replace('.pl', '') for p in ignore]
pl = sorted([p.replace('.pl', '') for p in glob.glob("*.pl")])
py = [p.replace('.py', '') for p in glob.glob("*.py")]
print
for p in pl:
    # green/dim = ignored, green/bold = ported, red = still missing a port
    if p in ig:
        print "\033[02;32m% 16s.pl (not going to implement)\033[00m" % p
    elif p in py:
        print "\033[01;32m% 16s.pl %s.py\033[00m" % (p, p)
    else:
        print "\033[01;31m% 16s.pl\033[00m" % p
print
|
zybbigpy/VaspCZ | vtstscripts-939/center.py | #!/usr/bin/env python
import aselite
from sys import argv, exit
if len(argv) < 2 or len(argv) > 3 or '-h' in argv:
print 'usage: center.py FILE [DISTANCE]'
print ' centers the structure in the current box and'
print ' optionally adds DISTANCE amount of vacuum to FILE'
print
exit(0)
filename = argv[1]
if len(argv) == 3:
distance = float(argv[2])
else:
distance = None
atoms = aselite.read_any(filename)
if distance:
atoms.center(distance)
else:
atoms.center()
atoms.write(filename)
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/test/test.py | # NOTE: to test MySQL/anything in remote, you must first
# install MySQL-server (atleast until I setup the DB on Theory server)
# then run remote_initialize.py
import os
import sys
import commands
import filecmp
#get args
args = sys.argv
if len(args) <= 1:
    # NOTE(review): this only warns — execution continues even with no
    # arguments; confirm whether a sys.exit() was intended here.
    print "test.py requires atleast one arguement. Please 'local' and or 'remote' as parameters."
if "local" in args:
    print "testing local"
    #remove any old queries and or sqlite3 databases
    try:
        os.system("rm -rf kdbmatches")
        os.system('rm kdb.db')
    except:
        pass
    # sqlite3 tests
    # test insert function with sqlite3 database
    print "Testing insert."
    out1 = commands.getoutput("python ../local_client.py insert test_vars/reactant.con test_vars/saddle.con test_vars/product.con --mode test_vars/mode.dat")
    print out1
    print ""
    print ""
    # test check for duplicates with sqlite3 database
    # (second identical insert should be rejected as a duplicate)
    print "Testing duplicate check."
    out2 = commands.getoutput("python ../local_client.py insert test_vars/reactant.con test_vars/saddle.con test_vars/product.con --mode test_vars/mode.dat")
    print out2
    print ""
    print ""
    # test query function with sqlite3 database
    print "Testing query."
    out3 = commands.getoutput("python ../local_client.py query test_vars/reactant.con")
    print out3
    print ""
    print ""
    # byte-compare the produced database against the reference copy
    print "Testing insert math"
    if filecmp.cmp('kdb.db', 'test_vars/kdb.db'):
        print "math good"
    else:
        print "insert math different from expected"
    # compare every generated suggestion file against the reference set
    print "Testing query math"
    common_files = ['PRODUCT_0','PRODUCT_1','PRODUCT_2','PRODUCT_3','PRODUCT_4','PRODUCT_5','PRODUCT_6','PRODUCT_7',
                    'SADDLE_0','SADDLE_1','SADDLE_2','SADDLE_3','SADDLE_4','SADDLE_5','SADDLE_6','SADDLE_7']
    result = filecmp.cmpfiles('kdbmatches', 'test_vars/kdbmatches', common_files)
    # result = (matching, mismatching, errors)
    if not result[1] and not result[2]:
        print "math good"
    elif not result[1] and result[2]:
        print "some errors occured:"
        print result[2]
    else:
        print "the list of files that are different."
        print result[1]
if "remote" in args:
    # remote (MySQL) variant of the same insert/query smoke test
    print "testing insert"
    out1 = commands.getoutput("python ../remote_client.py insert test_vars/reactant.con test_vars/saddle.con test_vars/product.con --mode test_vars/mode.dat")
    print out1
    print ""
    print ""
    print "testing query"
    out2 = commands.getoutput("python ../remote_client.py query test_vars/reactant.con")
    print out2
    print ""
    print ""
zybbigpy/VaspCZ | sourcecode/Check_Hb.py | <gh_stars>10-100
"""
针对fcc获得Hb
遍历当前文件夹下的文件下的文件夹,fcc_withele存在的时候,就计算一个Hb
"""
import os
def cal_Hb(item1=0.0, item2=0.0, item3=0.0, item4=0.0):
    """Return the binding energy Hb = item1 + item2 - item3 - item4.

    BUGFIX: the original body referenced item1..item4 as undefined globals,
    so any call raised NameError.  They are now parameters (defaulting to
    0.0, so the old zero-argument call signature still works).
    """
    Hb = item1 + item2 - item3 - item4
    return Hb
def run():
    """Walk the tree below the current directory and print every
    'fcc_with<XX>' folder found (XX: two-character element symbol).

    BUGFIX: os.walk() requires a starting directory; the original call with
    no argument raised TypeError.  Walk from '.' (the current directory).
    """
    for path, dirnames, filenames in os.walk('.'):
        for dirname in dirnames:
            # dirname[:-2] drops the trailing two-letter element symbol,
            # e.g. 'fcc_withCu' -> 'fcc_with'
            if dirname[:-2] == 'fcc_with':
                print(path, dirname)
if __name__ == '__main__':
    # scan the working directory tree for fcc_with* folders
    run()
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/config.py | # This file holds variables that will never be changed during execution.
# The below parameters are only used when a database has not been created.
MOBILE_ATOM_CUTOFF = 0.7   # displacement threshold for a "mobile" atom (presumably Angstrom — confirm)
NEIGHBOR_FUDGE = 0.2       # fudge factor used when selecting neighbors of mobile atoms — confirm semantics
DISTANCE_CUTOFF = 0.3      # distance tolerance used in structure matching — confirm semantics
PBC_MAPPING_CHECK = False  # toggle for an extra periodic-boundary mapping check
REBOX_SUGGESTIONS = False  # toggle for re-boxing suggested structures
KDB_NAME = 'kdb.db'        # default SQLite database filename
zybbigpy/VaspCZ | vtstscripts-939/stretch.py | #!/usr/bin/env python
import aselite
from sys import exit, argv
from os.path import isfile
import numpy as np
if '-h' in argv or len(argv) < 5:
    print 'usage: stretch.py FILENAME INDEX1 INDEX2 DISTANCE'
    print '   Stretches the atom INDEX2 by the bond defined by atoms INDEX1'
    print '   and INDEX2 by DISTANCE. Saves the result to FILENAME_stretch.'
    print
    exit(0)
# positional arguments
filename = argv[1]
index1 = int(argv[2])
index2 = int(argv[3])
distance = float(argv[4])
if not isfile(filename):
    print 'No such file: %s' % filename
    exit(1)
atoms = aselite.read_any(filename)
r = atoms.get_positions()
# unit vector along the INDEX1 -> INDEX2 bond
bond = r[index2] - r[index1]
bond /= np.linalg.norm(bond)
# move atom INDEX2 by DISTANCE along that bond direction
r[index2] += distance*bond
atoms.set_positions(r)
atoms.write(filename+'_stretch')
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/server_config.py | #List of variables used through remote package
db_name = 'kdb'                  # main process database
backup_db_name = 'kdb_backup'    # backup database for submitted originals
user_db_name = 'kdb_user'        # user-account database
#note these variables should only be set on the server
#client's do not need these.
# SECURITY NOTE(review): MySQL credentials are hard-coded in source; move
# them to an untracked config file or environment variables.
user = 'root'
password = 'password'
port = 3306
host = 'localhost'
|
zybbigpy/VaspCZ | othercode/get_Hb_2nn_to_6nn_forfcc.py | <gh_stars>10-100
import os
import subprocess
import sys
sys.path.append(os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode'))
import zzdlib
import numpy as np
import VaspCZ
pys_path = os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode')
def run_Hf_and_Hb(ele):
    """Print the vacancy formation energy Hf and, per neighbor shell 1nn-6nn,
    the solute-vacancy binding energy Hb for element *ele*.

    Energies come from zzdlib's WARNING/energy check on the relaxed runs in
    '{ele}-Energies/...' (108-site Fe supercell naming convention).
    """
    # WARNING and energy checks for the relaxed reference structures
    # (translated from the original Chinese comment)
    E_pure = zzdlib.Vasp.check_WARNING_and_Energy(path=f'{ele}-Energies/Fe108')
    E_ele1 = zzdlib.Vasp.check_WARNING_and_Energy(path=f'{ele}-Energies/Fe107{ele}1')
    E_V1 = zzdlib.Vasp.check_WARNING_and_Energy(path=f'{ele}-Energies/Fe107V1')
    # Hf does not depend on the neighbor shell — compute it once instead of
    # recomputing it identically inside the loop.
    Hf = E_V1 - 107/108*E_pure
    xnn_list = '1nn,2nn,3nn,4nn,5nn,6nn'.split(',')
    for xnn in xnn_list:
        E_ele1V1 = zzdlib.Vasp.check_WARNING_and_Energy(path=f'{ele}-Energies/Fe106{ele}1V1_{xnn}')
        Hb = E_ele1V1 + E_pure - E_ele1 - E_V1
        print(f'xnn: {xnn} Hf: {Hf:.4f} Hb: {Hb:.4f}')
if __name__ == '__main__':
    # NOTE(review): 'source ~/.bashrc' runs in a child shell and cannot
    # change this process's environment — likely a no-op; confirm intent.
    os.system(f'source ~/.bashrc')
    # element symbol is taken from the folder name '...with<Element>'
    ele = os.path.basename(os.getcwd()).split('with')[-1]
    print(f'ele: {ele}')
    run_Hf_and_Hb(ele)
zybbigpy/VaspCZ | vtstscripts-939/kdb/local_update.py | <reponame>zybbigpy/VaspCZ<filename>vtstscripts-939/kdb/local_update.py<gh_stars>10-100
#!/usr/bin/env python
import local_db
from kdbinsert import KdbInsert
class LocalUpdate(KdbInsert):
    """KdbInsert variant whose insert step writes into the local SQLite kdb."""
    # overrides default insert_into_db function
    def insert_into_db(self, **args):
        # create instance of database (kdbname path plus nf/dc/mac parameters)
        db = local_db.LocalDB(args['kdbname'], args['nf'], args['dc'], args['mac'])
        # add process to db: originals (or/os/op/om), processed structures
        # (r/s/p/m) and the mobile-atom list (ma)
        db.add_process(args['or'], args['os'], args['op'], args['om'],
                       args['r'], args['s'], args['p'], args['m'], args['ma'])
        # Indicate that the process was inserted successfully.
        print "good update"
|
zybbigpy/VaspCZ | sourcecode/VaspVibAna_forNEB.py | """
Vibrational analysis for a NEB run.  (Translated from the original Chinese.)

Runs in the NEB directory; the NEB calculation must already be finished.
Reads ini/CONTCAR, fin/CONTCAR and the saddle image's (usually 02) CONTCAR.
Compares atom positions in ini/CONTCAR and fin/CONTCAR to identify which
atom diffuses; that atom is flagged T T T and all others F F F in the
ini and saddle CONTCARs, then INCAR is modified for vibrational analysis.
"""
import os, sys
import subprocess
import zzdlib
import argparse
import numpy as np
python = sys.executable
current_py_folder = os.path.dirname(os.path.abspath(__file__))
VaspCZ_path = [os.path.dirname(current_py_folder) if 'sourcecode' in current_py_folder else current_py_folder][0] + '/sourcecode'
def modify_POSCAR(data, index):
    """Add Selective Dynamics flags to POSCAR lines: the atom at *index*
    becomes 'T T T' (free), every other atom 'F F F' (frozen).
    (Translated from the original Chinese docstring.)

    :param data: POSCAR content as a list of lines
    :param index: 0-based index of the diffusing atom
    :return: new list of lines with a 'Selective Dynamics' header inserted
    """
    POSCAR_data = []
    direct, direct_index = zzdlib.File.getLine(data, keywords='Direct')
    decoded_data = zzdlib.Vasp.decode_POSCAR(data)
    number_of_atom = decoded_data[2]
    for i in range(len(data)):
        if i < direct_index:
            POSCAR_data.append(data[i])  # header section before 'Direct'
        elif i == direct_index:  # at 'Direct': insert Selective Dynamics first
            POSCAR_data.append('Selective Dynamics\n')
            POSCAR_data.append(data[i])
        elif direct_index < i <= direct_index+np.sum(number_of_atom):  # atom coordinate lines
            if i != direct_index+index+1:
                POSCAR_data.append(data[i].strip('\n') + ' F F F\n')
            else:
                POSCAR_data.append(data[i].strip('\n') + ' T T T\n')
        else:  # trailing section after the coordinates
            POSCAR_data.append(data[i])
    return POSCAR_data
def get_saddle_image():
    """Return the NEB image folder name ('0N') with the highest energy.

    Generates neb.dat via nebbarrier.pl if it does not already exist.
    """
    if not os.path.isfile('neb.dat'):
        os.system('nebbarrier.pl')
    with open('neb.dat', 'r') as f:
        data = f.readlines()
    # 解码成n行,5列的numpy数组 (decode into an (n, 5) float array)
    data = [line.split() for line in data]
    data = np.array(data).astype(float)
    # print(data, data.shape)
    # column 2 holds the energy; take the row index where it peaks
    index = np.argmax(data, axis=0)[2]
    saddle_image = data[index][0]
    saddle_image = f'0{int(saddle_image)}'
    return saddle_image
def run(include_fin=False):
    """Set up and submit vibrational-analysis runs for a finished NEB job.

    Finds the diffusing atom by comparing ini/Opt and fin/Opt POSCARs, then
    prepares frozen-atom vibration runs for the initial state, the saddle
    image and (when *include_fin* is True) the final state under
    ./vib_analysis.  Comments translated from the original Chinese.
    """
    # check whether the NEB calculation has finished
    out = subprocess.check_output([python, f'{VaspCZ_path}/NEBCheck1.1.py'], shell=False, stderr=subprocess.STDOUT).decode('utf-8')
    isDone = bool([0 if out.find(f'Path:{"./":<40} NEB计算完成!')==-1 else 1][0])
    # isDone = ['Done' if ("Path:./" in line and "NEB计算完成" in line) for line in out.split('\n') else None]
    # print(out, type(out), isDone, type(isDone))
    if isDone==False:
        print(f'当前目录下NEB计算并未完成,退出程序')
        exit()
    else:
        print(f'检查:当前目录{os.getcwd()} NEB计算完成\n开始振动分析')
    # read ini/Opt and fin/Opt POSCARs to locate the diffusing atom
    with open('./ini/Opt/POSCAR', 'r') as f:
        iniPOS = f.readlines()
    with open('./fin/Opt/POSCAR', 'r') as f:
        finPOS = f.readlines()
    # print(iniCONT)
    ini_result = zzdlib.Vasp.decode_POSCAR(iniPOS)  # result: vector, elements, number_of_atom, position
    fin_result = zzdlib.Vasp.decode_POSCAR(finPOS)
    ini_position = ini_result[3]
    fin_position = fin_result[3]
    # print(ini_position.shape)
    # per-atom displacement between initial and final structures
    distance = np.sqrt(np.sum(np.square(ini_position-fin_position), axis=1))
    # print(distance)
    # index of the (first) atom that moved: that atom gets T T T, others F F F
    index = distance.astype(bool).tolist().index(True)
    # print(index)
    # ini/CONTCAR becomes the POSCAR for the initial-state vibration run
    with open('ini/CONTCAR', 'r') as f:
        data = f.readlines()
    POSCAR_for_iV = modify_POSCAR(data, index)
    # fin/CONTCAR becomes the POSCAR for the final-state vibration run
    with open('fin/CONTCAR', 'r') as f:
        data = f.readlines()
    POSCAR_for_fV = modify_POSCAR(data, index)
    # saddle image CONTCAR becomes the POSCAR for the saddle vibration run
    saddle_image = get_saddle_image()
    with open(f'{saddle_image}/CONTCAR', 'r') as f:
        data = f.readlines()
    POSCAR_for_sV = modify_POSCAR(data, index)
    # start the vibrational analyses
    if not os.path.isdir('vib_analysis'):
        os.mkdir('./vib_analysis')
    os.chdir('./vib_analysis')
    # initial state, then saddle, then (optionally) final state
    vib_analysis(f'ini_state', POSCAR_data=POSCAR_for_iV)
    vib_analysis(f'sad_state', POSCAR_data=POSCAR_for_sV)
    if include_fin:
        vib_analysis(f'fin_state', POSCAR_data=POSCAR_for_fV)
    os.chdir('..')
def vib_analysis(folder, POSCAR_data):
    """Create *folder*, stage INCAR/POTCAR/KPOINTS/POSCAR/Vasp.sh for a
    finite-difference vibration run (IBRION=5) and submit the job.

    Relies on the module-level globals ``nodes`` and ``ppn`` for the
    submission script.  Comments translated from the original Chinese.
    """
    if not os.path.isdir(folder):
        os.mkdir(folder)
    os.chdir(folder)
    os.system(f'cp ../../ini/Opt/INCAR .')
    os.system(f'cp ../../ini/Opt/POTCAR .')
    os.system(f'cp ../../ini/Opt/KPOINTS .')
    Vaspsh_path = zzdlib.File.Vaspsh_path()
    os.system(f'cp {Vaspsh_path}/Vasp.sh .')
    with open('POSCAR', 'w') as f:
        f.writelines(POSCAR_data)
    # print(os.getcwd())
    # adjust INCAR for the vibrational calculation
    data_INCAR = zzdlib.File.openFile('INCAR', 'r')
    data_INCAR = zzdlib.File.substituteData(data_INCAR, keywords='SYSTEM', newline='SYSTEM=Vib\n')  # modify
    data_INCAR = zzdlib.File.substituteData(data_INCAR, keywords='NSW', newline='NSW=1\n')  # modify
    data_INCAR = zzdlib.File.substituteData(data_INCAR, keywords='POTIM', newline='POTIM=0.03\n')  # modify
    data_INCAR = zzdlib.File.substituteData(data_INCAR, keywords='IBRION', newline='IBRION=5\n')  # modify
    data_INCAR.append('NFREE=2\n')  # add
    data_INCAR.append('ISYM=0\n')  # add
    data_INCAR.append('PREC=Accurate\n')  # add
    data_INCAR = zzdlib.File.substituteData(data_INCAR, keywords='NPAR', newline='\n')  # remove
    data_INCAR = zzdlib.File.substituteData(data_INCAR, keywords='NCORE', newline='\n')  # remove
    zzdlib.File.openFile('INCAR', 'w', data=data_INCAR)
    # POTCAR unchanged
    # KPOINTS unchanged
    # Vasp.sh: rename the job
    data_VaspSh = zzdlib.File.openFile('../../ini/Opt/Vasp.sh', 'r')
    jobname_old = zzdlib.File.getLine(data_VaspSh, keywords='#PBS -N')[0].split()[2]
    # drop the last two characters of the old name, append the state letter
    # (i/f/s, first char of *folder*) plus 'V' for "vibration"
    jobname = jobname_old[:-2]+folder[0]+'V'
    global nodes
    global ppn
    zzdlib.Vasp.modify_Vasp_sh(jobname=jobname, nodes=nodes, ppn=ppn)
    zzdlib.Vasp.check_and_qsub()
    os.chdir('..')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='manual to this script')
    parser.add_argument('-nd', '--nodes', type=str, default='1')
    parser.add_argument('-np', '--ppn', type=str, default='8')
    parser.add_argument('-fin', '--include_fin', type=str, default='False')
    args = parser.parse_args()
    nodes = args.nodes
    ppn = args.ppn
    # BUGFIX: bool(args.include_fin) was True for ANY non-empty string —
    # including the default 'False' — so the final-state analysis always
    # ran.  Parse the flag text explicitly instead.
    include_fin = args.include_fin.strip().lower() in ('true', '1', 'yes', 'y')
    run(include_fin=include_fin)
zybbigpy/VaspCZ | othercode/test.py | import os
import subprocess
def dd():
    """Print the fixed marker string (scratch/debug helper)."""
    message = 'mdmdmd'
    print(message)
# NOTE(review): check_output expects a command string or argv list; 'dd'
# here is a Python function object, so this call raises at runtime —
# presumably leftover scratch code; confirm before relying on this file.
res = subprocess.check_output(dd, stderr=subprocess.STDOUT, shell=True)
print(res)
zybbigpy/VaspCZ | othercode/get_total_used_core.py | <filename>othercode/get_total_used_core.py<gh_stars>10-100
#!/public/software/apps/python/3.7.1/bin/python3
# coding=utf-8
import os
import argparse
import subprocess
def getshellResult(code):
    """Run *code* in a shell and return its combined stdout/stderr,
    decoded as UTF-8 and split into a list of lines."""
    raw = subprocess.check_output(code, stderr=subprocess.STDOUT, shell=True)
    return raw.decode('utf-8').split('\n')
def run(code, usr):
    """Sum the core counts of *usr*'s running jobs from the queue listing
    produced by shell command *code*, and print the total."""
    used_core = 0
    for line in getshellResult(code):
        # a running job line contains the user name and the state flag 'R';
        # column 6 of the listing is the job's core count
        if usr in line and 'R' in line:
            used_core += int(line.split()[6])
    print(f'已使用核:{used_core}')
if __name__ == '__main__':
    # command-line interface: queue-listing command and user name
    parser = argparse.ArgumentParser()
    parser.add_argument('-cd', '--code', default='qstat -a', help='code to show the job queue')
    parser.add_argument('-usr', '--user', default='yangyuqi')
    args = parser.parse_args()
    code = args.code
    usr = args.user
    run(code, usr)
zybbigpy/VaspCZ | vtstscripts-939/vef.py | #!/usr/bin/env python
import aselite
from os.path import isfile, dirname, abspath, join
from os import system
from sys import exit, argv
if '-h' in argv:
print 'usage: vef.py'
print ' prints the force and energy for each ionic step of a vasp run'
print
exit(0)
vtst_path = dirname(abspath(__file__))
filename = 'OUTCAR'
if not isfile(filename):
print 'No such file: %s' % filename
exit(1)
traj = aselite.read_vasp_out(filename)
if len(traj) == 0:
exit(0)
fe = open('fe.dat', 'w')
for i, atoms in enumerate(traj):
e = atoms.get_potential_energy()
if i == 0:
e0 = e
f = atoms.get_max_atom_force()
str = '%5i %20.8f %20.6f %20.6g ' % (i,f,e,e-e0)
print str
fe.write(str+'\n')
fe.close()
if i > 1:
system('gnuplot %s' % join(vtst_path, 'vef.gnu'))
|
zybbigpy/VaspCZ | vtstscripts-939/2con.py | #!/usr/bin/env python
import sys
import aselite
if len(sys.argv) < 3:
    print 'usage: 2con.py IN OUT'
    print '   converts file IN of type POSCAR or xyz to con and saves it to OUT'
    print
    sys.exit(0)
# read any supported format and write it back out as a .con file
traj = aselite.read_any(sys.argv[1])
aselite.write_con(sys.argv[2], traj)
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/remote_db.py | <gh_stars>10-100
from aselite import Atoms
from aselite import FixAtoms
from aselite import write_vasp
import numpy as np
import pymysql
from server_config import *
import re
import copy
import sys
class RemoteDB():
    """Client for the remote kinetic-process MySQL databases.

    Wraps three databases whose names come from server_config: the main
    queryable DB, a backup DB holding the originally submitted structures,
    and a user DB holding account records.  Primary keys are allocated
    client-side from cached per-table counters (pk_dict and friends).

    NOTE(review): every query in this class is assembled with %-string
    formatting instead of parameterized queries, so any untrusted input
    (names, emails, passwords) is a SQL-injection vector.
    """

    def __init__(self):
        # database names are module-level constants from server_config
        self.db_name = db_name
        self.backup_db_name = backup_db_name
        self.user_db_name = user_db_name
        #dictionaries hold highest primary key values to reduce number of queries while inserting
        self.pk_dict = {'Atoms': self.get_max('Atoms', 'atoms_id', self.db_name),
                        'Atom': self.get_max('Atom', 'atom_id', self.db_name),
                        'Process': self.get_max('Process', 'pro_id', self.db_name),
                        'Mobile': self.get_max('Mobile', 'mob_id', self.db_name)}
        self.b_pk_dict = {'Atoms': self.get_max('Atoms', 'atoms_id', self.backup_db_name),
                          'Atom': self.get_max('Atom', 'atom_id', self.backup_db_name),
                          'Process': self.get_max('Process', 'pro_id', self.backup_db_name)}
        self.u_pk_dict = {'User': self.get_max('User', 'user_id', self.user_db_name)}

    #############################################################
    # Insertions ################################################
    #############################################################

    # adds a process, each parameter is an ase.Atom object
    # EXCEPT mode/o_mode, mode is a numpy array or None
    # and mobile_set is a list of numbers
    def add_process(self, o_reactant, o_saddle, o_product, o_mode,
                    reactant, saddle, product, mode, mobile_set, email, passwd):
        """Insert a process: the original structures go to the backup DB,
        the processed ones plus the mobile-atom list to the main DB.

        Returns the new main-DB process id, or -1 when the email/password
        pair does not authenticate.
        """
        #check email/passwd combo is in database
        user_id = self.get_user_id(email, passwd)
        if user_id == None:
            print "email/password incorrect."
            return -1
        # get name and primary keys
        symbols = o_reactant.get_chemical_symbols()
        name = self.get_name(symbols)
        o_pro_pk = self.get_id('Process', db='backup')
        pro_pk = self.get_id('Process')
        # connect to backup DB
        conn = self.connect_db(db=self.backup_db_name)
        conn.execute('''BEGIN''')
        # insert atoms objects into backup DB (all three share o_mode)
        or_pk = self.add_atoms(o_reactant, o_mode, conn)
        os_pk = self.add_atoms(o_saddle, o_mode, conn)
        op_pk = self.add_atoms(o_product, o_mode, conn)
        # insert process to backup DB
        conn.execute('''INSERT INTO Process
                        VALUES ('%d','%s','%d','%d','%d', '%d')''' %
                     (o_pro_pk, name, or_pk, os_pk, op_pk, user_id))
        conn.execute('COMMIT')
        conn.close()
        # connect to DB
        conn = self.connect_db(db=self.db_name)
        conn.execute('''BEGIN''')
        # insert atoms objects into DB
        r_pk = self.add_atoms(reactant, mode, conn)
        s_pk = self.add_atoms(saddle, mode, conn)
        p_pk = self.add_atoms(product, mode, conn)
        # insert process to DB (links back to the backup row via o_pro_pk)
        conn.execute('''INSERT INTO Process
                        VALUES ('%d','%s','%d','%d','%d','%d')''' %
                     (pro_pk, name, o_pro_pk, r_pk, s_pk, p_pk))
        # insert mobile atoms list into DB
        self.add_mobile(mobile_set, pro_pk, conn)
        conn.execute('COMMIT')
        conn.close()
        return pro_pk

    # adds a process to the kdb table only. Doesn't create a backup. This function
    # should only be used within kdbupdate.
    def add_updated_process(self, reactant, saddle, product, mode, mobile_set, o_pro_pk):
        """Insert an already-backed-up process (identified by *o_pro_pk*)
        into the main DB only; returns the new process id."""
        symbols = reactant.get_chemical_symbols()
        name = self.get_name(symbols)
        pro_pk = self.get_id('Process')
        conn = self.connect_db()
        conn.execute('''BEGIN''')
        r_pk = self.add_atoms(reactant, mode, conn)
        s_pk = self.add_atoms(saddle, mode, conn)
        p_pk = self.add_atoms(product, mode, conn)
        conn.execute('''INSERT INTO Process
                        VALUES ('%d','%s','%d','%d','%d','%d')''' %
                     (pro_pk, name, o_pro_pk, r_pk, s_pk, p_pk))
        self.add_mobile(mobile_set, pro_pk, conn)
        conn.execute('COMMIT')
        conn.close()
        return pro_pk

    # adds a specific ase.Atom object (SHOULD NOT BE USED)
    # this function is only to be used inside add_process()
    def add_atoms(self, atoms, mode, conn):
        """Insert one structure (an Atoms row plus one Atom row per atom)
        through cursor *conn*; returns the new Atoms primary key.

        Which primary-key counter is used depends on which database the
        cursor is connected to (checked via SELECT DATABASE()).
        """
        # if mode = None, create array of zeros.
        # NOTE(review): 'mode == None' on a numpy array is an element-wise
        # comparison; with modern numpy this 'if' raises for real arrays.
        # 'mode is None' would be the safe test -- confirm before changing.
        if mode == None:
            mode = np.zeros((len(atoms),3))
        # get important data from ase.Atoms instance
        symbols = atoms.get_chemical_symbols()
        positions = atoms.get_positions()
        cell = atoms.get_cell()
        constraints = atoms._get_constraints()
        # get unique primary key for the correct database
        conn.execute('SELECT DATABASE()')
        db_name = conn.fetchone()[0]
        db = ''
        if db_name == self.backup_db_name:
            db = 'backup'
        atoms_pk = self.get_id('Atoms', db=db)
        # add data to Atoms table
        conn.execute('''INSERT INTO Atoms VALUES ('%d','%f','%f','%f','%f','%f','%f','%f','%f','%f')''' %
                     (atoms_pk,
                      cell[0][0],cell[0][1],cell[0][2],
                      cell[1][0],cell[1][1],cell[1][2],
                      cell[2][0],cell[2][1],cell[2][2]))
        # for each atom in atoms add the atom to the database
        for i in range(len(atoms)):
            atom_pk = self.get_id('Atom', db=db)
            index = i
            sub_mode = mode[i]
            symbol = symbols[i]
            position = positions[i]
            # assumes the first constraint is a FixAtoms -- TODO confirm
            if i in constraints[0].index:
                fixed = 1
            else:
                fixed = 0
            conn.execute('''INSERT INTO Atom VALUES ('%d','%d','%d','%s','%f','%f','%f','%f','%f','%f','%d')''' %
                         (atom_pk, atoms_pk, index, symbol,
                          position[0], position[1], position[2],
                          sub_mode[0], sub_mode[1], sub_mode[2], fixed))
        return atoms_pk

    # mobile_set is a list of atom indicies, pro_pk is a unique identifier
    # of the process that contains the mobile atoms
    def add_mobile(self, mobile_set, pro_pk, conn):
        """Insert one Mobile row per mobile atom index for process *pro_pk*."""
        for mobile in mobile_set:
            mob_pk = self.get_id('Mobile')
            conn.execute('''INSERT INTO Mobile VALUES ('%d','%d','%d')''' %
                         (mob_pk, mobile, pro_pk))

    # add a user to the kdb_user database.
    # not all emails are hashed using SHA.
    def add_user(self, f_name, l_name, email, passwd):
        """Create an account; the password is stored as SHA(passwd) by the
        server.  Returns a human-readable status string (also printed)."""
        if not self.is_email(email):
            print "please enter a valid email."
            return "please enter a valid email."
        user_pk = self.get_id('User', db='user')
        conn = self.connect_db(db=self.user_db_name)
        try:
            conn.execute('''INSERT INTO User VALUES ('%d', '%s', '%s', '%s', SHA('%s'))''' %
                         (user_pk, f_name, l_name, email, passwd))
            conn.execute('''COMMIT''')
            conn.close()
            print "account added"
            return "account added"
        except pymysql.err.IntegrityError:
            # duplicate email (unique key); note the cursor is NOT closed
            # on this path, and the close() below is unreachable
            print "This email address is already in our database."
            return "This email address is already in our database."
        conn.close()  # NOTE(review): unreachable -- both branches return above

    #############################################################
    # Queries ###################################################
    #############################################################

    # query db for ase.Atoms object based off atoms_id
    def get_atoms(self, atoms_id, db=db_name):
        """Rebuild an ase.Atoms object (cell, symbols, positions, FixAtoms
        mask) from the Atoms/Atom rows with primary key *atoms_id*.

        NOTE: the default for *db* is the module-level db_name captured at
        class-definition time.
        """
        conn = self.connect_db(db=db)
        # query the db for the specific atoms entry
        conn.execute('''SELECT * FROM Atoms WHERE atoms_id = '%d' ''' % atoms_id)
        atoms = conn.fetchone()
        cell = []
        # populate the 3D cell block: columns 1..9 of the row are the nine
        # cell components, grouped into three rows of three
        count = 0
        temp = []
        for i in range(len(atoms)):
            if i !=0:
                temp.append(atoms[i])
                if count == 2:
                    cell.append(temp)
                    temp = []
                    count = 0
                else:
                    count += 1
        # query the db for all atom entries with the same atoms_id as given
        conn.execute('''SELECT * FROM Atom WHERE atoms_id = '%d' '''% atoms_id)
        atoms = conn.fetchall()
        conn.close()
        symbols = []
        positions = []
        fixed = []
        mode = []
        # loop over each atom from query and pull out data needed to build ase.Atoms instance
        for i in range(len(atoms)):
            symbols.append(str(atoms[i][3]))
            positions.append([atoms[i][4],atoms[i][5],atoms[i][6]])
            mode.append([atoms[i][7],atoms[i][8],atoms[i][9]])
            fixed.append(atoms[i][10])
        # create FixAtoms mask for ase.Atoms' constraint
        constraint = FixAtoms(mask=fixed)
        # create ase.Atoms instance
        atoms = Atoms(symbols = symbols, positions = positions, cell = cell, constraint = constraint)
        #self.write_atoms('test.con', atoms)
        return atoms

    #generate a mode list from a given atoms_id
    def get_mode(self, atoms_id, db=db_name):
        """Return the (natoms, 3) mode vector stored with structure
        *atoms_id*, ordered by atom number."""
        conn = self.connect_db(db=db)
        conn.execute(''' SELECT mode0, mode1, mode2 FROM Atom WHERE atoms_id = '%d' ORDER BY num''' % atoms_id)
        mode_list = conn.fetchall()
        conn.close()
        mode = np.array(mode_list)
        return mode

    # name is the chemical name for the stucture, IE: Al or CuO
    def get_process(self, name):
        """Return all processes matching *name*, each expanded four ways:
        as-is, min/product swapped, and mirrored versions of both."""
        # query db for all processes with same name as given in params
        conn = self.connect_db()
        return_list = []
        conn.execute('''SELECT * FROM Process WHERE name = '%s' ''' % name)
        process_list = conn.fetchall()
        # iterate through all process matches and extract important data
        for process in process_list:
            # dictionary to hold important data
            pro_dict = {'minimum': None,
                        'saddle' : None,
                        'product': None,
                        'mobile' : [],
                        'mode'   : None,
                        'mirror' : False,
                        'id'     : None}
            # query database for mobile atoms list of the current process
            conn.execute('''SELECT * FROM Mobile WHERE pro_id = '%d' ''' % process[0])
            mobile_list = conn.fetchall()
            # add the mobile atoms to dictionary
            for mobile in mobile_list:
                pro_dict['mobile'].append(mobile[1])
            # convert atoms_ids to ase.Atoms() objects
            pro_dict['minimum'] = self.get_atoms(process[3])
            pro_dict['saddle'] = self.get_atoms(process[4])
            pro_dict['product'] = self.get_atoms(process[5])
            # the mode is stored with the minimum's structure (process[3])
            pro_dict['mode'] = self.get_mode(process[3])
            pro_dict['id'] = process[0]
            return_list.append(pro_dict)
            # swap min and product and re add dictionary
            swap_dict = copy.deepcopy(pro_dict)
            swap_dict['minimum'], swap_dict['product'] = swap_dict['product'], swap_dict['minimum']
            return_list.append(swap_dict)
            #more lists to append for mirror changes
            mirror_dict = copy.deepcopy(pro_dict)
            mirror_dict['mirror'] = True
            return_list.append(mirror_dict)
            mirror_dict2 = copy.deepcopy(swap_dict)
            mirror_dict2['mirror'] = True
            return_list.append(mirror_dict2)
        conn.close()
        return return_list

    # function grabs all params in the param table and returns them
    def get_params(self):
        """Return the backup DB's Param table as a {name: value} dict."""
        conn = self.connect_db(db=self.backup_db_name)
        conn.execute('''SELECT * FROM Param''')
        param_list = conn.fetchall()
        conn.close()
        return_dict = {}
        for tup in param_list:
            return_dict[tup[0]] = tup[1]
        return return_dict

    # function used to check for duplicate entries
    def get_saddles(self, name):
        """Return [[saddle_atoms, saddle_id], ...] for every process named
        *name* (used for duplicate detection).

        NOTE(review): the cursor opened here is never closed.
        """
        # query db for all processes
        conn = self.connect_db()
        conn.execute('''SELECT saddle_id FROM Process WHERE name = '%s' ''' % name)
        sid_list = conn.fetchall()
        saddle_list = []
        for sid in sid_list:
            saddle = self.get_atoms(sid[0])
            saddle_list.append([saddle, sid[0]])
        return saddle_list

    # function grabs the user_id given an email/passwd combo
    def get_user_id(self, email, passwd):
        """Return the user_id for a valid email/password pair, else None."""
        if not self.is_user(email, passwd):
            return None
        conn = self.connect_db(db=self.user_db_name)
        # NOTE(review): '<PASSWORD>(...)' looks like a dataset redaction
        # artifact -- is_user() uses SHA('%s') for the same comparison, so
        # this almost certainly should read SHA('%s') as well; as written
        # the query is invalid SQL.  Confirm against the original source.
        conn.execute('''SELECT user_id FROM User WHERE email = '%s' && password = <PASSWORD>('%s')''' % (email, passwd))
        return conn.fetchone()[0]

    def is_user(self, email, passwd):
        """True when an account with this email and SHA(passwd) exists."""
        conn = self.connect_db(db=self.user_db_name)
        conn.execute('''SELECT user_id FROM User WHERE email = '%s' && password = SHA('%s')''' % (email, passwd))
        if conn.fetchone() is None:
            return False
        else:
            return True

    #############################################################
    # Helper Functions ##########################################
    #############################################################

    # helper function to remove a process from the database
    #outdated do not use
    def remove_process(self, pro_pk):
        """Delete process *pro_pk* and its Mobile/Atom/Atoms rows.

        NOTE(review): marked outdated -- it assumes six atoms_id columns
        (process[3..8]), which does not match the 6-column Process rows
        inserted by add_process().
        """
        conn = self.connect_db()
        conn.execute('''SELECT * FROM Process
                        WHERE pro_id = '%d' ''' % pro_pk)
        process = conn.fetchone()
        conn.execute('''DELETE FROM Mobile WHERE pro_id = '%d' ''' % pro_pk)
        conn.execute('''DELETE FROM Atom
                        WHERE atoms_id in ('%d','%d','%d','%d','%d','%d')''' % (process[3],process[4],process[5],process[6],process[7],process[8]))
        conn.execute('''DELETE FROM Atoms
                        WHERE atoms_id in ('%d','%d','%d','%d','%d','%d')''' % (process[3],process[4],process[5],process[6],process[7],process[8]))
        conn.execute('''DELETE FROM Process WHERE pro_id = '%d' ''' % pro_pk)
        conn.execute('COMMIT')
        conn.close()

    def remove_process_user(self, email):
        """Delete every process submitted by *email* from both the main
        (queryable) and backup DBs; exits the interpreter when the user or
        their processes cannot be found."""
        #get user_id
        conn = self.connect_db(db=self.user_db_name)
        conn.execute('''SELECT user_id FROM User WHERE email = '%s' ''' % email)
        results = conn.fetchone()
        conn.close()
        if results == None:
            print "%s is not in the database." % email
            sys.exit()
        user_id = results[0]
        #get all backup processes from the user_id
        conn = self.connect_db(db=self.backup_db_name)
        conn.execute('''SELECT pro_id FROM Process WHERE user_id = '%d' ''' % user_id)
        results = conn.fetchall()
        conn.close()
        if results == ():
            print "%s has not added any processes." % email
            sys.exit()
        pro_ids = []
        for x in results:
            pro_ids.append(x[0])
        #loop through proceses and delete them
        for o_pro_id in pro_ids:
            #remove from querable table
            conn = self.connect_db()
            conn.execute('''SELECT pro_id from Process WHERE original_pro_id = '%d' ''' % o_pro_id)
            result = conn.fetchone()
            if result is not None:
                pro_id = result[0]
                conn.execute('''SELECT * FROM Process WHERE pro_id = '%d' ''' % pro_id)
                process = conn.fetchone()
                conn.execute('''DELETE FROM Mobile WHERE pro_id = '%d' ''' % pro_id)
                conn.execute('''DELETE FROM Atom WHERE atoms_id in ('%d', '%d', '%d')''' % (process[2],process[3],process[4]))
                conn.execute('''DELETE FROM Process WHERE pro_id = '%d' ''' % pro_id)
                conn.execute('''DELETE FROM Atoms WHERE atoms_id in ('%d', '%d', '%d')''' % (process[2],process[3],process[4]))
                conn.execute('COMMIT')
            conn.close()
        #remove from nonquerable table
        for o_pro_id in pro_ids:
            conn = self.connect_db(db=self.backup_db_name)
            conn.execute('''SELECT * FROM Process WHERE pro_id = '%d' ''' % o_pro_id)
            process = conn.fetchone()
            conn.execute('''DELETE FROM Atom WHERE atoms_id in ('%d', '%d', '%d')''' % (process[2],process[3],process[4]))
            conn.execute('''DELETE FROM Process WHERE pro_id = '%d' ''' % o_pro_id)
            conn.execute('''DELETE FROM Atoms WHERE atoms_id in ('%d', '%d', '%d')''' % (process[2],process[3],process[4]))
            conn.execute('COMMIT')
            conn.close()

    # writes an atoms object to a file as a .con file
    # NOTE(review): despite the comment above, this calls write_vasp, which
    # writes POSCAR format, not .con -- confirm which is intended.
    def write_atoms(self, filename, atoms):
        write_vasp(filename, atoms)

    # helper function to get name from list of symbols
    # IE: symbols = ['Cu', 'Cu', 'O'] -> name = "CuO"
    def get_name(self, symbols):
        name = ""
        for symbol in symbols:
            # keep each element once, in first-seen order
            if symbol not in name:
                name = name + symbol
        return name

    # helper function to connect to the database
    def connect_db(self, db=db_name):
        """Open a connection to *db* and return a cursor.

        NOTE(review): the Connection object itself is discarded, so the
        later .close() calls close the cursor only -- the underlying
        connection is left to be garbage-collected.
        """
        return pymysql.connect(host=host, port=port, user=user, passwd=password, db=db).cursor()

    # helper function to get largest id for a specific table
    def get_max(self, table_name, id_name, db):
        """Return MAX(id_name) from *table_name* in *db*, or 0 when empty."""
        conn = self.connect_db(db=db)
        # query the given table for the largest primary key
        conn.execute('''SELECT max({}) FROM {}'''.format(id_name, table_name))
        max_id = conn.fetchone()[0]
        conn.close()
        # if there are no entries in the database max_id = None
        if not max_id:
            max_id = 0
        return max_id

    # helper function to get unique primary key number for a specific table
    def get_id(self, table_name, db=''):
        """Allocate the next primary key for *table_name* from the cached
        counter of the main ('' default), 'backup' or 'user' database."""
        if db == 'backup':
            self.b_pk_dict[table_name] += 1
            return self.b_pk_dict[table_name]
        elif db == 'user':
            self.u_pk_dict[table_name] += 1
            return self.u_pk_dict[table_name]
        else:
            self.pk_dict[table_name] += 1
            return self.pk_dict[table_name]

    # helper function to valid email add_process
    def is_email(self, email):
        """Loose syntactic email check: something@something.tld (2-4 chars)."""
        if not re.match(r'(.+)@(.+)\.(.{2,4})', email):
            return False
        return True
zybbigpy/VaspCZ | sourcecode/VaspModiFile.py | #!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
import sys
sys.path.append('/home/zhangzhengde/bin/pythonlib')
import zzdlib
import os

# Tweak ./INCAR in place: force ISIF=2, set NELM=400, and append a block of
# mixing-related tags (NELMDL/LMAXMIX/AMIX/...) when NELMDL is not present
# yet; finally print the old and new files side by side.
# (zzdlib.File.openFile presumably returns/writes a list of lines and
# substituteData replaces the line containing the keyword -- confirm in zzdlib.)
data_INCAR = zzdlib.File.openFile('./INCAR','r')
data_INCAR_old = data_INCAR.copy()  # snapshot for the diff printout below
data_INCAR = zzdlib.File.substituteData(data_INCAR,'ISIF','ISIF=2')
data_INCAR = zzdlib.File.substituteData(data_INCAR,'NELM','NELM=400','default')
# append the mixing tags only once: presence of NELMDL is the marker
if zzdlib.File.getLine(data_INCAR,'NELMDL') == 'Not Match':
    data_INCAR.append('NELMDL=5\n')
    data_INCAR.append('LMAXMIX=4\n')
    data_INCAR.append('AMIX=0.2\n')
    data_INCAR.append('BMIX=0.0001\n')
    data_INCAR.append('AMIX_MAG=0.8\n')
    data_INCAR.append('BMIX_MAG=0.0001\n')
zzdlib.File.openFile('./INCAR','w',data=data_INCAR)
# print old/new lines side by side; the except covers appended lines that
# have no counterpart in the old file (IndexError); 'e' is unused
for i in range(len(data_INCAR)):
    try:
        print('{} {}'.format(data_INCAR_old[i].strip('\n'),data_INCAR[i].strip('\n')))
    except Exception as e:
        print('{} {}'.format(' ',data_INCAR[i].strip('\n')))
|
zybbigpy/VaspCZ | sourcecode/VaspVibAna_Result.py | <gh_stars>10-100
"""
振动分析结束后,获取振动分析结果,计算出当前态下的振动振动频率。
公式为:v = (求积vi_equilirium)/(求积vi_saddle)
平衡态一般取初态ini的结果,如果有反向过程,如bcc九频模型中w4是w3的反过程,取w3过渡态的fin作为平衡态,即可就出w4的振动频率
"""
import zzdlib
import os
import argparse
import numpy as np
def run_walk(isprint=True):
    """Walk the tree below the cwd and call run() inside every
    'vib_analysis' directory found, restoring the cwd after each."""
    start_dir = os.getcwd()
    for parent, subdirs, _files in os.walk('./'):
        if 'vib_analysis' not in subdirs:
            continue
        os.chdir(f'{parent}/vib_analysis')
        run(isprint=isprint)
        os.chdir(start_dir)
def run(isprint=True):
    """Check which of the ini/sad/fin vibration analyses in the current
    directory have finished and compute jump frequencies where possible.

    A state counts as done when its log contains as many 'Total' lines as
    the total announced in the first one (format '... k/N' -- TODO confirm
    against zzdlib.File.getAllline output).
    """
    # first check whether each vibration analysis has finished
    dirnames = os.listdir()
    Donelist = []
    for state in ['ini', 'sad', 'fin']:
        if f'{state}_state' in dirnames:
            data =zzdlib.File.openFile(f'{state}_state/log', 'r')
            Total_list = zzdlib.File.getAllline(data, keywords='Total')
            # done when the announced total equals the number of 'Total' lines
            if int(Total_list[0].split('/')[-1].strip('\n')) == len(Total_list):
                isDone=True
            else:
                isDone=False
        else:
            isDone=False
        Donelist.append(isDone)
    tmp_path = f'{os.getcwd()}'
    print(f'{tmp_path:<40}', Donelist)
    # compute the vibration frequencies where both required states are done
    if Donelist[0] and Donelist[1]:
        # forward process: ini -> sad
        cal('ini_state', 'sad_state', isprint=isprint, title='foreward')
    if Donelist[1] and Donelist[2]:
        # backward process: fin -> sad
        cal('fin_state', 'sad_state', isprint=isprint, title='backward')
def cal(path1, path2, isprint=True, title=''):
    """Compute a jump frequency as prod(freqs(path1)) / prod(freqs(path2)[:-1])
    from the 'THz' lines of the two OUTCARs (path1 = equilibrium state,
    path2 = saddle), and print it.

    NOTE(review): assumes every 'THz' line parses as a real number --
    imaginary saddle modes may not; confirm the OUTCAR format.
    """
    data1 = zzdlib.File.openFile(f'{path1}/OUTCAR', 'r')
    data2 = zzdlib.File.openFile(f'{path2}/OUTCAR', 'r')
    frequncy1 = zzdlib.File.getAllline(data1, keywords='THz')
    frequncy2 = zzdlib.File.getAllline(data2, keywords='THz')
    if isprint:
        print(f'{os.getcwd()}')
        for line in frequncy1:
            print(line.strip('\n'))
        for line in frequncy2:
            print(line.strip('\n'))
    # extract the value between '=' and 'THz' on each frequency line
    fre1_value = [fre.split('=')[1].split('THz')[0].strip() for fre in frequncy1]
    fre2_value = [fre.split('=')[1].split('THz')[0].strip() for fre in frequncy2]
    fre1 = np.array(fre1_value).astype(float)
    fre2 = np.array(fre2_value).astype(float)
    # the last saddle frequency is dropped here, but it is not certain that
    # the dropped one is always the frequency that should be removed
    fre = np.prod(fre1)/np.prod(fre2[:-1])
    print(f'{os.getcwd():<40} {title:8} freqency: {fre:.4f}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('-p', '--isprint', type=bool, default=True)
args = parser.parse_args()
isprint = args.isprint
print(f'{"":-<20}{"开始检查振动分析结果":^20}{"":-<20}')
run_walk(isprint=isprint)
print(f'{"":-<20}{"检查结束":^20}{"":-<20}')
|
zybbigpy/VaspCZ | sourcecode/VaspOpt-Sta.py | <filename>sourcecode/VaspOpt-Sta.py
#!/home/zhangzhengde/bin/bin/python3
import sys
sys.path.append('/home/zhangzhengde/bin/pythonlib')
import os
import zzdlib
import argparse

# Turn the current (optimized) run into a static calculation: copy CONTCAR
# to POSCAR, edit INCAR for a single static step, adjust the PBS script
# (job name, nodes/cores, emergency queue) and optionally submit it.

inl = ['ini','fin']  # NOTE(review): unused in this script
parser = argparse.ArgumentParser(description = 'manual to this script')
# --nc "nodes,cores": a '0' component keeps the value already in Vasp.sh
parser.add_argument('--nc',type=str,default='0,0')
# --EMER yes/no toggles the PBS EMERGENCY queue; 'default' leaves it alone
parser.add_argument('--EMER',type=str,default='default')
args = parser.parse_args()
print('脚本运行,参数:--nc={} --EMER={}'.format(args.nc,args.EMER))
os.system('cp CONTCAR POSCAR')
with open('./INCAR','r') as f:
    data_INCAR = f.readlines()
for nl in range(len(data_INCAR)):
    if 'SYSTEM' in data_INCAR[nl]:
        data_INCAR[nl] = 'SYSTEM=Static\n' # set the job title
    if 'NSW' in data_INCAR[nl]:
        data_INCAR[nl] = 'NSW=1\n' # single ionic step
    if 'IBRION' in data_INCAR[nl]:
        data_INCAR[nl] = 'IBRION=-1\n' # no ionic relaxation
    if 'EDIFFG' in data_INCAR[nl]:
        data_INCAR[nl] = '#'+data_INCAR[nl] # comment out EDIFFG
with open('./INCAR','w') as f:
    f.writelines(data_INCAR)
    f.close()  # redundant inside the with-block
data_Sh = zzdlib.File.openFile('./Vasp.sh','r')
# rename the PBS job: replace the last character of the old name with 'S'
oldname = zzdlib.File.getLine(data_Sh,'#PBS -N')[0].strip('\n').split()[-1]
jobname = oldname[:-1]+'S'
data_Sh = zzdlib.File.substituteData(data_Sh,'#PBS -N',' #PBS -N '+jobname)
# adjust the nodes/cores line; '0' components of --nc keep existing values
ndAndnc = zzdlib.File.getLine(data_Sh,'#PBS -l nodes')[0].strip('\n').split()[-1]
nd = ndAndnc.split(':')[0].split('=')[-1]
nc = ndAndnc.split(':')[1].split('=')[-1]
if args.nc.split(',')[0] != '0':
    nd = args.nc.split(',')[0]
if args.nc.split(',')[1] != '0':
    nc = args.nc.split(',')[1]
data_Sh = zzdlib.File.substituteData(data_Sh,'#PBS -l nodes',' #PBS -l nodes='+nd+':ppn='+nc)
# toggle the EMERGENCY queue line depending on its current presence
EMER = zzdlib.File.getLine(data_Sh,'#PBS -q EMERGENCY')[0]# current emergency state
if EMER == 'Not Match':
    if args.EMER == 'yes' or args.EMER == 'y':
        data_Sh.insert(4,' #PBS -q EMERGENCY\n')
else:
    if args.EMER == 'no' or args.EMER == 'n':
        data_Sh.remove(' #PBS -q EMERGENCY\n')
zzdlib.File.openFile('./Vasp.sh','w',data_Sh)
# sanity-check the input files, then ask before submitting
if zzdlib.Vasp.checkInputs():
    usrsel=input('前检查无问题,是否要提交任务(默认yes):')
    if usrsel == 'no' or usrsel =='n':
        print('未提交任务')
    else:
        os.system('qsub Vasp.sh')
else:
    print('前检查有问题,请人工检查')
|
zybbigpy/VaspCZ | vtstscripts-939/aselite.py | <reponame>zybbigpy/VaspCZ<gh_stars>10-100
#!/usr/bin/env python
#encoding: utf-8
'''aselite is a striped down single file version of ase that retains the
following features: atom and atoms objects, some of ase.io and some of
ase.constraints.'''
# Copyright 2008, 2009 CAMd
# (see accompanying license files for details).
from math import cos, sin
import warnings
import numpy as np
np.seterr(all='raise')
import os
def read_any(filename):
    """Read *filename* as POSCAR/CONTCAR, xyz or con, whichever parses first.

    Each reader is tried in turn; failures are deliberately swallowed so the
    next format can be attempted.  IOError is raised only when all three
    readers fail.
    """
    try:
        return read_vasp(filename)
    except:
        pass
    try:
        return read_xyz(filename)
    except:
        pass
    try:
        return read_con(filename)
    except:
        pass
    # call-style raise works on both Python 2 and 3, unlike the old
    # 'raise IOError, msg' statement form used before
    raise IOError("Could not read file %s." % filename)
def write_jmol(filename, atoms, eigenvalues, eigenvectors):
    """Write normal modes to a jmol-readable multi-frame xyz file.

    One frame per mode: atom count, then the eigenvalue as the comment
    line, then per atom its symbol, position and the mode's displacement.
    Modes are taken column-wise (mode i = eigenvectors[:, i]).
    """
    f_xyz = open(filename,'w')
    for i in xrange(len(eigenvectors)):
        mode = eigenvectors[:,i]
        # reshape the flat 3N vector into per-atom (N, 3) displacements
        # (integer division: Python 2 only)
        mode.shape = (len(mode)/3,3)
        f_xyz.write("%i\n"%len(atoms))
        f_xyz.write("%f\n"%eigenvalues[i])
        for j,atom in enumerate(atoms):
            f_xyz.write("%s %f %f %f %f %f %f\n" % (atom.symbol, atom.position[0], atom.position[1], atom.position[2], mode[j,0], mode[j,1], mode[j,2]))
    f_xyz.close()
def get_atomtypes(fname):
    """Given a file name, get the atomic symbols.

    The function can get this information from OUTCAR and POTCAR
    format files.  The files can also be compressed with gzip or
    bzip2.  Symbols come from every TITEL line, e.g.
    'TITEL  = PAW_PBE Fe_pv 06Sep2000' yields 'Fe'.
    """
    atomtypes=[]
    if fname.find('.gz') != -1:
        import gzip
        f = gzip.open(fname)
    elif fname.find('.bz2') != -1:
        import bz2
        f = bz2.BZ2File(fname)
    else:
        f = open(fname)
    try:
        for line in f:
            if line.find('TITEL') != -1:
                # token 3 is e.g. 'Fe_pv' or 'Fe.special'; strip the suffix
                atomtypes.append(line.split()[3].split('_')[0].split('.')[0])
    finally:
        # the original leaked the file handle; always close it
        f.close()
    return atomtypes
def atomtypes_outpot(posfname, numsyms):
    """Try to retreive chemical symbols from OUTCAR or POTCAR

    If getting atomtypes from the first line in POSCAR/CONTCAR fails, it might
    be possible to find the data in OUTCAR or POTCAR, if these files exist.

    posfname -- The filename of the POSCAR/CONTCAR file we're trying to read
    numsyms -- The number of symbols we must find
    """
    import os.path as op
    import glob
    # First check files with exactly same name except POTCAR/OUTCAR instead
    # of POSCAR/CONTCAR.
    fnames = [posfname.replace('POSCAR', 'POTCAR').replace('CONTCAR',
                                                           'POTCAR')]
    fnames.append(posfname.replace('POSCAR', 'OUTCAR').replace('CONTCAR',
                                                               'OUTCAR'))
    # Try the same but with compressed files
    fsc = []
    for fn in fnames:
        fsc.append(fn + '.gz')
        fsc.append(fn + '.bz2')
    for f in fsc:
        fnames.append(f)
    # Finally try anything with POTCAR or OUTCAR in the name
    # NOTE(review): there is no path separator between vaspdir and the
    # pattern, so for a non-empty vaspdir this globs 'dir*POTCAR*' rather
    # than 'dir/*POTCAR*'
    vaspdir = op.dirname(posfname)
    fs = glob.glob(vaspdir + '*POTCAR*')
    for f in fs:
        fnames.append(f)
    fs = glob.glob(vaspdir + '*OUTCAR*')
    for f in fs:
        fnames.append(f)
    tried = []
    # NOTE(review): candidates are filtered against os.listdir('.') -- the
    # current working directory -- so path-qualified candidates can never
    # match; confirm this is the intended behaviour
    files_in_dir = os.listdir('.')
    for fn in fnames:
        if fn in files_in_dir:
            tried.append(fn)
            at = get_atomtypes(fn)
            if len(at) == numsyms:
                return at
    raise IOError('Could not determine chemical symbols. Tried files '
                  + str(tried))
def get_atomtypes_from_formula(formula):
    """Return the atom types appearing in *formula*, collapsing adjacent
    repeats (optionally the formula is prepended with an underscore part,
    e.g. 'CoP3_In-3': anything after '_' is ignored)."""
    symbol_list = string2symbols(formula.split('_')[0])
    types = [symbol_list[0]]
    for sym in symbol_list[1:]:
        if sym != types[-1]:
            types.append(sym)
    return types
def read_vasp(filename='CONTCAR'):
    """Import POSCAR/CONTCAR type file.

    Reads unitcell, atom positions and constraints from the POSCAR/CONTCAR
    file and tries to read atom types from POSCAR/CONTCAR header, if this fails
    the atom types are read from OUTCAR or POTCAR file.

    *filename* may be a path or a file-like object.  Returns an Atoms
    object with pbc=True and atoms.format set to 'vasp'.
    """
    if isinstance(filename, str):
        f = open(filename)
    else: # Assume it's a file-like object
        f = filename
    # First line should contain the atom symbols , eg. "Ag Ge" in
    # the same order
    # as later in the file (and POTCAR for the full vasp run)
    atomtypes = f.readline().split()
    # Sometimes the first line in POSCAR/CONTCAR is of the form
    # "CoP3_In-3.pos". Check for this case and extract atom types
    if len(atomtypes) == 1 and '_' in atomtypes[0]:
        atomtypes = get_atomtypes_from_formula(atomtypes[0])
    lattice_constant = float(f.readline().split()[0])
    # Now the lattice vectors
    a = []
    for ii in range(3):
        s = f.readline().split()
        floatvect = float(s[0]), float(s[1]), float(s[2])
        a.append(floatvect)
    # cell rows are scaled by the lattice constant
    basis_vectors = np.array(a) * lattice_constant
    # Number of atoms. Again this must be in the same order as
    # in the first line
    # or in the POTCAR or OUTCAR file
    atom_symbols = []
    numofatoms = f.readline().split()
    #vasp5.1 has an additional line which gives the atom types
    #the following try statement skips this line
    try:
        int(numofatoms[0])
    except ValueError:
        numofatoms = f.readline().split()
    # check for comments in numofatoms line and get rid of them if necessary
    commentcheck = np.array(['!' in s for s in numofatoms])
    if commentcheck.any():
        # only keep the elements up to the first including a '!':
        numofatoms = numofatoms[:np.arange(len(numofatoms))[commentcheck][0]]
    numsyms = len(numofatoms)
    if len(atomtypes) < numsyms:
        # First line in POSCAR/CONTCAR didn't contain enough symbols.
        atomtypes = atomtypes_outpot(f.name, numsyms)
    else:
        try:
            # fall back to OUTCAR/POTCAR when any header token is not a
            # known chemical symbol
            for atype in atomtypes[:numsyms]:
                if not atype in chemical_symbols:
                    raise KeyError
        except KeyError:
            atomtypes = atomtypes_outpot(f.name, numsyms)
    for i, num in enumerate(numofatoms):
        numofatoms[i] = int(num)
        # expand the per-type counts into one symbol per atom
        [atom_symbols.append(atomtypes[i]) for na in xrange(numofatoms[i])]
    # Check if Selective dynamics is switched on
    sdyn = f.readline()
    selective_dynamics = sdyn[0].lower() == "s"
    # Check if atom coordinates are cartesian or direct
    if selective_dynamics:
        ac_type = f.readline()
    else:
        ac_type = sdyn
    cartesian = ac_type[0].lower() == "c" or ac_type[0].lower() == "k"
    tot_natoms = sum(numofatoms)
    atoms_pos = np.empty((tot_natoms, 3))
    if selective_dynamics:
        selective_flags = np.empty((tot_natoms, 3), dtype=bool)
    for atom in xrange(tot_natoms):
        ac = f.readline().split()
        atoms_pos[atom] = (float(ac[0]), float(ac[1]), float(ac[2]))
        if selective_dynamics:
            # 'F' in the POSCAR means the coordinate is frozen
            curflag = []
            for flag in ac[3:6]:
                curflag.append(flag == 'F')
            selective_flags[atom] = curflag
    # Done with all reading
    if type(filename) == str:
        f.close()
    if cartesian:
        atoms_pos *= lattice_constant
    atoms = Atoms(symbols = atom_symbols, cell = basis_vectors, pbc = True)
    if cartesian:
        atoms.set_positions(atoms_pos)
    else:
        atoms.set_scaled_positions(atoms_pos)
    if selective_dynamics:
        # fully frozen atoms become one FixAtoms; partially frozen atoms
        # each get a FixScaled with their per-axis mask
        constraints = []
        indices = []
        for ind, sflags in enumerate(selective_flags):
            if sflags.any() and not sflags.all():
                constraints.append(FixScaled(atoms.get_cell(), ind, sflags))
            elif sflags.all():
                indices.append(ind)
        if indices:
            constraints.append(FixAtoms(indices))
        if constraints:
            atoms.set_constraint(constraints)
    atoms.format = 'vasp'
    return atoms
def read_vasp_out(filename='OUTCAR',index = 'all'):
    """Import OUTCAR type file.

    Reads unitcell, atom positions, energies, and forces from the OUTCAR file
    and attempts to read constraints (if any) from CONTCAR/POSCAR, if present.

    *index* selects the returned images: an int, the string 'all' (default,
    returns the whole list) or a slice object.
    """
    try: # try to read constraints, first from CONTCAR, then from POSCAR
        constr = read_vasp('CONTCAR').constraints
    except:
        try:
            constr = read_vasp('POSCAR').constraints
        except:
            constr = None
    if isinstance(filename, str):
        f = open(filename)
    else: # Assume it's a file-like object
        f = filename
    data = f.readlines()
    natoms = 0
    images = []
    atoms = Atoms(pbc = True, constraint = constr)
    energy = 0
    species = []
    species_num = []
    symbols = []
    ecount = 0
    poscount = 0
    for n,line in enumerate(data):
        if 'POTCAR:' in line:
            # species name is token 2 with any '.', '_' or '1' suffix removed
            temp = line.split()[2]
            for c in ['.','_','1']:
                if c in temp:
                    temp = temp[0:temp.find(c)]
            species += [temp]
        if 'ions per type' in line:
            # POTCAR names appear twice in an OUTCAR; keep the first half
            # (integer division: Python 2 only)
            species = species[:len(species)/2]
            temp = line.split()
            for ispecies in range(len(species)):
                species_num += [int(temp[ispecies+4])]
                natoms += species_num[-1]
                # one symbol entry per atom of this species
                for iatom in range(species_num[-1]): symbols += [species[ispecies]]
        if 'direct lattice vectors' in line:
            # the three cell rows follow on the next three lines
            cell = []
            for i in range(3):
                temp = data[n+1+i].split()
                cell += [[float(temp[0]), float(temp[1]), float(temp[2])]]
        if 'energy without entropy' in line:
            energy = float(data[n].split()[6])
            #energy = float(data[n+2].split()[4])
            if ecount < poscount:
                # reset energy for LAST set of atoms, not current one - VASP 5.11? and up
                images[-1].calc.energy = energy
            ecount += 1
        if 'POSITION ' in line:
            # positions+forces table starts two lines below the header
            forces = []
            atoms_symbols = []
            atoms_positions = []
            positions = []
            for iatom in range(natoms):
                temp = data[n+2+iatom].split()
                atoms_symbols.append(symbols[iatom])
                atoms_positions.append([float(temp[0]),float(temp[1]),float(temp[2])])
                forces += [[float(temp[3]),float(temp[4]),float(temp[5])]]
            atoms = Atoms('H'*natoms, pbc = True, constraint = constr)
            atoms.set_cell(cell)
            atoms.set_chemical_symbols(atoms_symbols)
            atoms.set_positions(atoms_positions)
            atoms.set_calculator(SinglePointCalculator(energy,forces,None,None,atoms))
            images += [atoms]
            poscount += 1
        if 'HIPREC TOTAL-FORCE' in line:
            # higher-precision forces override the ones parsed above
            forces = []
            for line in data[n+2:n+2+natoms]:
                fields = line.split()
                force = []
                for i in range(3):
                    force.append(float(fields[i]))
                forces.append(force)
            images[-1].calc.forces = np.array(forces)
    # return requested images, code borrowed from ase/io/trajectory.py
    if isinstance(index, int):
        return images[index]
    elif index == 'all':
        return images
    else:
        # index is a slice: normalize start/stop for either direction
        step = index.step or 1
        if step > 0:
            start = index.start or 0
            if start < 0:
                start += len(images)
            stop = index.stop or len(images)
            if stop < 0:
                stop += len(images)
        else:
            if index.start is None:
                start = len(images) - 1
            else:
                start = index.start
                if start < 0:
                    start += len(images)
            if index.stop is None:
                stop = -1
            else:
                stop = index.stop
                if stop < 0:
                    stop += len(images)
        return [images[i] for i in range(start, stop, step)]
def write_vasp(filename, atoms, label='', direct=False, sort=None, symbol_count = None, long_format=True):
    """Method to write VASP position (POSCAR/CONTCAR) files.

    Writes label, scalefactor, unitcell, # of various kinds of atoms,
    positions in cartesian or scaled coordinates (Direct), and constraints
    to file. Cartesian coordiantes is default and default label is the
    atomic species, e.g. 'C N H Cu'.
    """
    if isinstance(filename, str):
        f = open(filename, 'w')
    else: # Assume it's a 'file-like object'
        f = filename
    if isinstance(atoms, (list, tuple)):
        if len(atoms) > 1:
            raise RuntimeError("Don't know how to save more than "+
                               "one image to VASP input")
        else:
            atoms = atoms[0]
    # Write atom positions in scaled or cartesian coordinates
    if direct:
        coord = atoms.get_scaled_positions()
    else:
        coord = atoms.get_positions()
    if atoms.constraints:
        # per-atom, per-axis frozen flags collected from all constraints
        sflags = np.zeros((len(atoms), 3), dtype=bool)
        for constr in atoms.constraints:
            if isinstance(constr, FixScaled):
                sflags[constr.a] = constr.mask
            elif isinstance(constr, FixAtoms):
                sflags[constr.index] = [True, True, True]
    if sort:
        # sort atoms (and flags) by chemical symbol
        ind = np.argsort(atoms.get_chemical_symbols())
        symbols = np.array(atoms.get_chemical_symbols())[ind]
        coord = coord[ind]
        if atoms.constraints:
            sflags = sflags[ind]
    else:
        symbols = atoms.get_chemical_symbols()
    # Create a list sc of (symbol, count) pairs
    if symbol_count:
        sc = symbol_count
    else:
        sc = []
        psym = symbols[0]
        count = 0
        for sym in symbols:
            if sym != psym:
                sc.append((psym, count))
                psym = sym
                count = 1
            else:
                count += 1
        sc.append((psym, count))
    # Create the label
    if label == '':
        for sym, c in sc:
            label += '%2s ' % sym
    f.write(label + '\n')
    # Write unitcell in real coordinates and adapt to VASP convention
    # for unit cell
    # ase Atoms doesn't store the lattice constant separately, so always
    # write 1.0.
    f.write('%19.16f\n' % 1.0)
    if long_format:
        latt_form = ' %21.16f'
    else:
        latt_form = ' %11.6f'
    for vec in atoms.get_cell():
        f.write(' ')
        for el in vec:
            f.write(latt_form % el)
        f.write('\n')
    # Numbers of each atom
    for sym, count in sc:
        f.write(' %3i' % count)
    f.write('\n')
    if atoms.constraints:
        f.write('Selective dynamics\n')
    if direct:
        f.write('Direct\n')
    else:
        f.write('Cartesian\n')
    if long_format:
        cform = ' %19.16f'
    else:
        cform = ' %9.6f'
    for iatom, atom in enumerate(coord):
        for dcoord in atom:
            f.write(cform % dcoord)
        if atoms.constraints:
            # frozen axis -> 'F', free axis -> 'T' (VASP convention)
            for flag in sflags[iatom]:
                if flag:
                    s = 'F'
                else:
                    s = 'T'
                f.write('%4s' % s)
        f.write('\n')
    if type(filename) == str:
        f.close()
def length_angle_to_box(boxlengths, angles):
    """Build a 3x3 lower-triangular cell matrix from lengths and angles.

    *boxlengths* are the three vector lengths; *angles* are in degrees,
    ordered as angle(a,b), angle(a,c), angle(b,c) -- the convention used
    by box_to_length_angle.

    The original implementation did ``angles *= np.pi/180.0``, silently
    converting the caller's array to radians in place; a converted copy is
    used instead, so list inputs now also work.
    """
    box = np.zeros( (3,3) )
    ang = np.asarray(angles, dtype=float) * (np.pi / 180.0)
    box[0][0] = 1.0
    box[1][0] = np.cos(ang[0])
    box[1][1] = np.sin(ang[0])
    box[2][0] = np.cos(ang[1])
    box[2][1] = (np.cos(ang[2]) - box[1][0] * box[2][0]) / box[1][1]
    box[2][2] = np.sqrt(1.0 - box[2][0]**2 - box[2][1]**2)
    # scale each unit row by its length
    box[0,:] *= boxlengths[0]
    box[1,:] *= boxlengths[1]
    box[2,:] *= boxlengths[2]
    return box
def box_to_length_angle(box):
    """Return (lengths, angles) describing the three cell vectors of *box*.

    Angles are in degrees: angles[0] is between vectors 0 and 1,
    angles[1] between 0 and 2, angles[2] between 1 and 2.
    """
    lengths = np.array([np.linalg.norm(box[i, :]) for i in range(3)])
    unit = [box[i, :] / lengths[i] for i in range(3)]
    angles = np.array([np.arccos(np.dot(unit[0], unit[1])),
                       np.arccos(np.dot(unit[0], unit[2])),
                       np.arccos(np.dot(unit[1], unit[2]))])
    return lengths, angles * (180.0 / np.pi)
def read_con(filename):
    """Read one or more frames from an eOn-style .con file.

    Returns a single Atoms object if the file contains exactly one
    frame, otherwise a list of Atoms.  Re-raises the parse error if no
    complete frame could be read.
    """
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    trajectory = []
    line_index = 0
    while True:
        try:
            # Per-frame header: two prelude lines, lengths, angles,
            # two blank lines, number of types, counts, masses.
            boxlengths = np.array([float(length) for length in lines[line_index+2].split()])
            boxangles = np.array([float(angle) for angle in lines[line_index+3].split()])
            cell = length_angle_to_box(boxlengths, boxangles)
            num_types = int(lines[line_index+6].strip())
            num_each_type = [int(n) for n in lines[line_index+7].split()]
            mass_each_type = [float(n) for n in lines[line_index+8].split()]
            a = Atoms('H'*sum(num_each_type))
            a.format = 'con'
            a.cell = cell
            a.set_pbc((True, True, True))
            frozen = []
            positions = []
            symbols = []
            masses = []
            line_index += 9
            atom_index = 0
            for i in range(num_types):
                symbol = lines[line_index].strip()
                mass = mass_each_type[i]
                line_index += 2  # skip symbol line and "Coordinates of ..." line
                for j in range(num_each_type[i]):
                    split = lines[line_index].split()
                    positions.append([float(s) for s in split[0:3]])
                    symbols.append(symbol)
                    masses.append(mass)
                    if split[3] != '0':  # fourth column: nonzero marks a frozen atom
                        frozen.append(atom_index)
                    atom_index += 1
                    line_index += 1
            a.set_chemical_symbols(symbols)
            a.set_positions(positions)
            a.set_masses(masses)
            a.set_constraint(FixAtoms(frozen))
        except (IndexError, ValueError):
            # End of file (or trailing garbage) terminates parsing.  The
            # original bare ``except:`` also swallowed KeyboardInterrupt
            # and SystemExit; only parse failures are expected here.
            if len(trajectory) == 1:
                return trajectory[0]
            if len(trajectory) == 0:
                raise
            return trajectory
        trajectory.append(a)
def write_con(filename, p, w = 'w'):
    # Write Atoms object *p* to an eOn-style .con file.  *w* is the open
    # mode ('w' to overwrite, 'a' to append another frame).
    # NOTE(review): uses Python 2 print-statement syntax; this function
    # is not Python 3 compatible as written.
    con = open(filename, w)
    print >> con, "Generated by vtstscripts"
    print >> con
    # Cell is stored as edge lengths plus angles in degrees.
    lengths, angles = box_to_length_angle(p.cell)
    print >> con, " ".join(['%12.6f' % s for s in lengths])
    print >> con, " ".join(['%12.6f' % s for s in angles])
    print >> con
    print >> con
    # Count atoms of each element, remembering first-appearance order.
    atom_count = {}
    name_order = []
    for i in range(len(p)):
        name = p[i].symbol
        if name not in name_order:
            name_order.append(name)
        if name in atom_count:
            atom_count[name] += 1
        else:
            atom_count[name] = 1
    print >> con, len(name_order)
    print >> con, " ".join([str(atom_count[i]) for i in name_order])
    # One representative mass per element, taken from the first atom of
    # each group -- assumes atoms of the same element are stored
    # contiguously in name_order; TODO confirm for interleaved orderings.
    printmasses = []
    index = 0
    for i in range(len(name_order)):
        printmasses.append(p[index].mass)
        index += atom_count[name_order[i]]
    print >> con, " ".join(["%12.6f"% i for i in printmasses])
    index = 0
    for i in range(len(name_order)):
        print >> con, name_order[i]
        print >> con, "Coordinates of Component", i+1
        for j in range(atom_count[name_order[i]]):
            # Fourth column: 1 if this atom index appears in the first
            # constraint's index list (read_con treats nonzero as
            # frozen), else 0; fifth column is the atom index.
            free = 0
            if len(p.constraints) > 0:
                if index in p.constraints[0].index:
                    free = 1
            con.write("%12.6f %12.6f %12.6f %d %d\n" % (p[index].position[0],
                p[index].position[1], p[index].position[2], free, index))
            index += 1
def read_xdatcar(fileName):
    """Read all frames from a VASP XDATCAR file; return a list of Atoms.

    The fractional coordinates in the file are converted to cartesian
    using the lattice-constant-scaled cell.
    """
    f = open(fileName, 'r')
    lines = f.readlines()
    f.close()
    lattice_constant = float(lines[1].strip())
    cell = np.array([[float(x) * lattice_constant for x in lines[2].split()],
                     [float(x) * lattice_constant for x in lines[3].split()],
                     [float(x) * lattice_constant for x in lines[4].split()]])
    elements = lines[5].split()
    natoms = [int(x) for x in lines[6].split()]
    ntotal = sum(natoms)
    # Fix: use floor division -- plain '/' yields a float under Python 3
    # which breaks range() below (unchanged result for Python 2 ints).
    nframes = (len(lines) - 7) // (ntotal + 1)
    trajectory = []
    for i in range(nframes):
        a = Atoms('H' * ntotal)
        a.masses = [1.0] * len(a)
        a.set_chemical_symbols(''.join([n * e for (n, e) in zip(natoms, elements)]))
        a.cell = cell.copy()
        j = 0
        for N, e in zip(natoms, elements):
            for k in range(N):
                # Each frame is one comment line plus ntotal coordinate lines.
                split = lines[8 + i * (ntotal + 1) + j].split()
                a[j].position = [float(l) for l in split[0:3]]
                j += 1
        # Convert fractional to cartesian coordinates.
        a.positions = np.dot(a.positions, cell)
        trajectory.append(a)
    return trajectory
def read_xyz(fileobj, index=-1):
    """Read XYZ frames from a file object or filename.

    Returns images[index] (default: the last frame).  A file whose first
    line is not a lone atom count is treated as a headerless single
    frame of one atom per line.
    """
    if isinstance(fileobj, str):
        fileobj = open(fileobj)
    lines = fileobj.readlines()
    header = lines[0].split()
    if len(header) == 1:
        # Standard XYZ: atom count, then a comment line.
        natoms = int(header[0])
        del lines[:2]
    else:
        natoms = len(lines)
    images = []
    while len(lines) >= natoms:
        frame_symbols = []
        frame_positions = []
        for record in lines[:natoms]:
            symbol, x, y, z = record.split()[:4]
            frame_symbols.append(symbol.lower().capitalize())
            frame_positions.append([float(x), float(y), float(z)])
        atoms = Atoms(symbols=frame_symbols, positions=frame_positions)
        atoms.format = 'xyz'
        images.append(atoms)
        # Drop this frame's atom lines plus the next frame's two header lines.
        del lines[:natoms + 2]
    return images[index]
def write_xyz(fileobj, images):
    """Write one image, or a list of images, in XYZ format.

    Note: the atom count and the chemical symbols are taken from the
    first image and reused for every frame.
    """
    if isinstance(fileobj, str):
        fileobj = open(fileobj, 'w')
    if not isinstance(images, (list, tuple)):
        images = [images]
    symbols = images[0].get_chemical_symbols()
    natoms = len(symbols)
    record = '%-2s %22.15f %22.15f %22.15f\n'
    for frame in images:
        fileobj.write('%d\n\n' % natoms)
        for sym, (x, y, z) in zip(symbols, frame.get_positions()):
            fileobj.write(record % (sym, x, y, z))
class Units:
    """Unit system with Angstrom and eV equal to 1; other units derived.

    Physical constants are taken from the 1986 CODATA set (see below).
    The exact floating-point values depend on the evaluation order of
    these expressions, so do not reorder them.
    """
    from math import pi, sqrt
    # Constants from <NAME>'s PhysicalQuantities module (1986 CODATA):
    _c = 299792458.              # speed of light, m/s
    _mu0 = 4.e-7 * pi            # permeability of vacuum
    _eps0 = 1 / _mu0 / _c**2     # permittivity of vacuum
    _Grav = 6.67259e-11          # gravitational constant
    _hplanck = 6.6260755e-34     # Planck constant, J s
    _hbar = _hplanck / (2 * pi)  # Planck constant / 2pi, J s
    _e = 1.60217733e-19          # elementary charge
    _me = 9.1093897e-31          # electron mass
    _mp = 1.6726231e-27          # proton mass
    _Nav = 6.0221367e23          # Avogadro number
    _k = 1.380658e-23            # Boltzmann constant, J/K
    _amu = 1.6605402e-27         # atomic mass unit, kg
    Ang = Angstrom = 1.0
    nm = 10.0
    Bohr = 4e10 * pi * _eps0 * _hbar**2 / _me / _e**2  # Bohr radius
    eV = 1.0
    Hartree = _me * _e**3 / 16 / pi**2 / _eps0**2 / _hbar**2
    kJ = 1000.0 / _e
    kcal = 4.184 * kJ
    mol = _Nav
    Rydberg = 0.5 * Hartree
    Ry = Rydberg
    Ha = Hartree
    second = 1e10 * sqrt(_e / _amu)  # one second in internal time units
    fs = 1e-15 * second
    kB = _k / _e                     # Boltzmann constant, eV/K
    Pascal = (1 / _e) / 1e30         # J/m^3
    GPa = 1e9 * Pascal
    Debye = 1e11 *_e * _c
    alpha = _e**2 / (4 * pi * _eps0) / _hbar / _c  # fine structure constant
    # Derived atomic units that have no assigned name:
    _aut = _hbar / (alpha**2 * _me * _c**2)     # atomic unit of time, s
    _auv = _e**2 / _hbar / (4 * pi * _eps0)     # atomic unit of velocity, m/s
    _auf = alpha**3 * _me**2 * _c**3 / _hbar    # atomic unit of force, N
    _aup = alpha**5 * _me**4 * _c**5 / _hbar**3 # atomic unit of pressure, Pa
    AUT = second * _aut
# Shared singleton instance of the unit system above.
units = Units()
# Chemical symbols indexed by atomic number; index 0 is the dummy 'X'.
chemical_symbols = ['X', 'H', 'He', 'Li', 'Be',
                    'B', 'C', 'N', 'O', 'F',
                    'Ne', 'Na', 'Mg', 'Al', 'Si',
                    'P', 'S', 'Cl', 'Ar', 'K',
                    'Ca', 'Sc', 'Ti', 'V', 'Cr',
                    'Mn', 'Fe', 'Co', 'Ni', 'Cu',
                    'Zn', 'Ga', 'Ge', 'As', 'Se',
                    'Br', 'Kr', 'Rb', 'Sr', 'Y',
                    'Zr', 'Nb', 'Mo', 'Tc', 'Ru',
                    'Rh', 'Pd', 'Ag', 'Cd', 'In',
                    'Sn', 'Sb', 'Te', 'I', 'Xe',
                    'Cs', 'Ba', 'La', 'Ce', 'Pr',
                    'Nd', 'Pm', 'Sm', 'Eu', 'Gd',
                    'Tb', 'Dy', 'Ho', 'Er', 'Tm',
                    'Yb', 'Lu', 'Hf', 'Ta', 'W',
                    'Re', 'Os', 'Ir', 'Pt', 'Au',
                    'Hg', 'Tl', 'Pb', 'Bi', 'Po',
                    'At', 'Rn', 'Fr', 'Ra', 'Ac',
                    'Th', 'Pa', 'U', 'Np', 'Pu',
                    'Am', 'Cm', 'Bk', 'Cf', 'Es',
                    'Fm', 'Md', 'No', 'Lr']
# Reverse lookup: symbol -> atomic number.
# NOTE(review): the table is seeded with 'A': 1 before being filled in;
# 'A' is not an element symbol -- confirm whether any caller relies on it.
atomic_numbers = {'A':1}
for Z, symbol in enumerate(chemical_symbols):
    atomic_numbers[symbol] = Z
# Full element names indexed by atomic number ('' for the dummy entry 0).
atomic_names = [
    '', 'Hydrogen', 'Helium', 'Lithium', 'Beryllium', 'Boron',
    'Carbon', 'Nitrogen', 'Oxygen', 'Fluorine', 'Neon', 'Sodium',
    'Magnesium', 'Aluminium', 'Silicon', 'Phosphorus', 'Sulfur',
    'Chlorine', 'Argon', 'Potassium', 'Calcium', 'Scandium',
    'Titanium', 'Vanadium', 'Chromium', 'Manganese', 'Iron',
    'Cobalt', 'Nickel', 'Copper', 'Zinc', 'Gallium', 'Germanium',
    'Arsenic', 'Selenium', 'Bromine', 'Krypton', 'Rubidium',
    'Strontium', 'Yttrium', 'Zirconium', 'Niobium', 'Molybdenum',
    'Technetium', 'Ruthenium', 'Rhodium', 'Palladium', 'Silver',
    'Cadmium', 'Indium', 'Tin', 'Antimony', 'Tellurium',
    'Iodine', 'Xenon', 'Caesium', 'Barium', 'Lanthanum',
    'Cerium', 'Praseodymium', 'Neodymium', 'Promethium',
    'Samarium', 'Europium', 'Gadolinium', 'Terbium',
    'Dysprosium', 'Holmium', 'Erbium', 'Thulium', 'Ytterbium',
    'Lutetium', 'Hafnium', 'Tantalum', 'Tungsten', 'Rhenium',
    'Osmium', 'Iridium', 'Platinum', 'Gold', 'Mercury',
    'Thallium', 'Lead', 'Bismuth', 'Polonium', 'Astatine',
    'Radon', 'Francium', 'Radium', 'Actinium', 'Thorium',
    'Protactinium', 'Uranium', 'Neptunium', 'Plutonium',
    'Americium', 'Curium', 'Berkelium', 'Californium',
    'Einsteinium', 'Fermium', 'Mendelevium', 'Nobelium',
    'Lawrencium', 'Unnilquadium', 'Unnilpentium', 'Unnilhexium']
# Atomic masses indexed by atomic number (np.nan for elements without a
# tabulated value; index 0 is the dummy entry).
atomic_masses = np.array([
   0.00000, # X
   1.00794, # H
   4.00260, # He
   6.94100, # Li
   9.01218, # Be
  10.81100, # B
  12.01100, # C
  14.00670, # N
  15.99940, # O
  18.99840, # F
  20.17970, # Ne
  22.98977, # Na
  24.30500, # Mg
  26.98154, # Al
  28.08550, # Si
  30.97376, # P
  32.06600, # S
  35.45270, # Cl
  39.94800, # Ar
  39.09830, # K
  40.07800, # Ca
  44.95590, # Sc
  47.88000, # Ti
  50.94150, # V
  51.99600, # Cr
  54.93800, # Mn
  55.84700, # Fe
  58.93320, # Co
  58.69340, # Ni
  63.54600, # Cu
  65.39000, # Zn
  69.72300, # Ga
  72.61000, # Ge
  74.92160, # As
  78.96000, # Se
  79.90400, # Br
  83.80000, # Kr
  85.46780, # Rb
  87.62000, # Sr
  88.90590, # Y
  91.22400, # Zr
  92.90640, # Nb
  95.94000, # Mo
    np.nan, # Tc
 101.07000, # Ru
 102.90550, # Rh
 106.42000, # Pd
 107.86800, # Ag
 112.41000, # Cd
 114.82000, # In
 118.71000, # Sn
 121.75700, # Sb
 127.60000, # Te
 126.90450, # I
 131.29000, # Xe
 132.90540, # Cs
 137.33000, # Ba
 138.90550, # La
 140.12000, # Ce
 140.90770, # Pr
 144.24000, # Nd
    np.nan, # Pm
 150.36000, # Sm
 151.96500, # Eu
 157.25000, # Gd
 158.92530, # Tb
 162.50000, # Dy
 164.93030, # Ho
 167.26000, # Er
 168.93420, # Tm
 173.04000, # Yb
 174.96700, # Lu
 178.49000, # Hf
 180.94790, # Ta
 183.85000, # W
 186.20700, # Re
 190.20000, # Os
 192.22000, # Ir
 195.08000, # Pt
 196.96650, # Au
 200.59000, # Hg
 204.38300, # Tl
 207.20000, # Pb
 208.98040, # Bi
    np.nan, # Po
    np.nan, # At
    np.nan, # Rn
    np.nan, # Fr
 226.02540, # Ra
    np.nan, # Ac
 232.03810, # Th
 231.03590, # Pa
 238.02900, # U
 237.04820, # Np
    np.nan, # Pu
    np.nan, # Am
    np.nan, # Cm
    np.nan, # Bk
    np.nan, # Cf
    np.nan, # Es
    np.nan, # Fm
    np.nan, # Md
    np.nan, # No
    np.nan])# Lw
# Covalent radii from:
#
#   Covalent radii revisited,
#   <NAME>, <NAME>, <NAME>, <NAME>,
#   <NAME>, <NAME>, <NAME> and <NAME>,
#   Dalton Trans., 2008, 2832-2838 DOI:10.1039/B801115J
#
# Indexed by atomic number; *missing* is the fallback used for elements
# with no tabulated value.
missing = 0.2
covalent_radii = np.array([
    missing,  # X
    0.31,  # H
    0.28,  # He
    1.28,  # Li
    0.96,  # Be
    0.84,  # B
    0.76,  # C
    0.71,  # N
    0.66,  # O
    0.57,  # F
    0.58,  # Ne
    1.66,  # Na
    1.41,  # Mg
    1.21,  # Al
    1.11,  # Si
    1.07,  # P
    1.05,  # S
    1.02,  # Cl
    1.06,  # Ar
    2.03,  # K
    1.76,  # Ca
    1.70,  # Sc
    1.60,  # Ti
    1.53,  # V
    1.39,  # Cr
    1.39,  # Mn
    1.32,  # Fe
    1.26,  # Co
    1.24,  # Ni
    1.32,  # Cu
    1.22,  # Zn
    1.22,  # Ga
    1.20,  # Ge
    1.19,  # As
    1.20,  # Se
    1.20,  # Br
    1.16,  # Kr
    2.20,  # Rb
    1.95,  # Sr
    1.90,  # Y
    1.75,  # Zr
    1.64,  # Nb
    1.54,  # Mo
    1.47,  # Tc
    1.46,  # Ru
    1.42,  # Rh
    1.39,  # Pd
    1.45,  # Ag
    1.44,  # Cd
    1.42,  # In
    1.39,  # Sn
    1.39,  # Sb
    1.38,  # Te
    1.39,  # I
    1.40,  # Xe
    2.44,  # Cs
    2.15,  # Ba
    2.07,  # La
    2.04,  # Ce
    2.03,  # Pr
    2.01,  # Nd
    1.99,  # Pm
    1.98,  # Sm
    1.98,  # Eu
    1.96,  # Gd
    1.94,  # Tb
    1.92,  # Dy
    1.92,  # Ho
    1.89,  # Er
    1.90,  # Tm
    1.87,  # Yb
    1.87,  # Lu
    1.75,  # Hf
    1.70,  # Ta
    1.62,  # W
    1.51,  # Re
    1.44,  # Os
    1.41,  # Ir
    1.36,  # Pt
    1.36,  # Au
    1.32,  # Hg
    1.45,  # Tl
    1.46,  # Pb
    1.48,  # Bi
    1.40,  # Po
    1.50,  # At
    1.50,  # Rn
    2.60,  # Fr
    2.21,  # Ra
    2.15,  # Ac
    2.06,  # Th
    2.00,  # Pa
    1.96,  # U
    1.90,  # Np
    1.87,  # Pu
    1.80,  # Am
    1.69,  # Cm
    missing,  # Bk
    missing,  # Cf
    missing,  # Es
    missing,  # Fm
    missing,  # Md
    missing,  # No
    missing,  # Lr
    ])
# Maps each per-atom attribute name (singular) to a pair of
# (plural array name, default value).  The None default for 'mass'
# means "look the value up in atomic_masses" (see Atom.get).
names = {'position': ('positions', np.zeros(3)),
         'number': ('numbers', 0),
         'tag': ('tags', 0),
         'momentum': ('momenta', np.zeros(3)),
         'mass': ('masses', None),
         'magmom': ('magmoms', 0.0),
         'charge': ('charges', 0.0)
         }
def atomproperty(name, doc):
    """Build an Atom attribute property delegating to get/set/delete."""
    def _getter(self):
        return self.get(name)

    def _setter(self, value):
        self.set(name, value)

    def _deleter(self):
        self.delete(name)

    return property(_getter, _setter, _deleter, doc)
def xyzproperty(index):
    """Build a property exposing one cartesian component of .position."""
    def _getter(self):
        return self.position[index]

    def _setter(self, value):
        self.position[index] = value

    return property(_getter, _setter, doc='XYZ'[index] + '-coordinate')
class Atom(object):
    """Class for representing a single atom.
    An Atom either owns its data (self.data dict, when created
    stand-alone) or is a live view into a parent Atoms object's arrays
    (when *atoms* and *index* are given).
    Parameters:
    symbol: str or int
        Can be a chemical symbol (str) or an atomic number (int).
    position: sequence of 3 floats
        Atomic position.
    tag: int
        Special purpose tag.
    momentum: sequence of 3 floats
        Momentum for atom.
    mass: float
        Atomic mass in atomic units.
    magmom: float or 3 floats
        Magnetic moment.
    charge: float
        Atomic charge.
    """
    __slots__ = ['data', 'atoms', 'index']
    def __init__(self, symbol='X', position=(0, 0, 0),
                 tag=None, momentum=None, mass=None,
                 magmom=None, charge=None,
                 atoms=None, index=None):
        self.data = d = {}
        if atoms is None:
            # This atom is not part of any Atoms object:
            if isinstance(symbol, str):
                d['number'] = atomic_numbers[symbol]
            else:
                d['number'] = symbol
            d['position'] = np.array(position, float)
            d['tag'] = tag
            if momentum is not None:
                momentum = np.array(momentum, float)
            d['momentum'] = momentum
            d['mass'] = mass
            if magmom is not None:
                magmom = np.array(magmom, float)
            d['magmom'] = magmom
            d['charge'] = charge
        self.index = index
        self.atoms = atoms
    def __repr__(self):
        # Only attributes that were explicitly set appear in the repr.
        s = "Atom('%s', %s" % (self.symbol, list(self.position))
        for name in ['tag', 'momentum', 'mass', 'magmom', 'charge']:
            value = self.get_raw(name)
            if value is not None:
                if isinstance(value, np.ndarray):
                    value = value.tolist()
                s += ', %s=%s' % (name, value)
        if self.atoms is None:
            s += ')'
        else:
            s += ', index=%d)' % self.index
        return s
    def cut_reference_to_atoms(self):
        """Cut reference to atoms object."""
        # Snapshot the current values into self.data so the atom becomes
        # self-contained and no longer mirrors the parent's arrays.
        for name in names:
            self.data[name] = self.get_raw(name)
        self.index = None
        self.atoms = None
    def get_raw(self, name):
        """Get attribute, return None if not explicitely set."""
        if name == 'symbol':
            return chemical_symbols[self.get_raw('number')]
        if self.atoms is None:
            return self.data[name]
        # Attached mode: read through to the parent's per-atom array.
        plural = names[name][0]
        if plural in self.atoms.arrays:
            return self.atoms.arrays[plural][self.index]
        else:
            return None
    def get(self, name):
        """Get attribute, return default if not explicitely set."""
        value = self.get_raw(name)
        if value is None:
            if name == 'mass':
                # Mass default comes from the element's standard mass.
                value = atomic_masses[self.number]
            else:
                value = names[name][1]
        return value
    def set(self, name, value):
        """Set attribute."""
        if name == 'symbol':
            name = 'number'
            value = atomic_numbers[value]
        if self.atoms is None:
            assert name in names
            self.data[name] = value
        else:
            # Attached mode: write into the parent's array, creating it
            # (filled with defaults) if it does not exist yet.
            plural, default = names[name]
            if plural in self.atoms.arrays:
                array = self.atoms.arrays[plural]
                if name == 'magmom' and array.ndim == 2:
                    assert len(value) == 3
                array[self.index] = value
            else:
                if name == 'magmom' and np.asarray(value).ndim == 1:
                    # Non-collinear moment: allocate an (natoms, 3) array.
                    array = np.zeros((len(self.atoms), 3))
                elif name == 'mass':
                    array = self.atoms.get_masses()
                else:
                    default = np.asarray(default)
                    array = np.zeros((len(self.atoms),) + default.shape,
                                     default.dtype)
                array[self.index] = value
                self.atoms.new_array(plural, array)
    def delete(self, name):
        """Delete attribute."""
        # Only allowed for detached atoms and optional attributes.
        assert self.atoms is None
        assert name not in ['number', 'symbol', 'position']
        self.data[name] = None
    symbol = atomproperty('symbol', 'Chemical symbol')
    number = atomproperty('number', 'Atomic number')
    position = atomproperty('position', 'XYZ-coordinates')
    tag = atomproperty('tag', 'Integer tag')
    momentum = atomproperty('momentum', 'XYZ-momentum')
    mass = atomproperty('mass', 'Atomic mass')
    magmom = atomproperty('magmom', 'Initial magnetic moment')
    charge = atomproperty('charge', 'Atomic charge')
    x = xyzproperty(0)
    y = xyzproperty(1)
    z = xyzproperty(2)
    def _get(self, name):
        """Helper function for deprecated get methods."""
        warnings.warn('Use atom.%s' % name, stacklevel=3)
        return getattr(self, name)
    def _set(self, name, value):
        """Helper function for deprecated set methods."""
        warnings.warn('Use atom.%s = ...' % name, stacklevel=3)
        setattr(self, name, value)
    # Deprecated accessor API; each call emits a warning and forwards to
    # the corresponding property.
    def get_symbol(self): return self._get('symbol')
    def get_atomic_number(self): return self._get('number')
    def get_position(self): return self._get('position')
    def get_tag(self): return self._get('tag')
    def get_momentum(self): return self._get('momentum')
    def get_mass(self): return self._get('mass')
    def get_initial_magnetic_moment(self): return self._get('magmom')
    def get_charge(self): return self._get('charge')
    def set_symbol(self, value): self._set('symbol', value)
    def set_atomic_number(self, value): self._set('number', value)
    def set_position(self, value): self._set('position', value)
    def set_tag(self, value): self._set('tag', value)
    def set_momentum(self, value): self._set('momentum', value)
    def set_mass(self, value): self._set('mass', value)
    def set_initial_magnetic_moment(self, value): self._set('magmom', value)
    def set_charge(self, value): self._set('charge', value)
class Atoms(object):
"""Atoms object.
The Atoms object can represent an isolated molecule, or a
periodically repeated structure. It has a unit cell and
there may be periodic boundary conditions along any of the three
unit cell axes.
Information about the atoms (atomic numbers and position) is
stored in ndarrays. Optionally, there can be information about
tags, momenta, masses, magnetic moments and charges.
In order to calculate energies, forces and stresses, a calculator
object has to attached to the atoms object.
Parameters:
symbols: str (formula) or list of str
Can be a string formula, a list of symbols or a list of
Atom objects. Examples: 'H2O', 'COPt12', ['H', 'H', 'O'],
[Atom('Ne', (x, y, z)), ...].
positions: list of xyz-positions
Atomic positions. Anything that can be converted to an
ndarray of shape (n, 3) will do: [(x1,y1,z1), (x2,y2,z2),
...].
scaled_positions: list of scaled-positions
Like positions, but given in units of the unit cell.
Can not be set at the same time as positions.
numbers: list of int
Atomic numbers (use only one of symbols/numbers).
tags: list of int
Special purpose tags.
momenta: list of xyz-momenta
Momenta for all atoms.
masses: list of float
Atomic masses in atomic units.
magmoms: list of float or list of xyz-values
Magnetic moments. Can be either a single value for each atom
for collinear calculations or three numbers for each atom for
non-collinear calculations.
charges: list of float
Atomic charges.
cell: 3x3 matrix
Unit cell vectors. Can also be given as just three
numbers for orthorhombic cells. Default value: [1, 1, 1].
pbc: one or three bool
Periodic boundary conditions flags. Examples: True,
False, 0, 1, (1, 1, 0), (True, False, False). Default
value: False.
constraint: constraint object(s)
Used for applying one or more constraints during structure
optimization.
calculator: calculator object
Used to attach a calculator for calculating energies and atomic
forces.
info: dict of key-value pairs
Dictionary of key-value pairs with additional information
about the system. The following keys may be used by ase:
- spacegroup: Spacegroup instance
- unit_cell: 'conventional' | 'primitive' | int | 3 ints
- adsorbate_info:
Items in the info attribute survives copy and slicing and can
be store to and retrieved from trajectory files given that the
key is a string, the value is picklable and, if the value is a
user-defined object, its base class is importable. One should
not make any assumptions about the existence of keys.
Examples:
These three are equivalent:
>>> d = 1.104 # N2 bondlength
>>> a = Atoms('N2', [(0, 0, 0), (0, 0, d)])
>>> a = Atoms(numbers=[7, 7], positions=[(0, 0, 0), (0, 0, d)])
>>> a = Atoms([Atom('N', (0, 0, 0)), Atom('N', (0, 0, d)])
FCC gold:
>>> a = 4.05 # Gold lattice constant
>>> b = a / 2
>>> fcc = Atoms('Au',
... cell=[(0, b, b), (b, 0, b), (b, b, 0)],
... pbc=True)
Hydrogen wire:
>>> d = 0.9 # H-H distance
>>> L = 7.0
>>> h = Atoms('H', positions=[(0, L / 2, L / 2)],
... cell=(d, L, L),
... pbc=(1, 0, 0))
"""
    def __init__(self, symbols=None,
                 positions=None, numbers=None,
                 tags=None, momenta=None, masses=None,
                 magmoms=None, charges=None,
                 scaled_positions=None,
                 cell=None, pbc=None,
                 constraint=None,
                 calculator=None,
                 info=None):
        # *symbols* may be a formula string, another Atoms-like object,
        # an old-ASE list-of-atoms, or a list of Atom objects; the three
        # object forms are normalized into *atoms* below and copied from.
        atoms = None
        if hasattr(symbols, 'GetUnitCell'):
            from ase.old import OldASEListOfAtomsWrapper
            atoms = OldASEListOfAtomsWrapper(symbols)
            symbols = None
        elif hasattr(symbols, 'get_positions'):
            atoms = symbols
            symbols = None
        elif (isinstance(symbols, (list, tuple)) and
              len(symbols) > 0 and isinstance(symbols[0], Atom)):
            # Get data from a list or tuple of Atom objects:
            data = [[atom.get_raw(name) for atom in symbols]
                    for name in
                    ['position', 'number', 'tag', 'momentum',
                     'mass', 'magmom', 'charge']]
            atoms = self.__class__(None, *data)
            symbols = None
        if atoms is not None:
            # Get data from another Atoms object; explicitly passed
            # keyword arguments take precedence over copied values.
            if scaled_positions is not None:
                raise NotImplementedError
            if symbols is None and numbers is None:
                numbers = atoms.get_atomic_numbers()
            if positions is None:
                positions = atoms.get_positions()
            if tags is None and atoms.has('tags'):
                tags = atoms.get_tags()
            if momenta is None and atoms.has('momenta'):
                momenta = atoms.get_momenta()
            if magmoms is None and atoms.has('magmoms'):
                magmoms = atoms.get_initial_magnetic_moments()
            if masses is None and atoms.has('masses'):
                masses = atoms.get_masses()
            if charges is None and atoms.has('charges'):
                charges = atoms.get_charges()
            if cell is None:
                cell = atoms.get_cell()
            if pbc is None:
                pbc = atoms.get_pbc()
            if constraint is None:
                constraint = [c.copy() for c in atoms.constraints]
            if calculator is None:
                calculator = atoms.get_calculator()
        self.arrays = {}
        if symbols is None:
            if numbers is None:
                # No identities given: create zero-valued atomic numbers,
                # sized to whichever positions argument was supplied.
                if positions is not None:
                    natoms = len(positions)
                elif scaled_positions is not None:
                    natoms = len(scaled_positions)
                else:
                    natoms = 0
                numbers = np.zeros(natoms, int)
            self.new_array('numbers', numbers, int)
        else:
            if numbers is not None:
                raise ValueError(
                    'Use only one of "symbols" and "numbers".')
            else:
                self.new_array('numbers', symbols2numbers(symbols), int)
        if cell is None:
            cell = np.eye(3)
        self.set_cell(cell)
        if positions is None:
            if scaled_positions is None:
                positions = np.zeros((len(self.arrays['numbers']), 3))
            else:
                # Convert fractional coordinates to cartesian.
                positions = np.dot(scaled_positions, self._cell)
        else:
            if scaled_positions is not None:
                raise RuntimeError('Both scaled and cartesian positions set!')
        self.new_array('positions', positions, float, (3,))
        self.set_constraint(constraint)
        # default() is a module-level helper -- presumably substituting
        # the fallback when the argument is None; TODO confirm.
        self.set_tags(default(tags, 0))
        self.set_momenta(default(momenta, (0.0, 0.0, 0.0)))
        self.set_masses(default(masses, None))
        self.set_initial_magnetic_moments(default(magmoms, 0.0))
        self.set_charges(default(charges, 0.0))
        if pbc is None:
            pbc = False
        self.set_pbc(pbc)
        if info is None:
            self.info = {}
        else:
            self.info = dict(info)
        self.adsorbate_info = {}
        self.set_calculator(calculator)
    def set_calculator(self, calc=None):
        """Attach calculator object."""
        # Wrap old-ASE calculators (identified by _SetListOfAtoms) and
        # let new-style calculators learn about these atoms.
        if hasattr(calc, '_SetListOfAtoms'):
            from ase.old import OldASECalculatorWrapper
            calc = OldASECalculatorWrapper(calc, self)
        if hasattr(calc, 'set_atoms'):
            calc.set_atoms(self)
        self._calc = calc
    def get_calculator(self):
        """Get currently attached calculator object."""
        return self._calc
    def _del_calculator(self):
        # Deleter for the 'calc' property below.
        self._calc = None
    calc = property(get_calculator, set_calculator, _del_calculator,
                    doc='Calculator object.')
    def set_constraint(self, constraint=None):
        """Apply one or more constraints.
        The *constraint* argument must be one constraint object or a
        list of constraint objects."""
        if constraint is None:
            self._constraints = []
        else:
            # Always store a list internally, even for a single object.
            if isinstance(constraint, (list, tuple)):
                self._constraints = constraint
            else:
                self._constraints = [constraint]
    def _get_constraints(self):
        return self._constraints
    def _del_constraints(self):
        self._constraints = []
    constraints = property(_get_constraints, set_constraint, _del_constraints,
                           'Constraints of the atoms.')
def set_cell(self, cell, scale_atoms=False, fix=None):
"""Set unit cell vectors.
Parameters:
cell :
Unit cell. A 3x3 matrix (the three unit cell vectors) or
just three numbers for an orthorhombic cell.
scale_atoms : bool
Fix atomic positions or move atoms with the unit cell?
Default behavior is to *not* move the atoms (scale_atoms=False).
Examples:
Two equivalent ways to define an orthorhombic cell:
>>> a.set_cell([a, b, c])
>>> a.set_cell([(a, 0, 0), (0, b, 0), (0, 0, c)])
FCC unit cell:
>>> a.set_cell([(0, b, b), (b, 0, b), (b, b, 0)])
"""
if fix is not None:
raise TypeError('Please use scale_atoms=%s' % (not fix))
cell = np.array(cell, float)
if cell.shape == (3,):
cell = np.diag(cell)
elif cell.shape != (3, 3):
raise ValueError('Cell must be length 3 sequence or '
'3x3 matrix!')
if scale_atoms:
M = np.linalg.solve(self._cell, cell)
self.arrays['positions'][:] = np.dot(self.arrays['positions'], M)
self._cell = cell
    def get_cell(self):
        """Get the three unit cell vectors as a 3x3 ndarray."""
        return self._cell.copy()
    def get_reciprocal_cell(self):
        """Get the three reciprocal lattice vectors as a 3x3 ndarray.
        Note that the commonly used factor of 2 pi for Fourier
        transforms is not included here."""
        rec_unit_cell = np.linalg.inv(self.get_cell()).transpose()
        return rec_unit_cell
    def set_pbc(self, pbc):
        """Set periodic boundary condition flags."""
        # A single int/bool is broadcast to all three directions.
        if isinstance(pbc, int):
            pbc = (pbc,) * 3
        self._pbc = np.array(pbc, bool)
    def get_pbc(self):
        """Get periodic boundary condition flags."""
        return self._pbc.copy()
def new_array(self, name, a, dtype=None, shape=None):
"""Add new array.
If *shape* is not *None*, the shape of *a* will be checked."""
if dtype is not None:
a = np.array(a, dtype)
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
    def get_array(self, name, copy=True):
        """Get an array.
        Returns a copy unless the optional argument copy is false.
        """
        if copy:
            return self.arrays[name].copy()
        else:
            return self.arrays[name]
    def set_array(self, name, a, dtype=None, shape=None):
        """Update array.
        If *shape* is not *None*, the shape of *a* will be checked.
        If *a* is *None*, then the array is deleted."""
        b = self.arrays.get(name)
        if b is None:
            if a is not None:
                self.new_array(name, a, dtype, shape)
        else:
            if a is None:
                del self.arrays[name]
            else:
                # Overwrite in place so existing references stay valid;
                # the replacement must match the stored shape exactly.
                a = np.asarray(a)
                if a.shape != b.shape:
                    raise ValueError('Array has wrong shape %s != %s.' %
                                     (a.shape, b.shape))
                b[:] = a
    def has(self, name):
        """Check for existence of array.
        name must be one of: 'tags', 'momenta', 'masses', 'magmoms',
        'charges'."""
        return name in self.arrays
    def set_atomic_numbers(self, numbers):
        """Set atomic numbers."""
        self.set_array('numbers', numbers, int, ())
    def get_atomic_numbers(self):
        """Get integer array of atomic numbers."""
        return self.arrays['numbers'].copy()
    def set_chemical_symbols(self, symbols):
        """Set chemical symbols."""
        self.set_array('numbers', symbols2numbers(symbols), int, ())
    def get_chemical_symbols(self, reduce=False):
        """Get list of chemical symbol strings.
        If reduce is True, a single string is returned, where repeated
        elements have been contracted to a single symbol and a number.
        E.g. instead of ['C', 'O', 'O', 'H'], the string 'CO2H' is returned.
        """
        if not reduce:
            # XXX
            return [chemical_symbols[Z] for Z in self.arrays['numbers']]
        else:
            num = self.get_atomic_numbers()
            N = len(num)
            # dis: indices where a new run of equal atomic numbers
            # starts; repeat: the length of each run.
            dis = np.concatenate(([0], np.arange(1, N)[num[1:] != num[:-1]]))
            repeat = np.append(dis[1:], N) - dis
            # A count of 1 is omitted (str(r) * (r != 1) gives '').
            symbols = ''.join([chemical_symbols[num[d]] + str(r) * (r != 1)
                               for r, d in zip(repeat, dis)])
            return symbols
    def set_tags(self, tags):
        """Set tags for all atoms."""
        self.set_array('tags', tags, int, ())
    def get_tags(self):
        """Get integer array of tags."""
        if 'tags' in self.arrays:
            return self.arrays['tags'].copy()
        else:
            return np.zeros(len(self), int)
    def set_momenta(self, momenta):
        """Set momenta."""
        if len(self.constraints) > 0 and momenta is not None:
            momenta = np.array(momenta)  # modify a copy
            # NOTE(review): reuses adjust_forces() on the momenta --
            # presumably because momenta transform like forces under the
            # constraints; confirm against the constraint classes.
            for constraint in self.constraints:
                constraint.adjust_forces(self.arrays['positions'], momenta)
        self.set_array('momenta', momenta, float, (3,))
    def set_velocities(self, velocities):
        """Set the momenta by specifying the velocities."""
        self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)
    def get_momenta(self):
        """Get array of momenta."""
        if 'momenta' in self.arrays:
            return self.arrays['momenta'].copy()
        else:
            return np.zeros((len(self), 3))
def set_masses(self, masses='defaults'):
"""Set atomic masses.
The array masses should contain a list of masses. In case
the masses argument is not given or for those elements of the
masses list that are None, standard values are set."""
if masses == 'defaults':
masses = atomic_masses[self.arrays['numbers']]
elif isinstance(masses, (list, tuple)):
newmasses = []
for m, Z in zip(masses, self.arrays['numbers']):
if m is None:
newmasses.append(atomic_masses[Z])
else:
newmasses.append(m)
masses = newmasses
self.set_array('masses', masses, float, ())
    def get_masses(self):
        """Get array of masses."""
        if 'masses' in self.arrays:
            return self.arrays['masses'].copy()
        else:
            # No explicit masses stored: use the standard per-element values.
            return atomic_masses[self.arrays['numbers']]
    def set_initial_magnetic_moments(self, magmoms=None):
        """Set the initial magnetic moments.
        Use either one or three numbers for every atom (collinear
        or non-collinear spins)."""
        if magmoms is None:
            self.set_array('magmoms', None)
        else:
            # shape[1:] is () for collinear input, (3,) for non-collinear.
            magmoms = np.asarray(magmoms)
            self.set_array('magmoms', magmoms, float, magmoms.shape[1:])
    def get_initial_magnetic_moments(self):
        """Get array of initial magnetic moments."""
        if 'magmoms' in self.arrays:
            return self.arrays['magmoms'].copy()
        else:
            return np.zeros(len(self))
    def get_magnetic_moments(self):
        """Get calculated local magnetic moments."""
        if self._calc is None:
            raise RuntimeError('Atoms object has no calculator.')
        if self._calc.get_spin_polarized():
            return self._calc.get_magnetic_moments(self)
        else:
            # Spin-paired calculation: all local moments are zero.
            return np.zeros(len(self))
    def get_magnetic_moment(self):
        """Get calculated total magnetic moment."""
        if self._calc is None:
            raise RuntimeError('Atoms object has no calculator.')
        if self._calc.get_spin_polarized():
            return self._calc.get_magnetic_moment(self)
        else:
            return 0.0
    def set_charges(self, charges):
        """Set charges."""
        self.set_array('charges', charges, float, ())
    def get_charges(self):
        """Get array of charges."""
        if 'charges' in self.arrays:
            return self.arrays['charges'].copy()
        else:
            return np.zeros(len(self))
    def set_positions(self, newpositions):
        """Set positions."""
        positions = self.arrays['positions']
        if self.constraints:
            # Let each constraint adjust the proposed positions relative
            # to the current ones before they are stored.
            newpositions = np.asarray(newpositions, float)
            for constraint in self.constraints:
                constraint.adjust_positions(positions, newpositions)
        self.set_array('positions', newpositions, shape=(3,))
    def get_positions(self):
        """Get array of positions."""
        return self.arrays['positions'].copy()
    def get_calculation_done(self):
        """Let the calculator calculate its thing,
        using the current input.
        """
        # NOTE(review): expects a calculator providing initialize() and
        # calculate(); the get_* methods below only need specific getters.
        if self.calc is None:
            raise RuntimeError('Atoms object has no calculator.')
        self.calc.initialize(self)
        self.calc.calculate(self)
    def get_potential_energy(self):
        """Calculate potential energy."""
        if self._calc is None:
            raise RuntimeError('Atoms object has no calculator.')
        return self._calc.get_potential_energy(self)
    def get_potential_energies(self):
        """Calculate the potential energies of all the atoms.
        Only available with calculators supporting per-atom energies
        (e.g. classical potentials).
        """
        if self._calc is None:
            raise RuntimeError('Atoms object has no calculator.')
        return self._calc.get_potential_energies(self)
def get_kinetic_energy(self):
"""Get the kinetic energy."""
momenta = self.arrays.get('momenta')
if momenta is None:
return 0.0
return 0.5 * np.vdot(momenta, self.get_velocities())
    def get_velocities(self):
        """Get array of velocities."""
        momenta = self.arrays.get('momenta')
        if momenta is None:
            return None
        m = self.arrays.get('masses')
        if m is None:
            # Fall back to the standard per-element masses.
            m = atomic_masses[self.arrays['numbers']]
        return momenta / m.reshape(-1, 1)
    def get_total_energy(self):
        """Get the total energy - potential plus kinetic energy."""
        return self.get_potential_energy() + self.get_kinetic_energy()
    def get_forces(self, apply_constraint=True):
        """Calculate atomic forces.
        Ask the attached calculator to calculate the forces and apply
        constraints. Use *apply_constraint=False* to get the raw
        forces."""
        if self._calc is None:
            raise RuntimeError('Atoms object has no calculator.')
        forces = self._calc.get_forces(self)
        if apply_constraint:
            # Constraints modify the forces array in place.
            for constraint in self.constraints:
                constraint.adjust_forces(self.arrays['positions'], forces)
        return forces
def get_max_atom_force(self, apply_constraint=True):
return np.sqrt((self.get_forces()**2).sum(axis=1).max())
def get_stress(self):
"""Calculate stress tensor.
Returns an array of the six independent components of the
symmetric stress tensor, in the traditional order
(s_xx, s_yy, s_zz, s_yz, s_xz, s_xy).
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
stress = self._calc.get_stress(self)
shape = getattr(stress, 'shape', None)
if shape == (3, 3):
return np.array([stress[0, 0], stress[1, 1], stress[2, 2],
stress[1, 2], stress[0, 2], stress[0, 1]])
else:
# Hopefully a 6-vector, but don't check in case some weird
# calculator does something else.
return stress
def get_stresses(self):
"""Calculate the stress-tensor of all the atoms.
Only available with calculators supporting per-atom energies and
stresses (e.g. classical potentials). Even for such calculators
there is a certain arbitrariness in defining per-atom stresses.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_stresses(self)
def get_dipole_moment(self):
"""Calculate the electric dipole moment for the atoms object.
Only available for calculators which has a get_dipole_moment()
method."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
try:
dipole = self._calc.get_dipole_moment(self)
except AttributeError:
raise AttributeError(
'Calculator object has no get_dipole_moment method.')
return dipole
def copy(self):
"""Return a copy."""
import copy
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a.copy()
atoms.constraints = copy.deepcopy(self.constraints)
atoms.adsorbate_info = copy.deepcopy(self.adsorbate_info)
return atoms
def __len__(self):
return len(self.arrays['positions'])
def get_number_of_atoms(self):
"""Returns the number of atoms.
Equivalent to len(atoms) in the standard ASE Atoms class.
"""
return len(self)
    def __repr__(self):
        """Summarize symbols, per-atom arrays, cell, pbc, constraints and
        calculator in an eval-like string."""
        num = self.get_atomic_numbers()
        N = len(num)
        if N == 0:
            symbols = ''
        elif N <= 60:
            symbols = self.get_chemical_symbols(reduce=True)
        else:
            # Too many atoms: show the first 15 symbols and elide the rest.
            symbols = ''.join([chemical_symbols[Z] for Z in num[:15]]) + '...'
        s = "%s(symbols='%s', " % (self.__class__.__name__, symbols)
        for name in self.arrays:
            if name == 'numbers':
                continue
            s += '%s=..., ' % name
        # Print the full 3x3 cell only when it has off-diagonal elements.
        if (self._cell - np.diag(self._cell.diagonal())).any():
            s += 'cell=%s, ' % self._cell.tolist()
        else:
            s += 'cell=%s, ' % self._cell.diagonal().tolist()
        s += 'pbc=%s, ' % self._pbc.tolist()
        if len(self.constraints) == 1:
            s += 'constraint=%s, ' % repr(self.constraints[0])
        if len(self.constraints) > 1:
            s += 'constraint=%s, ' % repr(self.constraints)
        if self._calc is not None:
            s += 'calculator=%s(...), ' % self._calc.__class__.__name__
        # Drop the trailing ', ' and close the parenthesis.
        return s[:-2] + ')'
def __add__(self, other):
atoms = self.copy()
atoms += other
return atoms
    def extend(self, other):
        """Extend atoms object by appending atoms from *other*.

        *other* may be a single Atom or another Atoms-like object.
        Arrays present in only one of the two objects are padded with
        zeros (or with the current masses for a new 'masses' array).
        Returns self, which also makes this usable as ``+=``.
        """
        if isinstance(other, Atom):
            other = self.__class__([other])
        n1 = len(self)
        n2 = len(other)
        # Grow every existing per-atom array to n1 + n2 entries.
        for name, a1 in self.arrays.items():
            a = np.zeros((n1 + n2,) + a1.shape[1:], a1.dtype)
            a[:n1] = a1
            a2 = other.arrays.get(name)
            if a2 is not None:
                a[n1:] = a2
            self.arrays[name] = a
        # Add arrays that only *other* has.
        for name, a2 in other.arrays.items():
            if name in self.arrays:
                continue
            a = np.empty((n1 + n2,) + a2.shape[1:], a2.dtype)
            a[n1:] = a2
            if name == 'masses':
                # Use the default masses for the pre-existing atoms.
                a[:n1] = self.get_masses()
            else:
                a[:n1] = 0
            self.set_array(name, a)
        return self
    __iadd__ = extend
def append(self, atom):
"""Append atom to end."""
self.extend(self.__class__([atom]))
    def __getitem__(self, i):
        """Return a subset of the atoms.

        i -- scalar integer, list of integers, or slice object
        describing which atoms to return.

        If i is a scalar, return an Atom object. If i is a list or a
        slice, return an Atoms object with the same cell, pbc, and
        other associated info as the original Atoms object. The
        indices of the constraints will be shuffled so that they match
        the indexing in the subset returned.
        """
        if isinstance(i, int):
            natoms = len(self)
            # Allow negative indices, but reject anything out of range.
            if i < -natoms or i >= natoms:
                raise IndexError('Index out of range.')
            return Atom(atoms=self, index=i)
        import copy
        atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
        # TODO: Do we need to shuffle indices in adsorbate_info too?
        atoms.adsorbate_info = self.adsorbate_info
        atoms.arrays = {}
        for name, a in self.arrays.items():
            atoms.arrays[name] = a[i].copy()
        # Constraints need to be deepcopied, since we need to shuffle
        # the indices
        atoms.constraints = copy.deepcopy(self.constraints)
        condel = []
        for con in atoms.constraints:
            if isinstance(con, FixConstraint):
                try:
                    con.index_shuffle(i)
                except IndexError:
                    # Constraint covers no selected atom: drop it.
                    condel.append(con)
        for con in condel:
            atoms.constraints.remove(con)
        return atoms
    def __delitem__(self, i):
        """Delete atom(s) *i*.

        Only FixAtoms constraints can be re-indexed here; any other
        constraint type must be removed by the caller first.
        """
        check_constraint = np.array([isinstance(c, FixAtoms)
                                     for c in self._constraints])
        if len(self._constraints) > 0 and not check_constraint.all():
            raise RuntimeError('Remove constraint using set_constraint() ' +
                               'before deleting atoms.')
        # Boolean mask keeping every atom except those selected by i.
        mask = np.ones(len(self), bool)
        mask[i] = False
        for name, a in self.arrays.items():
            self.arrays[name] = a[mask]
        if len(self._constraints) > 0:
            # range(len(mask))[i] turns i (int or slice) into concrete
            # atom indices for the constraints to delete.
            for n in range(len(self._constraints)):
                self._constraints[n].delete_atom(range(len(mask))[i])
def pop(self, i=-1):
"""Remove and return atom at index *i* (default last)."""
atom = self[i]
atom.cut_reference_to_atoms()
del self[i]
return atom
def __imul__(self, m):
"""In-place repeat of atoms."""
if isinstance(m, int):
m = (m, m, m)
M = np.product(m)
n = len(self)
for name, a in self.arrays.items():
self.arrays[name] = np.tile(a, (M,) + (1,) * (len(a.shape) - 1))
positions = self.arrays['positions']
i0 = 0
for m0 in range(m[0]):
for m1 in range(m[1]):
for m2 in range(m[2]):
i1 = i0 + n
positions[i0:i1] += np.dot((m0, m1, m2), self._cell)
i0 = i1
if self.constraints is not None:
self.constraints = [c.repeat(m, n) for c in self.constraints]
self._cell = np.array([m[c] * self._cell[c] for c in range(3)])
return self
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
__mul__ = repeat
def translate(self, displacement):
"""Translate atomic positions.
The displacement argument can be a float an xyz vector or an
nx3 array (where n is the number of atoms)."""
self.arrays['positions'] += np.array(displacement)
    def center(self, vacuum=None, axis=None):
        """Center atoms in unit cell.

        Centers the atoms in the unit cell, so there is the same
        amount of vacuum on all sides.

        Parameters:

        vacuum (default: None): If specified adjust the amount of
        vacuum when centering. If vacuum=10.0 there will thus be 10
        Angstrom of vacuum on each side.

        axis (default: None): If specified, only act on the specified
        axis. Default: Act on all axes.
        """
        # Find the orientations of the faces of the unit cell
        c = self.get_cell()
        dirs = np.zeros_like(c)
        for i in range(3):
            # Face normal = cross product of the other two cell vectors.
            dirs[i] = np.cross(c[i - 1], c[i - 2])
            dirs[i] /= np.sqrt(np.dot(dirs[i], dirs[i]))  # normalize
            if np.dot(dirs[i], c[i]) < 0.0:
                # Flip so the normal points along cell vector i.
                dirs[i] *= -1
        # Now, decide how much each basis vector should be made longer
        if axis is None:
            axes = (0, 1, 2)
        else:
            axes = (axis,)
        p = self.arrays['positions']
        longer = np.zeros(3)
        shift = np.zeros(3)
        for i in axes:
            # Extent of the atoms projected onto the face normal.
            p0 = np.dot(p, dirs[i]).min()
            p1 = np.dot(p, dirs[i]).max()
            height = np.dot(c[i], dirs[i])
            if vacuum is not None:
                lng = (p1 - p0 + 2 * vacuum) - height
            else:
                lng = 0.0  # Do not change unit cell size!
            top = lng + height - p1
            shf = 0.5 * (top - p0)
            # Convert lengths along the normal into lengths along the
            # (possibly non-orthogonal) cell vector.
            cosphi = np.dot(c[i], dirs[i]) / np.sqrt(np.dot(c[i], c[i]))
            longer[i] = lng / cosphi
            shift[i] = shf / cosphi
        # Now, do it!
        translation = np.zeros(3)
        for i in axes:
            nowlen = np.sqrt(np.dot(c[i], c[i]))
            self._cell[i] *= 1 + longer[i] / nowlen
            translation += shift[i] * c[i] / nowlen
        self.arrays['positions'] += translation
def get_center_of_mass(self, scaled=False):
"""Get the center of mass.
If scaled=True the center of mass in scaled coordinates
is returned."""
m = self.arrays.get('masses')
if m is None:
m = atomic_masses[self.arrays['numbers']]
com = np.dot(m, self.arrays['positions']) / m.sum()
if scaled:
return np.linalg.solve(self._cell.T, com)
else:
return com
    def get_moments_of_inertia(self, vectors=False):
        """Get the moments of inertia along the principal axes.

        The three principal moments of inertia are computed from the
        eigenvalues of the symmetric inertial tensor. Periodic boundary
        conditions are ignored. Units of the moments of inertia are
        amu*angstrom**2.

        If vectors=True also return the principal axes (row-wise).
        """
        com = self.get_center_of_mass()
        positions = self.get_positions()
        positions -= com  # translate center of mass to origin
        masses = self.get_masses()
        # initialize elements of the inertial tensor
        I11 = I22 = I33 = I12 = I13 = I23 = 0.0
        for i in range(len(self)):
            x, y, z = positions[i]
            m = masses[i]
            I11 += m * (y**2 + z**2)
            I22 += m * (x**2 + z**2)
            I33 += m * (x**2 + y**2)
            I12 += -m * x * y
            I13 += -m * x * z
            I23 += -m * y * z
        I = np.array([[I11, I12, I13],
                      [I12, I22, I23],
                      [I13, I23, I33]])
        # eigh is appropriate here: the tensor is real and symmetric.
        evals, evecs = np.linalg.eigh(I)
        if vectors:
            return evals, evecs.transpose()
        else:
            return evals
def get_angular_momentum(self):
"""Get total angular momentum with respect to the center of mass."""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
return np.cross(positions, self.get_momenta()).sum(0)
    def rotate(self, v, a=None, center=(0, 0, 0), rotate_cell=False):
        """Rotate atoms.

        Rotate the angle *a* around the vector *v*.  If *a* is not
        given, the length of *v* is used as the angle.  If *a* is a
        vector, then *v* is rotated into *a*.  The point at *center*
        is fixed.  Use *center='COM'* to fix the center of mass.
        Vectors can also be strings: 'x', '-x', 'y', ... .

        Examples:

        Rotate 90 degrees around the z-axis, so that the x-axis is
        rotated into the y-axis:

        >>> a = pi / 2
        >>> atoms.rotate('z', a)
        >>> atoms.rotate((0, 0, 1), a)
        >>> atoms.rotate('-z', -a)
        >>> atoms.rotate((0, 0, a))
        >>> atoms.rotate('x', 'y')
        """
        norm = np.linalg.norm
        v = string2vector(v)
        if a is None:
            # No angle given: use |v| as the rotation angle.
            a = norm(v)
        if isinstance(a, (float, int)):
            v /= norm(v)
            c = cos(a)
            s = sin(a)
        else:
            # *a* is a target vector: build the rotation taking v onto a.
            v2 = string2vector(a)
            v /= norm(v)
            v2 /= norm(v2)
            c = np.dot(v, v2)
            v = np.cross(v, v2)
            s = norm(v)
            # In case *v* and *a* are parallel, np.cross(v, v2) vanish
            # and can't be used as a rotation axis. However, in this
            # case any rotation axis perpendicular to v2 will do.
            eps = 1e-7
            if s < eps:
                v = np.cross((0, 0, 1), v2)
                if norm(v) < eps:
                    v = np.cross((1, 0, 0), v2)
                assert norm(v) >= eps
            if s > 0:
                v /= s
        if isinstance(center, str) and center.lower() == 'com':
            center = self.get_center_of_mass()
        p = self.arrays['positions'] - center
        # Axis-angle (Rodrigues-style) rotation applied to all positions
        # at once: p' = c*p - p x (s*v) + (p.v)(1-c)v.
        self.arrays['positions'][:] = (c * p -
                                       np.cross(p, s * v) +
                                       np.outer(np.dot(p, v), (1.0 - c) * v) +
                                       center)
        if rotate_cell:
            rotcell = self.get_cell()
            rotcell[:] = (c * rotcell -
                          np.cross(rotcell, s * v) +
                          np.outer(np.dot(rotcell, v), (1.0 - c) * v))
            self.set_cell(rotcell)
    def rotate_euler(self, center=(0, 0, 0), phi=0.0, theta=0.0, psi=0.0):
        """Rotate atoms via Euler angles.

        See e.g http://mathworld.wolfram.com/EulerAngles.html for explanation.

        Parameters:

        center :
            The point to rotate about. A sequence of length 3 with the
            coordinates, or 'COM' to select the center of mass.
        phi :
            The 1st rotation angle around the z axis.
        theta :
            Rotation around the x axis.
        psi :
            2nd rotation around the z axis.
        """
        if isinstance(center, str) and center.lower() == 'com':
            center = self.get_center_of_mass()
        else:
            center = np.array(center)
        # First move the molecule to the origin In contrast to MATLAB,
        # numpy broadcasts the smaller array to the larger row-wise,
        # so there is no need to play with the Kronecker product.
        rcoords = self.positions - center
        # First Euler rotation about z in matrix form
        D = np.array(((cos(phi), sin(phi), 0.),
                      (-sin(phi), cos(phi), 0.),
                      (0., 0., 1.)))
        # Second Euler rotation about x:
        C = np.array(((1., 0., 0.),
                      (0., cos(theta), sin(theta)),
                      (0., -sin(theta), cos(theta))))
        # Third Euler rotation, 2nd rotation about z:
        B = np.array(((cos(psi), sin(psi), 0.),
                      (-sin(psi), cos(psi), 0.),
                      (0., 0., 1.)))
        # Total Euler rotation (z-x-z convention): A = B C D.
        A = np.dot(B, np.dot(C, D))
        # Do the rotation
        rcoords = np.dot(A, np.transpose(rcoords))
        # Move back to the rotation point
        self.positions = np.transpose(rcoords) + center
    def get_dihedral(self, list):
        """Calculate dihedral angle.

        Calculate dihedral angle between the vectors list[0]->list[1]
        and list[2]->list[3], where list contains the atomic indexes
        in question.

        Returns the angle in radians, in the range [0, 2*pi).
        """
        # vector 0->1, 1->2, 2->3 and their normalized cross products:
        a = self.positions[list[1]] - self.positions[list[0]]
        b = self.positions[list[2]] - self.positions[list[1]]
        c = self.positions[list[3]] - self.positions[list[2]]
        bxa = np.cross(b, a)
        bxa /= np.linalg.norm(bxa)
        cxb = np.cross(c, b)
        cxb /= np.linalg.norm(cxb)
        angle = np.vdot(bxa, cxb)
        # check for numerical trouble due to finite precision:
        if angle < -1:
            angle = -1
        if angle > 1:
            angle = 1
        angle = np.arccos(angle)
        # The sign of (b x a) . c selects the correct branch beyond pi.
        if np.vdot(bxa, c) > 0:
            angle = 2 * np.pi - angle
        return angle
    def _masked_rotate(self, center, axis, diff, mask):
        # Rotate only the atoms selected by *mask* by *diff* radians
        # around *axis* through *center*.
        #
        # do rotation of subgroup by copying it to temporary atoms object
        # and then rotating that
        #
        # recursive object definition might not be the most elegant thing,
        # more generally useful might be a rotation function with a mask?
        group = self.__class__()
        for i in range(len(self)):
            if mask[i]:
                group += self[i]
        group.translate(-center)
        group.rotate(axis, diff)
        group.translate(center)
        # set positions in original atoms object
        j = 0
        for i in range(len(self)):
            if mask[i]:
                self.positions[i] = group[j].get_position()
                j += 1
    def set_dihedral(self, list, angle, mask=None):
        """
        set the dihedral angle between vectors list[0]->list[1] and
        list[2]->list[3] by changing the atom indexed by list[3]
        if mask is not None, all the atoms described in mask
        (read: the entire subgroup) are moved

        example: the following defines a very crude
        ethane-like molecule and twists one half of it by 30 degrees.

        >>> atoms = Atoms('HHCCHH', [[-1, 1, 0], [-1, -1, 0], [0, 0, 0],
            [1, 0, 0], [2, 1, 0], [2, -1, 0]])
        >>> atoms.set_dihedral([1,2,3,4],7*pi/6,mask=[0,0,0,1,1,1])
        """
        # if not provided, set mask to the last atom in the
        # dihedral description
        if mask is None:
            mask = np.zeros(len(self))
            mask[list[3]] = 1
        # compute necessary in dihedral change, from current value
        current = self.get_dihedral(list)
        diff = angle - current
        # Rotate about the central bond, centered on atom list[2].
        axis = self.positions[list[2]] - self.positions[list[1]]
        center = self.positions[list[2]]
        self._masked_rotate(center, axis, diff, mask)
def rotate_dihedral(self, list, angle, mask=None):
"""Rotate dihedral angle.
Complementing the two routines above: rotate a group by a
predefined dihedral angle, starting from its current
configuration
"""
start = self.get_dihedral(list)
self.set_dihedral(list, angle + start, mask)
def get_angle(self, list):
"""Get angle formed by three atoms.
calculate angle between the vectors list[0]->list[1] and
list[1]->list[2], where list contains the atomic indexes in
question."""
# normalized vector 1->0, 1->2:
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
angle = np.vdot(v10, v12)
angle = np.arccos(angle)
return angle
    def set_angle(self, list, angle, mask=None):
        """Set angle formed by three atoms.

        Sets the angle between vectors list[1]->list[0] and
        list[1]->list[2].

        Same usage as in set_dihedral."""
        # If not provided, set mask to the last atom in the angle description
        if mask is None:
            mask = np.zeros(len(self))
            mask[list[2]] = 1
        # Compute necessary in angle change, from current value
        current = self.get_angle(list)
        diff = current - angle
        # Do rotation of subgroup by copying it to temporary atoms object and
        # then rotating that
        v10 = self.positions[list[0]] - self.positions[list[1]]
        v12 = self.positions[list[2]] - self.positions[list[1]]
        v10 /= np.linalg.norm(v10)
        v12 /= np.linalg.norm(v12)
        # Rotation axis is perpendicular to the plane of the three atoms.
        axis = np.cross(v10, v12)
        center = self.positions[list[1]]
        self._masked_rotate(center, axis, diff, mask)
def rattle(self, stdev=0.001, seed=None):
"""Randomly displace atoms.
This method adds random displacements to the atomic positions,
taking a possible constraint into account. The random numbers are
drawn from a normal distribution of standard deviation stdev.
For a parallel calculation, it is important to use the same
seed on all processors! """
rs = np.random.RandomState(seed)
positions = self.arrays['positions']
self.set_positions(positions +
rs.normal(scale=stdev, size=positions.shape))
def get_distance(self, a0, a1, mic=False):
"""Return distance between two atoms.
Use mic=True to use the Minimum Image Convention.
"""
R = self.arrays['positions']
D = R[a1] - R[a0]
if mic:
Dr = np.linalg.solve(self._cell.T, D)
D = np.dot(Dr - np.round(Dr) * self._pbc, self._cell)
return np.linalg.norm(D)
def set_distance(self, a0, a1, distance, fix=0.5):
"""Set the distance between two atoms.
Set the distance between atoms *a0* and *a1* to *distance*.
By default, the center of the two atoms will be fixed. Use
*fix=0* to fix the first atom, *fix=1* to fix the second
atom and *fix=0.5* (default) to fix the center of the bond."""
R = self.arrays['positions']
D = R[a1] - R[a0]
x = 1.0 - distance / np.linalg.norm(D)
R[a0] += (x * fix) * D
R[a1] -= (x * (1.0 - fix)) * D
    def get_scaled_positions(self):
        """Get positions relative to unit cell.

        Atoms outside the unit cell will be wrapped into the cell in
        those directions with periodic boundary conditions so that the
        scaled coordinates are between zero and one."""
        scaled = np.linalg.solve(self._cell.T, self.arrays['positions'].T).T
        for i in range(3):
            if self._pbc[i]:
                # Yes, we need to do it twice.
                # See the scaled_positions.py test
                # (presumably floating-point rounding can leave a value of
                # exactly 1.0 after a single modulo -- see that test).
                scaled[:, i] %= 1.0
                scaled[:, i] %= 1.0
        return scaled
def set_scaled_positions(self, scaled):
"""Set positions relative to unit cell."""
self.arrays['positions'][:] = np.dot(scaled, self._cell)
def get_temperature(self):
"""Get the temperature. in Kelvin"""
ekin = self.get_kinetic_energy() / len(self)
return ekin / (1.5 * units.kB)
def get_isotropic_pressure(self, stress):
"""Get the current calculated pressure, assume isotropic medium.
in Bar
"""
if type(stress) == type(1.0) or type(stress) == type(1):
return -stress * 1e-5 / units.Pascal
elif stress.shape == (3, 3):
return (-(stress[0, 0] + stress[1, 1] + stress[2, 2]) / 3.0) * \
1e-5 / units.Pascal
elif stress.shape == (6,):
return (-(stress[0] + stress[1] + stress[2]) / 3.0) * \
1e-5 / units.Pascal
else:
raise ValueError('The external stress has the wrong shape.')
    def __eq__(self, other):
        """Check for identity of two atoms objects.

        Identity means: same positions, atomic numbers, unit cell and
        periodic boundary conditions."""
        try:
            a = self.arrays
            b = other.arrays
            return (len(self) == len(other) and
                    (a['positions'] == b['positions']).all() and
                    (a['numbers'] == b['numbers']).all() and
                    (self._cell == other.cell).all() and
                    (self._pbc == other.pbc).all())
        except AttributeError:
            # *other* is not Atoms-like; let Python try the reflected op.
            return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return eq
else:
return not eq
__hash__ = None
def get_volume(self):
"""Get volume of unit cell."""
return abs(np.linalg.det(self._cell))
    def _get_positions(self):
        """Return reference to positions-array for in-place manipulations."""
        return self.arrays['positions']
    def _set_positions(self, pos):
        """Set positions directly, bypassing constraints."""
        self.arrays['positions'][:] = pos
    # NOTE: unlike set_positions(), assigning through this property does
    # NOT apply constraints.
    positions = property(_get_positions, _set_positions,
                         doc='Attribute for direct ' +
                         'manipulation of the positions.')
    def _get_atomic_numbers(self):
        """Return reference to atomic numbers for in-place
        manipulations."""
        return self.arrays['numbers']
    # Writing to this property goes through set_atomic_numbers.
    numbers = property(_get_atomic_numbers, set_atomic_numbers,
                       doc='Attribute for direct ' +
                       'manipulation of the atomic numbers.')
    def _get_cell(self):
        """Return reference to unit cell for in-place manipulations."""
        return self._cell
    # Writing to this property goes through set_cell.
    cell = property(_get_cell, set_cell, doc='Attribute for direct ' +
                    'manipulation of the unit cell.')
    def _get_pbc(self):
        """Return reference to pbc-flags for in-place manipulations."""
        return self._pbc
    # Writing to this property goes through set_pbc.
    pbc = property(_get_pbc, set_pbc,
                   doc='Attribute for direct manipulation ' +
                   'of the periodic boundary condition flags.')
def get_name(self):
"""Return a name extracted from the elements."""
elements = {}
for a in self:
try:
elements[a.symbol] += 1
except:
elements[a.symbol] = 1
name = ''
for element in elements:
name += element
if elements[element] > 1:
name += str(elements[element])
return name
def write(self, filename, format=None):
if format == None:
format = self.format
if format == 'vasp':
write_vasp(filename, self)
elif format == 'xyz':
write_xyz(filename, self)
elif format == 'con':
write_con(filename, self)
else:
raise Exception, "Unknown file format: %s" % format
def string2symbols(s):
    """Convert a chemical formula string to a list of chemical symbols.

    Handles multipliers and parenthesized groups, e.g.
    'H2O' -> ['H', 'H', 'O'] and 'Mg(OH)2' -> ['Mg', 'O', 'H', 'O', 'H'].

    Raises:
        ValueError: if the string is not a well-formed formula.
    """
    n = len(s)
    if n == 0:
        return []
    c = s[0]
    if c.isdigit():
        # Leading multiplier applies to the rest of the string.
        i = 1
        while i < n and s[i].isdigit():
            i += 1
        return int(s[:i]) * string2symbols(s[i:])
    if c == '(':
        # Find the matching closing parenthesis.
        p = 0
        for i, c in enumerate(s):
            if c == '(':
                p += 1
            elif c == ')':
                p -= 1
                if p == 0:
                    break
        # Optional multiplier directly after the group.
        j = i + 1
        while j < n and s[j].isdigit():
            j += 1
        if j > i + 1:
            m = int(s[i + 1:j])
        else:
            m = 1
        return m * string2symbols(s[1:i]) + string2symbols(s[j:])
    if c.isupper():
        # A symbol is an uppercase letter plus an optional lowercase one,
        # followed by an optional count.
        i = 1
        if 1 < n and s[1].islower():
            i += 1
        j = i
        while j < n and s[j].isdigit():
            j += 1
        if j > i:
            m = int(s[i:j])
        else:
            m = 1
        return m * [s[:i]] + string2symbols(s[j:])
    else:
        # Fix: report what went wrong instead of a bare ValueError.
        raise ValueError('Invalid chemical formula: %r' % (s,))
def symbols2numbers(symbols):
    """Convert chemical symbols (or a formula string) to atomic numbers.

    Entries that are already numbers are passed through unchanged.
    """
    if isinstance(symbols, str):
        symbols = string2symbols(symbols)
    return [atomic_numbers[item] if isinstance(item, str) else item
            for item in symbols]
def string2vector(v):
    """Turn 'x', 'y' or 'z' (optionally '-' prefixed) into a unit vector,
    or convert any other sequence to a float array."""
    if not isinstance(v, str):
        return np.array(v, float)
    if v.startswith('-'):
        return -string2vector(v[1:])
    w = np.zeros(3)
    w['xyz'.index(v)] = 1.0
    return w
def default(data, dflt):
    """Helper function for setting default values.

    Scalars pass through unchanged; in a list/tuple each None entry is
    replaced by *dflt*.  Returns None when *data* is None or contains
    only None entries.
    """
    if data is None:
        return None
    if not isinstance(data, (list, tuple)):
        return data
    if all(item is None for item in data):
        return None
    return [dflt if item is None else item for item in data]
def slice2enlist(s):
    """Convert a slice object into a list of (new, old) tuples.

    *s* may also already be a list/tuple of indices, in which case it
    is simply enumerated.
    """
    if isinstance(s, (list, tuple)):
        return enumerate(s)
    # Fix: compare against None with ``is``, not ``==``.
    if s.step is None:
        step = 1
    else:
        step = s.step
    if s.start is None:
        start = 0
    else:
        start = s.start
    return enumerate(range(start, s.stop, step))
class FixConstraint:
    """Base class for classes that fix one or more atoms in some way."""

    def index_shuffle(self, ind):
        """Change the indices.

        When the ordering of the atoms in the Atoms object changes,
        this method can be called to shuffle the indices of the
        constraints.

        ind -- List or tuple of indices.
        """
        raise NotImplementedError

    def repeat(self, m, n):
        """Multiply the constraint for a repeated atoms object.

        *m* is the repeat count; *n* is the length of the underlying
        atoms object, needed for the assignment of multiplied
        constraints to work.
        """
        raise NotImplementedError
class FixConstraintSingle(FixConstraint):
    """Base class for classes that fix a single atom."""

    def index_shuffle(self, ind):
        """The atom index must be stored as self.a."""
        for new, old in slice2enlist(ind):
            if old == self.a:
                self.a = new
                return
        # The constrained atom was not selected by the slice.
        raise IndexError('Constraint not part of slice')
class FixAtoms(FixConstraint):
    """Constraint object for fixing some chosen atoms."""
    def __init__(self, indices=None, mask=None):
        """Constrain chosen atoms.

        Parameters
        ----------
        indices : list of int
           Indices for those atoms that should be constrained.
        mask : list of bool
           One boolean per atom indicating if the atom should be
           constrained or not.

        Examples
        --------
        Fix all Copper atoms:

        >>> c = FixAtoms(mask=[s == 'Cu' for s in atoms.get_chemical_symbols()])
        >>> atoms.set_constraint(c)

        Fix all atoms with z-coordinate less than 1.0 Angstrom:

        >>> c = FixAtoms(mask=atoms.positions[:, 2] < 1.0)
        >>> atoms.set_constraint(c)
        """
        if indices is None and mask is None:
            raise ValueError('Use "indices" or "mask".')
        if indices is not None and mask is not None:
            raise ValueError('Use only one of "indices" and "mask".')
        if mask is not None:
            # Boolean per-atom mask; the dtype of self.index distinguishes
            # mask mode (bool) from index mode (int) everywhere below.
            self.index = np.asarray(mask, bool)
        else:
            # Check for duplicates
            srt = np.sort(indices)
            for i in range(len(indices) - 1):
                if srt[i] == srt[i+1]:
                    raise ValueError(
                        'FixAtoms: The indices array contained duplicates. '
                        'Perhaps you wanted to specify a mask instead, but '
                        'forgot the mask= keyword.')
            self.index = np.asarray(indices, int)
        if self.index.ndim != 1:
            raise ValueError('Wrong argument to FixAtoms class!')
    def adjust_positions(self, old, new):
        """Keep the constrained atoms at their old positions."""
        new[self.index] = old[self.index]
    def adjust_forces(self, positions, forces):
        """Zero the forces on the constrained atoms."""
        forces[self.index] = 0.0
    def index_shuffle(self, ind):
        # See docstring of superclass
        if self.index.dtype == bool:
            self.index = self.index[ind]
        else:
            index = []
            for new, old in slice2enlist(ind):
                if old in self.index:
                    index.append(new)
            if len(index) == 0:
                raise IndexError('All indices in FixAtoms not part of slice')
            self.index = np.asarray(index, int)
    def copy(self):
        """Return an independent FixAtoms with the same selection."""
        if self.index.dtype == bool:
            return FixAtoms(mask=self.index.copy())
        else:
            return FixAtoms(indices=self.index.copy())
    def __repr__(self):
        if self.index.dtype == bool:
            return 'FixAtoms(mask=%s)' % ints2string(self.index.astype(int))
        return 'FixAtoms(indices=%s)' % ints2string(self.index)
    def repeat(self, m, n):
        """Extend the constraint to an atoms object of original length
        *n* repeated *m* times."""
        i0 = 0
        l = len(self.index)
        natoms = 0
        if isinstance(m, int):
            m = (m, m, m)
        index_new = []
        for m2 in range(m[2]):
            for m1 in range(m[1]):
                for m0 in range(m[0]):
                    i1 = i0 + n
                    if self.index.dtype == bool:
                        # Boolean masks just repeat once per image.
                        index_new.extend(self.index)
                    else:
                        # Integer indices shift by the atoms added so far.
                        index_new += [i+natoms for i in self.index]
                    i0 = i1
                    natoms += n
        if self.index.dtype == bool:
            self.index = np.asarray(index_new, bool)
        else:
            self.index = np.asarray(index_new, int)
        return self
    def delete_atom(self, ind):
        """ Removes atom number ind from the index array, if present.

        Required for removing atoms with existing FixAtoms constraints.
        """
        if self.index.dtype == bool:
            self.index = np.delete(self.index, ind)
        else:
            if ind in self.index:
                i = list(self.index).index(ind)
                self.index = np.delete(self.index, i)
            # Shift indices above the removed atom down by one.
            for i in range(len(self.index)):
                if self.index[i] >= ind:
                    self.index[i] -= 1
def ints2string(x, threshold=10):
    """Convert ndarray of ints to string, abbreviating long arrays."""
    if len(x) <= threshold:
        return str(x.tolist())
    shown = str(x[:threshold].tolist())
    # Replace the closing bracket with an ellipsis marker.
    return shown[:-1] + ', ...]'
class FixBondLengths(FixConstraint):
    """Fix several bond lengths at once.

    Applies a collection of FixBondLength constraints iteratively so
    that coupled bonds converge towards satisfying all of them.
    """

    def __init__(self, pairs, iterations=10):
        # One FixBondLength per (a1, a2) pair.
        self.constraints = [FixBondLength(a1, a2)
                            for a1, a2 in pairs]
        self.iterations = iterations

    def adjust_positions(self, old, new):
        """Repeatedly project *new* onto all bond-length constraints."""
        for i in range(self.iterations):
            for constraint in self.constraints:
                constraint.adjust_positions(old, new)

    def adjust_forces(self, positions, forces):
        """Repeatedly project *forces* onto all bond-length constraints."""
        for i in range(self.iterations):
            for constraint in self.constraints:
                constraint.adjust_forces(positions, forces)

    def copy(self):
        # Bug fix: the copy previously dropped self.iterations, silently
        # resetting it to the default of 10.
        return FixBondLengths([constraint.indices
                               for constraint in self.constraints],
                              iterations=self.iterations)
class FixBondLength(FixConstraint):
    """Constraint object for fixing a bond length."""

    def __init__(self, a1, a2):
        """Fix distance between atoms with indices a1 and a2."""
        self.indices = [a1, a2]

    def adjust_positions(self, old, new):
        """Rescale the new bond symmetrically back to its old length."""
        p1, p2 = old[self.indices]
        old_length = sqrt(np.dot(p2 - p1, p2 - p1))
        q1, q2 = new[self.indices]
        bond = q2 - q1
        new_length = sqrt(np.dot(bond, bond))
        # Move both ends by half of the required correction.
        correction = bond * (0.5 * (old_length - new_length) / new_length)
        new[self.indices] = (q1 - correction, q2 + correction)

    def adjust_forces(self, positions, forces):
        """Remove the net force component along the bond direction."""
        bond = np.subtract.reduce(positions[self.indices])
        bond_sq = np.dot(bond, bond)
        force_diff = np.subtract.reduce(forces[self.indices])
        bond = bond * (0.5 * np.dot(force_diff, bond) / bond_sq)
        forces[self.indices] += (-bond, bond)

    def index_shuffle(self, ind):
        'Shuffle the indices of the two atoms in this constraint'
        mapped = [-1, -1]  # Signal error
        for new, old in slice2enlist(ind):
            for i, a in enumerate(self.indices):
                if old == a:
                    mapped[i] = new
        if -1 in mapped:
            raise IndexError('Constraint not part of slice')
        self.indices = mapped

    def copy(self):
        return FixBondLength(self.indices[0], self.indices[1])

    def __repr__(self):
        return 'FixBondLength(%d, %d)' % (self.indices[0], self.indices[1])
class FixedMode(FixConstraint):
    """Constrain atoms to move along directions orthogonal to
    a given mode only."""
    def __init__(self, indices, mode):
        if indices is None:
            raise ValueError('Use "indices".')
        if indices is not None:
            self.index = np.asarray(indices, int)
        # Store the mode as a flattened unit vector.
        self.mode = (np.asarray(mode) / np.sqrt((mode **2).sum())).reshape(-1)
    def adjust_positions(self, oldpositions, newpositions):
        # NOTE(review): this relies on ravel() returning a *view* of a
        # contiguous input array -- the in-place ``-=`` below then updates
        # the caller's array.  For non-contiguous input ravel() copies and
        # the adjustment would be silently lost; confirm callers always
        # pass contiguous arrays.
        newpositions = newpositions.ravel()
        oldpositions = oldpositions.ravel()
        step = newpositions - oldpositions
        newpositions -= self.mode * np.dot(step, self.mode)
        # The reshapes below only rebind the local names.
        newpositions = newpositions.reshape(-1, 3)
        oldpositions = oldpositions.reshape(-1, 3)
    def adjust_forces(self, positions, forces):
        # Same view-based in-place update as in adjust_positions.
        forces = forces.ravel()
        forces -= self.mode * np.dot(forces, self.mode)
        forces = forces.reshape(-1, 3)
    def copy(self):
        return FixedMode(self.index.copy(), self.mode)
    def __repr__(self):
        return 'FixedMode(%s, %s)' % (ints2string(self.index),
                                      self.mode.tolist())
class FixedPlane(FixConstraintSingle):
    """Constrain an atom *a* to move in a given plane only.

    The plane is defined by its normal: *direction*."""

    def __init__(self, a, direction):
        self.a = a
        # Store the unit normal of the plane.
        direction = np.asarray(direction)
        self.dir = direction / sqrt(np.dot(direction, direction))

    def adjust_positions(self, oldpositions, newpositions):
        """Project the step for atom *a* back into the plane."""
        step = newpositions[self.a] - oldpositions[self.a]
        normal_component = np.dot(step, self.dir)
        newpositions[self.a] -= self.dir * normal_component

    def adjust_forces(self, positions, forces):
        """Remove the force component along the plane normal."""
        normal_component = np.dot(forces[self.a], self.dir)
        forces[self.a] -= self.dir * normal_component

    def copy(self):
        return FixedPlane(self.a, self.dir)

    def __repr__(self):
        return 'FixedPlane(%d, %s)' % (self.a, self.dir.tolist())
class FixedLine(FixConstraintSingle):
    """Constrain an atom *a* to move on a given line only.

    The line is defined by its *direction*."""

    def __init__(self, a, direction):
        self.a = a
        # Store the unit vector along the line.
        direction = np.asarray(direction)
        self.dir = direction / sqrt(np.dot(direction, direction))

    def adjust_positions(self, oldpositions, newpositions):
        """Keep only the component of the step along the line."""
        step = newpositions[self.a] - oldpositions[self.a]
        projection = np.dot(step, self.dir)
        newpositions[self.a] = oldpositions[self.a] + projection * self.dir

    def adjust_forces(self, positions, forces):
        """Project the force onto the line direction."""
        forces[self.a] = self.dir * np.dot(forces[self.a], self.dir)

    def copy(self):
        return FixedLine(self.a, self.dir)

    def __repr__(self):
        return 'FixedLine(%d, %s)' % (self.a, self.dir.tolist())
class FixCartesian(FixConstraintSingle):
    "Fix an atom in the directions of the cartesian coordinates."

    def __init__(self, a, mask=(1, 1, 1)):
        self.a = a
        # Invert the mask: the stored value is 1 where the atom MAY move.
        self.mask = -(np.array(mask) - 1)

    def adjust_positions(self, old, new):
        """Zero the step along the fixed cartesian directions."""
        step = (new[self.a] - old[self.a]) * self.mask
        new[self.a] = old[self.a] + step

    def adjust_forces(self, positions, forces):
        """Zero the forces along the fixed cartesian directions."""
        forces[self.a] = forces[self.a] * self.mask

    def copy(self):
        # Re-invert the stored mask back to the constructor convention.
        return FixCartesian(self.a, 1 - self.mask)

    def __repr__(self):
        return 'FixCartesian(indice=%s mask=%s)' % (self.a, self.mask)
class fix_cartesian(FixCartesian):
    "Backwards compatibility for FixCartesian."
    def __init__(self, a, mask=(1, 1, 1)):
        import warnings
        super(fix_cartesian, self).__init__(a, mask)
        # stacklevel=2 points the warning at the caller's code.
        warnings.warn('fix_cartesian is deprecated. Please use FixCartesian'
                      ' instead.', DeprecationWarning, stacklevel=2)
class FixScaled(FixConstraintSingle):
    "Fix an atom in the directions of the unit vectors."

    def __init__(self, cell, a, mask=(1, 1, 1)):
        self.cell = cell
        self.a = a
        self.mask = np.array(mask)

    def adjust_positions(self, old, new):
        """Restore the masked fractional coordinates of atom *a*."""
        scaled_old = np.linalg.solve(self.cell.T, old.T).T
        scaled_new = np.linalg.solve(self.cell.T, new.T).T
        # Copy the fixed fractional components back from the old positions.
        for axis in range(3):
            if self.mask[axis]:
                scaled_new[self.a, axis] = scaled_old[self.a, axis]
        new[self.a] = np.dot(scaled_new, self.cell)[self.a]

    def adjust_forces(self, positions, forces):
        """Zero the force components along the fixed lattice directions."""
        scaled_forces = np.linalg.solve(self.cell.T, forces.T).T
        # -(mask - 1) is 1 for free directions and 0 for fixed ones.
        scaled_forces[self.a] *= -(self.mask - 1)
        forces[self.a] = np.dot(scaled_forces, self.cell)[self.a]

    def copy(self):
        return FixScaled(self.cell, self.a, self.mask)

    def __repr__(self):
        return 'FixScaled(%s, %d, %s)' % (repr(self.cell),
                                          self.a,
                                          repr(self.mask))
class fix_scaled(FixScaled):
    "Backwards compatibility for FixScaled."

    def __init__(self, cell, a, mask=(1, 1, 1)):
        import warnings
        warnings.warn('fix_scaled is deprecated. Please use FixScaled '
                      'instead.', DeprecationWarning, stacklevel=2)
        super(fix_scaled, self).__init__(cell, a, mask)
class SinglePointCalculator:
    """Special calculator for a single configuration.

    Used to remember the energy, force and stress for a given
    configuration.  If the positions, atomic numbers, unit cell, or
    boundary conditions are changed, then asking for
    energy/forces/stress will raise an exception."""

    def __init__(self, energy, forces, stress, magmoms, atoms):
        """Save energy, forces and stresses for the current configuration."""
        self.energy = energy
        if forces is not None:
            forces = np.array(forces, float)
        self.forces = forces
        if stress is not None:
            stress = np.array(stress, float)
        self.stress = stress
        if magmoms is not None:
            magmoms = np.array(magmoms, float)
        self.magmoms = magmoms
        # Private snapshot; later modifications of the caller's atoms are
        # detected by comparing against this copy.
        self.atoms = atoms.copy()

    def calculation_required(self, atoms, quantities):
        """Return True if any quantity in *quantities* is missing or stale."""
        ok = self.atoms == atoms
        return ('forces' in quantities and (self.forces is None or not ok) or
                'energy' in quantities and (self.energy is None or not ok) or
                'stress' in quantities and (self.stress is None or not ok) or
                'magmoms' in quantities and (self.magmoms is None or not ok))

    def update(self, atoms):
        """Raise RuntimeError if *atoms* no longer matches the stored copy."""
        if self.atoms != atoms:
            raise RuntimeError('Energy, forces and stress no longer correct.')

    def get_potential_energy(self, atoms=None):
        """Return the stored energy; raise if missing or stale."""
        if atoms is not None:
            self.update(atoms)
        if self.energy is None:
            raise RuntimeError('No energy.')
        return self.energy

    def get_forces(self, atoms):
        """Return the stored forces; raise if missing or stale."""
        self.update(atoms)
        if self.forces is None:
            raise RuntimeError('No forces.')
        return self.forces

    def get_stress(self, atoms):
        """Return the stored stress; raise if missing or stale."""
        self.update(atoms)
        if self.stress is None:
            raise NotImplementedError
        return self.stress

    def get_spin_polarized(self):
        """Return True if any stored magnetic moment is non-zero."""
        return self.magmoms is not None and self.magmoms.any()

    def get_magnetic_moments(self, atoms=None):
        """Return the stored magnetic moments, or zeros when none were saved."""
        if atoms is not None:
            self.update(atoms)
        if self.magmoms is not None:
            return self.magmoms
        else:
            # BUG FIX: this class never defines 'self.positions' (only
            # self.atoms is stored in __init__), so len(self.positions)
            # raised AttributeError.  Size the zero array from the stored
            # atoms copy instead.
            return np.zeros(len(self.atoms))
class SinglePointKPoint:
    """Container for the eigenvalues and occupations of one k-point/spin."""

    def __init__(self, kpt, spin):
        self.k = kpt     # k-point index
        self.s = spin    # spin index
        self.eps_n = []  # eigenvalues, filled in by the reader
        self.f_n = []    # occupation numbers, filled in by the reader
class SinglePointDFTCalculator(SinglePointCalculator):
    """SinglePointCalculator that additionally stores DFT data: the Fermi
    level and, optionally, per-k-point eigenvalues and occupations.

    All k-point getters assume a gamma-point-only calculation whenever
    ``self.kpts`` is set.
    """

    def __init__(self, energy, forces, stress, magmoms, atoms,
                 eFermi=None):
        SinglePointCalculator.__init__(self, energy, forces, stress,
                                       magmoms, atoms)
        # ROBUSTNESS FIX: always define the attribute so get_fermi_level()
        # returns None instead of raising AttributeError when no Fermi level
        # was supplied (matches the None-return convention of the other
        # getters below).
        self.eFermi = eFermi
        self.kpts = None

    def get_fermi_level(self):
        """Return the Fermi-level(s), or None if unknown."""
        return self.eFermi

    def get_bz_k_points(self):
        """Return the k-points."""
        if self.kpts is not None:
            # we assume that only the gamma point is defined
            return np.zeros((1, 3))
        return None

    def get_number_of_spins(self):
        """Return the number of spins in the calculation.

        Spin-paired calculations: 1, spin-polarized calculation: 2."""
        if self.kpts is not None:
            # we assume that only the gamma point is defined
            return len(self.kpts)
        return None

    def get_spin_polarized(self):
        """Is it a spin-polarized calculation?"""
        nos = self.get_number_of_spins()
        if nos is not None:
            return nos == 2
        return None

    def get_ibz_k_points(self):
        """Return k-points in the irreducible part of the Brillouin zone."""
        return self.get_bz_k_points()

    def get_occupation_numbers(self, kpt=0, spin=0):
        """Return the occupation number array for the given spin channel."""
        # we assume that only the gamma point is defined
        assert kpt == 0
        if self.kpts is not None:
            # Loop variable renamed so it no longer shadows the 'kpt' argument.
            for kpoint in self.kpts:
                if kpoint.s == spin:
                    return kpoint.f_n
        return None

    def get_eigenvalues(self, kpt=0, spin=0):
        """Return the eigenvalue array for the given spin channel."""
        # we assume that only the gamma point is defined
        assert kpt == 0
        if self.kpts is not None:
            for kpoint in self.kpts:
                if kpoint.s == spin:
                    return kpoint.eps_n
        return None
|
zybbigpy/VaspCZ | sourcecode/VaspGenerate.py | import os
import VaspCZ.zzdlib as zzd
def generate_inputs(examp='fcc_Fe_3x3x3'):
    """
    Copy the VASP input files of a bundled example into the current directory.

    :param examp: name of the example folder under <VaspCZ>/examples.
    :return: None
    """
    path = zzd.File.VaspCZ_software_path()
    # Copy the four standard VASP inputs from the example directory.
    for file in 'INCAR,POSCAR,POTCAR,KPOINTS'.split(','):
        os.system(f'cp {path}/examples/{examp}/{file} .')
        print(f'生成文件:{file} (example)')
    # Also provide the PBS submission script, if one has been configured.
    vaspsh_path = zzd.File.Vaspsh_path()
    if 'Vasp.sh' not in os.listdir(vaspsh_path):
        print(f'在路径"{vaspsh_path}"下未找到Vasp.sh文件,将适合该平台的PBS脚本拷贝到该文件夹下。')
    else:
        os.system(f'cp {vaspsh_path}/Vasp.sh .')
        print(f'生成文件:Vasp.sh (example)')
def generate_INCAR_for_Sta():
    """Rewrite the relaxation INCAR in the current directory, in place, for a
    static (single-point) calculation.

    Raises NameError when no INCAR is present.
    """
    print(f'修改当前结构优化INCAR为静态计算INCAR')
    try:
        data_INCAR = zzd.File.openFile('INCAR', 'r')
    except Exception as e:
        raise NameError(f'{e} 当前路径无INCAR文件')
    for nl in range(len(data_INCAR)):
        if 'SYSTEM' in data_INCAR[nl]:
            data_INCAR[nl] = 'SYSTEM=Static\n'  # rename the run
        if 'NSW' in data_INCAR[nl]:
            data_INCAR[nl] = 'NSW=1\n'  # a single ionic step
        if 'IBRION' in data_INCAR[nl]:
            data_INCAR[nl] = 'IBRION=-1\n'  # disable ionic relaxation
        if 'EDIFFG' in data_INCAR[nl]:
            data_INCAR[nl] = '#' + data_INCAR[nl]  # comment out EDIFFG
    zzd.File.openFile('INCAR', 'w', data=data_INCAR)
    print(f'修改为静态计算INCAR完成')
def run():
    """Print the detected VaspCZ installation path (manual smoke test)."""
    print(zzd.File.VaspCZ_software_path())
# Allow running this module directly as a quick smoke test.
if __name__ == '__main__':
    run()
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/kdb.py | import glob
import os
import numpy
import sys
import re
import subprocess
class Kdb():
def check_svn_version(self):
#does not work
svn_info = subprocess.check_output("svn info")
print svn_info
def check_version(self):
if sys.version_info[:2] != (2,7):
print "python 2.7.X required."
return False
return True
def atomAtomPbcVector(self, atoms, a, b):
if not hasattr(atoms, 'ibox'):
atoms.ibox = numpy.linalg.inv(atoms.get_cell())
if not hasattr(atoms, 'pbcVectors'):
atoms.pbcVectors = {}
if (a, b) not in atoms.pbcVectors or (b, a) not in atoms.pbcVectors:
atoms.pbcVectors[(a, b)] = self.pbc(atoms.positions[b] - atoms.positions[a], atoms.get_cell(), atoms.ibox)
atoms.pbcVectors[(b, a)] = -atoms.pbcVectors[(a, b)]
return atoms.pbcVectors[(a, b)]
def atomAtomPbcDistance(self, atoms, a, b):
if not hasattr(atoms, 'pbcDistances'):
atoms.pbcDistances = {}
if (a, b) not in atoms.pbcDistances or (b, a) not in atoms.pbcDistances:
atoms.pbcDistances[(a, b)] = numpy.linalg.norm(self.atomAtomPbcVector(atoms, a, b))
atoms.pbcDistances[(b, a)] = atoms.pbcDistances[(a, b)]
return atoms.pbcDistances[(a, b)]
def atomAtomDistance(self, atoms, a, b):
if not hasattr(atoms, 'distances'):
atoms.distances = {}
if (a, b) not in atoms.distances or (b, a) not in atoms.distances:
atoms.distances[(a, b)] = numpy.linalg.norm(atoms.positions[a] - atoms.positions[b])
atoms.distances[(b, a)] = atoms.distances[(a, b)]
return atoms.distances[(a, b)]
def getNameList(self, atoms):
"""
Returns a sorted list of element names.
"""
nl = []
for name in atoms.get_chemical_symbols():
if name not in nl:
nl.append(name)
return sorted(nl)
def nameCount(self, atoms):
counts = {}
for name in atoms.get_chemical_symbols():
if not name in counts:
counts[name] = 0
counts[name] += 1
return counts
def pbc(self, r, box, ibox = None):
"""
Applies periodic boundary conditions.
Parameters:
r: the vector the boundary conditions are applied to
box: the box that defines the boundary conditions
ibox: the inverse of the box. This will be calcluated if not provided.
"""
#if ibox == None:
#if not hasattr(ibox, 'shape'):
if type(ibox) != numpy.ndarray and type(ibox) != list and type(ibox) != tuple: #MJW fix
ibox = numpy.linalg.inv(box)
vdir = numpy.dot(r, ibox)
vdir = (vdir % 1.0 + 1.5) % 1.0 - 0.5
return numpy.dot(vdir, box)
def per_atom_norm(self, v, box, ibox = None):
'''
Returns a length N numpy array containing per atom distance
v: an Nx3 numpy array
box: box matrix that defines the boundary conditions
ibox: the inverse of the box. will be calculated if not provided
'''
diff = self.pbc(v, box, ibox)
return numpy.array([numpy.linalg.norm(d) for d in diff])
def load_mode(self, modefilein):
'''
Reads a mode.dat file into an N by 3 numpy array
modefilein: filename
'''
f = open(modefilein, 'r')
lines = f.readlines()
f.close()
mode = []
for line in lines:
l = line.strip().split()
for j in range(3):
mode.append(float(l[j]))
mode = numpy.array(mode)
mode.resize(len(mode)/3, 3)
return mode
def save_mode(self, modefileout, displace_vector):
'''
Saves an Nx3 numpy array into a mode.dat file.
modefileout: filename
displace_vector: the mode (Nx3 numpy array)
'''
f = open(modefileout, 'w')
for i in range(len(displace_vector)):
f.write("%.3f %.3f %.3f\n" % (displace_vector[i][0],
displace_vector[i][1], displace_vector[i][2]))
def list_element_combinations(self, kdbdir):
combinations = [os.path.basename(i) for i in glob.glob(os.path.join(kdbdir, "*"))]
return combinations
def combo_split(self, combo):
elements = []
for i in range(len(combo)):
if combo[i] == combo[i].lower():
elements[-1] += combo[i]
else:
elements.append(combo[i])
return elements
def is_symbol_subset(self, a, b):
for symbol in a:
if symbol not in b:
return False
return True
def query_has_all(self, kdbdir, symbols):
result = []
combinations = [os.path.basename(i) for i in glob.glob(os.path.join(kdbdir, "*"))]
for combo in combinations:
elements = self.combo_split(combo)
if not is_symbol_subset(symbols, elements):
continue
for N in glob.glob(os.path.join(kdbdir, combo, '*')):
result.append(N)
return result
|
zybbigpy/VaspCZ | sourcecode/VaspQsub.py | <reponame>zybbigpy/VaspCZ<gh_stars>10-100
#!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
import sys
sys.path.append('/home/zhangzhengde/bin/pythonlib')
import zzdlib as zzd
import os
# Script body: patch ./Vasp.sh according to the command-line arguments and
# then optionally submit it with qsub.
# argv[1] = number of nodes, argv[2] = cores per node, argv[3] = job name
# ('def' keeps the template default), argv[4] = mark the job urgent (yes/no).
data_Sh = zzd.File.openFile('./Vasp.sh','r')
# Defaults parsed from the template's '#PBS -l nodes=X:ppn=Y' line.
defnc = zzd.File.getLine(data_Sh,'#PBS -l nodes')[0].split()[-1]
defnodes = defnc.split(':')[0].split('=')[-1]
defppn = defnc.split(':')[1].split('=')[-1]
defEMER = zzd.File.getLine(data_Sh,'#PBS -q EMERGENCY')
try:
    # Reject a purely numeric job name.
    para1 = eval(sys.argv[3])
    if type(para1) == int:
        print('错误的任务名,退出')
        exit()
except Exception as e:
    para1 = '1'
for i in range(1,len(sys.argv)):
    if i ==3:
        if sys.argv[i] !='def':
            data_Sh = zzd.File.substituteData(data_Sh,'#PBS -N ',' #PBS -N '+sys.argv[3])
    if i ==2:  # argv[1] and argv[2] are handled together here
        if sys.argv[1] != defnodes and sys.argv[2] == defppn:
            data_Sh = zzd.File.substituteData(data_Sh,'#PBS -l nodes',' #PBS -l nodes='+sys.argv[1]+':ppn='+defppn)
        if sys.argv[2] != defppn and sys.argv[1] == defnodes:
            data_Sh = zzd.File.substituteData(data_Sh,'#PBS -l nodes',' #PBS -l nodes='+defnodes+':ppn='+sys.argv[2])
        if sys.argv[1] != defnodes and sys.argv[2] != defppn:
            data_Sh = zzd.File.substituteData(data_Sh,'#PBS -l nodes',' #PBS -l nodes='+sys.argv[1]+':ppn='+sys.argv[2])
    if i ==4:
        if defEMER == 'Not Match':  # marker absent: template is not urgent by default
            if sys.argv[i] == 'yes' or sys.argv[i] == 'y':
                data_Sh.insert(4,' #PBS -q EMERGENCY\n')
        else:  # template is urgent by default
            if sys.argv[i] == 'no' or sys.argv[i] =='n':
                data_Sh.remove(' #PBS -q EMERGENCY\n')
zzd.File.openFile('./Vasp.sh','w',data=data_Sh)
if zzd.Vasp.checkInputs():
    # NOTE(review): '<NAME>' looks like a redacted placeholder in the prompt
    # string -- confirm against the original source.
    usrsel=input('<NAME>! Would you like to submit the job? (default=yes):')
    if usrsel == 'no' or usrsel =='n':
        print('未提交任务')
    else:
        os.system('qsub Vasp.sh')
else:
    print('前检查有问题,请人工检查')
|
zybbigpy/VaspCZ | sourcecode/k_point_test.py | <filename>sourcecode/k_point_test.py
#!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
import os
import argparse
def modify_vasp_sh(jobname, nodes, ppn):
    """Rewrite ./Vasp.sh in place, setting the PBS job name and resources.

    Lines containing ' #PBS -N' or ' #PBS -l nodes' are replaced; every
    other line is kept verbatim.
    """
    def _rewrite(line):
        # Map one template line to its replacement (or itself).
        if ' #PBS -N' in line:
            return f' #PBS -N {jobname}\n'
        if ' #PBS -l nodes' in line:
            return f' #PBS -l nodes={nodes}:ppn={ppn}\n'
        return line

    with open('./Vasp.sh', 'r') as fh:
        lines = fh.readlines()
    with open('./Vasp.sh', 'w') as fh:
        fh.writelines(_rewrite(line) for line in lines)
def run(jobname, nodes, ppn, K):
    """Set up and submit one k-mesh test in subdirectory *K*.

    K is a digit string such as '333', interpreted as a 3x3x3 mesh
    (one character per axis).  Does nothing if the directory exists.
    """
    if os.path.isdir(K):  # directory already exists: nothing to do
        print(f'k_mesh:{K} already exists, do nothing.')
        pass
    else:
        os.system('mkdir '+K)  # create the working directory
        for file in os.listdir():
            if os.path.isfile(file):
                os.system(f'cp {file} {K}')  # copy the input files
        os.chdir(K)  # enter the new directory
        # INCAR, POTCAR and POSCAR are reused unchanged.
        # Rewrite the mesh line (4th line) of KPOINTS from the digits of K.
        with open('./KPOINTS', 'r') as f:
            data = f.readlines()
        data[3] = f'{K[0]} {K[1]} {K[2]}\n'
        with open('./KPOINTS', 'w') as f:
            f.writelines(data)
        # Update Vasp.sh with the job name and resources, then submit.
        modify_vasp_sh(f'{jobname}_{K}', nodes, ppn)
        # Debug helpers:
        #os.system('cat KPOINTS')
        #os.system('cat Vasp.sh')
        os.system('qsub Vasp.sh')  # submit the job
        os.chdir('..')
# Command-line driver: submit one job per requested k-mesh after confirmation.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-jb', '--jobname_prefix', default='k_test', type=str)
    parser.add_argument('-nd', '--nodes', default='1', type=str)
    parser.add_argument('-np', '--ppn', default='8', type=str)
    parser.add_argument('-k', '--k_mesh', default='111,333,555,777,999', type=str)
    args = parser.parse_args()
    jobname = args.jobname_prefix
    nodes = args.nodes
    k_mesh = args.k_mesh.split(',')  # e.g. '333' means a 3x3x3 mesh
    ppn = args.ppn
    print(f'running k_point test \n parameter: \njobname_prefix:{jobname} nodes:{nodes} ppn:{ppn} \nk_mesh:{k_mesh}')
    inp = input('confirm run ([y]es/no): ')
    if inp in ['', 'y', 'yes', 'Y', 'Yes', 'YES']:
        for K in k_mesh:
            run(jobname, nodes, ppn, K)
    else:
        print('Did not run.')
zybbigpy/VaspCZ | test.py | <reponame>zybbigpy/VaspCZ<filename>test.py
# Ad-hoc smoke test: read this repo's install.py through the zzdlib file helper.
import sys
# print(sys.modules)
import VaspCZ.zzdlib as zzd
b = zzd.File.openFile('install.py', 'r')
print(b)
zybbigpy/VaspCZ | vtstscripts-939/kdb/remote_update.py | <reponame>zybbigpy/VaspCZ
from remote_db import RemoteDB
from kdbinsert import KdbInsert
from optparse import OptionParser
import pymysql
from server_config import *
class RemoteUpdate(KdbInsert):
    """Rebuilds the queryable remote kdb from the backup tables.

    Overrides KdbInsert.insert_into_db so that each re-inserted process
    keeps a reference to its original (backup) process primary key.
    Python 2 / pymysql; connection settings come from server_config.
    """

    def __init__(self):
        # Primary key of the backup process currently being re-inserted;
        # set by populate_kdb() before each insert().
        self.current_o_pro_pk = None

    # overloaded insert function from KdbInsert class
    def insert_into_db(self, **args):
        # create instance of database
        db = RemoteDB()
        # add process to db
        db.add_updated_process(args['r'], args['s'], args['p'], args['m'], args['ma'], self.current_o_pro_pk)
        # Indicate that the process was inserted successfully.
        print "good update"

    # changes the nf, dc, and mac values stored in the database
    def change_params(self, nf, dc, mac):
        """Replace the stored nf/dc/mac matching parameters in the backup DB."""
        conn = self.connect_db(db=backup_db_name)
        conn.execute('''DELETE FROM Param''')
        values = [('nf', nf), ('dc', dc), ('mac', mac)]
        for value in values:
            conn.execute('''INSERT INTO Param Values ('%s','%f')''' % value)
        conn.execute('''COMMIT''')
        conn.close()
        print "Parameters have been changed."

    # remove all processes that are not backups
    def remove_all_process(self):
        """Empty the queryable tables; the backup database is untouched."""
        conn = self.connect_db()
        conn.execute('''DELETE FROM Atom''')
        conn.execute('''DELETE FROM Mobile''')
        conn.execute('''DELETE FROM Process''')
        conn.execute('''DELETE FROM Atoms''')
        conn.execute('''COMMIT''')
        conn.close()

    # query the backup database for all processes
    # and reinsert processes into queryable database
    def populate_kdb(self):
        """Re-insert every backed-up process using the current parameters."""
        conn = self.connect_db(db=backup_db_name)
        conn.execute('''SELECT pro_id, reactant_id, saddle_id, product_id FROM Process''')
        process_list = conn.fetchall()
        conn.close()
        db = RemoteDB()
        params = db.get_params()
        if len(process_list) == 0:
            print "No items in database to update."
            return
        print "Updating", db_name, "database."
        for process in process_list:
            reactant = db.get_atoms(process[1], db=backup_db_name)
            saddle = db.get_atoms(process[2], db=backup_db_name)
            product = db.get_atoms(process[3], db=backup_db_name)
            mode = db.get_mode(process[1], db=backup_db_name)
            self.current_o_pro_pk = process[0]
            self.insert(reactant, saddle, product, mode=mode, nf=params['nf'], dc=params['dc'], mac=params['mac'])

    def connect_db(self, db=db_name):
        """Return a cursor on the given MySQL database (see server_config)."""
        return pymysql.connect(host=host, port=port, user=user, passwd=password, db=db).cursor()
return pymysql.connect(host=host, port=port, user=user, passwd=password, db=db).cursor()
# Script entry: re-derive the queryable database from the backup tables,
# optionally with new matching parameters (defaults read from the DB).
if __name__ == "__main__":
    db = RemoteDB()
    params = db.get_params()
    # Parse command line options.
    parser = OptionParser(usage = "%prog [options] reactant saddle product mode")
    parser.add_option("-n", "--nf", dest = "nf", action="store", type="float",
                      help = "neighbor fudge parameter",
                      default = params['nf'])
    parser.add_option("-c", "--dc", dest = "dc", action="store", type="float",
                      help = "distance cutoff parameter",
                      default = params['dc'])
    parser.add_option("-m", "--mac", dest = "mac", action="store", type="float",
                      help = "mobile atom cutoff parameter",
                      default = params['mac'])
    options, args = parser.parse_args()
    update_class = RemoteUpdate()
    update_class.change_params(options.nf, options.dc, options.mac)
    update_class.remove_all_process()
    update_class.populate_kdb()
    print "Update complete."
zybbigpy/VaspCZ | vtstscripts-939/kdb/kdbinsert.py | <gh_stars>10-100
#!/usr/bin/env python
import numpy
from aselite import elements
from aselite import FixAtoms
from kdb import Kdb
class KdbInsert(Kdb):
    """Prepares a reactant/saddle/product process for insertion into a kdb.

    The actual database write (insert_into_db) is overloaded by the local
    and remote subclasses; this class handles atom selection, PBC removal
    and normalisation.  Python 2 code.
    """

    def __init__(self):
        pass

    def coordination_numbers(self, p, nf):
        """Return one coordination number per atom of *p*.

        Two atoms are neighbors when their distance is below the sum of
        their covalent radii scaled by (1 + nf).
        """
        nl = []
        for a in range(len(p)):
            nl.append([])
            for b in range(len(p)):
                if b != a:
                    dist = numpy.linalg.norm(p.get_positions()[a] - p.get_positions()[b])
                    if dist < (elements[p.get_chemical_symbols()[a]]["radius"] +
                               elements[p.get_chemical_symbols()[b]]["radius"]) * (1.0 + nf):
                        nl[a].append(b)
        return [len(l) for l in nl]

    def getMappings(self, a, b, nf, dc, mappings = None):
        """ A recursive depth-first search for a complete set of mappings from atoms
        in configuration a to atoms in configuration b. Do not use the mappings
        argument, this is only used internally for recursion.
        Returns None if no mapping was found, or a dictionary mapping atom
        indices a to atom indices b.
        Note: If a and b are mirror images, this function will still return a
        mapping from a to b, even though it may not be possible to align them
        through translation and rotation. """
        # If this is the top-level user call, create and loop through top-level
        # mappings.
        if mappings == None:
            # Find the least common coordination number in b.
            bCoordinations = self.coordination_numbers(b, nf)
            bCoordinationsCounts = {}
            for coordination in bCoordinations:
                if coordination in bCoordinationsCounts:
                    bCoordinationsCounts[coordination] += 1
                else:
                    bCoordinationsCounts[coordination] = 1
            # Python 2: dict.keys() is a list, so [0] is valid here.
            bLeastCommonCoordination = bCoordinationsCounts.keys()[0]
            for coordination in bCoordinationsCounts.keys():
                if bCoordinationsCounts[coordination] < bCoordinationsCounts[bLeastCommonCoordination]:
                    bLeastCommonCoordination = coordination
            # Find one atom in a with the least common coordination number in b.
            # If it does not exist, return None.
            aCoordinations = self.coordination_numbers(a, nf)
            try:
                aAtom = aCoordinations.index(bLeastCommonCoordination)
            except ValueError:
                return None
            # Create a mapping from the atom chosen from a to each of the atoms with
            # the least common coordination number in b, and recurse.
            for i in range(len(bCoordinations)):
                if bCoordinations[i] == bLeastCommonCoordination:
                    # Make sure the element types are the same.
                    if a.get_chemical_symbols()[aAtom] != b.get_chemical_symbols()[i]:
                        continue
                    mappings = self.getMappings(a, b, nf, dc, {aAtom:i})
                    # If the result is not none, then we found a successful mapping.
                    if mappings is not None:
                        return mappings
            # There were no mappings.
            return None
        # This is a recursed invocation of this function.
        else:
            # Find an atom from a that has not yet been mapped.
            unmappedA = 0
            while unmappedA < len(a):
                if unmappedA not in mappings.keys():
                    break
                unmappedA += 1
            # Calculate the distances from unmappedA to all mapped a atoms.
            distances = {}
            for i in mappings.keys():
                distances[i] = self.atomAtomDistance(a, unmappedA, i)
            # Loop over each unmapped b atom. Compare the distances between it and
            # the mapped b atoms to the corresponding distances between unmappedA
            # and the mapped atoms. If everything is similar, create a new mapping
            # and recurse.
            for bAtom in range(len(b)):
                if bAtom not in mappings.values():
                    for aAtom in distances:
                        # Break if type check fails.
                        if b.get_chemical_symbols()[bAtom] != a.get_chemical_symbols()[unmappedA]:
                            break
                        # Break if distance check fails
                        bDist = self.atomAtomDistance(b, bAtom, mappings[aAtom])
                        if abs(distances[aAtom] - bDist) > dc:
                            break
                    else:
                        # All distances were good, so create a new mapping.
                        newMappings = mappings.copy()
                        newMappings[unmappedA] = bAtom
                        # If this is now a complete mapping from a to b, return it.
                        if len(newMappings) == len(a):
                            return newMappings
                        # Otherwise, recurse.
                        newMappings = self.getMappings(a, b, nf, dc, newMappings)
                        # Pass any successful mapping up the recursion chain.
                        if newMappings is not None:
                            return newMappings
            # There were no mappings.
            return None

    def stripUnselectedAtoms(self, atoms, selected):
        """ Removes any atoms from atoms that are not in selected and returns a new
        structure and a mapping from atoms in the old structure to atoms in the new
        structure. """
        src = atoms.copy()
        dest = atoms.copy()
        # Empty out the destination structure, keeping its cell/metadata.
        while len(dest) > 0:
            dest.pop()
        mapping = {}
        index = 0
        constraints = []
        for i in selected:
            mapping[i] = index
            index += 1
            # Preserve FixAtoms membership for the kept atoms.
            if i in src.constraints[0].index:
                constraints.append(index)
            dest.append(src[i])
        dest.set_constraint(FixAtoms(constraints))
        return dest, mapping

    def getProcessMobileAtoms(self, r, s, p, mac):
        """ Returns a list of atom indices that move more than mac
        between reactant and saddle, saddle and product, or
        reactant and product. If no atoms move more than mac, returns
        the atom that moves the most. """
        mobileAtoms = []
        reactant2saddle = self.per_atom_norm(s.positions - r.positions, s.get_cell())
        product2saddle = self.per_atom_norm(s.positions - p.positions, s.get_cell())
        reactant2product = self.per_atom_norm(p.positions - r.positions, s.get_cell())
        for i in range(len(s)):
            if max(reactant2saddle[i], product2saddle[i], reactant2product[i]) > mac:
                mobileAtoms.append(i)
        if len(mobileAtoms) == 0:
            mobileAtoms.append(list(reactant2product).index(max(reactant2product)))
        return mobileAtoms

    def getProcessNeighbors(self, mobileAtoms, r, s, p, nf):
        """ Given a list mobile atoms, a reactant, saddle, and product,
        returns a list of neighboring atoms according to the nf (NEIGHBOR_FUDGE)
        paramter."""
        neighborAtoms = []
        for atom in mobileAtoms:
            r1 = elements[s.get_chemical_symbols()[atom]]["radius"]
            for i in range(len(s)):
                if i in mobileAtoms or i in neighborAtoms:
                    continue
                r2 = elements[s.get_chemical_symbols()[i]]["radius"]
                maxDist = (r1 + r2) * (1.0 + nf)
                # An atom neighboring the process in any of the three
                # structures counts as a neighbor.
                if self.atomAtomPbcDistance(r, atom, i) < maxDist:
                    neighborAtoms.append(i)
                elif self.atomAtomPbcDistance(s, atom, i) < maxDist:
                    neighborAtoms.append(i)
                elif self.atomAtomPbcDistance(p, atom, i) < maxDist:
                    neighborAtoms.append(i)
        return neighborAtoms

    #function will be overridden in remote/local classes
    def insert_into_db(self, **args):
        print "function not yet overloaded"

    def insert(self, reactant, saddle, product, mode=None, nf=0.2, dc=0.3, mac=0.7, kdbname='kdb.db'):
        """Normalise a process (select atoms, unwrap PBCs, center on the
        saddle) and hand it to insert_into_db().  Returns 0 on abort."""
        # Keep a copy of the original data
        original_reactant = reactant.copy()
        original_saddle = saddle.copy()
        original_product = product.copy()
        if mode is not None:
            original_mode = mode.copy()
        else:
            original_mode = None
        mobileAtoms = self.getProcessMobileAtoms(reactant, saddle, product, mac)
        # NOTE(review): getProcessNeighbors is declared (mobileAtoms, r, s, p, nf)
        # but is called here with product/saddle swapped; the three structures
        # are checked symmetrically inside, so the result looks unaffected --
        # confirm before "fixing" the argument order.
        selectedAtoms = mobileAtoms + self.getProcessNeighbors(mobileAtoms, reactant, product, saddle, nf)
        # Quit if not enough selected atoms.
        if len(selectedAtoms) < 2:
            print "kdbinsert abort: Too few atoms in process, or neighbor_fudge too small."
            return 0
        # Remove unselected atoms.
        reactant, mapping = self.stripUnselectedAtoms(reactant, selectedAtoms)
        saddle, mapping = self.stripUnselectedAtoms(saddle, selectedAtoms)
        product, mapping = self.stripUnselectedAtoms(product, selectedAtoms)
        # Update the mode.
        if mode is not None:
            newMode = numpy.zeros((len(selectedAtoms), 3))
            for m in mapping:
                newMode[mapping[m]] = mode[m]
            mode = newMode
        # Remove PBC's.
        temp = reactant.copy()
        undone = range(len(temp))
        working = [undone.pop()]
        while len(undone) > 0:
            if len(working) == 0:
                print "kdbinsert abort: Dissociated reactant, or neighbor_fudge too small."
                return 0
            a = working.pop()
            for i in undone[:]:
                v = self.pbc(temp.positions[i] - temp.positions[a], temp.get_cell())
                d = numpy.linalg.norm(v)
                if d < (elements[temp.get_chemical_symbols()[a]]["radius"] +
                        elements[temp.get_chemical_symbols()[i]]["radius"]) * (1.0 + nf):
                    # Re-anchor atom i next to atom a using the unwrapped vector.
                    temp[i].position = temp[a].position + v
                    working.append(i)
                    undone.remove(i)
        v1s = self.pbc(saddle.positions - reactant.positions, reactant.get_cell())
        v12 = self.pbc(product.positions - reactant.positions, reactant.get_cell())
        reactant = temp
        saddle.positions = reactant.positions + v1s
        product.positions = reactant.positions + v12
        # Find saddle center of coordinates.
        coc = numpy.zeros((1,3))
        for i in range(len(saddle)):
            coc += saddle[i].position
        coc = coc / len(saddle)
        # Shift all structures so that the saddle center of coordinates is at
        # [0, 0, 0].
        reactant.positions = reactant.positions - coc
        saddle.positions = saddle.positions - coc
        product.positions = product.positions - coc
        # Give all structures a huge box.
        # TODO: all references to boxes should be removed after PBCs are removed.
        reactant.cell = numpy.identity(3) * 1024
        saddle.cell = numpy.identity(3) * 1024
        product.cell = numpy.identity(3) * 1024
        # get mobile_list
        mob_list = []
        for atom in mobileAtoms:
            mob_list.append(mapping[atom])
        arg_dict = {'or': original_reactant, 'os': original_saddle, 'op': original_product, 'om': original_mode,
                    'r': reactant, 's': saddle, 'p': product, 'm': mode, 'ma': mob_list,
                    'kdbname': kdbname, 'nf': nf, 'dc': dc, 'mac': mac}
        # function is overloaded in either local_insert.py or remote_insert.py
        return self.insert_into_db(**arg_dict)
|
zybbigpy/VaspCZ | vtstscripts-939/kdb_client.py |
import sys
import os
import shutil
# Temporarily switch into this script's directory so the package-relative
# imports below resolve regardless of the caller's cwd.
cwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
from kdb import kdb
from kdb import remote_client
# this is not ideal but to pickle the ASE.Atoms objects we need the class files in the same directory
# that the program is being called from.
shutil.copyfile('kdb/aselite.py', 'aselite.py')
import aselite
os.remove('aselite.py')
os.remove('aselite.pyc')
os.chdir(cwd)
def run(args):
if not kdb.Kdb().check_version():
sys.exit()
if len(args) < 1:
print "first parameter sohuld be either: insert or query"
sys.exit()
if args[0] == 'insert':
if len(args) < 4:
print "parameters for insert should include reactant, saddle, and product files."
sys.exit()
try:
reactant = aselite.read_any(args[1])
saddle = aselite.read_any(args[2])
product = aselite.read_any(args[3])
except IOError:
print "One or more files could not be read."
sys.exit()
try:
mode = kdb.Kdb().load_mode(args[3])
except:
mode = None
remote_client.server_insert(reactant, saddle, product, mode)
elif args[0] == 'query':
if len(args) < 2:
print "parameters for query should include a reactant file."
sys.exit()
try:
reactant = aselite.read_any(args[1])
except IOError:
print "could not read reactant file."
sys.exit()
remote_client.server_query(reactant)
else:
print "first parameter sohuld be either: insert or query"
# Command-line entry point.
if __name__ == "__main__":
    args = sys.argv[1:]
    run(args)
zybbigpy/VaspCZ | sourcecode/VaspNEBCheckDist.py | <filename>sourcecode/VaspNEBCheckDist.py
import os
import argparse
import VaspCZ.zzdlib as zzd
def VaspNEBCheckDist(POSorCONT):
    """Print image-to-image distances of a NEB run using VTST's dist.pl.

    POSorCONT selects which file ('POSCAR' or 'CONTCAR') represents the
    intermediate images; the two endpoints (00 and the last image) always
    use POSCAR.  Must be run inside a NEB directory (contains 00/ and INCAR).
    """
    if '00' in os.listdir() and 'INCAR' in os.listdir():
        data_log = zzd.File.openFile('./INCAR')
        # Number of intermediate images, parsed from the IMAGES tag.
        image = zzd.File.getLine(data_log,'IMAGES')[0].split('=')[-1].strip('\n')
        if int(image) <=9:
            # os.system('cp ini/CONTCAR 00/CONTCAR')
            # os.system('cp fin/CONTCAR 0'+str(int(image)+1)+'/CONTCAR')
            for i in range(0,int(image)+1):
                if i == 0:
                    # First pair: endpoint 00 always uses POSCAR.
                    dist = zzd.getshellResult('dist.pl ./0'+str(i)+'/POSCAR ./0'+str(i+1)+'/'+POSorCONT)
                elif i == int(image):
                    # Last pair: the final endpoint always uses POSCAR.
                    dist = zzd.getshellResult('dist.pl ./0'+str(i)+'/'+POSorCONT+' ./0'+str(i+1)+'/POSCAR')
                else:
                    dist = zzd.getshellResult('dist.pl ./0'+str(i)+'/'+POSorCONT+' ./0'+str(i+1)+'/'+POSorCONT)
                print('{} 0{}-0{} {}'.format(POSorCONT,i,i+1,dist[0].strip('\n')))
        else:
            # Directory names are built as '0'+digit, so only <=9 images work.
            print('image too large')
    else:
        print('当前不在NEB目录,退出程序')
        exit()
# Command-line entry: choose POSCAR or CONTCAR for the intermediate images.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='manual to this script')
    parser.add_argument('--POSorCONT', type=str, default='POSCAR')
    args = parser.parse_args()
    VaspNEBCheckDist(args.POSorCONT)
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/local_client.py | <gh_stars>10-100
# Main.py is the user entry point into the program.
import sys
from kdb import Kdb
import local_insert
import local_query
import local_db
from aselite import read_any
from config import *
def run(args):
if not Kdb().check_version():
sys.exit()
if len(args) < 1:
print "first parameter should be either: insert, query"
sys.exit()
if args[0] == 'insert':
if len(args) < 4:
print "parameters for insert should include reactant, saddle, and product files."
sys.exit()
#read files
try:
reactant = read_any(args[1])
saddle = read_any(args[2])
product = read_any(args[3])
except IOError:
print "One or more files could not be read."
sys.exit()
try:
mode = Kdb().load_mode(args[3])
except:
mode = None
#grab params
db = local_db.LocalDB(KDB_NAME)
params = db.get_params()
#insert
local_insert.LocalInsert().insert(reactant, saddle, product, mode=mode, dc=params['dc'], nf=params['nf'], mac=params['mac'], kdbname=KDB_NAME)
elif args[0] == 'query':
if len(args) < 2:
print "parameters for query should include a reactant file."
sys.exit()
#read file
try:
reactant = read_any(args[1])
except IOError:
print "reactant file could not be read."
sys.exit()
#grab params
db = local_db.LocalDB(KDB_NAME)
params = db.get_params()
#query
local_query.LocalQuery().query(reactant, "./kdbmatches", dc=params['dc'], nf=params['nf'], kdbname=KDB_NAME)
# Command-line entry point.
if __name__ == "__main__":
    args = sys.argv[1:]
    run(args)
zybbigpy/VaspCZ | sourcecode/VaspCZ1.0.2.py | <gh_stars>10-100
#!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
"""
Linux command-line user interface of the VaspCZ application.
"""
import os, sys
import numpy as np
import time
import VaspGenerate
import VaspCZ.zzdlib as zzd
import utils
import subprocess

python = sys.executable  # interpreter used to launch the helper scripts
current_py_folder = os.path.dirname(os.path.abspath(__file__))
# Resolve the 'sourcecode' directory whether this file lives inside it or
# one level above it.
VaspCZ_path = [
    os.path.dirname(current_py_folder) if 'sourcecode' in current_py_folder else current_py_folder][0] + '/sourcecode'
__version__ = '1.0.2'
def run():
    """Interactive top-level menu loop of the VaspCZ Linux interface.

    First warns if the Vasp.sh template or the PseudoPotential folder are
    not configured, then dispatches to the Opt/Sta, NEB and Test sub-menus.
    Most menu actions call a helper and then terminate the process.
    """
    # print(VaspCZ_path)
    # Check that the Vasp.sh template and the pseudopotential folder exist.
    sh_pash = zzd.File.Vaspsh_path()
    if 'Vasp.sh' in os.listdir(zzd.File.Vaspsh_path()):
        pass
    else:
        print(f'VaspCZ WARNING: Vasp.sh文件没有正确配置。请将适合本平台PBS脚本拷贝到目录"{sh_pash}"下并命名为Vasp.sh,再重新运行程序。')
    if 'PseudoPotential' in os.listdir(zzd.File.Vasp_pseudo_path()):
        pass
    else:
        print(f'VaspCZ WARNING: 贋势文件没有正确配置。请将将你的贋势文件拷贝到目录"{zzd.File.Vasp_pseudo_path()}"下并命名:PseudoPotential/[贋势类型]。')
    i = 0
    while True:
        # Top-level menu: a number selects a module.
        content = {0: 'Exit', 1: 'Opt and Sta module', 2: 'NEB module', 3: 'Test module'}
        ipt = input(utils.gui_string(
            title='VaspCZ interface',
            content=content,
            footnote=f'by: <NAME> (<EMAIL>) version: {__version__}'))
        try:
            ipt = content[int(ipt)]
        except Exception as e:
            print(f'VaspCZ 功能{e}选择错误,请正确输入.')
            pass
        # print(ipt)
        if ipt == 'Opt and Sta module':
            print(f'OS module selected')
            content_os = utils.zip_content([
                'Back', 'Generate inputs (example)', 'Generate INCAR for Sta', 'Generate POTCAR',
                'Generate KPOINTS', 'Generate Vasp.sh',
                'Vasp Keep Inputs', 'Vasp Pre-check and Qsub', 'Check Results'])
            while True:
                ipt1 = input(utils.gui_string(
                    'Optimization and Static calculation', content=content_os))
                try:
                    ipt1 = content_os[int(ipt1)]
                except Exception as e:
                    print(f'OS module 功能{e}选择错误,请正确输入.')
                if ipt1 == 'Generate inputs (example)':
                    VaspGenerate.generate_inputs(examp='fcc_Fe_3x3x3')
                    exit()
                elif ipt1 == 'Generate INCAR for Sta':
                    VaspGenerate.generate_INCAR_for_Sta()
                    exit()
                elif ipt1 == 'Generate POTCAR':
                    utils.deal_with_gen_pot()
                    exit()
                elif ipt1 == 'Generate KPOINTS':
                    utils.deal_with_gen_kpoints()
                    exit()
                elif ipt1 == 'Generate Vasp.sh':
                    utils.deal_with_gen_vasp_sh()
                    exit()
                elif ipt1 == 'Vasp Keep Inputs':
                    utils.deal_with_vasp_keep_inputs()
                    exit()
                elif ipt1 == 'Vasp Pre-check and Qsub':
                    zzd.Vasp.check_and_qsub(need_input=True)
                    exit()
                elif ipt1 == 'Check Results':
                    utils.deal_with_check_results()
                    exit()
                elif ipt1 == 'Back':
                    break
        elif ipt == 'NEB module':
            print(f'NEB module selected')
            content_neb = utils.zip_content([
                'Back', 'NEB Opt-Sta', 'NEB Sta-NEB', 'NEB Vibration Analysis', 'NEB Keep INFI/Opt Inputs', 'NEB Keep Inputs',
                'NEB Check RMS', 'NEB Check Dist', 'NEB Check Results', 'NEB Check Vibration Results'
            ])
            while True:
                ipt2 = input(utils.gui_string(
                    title='NEB calculation', content=content_neb
                ))
                try:
                    ipt2 = content_neb[int(ipt2)]
                except Exception as e:
                    print(f'NEB module 功能{e}选择错误,请正确输入.')
                # print(ipt2)
                if ipt2 == 'Back':
                    break
                elif ipt2 == 'NEB Opt-Sta':
                    utils.deal_with_neb_opt_sta()
                elif ipt2 == 'NEB Sta-NEB':
                    utils.deal_with_neb_sta_neb()
                    exit()
                elif ipt2 == 'NEB Vibration Analysis':
                    utils.deal_with_neb_vibration_analysis()
                    exit()
                elif ipt2 == 'NEB Keep INFI/Opt Inputs':
                    # These actions shell out to sibling scripts via the
                    # current interpreter.
                    subprocess.call(f'{python} {VaspCZ_path}/VaspNEBKeepINFI_OptInputs.py', shell=True)
                    exit()
                elif ipt2 == 'NEB Keep Inputs':
                    subprocess.call(f'{python} {VaspCZ_path}/VaspNEBKeepInputs.py', shell=True)
                    exit()
                elif ipt2 == 'NEB Check RMS':
                    subprocess.call(f'{python} {VaspCZ_path}/VaspNEBCheckRMS.py', shell=True)
                    exit()
                elif ipt2 == 'NEB Check Dist':
                    ipt2_nebcd = input('Check ([POS]/CONT): ')
                    pos_or_cont = 'POSCAR' if (ipt2_nebcd == 'POS' or ipt2_nebcd == '') else 'CONTCAR'
                    subprocess.call(f'{python} {VaspCZ_path}/VaspNEBCheckDist.py --POSorCONT={pos_or_cont}', shell=True)
                    exit()
                elif ipt2 == 'NEB Check Results':
                    utils.deal_with_neb_check_results()
                elif ipt2 == 'NEB Check Vibration Results':
                    code = [sys.executable, f'{VaspCZ_path}/VaspVibAna_Result.py', '--isprint=True']
                    subprocess.call(code, shell=False)
                    exit()
                else:
                    pass
        elif ipt == 'Test module':
            print(f'Tese module selected')
            content_test = utils.zip_content([
                'Back',
                'ENCUT Test',
                'KPOINTS Mesh Test'
            ])
            while True:
                ipt3 = input(utils.gui_string(title='Vasp Test Module', content=content_test))
                try:
                    ipt3 = content_test[int(ipt3)]
                except Exception as e:
                    print(f'Test module 功能{e}选择错误,请正确输入.')
                if ipt3 == 'Back':
                    break
                elif ipt3 == 'ENCUT Test':
                    utils.deal_with_test_encut()
                    exit()
                elif ipt3 == 'KPOINTS Mesh Test':
                    utils.deal_with_test_kpoints()
                    exit()
                else:
                    pass
        elif ipt == 'Exit':
            break
        else:
            pass
# Launch the interactive menu when executed directly.
if __name__ == '__main__':
    run()
zybbigpy/VaspCZ | vtstscripts-939/kdb/remote_initialize.py | <filename>vtstscripts-939/kdb/remote_initialize.py
import pymysql
import sys
from server_config import *
# Try to create the databases without any tables.
def create_databases():
if user == '' or password == '' or port == None or host == '':
print "Cannot initialize Database until server_config.py is properly setup."
return
print "creating databases"
conn = pymysql.connect(host=host, port=port, user=user, passwd=password).cursor()
try:
conn.execute('CREATE DATABASE %s' % db_name)
conn.execute('CREATE DATABASE %s' % backup_db_name)
conn.execute('CREATE DATABASE %s' % user_db_name)
except pymysql.err.ProgrammingError:
print "databases were previoulsy created. Exiting now."
conn.close()
sys.exit()
conn.close()
# creates user table
def create_user_table():
    """Create the User table inside the user database.

    user_id is the primary key; email is unique but not an identifier;
    passwords are stored hashed, per the comments below.
    """
    conn = connect_db(db=user_db_name)
    # user_id is a unique identifier
    # email is unique but not an identifier
    # password is MD5 hashed
    conn.execute('''CREATE TABLE User(user_id INT PRIMARY KEY UNIQUE NOT NULL,
                    first_name VARCHAR(20) NOT NULL,
                    last_name VARCHAR(20) NOT NULL,
                    email VARCHAR(255) NOT NULL UNIQUE,
                    password VARCHAR(100) NOT NULL)''')
    # NOTE(review): '<PASSWORD>' looks like a redaction artifact -- per the
    # comment above this was presumably MD5(password); restore the real
    # hashing function before running this statement, it is not valid SQL.
    conn.execute('''UPDATE User SET password = <PASSWORD>(password)''')
    conn.execute('''COMMIT''')
    conn.close()
# create tables for database that holds queryable Atoms
def create_tables():
    """Create the queryable tables (Atoms, Atom, Process, Mobile) in db_name.

    Process.original_pro_id additionally references the Process table in
    the backup database (added via ALTER TABLE below).
    """
    print "creating tables"
    conn = connect_db(db=db_name)
    # atoms_id is a unique identifier
    # atoms_name is the structure name IE: 'Al' or 'CuO'
    # atoms_cellXX are bounds for the 3D cell IE: 00 = top left, 02 = top right
    conn.execute('''CREATE TABLE Atoms(atoms_id INT PRIMARY KEY UNIQUE NOT NULL,
                    atoms_cell00 REAL NOT NULL,
                    atoms_cell01 REAL NOT NULL,
                    atoms_cell02 REAL NOT NULL,
                    atoms_cell10 REAL NOT NULL,
                    atoms_cell11 REAL NOT NULL,
                    atoms_cell12 REAL NOT NULL,
                    atoms_cell20 REAL NOT NULL,
                    atoms_cell21 REAL NOT NULL,
                    atoms_cell22 REAL NOT NULL)''')
    # atom_id is a unique identifier
    # atoms_id is a reference value to identify what collection of atoms it belongs in
    # num is the ase.atom.index value
    # symbol is the atom's chemical symbol IE: 'Al' 'Cu'
    # x,y,z_coord is the x,y,z position of the atom
    # fixed is a boolean constraint if the atom is fixed or not
    conn.execute('''CREATE TABLE Atom(atom_id INT PRIMARY KEY UNIQUE NOT NULL,
                    atoms_id INT NOT NULL,
                    num INT NOT NULL,
                    symbol TEXT NOT NULL,
                    x_coord REAL NOT NULL,
                    y_coord REAL NOT NULL,
                    z_coord REAL NOT NULL,
                    mode0 REAL NOT NULL,
                    mode1 REAL NOT NULL,
                    mode2 REAL NOT NULL,
                    fixed INT NOT NULL,
                    FOREIGN KEY(atoms_id) REFERENCES Atoms(atoms_id))''')
    # pro_id is a unique identifier
    # all the rest of the values are references to atoms_ids
    conn.execute('''CREATE TABLE Process(pro_id INT PRIMARY KEY UNIQUE NOT NULL,
                    name TEXT NOT NULL,
                    original_pro_id INT NOT NULL,
                    reactant_id INT NOT NULL,
                    saddle_id INT NOT NULL,
                    product_id INT NOT NULL,
                    FOREIGN KEY(reactant_id) REFERENCES Atoms(atoms_id),
                    FOREIGN KEY(saddle_id) REFERENCES Atoms(atoms_id),
                    FOREIGN KEY(product_id) REFERENCES Atoms(atoms_id))''')
    # this foreign key refers to a Process table in the backup database
    conn.execute('''ALTER TABLE Process
                    ADD FOREIGN KEY fk_name(original_pro_id) REFERENCES %s.Process(pro_id)
                    ON DELETE CASCADE''' % backup_db_name)
    # mob_id is a unique identifier
    # num is the atom number
    # pro_id is a reference to the process the atom belongs to
    conn.execute('''CREATE TABLE Mobile(mob_id INT PRIMARY KEY UNIQUE NOT NULL,
                    num INT NOT NULL,
                    pro_id INT NOT NULL,
                    FOREIGN KEY (pro_id) REFERENCES Process(pro_id))''')
    # commit and close the connection
    conn.execute('COMMIT')
    conn.close()
# creates tables to hold backups. NOT queryable objects.
def create_backup_tables():
    """Create the backup-database tables (non-queryable copies) plus Param.

    Mirrors create_tables() but Process records the submitting user and
    the Param table stores the matching parameters (nf, dc, mac) with
    their defaults.
    """
    print "creating backup tables"
    conn = connect_db(db=backup_db_name)
    # atoms_id is a unique identifier
    # atoms_name is the structure name IE: 'Al' or 'CuO'
    # atoms_cellXX are bounds for the 3D cell IE: 00 = top left, 02 = top right
    conn.execute('''CREATE TABLE Atoms(atoms_id INT PRIMARY KEY UNIQUE NOT NULL,
                    atoms_cell00 REAL NOT NULL,
                    atoms_cell01 REAL NOT NULL,
                    atoms_cell02 REAL NOT NULL,
                    atoms_cell10 REAL NOT NULL,
                    atoms_cell11 REAL NOT NULL,
                    atoms_cell12 REAL NOT NULL,
                    atoms_cell20 REAL NOT NULL,
                    atoms_cell21 REAL NOT NULL,
                    atoms_cell22 REAL NOT NULL)''')
    # atom_id is a unique identifier
    # atoms_id is a reference value to identify what collection of atoms it belongs in
    # num is the ase.atom.index value
    # symbol is the atom's chemical symbol IE: 'Al' 'Cu'
    # x,y,z_coord is the x,y,z position of the atom
    # fixed is a boolean constraint if the atom is fixed or not
    conn.execute('''CREATE TABLE Atom(atom_id INT PRIMARY KEY UNIQUE NOT NULL,
                    atoms_id INT NOT NULL,
                    num INT NOT NULL,
                    symbol TEXT NOT NULL,
                    x_coord REAL NOT NULL,
                    y_coord REAL NOT NULL,
                    z_coord REAL NOT NULL,
                    mode0 REAL NOT NULL,
                    mode1 REAL NOT NULL,
                    mode2 REAL NOT NULL,
                    fixed INT NOT NULL,
                    FOREIGN KEY(atoms_id) REFERENCES Atoms(atoms_id))''')
    # pro_id is a unique identifier
    # all the rest of the values are references to atoms_ids
    conn.execute('''CREATE TABLE Process(pro_id INT PRIMARY KEY UNIQUE NOT NULL,
                    name TEXT NOT NULL,
                    reactant_id INT NOT NULL,
                    saddle_id INT NOT NULL,
                    product_id INT NOT NULL,
                    user_id INT NOT NULL,
                    FOREIGN KEY(reactant_id) REFERENCES Atoms(atoms_id),
                    FOREIGN KEY(saddle_id) REFERENCES Atoms(atoms_id),
                    FOREIGN KEY(product_id) REFERENCES Atoms(atoms_id))''')
    # cross-database foreign key into the user database's User table
    conn.execute('''ALTER TABLE Process
                    ADD FOREIGN KEY fk_name(user_id) REFERENCES %s.User(user_id)
                    ON DELETE CASCADE''' % user_db_name)
    # config_option is the name of the configuration, IE: nf, dc, or mac
    # config_value is the value of the corresponding configuration option
    conn.execute('''CREATE TABLE Param(config_option varchar(25) UNIQUE NOT NULL,
                    config_value REAL NOT NULL)''')
    # add default parameters for Params table
    # tuple interleaves (option, value) pairs consumed by the three VALUES groups
    default_values = ('nf', .2, 'dc', .3, 'mac', .7)
    conn.execute('''INSERT INTO Param (config_option, config_value)
                    VALUES ('%s','%f'), ('%s','%f'), ('%s','%f')''' % default_values)
    # commit and close the connection
    conn.execute('COMMIT')
    conn.close()
# function used to completely purge the database.
# generally should only be used for testing.
def purge_db():
    """Drop all three databases.  Destructive; intended for testing only."""
    print "purging database"
    # 'conn' is a cursor (see connect_db); the connection itself is never
    # explicitly closed.
    conn = pymysql.connect(host=host, port=port, user=user, passwd=password).cursor()
    conn.execute('DROP DATABASE %s' % db_name)
    conn.execute('DROP DATABASE %s' % backup_db_name)
    conn.execute('DROP DATABASE %s' % user_db_name)
    conn.close()
# standard connect to database function.
# this should maybe just be put in kdb.py (but it would have to be overloaded for remote/local configuations)
def connect_db(db=db_name):
    """Open a pymysql connection to database *db* and return a cursor on it.

    NOTE(review): the Connection object itself is discarded, so callers can
    only close the cursor; the connection is reclaimed by garbage collection.
    """
    return pymysql.connect(host=host, port=port, user=user, passwd=password, db=db).cursor()
if __name__ == "__main__":
    # CLI: no args            -> create databases and all tables
    #      'purge'            -> drop everything, then recreate from scratch
    #      'userpurge' EMAIL  -> remove all processes owned by EMAIL
    args = sys.argv
    if len(args) > 1:
        if 'userpurge' in args:
            import remote_db
            db = remote_db.RemoteDB()
            if len(args)>2:
                db.remove_process_user(args[2])
                print "removed"
            else:
                print "You need to specify email."
        elif 'purge' in args:
            purge_db()
            create_databases()
            create_user_table()
            create_backup_tables()
            create_tables()
    else:
        create_databases()
        create_user_table()
        create_backup_tables()
        create_tables()
zybbigpy/VaspCZ | vtstscripts-939/xyz2con.py | <reponame>zybbigpy/VaspCZ<filename>vtstscripts-939/xyz2con.py
#!/usr/bin/env python
# Convert an .xyz structure file to .con format inside a cubic box of side
# BOXSIZE.  (Python 2 script: uses print-statement syntax.)
import aselite
import numpy as np
from sys import argv, exit
if len(argv) < 3 or '-h' in argv:
    print "usage: xyz2con.py FILENAME BOXSIZE\n"
    exit(0)
filename = argv[1]
atoms = aselite.read_xyz(filename)
# shift coordinates so the smallest component sits at the origin
atoms.positions -= np.min(atoms.positions)
a = float(argv[2])
atoms.set_cell((a,a,a))
# output name mirrors the input, with the 'xyz' substring replaced by 'con'
aselite.write_con(filename.replace('xyz', 'con'), atoms)
|
zybbigpy/VaspCZ | sourcecode/NEBCheck1.1.py | #!/home/zhuguifeng/BIN/bin/python3
# -*- coding: utf-8-*-
import sys
sys.path.append('/home/zhangzhengde/bin/pythonlib')
import zzdlib as zzd
import os
import subprocess
import time
import argparse
def isNEB(dirpath):
    """Return True if ``dirpath/log`` belongs to a VASP NEB run.

    An NEB run is recognised by the marker ``'each image running on'``
    appearing within the first six lines of the log file.
    """
    with open(dirpath + '/log') as file_log:
        # Slicing (instead of indexing data_log[i] for i in range(6))
        # avoids an IndexError on logs shorter than six lines.
        head = file_log.readlines()[:6]
    return any('each image running on' in line for line in head)
def openFile(filepath):
    """Read a text file and return all of its lines (newlines kept)."""
    with open(filepath) as handle:
        return handle.readlines()
def isTaskrun(dirpath):  # given a path, check whether the job named in its Vasp.sh is currently running in the queue
    """Check the PBS queue for the job defined in ``dirpath/Vasp.sh``.

    Returns (True, queue_line) when the job is running, otherwise
    (False, 'job exist do not running' | 'job not exist').
    NOTE(review): assumes cwd lives under /home/<user>/ and that qstat's
    10th column holds the job state -- confirm for the local PBS setup.
    """
    data_Vaspsh = openFile(dirpath+'/Vasp.sh')
    for nl in range(len(data_Vaspsh)): # extract the job name from Vasp.sh
        if '#PBS -N' in data_Vaspsh[nl]:
            jobname = data_Vaspsh[nl].split('-N')[1].strip('\n').strip(' ')
    usrname = os.getcwd().split('home/')[1].split('/')[0]
    ob_mjb = subprocess.Popen(['qstat','-x','-u',usrname],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    data_mjb = ob_mjb.stdout.readlines()
    # scan the queue listing bottom-up for the job name
    for nl in list(reversed(range(len(data_mjb)))):
        data_mjb[nl] = bytes.decode(data_mjb[nl])
        if jobname in data_mjb[nl]: # the job name exists in the listing
            line = data_mjb[nl].strip('\n')
            if line.split()[9] == 'R': # and the job is actually running
                isrun = True
                line = line
                break
            else:
                isrun = False
                line = 'job exist do not running'
        else:
            isrun = False
            line = 'job not exist'
    return isrun,line
# 1. Check whether the NEB runs finished and whether any warnings/errors occurred
def CheckWARNING(directory,reQsub):
    """Walk *directory* and report the status of every NEB run found.

    For each folder containing a 'log' file that isNEB() recognises, counts
    converged images, scans image stdout/OUTCAR files for WARNINGs, and
    prints whether the run is finished, still running, force-stopped, or hit
    the NSW step limit.  When a run's log is empty but the job has been
    running for a while, optionally (reQsub='yes') deletes and resubmits it.
    Chinese strings printed below are user-facing and left as-is.
    """
    for dirpath, dirnames, filenames in os.walk(directory):
        warnflag = 0
        doneflag = 0
        forceflag = 0
        step =0
        if 'log' in filenames:
            try:
                if isNEB(dirpath) == True:
                    data_INCAR = openFile(dirpath+'/INCAR')
                    # pull NSW (max ionic steps) and IMAGES from INCAR
                    for i in range(len(data_INCAR)):
                        if 'NSW' in data_INCAR[i]:
                            NSW = data_INCAR[i].split('=')[1].strip('\n').strip(' ')
                        if 'IMAGES=' in data_INCAR[i]:
                            IMAGES = data_INCAR[i].split('=')[1].strip('\n').strip(' ')
                            break
                    os.system('cp '+dirpath+'/log '+dirpath+'/01/stdout')# copy the NEB folder's log into image folder 01
                    for i in range(1,eval(IMAGES)+1):
                        data_log = openFile(dirpath+'/0'+str(i)+'/stdout')
                        for j in range(len(data_log)):
                            if 'reached required accuracy' in data_log[j]:# image finished converging
                                # one more converged image
                                doneflag = doneflag+1
                            if 'WARNING' in data_log[j]:
                                print('Path:{} stdout WARNING:'.format(dirpath+'/0'+str(i)+'/'))
                                #print(data_log[j]) # print the warning text
                                warnflag = 1
                                with open(dirpath+'/0'+str(i)+'/OUTCAR') as file_OUTCAR:# stdout had a WARNING; check OUTCAR too
                                    data_OUTCAR = file_OUTCAR.readlines()
                                for nl in range(len(data_OUTCAR)):
                                    if 'WARNING' in data_OUTCAR[nl]:
                                        #print('Path:{} OUTCAR WARNING:'.format(dirpath+'/0'+str(i)+'/'))
                                        #print(data_OUTCAR[nl])
                                        warnflag=1
                                        break
                                break
                            if 'Ctrl-C caught... cleaning up processes' in data_log[j]:
                                forceflag =1
                                break
                    data_rootdirlog = openFile(dirpath+'/log') # read the top-level log
                    if len(data_rootdirlog) == 0:
                        loglen = 'log without data'
                    else:
                        if 'running on' in data_rootdirlog[0]:
                            loglen = 'log with right data'
                        # find the latest 'F=' line, i.e. the current ionic step
                        for nl in list(reversed(range(len(data_rootdirlog)))):
                            if 'F=' in data_rootdirlog[nl]:
                                step = data_rootdirlog[nl].split('=')[0].strip('F').strip(' ')
                                break
                    if doneflag <eval(IMAGES):
                        isrun,line = isTaskrun(dirpath)
                        if isrun == True:
                            if forceflag == 1:
                                print('Path:{:<40} NEB强制结束计算 {}F'.format(dirpath,step))
                            elif step == NSW:
                                print('Path:{:<40} NEB计算达到设定步数 {}F'.format(dirpath,step))
                            else:
                                print('Path:{:<40} NEB正在计算... {}F'.format(dirpath,step))
                        else:
                            print('Path:{:<40} NEB计算结束 {}F'.format(dirpath,step))
                    elif doneflag == eval(IMAGES):
                        print('Path:{:<40} NEB计算完成!'.format(dirpath))
            except Exception as e:
                print('Path:{:<40} 错误类型:{}'.format(dirpath,e))
                # diagnose the failure and decide whether to resubmit the job
                data_rootdirlog = openFile(dirpath+'/log')
                if len(data_rootdirlog) == 0: # log is empty
                    loglen = 'log without data'
                    isrun,line = isTaskrun(dirpath)
                    if isrun == True:
                        runtime = line.split()[10]
                        runpath = os.getcwd()
                        if int(line.split()[10].split(':')[0]) >=1 or int(line.split()[10].split(':')[1]) >= 2: # job has been running for over a minute
                            # announce, then optionally delete and resubmit the job
                            print('发现路径{}/log无值且程序运行时间为{}'.format(runpath+dirpath.strip('.'),runtime))
                            choose = reQsub
                            if choose == 'yes' or choose=='y':
                                jobID = line.split()[0].split('.')[0]
                                os.chdir(runpath+dirpath.strip('.'))
                                os.system('qdel '+jobID)
                                print('已经删除任务{}.msvr1'.format(jobID))
                                time.sleep(1)
                                os.system('qsub Vasp.sh')
                                print('已经重新提交{}NEB计算'.format(runpath+dirpath.strip('.')))
                                os.chdir(runpath)
                            elif choose == '' or choose=='no' or choose=='n':
                                continue
                            else:
                                print('输入错误')
                                continue
                        else:
                            print('发现路径{}/log无值且程序运行时间为{},程序刚刚运行'.format(runpath+dirpath.strip('.'),runtime))
                else:
                    # non-empty log: look for a manual Ctrl-C abort
                    for mml in range(len(data_rootdirlog)):
                        if data_rootdirlog[mml] == 'Ctrl-C caught... cleaning up processes\n':
                            print('Path:{:<40} 手动退出'.format(dirpath))
                            break
        if warnflag == 0:
            print('log和OUTCAR中无警告')
def CheckEnergy(directory):
    """Print per-image NEB energies/barriers (via nebef.pl) for every NEB
    run found beneath *directory*."""
    # 2. print energies and related information
    runpath = os.getcwd()
    for dirpath, dirnames, filenames in os.walk(directory):
        if 'log' in filenames:
            try:
                if isNEB(dirpath) == True:
                    os.chdir(dirpath)
                    obj = subprocess.Popen(['nebef.pl'],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
                    # nebef.pl output arrives as bytes; split its repr on quotes/newlines
                    nebef = str(obj.stdout.read()).split("'")[1].split('\\n')
                    print('Path:{}'.format(runpath+dirpath))
                    print(' IMAGE RMS Energy Barrier')
                    for i in range(len(nebef)):
                        print(nebef[i].strip('\n'))
                    os.chdir(runpath)
            except Exception as e:
                print('Path:{} 错误类型:{}'.format(runpath+dirpath,e))
def getBarrier(IS='-1'):
    """Collect diffusion barriers from every finished NEB run below cwd.

    IS: ionic-step number (string) at which to evaluate the barrier;
        '-1' (default) uses the final step via nebef.pl.

    Walks every sub-directory containing INCAR/POTCAR/KPOINTS whose INCAR
    first line marks it as an NEB run, then prints a table of
    path / barrier / ionic step / number of images.
    """
    Path = []
    Barrier = []
    Step = []
    Image = []
    runpath = os.getcwd()
    for dirpath, dirnames, filenames in os.walk('./'):
        if 'INCAR' in filenames and 'POTCAR' in filenames and 'KPOINTS' in filenames:
            data_INCAR = zzd.File.openFile(dirpath+'/INCAR','r')
            if 'NEB' in data_INCAR[0].split('=')[-1]:
                data_Sh = zzd.File.openFile(dirpath+'/Vasp.sh','r')
                # job name truncated to 10 characters, matching the queue display
                jobname = zzd.File.getLine(data_Sh,'#PBS -N')[0].split()[-1][:10]
                stat = zzd.Vasp.checkJobstatus(jobname)
                if stat == 'R' or stat == 'Q':
                    print('路径{} NEB计算正在{}'.format(dirpath,stat))
                elif 'log' in os.listdir(dirpath): # not submitted yet, or already finished
                    data_log = zzd.File.openFile(dirpath+'/log','r')
                    isRA = zzd.File.getLine(data_log,'reached required accuracy')[0]
                    images = zzd.File.getLine(data_INCAR,'IMAGES')[0].split('=')[-1]
                    Fstep = zzd.File.getLine(data_INCAR,'NSW')[0].split('=')[-1]
                    ionstep = zzd.File.getAllline(data_log,'F=')[-1].split()[0]
                    if 'reached required accuracy' in isRA or Fstep == ionstep:
                        barriers = []
                        os.chdir(dirpath)
                        if IS == '-1': # default: let nebef.pl report per-image barriers
                            nebef = zzd.getshellResult('nebef.pl')
                            for inn in nebef:
                                barriers.append(float(inn.split()[3]))
                        else: # evaluate the barrier at ionic step IS
                            try:
                                data_OUTini = zzd.File.openFile('./00/OUTCAR','r')
                                Eini = zzd.File.getAllline(data_OUTini,'energy without entropy')[-1].split()[-1]
                                data_OUTfin = zzd.File.openFile('./0'+str(int(images)+1)+'/OUTCAR','r')
                                # BUG FIX: Efin was previously read from data_OUTini
                                # (copy-paste), so the final image's energy was wrong.
                                Efin = zzd.File.getAllline(data_OUTfin,'energy without entropy')[-1].split()[-1]
                                barriers.append(0.0) # initial image is the energy reference
                                for ii in range(1,int(images)+1):
                                    data = zzd.File.openFile('./0'+str(ii)+'/stdout','r')
                                    Ener = zzd.File.getLine(data,IS+' F=')[0].split()[4]
                                    barriers.append(float(Ener)-float(Eini))
                                # BUG FIX: include the final endpoint, which was
                                # previously never appended at all.
                                barriers.append(float(Efin)-float(Eini))
                            except Exception as e:
                                print('获取第{}步势垒出现问题,提示为:{}'.format(IS,e))
                        Barrier.append(max(barriers))
                        Step.append(ionstep)
                        Path.append(dirpath[:18])
                        Image.append(images)
                        os.chdir(runpath)
                    else:
                        print('{} 未达到收敛标准或者计算步数未满,退出'.format(dirpath))
                else:
                    print('出现了奇怪的问题')
    print('{:<20}{:<10}{:<5}{:<3}'.format('path','barrier','step','images'))
    for i in range(len(Barrier)):
        print('{:<20}{:<10.7f}{:<5}{:<3}'.format(Path[i],Barrier[i],Step[i],Image[i]))
if __name__ == '__main__':
    print('脚本运行,功能列表: 1.检查计算是否完成(默认) 2.获取NEB计算结果 3.以上全部 --func=1 --reQsub=no')
    parser = argparse.ArgumentParser(description ='Manual')
    # --func: 1 = completion check, 2 = energies, 3 = both, 4 = barrier at a chosen step
    parser.add_argument('--func',type=str,default='1')
    # --reQsub: 'yes' lets the checker delete and resubmit stalled jobs
    parser.add_argument('--reQsub',type=str,default='no')
    args = parser.parse_args()
    choose = args.func
    print('NEBCheckBegins... --func={}'.format(choose))
    if choose == '1' or choose =='3':
        CheckWARNING('./',args.reQsub)
    if choose == '2' or choose =='3':
        CheckEnergy('./')
    if choose == '4':
        # undocumented option: evaluate the barrier at an arbitrary ionic step
        IS = input('检查NEB第几步的势垒(默认最后一步): ')
        if IS == '':
            IS='-1'
        getBarrier(IS)
    print('NEBCheck done!')
|
zybbigpy/VaspCZ | sourcecode/VaspVibAna_forSb.py | """
振动分析.
仅仅支持fcc 3x3x3 超胞共108Fe,掺杂溶质solute和空位vacancy时两种振动频率的计算。
运行在fcc_withele 文件夹下,提交2个振动分析:Fe106Te1V1 或 Fe107Te1
2019.8.7 更新
原来只考虑1NN原子振动的变化,且VASP结果默认不区分各个原子的振动频率
现在考虑1NN和2NN原子振动的变化。
对于Fe107Te1, Te位置为0.33 0.50 0.50, 1NN 12个 代表位置为0.50 0.50 0.66, 2NN 6个 代表位置为 0.66 0.50 0.50, 共3个原子的振动。
对于Fe106Te1V1, 考虑空位的1NN 2NN和溶质的1NN 2NN的所有原子的并集,共有18+10=28个原子的振动。
"""
import os, sys
import subprocess
import zzdlib
import argparse
import numpy as np
# Interpreter used to launch helper scripts.
python = sys.executable
# Absolute directory containing this script.
current_py_folder = os.path.dirname(os.path.abspath(__file__))
# Resolve the VaspCZ 'sourcecode' directory whether this file already lives
# inside it or one level above it.
_root = os.path.dirname(current_py_folder) if 'sourcecode' in current_py_folder else current_py_folder
VaspCZ_path = _root + '/sourcecode'
def get_saddle_image():
    """Locate the highest-energy (saddle) image of an NEB run.

    Reads ``neb.dat`` in the current directory (generating it with
    ``nebbarrier.pl`` if absent) and returns the image folder name,
    e.g. ``'03'``, of the row whose energy column is largest.
    """
    if not os.path.isfile('neb.dat'):
        os.system('nebbarrier.pl')
    with open('neb.dat', 'r') as handle:
        rows = [line.split() for line in handle]
    table = np.asarray(rows).astype(float)
    # column 2 holds the energy; pick the row where it peaks
    peak_row = table[np.argmax(table[:, 2])]
    return f'0{int(peak_row[0])}'
def run(nodes, ppn, ele):
    """Set up and submit vibration-analysis VASP jobs for fcc_with<ele>.

    For both cases (Fe107+solute and Fe106+solute+vacancy) a 'vib_analysis'
    folder is created per case, and inside it one sub-folder per atom
    position listed in pos_dict (encoded as 6-digit fractional coordinates,
    e.g. '505066' -> 0.5000 0.5000 0.6666).  Each sub-folder gets
    INCAR/POSCAR/KPOINTS/POTCAR/Vasp.sh derived from the parent run, with
    Selective Dynamics restricted to the chosen atom, then the job is
    submitted through zzdlib.
    """
    print(f'开始进行fcc_with{ele}振动分析')
    os.chdir(f'{ele}-Energies')
    cases = ['Fe107ele1', 'Fe106ele1V1']
    # 6-digit keys encode the fractional x/y/z of each atom whose vibration
    # is computed ('33' -> 0.3333, '50' -> 0.5000, '66' -> 0.6666, ...)
    pos_dict = {
        'Fe107': ['505066', '665050', '335050'],
        'Fe106': [
            '005050', '166650', '165033', '165066', '163350', '336633', '333333', '335016', '338350',
            '331650', '333366', '335050', '335083', '336666', '505000', '505033', '501666', '503350', '503383',
            '506650', '506683', '508366', '663366', '666666', '665050', '665083', '835066']
    }
    for i in range(len(cases)):
        case = cases[i].replace('ele', ele)
        if not os.path.isdir(case):
            print(f'case {case} 不存在,退出程序')
            exit()
        os.chdir(case)
        if os.path.isdir('vib_analysis'):
            print(f'{ele} {case} vib_analysis 文件夹已存在,退出程序')
        else:
            os.mkdir('vib_analysis')
        os.chdir('vib_analysis')
        # parse the parent POSCAR so the target atoms can be located by position
        POSCAR_data = zzdlib.File.openFile('../POSCAR', 'r')
        pos_result = zzdlib.Vasp.decode_POSCAR(POSCAR_data)  # returns: vector, elements, number_of_atom, position
        pos_position = pos_result[3]
        pos_list = pos_dict[case[:5]]
        for j in range(len(pos_list)):
            pos_tmp = pos_list[j]  # 6-digit position key (string)
            # expand '50'->'0.5000', '33'->'0.3333', '66'->'0.6666' etc. by
            # repeating the second digit of each coordinate pair
            x_tmp = f'0.{pos_tmp[:2]}{pos_tmp[1]}{pos_tmp[1]}'
            y_tmp = f'0.{pos_tmp[2:4]}{pos_tmp[3]}{pos_tmp[3]}'
            z_tmp = f'0.{pos_tmp[4::]}{pos_tmp[5]}{pos_tmp[5]}'
            pos = (x_tmp, y_tmp, z_tmp)
            indexes = []
            # match the formatted fractional coordinates against every atom
            for k in range(len(pos_position)):
                x, y, z = pos_position[k].tolist()
                x = f'{x:<.4f}'
                y = f'{y:<.4f}'
                z = f'{z:<.4f}'
                xyz = (x, y, z)
                if xyz == pos:
                    # print(xyz, i)
                    indexes.append(k)
            # each index is one atom whose vibration will be computed; the
            # folder name repeats the 6-digit position key, e.g. 505066
            dir_name = f'{pos[0][2:4]}{pos[1][2:4]}{pos[2][2:4]}'
            os.mkdir(dir_name)
            os.chdir(dir_name)
            # INCAR: copy and switch tags to a vibration-analysis setup
            os.system(f'cp ../../INCAR .')
            zzdlib.Vasp.modify_INCAR_for_vibration_analysis()
            # POSCAR: start from the parent's relaxed CONTCAR, then restrict
            # Selective Dynamics to the selected atom indexes
            os.system(f'cp ../../CONTCAR POSCAR')
            data_POS = zzdlib.File.openFile('POSCAR', 'r')
            POSCAR_data = zzdlib.Vasp.modify_POSCAR_Selective_Dynamics(data=data_POS, indexes=indexes)
            zzdlib.File.openFile('POSCAR', 'w', data=POSCAR_data)
            # KPOINTS: copied unchanged
            os.system(f'cp ../../KPOINTS .')
            # POTCAR: copied unchanged
            os.system(f'cp ../../POTCAR .')
            # Vasp.sh: copied, then job name / nodes / ppn are rewritten
            Vaspsh_path = zzdlib.File.Vaspsh_path()
            os.system(f'cp {Vaspsh_path}/Vasp.sh .')
            jobname = f'fc{case[3:5]}{ele}{dir_name}V'
            zzdlib.Vasp.modify_Vasp_sh(jobname, nodes=nodes, ppn=ppn)
            zzdlib.Vasp.check_and_qsub(need_input=True)
            os.chdir('..')
        os.chdir('../..')
    os.chdir('..')
if __name__ == '__main__':
    # Command line: nodes/ppn are forwarded into the generated queue scripts.
    parser = argparse.ArgumentParser(description='manual to this script')
    parser.add_argument('-nd', '--nodes', type=str, default='1')
    parser.add_argument('-np', '--ppn', type=str, default='8')
    args = parser.parse_args()
    nodes = args.nodes
    ppn = args.ppn
    # the solute element is encoded in the working directory name: fcc_with<ele>
    ele = os.path.basename(os.getcwd()).split('with')[-1]
    print(f'输入参数: nodes:{nodes} ppn:{ppn} ele:{ele}')
    run(nodes, ppn, ele)
|
zybbigpy/VaspCZ | vtstscripts-939/nebinterp.py | <reponame>zybbigpy/VaspCZ<gh_stars>10-100
# written by <NAME>
def interpolate_images(image_list, num_new_images, kind = 'linear', use_image_distance_in_spline = False):
    '''Interpolate a list of ASE "Atoms" images onto a new set of images.

    Positions and initial magnetic moments are splined along the path,
    with displacements unwrapped through periodic boundaries (minimum
    image convention).

    image_list: images along the path, endpoints included (needs >= 2).
    num_new_images: number of images to return, endpoints included.
    kind: spline kind forwarded to scipy.interpolate.interp1d.
    use_image_distance_in_spline: if True, parameterize the spline by the
        cumulative RMS distance along the path instead of image index.
    Returns a list of new ASE Atoms objects.
    '''
    nimages = len(image_list)
    # BUG FIX: natoms was previously taken from a global 'atoms' variable,
    # which only happened to exist when this module ran as a script;
    # derive it from the input instead.
    natoms = image_list[0].positions.shape[0]
    if nimages == 2:
        print ('Only 2 images, kind will be linear')
        kind = 'linear'
    elif nimages < 2:
        # previously only printed a message and crashed later; fail fast
        raise ValueError('YOU NEED AT LEAST 2 IMAGES FOR INTERPOLATION!')
    from ase.geometry.geometry import find_mic
    from numpy import zeros, linspace, sqrt
    from scipy.interpolate import interp1d
    ####################
    # Cumulative path distance per image, and PBC-unwrapped positions.
    distance_seq = zeros(nimages)
    position_collection = zeros((natoms,3,nimages))
    image_index = 0 # for the first image, we don't need distances, just the original positions
    for atom_index in range(0,natoms):
        for dim_index in range(3):
            position_collection[atom_index, dim_index, image_index] = image_list[image_index].positions[atom_index, dim_index]
    for image_index in range(1,nimages):
        D = image_list[image_index].positions - image_list[image_index-1].positions
        # minimum-image displacement vectors between consecutive images
        D_min, D_min_len = find_mic(D, cell = image_list[image_index].get_cell() )
        distance = sqrt((D_min**2).sum())
        distance_seq[image_index] = distance_seq[image_index-1] + distance
        for atom_index in range(0,natoms):
            for dim_index in range(3):
                position_collection[atom_index, dim_index, image_index] = \
                    position_collection[atom_index, dim_index, image_index-1] + D_min[atom_index][dim_index]
    # Spline parameter: image number scaled to [0, 1] ...
    seq = linspace(0,1, nimages)
    if use_image_distance_in_spline:
        # ... or the cumulative RMS/L2 path distance rescaled to [0, 1]
        seq = distance_seq/distance_seq.max()
    # One spline per atom per Cartesian component.
    spline_func_collection = []
    for atom_index in range(0,natoms):
        spline_func_collection.append([])
        for dim_index in range(3):
            func = interp1d(seq, position_collection[atom_index,dim_index] ,kind=kind)
            spline_func_collection[atom_index].append(func)
    ################
    # Spline the per-atom initial magnetic moments the same way.
    mag_collection = zeros((natoms,nimages))
    for image_index in range(0,nimages):
        mag_mom = image_list[image_index].get_initial_magnetic_moments()
        for atom_index in range(0,natoms):
            mag_collection[atom_index, image_index ] = mag_mom[atom_index]
    mag_spline_func_collection = []
    for atom_index in range(0,natoms):
        func = interp1d(seq, mag_collection[atom_index], kind=kind)
        mag_spline_func_collection.append(func)
    #############################
    # Evaluate the splines at evenly spaced parameters for the new images.
    from copy import deepcopy
    new_image_list = []
    new_seq = linspace(0,1, num_new_images )
    new_mag_mom = zeros(natoms)
    for new_image_index in range(0, num_new_images):
        # deepcopy the first image to inherit cell, symbols and constraints.
        # NOTE(review): this assumes the lattice vectors do not change along
        # the path -- confirm before using for variable-cell NEB.
        new_image = deepcopy(image_list[0])
        pos = new_seq[new_image_index]
        for atom_index in range(0,natoms):
            for dim_index in range(3):
                new_image.positions[atom_index, dim_index] = spline_func_collection[atom_index][dim_index](pos)
        for atom_index in range(0,natoms):
            new_mag_mom[atom_index] = mag_spline_func_collection[atom_index](pos)
        new_image.set_initial_magnetic_moments(new_mag_mom)
        new_image_list.append(new_image)
    return new_image_list
def rms_distance(imageA, imageB):
    """L2 (Frobenius) norm of the minimum-image displacement between two images."""
    from numpy import sqrt
    from ase.geometry.geometry import find_mic
    displacement = imageB.positions - imageA.positions  # 2d array
    min_vectors, _ = find_mic(displacement, imageB.cell)
    return sqrt((min_vectors ** 2).sum())
def compute_image_rms_distances(image_list):
    """Return an array of RMS distances between consecutive pairs of images."""
    from numpy import array
    consecutive = zip(image_list, image_list[1:])
    return array([rms_distance(following, current) for current, following in consecutive])
############## These functions are meant to make working with VASP easier
def try_mkdir(direct):
    """Create directory *direct* if it does not already exist.

    EAFP variant: attempting the mkdir and catching FileExistsError avoids
    the check-then-act race of an isdir()-then-mkdir sequence.  If the name
    exists but is not a directory, the error is re-raised.
    """
    from os import mkdir
    from os.path import isdir
    try:
        mkdir(direct)
    except FileExistsError:
        if not isdir(direct):
            raise
def get_nimages(directory = ''):
    """Count NEB intermediate images by probing NN/CONTCAR folders.

    Folders 01, 02, ... under *directory* are probed until one is missing;
    the two endpoints are excluded from the count (hence the -2).
    """
    from os.path import isfile
    probe = 1
    while isfile("%s%02d/CONTCAR" % (directory, probe)):
        probe += 1
    nimages = probe - 2
    print(nimages, "Images Found")
    return nimages
def read_mag_cols(fname='OUTCAR'):
    """Parse the last ' magnetization (x)' table from a VASP OUTCAR.

    Returns five parallel columns [ion_index, s, p, d, total]; the final
    column (mag_cols[-1]) holds the total magnetic moment of each ion.

    Raises ValueError if no magnetization block is present (the original
    silently parsed garbage starting near the top of the file).
    """
    # 'with' guarantees the file handle is closed (the original leaked it)
    with open(fname, 'r') as fid:
        lines = fid.readlines()
    mag_line = -1
    for i in range(len(lines)):
        # keep scanning so the LAST magnetization block wins, matching
        # the original behavior
        if " magnetization (x)" in lines[i]:
            mag_line = i
    if mag_line == -1:
        raise ValueError("no ' magnetization (x)' block found in %s" % fname)
    mag_cols = [[], [], [], [], []]
    # the table body starts four lines below the header
    line_index = mag_line + 4
    while '---' not in lines[line_index]:
        sline = lines[line_index].split()
        mag_cols[0].append(int(sline[0]))
        for icol in range(1, 5):
            mag_cols[icol].append(float(sline[icol]))
        line_index += 1
    # the last column, mag_cols[-1], has the total magnetic moment
    return mag_cols
###################### Test the function here
if __name__=='__main__':
    # Re-interpolate an existing NEB run (folders 00..NN with CONTCAR/OUTCAR)
    # onto num_new_images images and write them under interpolated_images/.
    num_new_images = 5# this number matches the IMAGES tag in VASP
    use_image_distance_in_spline = True
    # handy function for getting the number of images in VASP format in this directory
    nimages= get_nimages()
    from ase import io
    from numpy import array
    image_list =[]
    for image in range(0,nimages+2):
        atoms = io.read('%02d/CONTCAR'%image)
        # attach each image's magnetic moments (read from its OUTCAR) so
        # they get splined along with the positions
        mag_cols = read_mag_cols('%02d/OUTCAR'%image)
        atoms.set_initial_magnetic_moments(mag_cols[-1])
        image_list.append(atoms)
    ### now that the images are read, we can use the interpolating function
    # the +2 is because vasp doesn't count the first and last images in the IMAGES tag
    interpolated_image_list = interpolate_images(image_list, num_new_images+2,
            kind = 'cubic', use_image_distance_in_spline = use_image_distance_in_spline)
    if use_image_distance_in_spline:
        print('Compare image spacing before and after interpolation:')
        print(compute_image_rms_distances(image_list))
        print(compute_image_rms_distances(interpolated_image_list))
    # with the interpolated images, we can write them to a subdirectory
    sub_dir = 'interpolated_images/'
    try_mkdir(sub_dir)
    for new_image_index in range(0, num_new_images+2):
        imdir = sub_dir+'%02d/'%new_image_index
        try_mkdir(imdir)
        atoms = interpolated_image_list[new_image_index]
        fname = imdir+'POSCAR'
        io.write(fname, atoms, format='vasp')
        #fname = sub_dir+imdir+'CONTCAR'
        #io.write(fname, atoms, format='vasp')
        ######## this part makes a MAGMOM line for our INCAR file
        magmom_name = imdir+'MAGMOM'
        mag_mom = atoms.get_initial_magnetic_moments()
        fid = open(magmom_name,'w')
        fid.write('MAGMOM =')
        for atom_index in range(mag_mom.shape[0]):
            fid.write(' %.2f'%( mag_mom[atom_index] ))
        fid.close()
|
zybbigpy/VaspCZ | sourcecode/VaspNEBKeepINFI_OptInputs.py | #!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
"""
更新,包含了提示要删除文件。
"""
import os
import sys
alldir = os.listdir()
# Everything at the top level except the 'ini' and 'fin' folders is slated
# for deletion.
del_files = [entry for entry in alldir if entry not in ('ini', 'fin')]
# Inside ini/ and fin/, keep only the Opt sub-folder ...
del_ini_files = os.listdir('ini')
del_ini_files.remove('Opt')
del_fin_files = os.listdir('fin')
del_fin_files.remove('Opt')
# ... and inside each Opt folder, keep only the five VASP input files.
del_ini_Opt_files = os.listdir('ini/Opt')
del_fin_Opt_files = os.listdir('fin/Opt')
for file in ['INCAR', 'POSCAR', 'POTCAR', 'KPOINTS', 'Vasp.sh']:
    del_ini_Opt_files.remove(file)
    del_fin_Opt_files.remove(file)
# Ask for confirmation before destroying anything.
ipt = input(f'即将删除文件和文件夹\n当前目录下: {del_files}\nini/下: {del_ini_files}\nini/Opt/下: {del_ini_Opt_files}\nfin/下: {del_fin_files}\nfin/Opt/下: {del_fin_Opt_files}\n是否确定([y]es/no): ')
if ipt in ['y', 'yes', 'Y', 'YES', '']:
    for file in del_files:
        os.system(f'rm -rf {file}')
    # Map each stage directory to its deletion lists instead of building
    # variable names with eval() as before (fragile and unsafe).
    pending = {'ini': (del_ini_files, del_ini_Opt_files),
               'fin': (del_fin_files, del_fin_Opt_files)}
    for infi in ['ini', 'fin']:
        stage_files, opt_files = pending[infi]
        os.chdir(infi)
        for file in stage_files:
            os.system(f'rm -rf {file}')
        os.chdir('Opt')
        for file in opt_files:
            os.system(f'rm -rf {file}')
        os.chdir('../..')
else:
    print(f'未删除任何东西')
|
zybbigpy/VaspCZ | sourcecode/__init__.py | __all__ = ['zzdlib', 'VaspGenerate', 'utils'] |
zybbigpy/VaspCZ | vtstscripts-939/rattle.py | <filename>vtstscripts-939/rattle.py
#!/usr/bin/env python
import aselite
from sys import argv, exit
if len(argv) < 3:
print 'usage: center.py FILE STDDEV'
print ' randomly displaces the atoms in FILE by a gaussian with'
print ' a standard deviation of STDDEV'
print
exit(0)
filename = argv[1]
stddev = float(argv[2])
atoms = aselite.read_any(filename)
atoms.rattle(stddev)
atoms.write(filename)
|
zybbigpy/VaspCZ | vtstscripts-939/pos2xyz.py | #!/usr/bin/env python
# Convert a VASP POSCAR file to .xyz format (written as '<POSCAR>.xyz').
# NOTE: Python 2 script; 'exit' below relies on the site builtins.
import aselite
from sys import argv
if '-h' in argv or len(argv) != 2:
    print 'usage: pos2xyz.py POSCAR'
    print
    exit(1)
atoms = aselite.read_vasp(argv[1])
aselite.write_xyz('%s.xyz' % argv[1], atoms)
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/kdbquery.py | #!/usr/bin/env python
import os
import sys
import numpy
import glob
import shutil
import math
import copy
from optparse import OptionParser
from kdb import Kdb
from config import *
from aselite import elements
from aselite import write_vasp
class KdbQuery(Kdb):
    def __init__(self):
        # Accumulates query results; populated by query()/query_db()
        # implementations (including remote-DB subclasses).
        self.return_dict = {}
def isDistance(self, pbcvector, target, box, dc):
for x in [-1, 0, 1]:
for y in [-1, 0, 1]:
for z in [-1, 0, 1]:
temp = pbcvector.copy()
temp += x * box[0]
temp += y * box[1]
temp += z * box[2]
if abs(numpy.linalg.norm(temp) - target) < dc:
return True
return False
def centroid(self, a, which=None):
if which == None:
which = range(len(a))
c = numpy.array([0.0, 0.0, 0.0])
for i in which:
c += a.positions[i]
c /= len(which)
return c
    def clump(self, c, atoms, nf):
        """Unwrap periodic boundary conditions for the given atom indices.

        Starting from one seed atom, neighbors within the covalent-radius
        sum scaled by (1 + nf) are pulled to their minimum-image positions
        so the cluster becomes contiguous.  Returns the unwrapped copy, or
        None if the structure is dissociated / nf is too small.
        """
        # Remove PBC's.
        temp = c.copy()
        undone = atoms[:]
        working = [undone.pop()]
        while len(undone) > 0:
            if len(working) == 0:
                # remaining atoms are not connected to the seed cluster
                print "Dissociated reactant, or neighbor_fudge too small."
                return
            a = working.pop()
            for i in undone[:]:
                v = self.pbc(temp.positions[i] - temp.positions[a], temp.cell)
                d = numpy.linalg.norm(v)
                if d < (elements[temp.get_chemical_symbols()[a]]["radius"] + elements[temp.get_chemical_symbols()[i]]["radius"]) * (1.0 + nf):
                    # neighbor found: place atom i at its minimum-image
                    # position relative to atom a and grow the cluster
                    temp.positions[i] = temp.positions[a] + v
                    working.append(i)
                    undone.remove(i)
        return temp
    def query_db(self, **args):
        """Fetch kdb entries matching the query configuration.

        Base implementation is a stub; subclasses override this when
        talking to a remote database.
        """
        print "function not yet overloaded"
        #note this function gets overloaded when interacting with remote DB
    def output_query(self, outputdir, numMatches, suggestion, sugproduct, modeTemp=None):
        """Write one match (saddle, product, optional mode) to *outputdir*.

        Files are numbered by numMatches; a hidden '.done_N' marker is
        touched last so readers can tell when match N is complete.
        """
        #create directory if none exists.
        if not os.path.isdir(outputdir):
            os.mkdir(outputdir)
        #write the files to the output
        write_vasp(outputdir + "/SADDLE_%d" % numMatches, suggestion)
        write_vasp(outputdir + "/PRODUCT_%d" % numMatches, sugproduct)
        if modeTemp is not None:
            self.save_mode(outputdir + "/MODE_%d" % numMatches, modeTemp)
        # marker written last, after all data files exist
        os.system("touch %s/.done_%d" % (outputdir, numMatches))
def query(self, reactant, outputdir = "./kdbmatches", nf=0.2, dc=0.3, nodupes = False, kdbname = 'kdb.db'):
# XXX: I think the best way forward to allow parallel processes
# here is to make the query function return atoms objects instead
# of writing them to file there.
# Get the ibox to speed up pbcs.
ibox = numpy.linalg.inv(reactant.cell)
# Remove directory if kdbmatches is already there.
if os.path.isdir(outputdir):
shutil.rmtree(outputdir)
# A list of unique saddles, used for duplicate removal.
uniques = []
# Get a list of kdb entries that match the query configuration elementally.
entries, name = self.query_db(kdbname = kdbname, reactant = reactant)
if len(entries) == 0:
print "No entries for those elements."
return
# For each nonfrozen atom in reactant, create a list of neighboring element
# types and the count of each type.
# TODO: this can be made N^2/2 trivially.
# TODO: this can use SAP for ortho boxes.
reactantNeighbors = {}
for i in range(len(reactant)):
if i in reactant.constraints[0].index:
continue
r1 = elements[reactant.get_chemical_symbols()[i]]["radius"]
reactantNeighbors[i] = {}
for j in range(len(reactant)):
if j == i:
continue
r2 = elements[reactant.get_chemical_symbols()[j]]["radius"]
d = numpy.linalg.norm(self.pbc(reactant.positions[i] - reactant.positions[j], reactant.cell, ibox))
if d > (r1 + r2) * (1 + nf):
continue
if reactant.get_chemical_symbols()[j] not in reactantNeighbors[i]:
reactantNeighbors[i][reactant.get_chemical_symbols()[j]] = 0
reactantNeighbors[i][reactant.get_chemical_symbols()[j]] += 1
# Create a list of element types and counts for the entire reactant.
reactantNameCount = self.nameCount(reactant)
numMatches = 0
###########################################################################
# (Main) Loop over each kdb entry.
###########################################################################
for entry in entries:
entryMatches = 0
mirrored = "not mirrored"
if entry["mirror"]:
mirrored = "mirrored"
# print "checking", name, "with id:", entry['id'], mirrored
print "KDB checking entry:", entry['id'], "(",mirrored,")"
# Load the minimum.
kdbmin = copy.deepcopy(entry['minimum'])
# Make sure the reactant has at least as many atoms of each type as the
# kdb configuration.
passedNameCount = True
kdbNameCount = self.nameCount(kdbmin)
for name in kdbNameCount:
if name not in reactantNameCount:
passedNameCount = False
break
if kdbNameCount[name] > reactantNameCount[name]:
passedNameCount = False
break
if not passedNameCount:
print "%10d name count fail" % entryMatches
continue
# Load the mobile atoms list.
kdbmobile = copy.deepcopy(entry['mobile'])
# Mirror the minimum if the mirror flag is set for this entry.
if entry["mirror"]:
for i in range(len(kdbmin)):
kdbmin.positions[i] += 2.0 * (kdbmin.positions[0] - kdbmin.positions[i])
# For each mobile atom in kdbmin, create a list of neighboring element
# types and the count of each type.
kdbNeighbors = {}
for i in kdbmobile:
r1 = elements[kdbmin.get_chemical_symbols()[i]]["radius"]
kdbNeighbors[i] = {}
for j in range(len(kdbmin)):
if j == i:
continue
r2 = elements[kdbmin.get_chemical_symbols()[j]]["radius"]
d = numpy.linalg.norm(kdbmin.positions[i] - kdbmin.positions[j])
if d > (r1 + r2) * (1 + nf):
continue
if kdbmin.get_chemical_symbols()[j] not in kdbNeighbors[i]:
kdbNeighbors[i][kdbmin.get_chemical_symbols()[j]] = 0
kdbNeighbors[i][kdbmin.get_chemical_symbols()[j]] += 1
kdbUnmapped = range(len(kdbmin)) # Keep track of the kdb atoms that have been mapped.
# Create the initial mappings.
mappings = None
db_a = kdbmobile[0] # This will be the selected mobile atom.
for m in kdbmobile:
mMappings = []
for freeAtom in reactantNeighbors.keys():
for elementType in reactantNeighbors[freeAtom]:
if elementType not in kdbNeighbors[m]:
break
if kdbNeighbors[m][elementType] != reactantNeighbors[freeAtom][elementType]:
break
else:
mMappings.append({m:freeAtom})
if mappings == None:
mappings = mMappings
if len(mMappings) < len(mappings):
mappings = mMappings
db_a = m
kdbUnmapped.remove(db_a)
while len(kdbUnmapped) > 0 and len(mappings) > 0:
# Create a list of new mappings that will replace mappings at the
# end of this iteration.
newMappings = []
# Select an unmapped atom from kdbmin.
kdbAtom = kdbUnmapped.pop()
# Get the distance between kdbAtom and every other atom in the kdb
# configuration.
kdbDistances = {}
for i in range(len(kdbmin)):
kdbDistances[i] = numpy.linalg.norm(kdbmin.positions[kdbAtom] - kdbmin.positions[i])
# Loop over each mapping and try to place kdbAtom.
for mapping in mappings:
# Loop over each atom in the reactant.
for reactantAtom in range(len(reactant)):
# Make sure it has not already been mapped.
if reactantAtom in mapping.values():
continue
# Loop over the atoms in mapping and see if the distance
# between reactantAtom and mapping.values() atoms is the same
# within dc (DISTANCE_CUTOFF) of the distance between kdbAtom
# and mapping.keys() atoms.
for DA in mapping.keys():
RA = mapping[DA]
pbcVector = self.atomAtomPbcVector(reactant, RA, reactantAtom)
if PBC_MAPPING_CHECK:
if not self.isDistance(pbcVector, kdbDistances[DA], reactant.cell, dc):
break
else:
if abs(kdbDistances[DA] - self.atomAtomPbcDistance(reactant, RA, reactantAtom)) > dc:
break
else:
newMapping = mapping.copy()
newMapping[kdbAtom] = reactantAtom
newMappings.append(newMapping)
mappings = newMappings
# Load the mode.
mode = copy.deepcopy(entry['mode'])
# Loop over each mapping and try to find a rotation that aligns the
# kdb configuration with the query configuration.
for mapping in mappings:
#print "in mappings loop"
#print "mapping values: ",mapping.values()
reactantrot = self.clump(reactant, mapping.values(), nf)
# If no neighbors found, go to next mapping
if reactantrot is None:
continue
#print "mapping values, after clump: ",mapping.values()
#print "reactantrot: ",reactantrot
# Make a copy of kdbmin for rotation and put it in the box.
kdbrot = kdbmin.copy()
kdbrot.cell = reactant.cell.copy()
# Rotation Matrix calculation start
tb = kdbrot.copy()
tb.positions -= self.centroid(tb)
ta = tb.copy()
offset = self.centroid(reactantrot, mapping.values())
i = 0
for m in mapping:
ta.positions[i] = tb.positions[m] + self.pbc((reactantrot.positions[mapping[m]] - offset) - tb.positions[m], reactantrot.cell)
i += 1
ta.positions -= self.centroid(ta)
m = numpy.dot(tb.positions.transpose(), ta.positions)
sxx = m[0][0]
sxy = m[0][1]
sxz = m[0][2]
syx = m[1][0]
syy = m[1][1]
syz = m[1][2]
szx = m[2][0]
szy = m[2][1]
szz = m[2][2]
n = numpy.zeros((4,4))
n[0][1] = syz - szy
n[0][2] = szx - sxz
n[0][3] = sxy - syx
n[1][2] = sxy + syx
n[1][3] = szx + sxz
n[2][3] = syz + szy
n += n.transpose()
n[0][0] = sxx + syy + szz
n[1][1] = sxx - syy - szz
n[2][2] = -sxx + syy - szz
n[3][3] = -sxx - syy + szz
w, v = numpy.linalg.eig(n)
maxw = 0
maxv = 0
for i in range(len(w)):
if w[i] > maxw:
maxw = w[i]
maxv = v[:,i]
Rmat = numpy.zeros((3,3))
aa = maxv[0]**2
bb = maxv[1]**2
cc = maxv[2]**2
dd = maxv[3]**2
ab = maxv[0]*maxv[1]
ac = maxv[0]*maxv[2]
ad = maxv[0]*maxv[3]
bc = maxv[1]*maxv[2]
bd = maxv[1]*maxv[3]
cd = maxv[2]*maxv[3]
Rmat[0][0] = aa + bb - cc - dd
Rmat[0][1] = 2*(bc-ad)
Rmat[0][2] = 2*(bd+ac)
Rmat[1][0] = 2*(bc+ad)
Rmat[1][1] = aa - bb + cc - dd
Rmat[1][2] = 2*(cd-ab)
Rmat[2][0] = 2*(bd-ac)
Rmat[2][1] = 2*(cd+ab)
Rmat[2][2] = aa - bb - cc + dd
Rmat = Rmat.transpose()
# Rotation Matrix calculation end
translation1 = self.centroid(kdbrot)
kdbrot.positions -= translation1
kdbrot.positions = numpy.dot(kdbrot.positions, Rmat)
translation2 = self.centroid(reactantrot, mapping.values())
kdbrot.positions += translation2
# Calculate a score for this mapping.
score = max([numpy.linalg.norm(self.pbc(kdbrot.positions[m] - reactantrot.positions[mapping[m]], reactantrot.cell)) for m in mapping])
if score > dc:
continue
# Load the saddle from the database.
kdbSaddle = copy.deepcopy(entry['saddle'])
# Mirror the saddle if the mirror flag is set for this entry.
if entry["mirror"]:
for i in range(len(kdbSaddle)):
kdbSaddle.positions[i] += 2.0 * (kdbmin.positions[0] - kdbSaddle.positions[i])
# Load the product from the database.
kdbProduct = copy.deepcopy(entry['product'])
# Mirror the product if the mirror flag is set for this entry.
if entry["mirror"]:
for i in range(len(kdbProduct)):
kdbProduct.positions[i] += 2.0 * (kdbmin.positions[0] - kdbProduct.positions[i])
# Map the mode.
if mode is not None:
modeTemp = reactantrot.positions * 0.0
for m in mapping:
modeTemp[mapping[m]] = mode[m]
try:
modeTemp /= numpy.linalg.norm(modeTemp)
except FloatingPointError:
mode = None
# Perform the saddle transformation.
kdbSaddle.positions -= translation1
kdbSaddle.positions = numpy.dot(kdbSaddle.positions, Rmat)
kdbSaddle.positions += translation2
# Perform the mode transformation.
if mode is not None:
modeTemp = numpy.dot(modeTemp, Rmat)
# Perform the product transformation.
kdbProduct.positions -= translation1
kdbProduct.positions = numpy.dot(kdbProduct.positions, Rmat)
kdbProduct.positions += translation2
# Create the suggestion.
suggestion = reactant.copy()
sugproduct = reactant.copy()
for m in mapping:
if mapping[m] not in suggestion.constraints[0].index:
suggestion.positions[mapping[m]] = kdbSaddle.positions[m]
if mapping[m] not in sugproduct.constraints[0].index:
sugproduct.positions[mapping[m]] = kdbProduct.positions[m]
# Check for duplicates.
if nodupes:
isdupe = False
for unique in uniques:
pan = self.per_atom_norm(unique.positions - suggestion.positions, suggestion.cell, ibox)
if max(pan) <= dc:
isdupe = True
break
if isdupe:
continue
uniques.append(suggestion.copy())
# Rebox.
if REBOX_SUGGESTIONS:
suggestion.positions = self.pbc(suggestion.positions, suggestion.cell)
sugproduct.positions = self.pbc(sugproduct.positions, sugproduct.cell)
# Write suggestion.
if mode is not None:
self.output_query(outputdir, numMatches, suggestion, sugproduct, modeTemp)
else:
self.output_query(outputdir, numMatches, suggestion, sugproduct)
entryMatches += 1
numMatches += 1
#print "%10d" % entryMatches
print "KDB matches: ", entryMatches
|
zybbigpy/VaspCZ | sourcecode/utils.py | <reponame>zybbigpy/VaspCZ
import os,sys
import VaspCZ.zzdlib as zzd
import subprocess
# Absolute path of the running Python interpreter; used to spawn helper scripts.
python = sys.executable
# Directory containing this file.
current_py_folder = os.path.dirname(os.path.abspath(__file__))
# Resolve the VaspCZ 'sourcecode' directory whether this file lives inside it
# or one level above it.  (Replaces the former `[a if c else b][0]` idiom with
# a plain conditional expression — identical result.)
VaspCZ_path = (os.path.dirname(current_py_folder)
               if 'sourcecode' in current_py_folder else current_py_folder) + '/sourcecode'
def gui_string(title, content, footnote=None, iptnote='', mode='select', isprint=False):
    """
    Build a fixed-width (60-character) text GUI panel and return it as a string.

    :param title: panel title, centered on the second line
    :param content: dict {key: text}; key 0 is rendered last (usually 'Exit')
    :param footnote: optional footnote line, if any
    :param iptnote: hint appended to the trailing 'Input' prompt (e.g. a default)
    :param mode: 'select' renders numbered selectable entries; any other value
                 renders wrapped descriptive paragraphs
    :param isprint: when True, print debug info while wrapping lines
    :return: the assembled panel text, ending with the input prompt
    """
    gui = f'|{"":=<60}|\n' # top border
    gui += f'|{title:^60}|\n' # title line
    gui += f'|{"":-<60}|\n' # separator
    if mode == 'select': # numbered, selectable entries
        for key in list(content.keys())[1::]: # content lines (key 0 deferred)
            cont = content[key]
            gui += f'|{"":<8}({key}) {cont:<48}|\n'
    else: # descriptive entries: wrap each text over 56/60-char lines
        for key in list(content.keys())[1::]: # content lines
            cont = content[key]
            # First wrapped line is 56 chars wide (indented), the rest 60.
            len_list = [56, 60, 60, 60, 60, 60, 60, 60]
            for j in range(len(len_list)):
                line = cont[0: len_list[j]]
                cont = cont[len_list[j]::]
                if isprint:
                    print('line:', line)
                    print('cont:', cont)
                if j == 0:
                    line = f'{line:<56}' if len(line) < 56 else line
                else:
                    line = f'{line:<60}' if len(line) < 60 else line
                if line[:56] == f'{"":<56}':
                    break
                elif line[-1] != ' ': # last char of the line is mid-word
                    if isprint:
                        print('linexx', line, len(line), line[-1])
                    if line[-2] == ' ':
                        # single stray character: push it to the next line
                        cont = line[-1] + cont
                        line = line[:-1]
                    else:
                        cont = ' ' if cont == '' else cont
                        if cont[0] == ' ':
                            pass
                        else:
                            # split mid-word: move last char down and hyphenate
                            cont = line[-1] + cont
                            line = line[:-1] + '-'
                if j == 0:
                    gui += f'|{"":<4}{line:<56}|\n'
                else:
                    gui += f'|{line:<60}|\n'
    # Render key 0 (typically 'Exit') as the last content line.
    key = list(content.keys())[0]
    gui += f'|{"":<8}({key}) {content[key]:<48}|\n'
    if footnote is not None: # optional footnote
        gui += f'|{"":-<60}|\n' # separator before footnote
        gui += f'|{footnote:>60}|\n'
    gui += f'|{"":=<60}|\n' # bottom border
    gui += f'Input{iptnote}: ' # input prompt
    return gui
def zip_content(content):
    """Map each item of *content* to its positional index: {0: item0, 1: item1, ...}."""
    return {index: item for index, item in enumerate(content)}
def deal_with_gen_pot():
    """Interactive prompt: generate a POTCAR for the elements found in POSCAR.

    Accepts '' (use POSCAR elements + PBE), '0' (exit), or
    '[ele1, ele2] TYPE' to override elements and pseudopotential type.
    """
    while True:
        elements = zzd.Vasp.decode_POSCAR(zzd.File.openFile('POSCAR', 'r'))[1]
        path = zzd.File.Vasp_pseudo_path()+'/PseudoPotential'
        content_os_gen_pot = zip_content([
            'Exit',
            f'The POTCAR will be generated according to the elements in POSCAR (current {elements}) from path: "{path}", default type is "PBE".',
            f'Change settings by input like: [ele1, ele2] PBE'])
        ipt11 = input(gui_string(
            title='Generate POTCAR', content=content_os_gen_pot, mode='string'))
        if ipt11 == '0':
            break
        elif ipt11 == '':
            # keep POSCAR elements, default pseudopotential type
            elements = elements
            psudotype = 'PBE'
        else:
            try:
                # expected form: "[ele1, ele2] TYPE"
                elements = ipt11.split(']')[0].strip('[').split(',')
                psudotype = ipt11.split(']')[1].strip()
            except Exception as e:
                raise NameError(f'deal_with_gen_pot: 输入为{ipt11}. 输入格式错误。')
        psudotype = 'PBE' if psudotype == '' else psudotype
        print(f'生成元素:{elements}的贋势,类型:{psudotype}')
        zzd.Vasp.generate_POTCAR(elements=elements, pseudotype=psudotype)
        break
def deal_with_gen_kpoints():
    """Interactive prompt: generate a KPOINTS file.

    Accepts '' (5 5 5 Monkhorst), '0' (exit), or 'kx ky kz TYPE' where TYPE
    starts with M (Monkhorst) or G (Gamma).
    """
    while True:
        content_os_gen_kp = zip_content([
            'Exit',
            f'The KPOINTS will be generated in vector: "5 5 5" with Monkhorst type.',
            f'Change settings by input like: 5 5 5 M'])
        ipt = input(gui_string(
            title='Generate KPOINTS', content=content_os_gen_kp, mode='string'))
        if ipt == '0':
            break
        elif ipt == '':
            vector = '5 5 5'
            kptype = 'Monkhorst'
        else:
            try:
                vector = f'{ipt.split()[0]} {ipt.split()[1]} {ipt.split()[2]}'
                kptype = ipt.split()[3]
            except Exception as e:
                raise NameError(f'deal_with_gen_pot: 输入为{ipt}. 输入格式错误。')
        # Expand M/G abbreviations to the full type name.
        kptype = 'Monkhorst' if kptype[0] == 'M' else kptype
        kptype = 'Gamma' if kptype[0] == 'G' else kptype
        print(f'生成网格:"{vector}" 的贋势,类型:{kptype}')
        zzd.Vasp.generate_KPOINTS(vector=vector, kptype=kptype)
        break
def deal_with_gen_vasp_sh():
    """Interactive prompt: copy the template Vasp.sh and patch job name,
    nodes and ppn.  Accepts '' (1 node, 12 ppn, 'vaspjob'), '0' (exit),
    or 'nodes ppn jobname'."""
    path = zzd.File.Vaspsh_path()+'/Vasp.sh'
    content = zip_content([
        'Exit',
        f'The Vasp.sh file will be generated from template: {path}',
        f'{"Default nodes:":<20}{"1":>10}',
        f'{"Default ppn:":<20}{"12":>10}',
        f'{"Default job name:":<20}{"vaspjob":>10}',
        f'Change settings by input like: 1 12 jobname'
        ])
    ipt = input(gui_string(title='Generate Vasp.sh', content=content, mode='string'))
    if ipt == '0':
        return None
    elif ipt == '':
        nodes, ppn, jobname = ('1', '12', 'vaspjob')
    else:
        try:
            nodes, ppn, jobname = ipt.split()
        except Exception as e:
            raise NameError(f'{e} deal_with_Vasp_sh error, 输入错误')
    print(f'生成Vasp.sh nodes: {nodes} ppn: {ppn} jobname: {jobname}')
    os.system(f'cp {path} .')
    # Rewrite the PBS directives in the copied template.
    data = zzd.File.openFile('Vasp.sh', 'r')
    data = zzd.File.substituteData(data=data, keywords='#PBS -N', newline=f' #PBS -N {jobname}\n')
    data = zzd.File.substituteData(data=data, keywords='#PBS -l nodes', newline=f' #PBS -l nodes={nodes}:ppn={ppn}\n')
    zzd.File.openFile('Vasp.sh', 'w', data=data)
def deal_with_vasp_keep_inputs():
    """Interactive prompt: keep only the VASP input files (plus any extra
    files the user lists) in the current directory; everything else is
    removed by zzd.Vasp.keepInputs (after confirmation)."""
    content = zip_content([
        'Exit',
        'The INCAR, POSCAR, POTCAR, KPOINTS and Vasp.sh will be kept in current directory while other files will be removed',
        'Add files need to keep input like: file1, file2'
        ])
    ipt = input(gui_string(title='Vasp Keep Inputs', content=content, mode='string'))
    if ipt == '0':
        return None
    elif ipt == '':
        addfile = []
    else:
        # comma-separated list of additional files to preserve
        addfile = ipt.strip().split(',')
    zzd.Vasp.keepInputs(addfile=addfile, workdir='./', need_confirm=True)
def deal_with_check_results():
    """Interactive prompt: run VaspCheckResults.py with the chosen -func
    level (1 job status, 2 current results, 3 all; default 3)."""
    content = zip_content([
        'Exit',
        'Only Job Status',
        'Current Results',
        'All'
        ])
    ipt = input(gui_string('Vasp Check Results', content=content, mode='select', iptnote='(default=3)'))
    if ipt == '0':
        return None
    elif ipt == '':
        ipt = '3'
    print(VaspCZ_path)
    subprocess.call(f'{python} {VaspCZ_path}/VaspCheckResults.py -func={ipt}', shell=True)
def deal_with_neb_opt_sta():
    """Interactive menu for the NEB optimization -> static workflow.

    Choice 1 (default): run VaspOpt-Sta.py in the current directory.
    Choice 2: queue static runs in ini/ and fin/ after ini/Opt and fin/Opt
    optimizations finish, with defaults parsed from ini/Opt/Vasp.sh.
    """
    content = zip_content([
        'Back',
        'Opt-Sta (Current directory)',
        'INI FIN Opt-Sta',
        ])
    while True:
        ipt = input(gui_string('NEB Optimization-Static Calculation', content=content, iptnote='(default=1)'))
        if ipt == '0':
            break
        elif ipt == '1' or ipt == '':
            subprocess.call(f'{python} {VaspCZ_path}/VaspOpt-Sta.py', shell=True)
            exit()
        elif ipt == '2':
            # Parse job name / nodes / ppn defaults out of ini/Opt/Vasp.sh.
            data_Vaspsh = zzd.File.openFile('./ini/Opt/Vasp.sh', 'r')
            d_jobname = zzd.File.getLine(data_Vaspsh, '#PBS -N')[0].strip('\n').split()[-1]
            # Replace the last character of the old job name with 'S' (static).
            d_jobname = f'{d_jobname[:-1]}S'
            d_nodes = zzd.File.getLine(data_Vaspsh, '#PBS -l nodes')[0].strip('\n').split()[-1].split(':')[0].split('=')[-1]
            d_ppn = zzd.File.getLine(data_Vaspsh, '#PBS -l nodes')[0].strip('\n').split()[-1].split(':')[-1].split('=')[-1]
            content2 = zip_content([
                'Exit',
                'The static calculations in sub directories ini/ and fin/ will be performed when optimizations in ini/Opt and fin/Opt are finished.',
                'Default nodes and ppn from ini/Opt/Vasp.sh',
                f'Default nodes: {d_nodes:>15}',
                f'Default ppn: {d_ppn:>15}',
                f'Default jobname: {d_jobname:>15}',
                'Change settings by input like: nodes ppn jobname'
                ])
            ipt2 = input(gui_string(
                title='Vasp NEB INI FIN Opt-Sta', content=content2, mode='string'))
            if ipt2 == '0':
                exit()
            elif ipt2 == '':
                nc = f'{d_nodes},{d_ppn}'
                jobname = d_jobname
            else:
                try:
                    nc = f'{ipt2.split()[0]},{ipt2.split()[1]}'
                    jobname = ipt2.split()[2]
                except Exception as e:
                    raise NameError(f'{e} deal_with_neb_opt_sta error, 输入错误')
            subprocess.call(f'{python} {VaspCZ_path}/VaspINFIOpt-Sta.py --nc={nc} --jobname={jobname}', shell=True)
            exit()
        else:
            # Unrecognized input: re-show the menu.
            pass
def deal_with_neb_sta_neb():
    """Interactive prompt: queue the NEB run after the ini/ and fin/ static
    calculations, with the image count estimated from the ini->fin distance
    and PBS defaults taken from ini/Opt/Vasp.sh."""
    data_Vaspsh = zzd.File.openFile('./ini/Opt/Vasp.sh', 'r')
    d_jobname = zzd.File.getLine(data_Vaspsh, '#PBS -N')[0].strip('\n').split()[-1]
    # Replace the last character of the old job name with 'N' (NEB).
    d_jobname = f'{d_jobname[:-1]}N'
    d_nodes = zzd.File.getLine(data_Vaspsh, '#PBS -l nodes')[0].strip('\n').split()[-1].split(':')[0].split('=')[-1]
    d_ppn = zzd.File.getLine(data_Vaspsh, '#PBS -l nodes')[0].strip('\n').split()[-1].split(':')[-1].split('=')[-1]
    dist = zzd.getshellResult('dist.pl ini/CONTCAR fin/CONTCAR')
    dist = eval(dist[-1])
    print('ini和fin中CONTCAR的dist为:{}'.format(dist))
    if dist >= 9:
        print('dist过大,请检查')
        raise NameError(f'初态和末态距离太大,插入态数目大于9,不合理,请检查。')
    else: # floor the image count; if even add one so it ends up odd (0-1.9 -> 1, 2-3.9 -> 3, 4-5.9 -> 5)
        image = int(dist / 0.8)
        if image % 2 == 0: # even
            image = image + 1
    content = zip_content([
        'Exit',
        'The NEB calculation will be performed when static calculations in ini/ and fin/ are finished.',
        'The INCAR of NEB will be generated automatically fitted to NEB calculation based on ini/Opt/INCAR, The images is approximately equal to (distance between ini/CONTCAR and fin/CONTCAR)/0.8',
        'Default nodes and ppn from ini/Opt/Vasp.sh',
        f'Default nodes: {image:>15}',
        f'Defalut ppn: {d_ppn:>15}',
        f'Defalut jobname: {d_jobname:>15}',
        'Change settings by input like: nodes ppn jobname'
        ])
    ipt = input(gui_string(title='Vasp NEB Sta-NEB', content=content, mode='string'))
    if ipt == '0':
        return None
    elif ipt == '':
        # default: one node per image
        nc = f'{image},{d_ppn}'
        jobname = d_jobname
    else:
        try:
            nc = f'{ipt.split()[0]},{ipt.split()[1]}'
            jobname = ipt.split()[2]
        except Exception as e:
            raise NameError(f'{e} deal_with_neb_sta_neb error, 输入错误')
    subprocess.call(f'{python} {VaspCZ_path}/VaspINFISta-NEB.py --nc={nc} --jobname={jobname}', shell=True)
def deal_with_neb_vibration_analysis():
    """Interactive prompt: queue vibration analysis (attempt frequencies)
    after the NEB run finishes.  Accepts '' (1 node, 8 ppn, no final state),
    '0' (exit), or 'nodes ppn True|False'."""
    content = zip_content([
        'Exit',
        'The vibration analysis will be preformed after NEB calculation is Done.',
        'The attempt frequency of migration atom in initial, saddle and finnal state will be calculated.',
        'Default nodes: 1',
        'Default ppn: 8',
        f'Defacult cal fin: False (do not include finnal state)',
        'Change settings by input like: nodes ppn True(or False)'
        ])
    ipt = input(gui_string(
        title='NEB Vibration Analysis',
        content=content, mode='string'
        ))
    if ipt == '0':
        return None
    elif ipt == '':
        nodes, ppn, include = ('1', '8', 'False')
    else:
        try:
            nodes, ppn, include = ipt.split()[0], ipt.split()[1], ipt.split()[2]
        except Exception as e:
            raise NameError(f'{e} deal_with_neb_vibration_analysis error, 输入错误')
    subprocess.call(
        f'{python} {VaspCZ_path}/VaspVibAna_forNEB.py --nodes={nodes} --ppn={ppn} --include_fin={include}', shell=True)
def deal_with_neb_check_results():
    """Interactive menu: run NEBCheck1.1.py with the chosen --func level
    (1 status, 2 results, 3 all [default], 4 barrier at a step)."""
    content = zip_content([
        'Back',
        'Only NEB Status',
        'NEB Results',
        'All',
        'NEB Barrier in typical step'
        ])
    while True:
        ipt = input(gui_string(title='NEB Check Results', content=content, mode='select', iptnote="(1/2/[3]/4)"))
        if ipt == '0':
            break
        elif ipt == '':
            ipt = '3'
            subprocess.call(f'{python} {VaspCZ_path}/NEBCheck1.1.py --func={ipt}', shell=True)
            exit()
        elif ipt in ['1', '2', '3', '4']:
            subprocess.call(f'{python} {VaspCZ_path}/NEBCheck1.1.py --func={ipt}', shell=True)
            exit()
        else:
            # Invalid selection: report and re-show the menu.
            print(f'输入:{ipt},选择功能错误,请正确输入')
            pass
def deal_with_test_kpoints():
    """Interactive prompt: launch a KPOINTS convergence test via
    VaspTestKPOINT.py.  Accepts '' (defaults below), '0' (exit), or
    'prefix nodes ppn k_mesh_list'."""
    content = zip_content([
        'Exit',
        'The KPOINT test will be performed when the input files in current dir (INCAR, POSCAR, POTCAR, KPOINTS)',
        'Default setting jobname prefix: ktest_',
        'Default setting nodes: 1',
        'Default setting ppn: 8',
        'Default setting k_mesh: 111,333,555,777,999',
        'Change settings by input like: ktest_ 1 8 111,333,555,777,999'
        ])
    ipt = input(gui_string(title='Vasp KPOINTS Test', content=content, mode='string'))
    if ipt == '0':
        return None
    elif ipt == '':
        prefix, nodes, ppn, k_mesh = ('ktest_', '1', '8', '111,333,555,777,999')
    else:
        try:
            prefix, nodes, ppn, k_mesh = ipt.split()[0], ipt.split()[1], ipt.split()[2], ipt.split()[3]
        except Exception as e:
            raise NameError(f'{e} deal_with_test_kpoints error, 输入错误')
    subprocess.call(f'{python} {VaspCZ_path}/VaspTestKPOINT.py --jobname_prefix={prefix} --nodes={nodes} --ppn={ppn} --k_mesh={k_mesh}', shell=True)
def deal_with_test_encut():
    """Interactive prompt: launch an ENCUT convergence test via VaspTestENCUT.py.

    Accepts '' (defaults below), '0' (exit), or up to four tokens
    'prefix nodes ppn encut_list'; missing trailing tokens fall back to the
    defaults.  Raises NameError when nodes/ppn are not integers.
    """
    content = zip_content([
        'Exit',
        'The ENCUT test will be performed when the five input in current dir (INCAR, POSCAR, POTCAR, KPOINTS)',
        'Default setting jobname prefix: ENCUT_',
        'Default setting nodes: 1',
        'Default setting ppn: 8',
        'Default setting ENCUTs: 200,250,300,350,400,450,500,550,600,650,700',
        'Change settings by input like: ENCUT_ 1 8 200,250,300,350,400,450,500,550,600,650,700'
        ])
    ipt = input(gui_string(title='Vasp ENCUT Test', content=content, mode='string'))
    # BUGFIX: prefix default was 'k_test', contradicting the 'ENCUT_' shown above.
    default_params = ('ENCUT_', '1', '8', '200,250,300,350,400,450,500,550,600,650,700')
    if ipt == '0':
        return None
    elif ipt == '':
        params = default_params
    else:
        # BUGFIX: was `ipt.split(i)` (int separator -> TypeError) and
        # `len(ipt.split(), 4)` (len() takes one argument).  Take the
        # user-supplied leading tokens and fill the rest with defaults.
        tokens = ipt.split()[:4]
        params = tokens + [default_params[i] for i in range(len(tokens), 4)]
        try:
            # Validate with int() instead of eval() on raw user input.
            int(params[1])  # nodes
            int(params[2])  # ppn
        except Exception as e:
            raise NameError(f'{e} deal_with_test_encut error, 输入错误')
    prefix, nodes, ppn, ENCUTs = params
    subprocess.call(
        f'{python} {VaspCZ_path}/VaspTestENCUT.py -jb={prefix} -nd={nodes} -np={ppn} -EN={ENCUTs}', shell=True)
|
zybbigpy/VaspCZ | sourcecode/VaspGetNELM.py | <filename>sourcecode/VaspGetNELM.py<gh_stars>10-100
#!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
import sys
sys.path.append('/home/zhangzhengde/bin/pythonlib/')
import zzdlib
import os
# Print every electronic-step summary line from ./log together with the
# ionic-step label that follows it.
data_log = zzdlib.File.openFile('./log','r')
print('{:^5} {}'.format(' ',data_log[12].strip('\n')))
for i in range(len(data_log)):
    elec = data_log[i].split()[0].strip(':')
    if elec == 'RMM' or elec == 'DAV': # line starts with RMM/DAV and the next line does not:
        try:
            elec2 = data_log[i+1].split()[0].strip(':')
            if (elec2 == 'RMM' or elec2 == 'DAV') == False:
                ions = data_log[i+1].split()[0] +data_log[i+1].split()[1].strip('=')
                print('{:^5} {}'.format(ions,data_log[i].strip('\n')))
        except:
            # NOTE(review): bare except — any parse failure (or the last line
            # of the file) is reported as a still-running step.
            print('{:^5} {}'.format('run',data_log[i].strip('\n')))
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/routes.py | <reponame>zybbigpy/VaspCZ<filename>vtstscripts-939/kdb/routes.py
from bottle import route, post, run, request
import remote_insert, remote_query, remote_db
import pickle
# Interface and port the bottle server binds to (0.0.0.0 = all interfaces).
host = '0.0.0.0'
port = 8080
@post('/account_create')
def create_account():
    """Create a new kdb user account from the POSTed form fields."""
    email = request.forms.get('email')
    password = request.forms.get('password')
    first = request.forms.get('first')
    last = request.forms.get('last')
    db = remote_db.RemoteDB()
    output = db.add_user(first, last, email, password)
    return output
@post('/insert')
def insert():
    """Insert a (reactant, saddle, product[, mode]) process for an
    authenticated user; returns the insert result as a string."""
    db = remote_db.RemoteDB()
    if not db.is_user(request.forms.get('email'),request.forms.get('password')):
        return "invalid account info"
    # SECURITY: pickle.loads on client-supplied form data can execute
    # arbitrary code — only expose this endpoint to trusted clients.
    reactant = pickle.loads(request.forms.get('reactant'))
    saddle = pickle.loads(request.forms.get('saddle'))
    product = pickle.loads(request.forms.get('product'))
    try:
        mode = pickle.loads(request.forms.get('mode'))
    except:
        # mode is optional; any unpickling failure means "no mode".
        mode = None
    insert_class = remote_insert.RemoteInsert()
    insert_class.email = request.forms.get('email')
    insert_class.password = request.forms.get('password')
    output = insert_class.insert(reactant, saddle, product, mode)
    return str(output)
@post('/query')
def query():
    """Run a kdb query for the POSTed reactant and return the pickled
    result dict.  SECURITY: pickle.loads on client data — trusted clients only."""
    reactant = pickle.loads(request.forms.get('reactant'))
    query_class = remote_query.RemoteQuery()
    query_class.query(reactant)
    output = pickle.dumps(query_class.return_dict)
    return output
def start():
    """Run the bottle web server on the configured host/port (blocking)."""
    run(host=host, port=port)
if __name__ == "__main__":
    start()
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/__init__.py | import aselite
import bottle
import config
import kdb
import kdbinsert
import kdbquery
import local_client
import local_db
import local_insert
import local_query
import local_update
import remote_client
import remote_config
import remote_db
import remote_initialize
import remote_insert
import remote_query
import remote_update
import routes
import server_config |
zybbigpy/VaspCZ | sourcecode/VaspINFISta-NEB.py | #!/home/zhangzhengde/bin/bin/python3
import sys
sys.path.append('/home/zhangzhengde/bin/pythonlib')
import os
import VaspCZ.zzdlib as zzdlib
import argparse
# Build and submit a NEB job from finished ini/ and fin/ static runs:
# estimate the image count from the ini->fin distance, interpolate images,
# adapt INCAR and Vasp.sh, then submit.
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--nc', type=str, default='0,0')        # "nodes,ppn"; 0 means auto
parser.add_argument('--EMER', type=str, default='default')  # toggle PBS EMERGENCY queue
parser.add_argument('--jobname', type=str, default='vaspNEB')
args = parser.parse_args()
print(f'脚本运行,参数:--nc={args.nc} --EMER={args.EMER} --jobname={args.jobname}')
dist = zzdlib.getshellResult('dist.pl ini/CONTCAR fin/CONTCAR')
dist = eval(dist[-1])
print('ini和fin中CONTCAR的dist为:{}'.format(dist))
if dist >= 9:
    print('dist过大,请检查')
    exit()
else: # floor the image count; if even add one so it is odd (0-1.9 -> 1, 2-3.9 -> 3, 4-5.9 -> 5)
    image = int(dist/0.8)
    if image % 2 == 0: # even
        image = image + 1
os.system('nebmake.pl ini/CONTCAR fin/CONTCAR '+str(image)) # interpolate the intermediate images
os.system('cp ini/OUTCAR 00/')
os.system('cp fin/OUTCAR 0'+str(image+1)+'/')
# copy input files
os.system('cp ini/Opt/INCAR .')
os.system('cp ini/Opt/KPOINTS .')
os.system('cp ini/Opt/POTCAR .')
# os.system('cp ini/Opt/Vasp.sh .')
# adapt INCAR for NEB
data_INCAR = zzdlib.File.openFile(path='./INCAR',mode='r')
if data_INCAR[0] == 'SYSTEM=Opt\n':
    data_INCAR[0] = 'SYSTEM=NEB\n'
data_INCAR = zzdlib.File.substituteData(data=data_INCAR,keywords='IBRION', newline='IBRION=1')
data_INCAR.append('NFREE=2\n')
data_INCAR.append('#neb\n')
data_INCAR.append('IMAGES='+str(image)+'\n')
data_INCAR.append('SPRING=-5\n')
data_INCAR.append('LCLIMB=.TRUE.\n')
data_INCAR.append('ICHAIN=0\n')
zzdlib.File.openFile(path='./INCAR' ,mode='w', data=data_INCAR)
# adapt Vasp.sh
vaspsh_path = zzdlib.File.Vaspsh_path()
os.system(f'cp {vaspsh_path}/Vasp.sh .')
data_Sh = zzdlib.File.openFile('./Vasp.sh','r')
# oldname = zzdlib.File.getLine(data_Sh,'#PBS -N')[0].strip('\n').split()[-1] #获取旧任务名
# jobname = oldname[:-2] +'NEB'
jobname = args.jobname
data_Sh = zzdlib.File.substituteData(data_Sh, '#PBS -N', ' #PBS -N '+jobname)
# nodes defaults to one per image when not given on the command line
if args.nc.split(',')[0] == '0':
    nodes = str(image)
else:
    nodes = args.nc.split(',')[0]
if args.nc.split(',')[1] == '0':
    ppn = '8'
else:
    ppn = args.nc.split(',')[1]
data_Sh = zzdlib.File.substituteData(data_Sh, '#PBS -l nodes', ' #PBS -l nodes='+nodes+':ppn='+ppn)
EMER = zzdlib.File.getLine(data_Sh,'#PBS -q EMERGENCY')[0] # current emergency-queue state
if EMER == 'Not Match':
    isEMER = False
    if args.EMER == 'yes' or args.EMER == 'y':
        data_Sh.insert(4, ' #PBS -q EMERGENCY\n')
else:
    isEMER = True
    if args.EMER == 'no' or args.EMER == 'n':
        data_Sh.remove(' #PBS -q EMERGENCY\n')
zzdlib.File.openFile('./Vasp.sh', 'w', data_Sh)
# submit the job
# zzdlib.Vasp.checkInputs()
zzdlib.Vasp.check_and_qsub()
# if zzdlib.Vasp.checkInputs():
#     usrsel=input('前检查无问题,是否要提交任务(默认yes):')
#     if usrsel == 'no' or usrsel == 'n':
#         print('未提交任务')
#     else:
#         os.system('qsub Vasp.sh')
# else:
#     print('前检查有问题,请人工检查')
|
zybbigpy/VaspCZ | vtstscripts-939/getangle.py | #!/usr/bin/env python
import time
import sys
import numpy as np
from string import Template
import fnmatch
import os
import re
import datetime
## READ IN VASP CON FILE ##
# Read the header of a VASP POSCAR/CONTCAR-style file (argv[1]) and report
# the angles between lattice vectors, the face areas and the cell volume.
# Python 2 script.
filein = sys.argv[1]
with open(filein, 'r') as f:
    first_line = f.readline()   # comment / element symbols line
    second_line = f.readline()  # scaling factor
    third_line = f.readline()   # lattice vector 1
    fourth_line = f.readline()  # lattice vector 2
    fifth_line = f.readline()   # lattice vector 3
    foo = first_line[0]
    elementSymbol = first_line.split()
    a = second_line.split()
    foo_v1 = third_line.split()
    foo_v2 = fourth_line.split()
    foo_v3 = fifth_line.split()
#Assign lattice vectors to float arrays
v1 = np.array([float(foo_v1[0]), float(foo_v1[1]), float(foo_v1[2])])
v2 = np.array([float(foo_v2[0]), float(foo_v2[1]), float(foo_v2[2])])
v3 = np.array([float(foo_v3[0]), float(foo_v3[1]), float(foo_v3[2])])
#Calcualte cross product of v1, v2 and save as surface area
vol12 = np.cross(v2, v1)
vol23 = np.cross(v2, v3)
vol13 = np.cross(v3, v1)
SA12 = np.linalg.norm(vol12)
SA23 = np.linalg.norm(vol23)
SA13 = np.linalg.norm(vol13)
#volume of cell (scaled by the cubed lattice constant)
# NOTE(review): dot(cross(v2, v1), v3) is a *signed* volume — it is negative
# for a right-handed vector triple; confirm the intended sign convention.
vol = np.dot(vol12, v3)*float(a[0])*float(a[0])*float(a[0])
# Angles between each pair of lattice vectors via the cosine formula.
cosang12 = np.dot(v1, v2)/np.linalg.norm(v1)/np.linalg.norm(v2)
cosang13 = np.dot(v1, v3)/np.linalg.norm(v1)/np.linalg.norm(v3)
cosang23 = np.dot(v2, v3)/np.linalg.norm(v2)/np.linalg.norm(v3)
rad12 = np.arccos(cosang12)
rad13 = np.arccos(cosang13)
rad23 = np.arccos(cosang23)
ang12 = np.degrees(rad12)
ang13 = np.degrees(rad13)
ang23 = np.degrees(rad23)
print "Angle between v1 and v2: %f" % ang12
print "Angle between v1 and v3: %f" % ang13
print "Angle between v2 and v3: %f" % ang23
print "Surface Area of side v1, v2: %f" % SA12
print "Surface Area of side v1, v3: %f" % SA13
print "Surface Area of side v2, v3: %f" % SA23
print "Volume: %f" % vol
|
zybbigpy/VaspCZ | sourcecode/VaspKeepInputs.py | <gh_stars>10-100
#!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
import os
import sys
# Keep only the standard VASP input files (plus any extra file names given on
# the command line) in the current directory; everything else is deleted.
# WARNING: this runs `rm *` in the current working directory — destructive.
Files = 'INCAR,POSCAR,POTCAR,KPOINTS,Vasp.sh'
Flist = Files.split(',')
for i in range(1,len(sys.argv)):
    Flist.append(sys.argv[i])
print(Flist)
dir = 'KeepInputsDir'
os.system('mkdir '+dir)
# Stash the files to keep, wipe the directory, then restore them.
for i in range(len(Flist)):
    os.system('cp ./'+Flist[i]+' ./'+dir+'/')
os.system('rm *')
os.system('cp ./'+dir+'/* ./')
os.system('rm -rf '+dir)
|
zybbigpy/VaspCZ | vtstscripts-939/kdb/remote_query.py | from kdbquery import KdbQuery
from optparse import OptionParser
import sys
from aselite import read_any
from remote_db import RemoteDB
from server_config import *
class RemoteQuery(KdbQuery):
    """KdbQuery variant that reads entries from the remote database and
    collects suggestions in memory (self.return_dict) instead of writing
    them to disk."""
    def __init__(self):
        # numMatches -> [suggestion, sugproduct(, modeTemp)]
        self.return_dict = {}
    # overloads KdbQuery.query_db()
    def query_db(self, **args):
        """Return (entries, name) for the reactant from the remote DB."""
        db = RemoteDB()
        name = db.get_name(args['reactant'].get_chemical_symbols())
        entries = db.get_process(name)
        return entries, name
    def output_query(self, outpurdir, numMatches, suggestion, sugproduct, modeTemp=None):
        """Store one suggestion in self.return_dict instead of writing files.
        NOTE(review): 'outpurdir' (sic) is unused here; kept for the base-class
        call signature."""
        self.return_dict[numMatches] = [suggestion, sugproduct]
        if modeTemp is not None:
            self.return_dict[numMatches].append(modeTemp)
if __name__ == "__main__":
# Parse command line options.
parser = OptionParser(usage = "%prog [options] reactant.con")
parser.add_option("--nodupes", dest = "nodupes", action="store_true",
help = "detect and remove duplicate suggestions (can be expensive)")
options, args = parser.parse_args()
# Make sure we get the reactant file name.
if len(args) < 1:
parser.print_help()
sys.exit()
# Load the reactant con file.
reactant = read_any(args[0])
db = RemoteDB()
params = db.get_params()
query_sub_class = RemoteQuery()
query_sub_class.query(reactant, "./kdbmatches_remote", dc=params['dc'], nf=params['nf'], nodupes=options.nodupes, kdbname=db_name) |
zybbigpy/VaspCZ | vtstscripts-939/kdb/remote_config.py | <reponame>zybbigpy/VaspCZ<gh_stars>10-100
import getpass
# remote config is used to create new accounts on the database
class RemoteConfig():
    """Interactive helper that gathers new-account credentials for the
    remote kdb (Python 2: uses raw_input)."""
    def config(self):
        """Prompt for first/last name, email and password; return them as a
        list [first, last, email, password]."""
        first = raw_input('Please enter your first name: ')
        last = raw_input('Please enter your last name: ')
        address = raw_input('Please enter your email address: ')
        secret = getpass.getpass('Please enter your password (text will not show up): ')
        return [first, last, address, secret]
    def is_yes(self, string):
        """Return True when *string* contains a 'y' (case-insensitive)."""
        return 'y' in string.lower()
zybbigpy/VaspCZ | sourcecode/VaspNEBCheckRMS.py | <filename>sourcecode/VaspNEBCheckRMS.py
import os
import VaspCZ.zzdlib as zzd
def VaspNEBCheckRMS():
    """Print, for every ionic step, the RMS force of each intermediate NEB image
    (grep'd from 01/OUTCAR .. NN/OUTCAR) together with their sum.

    Must be run inside a NEB directory (one containing '00' and 'INCAR');
    otherwise the program exits. Fixed: the old code accepted up to 9 images
    but only ever reported images 01-03; all IMAGES are now handled.
    """
    print('CheckNEBRMS is running...')
    if '00' not in os.listdir() or 'INCAR' not in os.listdir():
        print('不在NEB目录,退出程序')
        exit()
    data_INCAR = zzd.File.openFile('./INCAR')
    data_log = zzd.File.openFile('./log')
    # number of intermediate images, e.g. 'IMAGES=3' in INCAR
    image = zzd.File.getLine(data_INCAR, 'IMAGES')[0].split('=')[-1].strip('\n')
    n_images = int(image)
    # number of completed ionic steps: first field of the last 'F=' line in log
    ionstep = int(zzd.File.getAllline(data_log, 'F=')[-1].split()[0])
    # RMS lines from each image's OUTCAR (one list of lines per image)
    rms = [zzd.getshellResult(f'grep RMS {i:02d}/OUTCAR') for i in range(1, n_images + 1)]
    header = '{:^4}'.format('Step')
    header += ''.join('{:^10}'.format(f'{i:02d}-RMS') for i in range(1, n_images + 1))
    header += '{:^10}'.format('Sum')
    print(header)
    for step in range(ionstep):
        # 5th field of the step-th RMS line is the RMS force value
        values = [float(rms[i][step].split()[4]) for i in range(n_images)]
        row = '{:>4}'.format(step + 1)
        row += ''.join('{:>10}'.format(rms[i][step].split()[4]) for i in range(n_images))
        row += '{:>10.6f}'.format(sum(values))
        print(row)
# Allow running this module directly as a script.
if __name__ == '__main__':
    VaspNEBCheckRMS()
|
zybbigpy/VaspCZ | othercode/get_Sb_forfcc.py | import os
import subprocess
import sys
sys.path.append(os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode'))
import zzdlib
import numpy as np
import VaspCZ
pys_path = os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode')
def decode_vib_frequency(data):
    """Extract vibration frequencies (in THz) from grep'd OUTCAR lines.

    data: list of text lines. Every line containing 'THz' contributes the
    first token after the last '=' (the THz value), rounded to 4 decimal
    places. Returns a 1-D numpy array of floats.
    """
    frequencies = [
        float('{:.4f}'.format(float(line.split('=')[-1].split()[0])))
        for line in data
        if 'THz' in line
    ]
    return np.array(frequencies)
def grep_fre(path):
    """Grep the 'THz' lines out of *path*/OUTCAR and return the vibration
    frequencies as a numpy array (see decode_vib_frequency)."""
    grep_output = zzdlib.getshellResult('grep THz {}/OUTCAR'.format(path))
    return decode_vib_frequency(grep_output)
def run_Sb(ele):
    """Compute and print the vacancy-solute binding entropy Sb (in units of kB)
    for solute *ele* in fcc Fe from vibrational frequencies:
    Sb = ln(item1 * item3 / (item2 * item4)).

    NOTE(review): the exponents (12 for 1nn shell, 6 for 2nn shell, 10 for the
    pure reference) encode the neighbour/site counts assumed by this study's
    entropy model — confirm against the underlying method/paper.
    """
    # 1nn/2nn shell frequencies around the bare vacancy
    vac_1nn = grep_fre(f'{ele}-Energies/Fe107V1/vib_analysis/1nn')
    vac_2nn = grep_fre(f'{ele}-Energies/Fe107V1/vib_analysis/2nn')
    print(f'vac_1nn: {vac_1nn} vac_2nn: {vac_2nn}')
    item1 = np.power(np.prod(vac_1nn), 12) * np.power(np.prod(vac_2nn), 6)
    pure = grep_fre(f'{ele}-Energies/Fe108/vib_analysis')
    print(f'pure: {pure}')
    item2 = np.power(np.prod(pure), 10)  # exponent = assumed number of perturbed sites
    # solute cell: solute site plus its 1nn/2nn shells (directory names are coordinates)
    sol = grep_fre(f'{ele}-Energies/Fe107{ele}1/vib_analysis/335050')  # solute position 335050
    sol_1nn = grep_fre(f'{ele}-Energies/Fe107{ele}1/vib_analysis/505066')
    sol_2nn = grep_fre(f'{ele}-Energies/Fe107{ele}1/vib_analysis/665050')
    print(f'sol: {sol} sol_1nn: {sol_1nn} sol_2nn: {sol_2nn}')
    item3 = np.prod(sol) * np.power(np.prod(sol_1nn), 12) * np.power(np.prod(sol_2nn), 6)
    # solute + vacancy cell: all computed modes (glob over the vib_analysis subdirs)
    vs = grep_fre(f'{ele}-Energies/Fe106{ele}1V1/vib_analysis/*')
    print(vs.shape)
    item4 = np.prod(vs)
    inner = item1 * item3 / item2 / item4
    Sb = np.log(inner)
    print(f'item1: {item1}\nitem2: {item2} \nitem3: {item3} \nitem4: {item4}\ninner: {inner:.4f} Sb: {Sb:.4f}')
def run_Hf_and_Hb(ele):
    """Compute and print the vacancy formation energy Hf and the solute-vacancy
    binding energy Hb for solute *ele* (108-site fcc cells).

    Each check_WARNING_and_Energy call also verifies the relaxation finished
    and prints any WARNING lines from OUTCAR/log.
    """
    base = f'{ele}-Energies'
    # Evaluation order matches the original: solute+vacancy, pure, solute, vacancy.
    energies = {
        name: zzdlib.Vasp.check_WARNING_and_Energy(path=f'{base}/{name}')
        for name in (f'Fe106{ele}1V1', 'Fe108', f'Fe107{ele}1', 'Fe107V1')
    }
    e_solvac = energies[f'Fe106{ele}1V1']
    e_pure = energies['Fe108']
    e_sol = energies[f'Fe107{ele}1']
    e_vac = energies['Fe107V1']
    Hf = e_vac - 107 / 108 * e_pure
    Hb = e_solvac + e_pure - e_sol - e_vac
    print(f'Hf: {Hf:.4f} Hb: {Hb:.4f}')
def run_Sf(ele):
    """Compute and print the vacancy formation entropy Sf (units of kB):
    Sf = ln( prod(v)_pure^18 / [prod(v)_1nn^12 * prod(v)_2nn^6] ).

    NOTE(review): the pure-cell exponent 18 equals 12 (1nn) + 6 (2nn)
    perturbed sites — confirm against the entropy model used.
    """
    vac_1nn = grep_fre(f'{ele}-Energies/Fe107V1/vib_analysis/1nn')
    vac_2nn = grep_fre(f'{ele}-Energies/Fe107V1/vib_analysis/2nn')
    # print(f'vac_1nn: {vac_1nn} vac_2nn: {vac_2nn}')
    item1 = np.power(np.prod(vac_1nn), 12) * np.power(np.prod(vac_2nn), 6)
    pure = grep_fre(f'{ele}-Energies/Fe108/vib_analysis')
    # print(f'pure: {pure}')
    item2 = np.power(np.prod(pure), 18)  # exponent = number of perturbed sites
    inner = item2 / item1
    Sf = np.log(inner)
    print(f'inner: {inner:.4f} Sf: {Sf:.4f}')
def get_w(Hm, v, T=1000):
    """
    Arrhenius jump frequency w = v * exp(-Hm / (kB*T)).

    Hm: migration barrier in eV; v: attempt frequency in THz;
    T: temperature in K (default 1000). Returns w in Hz.
    """
    boltzmann = 1.380649 * 10 ** -23      # kB in J/K
    barrier_joules = Hm * 1.602 * 10 ** -19  # eV -> J
    return v * 10 ** 12 * np.exp(-barrier_joules / (boltzmann * T))
def get_f2(Hm_and_v, T=1000):
    """
    Solute correlation factor for vacancy-mediated diffusion:
        f2 = (2*w1 + 7*w3*F) / (2*w1 + 2*w2 + 7*w3*F)

    Hm_and_v: 2 x 12 array; row 0 = migration barriers Hm (eV), row 1 =
    attempt frequencies v (THz). Column layout used below:
        0 -> w0, 2 -> w1, 4 -> w2, 6/8/10 -> w3 (2nn/3nn/4nn),
        7/9/11 -> w4 (2nn/3nn/4nn)
    NOTE(review): layout inferred from the indexing here and the case order in
    get_Hm_and_v_from_raw (forward/backward pairs per case) — confirm.
    T: temperature in K (default 1000).
    """
    data = Hm_and_v
    w1 = get_w(Hm_and_v[0, 2], Hm_and_v[1, 2], T=T)
    w3_2nn = get_w(data[0, 6], data[1, 6], T=T)
    w3_3nn = get_w(data[0, 8], data[1, 8], T=T)
    w3_4nn = get_w(data[0, 10], data[1, 10], T=T)
    # effective w3/w4: weighted average over the 2nn/3nn/4nn escape paths (2+4+1=7)
    w3 = (2 * w3_2nn + 4 * w3_3nn + w3_4nn) / 7
    w2 = get_w(data[0, 4], data[1, 4], T=T)
    # obtain zeta = w4/w0, the input of the escape factor F
    w0 = get_w(data[0, 0], data[1, 0], T=T)
    w4_2nn = get_w(data[0, 7], data[1, 7], T=T)
    w4_3nn = get_w(data[0, 9], data[1, 9], T=T)
    w4_4nn = get_w(data[0, 11], data[1, 11], T=T)
    w4 = (2 * w4_2nn + 4 * w4_3nn + w4_4nn) / 7
    # print(w4, w0)
    zeta = w4 / w0
    # polynomial fit for the escape factor F(zeta); constants are the fit coefficients
    fenzi = (10 * np.power(zeta, 4) + 180.5 * np.power(zeta, 3) + 927 * np.power(zeta, 2) + 1341 * zeta)
    fenmu = 7 * (2 * np.power(zeta, 4) + 40.2 * np.power(zeta, 3) + 254 * np.power(zeta, 2) + 597 * zeta + 436)
    F = 1 - fenzi / fenmu
    print(f'zeta: {zeta}')
    print(f'F: {F}')
    f2 = (2 * w1 + 7 * w3 * F) / (2 * w1 + 2 * w2 + 7 * w3 * F)
    return f2
def get_Hm_and_v_from_raw():
    """
    Walk the jump-frequency case directories (w0_3nn, w1, w2, w3_2nn, w3_3nn,
    w3_4nn), run NEBCheck.py / VaspVibAna_Result.py in each, and collect the
    forward and backward migration barriers Hm and attempt frequencies v.

    Returns a (2, 12) numpy array: row 0 = Hm (eV), row 1 = v (THz); for each
    case the forward value is followed by the backward one. A backward
    frequency reported as 'Match' (i.e. getLine returned 'Not Match') is
    stored as 0. Temporarily chdirs into each case directory.
    """
    cases = 'w0_3nn,w1,w2,w3_2nn,w3_3nn,w3_4nn'.split(',')
    Hm_list = []
    v_list = []
    for i in range(len(cases)):
        case = cases[i]
        os.chdir(case)
        # --- barrier Hm from the NEB energy profile ---
        res = zzdlib.getshellResult(f'python3 {pys_path}/NEBCheck.py --func=2')
        line, index = zzdlib.File.getLine(res, 'IMAGE')
        IMAGE = []
        Barrier = []
        # data rows follow the 'IMAGE' header line, up to the first empty line
        for j in range(index + 1, len(res)):
            if res[j] == '\n':
                break
            img = int(res[j].split()[0])
            barrier = float(res[j].split()[-1])
            IMAGE.append(img)
            Barrier.append(barrier)
        # forward barrier = peak; backward barrier = peak minus final-image energy
        Hm_foreward = np.max(Barrier)
        Hm_backward = np.max(Barrier) - Barrier[-1]
        Hm_list.append(Hm_foreward)
        Hm_list.append(Hm_backward)
        # --- attempt frequency v from the vibration analysis ---
        code = f'python3 {pys_path}/VaspVibAna_Result.py --isprint=True'
        res = zzdlib.getshellResult(code)
        v_foreward = zzdlib.File.getLine(res, keywords='foreward')[0].split()[-1]
        v_backward = zzdlib.File.getLine(res, keywords='backward')[0].split()[-1]
        if v_backward == 'Match':  # getLine fell through to 'Not Match'
            v_backward = '0'
        v_list.append(float(v_foreward))
        v_list.append(float(v_backward))
        os.chdir('..')
    Hm_and_v = np.concatenate((np.array(Hm_list).reshape(1, -1), np.array(v_list).reshape(1, -1)), axis=0)
    return Hm_and_v
def run_f2(ele, T=1000):
    """Compute and print the correlation factors f0 (self-diffusion reference)
    and f2 (solute) for element *ele* at temperature T; returns f2.

    The Hm/v table is cached in <ele>-V/.Hm_and_v. NOTE(review): the cache is
    read back with eval() — acceptable only because the file is written by
    this script itself; do not point it at untrusted data.
    """
    # obtain the Hm and v table (from the cache when present)
    os.chdir(f'{ele}-V')
    if os.path.isfile('.Hm_and_v'):
        with open('.Hm_and_v') as f:
            data = f.read()
        Hm_and_v = np.array(eval(data))
    else:
        Hm_and_v = get_Hm_and_v_from_raw()
        with open('.Hm_and_v', 'w') as f:
            f.write(str(Hm_and_v.tolist()))
    os.chdir('..')
    # f0: all twelve entries share one barrier/frequency pair
    # NOTE(review): 1.3943 eV / 6.2805 THz look like the pure-matrix
    # self-diffusion values — confirm their source.
    Hm_self = [1.3943 for i in range(12)]
    v_self = [6.2805 for i in range(12)]
    Hm_and_v_self = np.concatenate((np.array(Hm_self).reshape(1, -1), np.array(v_self).reshape(1, -1)), axis=0)
    f0 = get_f2(Hm_and_v_self, T=T)
    print(f'f0: {f0}')
    # f2 from the actual per-jump data
    f2 = get_f2(Hm_and_v, T=T)
    print(f'f2: {f2}')
    return f2
if __name__ == '__main__':
    # NOTE(review): `source ~/.bashrc` runs inside a throw-away subshell, so it
    # cannot change this process's environment — confirm whether it is needed.
    os.system(f'source ~/.bashrc')
    # element symbol from the directory name, e.g. 'fcc_withCu' -> 'Cu'
    ele = os.path.basename(os.getcwd()).split('with')[-1]
    print(f'ele: {ele}')
    run_Sb(ele)
    run_Sf(ele)
    run_Hf_and_Hb(ele)
    run_f2(ele, T=1000)
|
zybbigpy/VaspCZ | sourcecode/zzdlib.py | <reponame>zybbigpy/VaspCZ
#!/home/zhangzhengde/bin/bin/python
#coding=utf-8
import os
import subprocess
import time
import numpy as np
# Default account name used by the qstat queries; override via setUser().
user = 'zhangzhengde'
current_py_folder = os.path.dirname(os.path.abspath(__file__))
# Resolve the VaspCZ/sourcecode directory whether this file lives inside
# sourcecode/ or one level above it (single-element list used as a conditional).
VaspCZ_path = [os.path.dirname(current_py_folder) if 'sourcecode' in current_py_folder else current_py_folder][0] + '/sourcecode'
def setUser(inputusername):
    # Override the module-level default account name used for qstat queries.
    global user
    user = inputusername
def getshellResult(code):
    """Run *code* in a shell and return its combined stdout/stderr as a list
    of '\\n'-terminated strings; a trailing empty line is dropped.

    Raises subprocess.CalledProcessError when the command exits non-zero.
    """
    raw = subprocess.check_output(code, stderr=subprocess.STDOUT, shell=True)
    lines = [chunk + '\n' for chunk in raw.decode('utf-8').split('\n')]
    if lines[-1] == '\n':
        lines.pop()
    return lines
def listSum(numlist):
    """Sum a list whose items are numbers or numeric strings.

    Returns an int when all items are integral, a float otherwise; 0 for an
    empty list. Fixed: strings are now parsed with int()/float() instead of
    eval() (safer), and the old error check could never fire because its
    counter was incremented on both the success and failure paths.
    """
    total = 0
    for item in numlist:
        if isinstance(item, str):
            # prefer int so an all-integer list keeps an int sum
            try:
                item = int(item)
            except ValueError:
                item = float(item)
        total = total + item
    return total
class File():
    """Namespace-style collection of text-file helpers.

    NOTE(review): the methods deliberately omit ``self`` and are invoked on the
    class itself, e.g. ``File.openFile(...)`` — they are plain functions
    grouped under a class name.
    """

    def openFile(path, mode='r', data=None):
        # mode 'r': return the file's lines; mode 'w': write *data* (list of lines).
        if mode == 'r':
            with open(path, mode) as f:
                data = f.readlines()
            return data
        elif mode == 'w':
            with open(path, mode) as f:
                f.writelines(data)

    def substituteData(data, keywords, newline, mode='default'):
        # Replace the first line containing *keywords* with *newline* (a trailing
        # '\n' is appended if missing). Any mode other than 'default' replaces
        # every matching line. Mutates and returns *data*.
        for i in range(len(data)):
            if keywords in data[i]:
                if newline[-1] == '\n':
                    data[i] = newline
                else:
                    data[i] = newline + '\n'
                if mode == 'default':
                    break
        return data

    def getLine(data, keywords):
        # Return (first line containing *keywords* without its trailing '\n',
        # its index); ('Not Match', 0) when nothing matches.
        for i in range(len(data)):
            if keywords in data[i]:
                return data[i].strip('\n'), i
        return 'Not Match', 0

    def getAllline(data, keywords):
        # Return every line containing *keywords* as a list (may be empty).
        result = []
        for i in range(len(data)):
            if keywords in data[i]:
                result.append(data[i])
        return result

    def getNullline(data):
        # Return the indices of all blank lines (spaces stripped, only '\n' left).
        data2 = data.copy()
        result = []
        for i in range(len(data2)):
            data2[i] = data2[i].strip(' ')
            if data2[i] == '\n':
                result.append(i)
        return result

    def printData(data):
        # Echo the lines to stdout without their trailing newlines.
        for i in range(len(data)):
            print(data[i].strip('\n'))

    def Vaspsh_path():
        # Directory holding the platform's Vasp.sh PBS template
        # (2nd line of build-in_data.txt, 'key=value' format).
        with open(f'{VaspCZ_path}/build-in_data.txt', 'r') as f:
            data = f.readlines()
        Vaspsh_path = data[1].split('=')[1].strip('\n')
        return Vaspsh_path

    def VaspCZ_src_path():
        # Absolute path of the VaspCZ sourcecode directory.
        return VaspCZ_path

    def VaspCZ_software_path():
        # Software root path (1st line of build-in_data.txt).
        with open(f'{VaspCZ_path}/build-in_data.txt', 'r') as f:
            data = f.readlines()
        path = data[0].split('=')[1].strip('\n')
        return path

    def Vasp_pseudo_path():
        # Pseudopotential root ('Vasp_Pseudopotential_path=' line of build-in_data.txt).
        with open(f'{VaspCZ_path}/build-in_data.txt', 'r') as f:
            data = f.readlines()
        line = File.getLine(data, 'Vasp_Pseudopotential_path')[0]
        path = line.split('=')[1].strip('\n')
        return path
class Vasp():
def checkInputs():
    """Pre-submission sanity check of the VASP inputs in the current directory.

    Reads INCAR, POSCAR, POTCAR, KPOINTS and Vasp.sh, prints a one-screen
    summary and returns True when the POTCAR element order matches POSCAR, or
    when the MAGMOM atom count matches the POSCAR atom count for spin runs
    (non-spin runs always pass via the 9999 sentinel); otherwise False.
    Raises NameError when Vasp.sh is missing.
    """
    # --- INCAR ---
    data_INCAR = File.openFile('./INCAR', 'r')
    SYSTEM = File.getLine(data_INCAR, 'SYSTEM')[0].split('=')[-1]
    ENCUT = File.getLine(data_INCAR, 'ENCUT')[0].split('=')[-1]
    ISIF = File.getLine(data_INCAR, 'ISIF')[0].split('=')[-1]
    IBRION = File.getLine(data_INCAR, 'IBRION')[0].split('=')[-1]
    # translate the IBRION flag into a human-readable description
    if IBRION == '-1':
        IBRION = 'No update'
    elif IBRION == '0':
        IBRION = 'Molecular dynamics'
    elif IBRION == '1':
        IBRION = 'qussi-Newton'
    elif IBRION == '2':
        IBRION = 'conjugate-gradient'
    ISPIN = File.getLine(data_INCAR, 'ISPIN')[0].split('=')[-1]
    if ISPIN == '1' or ISPIN == 'Not Match':
        ISPIN = 'non spin'
        spinNum = 9999  # sentinel: skip the MAGMOM/atom-count consistency check
    elif ISPIN == '2':
        # count atoms covered by MAGMOM, e.g. '23*5 23*-5 1' -> 23+23+1
        ISPIN = File.getLine(data_INCAR, 'MAGMOM')[0].split('=')[-1]
        kind = ISPIN.split()
        spinNum = 0
        for kl in range(len(kind)):  # kind example: ['23*5', '23*-5', '1']
            spinNum = spinNum + eval(kind[kl].split('*')[0])
    EDIFF = File.getLine(data_INCAR, 'EDIFF')[0].split('=')[-1]
    EDIFFG = File.getLine(data_INCAR, 'EDIFFG')[0].split('=')[-1]
    # --- POSCAR (for NEB runs the reference POSCAR lives in 00/) ---
    if SYSTEM == 'NEB':
        data_POSCAR = File.openFile('./00/POSCAR', 'r')
    else:
        data_POSCAR = File.openFile('./POSCAR', 'r')
    elelist = data_POSCAR[5].split()   # element symbols (line 6)
    numlist = data_POSCAR[6].split()   # per-element counts (line 7)
    number = listSum(numlist)
    POS_out = ''
    for i in range(len(elelist)):
        POS_out = POS_out + elelist[i] + numlist[i] + ' '
    # --- POTCAR: element order from the TITEL line of each PAW block ---
    data_POT = File.openFile('./POTCAR', 'r')
    PAW = File.getAllline(data_POT, 'PAW')
    TITEL = File.getAllline(PAW, 'TITEL')
    POTelelist = []
    POT_out = ''
    for tl in range(len(TITEL)):
        POTelelist.append(TITEL[tl].split()[3])
        POT_out = POT_out + TITEL[tl].split()[3] + ' '
    # --- KPOINTS: generation method (line 3) and grid (line 4) ---
    data_KP = File.openFile('./KPOINTS', 'r')
    method = data_KP[2].strip('\n')
    grid = data_KP[3].strip('\n')
    # --- Vasp.sh (PBS submission script) ---
    try:
        data_Sh = File.openFile('./Vasp.sh', 'r')
    except Exception as e:
        sh_path = File.Vaspsh_path()
        raise NameError(f'当前目录:{os.getcwd()}不存在Vasp.sh文件,请配置和检查。配置:请将适合本平台的PBS脚本命名为Vasp.sh拷贝到目录"{sh_path}"下。')
    jobname = File.getLine(data_Sh, '#PBS -N')[0].strip('\n').split()[-1]
    jobnodes = File.getLine(data_Sh, '#PBS -l')[0].strip('\n').split()[-1]
    EMER = File.getLine(data_Sh, '#PBS -q')[0].strip('\n').split()[-1]
    # getLine returns 'Not Match' when there is no queue line -> last token 'Match'
    if EMER == 'Match':
        EMER = 'No'
    elif EMER == 'EMERGENCY':
        EMER = 'Yes'
    print('Vasp前检查:\n路径:{}\n计算任务:{} 截断能:{} ISIF:{} 离子更新:{} 磁性:{} 电子收敛:{} 离子收敛:{}'.format(os.getcwd(), SYSTEM, ENCUT, ISIF, IBRION, ISPIN, EDIFF, EDIFFG))
    print('POSCAR原子:{}种共计{}个 {} POTCAR原子:{} KPOINTS方法:{} 网格:{} 任务名:{} 节点与核:{} 加急:{}'.format(len(elelist), number, POS_out, POT_out, method, grid, jobname, jobnodes, EMER))
    if POTelelist == elelist or spinNum == number or spinNum == 9999:
        return True
    else:
        return False
def check_and_qsub(need_input=True):
    '''
    Run checkInputs() and, when the inputs look sane, submit Vasp.sh via qsub.

    need_input: when True (default), ask for confirmation first (empty answer
    means yes); when False, submit immediately after a successful check.
    Exits the process when the input check fails.
    '''
    if Vasp.checkInputs():
        if need_input:
            ipt = input(f'是否提交任务(默认yes):')
            if ipt in ['yes', 'y', '', 'Yes', 'YES', 'Y']:
                print('已提交任务')
                os.system('qsub Vasp.sh')
            else:
                print('未提交任务')
        else:
            print('已提交任务')
            os.system('qsub Vasp.sh')
    else:
        print(f'Vasp输入文件有误,请检查, path: {os.getcwd()}')
        exit()
def checkJobstatus(name, includeE=True):
    '''
    Query `qstat -x -u <user>` and return (status, runtime) for the first line
    containing *name* (job name, job id, or any substring of the qstat line)
    whose state is R or Q — or E when includeE is True. Finished or unmatched
    jobs yield ('Not Found', '0').
    '''
    mjb = []
    mjb = getshellResult('qstat -x -u ' + user)
    for i in range(len(mjb)):
        if name in mjb[i]:
            status = mjb[i].split()[9]    # 10th column: job state
            runtime = mjb[i].split()[10]  # 11th column: elapsed time
            if status == 'R' or status == 'Q':
                return status, runtime
            if includeE == True:
                if status == 'E':
                    return status, runtime
    return 'Not Found', '0'
def keepInputs(addfile=[],workdir='./', need_confirm=True):
Flist = 'INCAR,POSCAR,POTCAR,KPOINTS,Vasp.sh'.split(',')
for File in addfile:
Flist.append(File)
print(f'Vasp keep inputs: 保留的文件: {Flist}')
files = os.listdir(workdir)
del_files = []
for i in range(len(files)):
if files[i] in Flist:
pass
else:
del_files.append(files[i])
if need_confirm:
if len(del_files) == 0:
ipt = 'no'
pass
else:
ipt = input(f'准备删除路径"{workdir}"下的文件:{del_files}\n是否删除([y]es/no): ')
else:
ipt = 'y'
ipt = 'y' if ipt == '' else ipt
if ipt in ['y', 'Y', 'yes', 'YES']:
for file in del_files:
os.system(f'rm -rf {file}')
else:
print(f'未删除任何文件')
def checkNEBperiod():
    '''
    Walk the current directory tree for NEB calculations (directories that
    contain both 'ini' and 'fin') and report how far each has progressed.

    Stage encoding: '0/3 NotInit' (not started), '1/3' (endpoint Opt),
    '2/3' (endpoint self-consistent run), '3/3' (the NEB itself), each
    suffixed Done/NotDone via checkisDone().

    return: [[NEBpath, NEBperiod, iniperiod, finperiod], ...] where NEBpath
    is the absolute path of the NEB directory.
    '''
    result = []
    currentpath = os.getcwd()
    # print(currentpath)
    for dirpath, dirnames, filenames in os.walk('./'):
        if ('ini' in dirnames) and ('fin' in dirnames):
            NEBpath = currentpath + dirpath.strip('.')  # absolute path
            # a 'log' directly in the NEB dir means the NEB stage (3/3) has run
            if 'log' in filenames:
                if Vasp.checkisDone(dirpath):
                    NEBperiod = '3/3 Done'
                else:
                    NEBperiod = '3/3 NotDone'
                iniperiod = '3/3 Done'
                finperiod = '3/3 Done'
            else:
                # NEB not started: inspect ini/ and fin/ for stage 1/3 or 2/3
                NEBperiod = 'Not Match'
                if 'log' in os.listdir(dirpath + '/ini'):
                    # ini self-consistent run exists
                    if Vasp.checkisDone(dirpath + '/ini', isSelf=True):
                        iniperiod = '2/3 Done'
                    else:
                        iniperiod = '2/3 NotDone'
                else:
                    # no ini log; check the optimisation stage
                    if 'log' in os.listdir(dirpath + '/ini/Opt'):
                        if Vasp.checkisDone(dirpath + '/ini/Opt'):
                            iniperiod = '1/3 Done'
                        else:
                            iniperiod = '1/3 NotDone'
                    else:
                        iniperiod = '0/3 NotInit'
                if 'log' in os.listdir(dirpath + '/fin'):
                    if Vasp.checkisDone(dirpath + '/fin', isSelf=True):
                        finperiod = '2/3 Done'
                    else:
                        finperiod = '2/3 NotDone'
                else:
                    if 'log' in os.listdir(dirpath + '/fin/Opt'):
                        if Vasp.checkisDone(dirpath + '/fin/Opt'):
                            finperiod = '1/3 Done'
                        else:
                            finperiod = '1/3 NotDone'
                    else:
                        finperiod = '0/3 NotInit'
            result.append([NEBpath, NEBperiod, iniperiod, finperiod])
    return result
def checkisDone(path, isSelf=False):
    '''
    Decide from *path*/log whether a VASP run has finished.

    path: directory containing the log file.
    isSelf: True for a self-consistent (single ionic step) run — finished when
        the first 'F=' line is ionic step 1 ('1 F='); otherwise the last ~10
        lines are scanned for 'reached required accuracy'.
    Returns True/False. Prints a warning when the log file is empty.
    '''
    data_log = File.openFile(path + '/log')
    # an empty log happens occasionally; warn so the user can investigate
    if len(data_log) == 0:
        print('当前路径{}的log中无数据,请检查'.format(os.getcwd() + path.strip('.')))
    if isSelf == True:
        F = File.getLine(data_log, 'F=')[0]
        # print('F', F)
        if '1 F=' in F:
            return True
        else:
            return False
    else:
        # only the tail of the log needs scanning for the convergence message
        for i in range(len(data_log) - 10, len(data_log)):
            if 'reached required accuracy' in data_log[i]:
                return True
        return False
def decode_POSCAR(POSCAR):
    """
    Parse a POSCAR given as a list of lines.

    Returns (vector, elements, number_of_atom, position):
      vector: (3,3) lattice vectors, already multiplied by the scale factor;
      elements: element symbols from line 6;
      number_of_atom: per-element atom counts (np.array of int);
      position: (N,3) direct coordinates — NOTE each coordinate string is
      truncated to its first 6 characters before float conversion, i.e. about
      4 decimal places are kept.
    Assumes the coordinate block directly follows a line containing 'Direct'.
    """
    scale = float(POSCAR[1])  # global scaling factor
    a = np.array([float(tmp) for tmp in POSCAR[2].split()]) * scale  # lattice vector a
    b = np.array([float(tmp) for tmp in POSCAR[3].split()]) * scale  # lattice vector b
    c = np.array([float(tmp) for tmp in POSCAR[4].split()]) * scale  # lattice vector c
    vector = np.concatenate([a, b, c]).reshape(3, 3)
    elements = POSCAR[5].split()
    number_of_atom = np.array([int(tmp) for tmp in POSCAR[6].split()])
    # index of the 'Direct' line; the next N lines are the atom positions
    direct, index = File.getLine(POSCAR, keywords='Direct')
    number_of_atom_sum = int(number_of_atom.sum())
    position = []
    for i in range(number_of_atom_sum):
        x, y, z = POSCAR[index + 1 + i].split()[:3]
        x, y, z = x[:6], y[:6], z[:6]  # truncate to 6 characters (see docstring)
        position.append(np.array([float(x), float(y), float(z)]))
    position = np.array(position)
    return vector, elements, number_of_atom, position
def modify_Vasp_sh(jobname, nodes, ppn):
with open('./Vasp.sh', 'r') as f:
data = f.readlines()
new_data = []
for line in data:
if ' #PBS -N' in line:
new_data.append(f' #PBS -N {jobname}\n')
elif ' #PBS -l nodes' in line:
new_data.append(f' #PBS -l nodes={nodes}:ppn={ppn}\n')
else:
new_data.append(line)
with open('./Vasp.sh', 'w') as f:
f.writelines(new_data)
def generate_POTCAR(elements=None, pseudotype='PBE'):
    """Concatenate per-element pseudopotential files into ./POTCAR.

    elements: element symbols in POSCAR order; read from ./POSCAR when None.
    pseudotype: pseudopotential family subdirectory (default 'PBE').
    Accepts either a compressed POTCAR.Z or a plain POTCAR per element;
    raises NameError when an element is missing from the pseudopotential tree.
    """
    if elements is None:
        with open('POSCAR', 'r') as f:
            data = f.readlines()
        res = Vasp.decode_POSCAR(data)
        elements = res[1]
    path1 = os.path.join(File.Vasp_pseudo_path(), 'PseudoPotential')
    path2 = pseudotype
    os.system('rm POTCAR')  # start fresh; harmless when POTCAR is absent
    for i in range(len(elements)):
        path3 = elements[i]
        path = os.path.join(path1, path2, path3)
        if os.path.isfile(f'{path}/POTCAR.Z'):
            # '>' creates the file for the first element, '>>' appends afterwards
            if i == 0:
                os.system(f'zcat {path}/POTCAR.Z >POTCAR')
            else:
                os.system(f'zcat {path}/POTCAR.Z >>POTCAR')
        elif os.path.isfile(f'{path}/POTCAR'):
            code = f'cat {path}/POTCAR >POTCAR' if i == 0 else f'cat {path}/POTCAR >>POTCAR'
            os.system(code)
        else:
            raise NameError(f'gennerate POTCAR error, element "{elements[i]}" not found in path: {path}\n请正确配置贋势文件路径。\n配置方法:将你的贋势文件夹拷贝到目录"{File.Vasp_pseudo_path()}"下并命名为:PseudoPotential/[贋势类型]。')
def generate_KPOINTS(vector='5 5 5', kptype='Monkhorst'):
    """Write a KPOINTS file in the current directory.

    vector: the k-point grid, e.g. '5 5 5'; kptype: generation scheme name
    (default 'Monkhorst'). The shift line is fixed at '0. 0. 0.'.
    """
    content = [
        f'{kptype} pack\n',  # comment line
        '0\n',               # 0 -> automatic generation
        f'{kptype}\n',
        f'{vector}\n',
        '0. 0. 0.\n',        # grid shift
    ]
    File.openFile('KPOINTS', 'w', data=content)
def modify_POSCAR_ele(oldele, new_ele):
"""
读取并修改当前POSCAR的元素
:param new_ele:
:return:
"""
with open('POSCAR', 'r') as f:
data = f.readlines()
new_data = []
for line in data:
if oldele in line:
new_data.append(line.replace(oldele, new_ele))
else:
new_data.append(line)
with open('POSCAR', 'w') as f:
f.writelines(new_data)
def modify_POSCAR_Selective_Dynamics(data, indexes):
    """
    Insert a 'Selective Dynamics' block into POSCAR lines.

    data: POSCAR as a list of lines. indexes: 0-based atom indices, counted
    from the first coordinate line, that remain free ('T T T'); every other
    atom is frozen ('F F F'). Returns the new list of lines (input untouched).
    """
    POSCAR_data = []
    direct, direct_index = File.getLine(data, keywords='Direct')
    decoded_data = Vasp.decode_POSCAR(data)
    number_of_atom = decoded_data[2]
    for i in range(len(data)):
        if i < direct_index:
            POSCAR_data.append(data[i])  # header lines, unchanged
        elif i == direct_index:
            # the tag must appear immediately before the 'Direct' line
            POSCAR_data.append('Selective Dynamics\n')
            POSCAR_data.append(data[i])
        elif direct_index < i <= direct_index + np.sum(number_of_atom):
            # coordinate block: append per-atom movement flags
            tmp_i = i - direct_index - 1
            if tmp_i not in indexes:
                POSCAR_data.append(data[i].strip('\n') + ' F F F\n')
            else:
                POSCAR_data.append(data[i].strip('\n') + ' T T T\n')
        else:
            POSCAR_data.append(data[i])  # anything after the coordinates
    return POSCAR_data
def modify_INCAR_for_vibration_analysis():
    """
    Rewrite ./INCAR (assumed to be a relaxation INCAR) for a finite-difference
    vibration analysis: SYSTEM=Vib, NSW=1, POTIM=0.03, IBRION=5, NFREE=2,
    ISYM=0, PREC=Accurate; NPAR/NCORE lines are removed.

    Note: a tag is 'deleted' by blanking its line via substituteData; where a
    replacement value is needed it is appended at the end of the file.
    """
    with open('INCAR', 'r') as f:
        data_INCAR = f.readlines()
    data_INCAR = File.substituteData(data_INCAR, keywords='SYSTEM', newline='SYSTEM=Vib\n')  # replace
    data_INCAR = File.substituteData(data_INCAR, keywords='NSW', newline='NSW=1\n')  # replace
    data_INCAR = File.substituteData(data_INCAR, keywords='POTIM', newline='POTIM=0.03\n')  # replace
    data_INCAR = File.substituteData(data_INCAR, keywords='IBRION', newline='IBRION=5\n')  # replace
    data_INCAR = File.substituteData(data_INCAR, keywords='NFREE', newline='\n')  # blank out
    data_INCAR.append('NFREE=2\n')  # append replacement
    data_INCAR = File.substituteData(data_INCAR, keywords='ISYM', newline='\n')  # blank out
    data_INCAR.append('ISYM=0\n')  # append replacement
    data_INCAR = File.substituteData(data_INCAR, keywords='PREC', newline='\n')  # blank out
    data_INCAR.append('PREC=Accurate\n')  # append replacement
    data_INCAR = File.substituteData(data_INCAR, keywords='NPAR', newline='\n')  # remove
    data_INCAR = File.substituteData(data_INCAR, keywords='NCORE', newline='\n')  # remove
    with open('INCAR', 'w') as f:
        f.writelines(data_INCAR)
def check_WARNING_and_Energy(path='.'):
    """
    Verify that the relaxation in *path* finished, print any WARNING lines
    found in OUTCAR and log, and return the final free energy.

    Exits the process when checkisDone() reports the run unfinished.
    Returns the energy (float) parsed from the last 'F=' line of the log.
    """
    if not Vasp.checkisDone(path):
        print(f'当前路径{os.getcwd()} Vasp计算没有完成,退出程序')
        exit()
    OUTCAR_file = os.path.join(path, 'OUTCAR')
    OUT_data = File.openFile(OUTCAR_file, 'r')
    flag = 0  # becomes 1 once any WARNING line has been seen
    WARNING_list = []
    for i in range(len(OUT_data)):
        line = OUT_data[i]
        if 'WARNING' in line:
            flag = 1
            WARNING_list.append(line)
    log_file = os.path.join(path, 'log')
    log_data = File.openFile(log_file, 'r')
    for i in range(len(log_data)):
        line = log_data[i]
        if 'WARNING' in line:
            flag = 1
            WARNING_list.append(line)
    if flag == 1:
        print(f'路径{os.getcwd()} 有警告!!! 警告内容:')
        for WARN in WARNING_list:
            print(WARN.strip('\n'))
    F_list = File.getAllline(log_data, keywords='F=')
    # final energy: value right after 'F=' on the last ionic-step line
    energy = F_list[-1].split('F=')[-1].split()[0]
    energy = float(energy)
    return energy
|
zybbigpy/VaspCZ | othercode/cal_2nn_to_6nn.py | <filename>othercode/cal_2nn_to_6nn.py
"""
运行在fcc_withele目录下,进入ele-Energies/,获取Fe106ele1V1文件夹下的输入文件,把POSCAR改为对应的2nn-6nn,然后进行计算
"""
import os
import subprocess
import sys
sys.path.append(os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode'))
import zzdlib
import numpy as np
import argparse
import VaspCZ
pys_path = os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode')
def run(nodes, ppn, ele):
    """Create and submit the Fe106<ele>1V1_<x>nn jobs (vacancy placed at the
    1nn..6nn site relative to the solute) by copying inputs from Fe106<ele>1V1
    and editing POSCAR.

    nodes/ppn: PBS resources; ele: solute element symbol. Exits when a target
    directory already exists.
    """
    os.chdir(f'{ele}-Energies')
    caselist = '1nn,2nn,3nn,4nn,5nn,6nn'.split(',')
    # fractional coordinates of each case's vacancy site, 2 digits per axis
    vaclist = '505066,665050,666633,668350,835066,668316'.split(',')
    for i in range(len(caselist)):
        case = caselist[i]
        vac = vaclist[i]
        dirname = f'Fe106{ele}1V1_{case}'
        if os.path.isdir(dirname):
            print(f'{dirname} 已存在,退出')
            exit()
        os.mkdir(dirname)
        os.chdir(dirname)
        if case == '1nn':
            # the reference cell is already the 1nn configuration
            os.system(f'cp ../Fe106{ele}1V1/* .')
        else:
            os.system(f'cp ../Fe106{ele}1V1/INCAR .')
            os.system(f'cp ../Fe106{ele}1V1/POTCAR .')
            os.system(f'cp ../Fe106{ele}1V1/KPOINTS .')
        # fetch and edit POSCAR: the atom sitting at this case's vacancy site is
        # moved back to the original vacancy position (0.5, 0.5, 0.6666...)
        os.system(f'cp ../Fe106{ele}1V1/POSCAR .')
        pos_data = zzdlib.File.openFile('./POSCAR', 'r')
        new_pos_data = []
        for j in range(len(pos_data)):
            line = pos_data[j]
            if j < 8:  # first 8 lines are the header, down to 'Direct'
                new_pos_data.append(line)
            else:
                try:
                    x, y, z = line.strip('\n').split()
                except Exception as e:
                    print(j, line)
                    x, y, z = '0', '0', '0'
                # target coordinates rebuilt from the 6-digit code, e.g. '505066'
                vacx = f'0.{vac[:2]}'
                vacy = f'0.{vac[2:4]}'
                vacz = f'0.{vac[4::]}'
                if x[:4] == vacx and y[:4] == vacy and z[:4] == vacz:
                    thatline = f'0.500000000 0.500000000 0.666666666\n'
                    new_pos_data.append(thatline)
                else:
                    new_pos_data.append(line)
        zzdlib.File.openFile('./POSCAR', 'w', data=new_pos_data)
        # Vasp.sh: platform template with job name and resources filled in
        jobname = f'fc06{ele}{case}Opt'
        Vaspsh_path = zzdlib.File.Vaspsh_path()
        os.system(f'cp {Vaspsh_path}/Vasp.sh .')
        zzdlib.Vasp.modify_Vasp_sh(jobname, nodes=nodes, ppn=ppn)
        zzdlib.Vasp.check_and_qsub(need_input=True)
        os.chdir('..')
    os.chdir('..')
def check_and_reqsub():
    """For every listed element and every xnn job, grep the run log for
    'node8' and re-submit the affected runs (after re-checking inputs).

    NOTE(review): presumably node8 is a compute node whose runs need redoing —
    confirm; the hostname is hard-coded.
    """
    elelist = 'Al,Cr,Cu,Mo,Nb,Ru,Sb,Tc'.split(',')
    for ele in elelist:
        first_dir = f'fcc_with{ele}/{ele}-Energies'
        os.chdir(first_dir)
        xnnlist = '1nn,2nn,3nn,4nn,5nn,6nn'.split(',')
        for xnn in xnnlist:
            second_dir = f'Fe106{ele}1V1_{xnn}'
            os.chdir(second_dir)
            data_log = zzdlib.File.openFile('log', 'r')
            res = zzdlib.File.getLine(data=data_log, keywords='node8')
            if res[0] != 'Not Match':  # the log mentions node8 -> resubmit
                zzdlib.Vasp.check_and_qsub(need_input=True)
            else:
                print(f'fcc_with{ele} {xnn} 未检测到node8, 跳过')
            os.chdir('..')
        os.chdir('../..')
if __name__ == '__main__':
    # CLI: -nd/-np choose PBS resources; -func selects 'cal' (set up + submit)
    # or anything else (check logs and resubmit node8 runs).
    parser = argparse.ArgumentParser(description='manual to this script')
    parser.add_argument('-nd', '--nodes', type=str, default='1')
    parser.add_argument('-np', '--ppn', type=str, default='12')
    parser.add_argument('-func', '--function', type=str, default='cal')
    args = parser.parse_args()
    nodes = args.nodes
    ppn = args.ppn
    func = args.function
    # element symbol from the working directory name, e.g. 'fcc_withCu' -> 'Cu'
    ele = os.path.basename(os.getcwd()).split('with')[-1]
    print(f'输入参数: nodes:{nodes} ppn:{ppn} ele:{ele} func:{func}')
    if func == 'cal':
        run(nodes, ppn, ele)
    else:
        check_and_reqsub()
|
zybbigpy/VaspCZ | vtstscripts-939/split_dos.py | #!/usr/bin/env python
import numpy as np
#import ase
import aselite as ase
### READ DOSCAR ###
def read_dosfile():
    """Read ./DOSCAR and return (lines, index, natoms, nedos, efermi).

    lines: every line of the file; index: index of the header line (5) whose
    3rd field is NEDOS and whose 4th field is the Fermi energy; natoms is the
    first field of line 0.

    Fixes: the file is now closed via a context manager and the Python 2
    print statement was replaced with a form valid on both Python 2 and 3.
    """
    with open("DOSCAR", 'r') as f:
        lines = f.readlines()
    index = 0
    natoms = int(lines[index].strip().split()[0])
    index = 5
    nedos = int(lines[index].strip().split()[2])
    efermi = float(lines[index].strip().split()[3])
    print('{} {} {}'.format(natoms, nedos, efermi))
    return lines, index, natoms, nedos, efermi
### READ POSCAR or CONTCAR and save pos
def read_posfile():
    """Read atomic positions from ./POSCAR via ase.io.read; return an empty
    list when the file cannot be opened (positions are then not written).

    NOTE(review): this imports the real `ase` package even though the module
    imports `aselite as ase` at the top — confirm which package is intended.
    Fix: the Python 2 print statement was replaced with a call valid on both
    Python 2 and 3.
    """
    from ase.io import read

    try:
        atoms = read('POSCAR')
    except IOError:
        print("[__main__]: Couldn't open input file POSCAR, atomic positions will not be written...\n")
        atoms = []
    return atoms
### WRITE DOS0 CONTAINING TOTAL DOS ###
def write_dos0(lines, index, nedos, efermi):
    """Write the total DOS to ./DOS0.

    lines: the DOSCAR lines; index: header-line index (data starts at index+1);
    nedos: number of energy points; efermi: Fermi energy, subtracted from the
    energy column so E_F sits at zero. Returns the index of the last line read.

    Fixes: ``xrange`` -> ``range`` (Python 3 compatibility) and the output
    file is now closed via a context manager.
    """
    line = lines[index + 1].strip().split()
    ncols = int(len(line))
    with open("DOS0", 'w') as fdos:
        for n in range(nedos):
            index += 1
            fields = lines[index].strip().split()
            e_f = float(fields[0]) - efermi  # shift energies so E_F = 0
            fdos.write('%15.8f ' % (e_f))
            for col in range(1, ncols):
                fdos.write('%15.8f ' % (float(fields[col])))
            fdos.write('\n')
    return index
### LOOP OVER SETS OF DOS, NATOMS ###
def write_nospin(lines, index, nedos, natoms, ncols, efermi):
    """Write one DOS<i> file per atom for a non-spin-polarised DOSCAR.

    Each file starts with a comment line holding the atom's position (zeros
    when POSCAR could not be read), followed by nedos rows of (E-E_F, dos...).

    Fix: ``xrange`` -> ``range`` for Python 3 compatibility.
    """
    atoms = read_posfile()
    if len(atoms) < natoms:
        pos = np.zeros((natoms, 3))  # positions unavailable -> zeros
    else:
        pos = atoms.get_positions()
    for i in range(1, natoms + 1):
        si = str(i)
        ## OPEN DOSi FOR WRITING ##
        with open("DOS" + si, 'w') as fdos:
            index += 1  # skip the per-atom header line in DOSCAR
            ia = i - 1
            fdos.write('# %15.8f %15.8f %15.8f \n' % (pos[ia, 0], pos[ia, 1], pos[ia, 2]))
            ### LOOP OVER NEDOS ###
            for n in range(nedos):
                index += 1
                fields = lines[index].strip().split()
                e_f = float(fields[0]) - efermi
                fdos.write('%15.8f ' % (e_f))
                for col in range(1, ncols):
                    fdos.write('%15.8f ' % (float(fields[col])))
                fdos.write('\n')
def write_spin(lines, index, nedos, natoms, ncols, efermi):
    """Write per-atom DOS<i> files for a spin-polarised DOSCAR; the spin-down
    columns are negated so both channels can be plotted on one axis.

    Fixes: ``(ncols - 1) / 2`` is a float under Python 3 and made the site
    loop crash — it is now integer division; ``xrange`` -> ``range``; the
    output files are closed via context managers.
    """
    atoms = read_posfile()
    if len(atoms) < natoms:
        pos = np.zeros((natoms, 3))  # positions unavailable -> zeros
    else:
        pos = atoms.get_positions()
    nsites = (ncols - 1) // 2  # (up, down) column pairs per energy row
    for i in range(1, natoms + 1):
        si = str(i)
        ## OPEN DOSi FOR WRITING ##
        with open("DOS" + si, 'w') as fdos:
            index += 1  # skip the per-atom header line in DOSCAR
            ia = i - 1
            fdos.write('# %d \n' % (ncols))
            fdos.write('# %15.8f %15.8f %15.8f \n' % (pos[ia, 0], pos[ia, 1], pos[ia, 2]))
            ### LOOP OVER NEDOS ###
            for n in range(nedos):
                index += 1
                fields = lines[index].strip().split()
                e_f = float(fields[0]) - efermi
                fdos.write('%15.8f ' % (e_f))
                for site in range(nsites):
                    dos_up = float(fields[site * 2 + 1])
                    dos_down = float(fields[site * 2 + 2]) * -1
                    fdos.write('%15.8f %15.8f ' % (dos_up, dos_down))
                fdos.write('\n')
#
if __name__ == '__main__':
    import sys
    import os
    import datetime
    import time
    import optparse
    # NOTE(review): the final print is a Python 2 statement — this driver must
    # be run with python2 as-is.
    lines, index, natoms, nedos, efermi = read_dosfile()
    index = write_dos0(lines, index, nedos, efermi)
    ## Test if a spin polarized calculation was performed ##
    # These column counts route the file to the spin-polarised writer.
    line = lines[index+2].strip().split()
    ncols = int(len(line))
    if ncols==7 or ncols==19 or ncols==9 or ncols==33:
        write_spin(lines, index, nedos, natoms, ncols, efermi)
        is_spin=True
    else:
        write_nospin(lines, index, nedos, natoms, ncols, efermi)
        is_spin=False
    print "Spin unrestricted calculation: ", is_spin
|
zybbigpy/VaspCZ | othercode/bcc_complement1.1.py | """
之前所有算的bcc_withele,需要补全一些计算,运行在bcc_withele文件夹下:
补全内容:
1.ele-V下的1nn 2nn Fe_12 Fe_13 Fe_14下的振动分析
2.ele-V下的w5任务的提交。
3.ele-Energies下的 53Fe1Al的振动分析,52Fe1Al1V的振动分析
1.1 更新
更新ele_Energies_vib函数
原来的用1NN 2NN 3NN 5NN的原子振动变化来计算熵增的近似不合理
改为计算全部的原子的振动。
"""
import os
import argparse
import sys
sys.path.append(os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode'))
import zzdlib
import subprocess
pys_path = os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode')
def ele_V_vib(ele, nodes, ppn):
    """Launch vibration analyses (VaspVibAna_forNEB.py) for the 1nn/2nn and
    Fe_12/Fe_13/Fe_14 NEB directories under <ele>-V.

    NOTE(review): --include_fin is True only for the Fe_* cases — presumably
    their final states are inequivalent to the initial ones; confirm.
    """
    print(f'执行ele-V 1nn 2nn Fe_12 Fe_13 Fe_14 振动分析')
    os.chdir(f'{ele}-V')
    cases = ['1nn', '2nn', 'Fe_12', 'Fe_13', 'Fe_14']
    include_fin = [False, False, True, True, True]
    for i in range(len(cases)):
        case = cases[i]
        os.chdir(case)
        # run the helper with the current interpreter, one case at a time
        code = [
            sys.executable, f'{pys_path}/VaspVibAna_forNEB.py', f'-nd={nodes}', f'-np={ppn}',
            f'--include_fin={include_fin[i]}']
        print(code)
        subprocess.call(code, shell=False)
        os.chdir('..')
    os.chdir('..')
def ele_V_w5(ele, nodes, ppn, w5_path):
    """Set up and submit the w5 NEB endpoint optimisations (ini/fin) for
    solute *ele*, starting from the template tree under $HOME/*w5_path*.

    Bug fixed: the POTCAR step called ``zzdlib.Vasp.gennerate_POTCAR`` (typo),
    but zzdlib only defines ``generate_POTCAR``, so this function always
    raised AttributeError before submitting anything.
    """
    print(f'执行ele-V w5 ini fin 的结构优化')
    os.chdir(f'{ele}-V')
    os.mkdir('w5')
    os.chdir('w5')
    w5_path = os.path.join(os.environ['HOME'], w5_path)
    os.system(f'cp -rf {w5_path}/* .')
    infi_list = ['ini', 'fin']
    for i in range(len(infi_list)):
        infi = infi_list[i]
        os.chdir(f'{infi}/Opt')
        # INCAR / KPOINTS come from the template unchanged.
        # POSCAR: swap the template's placeholder element (Te) for *ele*.
        zzdlib.Vasp.modify_POSCAR_ele(oldele='Te', new_ele=ele)
        # POTCAR: regenerate from the POSCAR elements (PBE pseudopotentials).
        zzdlib.Vasp.generate_POTCAR()
        # Vasp.sh: platform template with job name and resources filled in.
        Vaspsh_path = zzdlib.File.Vaspsh_path()
        os.system(f'cp {Vaspsh_path}/Vasp.sh .')
        jobname = f'bc{ele}w5{infi[0]}O'
        zzdlib.Vasp.modify_Vasp_sh(jobname, nodes=nodes, ppn=ppn)
        # check the inputs and submit
        zzdlib.Vasp.check_and_qsub()
        os.chdir('../..')
    os.chdir('../..')
def ele_Energies_vib(ele, nodes, ppn):
    """Submit a full vibration analysis (one job per atom) for the relaxed
    53Fe1<ele> and 52Fe1<ele>1V cells under <ele>-Energies/<ele>-V.

    v1.1 change: every atom gets its own vib_analysis/atom<i> job instead of
    only selected neighbour shells (the old shell-based approximation for the
    entropy was deemed unreasonable). Exits when vib_analysis already exists.
    """
    print(f'执行ele-Energies Fe531{ele} Fe52{ele}1V1的振动分析')
    os.chdir(f'{ele}-Energies/{ele}-V')
    cases = [f'53Fe1{ele}', f'52Fe1{ele}1V']
    # cases = [f'53Fe1{ele}']
    for i in range(len(cases)):
        case = cases[i]
        os.chdir(case)
        if os.path.isdir('vib_analysis'):
            print(f'{ele} {case} vib_analysis已存在,退出')
            exit()
        else:
            os.mkdir('vib_analysis')
        os.chdir('vib_analysis')
        # NOTE(review): the coordinate tuples and xnn_list below are leftovers
        # from the old shell-based scheme and are not used by the per-atom loop.
        nn3 = ('0.3333', '0.6666', '0.6666')
        nn5 = ('0.6666', '0.6666', '0.6666')
        valuable_position = [
            ('0.3333', '0.3333', '0.6666'), ('0.3333', '0.6666', '0.6666'), ('0.6666', '0.6666', '0.6666'),
            ('0.5000', '0.5000', '0.5000'), ('0.3333', '0.3333', '0.3333')]
        xnn_list = ['solute', 'nn1', 'nn2', 'nn3', 'nn5']
        if case == f'52Fe1{ele}1V':
            xnn_list.remove('nn1')
        # number of atoms in the relaxed cell
        data_POS = zzdlib.File.openFile('../POSCAR')
        pos_result = zzdlib.Vasp.decode_POSCAR(data_POS)
        number_of_atoms = int(pos_result[2].sum())
        for j in range(number_of_atoms):
            dir_name = f'atom{j+1}'
            os.mkdir(dir_name)
            os.chdir(dir_name)
            # INCAR: copy and switch to finite-difference vibration settings
            os.system(f'cp ../../INCAR .')
            zzdlib.Vasp.modify_INCAR_for_vibration_analysis()
            # POSCAR: relaxed CONTCAR with only atom j allowed to move
            os.system(f'cp ../../CONTCAR POSCAR')
            indexes = [j]
            data_POS = zzdlib.File.openFile('POSCAR', 'r')
            POSCAR_data = zzdlib.Vasp.modify_POSCAR_Selective_Dynamics(data=data_POS, indexes=indexes)
            zzdlib.File.openFile('POSCAR', 'w', data=POSCAR_data)
            # KPOINTS / POTCAR reused from the relaxation unchanged
            os.system(f'cp ../../KPOINTS .')
            os.system(f'cp ../../POTCAR .')
            # Vasp.sh: platform template with a per-atom job name
            Vaspsh_path = zzdlib.File.Vaspsh_path()
            os.system(f'cp {Vaspsh_path}/Vasp.sh .')
            jobname = f'bc{case[4::]}{j+1}_vib'
            zzdlib.Vasp.modify_Vasp_sh(jobname, nodes=nodes, ppn=ppn)
            zzdlib.Vasp.check_and_qsub(need_input=False)
            os.chdir('..')
        os.chdir('..')
        os.chdir('..')
    os.chdir('../..')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-nd', '--nodes', default='1', type=str)
    parser.add_argument('-np', '--ppn', default='8', type=str)
    parser.add_argument('-w5p', '--w5_path', default='zhangzd/bcc_54atom/w5_prepare', type=str)
    args = parser.parse_args()
    nodes = args.nodes
    ppn = args.ppn
    w5_path = args.w5_path
    # element symbol from the directory name, e.g. 'bcc_withCu' -> 'Cu'
    ele = os.path.basename(os.getcwd()).split('with')[-1]
    ipt = input(f'功能:1.{ele}-Energies_vib 2.{ele}-V_vib 3.{ele}-V_w5_Opt\n请输入需要的功能(默认all): ')
    # an empty answer or 'all'/'a'/'A' runs every step; '1'/'2'/'3' run one
    if ipt in ['1', 'all', 'A', 'a', '']:
        ele_Energies_vib(ele, nodes, ppn)
    if ipt in ['2', 'all', 'A', 'a', '']:
        ele_V_vib(ele, nodes, ppn)
    if ipt in ['3', 'all', 'A', 'a', '']:
        ele_V_w5(ele, nodes, ppn, w5_path)
|
zybbigpy/VaspCZ | vtstscripts-939/tist.py | <gh_stars>10-100
#!/usr/bin/env python
from __future__ import print_function
import sys
import re
import os
import gzip
def sumIonicTime(f, subt, ast):
    """Sum the ionic-step times recorded in an OUTCAR.

    f: iterable of OUTCAR lines. subt/ast are unused, kept for call
    compatibility. Returns (total LOOP+ time in seconds, average LOOP:
    electronic step time in seconds — 0 when no LOOP: line was seen).
    The timed value is the 4th token of the line with its trailing ':'
    stripped.
    """
    total_ionic = 0
    elec_total = 0
    elec_steps = 0
    avg_elec = 0
    for raw_line in f:
        tokens = raw_line.split()
        if not tokens:
            continue
        if tokens[0] == 'LOOP+:':
            total_ionic = total_ionic + float(tokens[3][:-1])
        elif tokens[0] == 'LOOP:':
            elec_total = elec_total + float(tokens[3][:-1])
            elec_steps = elec_steps + 1
            avg_elec = elec_total / elec_steps
    return (total_ionic, avg_elec)
def avgElectronicStep(f):
    """Return the average 'LOOP:' (electronic step) cpu time in an OUTCAR stream.

    Fixes the original, which still contained debug prints and had the actual
    average computation commented out, so it always returned 0.
    Returns 0 when the stream contains no electronic-step timing lines.
    """
    total = 0.0
    steps = 0
    for line in f:
        if isinstance(line, bytes):
            # gzip.open() yields bytes; normalise so the comparison works.
            line = line.decode('utf-8', 'replace')
        fields = line.split()
        if fields and fields[0] == 'LOOP:':
            # fields[3] carries the cpu time with a trailing ':'.
            total += float(fields[3][:-1])
            steps += 1
    # Guard against division by zero when no step was found.
    return total / steps if steps else 0
# Accumulators over all OUTCARs read below.
tt = 0            # total ionic ('LOOP+:') time over every OUTCAR, in seconds
subtt = 0         # ionic time of the OUTCAR currently being processed
avgElecStep = 0   # average electronic-step time of the current OUTCAR
aESSum = 0 # sum of avgElecStep
numOUTCAR = 0 # number of OUTCARS read

hasOUTCAR = os.path.isfile('OUTCAR')
if hasOUTCAR :
    numOUTCAR = numOUTCAR + 1
    print("There is a file OUTCAR :",hasOUTCAR)
    f = open("OUTCAR")
    (subtt,avgElecStep) = sumIonicTime(f,subtt,avgElecStep)
    print('this OUTCAR recorded total ionic step time:',subtt,' sec')
    tt = tt + subtt
    f.close()
    f = open("OUTCAR")
    # avgElecStep = avgElectronicStep(f)
    print("In this OUTCAR the average electronic step time is :", avgElecStep," sec")
    aESSum = aESSum + avgElecStep
    # f = open("OUTCAR")
else :
    print("There is no OUTCAR in current directory")
    # No OUTCAR at the top level: scan every immediate subdirectory instead
    # (the usual NEB layout 00/, 01/, ...).
    distList = os.listdir("./")
    for dir in distList :
        if os.path.isdir(dir):
            print("current dir is ",dir)
            if os.path.isfile(os.path.join(dir,"OUTCAR.gz")) :
                numOUTCAR = numOUTCAR + 1
                print("reading from OUTCAR.gz")
                # NOTE(review): gzip.open defaults to binary mode, so the lines
                # handed to sumIonicTime are bytes under Python 3 — confirm.
                f = gzip.open(os.path.join(dir,"OUTCAR.gz"))
                (subtt,avgElecStep) = sumIonicTime(f,subtt,avgElecStep)
                print('this OUTCAR recorded total ionic step time:',subtt,' sec')
                # avgElecStep = avgElectronicStep(f)
                print("In this OUTCAR the average electronic step time is :", avgElecStep," sec")
                aESSum = aESSum + avgElecStep
                f.close()
                tt = tt + subtt
            else :
                print("There is no OUTCAR.gz here")
                # Fall back to an uncompressed OUTCAR only when no .gz exists.
                if os.path.isfile(os.path.join(dir,"OUTCAR")) :
                    numOUTCAR = numOUTCAR + 1
                    subtt = 0
                    f = open(os.path.join(dir,"OUTCAR"))
                    # for line in f:
                    #     string = line.split()
                    #     if len(string) > 0 :
                    #         if string[0] == 'LOOP+:' :
                    #             temps = string[3]
                    #             temps2 = temps[:-1]
                    #             temps2 = float(temps2)
                    #             subtt = subtt + temps2
                    (subtt,avgElecStep) = sumIonicTime(f,subtt,avgElecStep)
                    print('this OUTCAR recorded total ionic step time:',subtt,' sec')
                    # avgElecStep = avgElectronicStep(f)
                    print("In this OUTCAR the average electronic step time is :", avgElecStep," sec")
                    aESSum = aESSum + avgElecStep
                    f.close()
                    tt = tt + subtt
                else :
                    print("There is no OUTCAR here")
#print("yippi")
print("All OUTCARS combined took total ionic step time: ",tt,' sec')
print("which in hours is :",tt/3600,' hours')
print("checking for number of cores used...")
# Try to read the core count from the '-n' option of the job.sub submit script.
if os.path.isfile("job.sub") :
    f = open("job.sub")
    for line in f:
        string = line.split()
        if len(string) > 1 :
            if string[1] == "-n" :
                print("number of cores is", string[2])
                print("total computational time is :", tt/3600 * float(string[2])," hours")
    f.close()
# NOTE(review): raises ZeroDivisionError when no OUTCAR was found — confirm intended.
print("overall average electronic step time for all OUTCAR's read is :", aESSum / numOUTCAR," sec")
#for line in f:
|
zybbigpy/VaspCZ | othercode/get_Sb_forbcc.py | import os
import subprocess
import sys
sys.path.append(os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode'))
import zzdlib
import numpy as np
def decode_vib_frequency(data):
    """Extract vibration frequencies (THz) from grep'ed OUTCAR lines.

    Each matching line looks like '... f  =  70.186986 THz ...'; the value is
    rounded to 4 decimal places. Returns the frequencies as a numpy array.
    """
    parsed = [
        float(f"{float(line.split('=')[-1].split()[0]):.4f}")
        for line in data
        if 'THz' in line
    ]
    return np.array(parsed)
def run(ele, correct=False):
    """Estimate the binding entropy Sb for solute *ele* next to a vacancy (bcc Fe).

    Frequencies are grepped from the vibration-analysis OUTCARs of the
    52Fe1{ele}1V (solute + vacancy) and 53Fe1{ele} (solute only) supercells.
    correct: pad the frequency list with one synthetic atom (the mean of the
    others) because one atom's vibration run never finished (see below).
    """
    code1 = f'grep THz {ele}-Energies/{ele}-V/52Fe1{ele}1V/vib_analysis/*/OUTCAR'
    result1 = zzdlib.getshellResult(code1)
    fre1 = decode_vib_frequency(result1)
    code2 = f'grep THz {ele}-Energies/{ele}-V/53Fe1{ele}/vib_analysis/*/OUTCAR'
    result2 = zzdlib.getshellResult(code2)
    fre2 = decode_vib_frequency(result2)
    print(fre1.shape, fre2.shape)
    # In bcc_withCr the 52Fe1Cr1V atom46 vibration run did not finish, so fre1
    # is short by one atom (3 frequencies); fill in the mean of the other atoms.
    if correct:
        number = fre1.shape[0]
        row = int(number/3)
        print(f'总{number} 行{row}')
        fre1 = fre1.reshape(row, 3)
        fre1_mean = np.mean(fre1, axis=0)
        fre1_mean = fre1_mean.reshape(1, 3)
        fre1 = np.concatenate((fre1, fre1_mean), axis=0)
        fre1 = fre1.reshape(number+3)
    # Reference frequencies (THz) of the vacancy's 1st/2nd neighbours and bulk Fe.
    vac_1nn = np.array([6.6160, 6.6107, 5.5804])
    vac_2nn = np.array([7.0517, 7.0461, 6.6759])
    item1 = np.power(np.prod(vac_1nn), 8) * np.power(np.prod(vac_2nn), 6)
    pure = np.array([7.0748, 7.0727, 7.0708])
    item2 = np.power(np.prod(pure), 15)
    item4 = np.prod(fre1)
    item3 = np.prod(fre2)
    # Sb = ln of the ratio of frequency products between the two configurations.
    inner = item1 * item3 / item2 / item4
    Sb = np.log(inner)
    print(f'item1: {item1}\nitem2: {item2} \nitem3: {item3} \nitem4: {item4}\n inner: {inner} Sb: {Sb}')
def run2(ele):
    """Same Sb estimate as run(), but using only a hand-picked subset of atoms."""
    # Atom indices whose per-atom vibration runs are combined for this variant.
    atoms = [53, 16, 12, 25, 14, 17, 23, 26, 39, 40, 37, 42, 31, 48, 27, 36, 30, 28, 13, 11, 5]
    fre1 = np.array([])
    for i in range(len(atoms)):
        atom = str(atoms[i])
        code = f'grep THz {ele}-Energies/{ele}-V/52Fe1{ele}1V/vib_analysis/atom{atom}/OUTCAR'
        result = zzdlib.getshellResult(code)
        fre = decode_vib_frequency(result)
        fre1 = np.concatenate((fre1, fre), axis=0)
    print(fre1.shape)
    # Reference frequencies (THz): vacancy neighbours, bulk Fe, and the solute cell.
    vac_1nn = np.array([6.6160, 6.6107, 5.5804])
    vac_2nn = np.array([7.0517, 7.0461, 6.6759])
    item1 = np.power(np.prod(vac_1nn), 8) * np.power(np.prod(vac_2nn), 6)
    pure = np.array([7.0748, 7.0727, 7.0708])
    item2 = np.power(np.prod(pure), 8)
    sol = np.array([6.4545, 6.4515, 6.4482])
    sol_1nn = np.array([7.0020, 6.9931, 6.8190])
    sol_2nn = np.array([7.1250, 7.0793, 7.0716])
    item3 = np.prod(sol) * np.power(np.prod(sol_1nn), 8) * np.power(np.prod(sol_2nn), 6)
    item4 = np.prod(fre1)
    # Sb = ln of the ratio of frequency products between the two configurations.
    inner = item1 * item3 / item2 / item4
    Sb = np.log(inner)
    print(f'item1: {item1}\nitem2: {item2} \nitem3: {item3} \nitem4: {item4}\n inner: {inner} Sb: {Sb}')
if __name__ == '__main__':
    ele = 'Cr'
    # '1' (or empty): use every atom's frequencies; anything else: the subset.
    ipt = input(f'选择功能(默认1):1.所有原子 2.部分原子')
    if ipt == '1' or ipt == '':
        # correct=True patches the one atom whose vibration run never finished.
        run(ele, correct=True)
    else:
        run2(ele)
|
zybbigpy/VaspCZ | uninstall.py | <reponame>zybbigpy/VaspCZ<filename>uninstall.py
import os, sys
import install

# Resolve the installation directory declared in install.py, expanding '~'.
home = os.path.expanduser('~')
ins_path = install.install_path if install.install_path != '~' else home
install_path = ins_path if '~/' not in ins_path else os.path.join(home, ins_path.split('~/')[-1])
# Locate the python library directory (first 'site-packages' entry on sys.path).
lib_path = None
for path in sys.path:
    if os.path.basename(path) == 'site-packages':
        lib_path = path
        break
if lib_path is None:
    raise NameError('Did not found python lib path when uninstall VaspCZ lib')
print(f'{"install path":<25}{install_path}')
print(f'{"lib path":<25}{lib_path}')
def uninstall():
    """Remove the VaspCZ software tree, its site-packages copy and its .bashrc entries."""
    print(f'{"":-<20}{"VaspCZ Uninstalling...":^20}{"":-<20}')

    current_path = os.getcwd()  # kept for parity with the original flow

    # Drop the installed software trees (VaspCZ itself and the bundled vtst scripts).
    os.chdir(install_path)
    print(f'uninstall VaspCZ software from {install_path}')
    for tree in ('VaspCZ', 'vtst'):
        if os.path.isdir(tree):
            os.system(f'rm -rf {tree}')

    # Drop the library copy from site-packages.
    print(f'uninstall lib from {lib_path}')
    os.chdir(lib_path)
    if os.path.isdir('VaspCZ'):
        os.system(f'rm -rf VaspCZ')

    print(f'remove path in .bashrc')
    # Filter out every line the installer appended to ~/.bashrc.
    bashrc = f'{os.path.expanduser("~")}/.bashrc'
    with open(bashrc) as handle:
        lines = handle.readlines()
    markers = ('#VaspCZ', f'{install_path}/VaspCZ', '#vtst', f'{install_path}/vtst')
    kept = [line for line in lines if not any(marker in line for marker in markers)]
    with open(bashrc, 'w') as handle:
        handle.writelines(kept)

    print(f'{"":-<20}{"VaspCZ Uninstalled":^20}{"":-<20}')
# Allow running this module directly: python uninstall.py
if __name__ == '__main__':
    uninstall()
|
zybbigpy/VaspCZ | othercode/qsub_energies_Opt1.0.py | <gh_stars>10-100
"""
在ele-Energies文件夹下运行
1.0
进入两个文件夹,提交结构优化。[Fe106Cr1v1, Fe107Cr1]
Cr是old-ele, 替换成新的ele
"""
import os
import argparse
import sys
sys.path.append(os.path.join(os.environ['HOME'], 'bin/VaspCZ/sourcecode'))
import zzdlib
def run(nodes, ppn, ele, oele):
    """Rename the two Opt folders from the old element *oele* to *ele* and (re)submit them."""
    for old_name in (f'Fe106{oele}1V1', f'Fe107{oele}1'):
        renamed = old_name.replace(oele, ele)
        os.system(f'mv {old_name} {renamed}')
        os.chdir(renamed)
        # INCAR and KPOINTS are reused unchanged.
        # POSCAR: swap the element symbol in place.
        zzdlib.Vasp.modify_POSCAR_ele(oele, ele)
        # POTCAR must be regenerated for the new element.
        zzdlib.Vasp.gennerate_POTCAR()
        # Vasp.sh: update the job name and resource request, then submit.
        job = f'fc{renamed[5::]}_Opt'
        zzdlib.Vasp.modify_Vasp_sh(job, nodes, ppn)
        zzdlib.Vasp.check_and_qsub()
        os.chdir('..')
if __name__ == '__main__':
    # CLI: PBS node/ppn request plus the new and old element symbols.
    parser = argparse.ArgumentParser()
    parser.add_argument('-nd', '--nodes', default='1', type=str)
    parser.add_argument('-np', '--ppn', default='8', type=str)
    parser.add_argument('-ele', '--element', default='Cr', type=str)
    parser.add_argument('-oele', '--old_element', default='Te', type=str)
    args = parser.parse_args()
    nodes = args.nodes
    ppn = args.ppn
    ele = args.element
    oele = args.old_element
    run(nodes, ppn, ele, oele)
zybbigpy/VaspCZ | sourcecode/VaspTestENCUT.py | #!/home/zhangzhengde/bin/bin/python3
# coding=utf-8
import os
import argparse
import VaspCZ.zzdlib as zzd
def modify_vasp_sh(jobname, nodes, ppn):
    """Rewrite ./Vasp.sh in place, setting the PBS job name and node/ppn request."""
    with open('./Vasp.sh', 'r') as script:
        lines = script.readlines()

    def _rewrite(line):
        # Note the leading space in the markers: it matches the template's own style.
        if ' #PBS -N' in line:
            return f' #PBS -N {jobname}\n'
        if ' #PBS -l nodes' in line:
            return f' #PBS -l nodes={nodes}:ppn={ppn}\n'
        return line

    with open('./Vasp.sh', 'w') as script:
        script.writelines(_rewrite(line) for line in lines)
def run(jobname, nodes, ppn, encut):
    """Set up and submit one ENCUT-test calculation in a subdirectory named *encut*."""
    input_files = 'INCAR,POSCAR,POTCAR,KPOINTS'.split(',')
    for i in input_files:
        if i not in os.listdir():
            raise NameError(f'ENCUT Test: input file "{i}" missing in current dir.')
    if os.path.isdir(encut):  # directory already exists: do nothing
        print(f'ENCUT:{encut} already exists, do nothing.')
        pass
    else:
        os.system('mkdir '+encut)  # create the test directory
        for file in input_files:
            if os.path.isfile(file):
                os.system(f'cp {file} {encut}')  # copy the input files in
        os.chdir(encut)  # enter the created directory
        vasp_sh_path = zzd.File.Vaspsh_path()
        os.system(f'cp {vasp_sh_path}/Vasp.sh .')
        # INCAR: substitute the ENCUT line with the value under test.
        data_INCAR = zzd.File.openFile('INCAR', 'r')
        data_new = zzd.File.substituteData(data_INCAR, keywords='ENCUT', newline=f'ENCUT={encut}\n')
        zzd.File.openFile('INCAR', 'w', data=data_new)
        # POTCAR, POSCAR and KPOINTS need no changes.
        # Vasp.sh: set the job name/resources, then submit.
        modify_vasp_sh(f'{jobname}_{encut}', nodes, ppn)
        # debugging helpers:
        #os.system('cat KPOINTS')
        #os.system('cat Vasp.sh')
        zzd.Vasp.check_and_qsub()
        os.chdir('..')
if __name__ == '__main__':
    # CLI: job-name prefix, PBS resources and the comma-separated ENCUT values to test.
    parser = argparse.ArgumentParser()
    parser.add_argument('-jb', '--jobname_prefix', default='k_test', type=str)
    parser.add_argument('-nd', '--nodes', default='1', type=str)
    parser.add_argument('-np', '--ppn', default='8', type=str)
    parser.add_argument('-EN', '--ENCUTs', default='200,250,300,350,400,450,500,550,600,650,700', type=str)
    args = parser.parse_args()
    jobname = args.jobname_prefix
    nodes = args.nodes
    ENCUTs = args.ENCUTs.split(',')
    ppn = args.ppn
    print(f'running k_point test \n parameter: \njobname_prefix:{jobname} nodes:{nodes} ppn:{ppn} \nENCUTs:{ENCUTs}')
    # Ask for confirmation before submitting one job per ENCUT value.
    inp = input('confirm run ([y]es/no): ')
    if inp in ['', 'y', 'yes', 'Y', 'Yes', 'YES']:
        for encut in ENCUTs:
            run(jobname, nodes, ppn, encut)
    else:
        print('Did not run.')
zybbigpy/VaspCZ | vtstscripts-939/kdb/remote_client.py | <reponame>zybbigpy/VaspCZ
import urllib
import httplib
import pickle
from aselite import read_any, write_vasp
import sys
import os
import shutil
import getpass
import remote_config
from kdb import Kdb
host = 'theory.cm.utexas.edu'
port = 8080
def server_create_account():
    """Interactively create a kdb server account; cache the credentials on success.

    Returns 1 on success, 0 when the user gives up.
    NOTE: Python 2 module (print statements, httplib, raw_input).
    """
    #grab info from user
    info = remote_config.RemoteConfig().config()
    #create/populate dictionary
    params = {}
    params['first'] = info[0]
    params['last'] = info[1]
    params['email'] = info[2]
    params['password'] = info[3]
    #format for http post
    params = urllib.urlencode(params)
    headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
    conn = httplib.HTTPConnection(host=host, port=port)
    #send http POST request
    conn.request('POST', '/account_create', params, headers)
    #grab results
    response = conn.getresponse()
    #print response.status, response.reason
    data = response.read()
    #if account created store email/password for later use
    if data == "account added":
        print data
        # Cache [email, password] so later inserts do not re-prompt.
        set_account_info([info[2],info[3]])
        return 1
    else:
        print data
        answer = raw_input('Would you like to try again? [y/n] ')
        if 'y' in answer.lower() :
            # Recurse until the user succeeds or declines.
            out = server_create_account()
        else:
            return 0
        return out
def get_account_info():
    """Load the cached [email, password] pair from the local .kdb file."""
    with open('.kdb', 'rb') as cache:
        return pickle.load(cache)
def set_account_info(info):
    # Persist [email, password] to the local .kdb cache for later runs.
    # NOTE(review): the file is opened in text mode ('w'); under Python 3
    # pickle requires binary mode ('wb') — confirm this module only runs
    # under Python 2 (the rest of the file uses Python 2 syntax).
    with open('.kdb', 'w') as outfile:
        pickle.dump([info[0], info[1]], outfile)
def server_insert(reactant, saddle, product, mode):
    """POST a reactant/saddle/product process (plus optional mode) to the kdb server.

    Prompts for credentials when no local cache exists; returns 0 when the
    user declines to create an account.
    """
    try:
        email_pass = get_account_info()
    except IOError:
        # No .kdb cache yet: either collect existing credentials or create an account.
        answer = raw_input('no account information found. Do you have an account? [y/n] ')
        if 'y' in answer.lower():
            email_pass = []
            email_pass.append(raw_input('email: '))
            email_pass.append(getpass.getpass('password: '))
            set_account_info(email_pass)
            print "Attempting to insert process."
        else:
            print "No problem, lets create one."
            if server_create_account():
                email_pass = get_account_info()
            else:
                return 0
    # Structures (and the optional mode) travel pickled inside the form fields.
    params = {}
    params['email'] = email_pass[0]
    params['password'] = email_pass[1]
    params['reactant'] = pickle.dumps(reactant)
    params['saddle'] = pickle.dumps(saddle)
    params['product'] = pickle.dumps(product)
    if mode is not None:
        params['mode'] = pickle.dumps(mode)
    params = urllib.urlencode(params)
    headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
    conn = httplib.HTTPConnection(host=host, port=port)
    conn.request('POST', '/insert', params, headers)
    response = conn.getresponse()
    #print response.status, response.reason
    data = response.read()
    print data
    # The server rejected the cached credentials: drop the stale cache file.
    if data == "invalid account info":
        os.remove('.kdb')
def server_query(reactant):
    """POST *reactant* to the kdb server; write suggested saddles/products to kdbmatches/."""
    params = {}
    params['reactant'] = pickle.dumps(reactant)
    params = urllib.urlencode(params)
    headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
    conn = httplib.HTTPConnection(host=host, port=port)
    conn.request('POST', '/query', params, headers)
    response = conn.getresponse()
    #print response.status, response.reason
    data = response.read()
    # The server answers with a pickled {id: (saddle, product, mode)} mapping.
    suggestion_dict = pickle.loads(data)
    # Start from an empty kdbmatches/ directory (recreate it if it exists).
    try:
        os.mkdir('kdbmatches')
    except:
        shutil.rmtree('kdbmatches')
        os.mkdir('kdbmatches')
    for key in suggestion_dict:
        write_vasp('kdbmatches' + "/SADDLE_%d" % key, suggestion_dict[key][0])
        write_vasp('kdbmatches' + "/PRODUCT_%d" % key, suggestion_dict[key][1])
        # Not every suggestion carries a mode; ignore failures.
        try:
            Kdb().save_mode('kdbmatches' + "/MODE_%d" % key, suggestion_dict[key][2])
        except:
            pass
    print "done, output now in kdbmatches/"
def run(args):
if not Kdb().check_version():
sys.exit()
if len(args) < 1:
print "first parameter sohuld be either: inert or query"
sys.exit()
if args[0] == 'insert':
if len(args) < 4:
print "parameters for insert should include reactant, saddle, and product files."
sys.exit()
try:
reactant = read_any(args[1])
saddle = read_any(args[2])
product = read_any(args[3])
except IOError:
print "One or more files could not be read."
sys.exit()
try:
mode = Kdb().load_mode(args[3])
except:
mode = None
server_insert(reactant, saddle, product, mode)
elif args[0] == 'query':
if len(args) < 2:
print "parameters for query should include a reactant file."
sys.exit()
try:
reactant = read_any(args[1])
except IOError:
print "could not read reactant file."
sys.exit()
server_query(reactant)
# Script entry point: forward the command-line arguments (minus argv[0]).
if __name__ == "__main__":
    args = sys.argv[1:]
    run(args)
|
zybbigpy/VaspCZ | sourcecode/VaspCheckResults.py | import os
import VaspCZ.zzdlib as zzd
import argparse
def CheckWARNING(usript):
    """Walk the tree below cwd and report the status of every VASP job found.

    usript: '1' -> job status (and WARNING/ERROR scan) only,
            '2' -> energy summary table only,
            '3' -> both.
    (Original note: usrinput为1时,只打印是否完成,为2时候,只打印能量信息,为3时,打印全部)
    """
    # Columns of the per-job energy summary table, filled in the '2'/'3' branch.
    Path = []
    Energy = []
    Step = []
    mag = []
    dist = []
    RMS = []
    warnflag = 0  # set to 1 once any WARNING/ERROR line has been printed
    # Status labels: running / done / SC running / SC done / stopped.
    jstat = ['Running...', 'Done', 'SC Running...', 'SC done', 'Stoped']
    for dirpath, dirnames, filenames in os.walk('./'):
        # A directory counts as a VASP job when the three core inputs are present.
        if 'INCAR' in filenames and 'POSCAR' in filenames and 'Vasp.sh' in filenames:
            data_Sh = zzd.File.openFile(dirpath + '/Vasp.sh', 'r')
            data_INCAR = zzd.File.openFile(dirpath + '/INCAR', 'r')
            SYSTEM = zzd.File.getLine(data_INCAR, 'SYSTEM')[0].split('=')[-1]
            jobname = zzd.File.getLine(data_Sh, '#PBS -N')[0].strip('\n').split()[-1]
            # jobstatus = zzd.Vasp.checkJobstatus(jobname)
            # # print('Path:{} {} {}'.format(dirpath,jobstatus,jobname))
            # if jobstatus == 'Q':
            #     print('Path:{:<60}任务正在排队... '.format(dirpath))
            # elif jobstatus == 'R' or 'log' in filenames: # 正在计算和算完了用相同的检测方法
            if 'log' in filenames:
                if usript == '1' or usript == '3':
                    data_log = zzd.File.openFile(dirpath + '/log', 'r')
                    reachRA = zzd.File.getLine(data_log, 'reached required accuracy')[0]
                    # Last electronic-minimiser counters; fall back to '0 ...' when absent.
                    try:
                        RMM = zzd.File.getAllline(data_log, 'RMM:')[-1].split()[1]
                        RMM = RMM + ' RMM'
                    except:
                        RMM = '0 RMM'
                    try:
                        DAV = zzd.File.getAllline(data_log, 'DAV:')[-1].split()[1]
                        DAV = DAV + ' DAV'
                    except:
                        DAV = '0 DAV'
                    termination = zzd.File.getLine(data_log, 'Ctrl-C caught... cleaning up processes')[0]
                    try:
                        ionstep = zzd.File.getAllline(data_log, 'F=')[-1].split()[0]
                    except:
                        ionstep = '0'
                    # print('Path:{} {} {}'.format(dirpath,reachRA,v1F))
                    if 'reached required accuracy' in reachRA:
                        print('Path:{:<60}{:<15}{:>3}F{:>9}'.format(dirpath, jstat[1], ionstep, RMM))
                    elif SYSTEM == 'Self' or SYSTEM == 'Static' or SYSTEM == 'self' or SYSTEM == 'static':
                        # SC/static runs never print 'reached required accuracy';
                        # judge them by their (single) ionic step instead.
                        vRMM = zzd.File.getLine(data_log, 'RMM:')[0]
                        vDAV = zzd.File.getLine(data_log, 'DAV:')[0]
                        if ('DAV:' in vDAV or 'RMM:' in vRMM) and ionstep == '0':
                            print('Path:{:<60}{:<15}{:>3}F{:>9}{:>7}{:>5}'.format(dirpath, jstat[2], ionstep, RMM, DAV,
                                                                                  '*'))
                        elif ionstep == '1':  # not "reached", but one F step means the SC run finished
                            print('Path:{:<60}{:<15}{:>3}F{:>9}'.format(dirpath, jstat[3], ionstep, RMM))
                    elif 'cleaning up processes' in termination:
                        print('Path:{:<60}{:<15}{:>3}F{:>9}{:>7}'.format(dirpath, jstat[4], ionstep, RMM, DAV))
                    else:
                        print(
                            'Path:{:<60}{:<15}{:>3}F{:>9}{:>7}{:>5}'.format(dirpath, jstat[0], ionstep, RMM, DAV, '*'))
                    # Scan log and OUTCAR for WARNING / ERROR lines.
                    try:
                        data_OUTCAR = zzd.File.openFile(dirpath + '/OUTCAR', 'r')
                        WARNING_log = zzd.File.getAllline(data_log, 'WARNING')
                        WARNING_OUT = zzd.File.getAllline(data_OUTCAR, 'WARNING')
                        ERROR_log = zzd.File.getAllline(data_log, 'ERROR')
                        if WARNING_log != [] or WARNING_OUT != []:
                            print('Path:{:<60} 出现警告'.format(dirpath, ionstep))
                            zzd.File.printData(WARNING_log)
                            zzd.File.printData(WARNING_OUT)
                            warnflag = 1
                        if ERROR_log != []:
                            zzd.File.printData(ERROR_log)
                            warnflag = 1
                    except Exception as e:
                        print('path:{} {}'.format(dirpath, e))
                if usript == '2' or usript == '3':
                    # Collect one summary row per job from the last 'F=' line of log.
                    data_log = zzd.File.openFile(dirpath + '/log', 'r')
                    try:
                        log_lastF = zzd.File.getAllline(data_log, 'F=')[-1]
                    except:
                        log_lastF = '0 F= 0 E0= 0 d E 0 mag= 0'  # log exists but no step finished yet
                    if len(dirpath) > 15:
                        path = '...' + dirpath[-15:]
                    else:
                        path = dirpath
                    Path.append(path)
                    Energy.append(log_lastF.split()[4])
                    Step.append(log_lastF.split()[0])
                    try:
                        mag.append(log_lastF.split()[9])
                    except:
                        mag.append('0')
                    dist.append(
                        zzd.getshellResult('dist.pl ' + dirpath + '/POSCAR ' + dirpath + '/CONTCAR')[-1].strip('\n'))
                    try:
                        RMS.append(zzd.getshellResult('grep RMS ' + dirpath + '/OUTCAR')[-1].split()[4])
                    except:  # grep returned an empty list (run not finished)
                        RMS.append('0')
    if (usript == '1' or usript == '3') and warnflag == 0:
        print('无警告或错误')
    if (usript == '2' or usript == '3'):
        print('{:<18}{:<11}{:<5}{:^10}{:^10}{:^10}'.format('路径', '能量', '步数', 'mag', 'dist', 'RMS'))
        for i in range(len(Path)):
            print('{:<20}{:<13.4f}{:<7}{:<12.4f}{:<10.4f}{:<10.4f}'.format(Path[i], eval(Energy[i]), Step[i],
                                                                           eval(mag[i]), eval(dist[i]), eval(RMS[i])))
if __name__ == '__main__':
    # -func: '1' status only, '2' energies only, '3' (default) both.
    parser = argparse.ArgumentParser()
    parser.add_argument('-func', '--function', default='3', type=str)
    args = parser.parse_args()
    CheckWARNING(args.function)
zybbigpy/VaspCZ | vtstscripts-939/insplot.py | #!/usr/bin/env python
from os import system
from os.path import dirname, abspath, join
# Directory holding the bundled gnuplot script.
vtst_path = dirname(abspath(__file__))
# Keep the 'ut' data lines (dropping the 'itr' header), trimmed to the plot columns.
system("grep ut insout.dat | grep -v itr | cut -c 5-100 > o.u.t.t.e.m.p")
system("gnuplot %s" % join(vtst_path, 'insplot.gnu'))
# Remove the temporary data file again.
system("rm o.u.t.t.e.m.p")
|
zybbigpy/VaspCZ | vtstscripts-939/con2xyz.py | <filename>vtstscripts-939/con2xyz.py<gh_stars>10-100
#!/usr/bin/env python
import aselite
from sys import argv, exit
# Convert an eOn .con file to .xyz format. (Python 2 script.)
if len(argv) < 2 or '-h' in argv:
    print "usage: con2xyz.py FILENAME\n"
    exit(0)
filename = argv[1]
atoms = aselite.read_con(filename)
# NOTE(review): replaces the first occurrence of 'con' anywhere in the name,
# not just the extension — confirm filenames never contain 'con' elsewhere.
aselite.write_xyz(filename.replace('con', 'xyz'), atoms)
|
zybbigpy/VaspCZ | sourcecode/VaspCZ1.0.0.py | <reponame>zybbigpy/VaspCZ
#!/home/zhangzhengde/bin/bin/python3
#coding=utf-8
import os, sys
import zzdlib as zzd
import subprocess
import string
import shutil
def CheckWARNING(usript):
#usrinput为1时,只打印是否完成,为2时候,只打印能量信息,为3时,打印全部
Path = []
Energy =[]
Step =[]
mag =[]
dist =[]
RMS =[]
warnflag = 0
jstat = ['Running...','Done','SC Running...','SC done','Stoped']
for dirpath,dirnames,filenames in os.walk('./'):
if 'INCAR' in filenames and 'POSCAR' in filenames and 'Vasp.sh' in filenames:
data_Sh = zzd.File.openFile(dirpath+'/Vasp.sh','r')
data_INCAR = zzd.File.openFile(dirpath+'/INCAR','r')
SYSTEM = zzd.File.getLine(data_INCAR,'SYSTEM')[0].split('=')[-1]
jobname = zzd.File.getLine(data_Sh,'#PBS -N')[0].strip('\n').split()[-1]
jobstatus = zzd.Vasp.checkJobstatus(jobname)
#print('Path:{} {} {}'.format(dirpath,jobstatus,jobname))
if jobstatus == 'Q':
print('Path:{:<60}任务正在排队... '.format(dirpath))
elif jobstatus == 'R' or 'log' in filenames: #正在计算和算完了用相同的检测方法
if usript == '1' or usript =='3':
data_log = zzd.File.openFile(dirpath+'/log','r')
reachRA = zzd.File.getLine(data_log,'reached required accuracy')[0]
try:
RMM = zzd.File.getAllline(data_log,'RMM:')[-1].split()[1]
RMM = RMM+' RMM'
except:
RMM = '0 RMM'
try:
DAV = zzd.File.getAllline(data_log,'DAV:')[-1].split()[1]
DAV = DAV+' DAV'
except:
DAV = '0 DAV'
termination = zzd.File.getLine(data_log,'Ctrl-C caught... cleaning up processes')[0]
try:
ionstep = zzd.File.getAllline(data_log,'F=')[-1].split()[0]
except:
ionstep = '0'
#print('Path:{} {} {}'.format(dirpath,reachRA,v1F))
if 'reached required accuracy' in reachRA:
print('Path:{:<60}{:<15}{:>3}F{:>9}'.format(dirpath,jstat[1],ionstep,RMM))
elif SYSTEM == 'Self' or SYSTEM =='Static' or SYSTEM == 'self' or SYSTEM =='static':
vRMM = zzd.File.getLine(data_log,'RMM:')[0]
vDAV = zzd.File.getLine(data_log,'DAV:')[0]
if ('DAV:' in vDAV or 'RMM:' in vRMM) and ionstep =='0':
print('Path:{:<60}{:<15}{:>3}F{:>9}{:>7}{:>5}'.format(dirpath,jstat[2],ionstep,RMM,DAV,'*'))
elif ionstep == '1': #没有完成,但有1F了
print('Path:{:<60}{:<15}{:>3}F{:>9}'.format(dirpath,jstat[3],ionstep,RMM))
elif 'cleaning up processes' in termination:
print('Path:{:<60}{:<15}{:>3}F{:>9}{:>7}'.format(dirpath,jstat[4],ionstep,RMM,DAV))
else:
print('Path:{:<60}{:<15}{:>3}F{:>9}{:>7}{:>5}'.format(dirpath,jstat[0],ionstep,RMM,DAV,'*'))
#检查警告
try:
data_OUTCAR = zzd.File.openFile(dirpath+'/OUTCAR','r')
WARNING_log = zzd.File.getAllline(data_log,'WARNING')
WARNING_OUT = zzd.File.getAllline(data_OUTCAR,'WARNING')
ERROR_log = zzd.File.getAllline(data_log,'ERROR')
if WARNING_log !=[] or WARNING_OUT !=[]:
print('Path:{:<60} 出现警告'.format(dirpath,ionstep))
zzd.File.printData(WARNING_log)
zzd.File.printData(WARNING_OUT)
warnflag = 1
if ERROR_log !=[]:
zzd.File.printData(ERROR_log)
warnflag =1
except Exception as e:
print('path:{} {}'.format(dirpath,e))
if usript == '2' or usript == '3':
#打印能量信息
data_log = zzd.File.openFile(dirpath+'/log','r')
try:
log_lastF = zzd.File.getAllline(data_log,'F=')[-1]
except:
log_lastF = '0 F= 0 E0= 0 d E 0 mag= 0' #在存在log,Log中连1步都没算完的时候
if len(dirpath) >15:
path = '...'+dirpath[-15:]
else:
path = dirpath
Path.append(path)
Energy.append(log_lastF.split()[4])
Step.append(log_lastF.split()[0])
try:
mag.append(log_lastF.split()[9])
except:
mag.append('0')
dist.append(zzd.getshellResult('dist.pl '+dirpath+'/POSCAR '+dirpath+'/CONTCAR')[-1].strip('\n'))
try:
RMS.append(zzd.getshellResult('grep RMS '+dirpath+'/OUTCAR')[-1].split()[4])
except:#如果出现没算完,grep返回一个空的列表的时候
RMS.append('0')
if (usript == '1' or usript == '3') and warnflag == 0:
print('无警告或错误')
if (usript == '2' or usript == '3'):
print('{:<18}{:<11}{:<5}{:^10}{:^10}{:^10}'.format('路径','能量','步数','mag','dist','RMS'))
for i in range(len(Path)):
print('{:<20}{:<13.4f}{:<7}{:<12.4f}{:<10.4f}{:<10.4f}'.format(Path[i],eval(Energy[i]),Step[i],eval(mag[i]),eval(dist[i]),eval(RMS[i])))
def VaspNEBCheckDist(POSorCONT):
    """Print the image-to-image distance (via dist.pl) along a NEB chain.

    POSorCONT: 'POSCAR' or 'CONTCAR' — which geometry file to compare for the
    intermediate images; the two endpoints (00 and 0{IMAGES+1}) always use POSCAR.
    Must be run from a NEB directory (contains 00/ and INCAR).
    """
    if '00' in os.listdir() and 'INCAR' in os.listdir():
        data_log = zzd.File.openFile('./INCAR')
        image = zzd.File.getLine(data_log, 'IMAGES')[0].split('=')[-1].strip('\n')
        # The '0{i}' naming below only works for single-digit image counts.
        if int(image) <= 9:
            #os.system('cp ini/CONTCAR 00/CONTCAR')
            #os.system('cp fin/CONTCAR 0'+str(int(image)+1)+'/CONTCAR')
            for i in range(0, int(image)+1):
                if i == 0:
                    dist = zzd.getshellResult('dist.pl ./0'+str(i)+'/POSCAR ./0'+str(i+1)+'/'+POSorCONT)
                elif i == int(image):
                    dist = zzd.getshellResult('dist.pl ./0'+str(i)+'/'+POSorCONT+' ./0'+str(i+1)+'/POSCAR')
                else:
                    dist = zzd.getshellResult('dist.pl ./0'+str(i)+'/'+POSorCONT+' ./0'+str(i+1)+'/'+POSorCONT)
                print('{} 0{}-0{} {}'.format(POSorCONT, i, i+1, dist[0].strip('\n')))
        else:
            print('image too large')
    else:
        print('当前不在NEB目录,退出程序')
        exit()
def VaspNEBCheckRMS():
    """Print, per ionic step, the RMS force of every NEB image plus their sum.

    Generalized from the original hard-coded 3-image version: the number of
    movable images is read from the IMAGES tag in INCAR (output is identical
    to the original for IMAGES=3). Must be run from a NEB directory.
    """
    print('CheckNEBRMS is running...')
    if '00' in os.listdir() and 'INCAR' in os.listdir():
        data_INCAR = zzd.File.openFile('./INCAR')
        data_log = zzd.File.openFile('./log')
        image = zzd.File.getLine(data_INCAR, 'IMAGES')[0].split('=')[-1].strip('\n')
        # Number of finished ionic steps, from the last 'F=' line of log.
        ionstep = zzd.File.getAllline(data_log, 'F=')[-1].split()[0]
        # The '0{i}' directory naming only works for single-digit image counts.
        if int(image) <= 9:
            n_images = int(image)
            # RMS lines of every movable image's OUTCAR (grep order == step order).
            rms_per_image = [zzd.getshellResult(f'grep RMS 0{i}/OUTCAR')
                             for i in range(1, n_images + 1)]
            header = ['Step'] + [f'0{i}-RMS' for i in range(1, n_images + 1)]
            header.append('+'.join(f'0{i}' for i in range(1, n_images + 1)))
            print(('{:^3}' + '{:^10}' * (n_images + 1)).format(*header))
            for step in range(int(ionstep)):
                values = [data[step].split()[4] for data in rms_per_image]
                total = sum(float(v) for v in values)
                print(('{:>3}' + '{:>10}' * n_images + '{:>10.6f}').format(step + 1, *values, total))
    else:
        print('不在NEB目录,退出程序')
        exit()
if __name__ =='__main__':
VaspCZ_path = os.path.dirname(os.path.abspath(__file__))+ '/sourcecode'
while True:
ipt = input('''
|============================================================|
| VASP TOOLS |
|------------------------------------------------------------|
| (1)Check |
| (2)NEBCheck |
| (3)VaspOpt-Sta |
| (4)VaspINFISta-NEB |
| (5)VaspKeepInputs |
| (6)VaspModiFile |
| (7)VaspGetNELM |
| (8)VaspQsub |
| (9)VaspMiniTools |
| (0)Quit |
|------------------------------------------------------------|
| by: <NAME> (<EMAIL>) |
|============================================================|
Input(default=1): ''')
if ipt == '1' or ipt == '':
while True:
ipt1 = input('''
|============================================================|
| Check Parameter |
|------------------------------------------------------------|
| (1)Only Job Status |
| (2)Current Results |
| (3)All |
| (0)Back |
|============================================================|
Input(default=3): ''')
if ipt1 == '':
ipt1 = '3'
if ipt1 != '0':
print('\nRun Vasp Check...')
CheckWARNING(ipt1)
exit()
else:
break
elif ipt == '2':
while True:
ipt2 = input('''
|============================================================|
| Check NEB Parameter |
|------------------------------------------------------------|
| (1)Only Job Status |
| (2)NEB Results |
| (3)Above |
| (4)NEB Barrier |
| (0)Back |
|============================================================|
Input(default=4): ''')
if ipt2 == '1':
subprocess.call(f'{sys.executable} {VaspCZ_path}/NEBCheck1.1.py --func=1',shell=True)
elif ipt2 == '2':
subprocess.call(f'{sys.executable} {VaspCZ_path}/NEBCheck.py --func=2',shell=True)
elif ipt2 == '3':
subprocess.call(f'{sys.executable} {VaspCZ_path}/NEBCheck.py --func=3',shell=True)
elif ipt2 == '4' or ipt2 == '':
subprocess.call(f'{sys.executable} {VaspCZ_path}/NEBCheck.py --func=4',shell=True)
elif ipt2 == '0':
break
else:
continue
exit()
elif ipt == '3':
ipt3plus = input('''
|============================================================|
| Vasp Opt-Sta |
|------------------------------------------------------------|
| (1) Opt-Sta (Current Folder) |
| (2) INI FIN Opt-Sta |
| (0) back |
|============================================================|
Input:(defalut=1) ''')
while True:
if ipt3plus == '0':
break
elif ipt3plus == '1' or ipt3plus == '':
subprocess.call(f'{sys.executable} {VaspCZ_path}/VaspOpt-Sta.py')
exit()
elif ipt3plus == '2':
ipt3 = input('''
|============================================================|
| Vasp INI FIN Opt-Sta |
|------------------------------------------------------------|
| This tool is used for Running self-consistent calculations|
|when structure optimization are finished in ini/Opt and fin/|
|Opt respectively. |
|------------------------------------------------------------|
| Parameter $1 $2 $3 represent nodes,ppn and isEMERGENCY re-|
|spectively. |
| i.e input '2 12 yes' means nodes=2,ppn=12 and run on EMER-|
|GENCY node. Default setting is from ini/Opt/Vasp.sh and fin-|
|/Opt/Vasp.sh |
| (1) 0 0 default
| (0) back
|============================================================|
Input: ''')
while True:
if ipt3 == '0':
break
else:
try:
nc = ipt3.split()[0]+','+ipt3.split()[1]
except:
nc = '0,0'
try:
EMER = ipt3.split()[2]
except:
EMER='default'
subprocess.call(f'{sys.executable} {VaspCZ_path}/VaspINFIOpt-Sta.py --nc='+nc+' --EMER='+EMER,shell=True)
exit()
elif ipt == '4':
ipt4 = input('''
|============================================================|
| Vasp INI FIN Sta-NEB |
|------------------------------------------------------------|
| This tool is used for Running NEB calculations when self- |
|onsistent are finished in ini/ and fin/ respectively. |
|------------------------------------------------------------|
| Parameter $1 $2 $3 represent nodes,ppn and isEMERGENCY re-|
|spectively. |
| i.e input '2 12 yes' means nodes=2,ppn=12 and run on EMER-|
|GENCY node. Default setting is from ini/Opt/Vasp.sh and fin-|
|/Opt/Vasp.sh |
| (1) 0 0 default |
| (0) back |
|============================================================|
Input: ''')
while True:
if ipt4 == '0':
break
else:
try:
nc = ipt4.split()[0]+','+ipt4.split()[1]
except:
nc = '0,0'
try:
EMER = ipt4.split()[2]
except:
EMER ='default'
subprocess.call(f'{sys.executable} {VaspCZ_path}/VaspINFISta-NEB.py --nc='+nc+' --EMER='+EMER,shell=True)
exit()
elif ipt == '5':
subprocess.call(f'{sys.executable} {VaspCZ_path}/VaspKeepInputs.py',shell=True)
break
elif ipt == '6':
subprocess.call(f'{sys.executable} {VaspCZ_path}/VaspModiFile.py',shell=True)
break
elif ipt == '7':
subprocess.call(f'{sys.executable} {VaspCZ_path}/VaspGetNELM.py',shell=True)
break
elif ipt == '8':
subprocess.call(f'{sys.executable} {VaspCZ_path}/VaspQsub.py',shell=True)
break
elif ipt == '9':
while True:
ipt9 = input('''
|============================================================|
| VASP Mini Tools |
|------------------------------------------------------------|
| (1)VaspNEBCheckdist |
| (2)VaspNEBCheckRMS |
| (3)VaspNEBKeepINFI_Optinputs |
| (4)NEBKeepInputs |
| (5)k-point mesh test |
| (0)back |
|============================================================|
Input(default=2): ''')
if ipt9 == '0':
break
elif ipt9 == '1':
ipt91 = input('Check POS or CONT(default=POS): ')
if ipt91 == 'CONT':
VaspNEBCheckDist('CONTCAR')
else:
VaspNEBCheckDist('POSCAR')
elif ipt9 == '' or ipt9 == '2':
VaspNEBCheckRMS()
elif ipt9 == '3':
subprocess.call(f'{sys.executable} {VaspCZ_path}/VaspNEBKeepINFI_OptInputs.py',shell=True)
elif ipt9 == '4':
subprocess.call(f'{sys.executable} {VaspCZ_path}/NEBKeepInputs.py',shell=True)
elif ipt9 == '5':
while True:
ipt95 = input('''
|============================================================|
| VASP k-point mesh test |
|------------------------------------------------------------|
| This is the Vasp k-point mesh test tool. Please prepare t-|
|he input files (INCAR, POSCAR, POTCAR, KPOINTS, Vasp.sh) of |
|VASP in current directory before run the tool. |
| Please input the jobname_prefix, nodes, ppn and the k_mesh|
|you want to test. (separate with one white space) |
| For example: --jobname_prefix=k_test --nodes=1 --ppn=8 |
| --k_mesh=111,333,555,777,999 |
| ----------------------------------------- |
| parameter default |
| ----------------------------------------- |
| jobname_prefix k_test |
| nodes 1 |
| ppn 8 |
| k_mesh 111,333,555,777,999 |
| ----------------------------------------- |
| Go back while input 0 |
|============================================================|
Input: ''')
if ipt95 == '0':
break
else:
try:
inputs = ipt95.split()
dp = {
'jobname_prefix':'k_test', 'nodes':'1', 'ppn':'8',
'k_mesh': '111,333,555,777,999'}
for item in inputs:
para, value = item.strip('--').split('=')
print(f'输入的参数为{para}:{value}')
dp[para] = value
# print(dp)
code = f'{sys.executable} {VaspCZ_path}/k_point_test.py --jobname_prefix={dp["jobname_prefix"]} --nodes={dp["nodes"]} --ppn={dp["ppn"]} --k_mesh={dp["k_mesh"]}'
print(code)
subprocess.call(code, shell=True)
exit()
except Exception as e:
print(f'Vasp k-mesh test error: {e}')
continue
else:
continue
elif ipt == '0':
exit()
else:
print('Error Input')
continue
|
tdsimao/codewars_solutions | katas/kyu_4/sudoko_validator.py | """
https://www.codewars.com/kata/529bf0e9bdf7657179000008
"""
def validSolution(board):
    """Return True iff *board* is a valid 9x9 sudoku solution.

    A solution is valid when every row, every column and every 3x3
    sub-region passes its own check.
    """
    checks = (valid_rows, valid_columns, valid_sub_regions)
    return all(check(board) for check in checks)
def valid_rows(board):
    """Return True when no row of *board* fails the duplicate check."""
    return not any(has_repeated_values(row) for row in board)
def valid_columns(board):
    """Return True when no column of *board* fails the duplicate check."""
    # zip(*board) transposes the grid, yielding one tuple per column.
    return not any(has_repeated_values(column) for column in zip(*board))
def valid_sub_regions(board):
    """Return True when none of the nine 3x3 sub-grids fails the duplicate check."""
    corners = ((x, y) for x in (0, 3, 6) for y in (0, 3, 6))
    return not any(
        has_repeated_values(sub_region(board, x, y)) for x, y in corners
    )
def sub_region(board, x, y):
    """Yield the nine cells of the 3x3 block whose top-left corner is (x, y)."""
    for row in board[x:x + 3]:
        yield from row[y:y + 3]
def has_repeated_values(values, values_len=9):
    """Return True when *values* is NOT exactly the digits 1..values_len.

    Bug fix: the original only compared ``len(set(values))`` against
    ``values_len``, so a group containing the empty-cell marker 0 — e.g.
    ``[0, 1, 2, 3, 4, 5, 6, 7, 8]`` — was accepted because its values are
    distinct. A valid sudoku group must be a permutation of 1..9, so we
    compare against that exact set instead (same signature, callers
    unchanged).
    """
    return set(values) != set(range(1, values_len + 1))
|
neoGurb/Home-Assisant-FusionSolar-Kiosk | custom_components/fusion_solar_kiosk/fusion_solar_kiosk_api.py | """API client for FusionSolar Kiosk."""
import logging
import html
import json
from .const import (
ATTR_DATA,
ATTR_FAIL_CODE,
ATTR_SUCCESS,
ATTR_DATA_REALKPI,
)
from requests import get
_LOGGER = logging.getLogger(__name__)
class FusionSolarKioksApi:
    """Client for the FusionSolar kiosk endpoint (no login required).

    NOTE(review): the class name keeps the original 'Kioks' spelling so
    that existing callers are not broken.
    """

    def __init__(self, host):
        # Base URL of the FusionSolar installation this client talks to.
        self._host = host

    def getRealTimeKpi(self, id: str):
        """Fetch real-time KPI data for the kiosk identified by *id*.

        Returns the decoded realKpi payload on success; on any handled
        failure it logs the error and returns ``{ATTR_SUCCESS: False}``.
        """
        url = self._host + '/pvmswebsite/nologin/assets/build/index.html#/kiosk?kk=' + id
        headers = {
            'accept': 'application/json',
        }
        # Bug fix: initialise `response` so the failure path below cannot
        # reference an unbound name when get() itself raises.
        response = None
        try:
            response = get(url, headers=headers)
            jsonData = response.json()
            if not jsonData[ATTR_SUCCESS]:
                raise FusionSolarKioskApiError(f'Retrieving the data failed with failCode: {jsonData[ATTR_FAIL_CODE]}, data: {jsonData[ATTR_DATA]}')
            # The data attribute arrives as an HTML-escaped JSON string:
            # unescape, parse, then pick out the real-time KPI section.
            jsonData[ATTR_DATA] = json.loads(html.unescape(jsonData[ATTR_DATA]))
            return jsonData[ATTR_DATA][ATTR_DATA_REALKPI]
        except FusionSolarKioskApiError as error:
            _LOGGER.error(error)
        except (ValueError, KeyError, TypeError) as error:
            # Bug fix: the original had a second, byte-identical (and
            # therefore unreachable) except clause here. It presumably was
            # meant to cover malformed responses, so catch JSON-decode and
            # payload-shape errors explicitly instead.
            _LOGGER.error(error)
        if response is not None:
            _LOGGER.debug(response.text)
        return {
            ATTR_SUCCESS: False
        }
class FusionSolarKioskApiError(Exception):
    """Raised when the kiosk endpoint reports a failure (success flag false)."""
    pass
|
BeginToCodeSnaps/PythonSnaps | EG3-05 housemartins.py | # EG3-05 housemartins
import snaps

# Show the Housemartins photo with a white caption overlaid at the top.
IMAGE_FILE = 'Housemartins.jpg'
CAPTION = 'Hull Rocks'
WHITE = (255, 255, 255)

snaps.display_image(IMAGE_FILE)
snaps.display_message(CAPTION, color=WHITE, vert='top')
|
BeginToCodeSnaps/PythonSnaps | EG3-01 Throw a single die.py | <filename>EG3-01 Throw a single die.py
# EG3-01: simulate one throw of a six-sided die and print the result.
import random  # the random library supplies the pseudo-random generator

# randrange(1, 7) picks from 1..6 inclusive, exactly like randint(1, 6).
print(random.randrange(1, 7))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.